2 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
21 #include <drm/drm_crtc.h>
22 #include <linux/debugfs.h>
27 #include "dsi_display.h"
33 #include "sde_core_irq.h"
34 #include "sde_formats.h"
35 #include "sde_hw_vbif.h"
37 #include "sde_encoder.h"
38 #include "sde_plane.h"
41 #define CREATE_TRACE_POINTS
42 #include "sde_trace.h"
45 * Controls size of event log buffer. Specified as a power of 2.
47 #define SDE_EVTLOG_SIZE 1024
50 * To enable overall DRM driver logging
51 * # echo 0x2 > /sys/module/drm/parameters/debug
53 * To enable DRM driver h/w logging
54 * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
56 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
58 #define SDE_DEBUGFS_DIR "msm_sde"
59 #define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
62 * sdecustom - enable certain driver customizations for sde clients
63 * Enabling this modifies the standard DRM behavior slightly and assumes
64 * that the clients have specific knowledge about the modifications that
65 * are involved, so don't enable this unless you know what you're doing.
67 * Parts of the driver that are affected by this setting may be located by
68 * searching for invocations of the 'sde_is_custom_client()' function.
70 * This is enabled by default in this build (see the 'sdecustom' initializer
70 * below); pass sdecustom=0 on the module command line to disable it.
72 static bool sdecustom = true;
73 module_param(sdecustom, bool, 0400);
74 MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
76 static int sde_kms_hw_init(struct msm_kms *kms);
77 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
/*
 * sde_is_custom_client - report whether sde client customizations are active.
 * Presumably returns the 'sdecustom' module parameter; body not visible in
 * this excerpt — TODO confirm against the full file.
 */
79 bool sde_is_custom_client(void)
84 #ifdef CONFIG_DEBUG_FS
/*
 * _sde_danger_signal_status - dump MDP/SSPP/WB danger or safe signal status
 * into a debugfs seq_file. Enables the core power resource around the
 * hardware read and disables it afterwards.
 */
85 static int _sde_danger_signal_status(struct seq_file *s,
88 struct sde_kms *kms = (struct sde_kms *)s->private;
89 struct msm_drm_private *priv;
90 struct sde_danger_safe_status status;
/* validate everything dereferenced below before touching hardware */
93 if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
94 SDE_ERROR("invalid arg(s)\n");
98 priv = kms->dev->dev_private;
99 memset(&status, 0, sizeof(struct sde_danger_safe_status));
/* clocks must be on while reading the status registers */
101 sde_power_resource_enable(&priv->phandle, kms->core_client, true);
103 seq_puts(s, "\nDanger signal status:\n");
104 if (kms->hw_mdp->ops.get_danger_status)
105 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
108 seq_puts(s, "\nSafe signal status:\n");
/*
 * NOTE(review): this "safe" branch calls get_danger_status again — looks
 * like a copy-paste bug; upstream uses a get_safe_status op here. Confirm
 * against the full file before changing.
 */
109 if (kms->hw_mdp->ops.get_danger_status)
110 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
113 sde_power_resource_enable(&priv->phandle, kms->core_client, false);
115 seq_printf(s, "MDP : 0x%x\n", status.mdp);
/* per-pipe status, indices printed relative to the first VIG pipe */
117 for (i = SSPP_VIG0; i < SSPP_MAX; i++)
118 seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
122 for (i = WB_0; i < WB_MAX; i++)
123 seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
/*
 * Helper macro: expands to a single_open()-based open handler plus a
 * file_operations table named <prefix>_fops for a <prefix>_show function.
 */
130 #define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
131 static int __prefix ## _open(struct inode *inode, struct file *file) \
133 return single_open(file, __prefix ## _show, inode->i_private); \
135 static const struct file_operations __prefix ## _fops = { \
136 .owner = THIS_MODULE, \
137 .open = __prefix ## _open, \
138 .release = single_release, \
140 .llseek = seq_lseek, \
/* danger-status seq_file show: true selects the danger report */
143 static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
145 return _sde_danger_signal_status(s, true);
147 DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
/* safe-status seq_file show: false selects the safe report */
149 static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
151 return _sde_danger_signal_status(s, false);
153 DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
/* Tear down the "danger" debugfs directory and clear the cached dentry. */
155 static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
157 debugfs_remove_recursive(sde_kms->debugfs_danger);
158 sde_kms->debugfs_danger = NULL;
/*
 * Create the "danger" debugfs directory under @parent with danger_status
 * and safe_status entries. 0644: root-writable, world-readable.
 */
161 static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
162 struct dentry *parent)
164 sde_kms->debugfs_danger = debugfs_create_dir("danger",
166 if (!sde_kms->debugfs_danger) {
167 SDE_ERROR("failed to create danger debugfs\n");
171 debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
172 sde_kms, &sde_debugfs_danger_stats_fops);
173 debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
174 sde_kms, &sde_debugfs_safe_stats_fops);
/*
 * _sde_debugfs_show_regset32 - seq_file show handler that hex-dumps a
 * 32-bit register window (regset->offset, regset->blk_len) from the mapped
 * mmio, 4 words per 16-byte row, with clocks enabled around the reads.
 */
179 static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
181 struct sde_debugfs_regset32 *regset;
182 struct sde_kms *sde_kms;
183 struct drm_device *dev;
184 struct msm_drm_private *priv;
188 if (!s || !s->private)
193 sde_kms = regset->sde_kms;
194 if (!sde_kms || !sde_kms->mmio)
201 priv = dev->dev_private;
205 base = sde_kms->mmio + regset->offset;
207 /* insert padding spaces, if needed */
/* align the first row to a 16-byte boundary so columns line up */
208 if (regset->offset & 0xF) {
209 seq_printf(s, "[%x]", regset->offset & ~0xF);
210 for (i = 0; i < (regset->offset & 0xF); i += 4)
/* registers are unreadable with clocks off; bail with a message */
214 if (sde_power_resource_enable(&priv->phandle,
215 sde_kms->core_client, true)) {
216 seq_puts(s, "failed to enable sde clocks\n");
220 /* main register output */
221 for (i = 0; i < regset->blk_len; i += 4) {
222 addr = regset->offset + i;
223 if ((addr & 0xF) == 0x0)
224 seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
225 seq_printf(s, " %08x", readl_relaxed(base + i));
228 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
/* single_open() adapter for the regset32 show handler above */
233 static int sde_debugfs_open_regset32(struct inode *inode,
236 return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
239 static const struct file_operations sde_fops_regset32 = {
240 .open = sde_debugfs_open_regset32,
243 .release = single_release,
/* Populate a regset32 descriptor with its register window and owning kms. */
246 void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
247 uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
250 regset->offset = offset;
251 regset->blk_len = length;
252 regset->sde_kms = sde_kms;
/*
 * Create a debugfs file backed by sde_fops_regset32 for the given regset.
 * Rejects descriptors that are unset or have a zero-length window.
 */
256 void *sde_debugfs_create_regset32(const char *name, umode_t mode,
257 void *parent, struct sde_debugfs_regset32 *regset)
259 if (!name || !regset || !regset->sde_kms || !regset->blk_len)
262 /* make sure offset is a multiple of 4 */
263 regset->offset = round_down(regset->offset, 4);
265 return debugfs_create_file(name, mode, parent,
266 regset, &sde_fops_regset32);
/* Accessor for the kms debugfs root; NULL-safe for a NULL kms. */
269 void *sde_debugfs_get_root(struct sde_kms *sde_kms)
271 return sde_kms ? sde_kms->debugfs_root : 0;
/*
 * _sde_debugfs_init - create the sde debugfs hierarchy: hw_log_mask under
 * the root, plus a "debug" directory feeding the danger and vbif subtrees.
 * Reuses the drm primary minor's debugfs root when available.
 */
274 static int _sde_debugfs_init(struct sde_kms *sde_kms)
278 p = sde_hw_util_get_log_mask_ptr();
283 if (sde_kms->dev && sde_kms->dev->primary)
284 sde_kms->debugfs_root = sde_kms->dev->primary->debugfs_root;
286 sde_kms->debugfs_root = debugfs_create_dir(SDE_DEBUGFS_DIR, 0);
288 /* allow debugfs_root to be NULL */
289 debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME,
290 0644, sde_kms->debugfs_root, p);
292 /* create common folder for debug information */
293 sde_kms->debugfs_debug = debugfs_create_dir("debug",
294 sde_kms->debugfs_root);
295 if (!sde_kms->debugfs_debug)
296 SDE_ERROR("failed to create debugfs debug directory\n");
298 sde_debugfs_danger_init(sde_kms, sde_kms->debugfs_debug);
299 sde_debugfs_vbif_init(sde_kms, sde_kms->debugfs_debug);
/*
 * _sde_debugfs_destroy - tear down everything _sde_debugfs_init created;
 * pointers are cleared so repeated calls during shutdown are harmless.
 */
304 static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
306 /* don't need to NULL check debugfs_root */
308 sde_debugfs_vbif_destroy(sde_kms);
309 sde_debugfs_danger_destroy(sde_kms);
310 debugfs_remove_recursive(sde_kms->debugfs_debug);
311 sde_kms->debugfs_debug = 0;
312 debugfs_remove_recursive(sde_kms->debugfs_root);
313 sde_kms->debugfs_root = 0;
/* !CONFIG_DEBUG_FS stubs: keep callers building when debugfs is disabled. */
317 static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms,
318 struct dentry *parent)
322 static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
323 struct dentry *parent)
/* msm_kms vblank hooks: thin wrappers delegating to the sde crtc layer. */
329 static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
331 return sde_crtc_vblank(crtc, true);
334 static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
336 sde_crtc_vblank(crtc, false);
/*
 * sde_kms_prepare_commit - enable the core power resource before an atomic
 * commit; also finishes LK splash handoff cleanup if one is in progress.
 */
339 static void sde_kms_prepare_commit(struct msm_kms *kms,
340 struct drm_atomic_state *state)
342 struct sde_kms *sde_kms = to_sde_kms(kms);
343 struct drm_device *dev = sde_kms->dev;
344 struct msm_drm_private *priv = dev->dev_private;
346 if (sde_kms->splash_info.handoff)
347 sde_splash_clean_up_exit_lk(kms);
349 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
/*
 * sde_kms_commit - kick off the flush for every active crtc in the
 * committed atomic state.
 */
352 static void sde_kms_commit(struct msm_kms *kms,
353 struct drm_atomic_state *old_state)
355 struct drm_crtc *crtc;
356 struct drm_crtc_state *old_crtc_state;
359 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
360 if (crtc->state->active) {
361 SDE_EVT32(DRMID(crtc));
362 sde_crtc_commit_kickoff(crtc);
/*
 * sde_kms_complete_commit - per-crtc completion work after a commit, then
 * drop the power vote taken in sde_kms_prepare_commit (paired enable/disable).
 */
367 static void sde_kms_complete_commit(struct msm_kms *kms,
368 struct drm_atomic_state *old_state)
370 struct sde_kms *sde_kms = to_sde_kms(kms);
371 struct drm_device *dev = sde_kms->dev;
372 struct msm_drm_private *priv = dev->dev_private;
373 struct drm_crtc *crtc;
374 struct drm_crtc_state *old_crtc_state;
377 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
378 sde_crtc_complete_commit(crtc, old_crtc_state);
379 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
381 SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
/*
 * sde_kms_wait_for_commit_done - block until every encoder driving @crtc
 * has flushed its committed state to hardware. Skips crtcs that are not
 * enabled/active; holds a vblank reference for the duration of the wait.
 */
384 static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
385 struct drm_crtc *crtc)
387 struct drm_encoder *encoder;
388 struct drm_device *dev;
391 if (!kms || !crtc || !crtc->state) {
392 SDE_ERROR("invalid params\n");
398 if (!crtc->state->enable) {
399 SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
403 if (!crtc->state->active) {
404 SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
408 ret = drm_crtc_vblank_get(crtc);
412 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
413 if (encoder->crtc != crtc)
416 * Wait post-flush if necessary to delay before plane_cleanup
417 * For example, wait for vsync in case of video mode panels
418 * This should be a no-op for command mode panels
420 SDE_EVT32(DRMID(crtc));
421 ret = sde_encoder_wait_for_commit_done(encoder);
/* -EWOULDBLOCK means a non-blocking wait; only real errors are logged */
422 if (ret && ret != -EWOULDBLOCK) {
423 SDE_ERROR("wait for commit done returned %d\n", ret);
428 drm_crtc_vblank_put(crtc);
/*
 * sde_kms_prepare_fence - take the connection ww-mutex (retrying on
 * -EDEADLK via drm_modeset_backoff) and let each crtc prepare its commit
 * fences.
 */
431 static void sde_kms_prepare_fence(struct msm_kms *kms,
432 struct drm_atomic_state *old_state)
434 struct drm_crtc *crtc;
435 struct drm_crtc_state *old_crtc_state;
438 if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
439 SDE_ERROR("invalid argument(s)\n");
444 /* attempt to acquire ww mutex for connection */
445 rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
446 old_state->acquire_ctx);
448 if (rc == -EDEADLK) {
449 drm_modeset_backoff(old_state->acquire_ctx);
453 /* old_state actually contains updated crtc pointers */
454 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
455 sde_crtc_prepare_commit(crtc, old_crtc_state);
459 * _sde_kms_get_displays - query for underlying display handles and cache them
460 * @sde_kms: Pointer to sde kms structure
461 * Returns: Zero on success
463 static int _sde_kms_get_displays(struct sde_kms *sde_kms)
468 SDE_ERROR("invalid sde kms\n");
/* dsi: count active panels, then fetch their handles into a fresh array */
473 sde_kms->dsi_displays = NULL;
474 sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
475 if (sde_kms->dsi_display_count) {
476 sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
479 if (!sde_kms->dsi_displays) {
480 SDE_ERROR("failed to allocate dsi displays\n");
481 goto exit_deinit_dsi;
483 sde_kms->dsi_display_count =
484 dsi_display_get_active_displays(sde_kms->dsi_displays,
485 sde_kms->dsi_display_count);
/* writeback: same pattern as dsi */
489 sde_kms->wb_displays = NULL;
490 sde_kms->wb_display_count = sde_wb_get_num_of_displays();
491 if (sde_kms->wb_display_count) {
492 sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
495 if (!sde_kms->wb_displays) {
496 SDE_ERROR("failed to allocate wb displays\n");
499 sde_kms->wb_display_count =
500 wb_display_get_displays(sde_kms->wb_displays,
501 sde_kms->wb_display_count);
/* hdmi: same pattern as dsi/wb */
505 sde_kms->hdmi_displays = NULL;
506 sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
507 SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
508 if (sde_kms->hdmi_display_count) {
509 sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
512 if (!sde_kms->hdmi_displays) {
513 SDE_ERROR("failed to allocate hdmi displays\n");
514 goto exit_deinit_hdmi;
516 sde_kms->hdmi_display_count =
517 sde_hdmi_get_displays(sde_kms->hdmi_displays,
518 sde_kms->hdmi_display_count);
/* error unwind: release caches in reverse acquisition order */
524 sde_kms->hdmi_display_count = 0;
525 sde_kms->hdmi_displays = NULL;
528 kfree(sde_kms->wb_displays);
529 sde_kms->wb_display_count = 0;
530 sde_kms->wb_displays = NULL;
533 kfree(sde_kms->dsi_displays);
534 sde_kms->dsi_display_count = 0;
535 sde_kms->dsi_displays = NULL;
540 * _sde_kms_release_displays - release cache of underlying display handles
541 * @sde_kms: Pointer to sde kms structure
543 static void _sde_kms_release_displays(struct sde_kms *sde_kms)
546 SDE_ERROR("invalid sde kms\n");
/* free in reverse order of _sde_kms_get_displays; kfree(NULL) is a no-op */
549 kfree(sde_kms->hdmi_displays);
550 sde_kms->hdmi_display_count = 0;
551 sde_kms->hdmi_displays = NULL;
553 kfree(sde_kms->wb_displays);
554 sde_kms->wb_displays = NULL;
555 sde_kms->wb_display_count = 0;
557 kfree(sde_kms->dsi_displays);
558 sde_kms->dsi_displays = NULL;
559 sde_kms->dsi_display_count = 0;
563 * _sde_kms_setup_displays - create encoders, bridges and connectors
564 * for underlying displays
565 * @dev: Pointer to drm device structure
566 * @priv: Pointer to private drm device data
567 * @sde_kms: Pointer to sde kms structure
568 * Returns: Zero on success
570 static int _sde_kms_setup_displays(struct drm_device *dev,
571 struct msm_drm_private *priv,
572 struct sde_kms *sde_kms)
/* connector op tables: one per display technology (dsi/wb/hdmi) */
574 static const struct sde_connector_ops dsi_ops = {
575 .post_init = dsi_conn_post_init,
576 .detect = dsi_conn_detect,
577 .get_modes = dsi_connector_get_modes,
578 .mode_valid = dsi_conn_mode_valid,
579 .get_info = dsi_display_get_info,
580 .set_backlight = dsi_display_set_backlight
582 static const struct sde_connector_ops wb_ops = {
583 .post_init = sde_wb_connector_post_init,
584 .detect = sde_wb_connector_detect,
585 .get_modes = sde_wb_connector_get_modes,
586 .set_property = sde_wb_connector_set_property,
587 .get_info = sde_wb_get_info,
589 static const struct sde_connector_ops hdmi_ops = {
590 .pre_deinit = sde_hdmi_connector_pre_deinit,
591 .post_init = sde_hdmi_connector_post_init,
592 .detect = sde_hdmi_connector_detect,
593 .get_modes = sde_hdmi_connector_get_modes,
594 .mode_valid = sde_hdmi_mode_valid,
595 .get_info = sde_hdmi_get_info,
596 .set_property = sde_hdmi_set_property,
597 .get_property = sde_hdmi_get_property,
598 .pre_kickoff = sde_hdmi_pre_kickoff,
599 .mode_needs_full_range = sde_hdmi_mode_needs_full_range,
600 .get_csc_type = sde_hdmi_get_csc_type
602 struct msm_display_info info = {0};
603 struct drm_encoder *encoder;
604 void *display, *connector;
609 if (!dev || !priv || !sde_kms) {
610 SDE_ERROR("invalid argument(s)\n");
/* cap total encoders to the fixed-size array in msm_drm_private */
614 max_encoders = sde_kms->dsi_display_count +
615 sde_kms->wb_display_count +
616 sde_kms->hdmi_display_count;
618 if (max_encoders > ARRAY_SIZE(priv->encoders)) {
619 max_encoders = ARRAY_SIZE(priv->encoders);
620 SDE_ERROR("capping number of displays to %d", max_encoders);
/* dsi: encoder -> bridge -> connector; unwind on each failure */
624 for (i = 0; i < sde_kms->dsi_display_count &&
625 priv->num_encoders < max_encoders; ++i) {
626 display = sde_kms->dsi_displays[i];
629 memset(&info, 0x0, sizeof(info));
630 rc = dsi_display_get_info(&info, display);
632 SDE_ERROR("dsi get_info %d failed\n", i);
636 encoder = sde_encoder_init(dev, &info);
637 if (IS_ERR_OR_NULL(encoder)) {
638 SDE_ERROR("encoder init failed for dsi %d\n", i);
642 rc = dsi_display_drm_bridge_init(display, encoder);
644 SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
645 sde_encoder_destroy(encoder);
649 connector = sde_connector_init(dev,
654 DRM_CONNECTOR_POLL_HPD,
655 DRM_MODE_CONNECTOR_DSI);
657 priv->encoders[priv->num_encoders++] = encoder;
659 SDE_ERROR("dsi %d connector init failed\n", i);
660 dsi_display_drm_bridge_deinit(display);
661 sde_encoder_destroy(encoder);
/* writeback: same encoder/bridge/connector sequence as dsi */
666 for (i = 0; i < sde_kms->wb_display_count &&
667 priv->num_encoders < max_encoders; ++i) {
668 display = sde_kms->wb_displays[i];
671 memset(&info, 0x0, sizeof(info));
672 rc = sde_wb_get_info(&info, display);
674 SDE_ERROR("wb get_info %d failed\n", i);
678 encoder = sde_encoder_init(dev, &info);
679 if (IS_ERR_OR_NULL(encoder)) {
680 SDE_ERROR("encoder init failed for wb %d\n", i);
684 rc = sde_wb_drm_init(display, encoder);
686 SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
687 sde_encoder_destroy(encoder);
691 connector = sde_connector_init(dev,
696 DRM_CONNECTOR_POLL_HPD,
697 DRM_MODE_CONNECTOR_VIRTUAL);
699 priv->encoders[priv->num_encoders++] = encoder;
701 SDE_ERROR("wb %d connector init failed\n", i);
702 sde_wb_drm_deinit(display);
703 sde_encoder_destroy(encoder);
/* hdmi: extra dev_init step and HPD capability check before encoder init */
708 for (i = 0; i < sde_kms->hdmi_display_count &&
709 priv->num_encoders < max_encoders; ++i) {
710 display = sde_kms->hdmi_displays[i];
713 memset(&info, 0x0, sizeof(info));
714 rc = sde_hdmi_dev_init(display);
716 SDE_ERROR("hdmi dev_init %d failed\n", i);
719 rc = sde_hdmi_get_info(&info, display);
721 SDE_ERROR("hdmi get_info %d failed\n", i);
724 if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
725 connector_poll = DRM_CONNECTOR_POLL_HPD;
728 encoder = sde_encoder_init(dev, &info);
729 if (IS_ERR_OR_NULL(encoder)) {
730 SDE_ERROR("encoder init failed for hdmi %d\n", i);
734 rc = sde_hdmi_drm_init(display, encoder);
736 SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
737 sde_encoder_destroy(encoder);
741 connector = sde_connector_init(dev,
747 DRM_MODE_CONNECTOR_HDMIA);
749 priv->encoders[priv->num_encoders++] = encoder;
751 SDE_ERROR("hdmi %d connector init failed\n", i);
752 sde_hdmi_dev_deinit(display);
753 sde_hdmi_drm_deinit(display);
754 sde_encoder_destroy(encoder);
/*
 * _sde_kms_drm_obj_destroy - destroy all drm objects this kms created
 * (crtcs, planes, connectors, encoders) and release the cached display
 * handles. Counts are reset so the routine is safe against re-entry.
 */
761 static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
763 struct msm_drm_private *priv;
767 SDE_ERROR("invalid sde_kms\n");
769 } else if (!sde_kms->dev) {
770 SDE_ERROR("invalid dev\n");
772 } else if (!sde_kms->dev->dev_private) {
773 SDE_ERROR("invalid dev_private\n");
776 priv = sde_kms->dev->dev_private;
778 for (i = 0; i < priv->num_crtcs; i++)
779 priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
782 for (i = 0; i < priv->num_planes; i++)
783 priv->planes[i]->funcs->destroy(priv->planes[i]);
784 priv->num_planes = 0;
786 for (i = 0; i < priv->num_connectors; i++)
787 priv->connectors[i]->funcs->destroy(priv->connectors[i]);
788 priv->num_connectors = 0;
790 for (i = 0; i < priv->num_encoders; i++)
791 priv->encoders[i]->funcs->destroy(priv->encoders[i]);
792 priv->num_encoders = 0;
794 _sde_kms_release_displays(sde_kms);
/*
 * sde_get_crtc_id - map a display-type string to a crtc index
 * ("primary"/"secondary"/other); return values not visible in this excerpt.
 */
797 static inline int sde_get_crtc_id(const char *display_type)
799 if (!strcmp(display_type, "primary"))
801 else if (!strcmp(display_type, "secondary"))
/*
 * _sde_kms_drm_obj_init - create the drm mode objects: displays/encoders
 * first (via _sde_kms_setup_displays), then planes (virtual-plane path when
 * the catalog has vp entries, otherwise per-sspp), then one crtc per
 * encoder, finally wire up possible_crtcs masks. Unwinds on failure.
 */
807 static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
809 struct drm_device *dev;
810 struct drm_plane *primary_planes[MAX_PLANES], *plane;
811 struct drm_crtc *crtc;
813 struct msm_drm_private *priv;
814 struct sde_mdss_cfg *catalog;
816 int primary_planes_idx, i, ret;
817 int max_crtc_count, max_plane_count;
819 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
820 SDE_ERROR("invalid sde_kms\n");
825 priv = dev->dev_private;
826 catalog = sde_kms->catalog;
828 ret = sde_core_irq_domain_add(sde_kms);
832 * Query for underlying display drivers, and create connectors,
833 * bridges and encoders for them.
835 if (!_sde_kms_get_displays(sde_kms))
836 (void)_sde_kms_setup_displays(dev, priv, sde_kms);
838 max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
840 /* Create the planes */
841 primary_planes_idx = 0;
/* virtual-plane catalog path: planes are bound to a specific crtc */
842 if (catalog->vp_count) {
843 max_plane_count = min_t(u32, catalog->vp_count, MAX_PLANES);
845 for (i = 0; i < max_plane_count; i++) {
848 sde_get_crtc_id(catalog->vp[i].display_type);
850 if (strcmp(catalog->vp[i].plane_type, "primary"))
853 plane = sde_plane_init(dev, catalog->vp[i].id,
854 primary, 1UL << crtc_id, true);
856 SDE_ERROR("sde_plane_init failed\n");
857 ret = PTR_ERR(plane);
860 priv->planes[priv->num_planes++] = plane;
863 primary_planes[crtc_id] = plane;
864 primary_planes_idx++;
/* sspp path: cursor pipes skipped once enough primaries exist */
868 max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
870 for (i = 0; i < max_plane_count; i++) {
873 if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
874 || primary_planes_idx >= max_crtc_count)
877 plane = sde_plane_init(dev, catalog->sspp[i].id,
878 primary, (1UL << max_crtc_count) - 1,
881 SDE_ERROR("sde_plane_init failed\n");
882 ret = PTR_ERR(plane);
885 priv->planes[priv->num_planes++] = plane;
888 primary_planes[primary_planes_idx++] = plane;
892 max_crtc_count = min(max_crtc_count, primary_planes_idx);
894 /* Create one CRTC per encoder */
895 for (i = 0; i < max_crtc_count; i++) {
896 crtc = sde_crtc_init(dev, primary_planes[i]);
901 priv->crtcs[priv->num_crtcs++] = crtc;
904 if (sde_is_custom_client()) {
905 /* All CRTCs are compatible with all planes */
906 for (i = 0; i < priv->num_planes; i++)
907 priv->planes[i]->possible_crtcs =
908 (1 << priv->num_crtcs) - 1;
911 /* All CRTCs are compatible with all encoders */
912 for (i = 0; i < priv->num_encoders; i++)
913 priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
/* failure unwind: drop the objects, then the irq domain */
917 _sde_kms_drm_obj_destroy(sde_kms);
919 sde_core_irq_domain_fini(sde_kms);
/*
 * sde_kms_postinit - late init after drm objects exist; lets the drm vblank
 * timer disable vblank interrupts when idle.
 */
923 static int sde_kms_postinit(struct msm_kms *kms)
925 struct sde_kms *sde_kms = to_sde_kms(kms);
926 struct drm_device *dev;
928 if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
929 SDE_ERROR("invalid sde_kms\n");
936 * Allow vblank interrupt to be disabled by drm vblank timer.
938 dev->vblank_disable_allowed = true;
/* round_pixclk hook: body not visible; presumably returns rate unchanged */
943 static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
944 struct drm_encoder *encoder)
/*
 * _sde_kms_hw_destroy - release every hw resource acquired in
 * sde_kms_hw_init, in reverse order: intr, displays, debugfs, mmu, perf,
 * vbif hw blocks, rm, catalog, splash/power client, io mappings. Each step
 * is guarded so the routine can run on a partially-initialized kms.
 */
949 static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
950 struct platform_device *pdev)
952 struct drm_device *dev;
953 struct msm_drm_private *priv;
956 if (!sde_kms || !pdev)
963 priv = dev->dev_private;
967 if (sde_kms->hw_intr)
968 sde_hw_intr_destroy(sde_kms->hw_intr);
969 sde_kms->hw_intr = NULL;
971 _sde_kms_release_displays(sde_kms);
973 /* safe to call these more than once during shutdown */
974 _sde_debugfs_destroy(sde_kms);
975 _sde_kms_mmu_destroy(sde_kms);
976 sde_core_perf_destroy(&sde_kms->perf);
978 if (sde_kms->catalog) {
979 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
980 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
982 if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
983 sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
987 if (sde_kms->rm_init)
988 sde_rm_destroy(&sde_kms->rm);
989 sde_kms->rm_init = false;
991 if (sde_kms->catalog)
992 sde_hw_catalog_deinit(sde_kms->catalog);
993 sde_kms->catalog = NULL;
/* splash teardown needs the power client, so it runs before destroy */
995 if (sde_kms->splash_info.handoff) {
996 if (sde_kms->core_client)
997 sde_splash_destroy(&sde_kms->splash_info,
998 &priv->phandle, sde_kms->core_client);
1001 if (sde_kms->core_client)
1002 sde_power_client_destroy(&priv->phandle,
1003 sde_kms->core_client);
1004 sde_kms->core_client = NULL;
1006 if (sde_kms->vbif[VBIF_NRT])
1007 msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
1008 sde_kms->vbif[VBIF_NRT] = NULL;
1010 if (sde_kms->vbif[VBIF_RT])
1011 msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
1012 sde_kms->vbif[VBIF_RT] = NULL;
1015 msm_iounmap(pdev, sde_kms->mmio);
1016 sde_kms->mmio = NULL;
/*
 * sde_kms_destroy - msm_kms destroy hook: validate pointers then delegate
 * to _sde_kms_hw_destroy for the full teardown.
 */
1019 static void sde_kms_destroy(struct msm_kms *kms)
1021 struct sde_kms *sde_kms;
1022 struct drm_device *dev;
1025 SDE_ERROR("invalid kms\n");
1029 sde_kms = to_sde_kms(kms);
1032 SDE_ERROR("invalid device\n");
1036 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
/*
 * sde_kms_preclose - drm file preclose hook: cancel any page flips the
 * closing file still has pending on every crtc.
 */
1040 static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
1042 struct sde_kms *sde_kms = to_sde_kms(kms);
1043 struct drm_device *dev = sde_kms->dev;
1044 struct msm_drm_private *priv = dev->dev_private;
1047 for (i = 0; i < priv->num_crtcs; i++)
1048 sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
/* msm_kms vtable: binds the sde implementations defined in this file. */
1051 static const struct msm_kms_funcs kms_funcs = {
1052 .hw_init = sde_kms_hw_init,
1053 .postinit = sde_kms_postinit,
1054 .irq_preinstall = sde_irq_preinstall,
1055 .irq_postinstall = sde_irq_postinstall,
1056 .irq_uninstall = sde_irq_uninstall,
1058 .preclose = sde_kms_preclose,
1059 .prepare_fence = sde_kms_prepare_fence,
1060 .prepare_commit = sde_kms_prepare_commit,
1061 .commit = sde_kms_commit,
1062 .complete_commit = sde_kms_complete_commit,
1063 .wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
1064 .enable_vblank = sde_kms_enable_vblank,
1065 .disable_vblank = sde_kms_disable_vblank,
1066 .check_modified_format = sde_format_check_modified_format,
1067 .get_format = sde_get_msm_format,
1068 .round_pixclk = sde_kms_round_pixclk,
1069 .destroy = sde_kms_destroy,
1072 /* the caller api needs to turn on clock before calling it */
/* Latch the core hardware revision from mmio offset 0x0. */
1073 static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
1075 sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
/*
 * _sde_kms_mmu_destroy - detach and release every address space, iterating
 * in reverse so teardown mirrors _sde_kms_mmu_init's creation order.
 */
1078 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
1080 struct msm_mmu *mmu;
1083 for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
1084 if (!sde_kms->aspace[i])
1087 mmu = sde_kms->aspace[i]->mmu;
1089 mmu->funcs->detach(mmu);
1090 msm_gem_address_space_put(sde_kms->aspace[i]);
1092 sde_kms->aspace[i] = NULL;
/*
 * _sde_kms_mmu_init - create an smmu-backed gem address space per domain
 * and attach it. During LK splash handoff the unsecure domain gets
 * DOMAIN_ATTR_EARLY_MAP set first (so the still-scanning display hw keeps
 * working), and the LK framebuffer region is mapped after attach.
 * Unwinds via _sde_kms_mmu_destroy on failure.
 */
1098 static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
1100 struct msm_mmu *mmu;
1103 for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
1104 struct msm_gem_address_space *aspace;
1106 mmu = msm_smmu_new(sde_kms->dev->dev, i);
1108 /* MMU's can be optional depending on platform */
1110 DRM_INFO("failed to init iommu id %d: rc: %d\n", i,
1115 /* Attaching smmu means IOMMU HW starts to work immediately.
1116 * However, display HW in LK is still accessing memory
1117 * while the memory map is not done yet.
1118 * So first set DOMAIN_ATTR_EARLY_MAP attribute 1 to bypass
1119 * stage 1 translation in IOMMU HW.
1121 if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
1122 sde_kms->splash_info.handoff) {
1123 ret = mmu->funcs->set_property(mmu,
1124 DOMAIN_ATTR_EARLY_MAP,
1125 &sde_kms->splash_info.handoff);
1127 SDE_ERROR("failed to set map att: %d\n", ret);
1128 mmu->funcs->destroy(mmu);
1133 aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
1135 if (IS_ERR(aspace)) {
1136 ret = PTR_ERR(aspace);
1137 mmu->funcs->destroy(mmu);
1141 sde_kms->aspace[i] = aspace;
1143 ret = mmu->funcs->attach(mmu, NULL, 0);
1145 SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
1146 msm_gem_address_space_put(aspace);
1151 * It's safe now to map the physical memory blcok LK accesses.
1153 if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
1154 sde_kms->splash_info.handoff) {
1155 ret = sde_splash_smmu_map(sde_kms->dev, mmu,
1156 &sde_kms->splash_info);
1158 SDE_ERROR("map rsv mem failed: %d\n", ret);
1159 msm_gem_address_space_put(aspace);
/* failure path: undo whatever domains were already set up */
1167 _sde_kms_mmu_destroy(sde_kms);
/*
 * _sde_kms_ioremap - look up the named MEM resource (falling back to index
 * 0) and devm_ioremap it; returns the mapping and, via @out_size, its
 * length. Missing resources are only SDE_DEBUG — availability is
 * platform-dependent.
 */
1172 static void __iomem *_sde_kms_ioremap(struct platform_device *pdev,
1173 const char *name, unsigned long *out_size)
1175 struct resource *res;
1183 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1185 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1188 /* availability depends on platform */
1189 SDE_DEBUG("failed to get memory resource: %s\n", name);
1193 size = resource_size(res);
1195 ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
1197 SDE_ERROR("failed to ioremap: %s\n", name);
1201 SDE_DEBUG("IO:region %s %pK %08lx\n", name, ptr, size);
/*
 * sde_kms_hw_init - bring up the sde hardware layer: map mdp/vbif
 * registers, create the power client and enable it, read the hw revision,
 * parse the catalog, init rm/splash/vbif/mmu/debugfs/perf/intr, create the
 * drm objects, and set mode_config limits. On any failure it unwinds
 * through the goto ladder ending in _sde_kms_hw_destroy.
 */
1210 static int sde_kms_hw_init(struct msm_kms *kms)
1212 struct sde_kms *sde_kms;
1213 struct drm_device *dev;
1214 struct msm_drm_private *priv;
1215 struct sde_splash_info *sinfo;
1216 int i, rc = -EINVAL;
1219 SDE_ERROR("invalid kms\n");
1223 sde_kms = to_sde_kms(kms);
1225 if (!dev || !dev->platformdev) {
1226 SDE_ERROR("invalid device\n");
1230 priv = dev->dev_private;
1232 SDE_ERROR("invalid private data\n");
/* register windows: mdp is mandatory, vbif_nrt is optional */
1236 sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys",
1237 &sde_kms->mmio_len);
1238 if (!sde_kms->mmio) {
1239 SDE_ERROR("mdp register memory map failed\n");
1242 DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
1244 rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
1247 SDE_ERROR("dbg base register kms failed: %d\n", rc);
1249 sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys",
1250 &sde_kms->vbif_len[VBIF_RT]);
1251 if (!sde_kms->vbif[VBIF_RT]) {
1252 SDE_ERROR("vbif register memory map failed\n");
1256 rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
1257 sde_kms->vbif_len[VBIF_RT]);
1259 SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
1261 sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev,
1262 "vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]);
1263 if (!sde_kms->vbif[VBIF_NRT]) {
1264 SDE_DEBUG("VBIF NRT is not defined");
1266 rc = sde_dbg_reg_register_base("vbif_nrt",
1267 sde_kms->vbif[VBIF_NRT],
1268 sde_kms->vbif_len[VBIF_NRT]);
1270 SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
/* power client must exist (and be enabled) before any register access */
1274 sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
1275 if (IS_ERR_OR_NULL(sde_kms->core_client)) {
1276 rc = PTR_ERR(sde_kms->core_client);
1277 SDE_ERROR("sde power client create failed: %d\n", rc);
1278 sde_kms->core_client = NULL;
1282 rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
1285 SDE_ERROR("resource enable failed: %d\n", rc);
1289 _sde_kms_core_hw_rev_init(sde_kms);
1291 pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
1293 sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
1294 if (IS_ERR_OR_NULL(sde_kms->catalog)) {
1295 rc = PTR_ERR(sde_kms->catalog);
1296 SDE_ERROR("catalog init failed: %d\n", rc);
1297 sde_kms->catalog = NULL;
1301 sde_dbg_init_dbg_buses(sde_kms->core_rev);
1303 rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
1306 SDE_ERROR("rm init failed: %d\n", rc);
1310 sde_kms->rm_init = true;
1312 sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
1313 if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
1314 rc = PTR_ERR(sde_kms->hw_mdp);
1315 SDE_ERROR("failed to get hw_mdp: %d\n", rc);
1316 sde_kms->hw_mdp = NULL;
1321 * Read the DISP_INTF_SEL register to check
1322 * whether early display is enabled in LK.
1324 rc = sde_splash_get_handoff_status(kms);
1326 SDE_ERROR("get early splash status failed: %d\n", rc);
1331 * when LK has enabled early display, sde_splash_parse_dt and
1332 * sde_splash_init must be called. The first function is to parse the
1333 * mandatory memory node for splash function, and the second function
1334 * will first do bandwidth voting job, because display hardware is now
1335 * accessing AHB data bus, otherwise device reboot will happen, and then
1336 * to check if the memory is reserved.
1338 sinfo = &sde_kms->splash_info;
1339 if (sinfo->handoff) {
1340 rc = sde_splash_parse_dt(dev);
1342 SDE_ERROR("parse dt for splash info failed: %d\n", rc);
1346 sde_splash_init(&priv->phandle, kms);
1349 for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
1350 u32 vbif_idx = sde_kms->catalog->vbif[i].id;
1352 sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
1353 sde_kms->vbif[vbif_idx], sde_kms->catalog);
1354 if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
1355 rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
1356 SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
1357 sde_kms->hw_vbif[vbif_idx] = NULL;
1363 * Now we need to read the HW catalog and initialize resources such as
1364 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
1366 rc = _sde_kms_mmu_init(sde_kms);
1368 SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
1373 * NOTE: Calling sde_debugfs_init here so that the drm_minor device for
1374 * 'primary' is already created.
1376 rc = _sde_debugfs_init(sde_kms);
1378 SDE_ERROR("sde_debugfs init failed: %d\n", rc);
1382 rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
1383 &priv->phandle, priv->pclient, "core_clk_src",
1384 sde_kms->debugfs_debug);
1386 SDE_ERROR("failed to init perf %d\n", rc);
1390 sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
1391 if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
1392 rc = PTR_ERR(sde_kms->hw_intr);
1393 SDE_ERROR("hw_intr init failed: %d\n", rc);
1394 sde_kms->hw_intr = NULL;
1395 goto hw_intr_init_err;
1399 * _sde_kms_drm_obj_init should create the DRM related objects
1400 * i.e. CRTCs, planes, encoders, connectors and so forth
1402 rc = _sde_kms_drm_obj_init(sde_kms);
1404 SDE_ERROR("modeset init failed: %d\n", rc);
1405 goto drm_obj_init_err;
1408 dev->mode_config.min_width = 0;
1409 dev->mode_config.min_height = 0;
1412 * max crtc width is equal to the max mixer width * 2 and max height is
1415 dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
1416 dev->mode_config.max_height = 4096;
1419 * Support format modifiers for compression etc.
1421 dev->mode_config.allow_fb_modifiers = true;
/* keep clocks on through splash handoff; otherwise drop the init vote */
1423 if (!sde_kms->splash_info.handoff)
1424 sde_power_resource_enable(&priv->phandle,
1425 sde_kms->core_client, false);
/* error unwind ladder */
1430 sde_core_perf_destroy(&sde_kms->perf);
1434 sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
1436 _sde_kms_hw_destroy(sde_kms, dev->platformdev);
/*
 * sde_kms_init - allocate the sde_kms wrapper, bind the kms_funcs vtable,
 * and hand the embedded msm_kms base back to the msm core. Returns
 * ERR_PTR on invalid device or allocation failure.
 */
1441 struct msm_kms *sde_kms_init(struct drm_device *dev)
1443 struct msm_drm_private *priv;
1444 struct sde_kms *sde_kms;
1446 if (!dev || !dev->dev_private) {
1447 SDE_ERROR("drm device node invalid\n");
1448 return ERR_PTR(-EINVAL);
1451 priv = dev->dev_private;
1453 sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
1455 SDE_ERROR("failed to allocate sde kms\n");
1456 return ERR_PTR(-ENOMEM);
1459 msm_kms_init(&sde_kms->base, &kms_funcs);
1462 return &sde_kms->base;