2 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
21 #include <drm/drm_crtc.h>
22 #include <linux/debugfs.h>
27 #include "dsi_display.h"
33 #include "sde_core_irq.h"
34 #include "sde_formats.h"
35 #include "sde_hw_vbif.h"
37 #include "sde_encoder.h"
38 #include "sde_plane.h"
41 #define CREATE_TRACE_POINTS
42 #include "sde_trace.h"
44 static const char * const iommu_ports[] = {
49 * Controls size of event log buffer. Specified as a power of 2.
51 #define SDE_EVTLOG_SIZE 1024
54 * To enable overall DRM driver logging
55 * # echo 0x2 > /sys/module/drm/parameters/debug
57 * To enable DRM driver h/w logging
58 * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
60 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
62 #define SDE_DEBUGFS_DIR "msm_sde"
63 #define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
66 * sdecustom - enable certain driver customizations for sde clients
67 * Enabling this modifies the standard DRM behavior slightly and assumes
68 * that the clients have specific knowledge about the modifications that
69 * are involved, so don't enable this unless you know what you're doing.
71 * Parts of the driver that are affected by this setting may be located by
72 * searching for invocations of the 'sde_is_custom_client()' function.
74 * This is enabled by default (see the 'sdecustom' initializer below).
76 static bool sdecustom = true;
77 module_param(sdecustom, bool, 0400);
78 MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
80 static int sde_kms_hw_init(struct msm_kms *kms);
81 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
83 bool sde_is_custom_client(void)
88 #ifdef CONFIG_DEBUG_FS
89 static int _sde_danger_signal_status(struct seq_file *s,
92 struct sde_kms *kms = (struct sde_kms *)s->private;
93 struct msm_drm_private *priv;
94 struct sde_danger_safe_status status;
97 if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
98 SDE_ERROR("invalid arg(s)\n");
102 priv = kms->dev->dev_private;
103 memset(&status, 0, sizeof(struct sde_danger_safe_status));
105 sde_power_resource_enable(&priv->phandle, kms->core_client, true);
107 seq_puts(s, "\nDanger signal status:\n");
108 if (kms->hw_mdp->ops.get_danger_status)
109 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
112 seq_puts(s, "\nSafe signal status:\n");
113 if (kms->hw_mdp->ops.get_danger_status)
114 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
117 sde_power_resource_enable(&priv->phandle, kms->core_client, false);
119 seq_printf(s, "MDP : 0x%x\n", status.mdp);
121 for (i = SSPP_VIG0; i < SSPP_MAX; i++)
122 seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
126 for (i = WB_0; i < WB_MAX; i++)
127 seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate single_open()-based debugfs fops
 * for a seq_file show function named <prefix>_show; emits <prefix>_open
 * and a const <prefix>_fops suitable for debugfs_create_file().
 */
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
	return single_open(file, __prefix ## _show, inode->i_private); \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.llseek = seq_lseek, \
/* debugfs show handler: dump danger signal status */
static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
	return _sde_danger_signal_status(s, true);
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
/* debugfs show handler: dump safe signal status */
static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
	return _sde_danger_signal_status(s, false);
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
/* remove the "danger" debugfs directory and everything under it */
static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
	debugfs_remove_recursive(sde_kms->debugfs_danger);
	sde_kms->debugfs_danger = NULL;
/*
 * Create the "danger" debugfs directory under @parent and populate it
 * with the danger_status and safe_status nodes.
 */
static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
		struct dentry *parent)
	sde_kms->debugfs_danger = debugfs_create_dir("danger",
	if (!sde_kms->debugfs_danger) {
		SDE_ERROR("failed to create danger debugfs\n");

	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
			sde_kms, &sde_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
			sde_kms, &sde_debugfs_safe_stats_fops);
/*
 * Dump the 32-bit register range described by a sde_debugfs_regset32
 * into a seq_file: four registers per line, rows aligned to 16-byte
 * boundaries, with sde core clocks held on around the reads.
 */
static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
	struct sde_debugfs_regset32 *regset;
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;

	if (!s || !s->private)

	sde_kms = regset->sde_kms;
	if (!sde_kms || !sde_kms->mmio)

	priv = dev->dev_private;

	/* base of the range within the already-mapped mdp register space */
	base = sde_kms->mmio + regset->offset;

	/* insert padding spaces, if needed, so the first row stays aligned */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)

	/* register reads require the sde clocks to be enabled */
	if (sde_power_resource_enable(&priv->phandle,
			sde_kms->core_client, true)) {
		seq_puts(s, "failed to enable sde clocks\n");

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
/* debugfs open: route reads through _sde_debugfs_show_regset32 */
static int sde_debugfs_open_regset32(struct inode *inode,
	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);

/* shared fops for all generic register-range dump nodes */
static const struct file_operations sde_fops_regset32 = {
	.open = sde_debugfs_open_regset32,
	.release = single_release,
/*
 * Initialize a regset descriptor with the register range to dump.
 * @offset/@length are byte offsets/sizes within the mdp register space.
 */
void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
		uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
	regset->offset = offset;
	regset->blk_len = length;
	regset->sde_kms = sde_kms;
/*
 * Create a debugfs node that dumps the register range described by
 * @regset. Returns the dentry from debugfs_create_file() as void *.
 */
void *sde_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent, struct sde_debugfs_regset32 *regset)
	if (!name || !regset || !regset->sde_kms || !regset->blk_len)

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(regset->offset, 4);

	return debugfs_create_file(name, mode, parent,
			regset, &sde_fops_regset32);
273 void *sde_debugfs_get_root(struct sde_kms *sde_kms)
275 return sde_kms ? sde_kms->debugfs_root : 0;
/*
 * Create the sde debugfs hierarchy: the hw_log_mask control node plus a
 * common "debug" directory holding danger and vbif sub-entries.
 */
static int _sde_debugfs_init(struct sde_kms *sde_kms)

	p = sde_hw_util_get_log_mask_ptr();

	/* reuse the drm primary minor's debugfs root when available */
	if (sde_kms->dev && sde_kms->dev->primary)
		sde_kms->debugfs_root = sde_kms->dev->primary->debugfs_root;
		sde_kms->debugfs_root = debugfs_create_dir(SDE_DEBUGFS_DIR, 0);

	/* allow debugfs_root to be NULL */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME,
			0644, sde_kms->debugfs_root, p);

	/* create common folder for debug information */
	sde_kms->debugfs_debug = debugfs_create_dir("debug",
			sde_kms->debugfs_root);
	if (!sde_kms->debugfs_debug)
		SDE_ERROR("failed to create debugfs debug directory\n");

	sde_debugfs_danger_init(sde_kms, sde_kms->debugfs_debug);
	sde_debugfs_vbif_init(sde_kms, sde_kms->debugfs_debug);
/* tear down all sde debugfs entries; safe to call more than once */
static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
	/* don't need to NULL check debugfs_root */
	sde_debugfs_vbif_destroy(sde_kms);
	sde_debugfs_danger_destroy(sde_kms);
	debugfs_remove_recursive(sde_kms->debugfs_debug);
	sde_kms->debugfs_debug = 0; /* NOTE(review): NULL preferred for pointers */
	debugfs_remove_recursive(sde_kms->debugfs_root);
	sde_kms->debugfs_root = 0; /* NOTE(review): NULL preferred for pointers */
/*
 * Stubs for builds without CONFIG_DEBUG_FS.
 * NOTE(review): this destroy stub takes (sde_kms, parent) while the
 * CONFIG_DEBUG_FS version takes only (sde_kms) -- confirm the two
 * signatures agree with all callers.
 */
static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms,
		struct dentry *parent)

static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
		struct dentry *parent)
/* enable vblank on @crtc; takes a power reference held while the irq is on */
static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);

	return sde_crtc_vblank(crtc, true);
/* disable vblank on @crtc, then drop the reference taken in enable_vblank */
static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;

	sde_crtc_vblank(crtc, false);

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
/*
 * Grab a power reference before atomic commit hw programming begins;
 * released in sde_kms_complete_commit().
 */
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
/* kick off the newly committed state on every active crtc */
static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc));
			sde_crtc_commit_kickoff(crtc);
/*
 * Finish the commit on each crtc, then drop the power reference
 * taken in sde_kms_prepare_commit().
 */
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
		sde_crtc_complete_commit(crtc, old_crtc_state);
	/* balances the enable in sde_kms_prepare_commit() */
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
/*
 * Block until the hardware has consumed the last committed frame on
 * @crtc, by waiting on each encoder attached to it.
 */
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
	struct drm_encoder *encoder;
	/*
	 * NOTE(review): crtc is dereferenced here before the !crtc check
	 * below; a NULL crtc would crash before the guard runs.
	 */
	struct drm_device *dev = crtc->dev;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
		/*
		 * Wait post-flush if necessary to delay before plane_cleanup
		 * For example, wait for vsync in case of video mode panels
		 * This should be a no-op for command mode panels
		 */
		SDE_EVT32(DRMID(crtc));
		ret = sde_encoder_wait_for_commit_done(encoder);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("wait for commit done returned %d\n", ret);
/*
 * Acquire the connection_mutex under the atomic state's acquire context
 * (backing off on ww-mutex deadlock), then prepare output fences on
 * each crtc in the state.
 */
static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");

	/* attempt to acquire ww mutex for connection */
	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
			old_state->acquire_ctx);

	if (rc == -EDEADLK) {
		/* standard ww-mutex backoff-and-retry protocol */
		drm_modeset_backoff(old_state->acquire_ctx);

	/* old_state actually contains updated crtc pointers */
	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
		sde_crtc_prepare_commit(crtc, old_crtc_state);
/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)

		SDE_ERROR("invalid sde kms\n");

	/* dsi displays */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		/* count may shrink to the number of displays actually active */
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);

	/* writeback displays */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);

	/* hdmi displays */
	sde_kms->hdmi_displays = NULL;
	sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
	SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
	if (sde_kms->hdmi_display_count) {
		sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
		if (!sde_kms->hdmi_displays) {
			SDE_ERROR("failed to allocate hdmi displays\n");
			goto exit_deinit_hdmi;
		sde_kms->hdmi_display_count =
			sde_hdmi_get_displays(sde_kms->hdmi_displays,
					sde_kms->hdmi_display_count);

	/* error unwind: release caches in reverse order of acquisition */
	sde_kms->hdmi_display_count = 0;
	sde_kms->hdmi_displays = NULL;

	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;
/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms: Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
		SDE_ERROR("invalid sde kms\n");

	kfree(sde_kms->hdmi_displays);
	sde_kms->hdmi_display_count = 0;
	sde_kms->hdmi_displays = NULL;

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;
/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *	for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
	/* connector callback tables, one per display technology */
	static const struct sde_connector_ops dsi_ops = {
		.post_init = dsi_conn_post_init,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
	static const struct sde_connector_ops hdmi_ops = {
		.pre_deinit = sde_hdmi_connector_pre_deinit,
		.post_init = sde_hdmi_connector_post_init,
		.detect = sde_hdmi_connector_detect,
		.get_modes = sde_hdmi_connector_get_modes,
		.mode_valid = sde_hdmi_mode_valid,
		.get_info = sde_hdmi_get_info,
	struct msm_display_info info = {0};
	struct drm_encoder *encoder;
	void *display, *connector;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");

	max_encoders = sde_kms->dsi_display_count +
		sde_kms->wb_display_count +
		sde_kms->hdmi_display_count;

	/* cap to the fixed-size encoder array in msm_drm_private */
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d", max_encoders);

	/* dsi: encoder -> bridge -> connector; unwind on any failure */
	for (i = 0; i < sde_kms->dsi_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(&info, display);
			SDE_ERROR("dsi get_info %d failed\n", i);

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);

		rc = dsi_display_drm_bridge_init(display, encoder);
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);

		connector = sde_connector_init(dev,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
			priv->encoders[priv->num_encoders++] = encoder;
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);

	/* writeback: virtual connectors */
	for (i = 0; i < sde_kms->wb_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(&info, display);
			SDE_ERROR("wb get_info %d failed\n", i);

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);

		rc = sde_wb_drm_init(display, encoder);
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);

		connector = sde_connector_init(dev,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
			priv->encoders[priv->num_encoders++] = encoder;
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);

	/* hdmi: needs an extra dev_init step before querying info */
	for (i = 0; i < sde_kms->hdmi_display_count &&
		priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->hdmi_displays[i];

		memset(&info, 0x0, sizeof(info));
		rc = sde_hdmi_dev_init(display);
			SDE_ERROR("hdmi dev_init %d failed\n", i);
		rc = sde_hdmi_get_info(&info, display);
			SDE_ERROR("hdmi get_info %d failed\n", i);
		if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
			connector_poll = DRM_CONNECTOR_POLL_HPD;

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for hdmi %d\n", i);

		rc = sde_hdmi_drm_init(display, encoder);
			SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);

		connector = sde_connector_init(dev,
				DRM_MODE_CONNECTOR_HDMIA);
			priv->encoders[priv->num_encoders++] = encoder;
			SDE_ERROR("hdmi %d connector init failed\n", i);
			sde_hdmi_dev_deinit(display);
			sde_hdmi_drm_deinit(display);
			sde_encoder_destroy(encoder);
/* destroy all drm objects (crtcs, planes, connectors, encoders) and
 * release the cached display handles */
static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
	struct msm_drm_private *priv;

		SDE_ERROR("invalid sde_kms\n");
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");

	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
/*
 * Create the DRM mode objects: planes (primary + others), one crtc per
 * encoder, and the per-display encoders/connectors, sized against the
 * hardware catalog.
 */
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;
	int primary_planes_idx, i, ret;
	int max_crtc_count, max_plane_count;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");

	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
	max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);

	/* Create the planes */
	primary_planes_idx = 0;
	for (i = 0; i < max_plane_count; i++) {
		/* cursor pipes never become primaries; cap primaries at crtcs */
		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1);
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);

		priv->planes[priv->num_planes++] = plane;
			primary_planes[primary_planes_idx++] = plane;

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		priv->crtcs[priv->num_crtcs++] = crtc;

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	/* error path: tear down partially created objects */
	_sde_kms_drm_obj_destroy(sde_kms);
	sde_core_irq_domain_fini(sde_kms);
/* post-init hook: allow the drm vblank timer to disable vblank irqs */
static int sde_kms_postinit(struct msm_kms *kms)
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");

	/*
	 * Allow vblank interrupt to be disabled by drm vblank timer.
	 */
	dev->vblank_disable_allowed = true;
/* msm_kms hook: round the requested pixel clock rate for @encoder */
static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
/*
 * Tear down everything sde_kms_hw_init() built, in reverse order.
 * Each step NULL-checks and then clears its pointer, so this is safe
 * to call multiple times during shutdown or on a partial init failure.
 */
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
	struct drm_device *dev;
	struct msm_drm_private *priv;

	if (!sde_kms || !pdev)

	priv = dev->dev_private;

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	_sde_kms_release_displays(sde_kms);

	/* safe to call these more than once during shutdown */
	_sde_debugfs_destroy(sde_kms);
	_sde_kms_mmu_destroy(sde_kms);
	sde_core_perf_destroy(&sde_kms->perf);

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->core_client)
		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
	sde_kms->core_client = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;
/* msm_kms hook: validate args and tear down all sde hardware state */
static void sde_kms_destroy(struct msm_kms *kms)
	struct sde_kms *sde_kms;
	struct drm_device *dev;

		SDE_ERROR("invalid kms\n");

	sde_kms = to_sde_kms(kms);
		SDE_ERROR("invalid device\n");

	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
/* drm file preclose: cancel pending page flips owned by @file on all crtcs */
static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
/* msm_kms callback table wiring the sde implementations into msm drm */
static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.preclose = sde_kms_preclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.enable_vblank = sde_kms_enable_vblank,
	.disable_vblank = sde_kms_disable_vblank,
	.check_modified_format = sde_format_check_modified_format,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.destroy = sde_kms_destroy,
/* the caller api needs to turn on clock before calling it */
static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
	/* hw revision register lives at offset 0 of the mdp register space */
	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
/* detach and release all smmu address spaces, in reverse creation order */
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
	struct msm_mmu *mmu;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])

		mmu = sde_kms->aspace[i]->mmu;

		mmu->funcs->detach(mmu);
		msm_gem_address_space_put(sde_kms->aspace[i]);

		sde_kms->aspace[i] = NULL;
/*
 * Create an smmu-backed gem address space for each smmu domain and
 * attach it; on any failure, destroy whatever was created so far.
 */
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
	struct msm_mmu *mmu;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);

		/* MMU's can be optional depending on platform */
			DRM_INFO("failed to init iommu id %d: rc: %d\n", i,

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);

		sde_kms->aspace[i] = aspace;

		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
			msm_gem_address_space_put(aspace);

	/* unwind everything created before the failure */
	_sde_kms_mmu_destroy(sde_kms);
/*
 * Bring up the sde hardware: map register spaces, create the power
 * client, read the hw revision, parse the catalog, and initialize the
 * resource manager, vbif blocks, smmu, debugfs, perf, interrupts and
 * finally the DRM mode objects. Unwinds via _sde_kms_hw_destroy() on
 * failure.
 */
static int sde_kms_hw_init(struct msm_kms *kms)
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i, rc = -EINVAL;

		SDE_ERROR("invalid kms\n");

	sde_kms = to_sde_kms(kms);
	if (!dev || !dev->platformdev) {
		SDE_ERROR("invalid device\n");

	priv = dev->dev_private;
		SDE_ERROR("invalid private data\n");

	/* map the mdp register space */
	sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "SDE");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
	DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);

	/* realtime vbif is mandatory */
	sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev,
			"vbif_phys", "VBIF");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;

	/* non-realtime vbif is optional */
	sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev,
			"vbif_nrt_phys", "VBIF_NRT");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined");

	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
		rc = PTR_ERR(sde_kms->core_client);
		SDE_ERROR("sde power client create failed: %d\n", rc);
		sde_kms->core_client = NULL;

	/* clocks must be on to read the hw revision and program blocks */
	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
		SDE_ERROR("resource enable failed: %d\n", rc);

	_sde_kms_core_hw_rev_init(sde_kms);

	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;

	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
		SDE_ERROR("rm init failed: %d\n", rc);

	sde_kms->rm_init = true;

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		/*
		 * NOTE(review): the result is stored at hw_vbif[i] but the
		 * error check reads hw_vbif[vbif_idx]; confirm i and
		 * vbif_idx always coincide, otherwise the check and the
		 * cleanup in _sde_kms_hw_destroy() look at the wrong slot.
		 */
		sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _sde_kms_mmu_init(sde_kms);
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);

	/*
	 * NOTE: Calling sde_debugfs_init here so that the drm_minor device for
	 * 'primary' is already created.
	 */
	rc = _sde_debugfs_init(sde_kms);
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, priv->pclient, "core_clk_src",
			sde_kms->debugfs_debug);
		SDE_ERROR("failed to init perf %d\n", rc);

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height is
	 */
	dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	/* drop the init-time power reference on success */
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	/* error unwind */
	sde_core_perf_destroy(&sde_kms->perf);

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
/*
 * sde_kms_init - allocate the sde kms object and register its msm_kms
 * callback table. Hardware is brought up later via the hw_init hook.
 * Returns the embedded msm_kms pointer, or ERR_PTR on failure.
 */
struct msm_kms *sde_kms_init(struct drm_device *dev)
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);

	priv = dev->dev_private;

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);

	msm_kms_init(&sde_kms->base, &kms_funcs);

	return &sde_kms->base;