1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include <linux/acpi.h>
31 #include <linux/device.h>
32 #include <linux/oom.h>
33 #include <linux/module.h>
34 #include <linux/pci.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/pnp.h>
38 #include <linux/slab.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
42 #include <acpi/video.h>
45 #include <drm/drm_crtc_helper.h>
46 #include <drm/drm_atomic_helper.h>
47 #include <drm/i915_drm.h>
50 #include "i915_trace.h"
51 #include "i915_vgpu.h"
52 #include "intel_drv.h"
55 static struct drm_driver driver;
57 static unsigned int i915_load_fail_count;
59 bool __i915_inject_load_failure(const char *func, int line)
61 if (i915_load_fail_count >= i915.inject_load_failure)
64 if (++i915_load_fail_count == i915.inject_load_failure) {
65 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
66 i915.inject_load_failure, func, line);
73 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
74 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
75 "providing the dmesg log by booting with drm.debug=0xf"
78 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
81 static bool shown_bug_once;
82 struct device *kdev = dev_priv->drm.dev;
83 bool is_error = level[1] <= KERN_ERR[1];
84 bool is_debug = level[1] == KERN_DEBUG[1];
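/*
 * KERN_* levels are a SOH byte followed by an ASCII digit ('0' = emerg
 * ... '7' = debug), so comparing level[1] against KERN_ERR[1] and
 * KERN_DEBUG[1] is a cheap severity check: a smaller digit means a more
 * severe message.
 */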
88 if (is_debug && !(drm_debug & DRM_UT_DRIVER))
96 dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
97 __builtin_return_address(0), &vaf);
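/*
 * The dev_printk() above uses %ps to resolve __builtin_return_address(0)
 * to the calling function's symbol name, and %pV to expand the caller's
 * original format string and va_list (packed into a struct va_format),
 * so the whole message is emitted in a single call with the caller
 * identified.
 */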
99 if (is_error && !shown_bug_once) {
100 dev_notice(kdev, "%s", FDO_BUG_MSG);
101 shown_bug_once = true;
107 static bool i915_error_injected(struct drm_i915_private *dev_priv)
109 return i915.inject_load_failure &&
110 i915_load_fail_count == i915.inject_load_failure;
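/*
 * Fault injection summary: with i915.inject_load_failure=N,
 * __i915_inject_load_failure() reports a failure at the N-th checkpoint
 * reached during load. i915_error_injected() then tells the logging
 * macro below that the failure was deliberate, so the message is demoted
 * from KERN_ERR to KERN_DEBUG instead of alarming the user.
 */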
113 #define i915_load_error(dev_priv, fmt, ...) \
114 __i915_printk(dev_priv, \
115 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
119 static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
121 enum intel_pch ret = PCH_NOP;
124 * In a virtualized passthrough environment we can be in a
125 * setup where the ISA bridge cannot be passed through.
126 * In this case, a south bridge can be emulated and we have to
127 * make an educated guess as to which PCH is really there.
130 if (IS_GEN5(dev_priv)) {
132 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
133 } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
135 DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
136 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
138 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
139 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
141 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
142 } else if (IS_CANNONLAKE(dev_priv)) {
149 static void intel_detect_pch(struct drm_i915_private *dev_priv)
151 struct pci_dev *pch = NULL;
153 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
154 * (which really amounts to a PCH but no South Display).
156 if (INTEL_INFO(dev_priv)->num_pipes == 0) {
157 dev_priv->pch_type = PCH_NOP;
162 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
163 * make graphics device passthrough easier for the VMM, which then
164 * only needs to expose the ISA bridge to let the driver know the real
165 * hardware underneath. This is a requirement from the virtualization team.
167 * In some virtualized environments (e.g. XEN), there is an irrelevant
168 * ISA bridge in the system. To work reliably, we should scan through
169 * all the ISA bridge devices and check for the first match, instead
170 * of only checking the first one.
172 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
173 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
174 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
175 unsigned short id_ext = pch->device &
176 INTEL_PCH_DEVICE_ID_MASK_EXT;
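/*
 * The masks keep only the high bits of the PCI device ID that identify
 * the PCH family (nominally 0xff00; the _EXT variant keeps one extra
 * bit so LP parts can be told apart). See i915_drv.h for the
 * authoritative values.
 */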
178 dev_priv->pch_id = id;
180 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
181 dev_priv->pch_type = PCH_IBX;
182 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
183 WARN_ON(!IS_GEN5(dev_priv));
184 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
185 dev_priv->pch_type = PCH_CPT;
186 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
187 WARN_ON(!(IS_GEN6(dev_priv) ||
188 IS_IVYBRIDGE(dev_priv)));
189 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
190 /* PantherPoint is CPT compatible */
191 dev_priv->pch_type = PCH_CPT;
192 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
193 WARN_ON(!(IS_GEN6(dev_priv) ||
194 IS_IVYBRIDGE(dev_priv)));
195 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
196 dev_priv->pch_type = PCH_LPT;
197 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
198 WARN_ON(!IS_HASWELL(dev_priv) &&
199 !IS_BROADWELL(dev_priv));
200 WARN_ON(IS_HSW_ULT(dev_priv) ||
201 IS_BDW_ULT(dev_priv));
202 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
203 dev_priv->pch_type = PCH_LPT;
204 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
205 WARN_ON(!IS_HASWELL(dev_priv) &&
206 !IS_BROADWELL(dev_priv));
207 WARN_ON(!IS_HSW_ULT(dev_priv) &&
208 !IS_BDW_ULT(dev_priv));
209 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
210 dev_priv->pch_type = PCH_SPT;
211 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
212 WARN_ON(!IS_SKYLAKE(dev_priv) &&
213 !IS_KABYLAKE(dev_priv));
214 } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
215 dev_priv->pch_type = PCH_SPT;
216 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
217 WARN_ON(!IS_SKYLAKE(dev_priv) &&
218 !IS_KABYLAKE(dev_priv));
219 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
220 dev_priv->pch_type = PCH_KBP;
221 DRM_DEBUG_KMS("Found KabyPoint PCH\n");
222 WARN_ON(!IS_SKYLAKE(dev_priv) &&
223 !IS_KABYLAKE(dev_priv));
224 } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
225 dev_priv->pch_type = PCH_CNP;
226 DRM_DEBUG_KMS("Found CannonPoint PCH\n");
227 WARN_ON(!IS_CANNONLAKE(dev_priv));
228 } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
229 dev_priv->pch_type = PCH_CNP;
230 DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
231 WARN_ON(!IS_CANNONLAKE(dev_priv));
232 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
233 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
234 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
235 pch->subsystem_vendor ==
236 PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
237 pch->subsystem_device ==
238 PCI_SUBDEVICE_ID_QEMU)) {
240 intel_virt_detect_pch(dev_priv);
248 DRM_DEBUG_KMS("No PCH found.\n");
253 static int i915_getparam(struct drm_device *dev, void *data,
254 struct drm_file *file_priv)
256 struct drm_i915_private *dev_priv = to_i915(dev);
257 struct pci_dev *pdev = dev_priv->drm.pdev;
258 drm_i915_getparam_t *param = data;
261 switch (param->param) {
262 case I915_PARAM_IRQ_ACTIVE:
263 case I915_PARAM_ALLOW_BATCHBUFFER:
264 case I915_PARAM_LAST_DISPATCH:
265 case I915_PARAM_HAS_EXEC_CONSTANTS:
266 /* Reject all old ums/dri params. */
268 case I915_PARAM_CHIPSET_ID:
269 value = pdev->device;
271 case I915_PARAM_REVISION:
272 value = pdev->revision;
274 case I915_PARAM_NUM_FENCES_AVAIL:
275 value = dev_priv->num_fence_regs;
277 case I915_PARAM_HAS_OVERLAY:
278 value = dev_priv->overlay ? 1 : 0;
280 case I915_PARAM_HAS_BSD:
281 value = !!dev_priv->engine[VCS];
283 case I915_PARAM_HAS_BLT:
284 value = !!dev_priv->engine[BCS];
286 case I915_PARAM_HAS_VEBOX:
287 value = !!dev_priv->engine[VECS];
289 case I915_PARAM_HAS_BSD2:
290 value = !!dev_priv->engine[VCS2];
292 case I915_PARAM_HAS_LLC:
293 value = HAS_LLC(dev_priv);
295 case I915_PARAM_HAS_WT:
296 value = HAS_WT(dev_priv);
298 case I915_PARAM_HAS_ALIASING_PPGTT:
299 value = USES_PPGTT(dev_priv);
301 case I915_PARAM_HAS_SEMAPHORES:
302 value = i915.semaphores;
304 case I915_PARAM_HAS_SECURE_BATCHES:
305 value = capable(CAP_SYS_ADMIN);
307 case I915_PARAM_CMD_PARSER_VERSION:
308 value = i915_cmd_parser_get_version(dev_priv);
310 case I915_PARAM_SUBSLICE_TOTAL:
311 value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
315 case I915_PARAM_EU_TOTAL:
316 value = INTEL_INFO(dev_priv)->sseu.eu_total;
320 case I915_PARAM_HAS_GPU_RESET:
321 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
323 case I915_PARAM_HAS_RESOURCE_STREAMER:
324 value = HAS_RESOURCE_STREAMER(dev_priv);
326 case I915_PARAM_HAS_POOLED_EU:
327 value = HAS_POOLED_EU(dev_priv);
329 case I915_PARAM_MIN_EU_IN_POOL:
330 value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
332 case I915_PARAM_HUC_STATUS:
333 intel_runtime_pm_get(dev_priv);
334 value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
335 intel_runtime_pm_put(dev_priv);
337 case I915_PARAM_MMAP_GTT_VERSION:
338 /* Though we've started our numbering from 1, and so class all
339 * earlier versions as 0, in effect their value is undefined as
340 * the ioctl will report EINVAL for the unknown param!
342 value = i915_gem_mmap_gtt_version();
344 case I915_PARAM_HAS_SCHEDULER:
345 value = dev_priv->engine[RCS] &&
346 dev_priv->engine[RCS]->schedule;
348 case I915_PARAM_MMAP_VERSION:
349 /* Remember to bump this if the version changes! */
350 case I915_PARAM_HAS_GEM:
351 case I915_PARAM_HAS_PAGEFLIPPING:
352 case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
353 case I915_PARAM_HAS_RELAXED_FENCING:
354 case I915_PARAM_HAS_COHERENT_RINGS:
355 case I915_PARAM_HAS_RELAXED_DELTA:
356 case I915_PARAM_HAS_GEN7_SOL_RESET:
357 case I915_PARAM_HAS_WAIT_TIMEOUT:
358 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
359 case I915_PARAM_HAS_PINNED_BATCHES:
360 case I915_PARAM_HAS_EXEC_NO_RELOC:
361 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
362 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
363 case I915_PARAM_HAS_EXEC_SOFTPIN:
364 case I915_PARAM_HAS_EXEC_ASYNC:
365 case I915_PARAM_HAS_EXEC_FENCE:
366 case I915_PARAM_HAS_EXEC_CAPTURE:
367 /* For the time being all of these are always true;
368 * if some supported hardware does not have one of these
369 * features this value needs to be provided from
370 * INTEL_INFO(), a feature macro, or similar.
375 DRM_DEBUG("Unknown parameter %d\n", param->param);
379 if (put_user(value, param->value))
385 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
387 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
388 if (!dev_priv->bridge_dev) {
389 DRM_ERROR("bridge device not found\n");
395 /* Allocate space for the MCH regs if needed, return nonzero on error */
397 intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
399 int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
400 u32 temp_lo, temp_hi = 0;
404 if (INTEL_GEN(dev_priv) >= 4)
405 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
406 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
407 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
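/*
 * MCHBAR is a 64-bit base address register in the host bridge's config
 * space on gen4+; the low and high dwords read above are stitched into
 * the full address. Pre-gen4 parts only have the low dword, so temp_hi
 * stays 0 there.
 */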
409 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
412 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
416 /* Get some space for it */
417 dev_priv->mch_res.name = "i915 MCHBAR";
418 dev_priv->mch_res.flags = IORESOURCE_MEM;
419 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
421 MCHBAR_SIZE, MCHBAR_SIZE,
423 0, pcibios_align_resource,
424 dev_priv->bridge_dev);
426 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
427 dev_priv->mch_res.start = 0;
431 if (INTEL_GEN(dev_priv) >= 4)
432 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
433 upper_32_bits(dev_priv->mch_res.start));
435 pci_write_config_dword(dev_priv->bridge_dev, reg,
436 lower_32_bits(dev_priv->mch_res.start));
440 /* Setup MCHBAR if possible, return true if we should disable it again */
442 intel_setup_mchbar(struct drm_i915_private *dev_priv)
444 int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
448 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
451 dev_priv->mchbar_need_disable = false;
453 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
454 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
455 enabled = !!(temp & DEVEN_MCHBAR_EN);
457 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
461 /* If it's already enabled, don't have to do anything */
465 if (intel_alloc_mchbar_resource(dev_priv))
468 dev_priv->mchbar_need_disable = true;
470 /* Space is allocated or reserved, so enable it. */
471 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
472 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
473 temp | DEVEN_MCHBAR_EN);
475 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
476 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
481 intel_teardown_mchbar(struct drm_i915_private *dev_priv)
483 int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
485 if (dev_priv->mchbar_need_disable) {
486 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
489 pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
491 deven_val &= ~DEVEN_MCHBAR_EN;
492 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
497 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
500 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
505 if (dev_priv->mch_res.start)
506 release_resource(&dev_priv->mch_res);
509 /* true = enable decode, false = disable decode */
510 static unsigned int i915_vga_set_decode(void *cookie, bool state)
512 struct drm_i915_private *dev_priv = cookie;
514 intel_modeset_vga_set_state(dev_priv, state);
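/*
 * The return value tells the VGA arbiter which resources this device
 * decodes: with decode enabled we claim the legacy VGA I/O and memory
 * ranges in addition to the normal ones; with decode disabled only the
 * normal (non-legacy) resources remain claimed.
 */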
516 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
517 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
519 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
522 static int i915_resume_switcheroo(struct drm_device *dev);
523 static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
525 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
527 struct drm_device *dev = pci_get_drvdata(pdev);
528 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
530 if (state == VGA_SWITCHEROO_ON) {
531 pr_info("switched on\n");
532 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
533 /* i915 resume handler doesn't set to D0 */
534 pci_set_power_state(pdev, PCI_D0);
535 i915_resume_switcheroo(dev);
536 dev->switch_power_state = DRM_SWITCH_POWER_ON;
538 pr_info("switched off\n");
539 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
540 i915_suspend_switcheroo(dev, pmm);
541 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
545 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
547 struct drm_device *dev = pci_get_drvdata(pdev);
550 * FIXME: open_count is protected by drm_global_mutex but that would lead to
551 * locking inversion with the driver load path. And the access here is
552 * completely racy anyway. So don't bother with locking for now.
554 return dev->open_count == 0;
557 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
558 .set_gpu_state = i915_switcheroo_set_state,
560 .can_switch = i915_switcheroo_can_switch,
563 static void i915_gem_fini(struct drm_i915_private *dev_priv)
565 mutex_lock(&dev_priv->drm.struct_mutex);
566 intel_uc_fini_hw(dev_priv);
567 i915_gem_cleanup_engines(dev_priv);
568 i915_gem_context_fini(dev_priv);
569 mutex_unlock(&dev_priv->drm.struct_mutex);
571 i915_gem_drain_freed_objects(dev_priv);
573 WARN_ON(!list_empty(&dev_priv->context_list));
576 static int i915_load_modeset_init(struct drm_device *dev)
578 struct drm_i915_private *dev_priv = to_i915(dev);
579 struct pci_dev *pdev = dev_priv->drm.pdev;
582 if (i915_inject_load_failure())
585 intel_bios_init(dev_priv);
587 /* If we have more than one VGA card, then we need to arbitrate access
588 * to the common VGA resources.
590 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
591 * then we do not take part in VGA arbitration and the
592 * vga_client_register() fails with -ENODEV.
594 ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
595 if (ret && ret != -ENODEV)
598 intel_register_dsm_handler();
600 ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
602 goto cleanup_vga_client;
604 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
605 intel_update_rawclk(dev_priv);
607 intel_power_domains_init_hw(dev_priv, false);
609 intel_csr_ucode_init(dev_priv);
611 ret = intel_irq_install(dev_priv);
615 intel_setup_gmbus(dev_priv);
617 /* Important: The output setup functions called by modeset_init need
618 * working irqs for e.g. gmbus and dp aux transfers. */
619 ret = intel_modeset_init(dev);
623 intel_uc_init_fw(dev_priv);
625 ret = i915_gem_init(dev_priv);
629 intel_modeset_gem_init(dev);
631 if (INTEL_INFO(dev_priv)->num_pipes == 0)
634 ret = intel_fbdev_init(dev);
638 /* Only enable hotplug handling once the fbdev is fully set up. */
639 intel_hpd_init(dev_priv);
641 drm_kms_helper_poll_init(dev);
646 if (i915_gem_suspend(dev_priv))
647 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
648 i915_gem_fini(dev_priv);
650 intel_uc_fini_fw(dev_priv);
652 drm_irq_uninstall(dev);
653 intel_teardown_gmbus(dev_priv);
655 intel_csr_ucode_fini(dev_priv);
656 intel_power_domains_fini(dev_priv);
657 vga_switcheroo_unregister_client(pdev);
659 vga_client_register(pdev, NULL, NULL, NULL);
664 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
666 struct apertures_struct *ap;
667 struct pci_dev *pdev = dev_priv->drm.pdev;
668 struct i915_ggtt *ggtt = &dev_priv->ggtt;
672 ap = alloc_apertures(1);
676 ap->ranges[0].base = ggtt->mappable_base;
677 ap->ranges[0].size = ggtt->mappable_end;
680 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
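/*
 * A shadowed ROM is the firmware's hint that this was the boot display
 * device; passing primary = true below also evicts generic firmware
 * framebuffer drivers (such as vesafb/efifb) that sit on the legacy VGA
 * range rather than on our aperture.
 */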
682 ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
689 #if !defined(CONFIG_VGA_CONSOLE)
690 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
694 #elif !defined(CONFIG_DUMMY_CONSOLE)
695 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
700 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
704 DRM_INFO("Replacing VGA console driver\n");
707 if (con_is_bound(&vga_con))
708 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
710 ret = do_unregister_con_driver(&vga_con);
712 /* Ignore "already unregistered". */
722 static void intel_init_dpio(struct drm_i915_private *dev_priv)
725 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
726 * CHV x1 PHY (DP/HDMI D)
727 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
729 if (IS_CHERRYVIEW(dev_priv)) {
730 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
731 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
732 } else if (IS_VALLEYVIEW(dev_priv)) {
733 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
737 static int i915_workqueues_init(struct drm_i915_private *dev_priv)
740 * The i915 workqueue is primarily used for batched retirement of
741 * requests (and thus managing bo) once the task has been completed
742 * by the GPU. i915_gem_retire_requests() is called directly when we
743 * need high-priority retirement, such as waiting for an explicit
746 * It is also used for periodic low-priority events, such as
747 * idle-timers and recording error state.
749 * All tasks on the workqueue are expected to acquire the dev mutex
750 * so there is no point in running more than one instance of the
751 * workqueue at any time. Use an ordered one.
753 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
754 if (dev_priv->wq == NULL)
757 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
758 if (dev_priv->hotplug.dp_wq == NULL)
764 destroy_workqueue(dev_priv->wq);
766 DRM_ERROR("Failed to allocate workqueues.\n");
771 static void i915_engines_cleanup(struct drm_i915_private *i915)
773 struct intel_engine_cs *engine;
774 enum intel_engine_id id;
776 for_each_engine(engine, i915, id)
780 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
782 destroy_workqueue(dev_priv->hotplug.dp_wq);
783 destroy_workqueue(dev_priv->wq);
787 * We don't keep the workarounds for pre-production hardware, so we expect our
788 * driver to fail on these machines in one way or another. A little warning on
789 * dmesg may help both the user and the bug triagers.
791 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
795 pre |= IS_HSW_EARLY_SDV(dev_priv);
796 pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
797 pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
800 DRM_ERROR("This is a pre-production stepping. "
801 "It may not be fully functional.\n");
802 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
807 * i915_driver_init_early - setup state not requiring device access
808 * @dev_priv: device private
810 * Initialize everything that is a "SW-only" state, that is state not
811 * requiring accessing the device or exposing the driver via kernel internal
812 * or userspace interfaces. Example steps belonging here: lock initialization,
813 * system memory allocation, setting up device specific attributes and
814 * function hooks not requiring accessing the device.
816 static int i915_driver_init_early(struct drm_i915_private *dev_priv,
817 const struct pci_device_id *ent)
819 const struct intel_device_info *match_info =
820 (struct intel_device_info *)ent->driver_data;
821 struct intel_device_info *device_info;
824 if (i915_inject_load_failure())
827 /* Setup the write-once "constant" device info */
828 device_info = mkwrite_device_info(dev_priv);
829 memcpy(device_info, match_info, sizeof(*device_info));
830 device_info->device_id = dev_priv->drm.pdev->device;
832 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
833 device_info->gen_mask = BIT(device_info->gen - 1);
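/*
 * Illustrative: gen == 9 yields gen_mask == BIT(8) == 0x100, so later
 * generation checks reduce to a single bit test against a mask of
 * allowed generations.
 */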
835 spin_lock_init(&dev_priv->irq_lock);
836 spin_lock_init(&dev_priv->gpu_error.lock);
837 mutex_init(&dev_priv->backlight_lock);
838 spin_lock_init(&dev_priv->uncore.lock);
840 spin_lock_init(&dev_priv->mm.object_stat_lock);
841 spin_lock_init(&dev_priv->mmio_flip_lock);
842 mutex_init(&dev_priv->sb_lock);
843 mutex_init(&dev_priv->modeset_restore_lock);
844 mutex_init(&dev_priv->av_mutex);
845 mutex_init(&dev_priv->wm.wm_mutex);
846 mutex_init(&dev_priv->pps_mutex);
848 intel_uc_init_early(dev_priv);
849 i915_memcpy_init_early(dev_priv);
851 ret = i915_workqueues_init(dev_priv);
855 /* This must be called before any calls to HAS_PCH_* */
856 intel_detect_pch(dev_priv);
858 intel_pm_setup(dev_priv);
859 intel_init_dpio(dev_priv);
860 intel_power_domains_init(dev_priv);
861 intel_irq_init(dev_priv);
862 intel_hangcheck_init(dev_priv);
863 intel_init_display_hooks(dev_priv);
864 intel_init_clock_gating_hooks(dev_priv);
865 intel_init_audio_hooks(dev_priv);
866 ret = i915_gem_load_init(dev_priv);
870 intel_display_crc_init(dev_priv);
872 intel_device_info_dump(dev_priv);
874 intel_detect_preproduction_hw(dev_priv);
876 i915_perf_init(dev_priv);
881 intel_irq_fini(dev_priv);
882 i915_workqueues_cleanup(dev_priv);
884 i915_engines_cleanup(dev_priv);
889 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
890 * @dev_priv: device private
892 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
894 i915_perf_fini(dev_priv);
895 i915_gem_load_cleanup(dev_priv);
896 intel_irq_fini(dev_priv);
897 i915_workqueues_cleanup(dev_priv);
898 i915_engines_cleanup(dev_priv);
901 static int i915_mmio_setup(struct drm_i915_private *dev_priv)
903 struct pci_dev *pdev = dev_priv->drm.pdev;
907 mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
909 * Before gen4, the registers and the GTT are behind different BARs.
910 * However, from gen4 onwards, the registers and the GTT are shared
911 * in the same BAR, so we want to restrict this ioremap() from
912 * clobbering the GTT, which we want to map with ioremap_wc() instead. Fortunately,
913 * the register BAR remains the same size for all the earlier
914 * generations up to Ironlake.
916 if (INTEL_GEN(dev_priv) < 5)
917 mmio_size = 512 * 1024;
919 mmio_size = 2 * 1024 * 1024;
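/*
 * pci_iomap()'s third argument caps the mapping length, so only the
 * register portion of the BAR is mapped here; on gen4+ the GTT half of
 * the shared BAR is left to be mapped write-combined by the GGTT code.
 */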
920 dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
921 if (dev_priv->regs == NULL) {
922 DRM_ERROR("failed to map registers\n");
927 /* Try to make sure MCHBAR is enabled before poking at it */
928 intel_setup_mchbar(dev_priv);
933 static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
935 struct pci_dev *pdev = dev_priv->drm.pdev;
937 intel_teardown_mchbar(dev_priv);
938 pci_iounmap(pdev, dev_priv->regs);
942 * i915_driver_init_mmio - setup device MMIO
943 * @dev_priv: device private
945 * Setup minimal device state necessary for MMIO accesses later in the
946 * initialization sequence. The setup here should avoid any other device-wide
947 * side effects or exposing the driver via kernel internal or user space
950 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
954 if (i915_inject_load_failure())
957 if (i915_get_bridge_dev(dev_priv))
960 ret = i915_mmio_setup(dev_priv);
964 intel_uncore_init(dev_priv);
966 ret = intel_engines_init_mmio(dev_priv);
970 i915_gem_init_mmio(dev_priv);
975 intel_uncore_fini(dev_priv);
977 pci_dev_put(dev_priv->bridge_dev);
983 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
984 * @dev_priv: device private
986 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
988 intel_uncore_fini(dev_priv);
989 i915_mmio_cleanup(dev_priv);
990 pci_dev_put(dev_priv->bridge_dev);
993 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
995 i915.enable_execlists =
996 intel_sanitize_enable_execlists(dev_priv,
997 i915.enable_execlists);
1000 * i915.enable_ppgtt is read-only, so do an early pass to validate the
1001 * user's requested state against the hardware/driver capabilities. We
1002 * do this now so that we can print out any log messages once rather
1003 * than every time we check intel_enable_ppgtt().
1006 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
1007 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
1009 i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
1010 DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
1012 intel_uc_sanitize_options(dev_priv);
1014 intel_gvt_sanitize_options(dev_priv);
1018 * i915_driver_init_hw - setup state requiring device access
1019 * @dev_priv: device private
1021 * Setup state that requires accessing the device, but doesn't require
1022 * exposing the driver via kernel internal or userspace interfaces.
1024 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1026 struct pci_dev *pdev = dev_priv->drm.pdev;
1029 if (i915_inject_load_failure())
1032 intel_device_info_runtime_init(dev_priv);
1034 intel_sanitize_options(dev_priv);
1036 ret = i915_ggtt_probe_hw(dev_priv);
1040 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1041 * otherwise the vga fbdev driver falls over. */
1042 ret = i915_kick_out_firmware_fb(dev_priv);
1044 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1048 ret = i915_kick_out_vgacon(dev_priv);
1050 DRM_ERROR("failed to remove conflicting VGA console\n");
1054 ret = i915_ggtt_init_hw(dev_priv);
1058 ret = i915_ggtt_enable_hw(dev_priv);
1060 DRM_ERROR("failed to enable GGTT\n");
1064 pci_set_master(pdev);
1066 /* overlay on gen2 is broken and can't address above 1G */
1067 if (IS_GEN2(dev_priv)) {
1068 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
1070 DRM_ERROR("failed to set DMA mask\n");
1076 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1077 * using 32bit addressing, overwriting memory if HWS is located
1080 * The documentation also mentions an issue with undefined
1081 * behaviour if any general state is accessed within a page above 4GB,
1082 * which also needs to be handled carefully.
1084 if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
1085 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1088 DRM_ERROR("failed to set DMA mask\n");
1094 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1095 PM_QOS_DEFAULT_VALUE);
1097 intel_uncore_sanitize(dev_priv);
1099 intel_opregion_setup(dev_priv);
1101 i915_gem_load_init_fences(dev_priv);
1103 /* On the 945G/GM, the chipset reports the MSI capability on the
1104 * integrated graphics even though the support isn't actually there
1105 * according to the published specs. It doesn't appear to function
1106 * correctly in testing on 945G.
1107 * This may be a side effect of MSI having been made available for PEG
1108 * and the registers being closely associated.
1110 * According to chipset errata, on the 965GM, MSI interrupts may
1111 * be lost or delayed, but we use them anyway to avoid
1112 * stuck interrupts on some machines.
1114 if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
1115 if (pci_enable_msi(pdev) < 0)
1116 DRM_DEBUG_DRIVER("can't enable MSI");
1119 ret = intel_gvt_init(dev_priv);
1126 i915_ggtt_cleanup_hw(dev_priv);
1132 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1133 * @dev_priv: device private
1135 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1137 struct pci_dev *pdev = dev_priv->drm.pdev;
1139 if (pdev->msi_enabled)
1140 pci_disable_msi(pdev);
1142 pm_qos_remove_request(&dev_priv->pm_qos);
1143 i915_ggtt_cleanup_hw(dev_priv);
1147 * i915_driver_register - register the driver with the rest of the system
1148 * @dev_priv: device private
1150 * Perform any steps necessary to make the driver available via kernel
1151 * internal or userspace interfaces.
1153 static void i915_driver_register(struct drm_i915_private *dev_priv)
1155 struct drm_device *dev = &dev_priv->drm;
1157 i915_gem_shrinker_init(dev_priv);
1160 * Notify a valid surface after modesetting,
1161 * when running inside a VM.
1163 if (intel_vgpu_active(dev_priv))
1164 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1166 /* Reveal our presence to userspace */
1167 if (drm_dev_register(dev, 0) == 0) {
1168 i915_debugfs_register(dev_priv);
1169 i915_guc_log_register(dev_priv);
1170 i915_setup_sysfs(dev_priv);
1172 /* Depends on sysfs having been initialized */
1173 i915_perf_register(dev_priv);
1175 DRM_ERROR("Failed to register driver for userspace access!\n");
1177 if (INTEL_INFO(dev_priv)->num_pipes) {
1178 /* Must be done after probing outputs */
1179 intel_opregion_register(dev_priv);
1180 acpi_video_register();
1183 if (IS_GEN5(dev_priv))
1184 intel_gpu_ips_init(dev_priv);
1186 intel_audio_init(dev_priv);
1189 * Some ports require correctly set-up hpd registers for detection to
1190 * work properly (otherwise they report a ghost connected connector status), e.g. VGA
1191 * on gm45. Hence we can only set up the initial fbdev config after hpd
1192 * irqs are fully enabled. We do it last so that the async config
1193 * cannot run before the connectors are registered.
1195 intel_fbdev_initial_config_async(dev);
1199 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1200 * @dev_priv: device private
1202 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1204 intel_audio_deinit(dev_priv);
1206 intel_gpu_ips_teardown();
1207 acpi_video_unregister();
1208 intel_opregion_unregister(dev_priv);
1210 i915_perf_unregister(dev_priv);
1212 i915_teardown_sysfs(dev_priv);
1213 i915_guc_log_unregister(dev_priv);
1214 drm_dev_unregister(&dev_priv->drm);
1216 i915_gem_shrinker_cleanup(dev_priv);
1220 * i915_driver_load - setup chip and create an initial config
1222 * @ent: matching PCI ID entry
1224 * The driver load routine has to do several things:
1225 * - drive output discovery via intel_modeset_init()
1226 * - initialize the memory manager
1227 * - allocate initial config memory
1228 * - setup the DRM framebuffer with the allocated memory
1230 int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1232 const struct intel_device_info *match_info =
1233 (struct intel_device_info *)ent->driver_data;
1234 struct drm_i915_private *dev_priv;
1237 /* Enable nuclear pageflip on ILK+ */
1238 if (!i915.nuclear_pageflip && match_info->gen < 5)
1239 driver.driver_features &= ~DRIVER_ATOMIC;
1242 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1244 ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
1246 DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
1250 dev_priv->drm.pdev = pdev;
1251 dev_priv->drm.dev_private = dev_priv;
1253 ret = pci_enable_device(pdev);
1257 pci_set_drvdata(pdev, &dev_priv->drm);
1259 * Disable the system suspend direct complete optimization, which can
1260 * leave the device suspended, skipping the driver's suspend handlers,
1261 * if the device was already runtime suspended. This is needed due to
1262 * the difference in our runtime and system suspend sequence and
1263 * because the HDA driver may require us to enable the audio power
1264 * domain during system suspend.
1266 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
1268 ret = i915_driver_init_early(dev_priv, ent);
1270 goto out_pci_disable;
1272 intel_runtime_pm_get(dev_priv);
1274 ret = i915_driver_init_mmio(dev_priv);
1276 goto out_runtime_pm_put;
1278 ret = i915_driver_init_hw(dev_priv);
1280 goto out_cleanup_mmio;
1283 * TODO: move the vblank init and parts of modeset init steps into one
1284 * of the i915_driver_init_/i915_driver_register functions according
1285 * to the role/effect of the given init step.
1287 if (INTEL_INFO(dev_priv)->num_pipes) {
1288 ret = drm_vblank_init(&dev_priv->drm,
1289 INTEL_INFO(dev_priv)->num_pipes);
1291 goto out_cleanup_hw;
1294 ret = i915_load_modeset_init(&dev_priv->drm);
1296 goto out_cleanup_vblank;
1298 i915_driver_register(dev_priv);
1300 intel_runtime_pm_enable(dev_priv);
1302 dev_priv->ipc_enabled = false;
1304 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1305 DRM_INFO("DRM_I915_DEBUG enabled\n");
1306 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1307 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
1309 intel_runtime_pm_put(dev_priv);
1314 drm_vblank_cleanup(&dev_priv->drm);
1316 i915_driver_cleanup_hw(dev_priv);
1318 i915_driver_cleanup_mmio(dev_priv);
1320 intel_runtime_pm_put(dev_priv);
1321 i915_driver_cleanup_early(dev_priv);
1323 pci_disable_device(pdev);
1325 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
1326 drm_dev_fini(&dev_priv->drm);
1332 void i915_driver_unload(struct drm_device *dev)
1334 struct drm_i915_private *dev_priv = to_i915(dev);
1335 struct pci_dev *pdev = dev_priv->drm.pdev;
1337 intel_fbdev_fini(dev);
1339 if (i915_gem_suspend(dev_priv))
1340 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
1342 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1344 drm_atomic_helper_shutdown(dev);
1346 intel_gvt_cleanup(dev_priv);
1348 i915_driver_unregister(dev_priv);
1350 drm_vblank_cleanup(dev);
1352 intel_modeset_cleanup(dev);
1355 * free the memory space allocated for the child device
1356 * config parsed from VBT
1358 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1359 kfree(dev_priv->vbt.child_dev);
1360 dev_priv->vbt.child_dev = NULL;
1361 dev_priv->vbt.child_dev_num = 0;
1363 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1364 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1365 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1366 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1368 vga_switcheroo_unregister_client(pdev);
1369 vga_client_register(pdev, NULL, NULL, NULL);
1371 intel_csr_ucode_fini(dev_priv);
1373 /* Free error state after interrupts are fully disabled. */
1374 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1375 i915_reset_error_state(dev_priv);
1377 /* Flush any outstanding unpin_work. */
1378 drain_workqueue(dev_priv->wq);
1380 i915_gem_fini(dev_priv);
1381 intel_uc_fini_fw(dev_priv);
1382 intel_fbc_cleanup_cfb(dev_priv);
1384 intel_power_domains_fini(dev_priv);
1386 i915_driver_cleanup_hw(dev_priv);
1387 i915_driver_cleanup_mmio(dev_priv);
1389 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1392 static void i915_driver_release(struct drm_device *dev)
1394 struct drm_i915_private *dev_priv = to_i915(dev);
1396 i915_driver_cleanup_early(dev_priv);
1397 drm_dev_fini(&dev_priv->drm);
1402 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1406 ret = i915_gem_open(dev, file);
1414 * i915_driver_lastclose - clean up after all DRM clients have exited
1417 * Take care of cleaning up after all DRM clients have exited. In the
1418 * mode setting case, we want to restore the kernel's initial mode (just
1419 * in case the last client left us in a bad state).
1421 * Additionally, in the non-mode setting case, we'll tear down the GTT
1422 * and DMA structures, since the kernel won't be using them, and clean
1425 static void i915_driver_lastclose(struct drm_device *dev)
1427 intel_fbdev_restore_mode(dev);
1428 vga_switcheroo_process_delayed_switch();
1431 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1433 struct drm_i915_file_private *file_priv = file->driver_priv;
1435 mutex_lock(&dev->struct_mutex);
1436 i915_gem_context_close(dev, file);
1437 i915_gem_release(dev, file);
1438 mutex_unlock(&dev->struct_mutex);
1443 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1445 struct drm_device *dev = &dev_priv->drm;
1446 struct intel_encoder *encoder;
1448 drm_modeset_lock_all(dev);
1449 for_each_intel_encoder(dev, encoder)
1450 if (encoder->suspend)
1451 encoder->suspend(encoder);
1452 drm_modeset_unlock_all(dev);
1455 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1457 static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
1459 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1461 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
1462 if (acpi_target_system_state() < ACPI_STATE_S3)
1468 static int i915_drm_suspend(struct drm_device *dev)
1470 struct drm_i915_private *dev_priv = to_i915(dev);
1471 struct pci_dev *pdev = dev_priv->drm.pdev;
1472 pci_power_t opregion_target_state;
1475 /* ignore lid events during suspend */
1476 mutex_lock(&dev_priv->modeset_restore_lock);
1477 dev_priv->modeset_restore = MODESET_SUSPENDED;
1478 mutex_unlock(&dev_priv->modeset_restore_lock);
1480 disable_rpm_wakeref_asserts(dev_priv);
1482 /* We do a lot of poking in a lot of registers, make sure they work
1484 intel_display_set_init_power(dev_priv, true);
1486 drm_kms_helper_poll_disable(dev);
1488 pci_save_state(pdev);
1490 error = i915_gem_suspend(dev_priv);
1493 "GEM idle failed, resume might fail\n");
1497 intel_display_suspend(dev);
1499 intel_dp_mst_suspend(dev);
1501 intel_runtime_pm_disable_interrupts(dev_priv);
1502 intel_hpd_cancel_work(dev_priv);
1504 intel_suspend_encoders(dev_priv);
1506 intel_suspend_hw(dev_priv);
1508 i915_gem_suspend_gtt_mappings(dev_priv);
1510 i915_save_state(dev_priv);
1512 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1513 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
1515 intel_uncore_suspend(dev_priv);
1516 intel_opregion_unregister(dev_priv);
1518 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1520 dev_priv->suspend_count++;
1522 intel_csr_ucode_suspend(dev_priv);
1525 enable_rpm_wakeref_asserts(dev_priv);
1530 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1532 struct drm_i915_private *dev_priv = to_i915(dev);
1533 struct pci_dev *pdev = dev_priv->drm.pdev;
1537 disable_rpm_wakeref_asserts(dev_priv);
1539 intel_display_set_init_power(dev_priv, false);
1541 fw_csr = !IS_GEN9_LP(dev_priv) &&
1542 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
1544 * In case of firmware assisted context save/restore don't manually
1545 * deinit the power domains. This also means the CSR/DMC firmware will
1546 * stay active; it will power down any HW resources as required and
1547 * also enable deeper system power states that would be blocked if the
1548 * firmware was inactive.
1551 intel_power_domains_suspend(dev_priv);
1554 if (IS_GEN9_LP(dev_priv))
1555 bxt_enable_dc9(dev_priv);
1556 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1557 hsw_enable_pc8(dev_priv);
1558 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1559 ret = vlv_suspend_complete(dev_priv);
1562 DRM_ERROR("Suspend complete failed: %d\n", ret);
1564 intel_power_domains_init_hw(dev_priv, true);
1569 pci_disable_device(pdev);
1571 * During hibernation on some platforms the BIOS may try to access
1572 * the device even though it's already in D3 and hang the machine. So
1573 * leave the device in D0 on those platforms and hope the BIOS will
1574 * power down the device properly. The issue was seen on multiple old
1575 * GENs with different BIOS vendors, so having an explicit blacklist
1576 * is impractical; apply the workaround on everything pre GEN6. The
1577 * platforms where the issue was seen:
1578 * Lenovo Thinkpad X301, X61s, X60, T60, X41
1582 if (!(hibernation && INTEL_GEN(dev_priv) < 6))
1583 pci_set_power_state(pdev, PCI_D3hot);
1585 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
1588 enable_rpm_wakeref_asserts(dev_priv);
1593 static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
1598 DRM_ERROR("dev: %p\n", dev);
1599 DRM_ERROR("DRM not initialized, aborting suspend.\n");
1603 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
1604 state.event != PM_EVENT_FREEZE))
1607 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1610 error = i915_drm_suspend(dev);
1614 return i915_drm_suspend_late(dev, false);
1617 static int i915_drm_resume(struct drm_device *dev)
1619 struct drm_i915_private *dev_priv = to_i915(dev);
1622 disable_rpm_wakeref_asserts(dev_priv);
1623 intel_sanitize_gt_powersave(dev_priv);
1625 ret = i915_ggtt_enable_hw(dev_priv);
1627 DRM_ERROR("failed to re-enable GGTT\n");
1629 intel_csr_ucode_resume(dev_priv);
1631 i915_gem_resume(dev_priv);
1633 i915_restore_state(dev_priv);
1634 intel_pps_unlock_regs_wa(dev_priv);
1635 intel_opregion_setup(dev_priv);
1637 intel_init_pch_refclk(dev_priv);
1640 * Interrupts have to be enabled before any batches are run. If not the
1641 * GPU will hang. i915_gem_init_hw() will initiate batches to
1642 * update/restore the context.
1644 * drm_mode_config_reset() needs AUX interrupts.
1646 * Modeset enabling in intel_modeset_init_hw() also needs working
1649 intel_runtime_pm_enable_interrupts(dev_priv);
1651 drm_mode_config_reset(dev);
1653 mutex_lock(&dev->struct_mutex);
1654 if (i915_gem_init_hw(dev_priv)) {
1655 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
1656 i915_gem_set_wedged(dev_priv);
1658 mutex_unlock(&dev->struct_mutex);
1660 intel_guc_resume(dev_priv);
1662 intel_modeset_init_hw(dev);
1664 spin_lock_irq(&dev_priv->irq_lock);
1665 if (dev_priv->display.hpd_irq_setup)
1666 dev_priv->display.hpd_irq_setup(dev_priv);
1667 spin_unlock_irq(&dev_priv->irq_lock);
1669 intel_dp_mst_resume(dev);
1671 intel_display_resume(dev);
1673 drm_kms_helper_poll_enable(dev);
1676 * ... but also need to make sure that hotplug processing
1677 * doesn't cause havoc. Like in the driver load code we don't
1678 * bother with the tiny race here where we might lose hotplug
1681 intel_hpd_init(dev_priv);
1683 intel_opregion_register(dev_priv);
1685 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1687 mutex_lock(&dev_priv->modeset_restore_lock);
1688 dev_priv->modeset_restore = MODESET_DONE;
1689 mutex_unlock(&dev_priv->modeset_restore_lock);
1691 intel_opregion_notify_adapter(dev_priv, PCI_D0);
1693 intel_autoenable_gt_powersave(dev_priv);
1695 enable_rpm_wakeref_asserts(dev_priv);
1700 static int i915_drm_resume_early(struct drm_device *dev)
1702 struct drm_i915_private *dev_priv = to_i915(dev);
1703 struct pci_dev *pdev = dev_priv->drm.pdev;
1707 * We have a resume ordering issue with the snd-hda driver also
1708 * requiring our device to be powered up. Due to the lack of a
1709 * parent/child relationship we currently solve this with an early
1712 * FIXME: This should be solved with a special hdmi sink device or
1713 * similar so that power domains can be employed.
1717 * Note that we need to set the power state explicitly, since we
1718 * powered off the device during freeze and the PCI core won't power
1719 * it back up for us during thaw. Powering off the device during
1720 * freeze is not a hard requirement though, and during the
1721 * suspend/resume phases the PCI core makes sure we get here with the
1722 * device powered on. So in case we change our freeze logic and keep
1723 * the device powered we can also remove the following set power state
1726 ret = pci_set_power_state(pdev, PCI_D0);
1728 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
1733 * Note that pci_enable_device() first enables any parent bridge
1734 * device and only then sets the power state for this device. The
1735 * bridge enabling is a nop though, since bridge devices are resumed
1736 * first. The order of enabling power and enabling the device is
1737 * imposed by the PCI core as described above, so here we preserve the
1738 * same order for the freeze/thaw phases.
1740 * TODO: eventually we should remove pci_disable_device() /
1741 * pci_enable_device() from suspend/resume. Due to how they
1742 * depend on the device enable refcount we can't anyway depend on them
1743 * disabling/enabling the device.
1745 if (pci_enable_device(pdev)) {
1750 pci_set_master(pdev);
1752 disable_rpm_wakeref_asserts(dev_priv);
1754 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1755 ret = vlv_resume_prepare(dev_priv, false);
1757 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
1760 intel_uncore_resume_early(dev_priv);
1762 if (IS_GEN9_LP(dev_priv)) {
1763 if (!dev_priv->suspended_to_idle)
1764 gen9_sanitize_dc_state(dev_priv);
1765 bxt_disable_dc9(dev_priv);
1766 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1767 hsw_disable_pc8(dev_priv);
1770 intel_uncore_sanitize(dev_priv);
1772 if (IS_GEN9_LP(dev_priv) ||
1773 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
1774 intel_power_domains_init_hw(dev_priv, true);
1776 i915_gem_sanitize(dev_priv);
1778 enable_rpm_wakeref_asserts(dev_priv);
1781 dev_priv->suspended_to_idle = false;
1786 static int i915_resume_switcheroo(struct drm_device *dev)
1790 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1793 ret = i915_drm_resume_early(dev);
1797 return i915_drm_resume(dev);
1801 * i915_reset - reset chip after a hang
1802 * @dev_priv: device private to reset
1804 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1807 * Caller must hold the struct_mutex.
1809 * Procedure is fairly simple:
1810 * - reset the chip using the reset reg
1811 * - re-init context state
1812 * - re-init hardware status page
1813 * - re-init ring buffer
1814 * - re-init interrupt state
1817 void i915_reset(struct drm_i915_private *dev_priv)
1819 struct i915_gpu_error *error = &dev_priv->gpu_error;
1822 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1823 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
1825 if (!test_bit(I915_RESET_HANDOFF, &error->flags))
1828 /* Clear any previous failed attempts at recovery. Time to try again. */
1829 if (!i915_gem_unset_wedged(dev_priv))
1832 error->reset_count++;
1834 pr_notice("drm/i915: Resetting chip after gpu hang\n");
1835 disable_irq(dev_priv->drm.irq);
1836 ret = i915_gem_reset_prepare(dev_priv);
1838 DRM_ERROR("GPU recovery failed\n");
1839 intel_gpu_reset(dev_priv, ALL_ENGINES);
1843 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
1846 DRM_ERROR("Failed to reset chip: %i\n", ret);
1848 DRM_DEBUG_DRIVER("GPU reset disabled\n");
1852 i915_gem_reset(dev_priv);
1853 intel_overlay_reset(dev_priv);
1855 /* Ok, now get things going again... */
1858 * Everything depends on having the GTT running, so we need to start
1859 * there. Fortunately we don't need to do this unless we reset the
1860 * chip at a PCI level.
1862 * Next we need to restore the context, but we don't use those
1865 * Ring buffer needs to be re-initialized in the KMS case, or if X
1866 * was running at the time of the reset (i.e. we weren't VT
1869 ret = i915_gem_init_hw(dev_priv);
1871 DRM_ERROR("Failed hw init on reset %d\n", ret);
1875 i915_queue_hangcheck(dev_priv);
1878 i915_gem_reset_finish(dev_priv);
1879 enable_irq(dev_priv->drm.irq);
1882 clear_bit(I915_RESET_HANDOFF, &error->flags);
1883 wake_up_bit(&error->flags, I915_RESET_HANDOFF);
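/*
 * Clearing I915_RESET_HANDOFF and kicking its waitqueue lets anyone
 * sleeping in wait_on_bit() observe that this reset attempt has
 * finished, whether it succeeded or ended in i915_gem_set_wedged().
 */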
1887 i915_gem_set_wedged(dev_priv);
1891 static int i915_pm_suspend(struct device *kdev)
1893 struct pci_dev *pdev = to_pci_dev(kdev);
1894 struct drm_device *dev = pci_get_drvdata(pdev);
1897 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1901 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1904 return i915_drm_suspend(dev);
1907 static int i915_pm_suspend_late(struct device *kdev)
1909 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1912 * We have a suspend ordering issue with the snd-hda driver also
1913 * requiring our device to be powered up. Due to the lack of a
1914 * parent/child relationship we currently solve this with a late
1917 * FIXME: This should be solved with a special hdmi sink device or
1918 * similar so that power domains can be employed.
1920 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1923 return i915_drm_suspend_late(dev, false);
1926 static int i915_pm_poweroff_late(struct device *kdev)
1928 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1930 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1933 return i915_drm_suspend_late(dev, true);
1936 static int i915_pm_resume_early(struct device *kdev)
1938 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1940 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1943 return i915_drm_resume_early(dev);
1946 static int i915_pm_resume(struct device *kdev)
1948 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
1950 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1953 return i915_drm_resume(dev);
1956 /* freeze: before creating the hibernation_image */
1957 static int i915_pm_freeze(struct device *kdev)
1961 ret = i915_pm_suspend(kdev);
1965 ret = i915_gem_freeze(kdev_to_i915(kdev));
1972 static int i915_pm_freeze_late(struct device *kdev)
1976 ret = i915_pm_suspend_late(kdev);
1980 ret = i915_gem_freeze_late(kdev_to_i915(kdev));
1987 /* thaw: called after creating the hibernation image, but before turning off. */
1988 static int i915_pm_thaw_early(struct device *kdev)
1990 return i915_pm_resume_early(kdev);
1993 static int i915_pm_thaw(struct device *kdev)
1995 return i915_pm_resume(kdev);
1998 /* restore: called after loading the hibernation image. */
1999 static int i915_pm_restore_early(struct device *kdev)
2001 return i915_pm_resume_early(kdev);
2004 static int i915_pm_restore(struct device *kdev)
2006 return i915_pm_resume(kdev);
2010 * Save all Gunit registers that may be lost after a D3 and a subsequent
2011 * S0i[R123] transition. The list of registers needing a save/restore is
2012 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2013 * registers in the following way:
2014 * - Driver: saved/restored by the driver
2015 * - Punit : saved/restored by the Punit firmware
2016 * - No, w/o marking: no need to save/restore, since the register is R/O or
2017 * used internally by the HW in a way that doesn't depend on
2018 * keeping the content across a suspend/resume.
2019 * - Debug : used for debugging
2021 * We save/restore all registers marked with 'Driver', with the following
2023 * - Registers out of use, including also registers marked with 'Debug'.
2024 * These have no effect on the driver's operation, so we don't save/restore
2025 * them to reduce the overhead.
2026 * - Registers that are fully setup by an initialization function called from
2027 * the resume path. For example many clock gating and RPS/RC6 registers.
2028 * - Registers that provide the right functionality with their reset defaults.
2030 * TODO: Except for registers that, based on the above 3 criteria, can be safely
2031 * ignored, we save/restore all others, practically treating the HW context as
2032 * a black-box for the driver. Further investigation is needed to reduce the
2033 * saved/restored registers even further, by following the same 3 criteria.
2035 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2037 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2040 /* GAM 0x4000-0x4770 */
2041 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
2042 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
2043 s->arb_mode = I915_READ(ARB_MODE);
2044 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
2045 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
2047 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2048 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
2050 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
2051 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
2053 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
2054 s->ecochk = I915_READ(GAM_ECOCHK);
2055 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
2056 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
2058 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
2060 /* MBC 0x9024-0x91D0, 0x8500 */
2061 s->g3dctl = I915_READ(VLV_G3DCTL);
2062 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
2063 s->mbctl = I915_READ(GEN6_MBCTL);
2065 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2066 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
2067 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
2068 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
2069 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
2070 s->rstctl = I915_READ(GEN6_RSTCTL);
2071 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
2073 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2074 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
2075 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
2076 s->rpdeuc = I915_READ(GEN6_RPDEUC);
2077 s->ecobus = I915_READ(ECOBUS);
2078 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
2079 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
2080 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
2081 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
2082 s->rcedata = I915_READ(VLV_RCEDATA);
2083 s->spare2gh = I915_READ(VLV_SPAREG2H);
2085 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2086 s->gt_imr = I915_READ(GTIMR);
2087 s->gt_ier = I915_READ(GTIER);
2088 s->pm_imr = I915_READ(GEN6_PMIMR);
2089 s->pm_ier = I915_READ(GEN6_PMIER);
2091 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2092 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
2094 /* GT SA CZ domain, 0x100000-0x138124 */
2095 s->tilectl = I915_READ(TILECTL);
2096 s->gt_fifoctl = I915_READ(GTFIFOCTL);
2097 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
2098 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2099 s->pmwgicz = I915_READ(VLV_PMWGICZ);
2101 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2102 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
2103 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
2104 s->pcbr = I915_READ(VLV_PCBR);
2105 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2108 * Not saving any of:
2109 * DFT, 0x9800-0x9EC0
2110 * SARB, 0xB000-0xB1FC
2111 * GAC, 0x5208-0x524C, 0x14000-0x14C000
2116 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2118 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2122 /* GAM 0x4000-0x4770 */
2123 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
2124 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
2125 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
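/*
 * ARB_MODE is a masked register: the upper 16 bits select which of the
 * lower 16 bits actually get written, so OR-ing in 0xffff << 16 makes
 * the restore apply every saved bit.
 */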
2126 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
2127 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
2129 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
2130 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
2132 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
2133 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
2135 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2136 I915_WRITE(GAM_ECOCHK, s->ecochk);
2137 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
2138 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
2140 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
2142 /* MBC 0x9024-0x91D0, 0x8500 */
2143 I915_WRITE(VLV_G3DCTL, s->g3dctl);
2144 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
2145 I915_WRITE(GEN6_MBCTL, s->mbctl);
2147 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2148 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
2149 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
2150 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
2151 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
2152 I915_WRITE(GEN6_RSTCTL, s->rstctl);
2153 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
2155 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2156 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
2157 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
2158 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
2159 I915_WRITE(ECOBUS, s->ecobus);
2160 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
2161 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2162 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
2163 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
2164 I915_WRITE(VLV_RCEDATA, s->rcedata);
2165 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
2167 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2168 I915_WRITE(GTIMR, s->gt_imr);
2169 I915_WRITE(GTIER, s->gt_ier);
2170 I915_WRITE(GEN6_PMIMR, s->pm_imr);
2171 I915_WRITE(GEN6_PMIER, s->pm_ier);
2173 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
2174 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
2176 /* GT SA CZ domain, 0x100000-0x138124 */
2177 I915_WRITE(TILECTL, s->tilectl);
2178 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
/*
2180 * Preserve the GT allow wake and GFX force clock bit; they are not
2181 * restored, as they are used to control the s0ix suspend/resume
2182 * sequence by the caller (a minimal sketch of this preserve-and-merge
* pattern follows this function).
*/
2184 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2185 val &= VLV_GTLC_ALLOWWAKEREQ;
2186 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2187 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2189 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2190 val &= VLV_GFX_CLK_FORCE_ON_BIT;
2191 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2192 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2194 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
2196 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2197 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
2198 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
2199 I915_WRITE(VLV_PCBR, s->pcbr);
2200 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
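/*
 * Illustrative sketch (kept out of the build) of the preserve-and-merge done
 * for VLV_GTLC_WAKE_CTRL and VLV_GTLC_SURVIVABILITY_REG above: the
 * caller-owned bit is taken from the live register, everything else comes
 * from the saved snapshot. EXAMPLE_REG and EXAMPLE_OWNED_BIT are hypothetical.
 */
#if 0
static void example_restore_preserving_bit(struct drm_i915_private *dev_priv,
					   u32 saved)
{
	u32 val;

	val = I915_READ(EXAMPLE_REG);
	val &= EXAMPLE_OWNED_BIT;		/* keep the live, caller-owned bit */
	val |= saved & ~EXAMPLE_OWNED_BIT;	/* restore every other bit */
	I915_WRITE(EXAMPLE_REG, val);
}
#endif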
2203 static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
u32 mask, u32 val)
{
2206 /* The HW does not like us polling for PW_STATUS frequently, so
2207 * use the sleeping loop rather than risk the busy spin within
2208 * intel_wait_for_register().
2210 * Transitioning between RC6 states should be at most 2ms (see
2211 * valleyview_enable_rps) so use a 3ms timeout.
*/
2213 return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
3);
}
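/*
 * Conceptual sketch (kept out of the build) of what the sleeping wait above
 * amounts to: poll the status register, sleep between polls instead of
 * busy-spinning, and give up once the timeout has elapsed. The real driver
 * uses the wait_for() macro; the helper below is hypothetical.
 */
#if 0
static int example_wait_for_pw_status(struct drm_i915_private *dev_priv,
				      u32 mask, u32 val,
				      unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;

	for (;;) {
		if ((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		/* Sleep rather than spin; the HW dislikes tight polling. */
		usleep_range(10, 50);
	}
}
#endif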
2217 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
u32 val;
int err;
2222 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2223 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
if (force_on)
2225 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2226 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

if (!force_on)
return 0;
2231 err = intel_wait_for_register(dev_priv,
2232 VLV_GTLC_SURVIVABILITY_REG,
2233 VLV_GFX_CLK_STATUS_BIT,
2234 VLV_GFX_CLK_STATUS_BIT,
20);
2237 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2238 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2243 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
u32 mask;
u32 val;
int err;
2249 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2250 val &= ~VLV_GTLC_ALLOWWAKEREQ;
if (allow)
2252 val |= VLV_GTLC_ALLOWWAKEREQ;
2253 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2254 POSTING_READ(VLV_GTLC_WAKE_CTRL);
2256 mask = VLV_GTLC_ALLOWWAKEACK;
2257 val = allow ? mask : 0;
2259 err = vlv_wait_for_pw_status(dev_priv, mask, val);
2261 DRM_ERROR("timeout disabling GT waking\n");
2266 static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
bool wait_for_on)
{
u32 mask;
u32 val;
2272 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2273 val = wait_for_on ? mask : 0;
/*
2276 * RC6 transitioning can be delayed up to 2 msec (see
2277 * valleyview_enable_rps), use 3 msec for safety.
*/
2279 if (vlv_wait_for_pw_status(dev_priv, mask, val))
2280 DRM_ERROR("timeout waiting for GT wells to go %s\n",
2281 onoff(wait_for_on));
}
2284 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
2286 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
2289 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
2290 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
2293 static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
u32 mask;
int err;

/*
2299 * Bspec defines the following GT well-on flags as debug only, so
2300 * don't treat them as hard failures.
*/
2302 vlv_wait_for_gt_wells(dev_priv, false);
2304 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2305 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2307 vlv_check_no_gt_access(dev_priv);
2309 err = vlv_force_gfx_clock(dev_priv, true);
if (err)
goto err1;

2313 err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;

2317 if (!IS_CHERRYVIEW(dev_priv))
2318 vlv_save_gunit_s0ix_state(dev_priv);

2320 err = vlv_force_gfx_clock(dev_priv, false);
if (err)
goto err2;

return 0;

err2:
2327 /* For safety always re-enable waking and disable gfx clock forcing */
2328 vlv_allow_gt_wake(dev_priv, true);
err1:
2330 vlv_force_gfx_clock(dev_priv, false);

return err;
}
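/*
 * Illustrative sketch (kept out of the build) of the goto-based unwinding
 * used above: a failure at any step jumps to a label that undoes only the
 * steps already completed, leaving the device in a sane state. All of the
 * example_*() helpers are hypothetical.
 */
#if 0
static int example_ordered_suspend(struct drm_i915_private *dev_priv)
{
	int err;

	err = example_step_one(dev_priv);
	if (err)
		goto err_none;

	err = example_step_two(dev_priv);
	if (err)
		goto err_undo_one;

	return 0;

err_undo_one:
	example_undo_step_one(dev_priv);
err_none:
	return err;
}
#endif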
2335 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
int err;
int ret;

/*
2342 * If any of the steps fail just try to continue, that's the best we
2343 * can do at this point. Return the first error code (which will also
2344 * leave RPM permanently disabled).
*/
2346 ret = vlv_force_gfx_clock(dev_priv, true);
2348 if (!IS_CHERRYVIEW(dev_priv))
2349 vlv_restore_gunit_s0ix_state(dev_priv);
2351 err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
ret = err;

2355 err = vlv_force_gfx_clock(dev_priv, false);
if (!ret)
ret = err;

2359 vlv_check_no_gt_access(dev_priv);

if (rpm_resume)
2362 intel_init_clock_gating(dev_priv);

return ret;
}
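/*
 * Illustrative sketch (kept out of the build) of the "keep going, report the
 * first error" policy described in the comment above: every step is still
 * attempted after a failure, and only the first error code is kept. The
 * example_*() helpers are hypothetical.
 */
#if 0
static int example_best_effort_resume(struct drm_i915_private *dev_priv)
{
	int ret, err;

	ret = example_step_one(dev_priv);	/* first error, if any, wins */

	err = example_step_two(dev_priv);	/* still attempted after a failure */
	if (!ret)
		ret = err;

	err = example_step_three(dev_priv);
	if (!ret)
		ret = err;

	return ret;
}
#endif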
2367 static int intel_runtime_suspend(struct device *kdev)
{
2369 struct pci_dev *pdev = to_pci_dev(kdev);
2370 struct drm_device *dev = pci_get_drvdata(pdev);
2371 struct drm_i915_private *dev_priv = to_i915(dev);
int ret;

2374 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;

2377 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
2380 DRM_DEBUG_KMS("Suspending device\n");
2382 disable_rpm_wakeref_asserts(dev_priv);
/*
2385 * We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
2388 i915_gem_runtime_suspend(dev_priv);
2390 intel_guc_suspend(dev_priv);
2392 intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
2395 if (IS_GEN9_LP(dev_priv)) {
2396 bxt_display_core_uninit(dev_priv);
2397 bxt_enable_dc9(dev_priv);
2398 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2399 hsw_enable_pc8(dev_priv);
2400 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2401 ret = vlv_suspend_complete(dev_priv);
}

if (ret) {
2405 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
2406 intel_runtime_pm_enable_interrupts(dev_priv);

2408 enable_rpm_wakeref_asserts(dev_priv);

return ret;
}

2413 intel_uncore_suspend(dev_priv);
2415 enable_rpm_wakeref_asserts(dev_priv);
2416 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
2418 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
2419 DRM_ERROR("Unclaimed access detected prior to suspending\n");
2421 dev_priv->pm.suspended = true;
/*
2424 * FIXME: We really should find a document that references the arguments
* used below!
*/
2427 if (IS_BROADWELL(dev_priv)) {
/*
2429 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2430 * being detected, and the call we do at intel_runtime_resume()
2431 * won't be able to restore them. Since PCI_D3hot matches the
2432 * actual specification and appears to be working, use it.
*/
2434 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
} else {
/*
2437 * current versions of firmware which depend on this opregion
2438 * notification have repurposed the D1 definition to mean
2439 * "runtime suspended" vs. what you would normally expect (D3)
2440 * to distinguish it from notifications that might be sent via
* the suspend path.
*/
2443 intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
2446 assert_forcewakes_inactive(dev_priv);
2448 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2449 intel_hpd_poll_init(dev_priv);
2451 DRM_DEBUG_KMS("Device suspended\n");
2455 static int intel_runtime_resume(struct device *kdev)
{
2457 struct pci_dev *pdev = to_pci_dev(kdev);
2458 struct drm_device *dev = pci_get_drvdata(pdev);
2459 struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;

2462 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
2465 DRM_DEBUG_KMS("Resuming device\n");
2467 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
2468 disable_rpm_wakeref_asserts(dev_priv);
2470 intel_opregion_notify_adapter(dev_priv, PCI_D0);
2471 dev_priv->pm.suspended = false;
2472 if (intel_uncore_unclaimed_mmio(dev_priv))
2473 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
2475 intel_guc_resume(dev_priv);
2477 if (IS_GEN9_LP(dev_priv)) {
2478 bxt_disable_dc9(dev_priv);
2479 bxt_display_core_init(dev_priv, true);
2480 if (dev_priv->csr.dmc_payload &&
2481 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2482 gen9_enable_dc5(dev_priv);
2483 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2484 hsw_disable_pc8(dev_priv);
2485 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2486 ret = vlv_resume_prepare(dev_priv, true);
}

/*
2490 * No point in rolling things back in case of an error, as the best
2491 * we can do is to hope that things will still work (and disable RPM).
*/
2493 i915_gem_init_swizzling(dev_priv);
2494 i915_gem_restore_fences(dev_priv);
2496 intel_runtime_pm_enable_interrupts(dev_priv);
/*
2499 * On VLV/CHV display interrupts are part of the display
2500 * power well, so hpd is reinitialized from there. For
2501 * everyone else do it here.
*/
2503 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2504 intel_hpd_init(dev_priv);
2506 enable_rpm_wakeref_asserts(dev_priv);
2509 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
2511 DRM_DEBUG_KMS("Device resumed\n");
2516 const struct dev_pm_ops i915_pm_ops = {
/*
2518 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
2521 .suspend = i915_pm_suspend,
2522 .suspend_late = i915_pm_suspend_late,
2523 .resume_early = i915_pm_resume_early,
2524 .resume = i915_pm_resume,
/*
2528 * @freeze, @freeze_late : called (1) before creating the
2529 * hibernation image [PMSG_FREEZE] and
2530 * (2) after rebooting, before restoring
2531 * the image [PMSG_QUIESCE]
2532 * @thaw, @thaw_early : called (1) after creating the hibernation
2533 * image, before writing it [PMSG_THAW]
2534 * and (2) after failing to create or
2535 * restore the image [PMSG_RECOVER]
2536 * @poweroff, @poweroff_late: called after writing the hibernation
2537 * image, before rebooting [PMSG_HIBERNATE]
2538 * @restore, @restore_early : called after rebooting and restoring the
2539 * hibernation image [PMSG_RESTORE]
* (an illustrative sketch of wiring these callbacks follows this table)
*/
2541 .freeze = i915_pm_freeze,
2542 .freeze_late = i915_pm_freeze_late,
2543 .thaw_early = i915_pm_thaw_early,
2544 .thaw = i915_pm_thaw,
2545 .poweroff = i915_pm_suspend,
2546 .poweroff_late = i915_pm_poweroff_late,
2547 .restore_early = i915_pm_restore_early,
2548 .restore = i915_pm_restore,
2550 /* S0ix (via runtime suspend) event handlers */
2551 .runtime_suspend = intel_runtime_suspend,
2552 .runtime_resume = intel_runtime_resume,
};
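/*
 * Illustrative sketch (kept out of the build) of how another driver might
 * wire the same dev_pm_ops slots, following the PMSG_* mapping described in
 * the comments above. All of the example_*() handlers are hypothetical.
 */
#if 0
static const struct dev_pm_ops example_pm_ops = {
	/* System suspend/resume (S3, and S0ix entered via system suspend). */
	.suspend = example_suspend,
	.resume = example_resume,

	/* Hibernation: image creation, writing and restore. */
	.freeze = example_suspend,
	.thaw = example_resume,
	.poweroff = example_suspend,
	.restore = example_resume,

	/* Runtime PM: autosuspend while the device is idle. */
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume = example_runtime_resume,
};
#endif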
2555 static const struct vm_operations_struct i915_gem_vm_ops = {
2556 .fault = i915_gem_fault,
2557 .open = drm_gem_vm_open,
2558 .close = drm_gem_vm_close,
};
2561 static const struct file_operations i915_driver_fops = {
2562 .owner = THIS_MODULE,
2564 .release = drm_release,
2565 .unlocked_ioctl = drm_ioctl,
2566 .mmap = drm_gem_mmap,
2569 .compat_ioctl = i915_compat_ioctl,
2570 .llseek = noop_llseek,
};
static int
2574 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2575 struct drm_file *file)
{
return -ENODEV;
}
2580 static const struct drm_ioctl_desc i915_ioctls[] = {
2581 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2582 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2583 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2584 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2585 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2586 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2587 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
2588 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2589 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2590 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2591 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2592 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2593 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2594 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2595 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
2596 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2597 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2598 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2599 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
2600 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
2601 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2602 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2603 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2604 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2605 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2606 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2607 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2608 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2609 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2610 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2611 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2612 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2613 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2614 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2615 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
2616 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
2617 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
2618 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
2619 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
2620 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
2621 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2622 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2623 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
2624 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
2625 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2626 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2627 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2628 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2629 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2630 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2631 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2632 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
2633 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
};
2636 static struct drm_driver driver = {
2637 /* Don't use MTRRs here; the Xserver or userspace app should
2638 * deal with them for Intel hardware.
*/
.driver_features =
2641 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
2642 DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
2643 .release = i915_driver_release,
2644 .open = i915_driver_open,
2645 .lastclose = i915_driver_lastclose,
2646 .postclose = i915_driver_postclose,
2647 .set_busid = drm_pci_set_busid,
2649 .gem_close_object = i915_gem_close_object,
2650 .gem_free_object_unlocked = i915_gem_free_object,
2651 .gem_vm_ops = &i915_gem_vm_ops,
2653 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
2654 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
2655 .gem_prime_export = i915_gem_prime_export,
2656 .gem_prime_import = i915_gem_prime_import,
2658 .dumb_create = i915_gem_dumb_create,
2659 .dumb_map_offset = i915_gem_mmap_gtt,
2660 .dumb_destroy = drm_gem_dumb_destroy,
2661 .ioctls = i915_ioctls,
2662 .num_ioctls = ARRAY_SIZE(i915_ioctls),
2663 .fops = &i915_driver_fops,
2664 .name = DRIVER_NAME,
2665 .desc = DRIVER_DESC,
2666 .date = DRIVER_DATE,
2667 .major = DRIVER_MAJOR,
2668 .minor = DRIVER_MINOR,
2669 .patchlevel = DRIVER_PATCHLEVEL,
};
2672 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2673 #include "selftests/mock_drm.c"
#endif