Merge v4.18-rc3 into drm-next
author	Dave Airlie <airlied@redhat.com>
Wed, 4 Jul 2018 00:27:12 +0000 (10:27 +1000)
committer	Dave Airlie <airlied@redhat.com>
Wed, 4 Jul 2018 00:27:12 +0000 (10:27 +1000)
Two requests have come in for a backmerge,
and I've got some pull reqs on rc2, so this
just makes sense.

Signed-off-by: Dave Airlie <airlied@redhat.com>
383 files changed:
Documentation/devicetree/bindings/display/ilitek,ili9341.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/gpu/amdgpu.rst [new file with mode: 0644]
Documentation/gpu/drivers.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/kms-properties.csv
drivers/dma-buf/dma-buf.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/ObjectID.h
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/soc15d.h
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
drivers/gpu/drm/amd/display/dc/basics/logger.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_sink.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dm_services.h
drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
drivers/gpu/drm/amd/display/dc/i2caux/engine.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/include/ddc_service_types.h
drivers/gpu/drm/amd/display/include/fixed31_32.h
drivers/gpu/drm/amd/display/include/logger_interface.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/stats/stats.c
drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h [deleted file]
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/arc/arcpgu_crtc.c
drivers/gpu/drm/armada/armada_fb.c
drivers/gpu/drm/armada/armada_fb.h
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/cdns-dsi.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_dumb_buffers.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/drm_writeback.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/gma500/accel_2d.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/framebuffer.h
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gma_display.c
drivers/gpu/drm/gma500/gtt.h
drivers/gpu/drm/gma500/mdfld_intel_display.c
drivers/gpu/drm/gma500/oaktrail_crtc.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/dvo_ch7017.c
drivers/gpu/drm/i915/dvo_ch7xxx.c
drivers/gpu/drm/i915/dvo_ivch.c
drivers/gpu/drm/i915/dvo_ns2501.c
drivers/gpu/drm/i915/dvo_sil164.c
drivers/gpu/drm/i915/dvo_tfp410.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/gvt/execlist.h
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/page_track.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_pvinfo.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/i915_vgpu.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_log.c
drivers/gpu/drm/i915/intel_guc_log.h
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_opregion.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/intel_vbt_defs.h
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/i915_vma.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/intel_lrc.c
drivers/gpu/drm/i915/selftests/intel_workarounds.c
drivers/gpu/drm/i915/selftests/mock_context.c
drivers/gpu/drm/i915/selftests/mock_dmabuf.c
drivers/gpu/drm/i915/selftests/mock_engine.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
drivers/gpu/drm/mediatek/mtk_drm_fb.c
drivers/gpu/drm/mediatek/mtk_drm_fb.h
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fb.h
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-innolux-p079zca.c
drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
drivers/gpu/drm/panel/panel-lvds.c
drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-sitronix-st7789v.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rockchip/cdn-dp-reg.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.h
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_lvds.c
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/selftests/drm_mm_selftests.h
drivers/gpu/drm/selftests/test-drm_mm.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sun4i/Makefile
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c [new file with mode: 0644]
drivers/gpu/drm/sun4i/sun8i_tcon_top.h [new file with mode: 0644]
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tinydrm/Kconfig
drivers/gpu/drm/tinydrm/Makefile
drivers/gpu/drm/tinydrm/ili9341.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/udl/udl_dmabuf.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/v3d/v3d_fence.c
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/v3d/v3d_sched.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_regs.h
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_fb.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front.h
drivers/gpu/drm/xen/xen_drm_front_shbuf.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/staging/android/ion/ion.c
drivers/tee/tee_shm.c
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_bridge.h
include/drm/drm_connector.h
include/drm/drm_crtc.h
include/drm/drm_file.h
include/drm/drm_mm.h
include/drm/drm_mode_config.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_of.h
include/drm/drm_panel.h
include/drm/drm_plane.h
include/drm/drm_prime.h
include/drm/drm_writeback.h [new file with mode: 0644]
include/drm/gpu_scheduler.h
include/drm/i915_pciids.h
include/dt-bindings/clock/sun8i-tcon-top.h [new file with mode: 0644]
include/linux/dma-buf.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/drm.h
include/uapi/drm/drm_fourcc.h
include/uapi/drm/drm_mode.h

diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9341.txt b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
new file mode 100644 (file)
index 0000000..169b32e
--- /dev/null
@@ -0,0 +1,27 @@
+Ilitek ILI9341 display panels
+
+This binding is for display panels using an Ilitek ILI9341 controller in SPI
+mode.
+
+Required properties:
+- compatible:  "adafruit,yx240qv29", "ilitek,ili9341"
+- dc-gpios:    D/C pin
+- reset-gpios: Reset pin
+
+The node for this driver must be a child node of an SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation:    panel rotation in degrees counter clockwise (0,90,180,270)
+- backlight:   phandle of the backlight device attached to the panel
+
+Example:
+       display@0{
+               compatible = "adafruit,yx240qv29", "ilitek,ili9341";
+               reg = <0>;
+               spi-max-frequency = <32000000>;
+               dc-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+               rotation = <270>;
+               backlight = <&backlight>;
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
new file mode 100644 (file)
index 0000000..49e4105
--- /dev/null
@@ -0,0 +1,29 @@
+AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g070vvn01"
+- backlight: phandle of the backlight device attached to the panel
+- power-supply: single regulator to provide the supply voltage
+
+Required nodes:
+- port: Parallel port mapping to connect this display
+
+This panel needs a single power supply voltage. Its backlight is controlled
+via a PWM signal.
+
+Example:
+--------
+
+Example device-tree definition when connected to an iMX6Q based board
+
+       lcd_panel: lcd-panel {
+               compatible = "auo,g070vvn01";
+               backlight = <&backlight_lcd>;
+               power-supply = <&reg_display>;
+
+               port {
+                       lcd_panel_in: endpoint {
+                               remote-endpoint = <&lcd_display_out>;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
new file mode 100644 (file)
index 0000000..4a041ac
--- /dev/null
@@ -0,0 +1,20 @@
+Ilitek ILI9881c based MIPI-DSI panels
+
+Required properties:
+  - compatible: must be "ilitek,ili9881c" and one of:
+    * "bananapi,lhr050h41"
+  - reg: DSI virtual channel used by that screen
+  - power-supply: phandle to the power regulator
+  - reset-gpios: a GPIO phandle for the reset pin
+
+Optional properties:
+  - backlight: phandle to the backlight used
+
+Example:
+panel@0 {
+       compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+       reg = <0>;
+       power-supply = <&reg_display>;
+       reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+       backlight = <&pwm_bl>;
+};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
new file mode 100644 (file)
index 0000000..a9b3526
--- /dev/null
@@ -0,0 +1,20 @@
+Innolux TV123WAM 12.3 inch eDP 2K display panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "innolux,tv123wam"
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+       panel_edp: panel-edp {
+               compatible = "innolux,tv123wam";
+               enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+               power-supply = <&pm8916_l2>;
+               backlight = <&backlight>;
+       };
index 3346c1e..5a9319a 100644 (file)
@@ -101,6 +101,7 @@ DWC HDMI PHY
 
 Required properties:
   - compatible: value must be one of:
+    * allwinner,sun50i-a64-hdmi-phy
     * allwinner,sun8i-a83t-hdmi-phy
     * allwinner,sun8i-h3-hdmi-phy
   - reg: base address and size of memory-mapped region
@@ -111,8 +112,9 @@ Required properties:
   - resets: phandle to the reset controller driving the PHY
   - reset-names: must be "phy"
 
-H3 HDMI PHY requires additional clock:
+H3 and A64 HDMI PHY require additional clocks:
   - pll-0: parent of phy clock
+  - pll-1: second possible phy clock parent (A64 only)
 
 TV Encoder
 ----------
@@ -187,6 +189,62 @@ And on the A23, A31, A31s and A33, you need one more clock line:
    - 'lvds-alt': An alternative clock source, separate from the TCON channel 0
                  clock, that can be used to drive the LVDS clock
 
+TCON TOP
+--------
+
+TCON TOP's main purpose is to configure the whole display pipeline. It
+determines the relationships between mixers and TCONs, selects the source
+TCON for HDMI, muxes LCD and TV encoder GPIO output, selects the TV encoder
+clock source and contains additional TV TCON and DSI gates.
+
+It allows the display pipeline to be configured in very different ways:
+
+                                / LCD0/LVDS0
+                 / [0] TCON-LCD0
+                 |              \ MIPI DSI
+ mixer0          |
+        \        / [1] TCON-LCD1 - LCD1/LVDS1
+         TCON-TOP
+        /        \ [2] TCON-TV0 [0] - TVE0/RGB
+ mixer1          |                  \
+                 |                   TCON-TOP - HDMI
+                 |                  /
+                 \ [3] TCON-TV1 [1] - TVE1/RGB
+
+Note that both TCON TOP boxes in the graph refer to the same physical unit.
+Both mixers can be connected to any TCON.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun8i-r40-tcon-top
+  - reg: base address and size of the memory-mapped region.
+  - clocks: phandle to the clocks feeding the TCON TOP
+    * bus: TCON TOP interface clock
+    * tcon-tv0: TCON TV0 clock
+    * tve0: TVE0 clock
+    * tcon-tv1: TCON TV1 clock
+    * tve1: TVE1 clock
+    * dsi: MIPI DSI clock
+  - clock-names: clock names mentioned above
+  - resets: phandle to the reset line driving the TCON TOP
+  - #clock-cells : must contain 1
+  - clock-output-names: Names of clocks created for TCON TV0 channel clock,
+    TCON TV1 channel clock and DSI channel clock, in that order.
+
+- ports: A ports node with endpoint definitions as defined in
+    Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
+    be defined:
+    * port 0 is input for mixer0 mux
+    * port 1 is output for mixer0 mux
+    * port 2 is input for mixer1 mux
+    * port 3 is output for mixer1 mux
+    * port 4 is input for HDMI mux
+    * port 5 is output for HDMI mux
+    All output endpoints for mixer muxes and input endpoints for the HDMI mux
+    should have a reg property with the id of the target TCON, as shown in the
+    graph above (0-3 for mixer muxes and 0-1 for the HDMI mux). All ports
+    should have only one endpoint connected to a remote endpoint.
+
 DRC
 ---
 
index 7cad066..6984539 100644 (file)
@@ -8,6 +8,7 @@ abracon Abracon Corporation
 actions        Actions Semiconductor Co., Ltd.
 active-semi    Active-Semi International Inc
 ad     Avionic Design GmbH
+adafruit       Adafruit Industries, LLC
 adapteva       Adapteva, Inc.
 adaptrum       Adaptrum, Inc.
 adh    AD Holdings Plc.
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
new file mode 100644 (file)
index 0000000..e52d0ce
--- /dev/null
@@ -0,0 +1,117 @@
+=========================
+ drm/amdgpu AMDgpu driver
+=========================
+
+The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
+Next (GCN) architecture.
+
+Core Driver Infrastructure
+==========================
+
+This section covers core driver infrastructure.
+
+.. _amdgpu_memory_domains:
+
+Memory Domains
+--------------
+
+.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
+   :doc: memory domains
+
+Buffer Objects
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+   :doc: amdgpu_object
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+   :internal:
+
+PRIME Buffer Sharing
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+   :doc: PRIME Buffer Sharing
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+   :internal:
+
+MMU Notifier
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+   :doc: MMU Notifier
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+   :internal:
+
+AMDGPU Virtual Memory
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+   :doc: GPUVM
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+   :internal:
+
+Interrupt Handling
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+   :doc: Interrupt Handling
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+   :internal:
+
+GPU Power/Thermal Controls and Monitoring
+=========================================
+
+This section covers hwmon and power/thermal controls.
+
+HWMON Interfaces
+----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: hwmon
+
+GPU sysfs Power State Interfaces
+--------------------------------
+
+GPU power controls are exposed via sysfs files.
+
+power_dpm_state
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: power_dpm_state
+
+power_dpm_force_performance_level
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: power_dpm_force_performance_level
+
+pp_table
+~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_table
+
+pp_od_clk_voltage
+~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_od_clk_voltage
+
+pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+
+pp_power_profile_mode
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_power_profile_mode
+
index f982558..65be325 100644 (file)
@@ -4,6 +4,7 @@ GPU Driver Documentation
 
 .. toctree::
 
+   amdgpu
    i915
    meson
    pl111
index 1dffd1a..4f6f113 100644 (file)
@@ -373,6 +373,15 @@ Connector Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_connector.c
    :export:
 
+Writeback Connectors
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+   :export:
+
 Encoder Abstraction
 ===================
 
@@ -517,6 +526,12 @@ Standard Connector Properties
 .. kernel-doc:: drivers/gpu/drm/drm_connector.c
    :doc: standard connector properties
 
+HDMI Specific Connector Properties
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: HDMI connector properties
+
 Plane Composition Properties
 ----------------------------
 
index b08e9dc..21b6b72 100644 (file)
@@ -395,6 +395,8 @@ VMA Offset Manager
 .. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
    :export:
 
+.. _prime_buffer_sharing:
+
 PRIME Buffer Sharing
 ====================
 
@@ -496,3 +498,21 @@ DRM Sync Objects
 
 .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
    :export:
+
+GPU Scheduler
+=============
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+   :doc: Overview
+
+Scheduler Function References
+-----------------------------
+
+.. kernel-doc:: include/drm/gpu_scheduler.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+   :export:
index 07ed22e..bfde04e 100644 (file)
@@ -17,6 +17,7 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
 ,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
 ,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
 ,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
+,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
 i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
 ,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
 ,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
index d78d5fc..1388447 100644 (file)
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release
-                         || !exp_info->ops->map_atomic
                          || !exp_info->ops->map
                          || !exp_info->ops->mmap)) {
                return ERR_PTR(-EINVAL);
@@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        mutex_lock(&dmabuf->lock);
 
        if (dmabuf->ops->attach) {
-               ret = dmabuf->ops->attach(dmabuf, dev, attach);
+               ret = dmabuf->ops->attach(dmabuf, attach);
                if (ret)
                        goto err_attach;
        }
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
  *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
  *
- *   There are also atomic variants of these interfaces. Like for kmap they
- *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
- *   (in the callback) is allowed to block when using these.
- *
- *   Interfaces::
- *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- *   For importers all the restrictions of using kmap apply, like the limited
- *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
- *   max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ *   Implementing these functions is optional for exporters; for importers,
+ *   all the restrictions of using kmap apply.
  *
  *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
  *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
  *   the partial chunks at the beginning and end but may return stale or bogus
  *   data outside of the range (in these partial chunks).
  *
- *   Note that these calls need to always succeed. The exporter needs to
- *   complete any preparations that might fail in begin_cpu_access.
- *
  *   For some cases the overhead of kmap can be too high, a vmap interface
  *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resource on many architectures.
@@ -860,41 +847,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
 /**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf:    [in]    buffer to map page from.
- * @page_num:  [in]    page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
-       WARN_ON(!dmabuf);
-
-       return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf:    [in]    buffer to unmap page from.
- * @page_num:  [in]    page in PAGE_SIZE units to unmap.
- * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
-                          void *vaddr)
-{
-       WARN_ON(!dmabuf);
-
-       if (dmabuf->ops->unmap_atomic)
-               dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
-/**
  * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
  * same restrictions as for kmap and friends apply.
  * @dmabuf:    [in]    buffer to map page from.
@@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
 {
        WARN_ON(!dmabuf);
 
+       if (!dmabuf->ops->map)
+               return NULL;
        return dmabuf->ops->map(dmabuf, page_num);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kmap);
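
With the atomic kmap variants removed and a NULL check added in dma_buf_kmap(),
importers should treat a NULL return as "no kernel mapping available". Below is
a minimal importer-side sketch; example_read_first_byte and its error handling
are illustrative only, not part of this merge:

#include <linux/dma-buf.h>

static int example_read_first_byte(struct dma_buf *buf, u8 *out)
{
	void *vaddr;
	int ret;

	/* Tell the exporter we are about to access the buffer from the CPU. */
	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* May return NULL now that a missing ->map callback is tolerated. */
	vaddr = dma_buf_kmap(buf, 0);
	if (!vaddr) {
		dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
		return -ENXIO;
	}

	*out = *(u8 *)vaddr;
	dma_buf_kunmap(buf, 0, vaddr);

	return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
}
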
index ef9f3da..69c1351 100644 (file)
@@ -18,7 +18,7 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
                drm_encoder.o drm_mode_object.o drm_property.o \
                drm_plane.o drm_color_mgmt.o drm_print.o \
                drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
-               drm_syncobj.o drm_lease.o
+               drm_syncobj.o drm_lease.o drm_writeback.o
 
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
index 0619269..5b39362 100644 (file)
 #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
 #define GENERIC_OBJECT_ID_MXM_OPM                 0x03
 #define GENERIC_OBJECT_ID_STEREO_PIN              0x04        //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT          0x05
 
 /****************************************************/
 /* Graphics Object ENUM ID Definition               */
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
 
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1    (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2    (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
 /****************************************************/
 /* Object Cap definition - Shared with BIOS         */
 /****************************************************/
index a59c075..71b9b86 100644 (file)
@@ -968,6 +968,8 @@ struct amdgpu_gfx {
        struct amdgpu_irq_src           eop_irq;
        struct amdgpu_irq_src           priv_reg_irq;
        struct amdgpu_irq_src           priv_inst_irq;
+       struct amdgpu_irq_src           cp_ecc_error_irq;
+       struct amdgpu_irq_src           sq_irq;
        /* gfx status */
        uint32_t                        gfx_current_status;
        /* ce ram size*/
index 82312a7..7a625f3 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drm_syncobj.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "amdgpu_gmc.h"
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
@@ -302,7 +303,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 
        /* Do the same for visible VRAM if half of it is free */
-       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+       if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -359,7 +360,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold) {
-               if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+               if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
@@ -381,7 +382,7 @@ retry:
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
        p->bytes_moved += ctx.bytes_moved;
-       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+       if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;
 
@@ -434,8 +435,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
                /* Good we can try to move this BO somewhere else */
                update_bytes_moved_vis =
-                       adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-                       amdgpu_bo_in_cpu_visible_vram(bo);
+                               !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+                               amdgpu_bo_in_cpu_visible_vram(bo);
                amdgpu_ttm_placement_from_domain(bo, other);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                p->bytes_moved += ctx.bytes_moved;
index c5bb362..64b3a1e 100644 (file)
@@ -449,26 +449,28 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;
+       long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
        idp = &mgr->ctx_handles;
 
+       mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
 
-               if (!ctx->adev)
+               if (!ctx->adev) {
+                       mutex_unlock(&mgr->lock);
                        return;
+               }
 
                for (i = 0; i < ctx->adev->num_rings; i++) {
 
                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;
 
-                       if (kref_read(&ctx->refcount) == 1)
-                               drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-                                                 &ctx->rings[i].entity);
-                       else
-                               DRM_ERROR("ctx %p is still alive\n", ctx);
+                       max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+                                         &ctx->rings[i].entity, max_wait);
                }
        }
+       mutex_unlock(&mgr->lock);
 }
 
 void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
index 6e5284e..d82d0d3 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/power_supply.h>
 #include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
@@ -675,17 +676,15 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
  *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
  *
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GART before or after VRAM.
  *
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than the space left then we adjust the GART size.
 * Thus the function will never fail.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
  */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
                                 struct amdgpu_gmc *mc)
@@ -698,13 +697,13 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
        size_bf = mc->vram_start;
        if (size_bf > size_af) {
                if (mc->gart_size > size_bf) {
-                       dev_warn(adev->dev, "limiting GTT\n");
+                       dev_warn(adev->dev, "limiting GART\n");
                        mc->gart_size = size_bf;
                }
                mc->gart_start = 0;
        } else {
                if (mc->gart_size > size_af) {
-                       dev_warn(adev->dev, "limiting GTT\n");
+                       dev_warn(adev->dev, "limiting GART\n");
                        mc->gart_size = size_af;
                }
                /* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -713,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
                mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
        }
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
-       dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+       dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
@@ -1926,7 +1925,7 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
        if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_SMC,
-                                                      AMD_CG_STATE_UNGATE);
+                                                      AMD_PG_STATE_UNGATE);
 
        /* ungate SMC block first */
        r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
@@ -2301,6 +2300,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_DELAYED_WORK(&adev->late_init_work,
                          amdgpu_device_ip_late_init_func_handler);
 
+       adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        if (adev->asic_type >= CHIP_BONAIRE) {
index dd6203a..9acfbee 100644 (file)
@@ -402,7 +402,6 @@ struct amdgpu_dpm {
        u32 tdp_adjustment;
        u16 load_line_slope;
        bool power_control;
-       bool ac_power;
        /* special states active */
        bool                    thermal_active;
        bool                    uvd_active;
@@ -439,6 +438,7 @@ struct amdgpu_pm {
        struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
        uint32_t                smu_prv_buffer_size;
        struct amdgpu_bo        *smu_prv_buffer;
+       bool ac_power;
 };
 
 #define R600_SSTU_DFLT                               0
index b0bf2f2..a549483 100644 (file)
@@ -855,9 +855,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .runtime_idle = amdgpu_pmops_runtime_idle,
 };
 
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+       struct drm_file *file_priv = f->private_data;
+       struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+       return 0;
+}
+
+
 static const struct file_operations amdgpu_driver_kms_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
+       .flush = amdgpu_flush,
        .release = drm_release,
        .unlocked_ioctl = amdgpu_drm_ioctl,
        .mmap = amdgpu_mmap,
index 5fb156a..89743cd 100644 (file)
@@ -510,7 +510,6 @@ out:
  * @adev: amdgpu_device pointer
  * @vm: vm to update
  * @bo_va: bo_va to update
- * @list: validation list
  * @operation: map, unmap or clear
  *
  * Update the bo_va directly after setting its address. Errors are not
@@ -519,7 +518,6 @@ out:
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
-                                   struct list_head *list,
                                    uint32_t operation)
 {
        int r;
@@ -612,7 +610,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
-               tv.shared = false;
+               tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
@@ -673,7 +671,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
-               amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
+               amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);
 
 error_backoff:
index 893c249..6cb4948 100644 (file)
@@ -109,4 +109,19 @@ struct amdgpu_gmc {
        const struct amdgpu_gmc_funcs   *gmc_funcs;
 };
 
+/**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+ * @gmc: amdgpu_gmc structure pointer
+ *
+ * Returns:
+ * True if full VRAM is visible through the BAR
+ */
+static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+{
+       WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
+
+       return (gmc->real_vram_size == gmc->visible_vram_size);
+}
+
 #endif
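
For illustration only, a hypothetical caller of the new helper, mirroring the
amdgpu_cs.c hunks above (example_visible_vram_used is not part of this merge):

static u64 example_visible_vram_used(struct amdgpu_device *adev)
{
	/* With a fully CPU-visible BAR there is no visible-VRAM pressure
	 * to account for, so skip the extra bookkeeping entirely. */
	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return 0;

	return amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}
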
index f70eeed..31f8170 100644 (file)
@@ -353,7 +353,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                        ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
                        ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
                        ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
-                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        tmo = tmo_mm;
                else
                        tmo = tmo_gfx;
index 3a5ca46..1abf5b5 100644 (file)
  *          Alex Deucher
  *          Jerome Glisse
  */
+
+/**
+ * DOC: Interrupt Handling
+ *
+ * Interrupts generated within GPU hardware raise interrupt requests that are
+ * passed to the amdgpu IRQ handler, which is responsible for detecting the
+ * source and type of the interrupt and for dispatching the matching handlers.
+ * If handling an interrupt requires calling kernel functions that may sleep,
+ * processing is dispatched to work handlers.
+ *
+ * If MSI functionality is not disabled by module parameter then MSI
+ * support will be enabled.
+ *
+ * For GPU interrupt sources that may be driven by another driver, IRQ domain
+ * support is used (with mapping between virtual and hardware IRQs).
+ */
+
 #include <linux/irq.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
 
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
 /**
- * amdgpu_hotplug_work_func - display hotplug work handler
+ * amdgpu_hotplug_work_func - work handler for display hotplug event
  *
- * @work: work struct
+ * @work: work struct pointer
  *
- * This is the hot plug event work handler (all asics).
- * The work gets scheduled from the irq handler if there
- * was a hot plug interrupt.  It walks the connector table
- * and calls the hotplug handler for each one, then sends
- * a drm hotplug event to alert userspace.
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt.  It walks through the connector table
+ * and calls the hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler because the hotplug handler has to
+ * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock may
+ * sleep).
  */
 static void amdgpu_hotplug_work_func(struct work_struct *work)
 {
@@ -74,13 +93,12 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
 }
 
 /**
- * amdgpu_irq_reset_work_func - execute gpu reset
+ * amdgpu_irq_reset_work_func - execute GPU reset
  *
- * @work: work struct
+ * @work: work struct pointer
  *
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
+ * Execute scheduled GPU reset (Cayman+).
+ * This function is called when the IRQ handler thinks we need a GPU reset.
  */
 static void amdgpu_irq_reset_work_func(struct work_struct *work)
 {
@@ -91,7 +109,13 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
                amdgpu_device_gpu_recover(adev, NULL, false);
 }
 
-/* Disable *all* interrupts */
+/**
+ * amdgpu_irq_disable_all - disable *all* interrupts
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Disable all types of interrupts from all sources.
+ */
 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 {
        unsigned long irqflags;
@@ -123,11 +147,15 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_handler - irq handler
+ * amdgpu_irq_handler - IRQ handler
+ *
+ * @irq: IRQ number (unused)
+ * @arg: pointer to DRM device
  *
- * @int irq, void *arg: args
+ * IRQ handler for amdgpu driver (all ASICs).
  *
- * This is the irq handler for the amdgpu driver (all asics).
+ * Returns:
+ * result of handling the IRQ, as defined by &irqreturn_t
  */
 irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 {
@@ -142,18 +170,18 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 }
 
 /**
- * amdgpu_msi_ok - asic specific msi checks
+ * amdgpu_msi_ok - check whether MSI functionality is enabled
  *
- * @adev: amdgpu device pointer
+ * @adev: amdgpu device pointer (unused)
+ *
+ * Checks whether MSI functionality has been disabled via module parameter
+ * (all ASICs).
  *
- * Handles asic specific MSI checks to determine if
- * MSIs should be enabled on a particular chip (all asics).
- * Returns true if MSIs should be enabled, false if MSIs
- * should not be enabled.
+ * Returns:
+ * *true* if MSIs are allowed to be enabled or *false* otherwise
  */
 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 {
-       /* force MSI on */
        if (amdgpu_msi == 1)
                return true;
        else if (amdgpu_msi == 0)
@@ -163,12 +191,15 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_init - init driver interrupt info
+ * amdgpu_irq_init - initialize interrupt handling
  *
  * @adev: amdgpu device pointer
  *
- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
- * Returns 0 for success, error for failure.
+ * Sets up work functions for hotplug and reset interrupts, enables MSI
+ * functionality, initializes vblank, hotplug and reset interrupt handling.
+ *
+ * Returns:
+ * 0 on success or error code on failure
  */
 int amdgpu_irq_init(struct amdgpu_device *adev)
 {
@@ -176,7 +207,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 
        spin_lock_init(&adev->irq.lock);
 
-       /* enable msi */
+       /* Enable MSI if not disabled by module parameter */
        adev->irq.msi_enabled = false;
 
        if (amdgpu_msi_ok(adev)) {
@@ -189,7 +220,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 
        if (!amdgpu_device_has_dc_support(adev)) {
                if (!adev->enable_virtual_display)
-                       /* Disable vblank irqs aggressively for power-saving */
+                       /* Disable vblank IRQs aggressively for power-saving */
                        /* XXX: can this be enabled for DC? */
                        adev->ddev->vblank_disable_immediate = true;
 
@@ -197,7 +228,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
                if (r)
                        return r;
 
-               /* pre DCE11 */
+               /* Pre-DCE11 */
                INIT_WORK(&adev->hotplug_work,
                                amdgpu_hotplug_work_func);
        }
@@ -220,11 +251,13 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_fini - tear down driver interrupt info
+ * amdgpu_irq_fini - shut down interrupt handling
  *
  * @adev: amdgpu device pointer
  *
- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ * Tears down work functions for hotplug and reset interrupts, disables MSI
+ * functionality, shuts down vblank, hotplug and reset interrupt handling,
+ * turns off interrupts from all sources (all ASICs).
  */
 void amdgpu_irq_fini(struct amdgpu_device *adev)
 {
@@ -264,12 +297,17 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_add_id - register irq source
+ * amdgpu_irq_add_id - register IRQ source
  *
  * @adev: amdgpu device pointer
- * @src_id: source id for this source
- * @source: irq source
+ * @client_id: client id
+ * @src_id: source id
+ * @source: IRQ source pointer
+ *
+ * Registers an IRQ source on a client.
  *
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
@@ -312,12 +350,12 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
  *
  * @adev: amdgpu device pointer
- * @entry: interrupt vector
+ * @entry: interrupt vector pointer
  *
- * Dispatches the irq to the different IP blocks
+ * Dispatches IRQ to IP blocks.
  */
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry)
@@ -361,13 +399,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_irq_update - update hw interrupt state
+ * amdgpu_irq_update - update hardware interrupt state
  *
  * @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to update
+ * @src: interrupt source pointer
+ * @type: type of interrupt
  *
- * Updates the interrupt state for a specific src (all asics).
+ * Updates interrupt state for the specific source (all ASICs).
  */
 int amdgpu_irq_update(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *src, unsigned type)
@@ -378,7 +416,7 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 
        spin_lock_irqsave(&adev->irq.lock, irqflags);
 
-       /* we need to determine after taking the lock, otherwise
+       /* We need to determine after taking the lock, otherwise
           we might disable just enabled interrupts again */
        if (amdgpu_irq_enabled(adev, src, type))
                state = AMDGPU_IRQ_STATE_ENABLE;
@@ -390,6 +428,14 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Updates state of all types of interrupts on all sources on resume after
+ * reset.
+ */
 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
        int i, j, k;
@@ -413,10 +459,13 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
  * amdgpu_irq_get - enable interrupt
  *
  * @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to enable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
  *
- * Enables the interrupt type for a specific src (all asics).
+ * Enables the specified type of interrupt on the specified source (all ASICs).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
@@ -440,10 +489,13 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  * amdgpu_irq_put - disable interrupt
  *
  * @adev: amdgpu device pointer
- * @src: interrupt src you want to disable
- * @type: type of interrupt you want to disable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
+ *
+ * Disables the specified type of interrupt on the specified source (all ASICs).
  *
- * Disables the interrupt type for a specific src (all asics).
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
@@ -464,12 +516,17 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 }
 
 /**
- * amdgpu_irq_enabled - test if irq is enabled or not
+ * amdgpu_irq_enabled - check whether interrupt is enabled or not
  *
  * @adev: amdgpu device pointer
- * @idx: interrupt src you want to test
+ * @src: interrupt source pointer
+ * @type: type of interrupt
  *
- * Tests if the given interrupt source is enabled or not
+ * Checks whether the given type of interrupt is enabled on the given source.
+ *
+ * Returns:
+ * *true* if interrupt is enabled, *false* if interrupt is disabled or the
+ * parameters are invalid
  */
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
@@ -486,7 +543,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
        return !!atomic_read(&src->enabled_types[type]);
 }
 
-/* gen irq */
+/* XXX: Generic IRQ handling */
 static void amdgpu_irq_mask(struct irq_data *irqd)
 {
        /* XXX */
@@ -497,12 +554,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
        /* XXX */
 }
 
+/* amdgpu hardware interrupt chip descriptor */
 static struct irq_chip amdgpu_irq_chip = {
        .name = "amdgpu-ih",
        .irq_mask = amdgpu_irq_mask,
        .irq_unmask = amdgpu_irq_unmask,
 };
 
+/**
+ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
+ *
+ * @d: amdgpu IRQ domain pointer (unused)
+ * @irq: virtual IRQ number
+ * @hwirq: hardware IRQ number
+ *
+ * The current implementation assigns a simple interrupt handler to the
+ * given virtual IRQ.
+ *
+ * Returns:
+ * 0 on success or error code otherwise
+ */
 static int amdgpu_irqdomain_map(struct irq_domain *d,
                                unsigned int irq, irq_hw_number_t hwirq)
 {
@@ -514,17 +585,21 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
        return 0;
 }
 
+/* Implementation of methods for amdgpu IRQ domain */
 static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
        .map = amdgpu_irqdomain_map,
 };
 
 /**
- * amdgpu_irq_add_domain - create a linear irq domain
+ * amdgpu_irq_add_domain - create a linear IRQ domain
  *
  * @adev: amdgpu device pointer
  *
- * Create an irq domain for GPU interrupt sources
+ * Creates an IRQ domain for GPU interrupt sources
  * that may be driven by another driver (e.g., ACP).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
 {
@@ -539,11 +614,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_remove_domain - remove the irq domain
+ * amdgpu_irq_remove_domain - remove the IRQ domain
  *
  * @adev: amdgpu device pointer
  *
- * Remove the irq domain for GPU interrupt sources
+ * Removes the IRQ domain for GPU interrupt sources
  * that may be driven by another driver (e.g., ACP).
  */
 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
@@ -555,16 +630,17 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
- *                             Linux irq
+ * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
  *
  * @adev: amdgpu device pointer
  * @src_id: IH source id
  *
- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
+ * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
  * Use this for components that generate a GPU interrupt, but are driven
  * by a different driver (e.g., ACP).
- * Returns the Linux irq.
+ *
+ * Returns:
+ * The Linux IRQ number
  */
 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
 {
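
For illustration, a hypothetical external client such as ACP would consume this mapping roughly as follows; a minimal sketch, assuming a valid IH src_id, where my_acp_handler and my_acp are illustrative names, not part of this patch:

    #include <linux/interrupt.h>

    /* Sketch: translate a GPU IH source id into a Linux IRQ number,
     * then install an ordinary Linux interrupt handler on it. */
    unsigned int irq = amdgpu_irq_create_mapping(adev, src_id);
    int r = request_irq(irq, my_acp_handler, 0, "my-acp", my_acp);
    if (r)
            dev_err(adev->dev, "failed to request IRQ %u\n", irq);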
index 91517b1..2060f20 100644 (file)
@@ -329,35 +329,35 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        type = AMD_IP_BLOCK_TYPE_GFX;
                        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                                ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 8;
+                       ib_start_alignment = 32;
+                       ib_size_alignment = 32;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        type = AMD_IP_BLOCK_TYPE_GFX;
                        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                                ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 8;
+                       ib_start_alignment = 32;
+                       ib_size_alignment = 32;
                        break;
                case AMDGPU_HW_IP_DMA:
                        type = AMD_IP_BLOCK_TYPE_SDMA;
                        for (i = 0; i < adev->sdma.num_instances; i++)
                                ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 1;
+                       ib_start_alignment = 256;
+                       ib_size_alignment = 4;
                        break;
                case AMDGPU_HW_IP_UVD:
                        type = AMD_IP_BLOCK_TYPE_UVD;
                        for (i = 0; i < adev->uvd.num_uvd_inst; i++)
                                ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 16;
+                       ib_start_alignment = 64;
+                       ib_size_alignment = 64;
                        break;
                case AMDGPU_HW_IP_VCE:
                        type = AMD_IP_BLOCK_TYPE_VCE;
                        for (i = 0; i < adev->vce.num_rings; i++)
                                ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+                       ib_start_alignment = 4;
                        ib_size_alignment = 1;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
@@ -367,22 +367,28 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                                        ring_mask |=
                                        ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
                                        (j + i * adev->uvd.num_enc_rings));
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 1;
+                       ib_start_alignment = 64;
+                       ib_size_alignment = 64;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                        type = AMD_IP_BLOCK_TYPE_VCN;
                        ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+                       ib_start_alignment = 16;
                        ib_size_alignment = 16;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
                        type = AMD_IP_BLOCK_TYPE_VCN;
                        for (i = 0; i < adev->vcn.num_enc_rings; i++)
                                ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+                       ib_start_alignment = 64;
                        ib_size_alignment = 1;
                        break;
+               case AMDGPU_HW_IP_VCN_JPEG:
+                       type = AMD_IP_BLOCK_TYPE_VCN;
+                       ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
+                       ib_start_alignment = 16;
+                       ib_size_alignment = 16;
+                       break;
                default:
                        return -EINVAL;
                }
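
These per-IP alignment values reach userspace through the AMDGPU_INFO ioctl. A minimal sketch of querying them via libdrm, assuming an already-initialized amdgpu_device_handle named dev (the variable name is an assumption):

    #include <amdgpu.h>
    #include <amdgpu_drm.h>
    #include <stdio.h>

    /* Sketch: read the IB alignment requirements for the GFX IP block. */
    struct drm_amdgpu_info_hw_ip info = {0};
    if (amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &info) == 0)
            printf("IB start align: %u, IB size align: %u\n",
                   info.ib_start_alignment, info.ib_size_alignment);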
@@ -427,6 +433,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                case AMDGPU_HW_IP_VCN_ENC:
+               case AMDGPU_HW_IP_VCN_JPEG:
                        type = AMD_IP_BLOCK_TYPE_VCN;
                        break;
                default:
@@ -930,7 +937,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
                return;
 
        pm_runtime_get_sync(dev->dev);
-       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
        if (adev->asic_type != CHIP_RAVEN) {
                amdgpu_uvd_free_handles(adev, file_priv);
index 83e344f..72a3e8c 100644 (file)
  *    Christian König <christian.koenig@amd.com>
  */
 
+/**
+ * DOC: MMU Notifier
+ *
+ * For coherent userptr handling, an MMU notifier is registered to inform the driver
+ * about updates on the page tables of a process.
+ *
+ * When somebody tries to invalidate the page tables we block the update until
+ * all operations on the pages in question are completed, then those pages are
+ * marked as accessed and also dirty if it wasn't a read-only access.
+ *
+ * New command submissions using the userptrs in question are delayed until all
+ * page table invalidations are completed and we once more see a coherent process
+ * address space.
+ */
+
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/mmu_notifier.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
 
+/**
+ * struct amdgpu_mn
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @mn: MMU notifier structure
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+ * @read_lock: mutex for recursive locking of @lock
+ * @recursion: depth of recursion
+ *
+ * Data for each amdgpu device and process address space.
+ */
 struct amdgpu_mn {
        /* constant after initialisation */
        struct amdgpu_device    *adev;
@@ -58,13 +88,21 @@ struct amdgpu_mn {
        atomic_t                recursion;
 };
 
+/**
+ * struct amdgpu_mn_node
+ *
+ * @it: interval node defining start-last of the affected address range
+ * @bos: list of all BOs in the affected address range
+ *
+ * Manages all BOs which are affected by a certain range of address space.
+ */
 struct amdgpu_mn_node {
        struct interval_tree_node       it;
        struct list_head                bos;
 };
 
 /**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the MMU notifier
  *
  * @work: previously scheduled work item
  *
@@ -72,47 +110,50 @@ struct amdgpu_mn_node {
  */
 static void amdgpu_mn_destroy(struct work_struct *work)
 {
-       struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
-       struct amdgpu_device *adev = rmn->adev;
+       struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+       struct amdgpu_device *adev = amn->adev;
        struct amdgpu_mn_node *node, *next_node;
        struct amdgpu_bo *bo, *next_bo;
 
        mutex_lock(&adev->mn_lock);
-       down_write(&rmn->lock);
-       hash_del(&rmn->node);
+       down_write(&amn->lock);
+       hash_del(&amn->node);
        rbtree_postorder_for_each_entry_safe(node, next_node,
-                                            &rmn->objects.rb_root, it.rb) {
+                                            &amn->objects.rb_root, it.rb) {
                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
                        bo->mn = NULL;
                        list_del_init(&bo->mn_list);
                }
                kfree(node);
        }
-       up_write(&rmn->lock);
+       up_write(&amn->lock);
        mutex_unlock(&adev->mn_lock);
-       mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
-       kfree(rmn);
+       mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+       kfree(amn);
 }
 
 /**
  * amdgpu_mn_release - callback to notify about mm destruction
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  *
  * Schedule a work item to lazily destroy our notifier.
  */
 static void amdgpu_mn_release(struct mmu_notifier *mn,
                              struct mm_struct *mm)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-       INIT_WORK(&rmn->work, amdgpu_mn_destroy);
-       schedule_work(&rmn->work);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+       INIT_WORK(&amn->work, amdgpu_mn_destroy);
+       schedule_work(&amn->work);
 }
 
 
 /**
- * amdgpu_mn_lock - take the write side lock for this mn
+ * amdgpu_mn_lock - take the write side lock for this notifier
+ *
+ * @mn: our notifier
  */
 void amdgpu_mn_lock(struct amdgpu_mn *mn)
 {
@@ -121,7 +162,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn)
 }
 
 /**
- * amdgpu_mn_unlock - drop the write side lock for this mn
+ * amdgpu_mn_unlock - drop the write side lock for this notifier
+ *
+ * @mn: our notifier
  */
 void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 {
@@ -130,40 +173,38 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
- * amdgpu_mn_read_lock - take the rmn read lock
- *
- * @rmn: our notifier
+ * amdgpu_mn_read_lock - take the read side lock for this notifier
  *
- * Take the rmn read side lock.
+ * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
 {
-       mutex_lock(&rmn->read_lock);
-       if (atomic_inc_return(&rmn->recursion) == 1)
-               down_read_non_owner(&rmn->lock);
-       mutex_unlock(&rmn->read_lock);
+       mutex_lock(&amn->read_lock);
+       if (atomic_inc_return(&amn->recursion) == 1)
+               down_read_non_owner(&amn->lock);
+       mutex_unlock(&amn->read_lock);
 }
 
 /**
- * amdgpu_mn_read_unlock - drop the rmn read lock
- *
- * @rmn: our notifier
+ * amdgpu_mn_read_unlock - drop the read side lock for this notifier
  *
- * Drop the rmn read side lock.
+ * @amn: our notifier
  */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-       if (atomic_dec_return(&rmn->recursion) == 0)
-               up_read_non_owner(&rmn->lock);
+       if (atomic_dec_return(&amn->recursion) == 0)
+               up_read_non_owner(&amn->lock);
 }
 
 /**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
  * @node: the node with the BOs to unmap
+ * @start: start of address range affected
+ * @end: end of address range affected
  *
- * We block for all BOs and unmap them by move them
- * into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
  */
 static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
                                      unsigned long start,
@@ -190,27 +231,27 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  * @start: start of updated range
  * @end: end of updated range
  *
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
  */
 static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
 
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       amdgpu_mn_read_lock(rmn);
+       amdgpu_mn_read_lock(amn);
 
-       it = interval_tree_iter_first(&rmn->objects, start, end);
+       it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
 
@@ -238,15 +279,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
                                                 unsigned long start,
                                                 unsigned long end)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
 
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       amdgpu_mn_read_lock(rmn);
+       amdgpu_mn_read_lock(amn);
 
-       it = interval_tree_iter_first(&rmn->objects, start, end);
+       it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
                struct amdgpu_bo *bo;
@@ -268,7 +309,7 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
  * amdgpu_mn_invalidate_range_end - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  * @start: start of updated range
  * @end: end of updated range
  *
@@ -279,9 +320,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
                                           unsigned long start,
                                           unsigned long end)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
-       amdgpu_mn_read_unlock(rmn);
+       amdgpu_mn_read_unlock(amn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +356,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
                                enum amdgpu_mn_type type)
 {
        struct mm_struct *mm = current->mm;
-       struct amdgpu_mn *rmn;
+       struct amdgpu_mn *amn;
        unsigned long key = AMDGPU_MN_KEY(mm, type);
        int r;
 
@@ -325,41 +366,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
                return ERR_PTR(-EINTR);
        }
 
-       hash_for_each_possible(adev->mn_hash, rmn, node, key)
-               if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+       hash_for_each_possible(adev->mn_hash, amn, node, key)
+               if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
                        goto release_locks;
 
-       rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
-       if (!rmn) {
-               rmn = ERR_PTR(-ENOMEM);
+       amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+       if (!amn) {
+               amn = ERR_PTR(-ENOMEM);
                goto release_locks;
        }
 
-       rmn->adev = adev;
-       rmn->mm = mm;
-       init_rwsem(&rmn->lock);
-       rmn->type = type;
-       rmn->mn.ops = &amdgpu_mn_ops[type];
-       rmn->objects = RB_ROOT_CACHED;
-       mutex_init(&rmn->read_lock);
-       atomic_set(&rmn->recursion, 0);
+       amn->adev = adev;
+       amn->mm = mm;
+       init_rwsem(&amn->lock);
+       amn->type = type;
+       amn->mn.ops = &amdgpu_mn_ops[type];
+       amn->objects = RB_ROOT_CACHED;
+       mutex_init(&amn->read_lock);
+       atomic_set(&amn->recursion, 0);
 
-       r = __mmu_notifier_register(&rmn->mn, mm);
+       r = __mmu_notifier_register(&amn->mn, mm);
        if (r)
-               goto free_rmn;
+               goto free_amn;
 
-       hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+       hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
 
 release_locks:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);
 
-       return rmn;
+       return amn;
 
-free_rmn:
+free_amn:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);
-       kfree(rmn);
+       kfree(amn);
 
        return ERR_PTR(r);
 }
@@ -379,14 +420,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        enum amdgpu_mn_type type =
                bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
-       struct amdgpu_mn *rmn;
+       struct amdgpu_mn *amn;
        struct amdgpu_mn_node *node = NULL, *new_node;
        struct list_head bos;
        struct interval_tree_node *it;
 
-       rmn = amdgpu_mn_get(adev, type);
-       if (IS_ERR(rmn))
-               return PTR_ERR(rmn);
+       amn = amdgpu_mn_get(adev, type);
+       if (IS_ERR(amn))
+               return PTR_ERR(amn);
 
        new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
        if (!new_node)
@@ -394,12 +435,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
        INIT_LIST_HEAD(&bos);
 
-       down_write(&rmn->lock);
+       down_write(&amn->lock);
 
-       while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+       while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
                kfree(node);
                node = container_of(it, struct amdgpu_mn_node, it);
-               interval_tree_remove(&node->it, &rmn->objects);
+               interval_tree_remove(&node->it, &amn->objects);
                addr = min(it->start, addr);
                end = max(it->last, end);
                list_splice(&node->bos, &bos);
@@ -410,7 +451,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
        else
                kfree(new_node);
 
-       bo->mn = rmn;
+       bo->mn = amn;
 
        node->it.start = addr;
        node->it.last = end;
@@ -418,9 +459,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
        list_splice(&bos, &node->bos);
        list_add(&bo->mn_list, &node->bos);
 
-       interval_tree_insert(&node->it, &rmn->objects);
+       interval_tree_insert(&node->it, &amn->objects);
 
-       up_write(&rmn->lock);
+       up_write(&amn->lock);
 
        return 0;
 }
@@ -435,18 +476,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct amdgpu_mn *rmn;
+       struct amdgpu_mn *amn;
        struct list_head *head;
 
        mutex_lock(&adev->mn_lock);
 
-       rmn = bo->mn;
-       if (rmn == NULL) {
+       amn = bo->mn;
+       if (amn == NULL) {
                mutex_unlock(&adev->mn_lock);
                return;
        }
 
-       down_write(&rmn->lock);
+       down_write(&amn->lock);
 
        /* save the next list entry for later */
        head = bo->mn_list.next;
@@ -456,12 +497,13 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 
        if (list_empty(head)) {
                struct amdgpu_mn_node *node;
+
                node = container_of(head, struct amdgpu_mn_node, bos);
-               interval_tree_remove(&node->it, &rmn->objects);
+               interval_tree_remove(&node->it, &amn->objects);
                kfree(node);
        }
 
-       up_write(&rmn->lock);
+       up_write(&amn->lock);
        mutex_unlock(&adev->mn_lock);
 }
 
index 3526efa..512f598 100644 (file)
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
 
+/**
+ * DOC: amdgpu_object
+ *
+ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
+ * represents memory used by the driver (VRAM, system memory, etc.). The driver
+ * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
+ * to create/destroy/set buffer objects which are then managed by the kernel TTM
+ * memory manager.
+ * The interfaces are also used internally by kernel clients, including gfx,
+ * uvd, etc. for kernel-managed allocations used by the GPU.
+ *
+ */
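
As a usage illustration, a kernel client allocates a small CPU-visible buffer through this interface roughly as follows; a sketch only, with error handling trimmed:

    /* Sketch: allocate one page of GTT-backed memory for kernel use. */
    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT,
                                &bo, &gpu_addr, &cpu_addr);
    if (r)
            return r;
    /* ... write through cpu_addr, hand gpu_addr to the hardware ... */
    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);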
+
 static bool amdgpu_need_backup(struct amdgpu_device *adev)
 {
        if (adev->flags & AMD_IS_APU)
@@ -73,6 +86,16 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        kfree(bo);
 }
 
+/**
+ * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+ * Returns:
+ * true if the object is an &amdgpu_bo, false if not.
+ */
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 {
        if (bo->destroy == &amdgpu_ttm_bo_destroy)
@@ -80,6 +103,14 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
        return false;
 }
 
+/**
+ * amdgpu_ttm_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -184,7 +215,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
  *
  * Note: for bo_ptr, a new BO is only created if bo_ptr points to NULL.
  *
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
@@ -261,7 +293,8 @@ error_free:
  *
  * Note: for bo_ptr, a new BO is only created if bo_ptr points to NULL.
  *
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
@@ -285,6 +318,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
  * amdgpu_bo_free_kernel - free BO for kernel use
  *
  * @bo: amdgpu BO to free
+ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
+ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
  *
  * Unmaps and unpins a BO for kernel internal use.
  */
@@ -428,7 +463,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        if (unlikely(r != 0))
                return r;
 
-       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+       if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
@@ -498,6 +533,20 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object; and if requested, also creates a
+ * shadow object.
+ * A shadow object is used to back up the original buffer object, and is
+ * always in GTT.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr)
@@ -527,6 +576,21 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be backed up
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies an &amdgpu_bo buffer object to its shadow object.
+ * Not used for now.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
@@ -559,6 +623,18 @@ err:
        return r;
 }
 
+/**
+ * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
+ * @bo: pointer to the buffer object
+ *
+ * Sets placement according to domain, then changes placement and caching
+ * policy of the buffer object according to the placement.
+ * This is used for validating shadow BOs.  It calls ttm_bo_validate() to
+ * make sure the buffer is resident where it needs to be.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
        struct ttm_operation_ctx ctx = { false, false };
@@ -581,6 +657,22 @@ retry:
        return r;
 }
 
+/**
+ * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be restored
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies a buffer object's shadow content back to the object.
+ * This is used for recovering a buffer from its shadow in case of a GPU
+ * reset where VRAM contents may be lost.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
@@ -613,6 +705,17 @@ err:
        return r;
 }
 
+/**
+ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be mapped
+ * @ptr: kernel virtual address to be returned
+ *
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
        void *kptr;
@@ -643,6 +746,15 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
        return 0;
 }
 
+/**
+ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
+ *
+ * Returns:
+ * the virtual address of a buffer object area.
+ */
 void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
 {
        bool is_iomem;
@@ -650,12 +762,27 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 }
 
+/**
+ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unmapped
+ *
+ * Unmaps a kernel map set up by amdgpu_bo_kmap().
+ */
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
 {
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
 }
 
+/**
+ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * References the contained &ttm_buffer_object.
+ *
+ * Returns:
+ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
 {
        if (bo == NULL)
@@ -665,6 +792,12 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
        return bo;
 }
 
+/**
+ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Unreferences the contained &ttm_buffer_object and clears the pointer.
+ */
 void amdgpu_bo_unref(struct amdgpu_bo **bo)
 {
        struct ttm_buffer_object *tbo;
@@ -678,6 +811,29 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
                *bo = NULL;
 }
 
+/**
+ * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ * @min_offset: the start of requested address range
+ * @max_offset: the end of requested address range
+ * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
+ *
+ * Pins the buffer object according to requested domain and address range. If
+ * the memory is unbound GART memory, binds the pages into the GART table.
+ * Adjusts pin_count and pin_size accordingly.
+ *
+ * Pinning means to lock pages in memory along with keeping them at a fixed
+ * offset. It is required when a buffer cannot be moved, for example, when
+ * a display buffer is being scanned out.
+ *
+ * Compared with amdgpu_bo_pin(), this function gives more flexibility on
+ * where to pin a buffer if there are specific restrictions on where a buffer
+ * must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
@@ -771,11 +927,34 @@ error:
        return r;
 }
 
+/**
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
+ *
+ * A simple wrapper to amdgpu_bo_pin_restricted().
+ * Provides a simpler API for buffers that do not have any strict restrictions
+ * on where a buffer must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 {
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
 }
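
As a usage illustration, pinning a scanout buffer into VRAM with this simple wrapper looks roughly like the sketch below, assuming bo has already been created and with error paths trimmed:

    /* Sketch: pin a buffer in VRAM; the BO must be reserved first. */
    u64 gpu_addr;
    int r;

    r = amdgpu_bo_reserve(bo, false);
    if (r)
            return r;
    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
    amdgpu_bo_unreserve(bo);
    /* on success, gpu_addr stays valid until amdgpu_bo_unpin() */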
 
+/**
+ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unpinned
+ *
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -808,6 +987,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        return r;
 }
 
+/**
+ * amdgpu_bo_evict_vram - evict VRAM buffers
+ * @adev: amdgpu device object
+ *
+ * Evicts all VRAM buffers on the LRU list of the memory type.
+ * Mainly used for evicting VRAM at suspend time.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
@@ -830,6 +1019,15 @@ static const char *amdgpu_vram_names[] = {
        "DDR4",
 };
 
+/**
+ * amdgpu_bo_init - initialize memory manager
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_init() to initialize the amdgpu memory manager.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
        /* reserve PAT memory space to WC for VRAM */
@@ -847,6 +1045,16 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
        return amdgpu_ttm_init(adev);
 }
 
+/**
+ * amdgpu_bo_late_init - late init
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_late_init() to free resources used earlier during
+ * initialization.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_late_init(struct amdgpu_device *adev)
 {
        amdgpu_ttm_late_init(adev);
@@ -854,6 +1062,12 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev)
        return 0;
 }
 
+/**
+ * amdgpu_bo_fini - tear down memory manager
+ * @adev: amdgpu device object
+ *
+ * Reverses amdgpu_bo_init() to tear down the memory manager.
+ */
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
        amdgpu_ttm_fini(adev);
@@ -861,12 +1075,33 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
        arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
 }
 
+/**
+ * amdgpu_bo_fbdev_mmap - mmap fbdev memory
+ * @bo: &amdgpu_bo buffer object
+ * @vma: vma as input from the fbdev mmap method
+ *
+ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a BO.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                             struct vm_area_struct *vma)
 {
        return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
+/**
+ * amdgpu_bo_set_tiling_flags - set tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: new flags
+ *
+ * Sets buffer object's tiling flags to the new values. Used by GEM ioctl or
+ * kernel driver to set the tiling flags on a buffer.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -879,6 +1114,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
        return 0;
 }
 
+/**
+ * amdgpu_bo_get_tiling_flags - get tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: returned flags
+ *
+ * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
+ * query the tiling flags on a buffer.
+ */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
        lockdep_assert_held(&bo->tbo.resv->lock.base);
@@ -887,6 +1130,19 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
                *tiling_flags = bo->tiling_flags;
 }
 
+/**
+ * amdgpu_bo_set_metadata - set metadata
+ * @bo: &amdgpu_bo buffer object
+ * @metadata: new metadata
+ * @metadata_size: size of the new metadata
+ * @flags: flags of the new metadata
+ *
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
                            uint32_t metadata_size, uint64_t flags)
 {
@@ -916,6 +1172,21 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        return 0;
 }
 
+/**
+ * amdgpu_bo_get_metadata - get metadata
+ * @bo: &amdgpu_bo buffer object
+ * @buffer: returned metadata
+ * @buffer_size: size of the buffer
+ * @metadata_size: size of the returned metadata
+ * @flags: flags of the returned metadata
+ *
+ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
@@ -939,6 +1210,16 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
        return 0;
 }
 
+/**
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new information of the buffer object
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
+ * bookkeeping.
+ * TTM driver callback which is called when TTM moves a buffer.
+ */
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
@@ -967,6 +1248,17 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
        trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
+/**
+ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
+ * @bo: pointer to a buffer object
+ *
+ * Notifies the driver we are taking a fault on this BO and have reserved it,
+ * also performs bookkeeping.
+ * TTM driver callback for dealing with VM faults.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -1040,10 +1332,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
  * amdgpu_bo_gpu_offset - return GPU offset of bo
  * @bo:        amdgpu object for which we query the offset
  *
- * Returns current GPU offset of the object.
- *
  * Note: object should either be pinned or reserved when calling this
  * function, it might be useful to add check for this for debugging.
+ *
+ * Returns:
+ * current GPU offset of the object.
  */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
@@ -1059,6 +1352,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
        return bo->tbo.offset;
 }
 
+/**
+ * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * @adev: amdgpu device object
+ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
+ *
+ * Returns:
+ * Which of the allowed domains is preferred for pinning the BO for scanout.
+ */
 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
                                            uint32_t domain)
 {
index b455da4..113edff 100644 (file)
@@ -68,11 +68,11 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
-                       adev->pm.dpm.ac_power = true;
+                       adev->pm.ac_power = true;
                else
-                       adev->pm.dpm.ac_power = false;
+                       adev->pm.ac_power = false;
                if (adev->powerplay.pp_funcs->enable_bapm)
-                       amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
+                       amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
                mutex_unlock(&adev->pm.mutex);
        }
 }
@@ -80,12 +80,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 /**
  * DOC: power_dpm_state
  *
- * This is a legacy interface and is only provided for backwards compatibility.
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters.  The file power_dpm_state is used for this.
+ * The power_dpm_state file is a legacy interface and is only provided for
+ * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
+ * certain power related parameters.  The file power_dpm_state is used for this.
  * It accepts the following arguments:
+ *
  * - battery
+ *
  * - balanced
+ *
  * - performance
  *
  * battery
@@ -169,14 +172,21 @@ fail:
  * The amdgpu driver provides a sysfs API for adjusting certain power
  * related parameters.  The file power_dpm_force_performance_level is
  * used for this.  It accepts the following arguments:
+ *
  * - auto
+ *
  * - low
+ *
  * - high
+ *
  * - manual
- * - GPU fan
+ *
  * - profile_standard
+ *
  * - profile_min_sclk
+ *
  * - profile_min_mclk
+ *
  * - profile_peak
  *
  * auto
@@ -463,8 +473,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * this.
  *
  * Reading the file will display:
+ *
  * - a list of engine clock levels and voltages labeled OD_SCLK
+ *
  * - a list of memory clock levels and voltages labeled OD_MCLK
+ *
  * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
  *
  * To manually adjust these settings, first select manual using
@@ -1285,35 +1298,51 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
  * DOC: hwmon
  *
  * The amdgpu driver exposes the following sensor interfaces:
+ *
  * - GPU temperature (via the on-die sensor)
+ *
  * - GPU voltage
+ *
  * - Northbridge voltage (APUs only)
+ *
  * - GPU power
+ *
  * - GPU fan
  *
  * hwmon interfaces for GPU temperature:
+ *
  * - temp1_input: the on die GPU temperature in millidegrees Celsius
+ *
  * - temp1_crit: temperature critical max value in millidegrees Celsius
+ *
  * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
  *
  * hwmon interfaces for GPU voltage:
+ *
  * - in0_input: the voltage on the GPU in millivolts
+ *
  * - in1_input: the voltage on the Northbridge in millivolts
  *
  * hwmon interfaces for GPU power:
+ *
  * - power1_average: average power used by the GPU in microWatts
+ *
  * - power1_cap_min: minimum cap supported in microWatts
+ *
  * - power1_cap_max: maximum cap supported in microWatts
+ *
  * - power1_cap: selected power cap in microWatts
  *
  * hwmon interfaces for GPU fan:
+ *
  * - pwm1: pulse width modulation fan level (0-255)
- * - pwm1_enable: pulse width modulation fan control method
- *                0: no fan speed control
- *                1: manual fan speed control using pwm interface
- *                2: automatic fan speed control
+ *
+ * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
+ *
  * - pwm1_min: pulse width modulation fan control minimum level (0)
+ *
  * - pwm1_max: pulse width modulation fan control maximum level (255)
+ *
  * - fan1_input: fan speed in RPM
  *
  * You can use hwmon tools like sensors to view this information on your system.
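
Programmatically, the same values can be read straight from sysfs. A small userspace sketch follows; the hwmon index (hwmon0 below) varies per system, so a real program should first match the device's "name" attribute against "amdgpu":

    #include <stdio.h>

    int main(void)
    {
            long millideg;
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%ld", &millideg) == 1)
                    printf("GPU temperature: %.1f degC\n",
                           millideg / 1000.0);
            fclose(f);
            return 0;
    }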
@@ -1878,6 +1907,14 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                        amdgpu_fence_wait_empty(ring);
        }
 
+       mutex_lock(&adev->pm.mutex);
+       /* update battery/ac status */
+       if (power_supply_is_system_supplied() > 0)
+               adev->pm.ac_power = true;
+       else
+               adev->pm.ac_power = false;
+       mutex_unlock(&adev->pm.mutex);
+
        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
@@ -1898,14 +1935,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
        } else {
                mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_get_active_displays(adev);
-               /* update battery/ac status */
-               if (power_supply_is_system_supplied() > 0)
-                       adev->pm.dpm.ac_power = true;
-               else
-                       adev->pm.dpm.ac_power = false;
-
                amdgpu_dpm_change_power_state_locked(adev);
-
                mutex_unlock(&adev->pm.mutex);
        }
 }
index 4683626..b2286bc 100644 (file)
  *
  * Authors: Alex Deucher
  */
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
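
From userspace these callbacks are exercised through the PRIME ioctls. A minimal libdrm sketch, assuming two open DRM file descriptors fd_a and fd_b and a GEM handle on fd_a (all three names are assumptions for illustration):

    #include <xf86drm.h>

    /* Sketch: export a GEM handle as a dma-buf fd, import it elsewhere. */
    int prime_fd;
    uint32_t imported_handle;

    if (drmPrimeHandleToFD(fd_a, handle, DRM_CLOEXEC, &prime_fd) == 0 &&
        drmPrimeFDToHandle(fd_b, prime_fd, &imported_handle) == 0) {
            /* both devices now reference the same backing memory */
    }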
+
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
 
 static const struct dma_buf_ops amdgpu_dmabuf_ops;
 
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the buffer object's memory.
+ */
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
        return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
 }
 
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM buffer object
+ *
+ * Sets up an in-kernel virtual mapping of the buffer object's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
        return bo->dma_buf_vmap.virtual;
 }
 
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM buffer object
+ * @vaddr: virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the buffer object's memory.
+ */
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
        ttm_bo_kunmap(&bo->dma_buf_vmap);
 }
 
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM buffer object
+ * @vma: virtual memory area
+ *
+ * Sets up a userspace mapping of the buffer object's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
        return ret;
 }
 
+/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Imports shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM buffer object of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
 struct drm_gem_object *
 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
@@ -132,8 +188,20 @@ error:
        return ERR_PTR(ret);
 }
 
+/**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+ * @target_dev: target device
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
-                                struct device *target_dev,
                                 struct dma_buf_attachment *attach)
 {
        struct drm_gem_object *obj = dma_buf->priv;
@@ -141,7 +209,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        long r;
 
-       r = drm_gem_map_attach(dma_buf, target_dev, attach);
+       r = drm_gem_map_attach(dma_buf, attach);
        if (r)
                return r;
 
@@ -181,6 +249,14 @@ error_detach:
        return r;
 }
 
+/**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * the other device. For now, simply unpins the buffer from GTT.
+ */
 static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
 {
@@ -202,6 +278,13 @@ error:
        drm_gem_map_detach(dma_buf, attach);
 }
 
+/**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * The buffer object's reservation object.
+ */
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -209,6 +292,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
        return bo->tbo.resv;
 }
 
+/**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: shared DMA buffer
+ * @direction: direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction direction)
 {
@@ -245,14 +340,24 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_gem_begin_cpu_access,
        .map = drm_gem_dmabuf_kmap,
-       .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
-       .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
 };
 
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+ * @gobj: GEM buffer object
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM buffer object from the given device.
+ */
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags)
@@ -273,6 +378,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
        return buf;
 }
 
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: Shared DMA buffer
+ *
+ * The main work is done by the &drm_gem_prime_import helper, which in turn
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+ * GEM buffer object representing the shared DMA buffer for the given device.
+ */
 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
 {
index 8af16e8..ea9850c 100644 (file)
@@ -96,6 +96,9 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                *out_ring = &adev->vcn.ring_enc[ring];
                break;
+       case AMDGPU_HW_IP_VCN_JPEG:
+               *out_ring = &adev->vcn.ring_jpeg;
+               break;
        default:
                *out_ring = NULL;
                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
@@ -260,6 +263,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                ip_num_rings = adev->vcn.num_enc_rings;
                break;
+       case AMDGPU_HW_IP_VCN_JPEG:
+               ip_num_rings = 1;
+               break;
        default:
                DRM_DEBUG("unknown ip type: %d\n", hw_ip);
                return -EINVAL;
@@ -287,6 +293,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_UVD_ENC:
        case AMDGPU_HW_IP_VCN_DEC:
        case AMDGPU_HW_IP_VCN_ENC:
+       case AMDGPU_HW_IP_VCN_JPEG:
                r = amdgpu_identity_map(adev, mapper, ring, out_ring);
                break;
        case AMDGPU_HW_IP_DMA:
index c6850b6..19e45a3 100644 (file)
@@ -304,7 +304,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                0xffffffffffffffff : ring->buf_mask;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
-               r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+               r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &ring->ring_obj,
                                            &ring->gpu_addr,
index 1513124..a293f4e 100644 (file)
@@ -53,7 +53,8 @@ enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_KIQ,
        AMDGPU_RING_TYPE_UVD_ENC,
        AMDGPU_RING_TYPE_VCN_DEC,
-       AMDGPU_RING_TYPE_VCN_ENC
+       AMDGPU_RING_TYPE_VCN_ENC,
+       AMDGPU_RING_TYPE_VCN_JPEG
 };
 
 struct amdgpu_device;
@@ -112,6 +113,7 @@ struct amdgpu_ring_funcs {
        u32                     nop;
        bool                    support_64bit_ptrs;
        unsigned                vmhub;
+       unsigned                extra_dw;
 
        /* ring read/write ptr handling */
        u64 (*get_rptr)(struct amdgpu_ring *ring);
index e93a0a2..0c084d3 100644 (file)
@@ -277,7 +277,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                if (!adev->mman.buffer_funcs_enabled) {
                        /* Move to system memory */
                        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
-               } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+               } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                           !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
                           amdgpu_bo_in_cpu_visible_vram(abo)) {
 
index 3ff08e3..cc15d32 100644 (file)
@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
-       unsigned version_major, version_minor, family_id;
+       unsigned family_id;
        int i, j, r;
 
        INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
@@ -208,29 +208,46 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
-       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
-       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
-       DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
-               version_major, version_minor, family_id);
-
-       /*
-        * Limit the number of UVD handles depending on microcode major
-        * and minor versions. The firmware version which has 40 UVD
-        * instances support is 1.80. So all subsequent versions should
-        * also have the same support.
-        */
-       if ((version_major > 0x01) ||
-           ((version_major == 0x01) && (version_minor >= 0x50)))
-               adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
 
-       adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
-                               (family_id << 8));
+       if (adev->asic_type < CHIP_VEGA20) {
+               unsigned version_major, version_minor;
+
+               version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+               version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+                       version_major, version_minor, family_id);
+
+               /*
+                * Limit the number of UVD handles depending on microcode major
+                * and minor versions. The firmware version which has 40 UVD
+                * instances support is 1.80. So all subsequent versions should
+                * also have the same support.
+                */
+               if ((version_major > 0x01) ||
+                   ((version_major == 0x01) && (version_minor >= 0x50)))
+                       adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+               adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+                                       (family_id << 8));
+
+               if ((adev->asic_type == CHIP_POLARIS10 ||
+                    adev->asic_type == CHIP_POLARIS11) &&
+                   (adev->uvd.fw_version < FW_1_66_16))
+                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+                                 version_major, version_minor);
+       } else {
+               unsigned int enc_major, enc_minor, dec_minor;
+
+               dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
+               enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+               DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+                       enc_major, enc_minor, dec_minor, family_id);
 
-       if ((adev->asic_type == CHIP_POLARIS10 ||
-            adev->asic_type == CHIP_POLARIS11) &&
-           (adev->uvd.fw_version < FW_1_66_16))
-               DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
-                         version_major, version_minor);
+               adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+               adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
+       }
 
        bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
                  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
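
A standalone decoder for the VEGA20+ ucode_version layout the new branch parses: family id in bits 0-7, decoder minor in 8-15, encoder minor in 24-29, encoder major in 30-31. The sample header word is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t v = 0x84200C2A; /* illustrative hdr->ucode_version */

        printf("ENC %u.%u DEC .%u Family ID %u\n",
               (v >> 30) & 0x3,  /* enc_major */
               (v >> 24) & 0x3f, /* enc_minor */
               (v >> 8)  & 0xff, /* dec_minor */
               v & 0xff);        /* family_id */
        return 0;
}
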
index 1b4ad9b..a66cd52 100644 (file)
@@ -140,6 +140,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
 
+       amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+
        release_firmware(adev->vcn.fw);
 
        return 0;
@@ -597,3 +599,129 @@ error:
        dma_fence_put(fence);
        return r;
 }
+
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+       r = amdgpu_ring_alloc(ring, 3);
+
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+                                 ring->idx, r);
+               return r;
+       }
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+                                 ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                                 ring->idx, tmp);
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+               struct dma_fence **fence)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
+       struct amdgpu_ib *ib;
+       struct dma_fence *f = NULL;
+       const unsigned ib_size_dw = 16;
+       int i, r;
+
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       if (r)
+               return r;
+
+       ib = &job->ibs[0];
+
+       ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+       ib->ptr[1] = 0xDEADBEEF;
+       for (i = 2; i < 16; i += 2) {
+               ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+               ib->ptr[i+1] = 0;
+       }
+       ib->length_dw = 16;
+
+       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+       job->fence = dma_fence_get(f);
+       if (r)
+               goto err;
+
+       amdgpu_job_free(job);
+       if (fence)
+               *fence = dma_fence_get(f);
+       dma_fence_put(f);
+
+       return 0;
+
+err:
+       amdgpu_job_free(job);
+       return r;
+}
+
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       struct dma_fence *fence = NULL;
+       long r = 0;
+
+       r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+               goto error;
+       }
+
+       r = dma_fence_wait_timeout(fence, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out.\n");
+               r = -ETIMEDOUT;
+               goto error;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+               goto error;
+       } else {
+               r = 0;
+       }
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       } else {
+               DRM_ERROR("ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+
+       dma_fence_put(fence);
+
+error:
+       return r;
+}
index 773010b..0b0b863 100644 (file)
@@ -66,6 +66,7 @@ struct amdgpu_vcn {
        const struct firmware   *fw;    /* VCN firmware */
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+       struct amdgpu_ring      ring_jpeg;
        struct amdgpu_irq_src   irq;
        unsigned                num_enc_rings;
 };
@@ -83,4 +84,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
 #endif
index edf16b2..422d1a4 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
 
-/*
- * GPUVM
+/**
+ * DOC: GPUVM
+ *
  * GPUVM is similar to the legacy gart on older asics, however
  * rather than there being a single global gart table
  * for the entire GPU, there are multiple VM page tables active
@@ -63,37 +65,84 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
 #undef START
 #undef LAST
 
-/* Local structure. Encapsulate some VM table update parameters to reduce
+/**
+ * struct amdgpu_pte_update_params - Local structure
+ *
+ * Encapsulate some VM table update parameters to reduce
  * the number of function parameters
+ *
  */
 struct amdgpu_pte_update_params {
-       /* amdgpu device we do this update for */
+
+       /**
+        * @adev: amdgpu device we do this update for
+        */
        struct amdgpu_device *adev;
-       /* optional amdgpu_vm we do this update for */
+
+       /**
+        * @vm: optional amdgpu_vm we do this update for
+        */
        struct amdgpu_vm *vm;
-       /* address where to copy page table entries from */
+
+       /**
+        * @src: address where to copy page table entries from
+        */
        uint64_t src;
-       /* indirect buffer to fill with commands */
+
+       /**
+        * @ib: indirect buffer to fill with commands
+        */
        struct amdgpu_ib *ib;
-       /* Function which actually does the update */
+
+       /**
+        * @func: Function which actually does the update
+        */
        void (*func)(struct amdgpu_pte_update_params *params,
                     struct amdgpu_bo *bo, uint64_t pe,
                     uint64_t addr, unsigned count, uint32_t incr,
                     uint64_t flags);
-       /* The next two are used during VM update by CPU
-        *  DMA addresses to use for mapping
-        *  Kernel pointer of PD/PT BO that needs to be updated
+       /**
+        * @pages_addr:
+        *
+        * DMA addresses to use for mapping, used during VM update by CPU
         */
        dma_addr_t *pages_addr;
+
+       /**
+        * @kptr:
+        *
+        * Kernel pointer of PD/PT BO that needs to be updated,
+        * used during VM update by CPU
+        */
        void *kptr;
 };
 
-/* Helper to disable partial resident texture feature from a fence callback */
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
+ */
 struct amdgpu_prt_cb {
+
+       /**
+        * @adev: amdgpu device
+        */
        struct amdgpu_device *adev;
+
+       /**
+        * @cb: callback
+        */
        struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize a bo_va_base structure and add it to the appropriate lists
+ *
+ */
 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo *bo)
@@ -126,8 +175,10 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
  * amdgpu_vm_level_shift - return the addr shift for each level
  *
  * @adev: amdgpu_device pointer
+ * @level: VMPT level
  *
- * Returns the number of bits the pfn needs to be right shifted for a level.
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
  */
 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                      unsigned level)
@@ -155,8 +206,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
  *
  * @adev: amdgpu_device pointer
+ * @level: VMPT level
  *
- * Calculate the number of entries in a page directory or page table.
+ * Returns:
+ * The number of entries in a page directory or page table.
  */
 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
@@ -179,8 +232,10 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
  *
  * @adev: amdgpu_device pointer
+ * @level: VMPT level
  *
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
  */
 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
 {
@@ -218,6 +273,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
  * @param: parameter for the validation callback
  *
  * Validate the page table BOs on command submission if necessary.
+ *
+ * Returns:
+ * Validation result.
  */
 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
@@ -273,6 +331,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @vm: VM to check
  *
  * Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if eviction list is empty.
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
@@ -283,10 +344,15 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
  *
  * @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
  * @bo: BO to clear
  * @level: level this BO is at
+ * @pte_support_ats: indicate ATS support from PTE
  *
  * Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm, struct amdgpu_bo *bo,
@@ -382,10 +448,16 @@ error:
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @parent: parent PT
  * @saddr: start of the address range
  * @eaddr: end of the address range
+ * @level: VMPT level
+ * @ats: indicate ATS support from PTE
  *
  * Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
@@ -494,6 +566,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
  * @size: Size from start address we need.
  *
  * Make sure the page tables are allocated.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                        struct amdgpu_vm *vm,
@@ -559,6 +634,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
        }
 }
 
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
 {
@@ -586,19 +670,17 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
        return vm_flush_needed || gds_switch_needed;
 }
 
-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
-{
-       return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
-}
-
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vmid: vmid number to use
- * @pd_addr: address of the page directory
+ * @job: related job
+ * @need_pipe_sync: is pipe sync needed
  *
  * Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 {
@@ -706,6 +788,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
  * Returns the found bo_va or NULL if none is found
  *
  * Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
  */
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
@@ -787,7 +872,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
  * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
  */
 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
@@ -840,6 +928,17 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
        }
 }
 
+
+/**
+ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+ * @owner: fence owner
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             void *owner)
 {
@@ -893,7 +992,10 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
 /*
  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
  *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
  * @parent: parent PD
+ * @level: VMPT level
  *
  * Mark all PD level as invalid after an error.
  */
@@ -928,7 +1030,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
  * @vm: requested vm
  *
  * Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  */
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
@@ -1115,14 +1219,15 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
  * @dst: destination address to map to, the next dst inside the function
  * @flags: mapping flags
  *
  * Update the page tables in the range @start - @end.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                                  uint64_t start, uint64_t end,
@@ -1176,7 +1281,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
  * @end: last PTE to handle
  * @dst: addr those PTEs should point to
  * @flags: hw mapping flags
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
                                uint64_t start, uint64_t end,
@@ -1248,7 +1355,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params    *params,
  * @fence: optional resulting fence
  *
  * Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct dma_fence *exclusive,
@@ -1324,7 +1433,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                ndw += ncmds * 10;
 
                /* extra commands for begin/end fragments */
-               ndw += 2 * 10 * adev->vm_manager.fragment_size;
+               if (vm->root.base.bo->shadow)
+                       ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
+               else
+                       ndw += 2 * 10 * adev->vm_manager.fragment_size;
 
                params.func = amdgpu_vm_do_set_ptes;
        }
@@ -1400,7 +1512,9 @@ error_free:
  *
  * Split the mapping into smaller chunks so that each update fits
  * into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
@@ -1513,7 +1627,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
  * @clear: if true clear the entries
  *
  * Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
@@ -1608,6 +1724,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 /**
  * amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
  */
 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 {
@@ -1622,6 +1740,8 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 /**
  * amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
@@ -1634,6 +1754,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 
 /**
  * amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
  */
 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 {
@@ -1643,6 +1765,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 
 /**
  * amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback function
  */
 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
 {
@@ -1654,6 +1779,9 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
 
 /**
  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
  */
 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
                                 struct dma_fence *fence)
@@ -1745,9 +1873,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  * or if an error occurred)
  *
  * Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
  * PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
+ *
  */
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
@@ -1792,10 +1922,11 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @sync: sync object to add fences to
  *
  * Make sure all BOs which are moved are updated in the PTs.
- * Returns 0 for success.
+ *
+ * Returns:
+ * 0 for success.
  *
  * PTs have to be reserved!
  */
@@ -1850,7 +1981,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
  *
  * Add @bo into the requested vm.
  * Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
  *
  * Object has to be reserved!
  */
@@ -1913,10 +2046,13 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
  * @bo_va: bo_va to store the address
  * @saddr: where to map the BO
  * @offset: requested offset in the BO
+ * @size: BO size in bytes
  * @flags: attributes of pages (read/write/valid/etc.)
  *
  * Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  *
  * Object has to be reserved and unreserved outside!
  */
@@ -1974,11 +2110,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
  * @bo_va: bo_va to store the address
  * @saddr: where to map the BO
  * @offset: requested offset in the BO
+ * @size: BO size in bytes
  * @flags: attributes of pages (read/write/valid/etc.)
  *
  * Add a mapping of the BO at the specified addr into the VM. Replace existing
  * mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  *
  * Object has to be reserved and unreserved outside!
  */
@@ -2035,7 +2174,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
  * @saddr: where the BO is mapped
  *
  * Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  *
  * Object has to be reserved and unreserved outside!
  */
@@ -2089,7 +2230,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
  * @size: size of the range
  *
  * Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  */
 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
@@ -2186,8 +2329,13 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
  * amdgpu_vm_bo_lookup_mapping - find mapping by address
  *
  * @vm: the requested VM
+ * @addr: the address
  *
  * Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching for addr or NULL
+ *
  */
 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr)
@@ -2239,8 +2387,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  * amdgpu_vm_bo_invalidate - mark the bo as invalid
  *
  * @adev: amdgpu_device pointer
- * @vm: requested vm
  * @bo: amdgpu buffer object
+ * @evicted: is the BO evicted
  *
  * Mark @bo as invalid.
  */
@@ -2280,6 +2428,14 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
        }
 }
 
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as power of two
+ */
 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
 {
        /* Total bits covered by PD + PTs */
@@ -2298,6 +2454,10 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
  *
  * @adev: amdgpu_device pointer
  * @vm_size: the default vm size if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
  */
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
@@ -2365,8 +2525,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  * @vm_context: Indicates if it is a GFX or Compute context
+ * @pasid: Process address space identifier
  *
  * Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, unsigned int pasid)
@@ -2417,7 +2581,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        }
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
-       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");
        vm->last_update = NULL;
 
@@ -2487,6 +2651,9 @@ error_free_sched_entity:
 /**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
  *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
  * This only works on GFX VMs that don't have any BOs added and no
  * page tables allocated yet.
  *
@@ -2499,7 +2666,8 @@ error_free_sched_entity:
  * setting. May leave behind an unused shadow BO for the page
  * directory when switching from SDMA updates to CPU updates.
  *
- * Returns 0 for success, -errno for errors.
+ * Returns:
+ * 0 for success, -errno for errors.
  */
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
@@ -2533,7 +2701,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        vm->pte_support_ats = pte_support_ats;
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
-       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");
 
        if (vm->pasid) {
@@ -2654,8 +2822,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @pasid: PASID do identify the VM
  *
- * This function is expected to be called in interrupt context. Returns
- * true if there was fault credit, false otherwise
+ * This function is expected to be called in interrupt context.
+ *
+ * Returns:
+ * True if there was fault credit, false otherwise
  */
 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
                                  unsigned int pasid)
@@ -2709,7 +2879,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
         */
 #ifdef CONFIG_X86_64
        if (amdgpu_vm_update_mode == -1) {
-               if (amdgpu_vm_is_large_bar(adev))
+               if (amdgpu_gmc_vram_full_visible(&adev->gmc))
                        adev->vm_manager.vm_update_mode =
                                AMDGPU_VM_USE_CPU_FOR_COMPUTE;
                else
@@ -2739,6 +2909,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
        amdgpu_vmid_mgr_fini(adev);
 }
 
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_vm
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        union drm_amdgpu_vm *args = data;
index e9934de..b18c31a 100644 (file)
@@ -1221,7 +1221,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
        ectx.abort = false;
        ectx.last_jump = 0;
        if (ws)
-               ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+               ectx.ws = kcalloc(4, ws, GFP_ATOMIC);
        else
                ectx.ws = NULL;
 
index 7fbad2f..c9d45cf 100644 (file)
@@ -951,12 +951,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
        else
                pi->battery_state = false;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
 
-       if (adev->pm.dpm.ac_power == false) {
+       if (adev->pm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
@@ -4078,7 +4078,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4127,7 +4127,7 @@ static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4160,7 +4160,7 @@ static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4191,7 +4191,7 @@ static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
index 8ff4c60..702e257 100644 (file)
@@ -2003,9 +2003,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
                amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
                if (amdgpu_dpm == -1)
-                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
-               else
                        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+               else
+                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
                if (adev->enable_virtual_display)
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2024,9 +2024,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
                amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
                if (amdgpu_dpm == -1)
-                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
-               else
                        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+               else
+                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
                if (adev->enable_virtual_display)
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
index 818874b..807ee0d 100644 (file)
@@ -866,26 +866,32 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
-       uint32_t scratch;
-       uint32_t tmp = 0;
+
+       unsigned int index;
+       uint64_t gpu_addr;
+       uint32_t tmp;
        long r;
 
-       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       r = amdgpu_device_wb_get(adev, &index);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }
-       WREG32(scratch, 0xCAFEDEAD);
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
-       ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
-       ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
-       ib.ptr[2] = 0xDEADBEEF;
-       ib.length_dw = 3;
+       ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+       ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+       ib.ptr[2] = lower_32_bits(gpu_addr);
+       ib.ptr[3] = upper_32_bits(gpu_addr);
+       ib.ptr[4] = 0xDEADBEEF;
+       ib.length_dw = 5;
 
        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
@@ -900,20 +906,21 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }
-       tmp = RREG32(scratch);
+
+       tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF) {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
-               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-                         scratch, tmp);
+               DRM_ERROR("ib test on ring %d failed\n", ring->idx);
                r = -EINVAL;
        }
+
 err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
 err1:
-       amdgpu_gfx_scratch_free(adev, scratch);
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
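
The rewrite above moves the IB test from a scratch register to a write-back (WB) slot: the GPU executes PACKET3_WRITE_DATA to the slot's GPU address and the driver reads the result through the CPU-visible mapping. A standalone emulation of that handshake, with a plain array standing in for adev->wb.wb[]:

#include <stdint.h>
#include <stdio.h>

static uint32_t wb[256]; /* stand-in for the CPU-visible WB page */

/* models PACKET3_WRITE_DATA landing in the WB slot */
static void gpu_write_data(uint32_t index, uint32_t value)
{
        wb[index] = value;
}

int main(void)
{
        uint32_t index = 7;

        wb[index] = 0xCAFEDEAD;            /* seed, as the hunk does */
        gpu_write_data(index, 0xDEADBEEF);

        printf("ib test %s\n",
               wb[index] == 0xDEADBEEF ? "succeeded" : "failed");
        return 0;
}
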
 
@@ -2048,6 +2055,20 @@ static int gfx_v8_0_sw_init(void *handle)
        if (r)
                return r;
 
+       /* Add CP EDC/ECC irq  */
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 197,
+                             &adev->gfx.cp_ecc_error_irq);
+       if (r)
+               return r;
+
+       /* SQ interrupts. */
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 239,
+                             &adev->gfx.sq_irq);
+       if (r) {
+               DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+               return r;
+       }
+
        adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
 
        gfx_v8_0_scratch_init(adev);
@@ -5111,6 +5132,10 @@ static int gfx_v8_0_hw_fini(void *handle)
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
+       amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+
+       amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
+
        /* disable KCQ to avoid CPC touch memory not valid anymore */
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
@@ -5542,6 +5567,20 @@ static int gfx_v8_0_late_init(void *handle)
        if (r)
                return r;
 
+       r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+       if (r) {
+               DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
+               return r;
+       }
+
+       r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
+       if (r) {
+               DRM_ERROR(
+                       "amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
+                       r);
+               return r;
+       }
+
        amdgpu_device_ip_set_powergating_state(adev,
                                               AMD_IP_BLOCK_TYPE_GFX,
                                               AMD_PG_STATE_GATE);
@@ -6787,6 +6826,77 @@ static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
+static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
+                                        struct amdgpu_irq_src *source,
+                                        unsigned int type,
+                                        enum amdgpu_interrupt_state state)
+{
+       int enable_flag;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               enable_flag = 0;
+               break;
+
+       case AMDGPU_IRQ_STATE_ENABLE:
+               enable_flag = 1;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+
+       return 0;
+}
+
+static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
+                                    struct amdgpu_irq_src *source,
+                                    unsigned int type,
+                                    enum amdgpu_interrupt_state state)
+{
+       int enable_flag;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               enable_flag = 1;
+               break;
+
+       case AMDGPU_IRQ_STATE_ENABLE:
+               enable_flag = 0;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
+                    enable_flag);
+
+       return 0;
+}
+
 static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
@@ -6837,6 +6947,69 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+                                    struct amdgpu_irq_src *source,
+                                    struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("CP EDC/ECC error detected.");
+       return 0;
+}
+
+static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+                          struct amdgpu_irq_src *source,
+                          struct amdgpu_iv_entry *entry)
+{
+       u8 enc, se_id;
+       char type[20];
+
+       /* Parse all fields according to SQ_INTERRUPT* registers */
+       enc = (entry->src_data[0] >> 26) & 0x3;
+       se_id = (entry->src_data[0] >> 24) & 0x3;
+
+       switch (enc) {
+               case 0:
+                       DRM_INFO("SQ general purpose intr detected:"
+                                       "se_id %d, immed_overflow %d, host_reg_overflow %d,"
+                                       "host_cmd_overflow %d, cmd_timestamp %d,"
+                                       "reg_timestamp %d, thread_trace_buff_full %d,"
+                                       "wlt %d, thread_trace %d.\n",
+                                       se_id,
+                                       (entry->src_data[0] >> 7) & 0x1,
+                                       (entry->src_data[0] >> 6) & 0x1,
+                                       (entry->src_data[0] >> 5) & 0x1,
+                                       (entry->src_data[0] >> 4) & 0x1,
+                                       (entry->src_data[0] >> 3) & 0x1,
+                                       (entry->src_data[0] >> 2) & 0x1,
+                                       (entry->src_data[0] >> 1) & 0x1,
+                                       entry->src_data[0] & 0x1
+                                       );
+                       break;
+               case 1:
+               case 2:
+
+                       if (enc == 1)
+                               sprintf(type, "instruction intr");
+                       else
+                               sprintf(type, "EDC/ECC error");
+
+                       DRM_INFO(
+                               "SQ %s detected: "
+                                       "se_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d\n",
+                                       type, se_id,
+                                       (entry->src_data[0] >> 20) & 0xf,
+                                       (entry->src_data[0] >> 18) & 0x3,
+                                       (entry->src_data[0] >> 14) & 0xf,
+                                       (entry->src_data[0] >> 10) & 0xf
+                                       );
+                       break;
+               default:
+                       DRM_ERROR("SQ invalid encoding type.\n");
+                       return -EINVAL;
+       }
+
+       return 0;
+}
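
A standalone decode of the SQ_INTERRUPT fields the handler above extracts; the sample ih_data word is made up and only a few fields are shown.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ih_data = (2u << 26) | (1u << 24) | (5u << 20);

        unsigned enc   = (ih_data >> 26) & 0x3; /* 1: instr, 2: EDC/ECC */
        unsigned se_id = (ih_data >> 24) & 0x3;
        unsigned cu_id = (ih_data >> 20) & 0xf;

        printf("enc %u se_id %u cu_id %u\n", enc, se_id, cu_id);
        return 0;
}
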
+
 static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *src,
                                            unsigned int type,
@@ -7037,6 +7210,16 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
        .process = gfx_v8_0_kiq_irq,
 };
 
+static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+       .set = gfx_v8_0_set_cp_ecc_int_state,
+       .process = gfx_v8_0_cp_ecc_error_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
+       .set = gfx_v8_0_set_sq_int_state,
+       .process = gfx_v8_0_sq_irq,
+};
+
 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 {
        adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -7050,6 +7233,12 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 
        adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
        adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
+
+       adev->gfx.cp_ecc_error_irq.num_types = 1;
+       adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+
+       adev->gfx.sq_irq.num_types = 1;
+       adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
 }
 
 static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
index 7a1e77c..46de1fd 100644 (file)
@@ -1921,7 +1921,7 @@ static int kv_dpm_set_power_state(void *handle)
        int ret;
 
        if (pi->bapm_enable) {
-               ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
+               ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
                if (ret) {
                        DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
                        return ret;
index 5c97a36..d51318c 100644 (file)
@@ -3480,7 +3480,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                disable_sclk_switching = true;
        }
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -3489,7 +3489,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
                        ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
        }
-       if (adev->pm.dpm.ac_power == false) {
+       if (adev->pm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
index 8dc2910..edfe508 100644 (file)
 
 #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
 
+#define        PACKETJ_CONDITION_CHECK0        0
+#define        PACKETJ_CONDITION_CHECK1        1
+#define        PACKETJ_CONDITION_CHECK2        2
+#define        PACKETJ_CONDITION_CHECK3        3
+#define        PACKETJ_CONDITION_CHECK4        4
+#define        PACKETJ_CONDITION_CHECK5        5
+#define        PACKETJ_CONDITION_CHECK6        6
+#define        PACKETJ_CONDITION_CHECK7        7
+
+#define        PACKETJ_TYPE0   0
+#define        PACKETJ_TYPE1   1
+#define        PACKETJ_TYPE2   2
+#define        PACKETJ_TYPE3   3
+#define        PACKETJ_TYPE4   4
+#define        PACKETJ_TYPE5   5
+#define        PACKETJ_TYPE6   6
+#define        PACKETJ_TYPE7   7
+
+#define PACKETJ(reg, r, cond, type)    ((reg & 0x3FFFF) |                      \
+                        ((r & 0x3F) << 18) |                   \
+                        ((cond & 0xF) << 24) |                         \
+                        ((type & 0xF) << 28))
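
A standalone check of the field packing PACKETJ() performs (reg in bits 0-17, r in 18-23, cond in 24-27, type in 28-31); the register offset below is illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t packetj(uint32_t reg, uint32_t r, uint32_t cond,
                        uint32_t type)
{
        /* same packing as the PACKETJ() macro above */
        return (reg & 0x3FFFF) | ((r & 0x3F) << 18) |
               ((cond & 0xF) << 24) | ((type & 0xF) << 28);
}

int main(void)
{
        /* a TYPE0 register-write header for an illustrative offset */
        printf("0x%08X\n", packetj(0x1234, 0, 0, 0 /* PACKETJ_TYPE0 */));
        return 0;
}
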
+
 /* Packet 3 types */
 #define        PACKET3_NOP                                     0x10
 #define        PACKET3_SET_BASE                                0x11
index 29684c3..b82c920 100644 (file)
@@ -38,7 +38,9 @@
 static int vcn_v1_0_stop(struct amdgpu_device *adev);
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -55,6 +57,7 @@ static int vcn_v1_0_early_init(void *handle)
 
        vcn_v1_0_set_dec_ring_funcs(adev);
        vcn_v1_0_set_enc_ring_funcs(adev);
+       vcn_v1_0_set_jpeg_ring_funcs(adev);
        vcn_v1_0_set_irq_funcs(adev);
 
        return 0;
@@ -86,6 +89,11 @@ static int vcn_v1_0_sw_init(void *handle)
                        return r;
        }
 
+       /* VCN JPEG TRAP */
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+       if (r)
+               return r;
+
        r = amdgpu_vcn_sw_init(adev);
        if (r)
                return r;
@@ -108,6 +116,12 @@ static int vcn_v1_0_sw_init(void *handle)
                        return r;
        }
 
+       ring = &adev->vcn.ring_jpeg;
+       sprintf(ring->name, "vcn_jpeg");
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       if (r)
+               return r;
+
        return r;
 }
 
@@ -162,6 +176,14 @@ static int vcn_v1_0_hw_init(void *handle)
                }
        }
 
+       ring = &adev->vcn.ring_jpeg;
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               goto done;
+       }
+
 done:
        if (!r)
                DRM_INFO("VCN decode and encode initialized successfully.\n");
@@ -729,6 +751,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
+       ring = &adev->vcn.ring_jpeg;
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+       /* initialize wptr */
+       ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+       /* copy patch commands to the jpeg ring */
+       vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+               (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
        return 0;
 }
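
The patch commands are copied just past the region the scheduler may fill, i.e. wptr plus max_dw dwords per in-flight submission; this is presumably the area the extra_dw allocation earlier in this diff reserves. A standalone sketch of the offset math with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t wptr = 0, max_dw = 16, hw_submission = 2;

        /* patch area starts after max_dw * hw_submission ring entries */
        printf("patch offset (dw): %llu\n",
               (unsigned long long)(wptr + max_dw * hw_submission));
        return 0;
}
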
 
@@ -1126,6 +1164,383 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
+
+/**
+ * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x68e04);
+
+       amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x68e04);
+
+       amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                    unsigned flags)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, seq);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, seq);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x8);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x01400200);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, seq);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+       amdgpu_ring_write(ring, 0xffffffff);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x3fbc);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x1);
+}
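
The sequence above makes the engine memory-write the 32-bit fence value to addr and then trap; fence processing later compares that memory against the last emitted sequence number. A standalone sketch of that check (names illustrative, not the amdgpu fence code):

#include <stdint.h>
#include <stdio.h>

static uint32_t fence_slot; /* stand-in for the GPU-written location */

/* wrap-safe comparison of the written value against an emitted seq */
static int fence_signaled(uint32_t emitted_seq)
{
        return (int32_t)(fence_slot - emitted_seq) >= 0;
}

int main(void)
{
        uint32_t seq = 42;

        fence_slot = seq; /* models the GPCOM memory write of seq */
        printf("fence %u signaled: %d\n", seq, fence_signaled(seq));
        return 0;
}
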
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib,
+                                 unsigned vmid, bool ctx_switch)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, ib->length_dw);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x01400200);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x2);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+       amdgpu_ring_write(ring, 0x2);
+}
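
This sequence is 11 header/value pairs, 22 dwords, matching the .emit_ib_size = 22 declared in vcn_v1_0_jpeg_ring_vm_funcs below: the IB address and size are programmed into the JRBC registers, and the tail packets poll UVD_JRBC_STATUS against a reference value of 0x2 before the ring proceeds.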
+
+static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
+                                           uint32_t reg, uint32_t val,
+                                           uint32_t mask)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t reg_offset = (reg << 2);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x01400200);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, val);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+               ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring,
+                       PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+       } else {
+               amdgpu_ring_write(ring, reg_offset);
+               amdgpu_ring_write(ring,
+                       PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+       }
+       amdgpu_ring_write(ring, mask);
+}
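
The branch above, repeated verbatim in emit_wreg and in the patch helper below, chooses between encoding the register straight into the packet and routing it through UVD_JRBC_EXTERNAL_REG_BASE. A hedged sketch of the test as a predicate; the helper name is hypothetical, and the two windows are presumably the byte-offset ranges the JRBC engine can address directly:

        static bool vcn_v1_0_jpeg_reg_is_direct(uint32_t reg_offset)
        {
                /* byte offsets reachable without the external-register base */
                return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
                       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
        }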
+
+static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
+               unsigned vmid, uint64_t pd_addr)
+{
+       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+       uint32_t data0, data1, mask;
+
+       pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+       /* wait for register write */
+       data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+       data1 = lower_32_bits(pd_addr);
+       mask = 0xffffffff;
+       vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
+}
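
A worked example of the register arithmetic, assuming each VMID owns a lo32/hi32 page-table-base register pair (hence the stride of 2): for vmid 4 the wait polls hub->ctx0_ptb_addr_lo32 + 8 with a full 0xffffffff mask until it reads back lower_32_bits(pd_addr), i.e. until the TLB-flush write has landed.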
+
+static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
+                                       uint32_t reg, uint32_t val)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t reg_offset = (reg << 2);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+                       ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring,
+                       PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+       } else {
+               amdgpu_ring_write(ring, reg_offset);
+               amdgpu_ring_write(ring,
+                       PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       }
+       amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+       int i;
+
+       WARN_ON(ring->wptr % 2 || count % 2);
+
+       for (i = 0; i < count / 2; i++) {
+               amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+               amdgpu_ring_write(ring, 0);
+       }
+}
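
TYPE6 nops go out as header/payload pairs, which is why both the write pointer and the requested count must be even. A usage sketch:

        /* pad the ring with four TYPE6 nop packets, i.e. eight dwords */
        vcn_v1_0_jpeg_ring_nop(ring, 8);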
+
+static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+       struct amdgpu_device *adev = ring->adev;
+       ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+               ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               ring->ring[(*ptr)++] = 0;
+               ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+       } else {
+               ring->ring[(*ptr)++] = reg_offset;
+               ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+       }
+       ring->ring[(*ptr)++] = val;
+}
+
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       uint32_t reg, reg_offset, val, mask, i;
+
+       // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+       reg_offset = (reg << 2);
+       val = lower_32_bits(ring->gpu_addr);
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+       reg_offset = (reg << 2);
+       val = upper_32_bits(ring->gpu_addr);
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 3rd to 5th: issue MEM_READ commands
+       for (i = 0; i <= 2; i++) {
+               ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+               ring->ring[ptr++] = 0;
+       }
+
+       // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+       reg_offset = (reg << 2);
+       val = 0x13;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 7th: program mmUVD_JRBC_RB_REF_DATA
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
+       reg_offset = (reg << 2);
+       val = 0x1;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+       reg_offset = (reg << 2);
+       val = 0x1;
+       mask = 0x1;
+
+       ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+       ring->ring[ptr++] = 0x01400200;
+       ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+       ring->ring[ptr++] = val;
+       ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+               ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               ring->ring[ptr++] = 0;
+               ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+       } else {
+               ring->ring[ptr++] = reg_offset;
+               ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+       }
+       ring->ring[ptr++] = mask;
+
+       //9th to 21st: insert no-op
+       for (i = 0; i <= 12; i++) {
+               ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+               ring->ring[ptr++] = 0;
+       }
+
+       //22nd: reset mmUVD_JRBC_RB_RPTR
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
+       reg_offset = (reg << 2);
+       val = 0;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+       reg_offset = (reg << 2);
+       val = 0x12;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
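
Adding the steps up: the two BAR writes (4 dwords each), the three TYPE2 pairs (6), the RB_CNTL and REF_DATA writes (4 + 4), the conditional read (8), the thirteen nop pairs (26), and the closing RPTR reset and RB_CNTL write (4 + 4) come to exactly 64 dwords, which is the .extra_dw = 64 reserved for this patch area in the ring funcs table below.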
+
 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
@@ -1150,6 +1565,9 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
        case 120:
                amdgpu_fence_process(&adev->vcn.ring_enc[1]);
                break;
+       case 126:
+               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
@@ -1273,6 +1691,39 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+       .type = AMDGPU_RING_TYPE_VCN_JPEG,
+       .align_mask = 0xf,
+       .nop = PACKET0(0x81ff, 0),
+       .support_64bit_ptrs = false,
+       .vmhub = AMDGPU_MMHUB,
+       .extra_dw = 64,
+       .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
+       .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
+       .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
+       .emit_frame_size =
+               6 + 6 + /* hdp invalidate / flush */
+               SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+               SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+               8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
+               14 + 14 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
+               6,
+       .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
+       .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+       .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+       .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+       .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+       .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
+       .insert_nop = vcn_v1_0_jpeg_ring_nop,
+       .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+       .insert_end = vcn_v1_0_jpeg_ring_insert_end,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_vcn_ring_begin_use,
+       .end_use = amdgpu_vcn_ring_end_use,
+       .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
+       .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
+};
+
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
        adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -1289,6 +1740,12 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+       DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
+}
+
 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
        .set = vcn_v1_0_set_interrupt_state,
        .process = vcn_v1_0_process_interrupt,
index d5d4586..4c35625 100644
@@ -9,19 +9,10 @@ config DRM_AMD_DC
          support for AMDGPU. This adds required support for Vega and
          Raven ASICs.
 
-config DRM_AMD_DC_FBC
-       bool "AMD FBC - Enable Frame Buffer Compression"
-       depends on DRM_AMD_DC
-       help
-         Choose this option if you want to use frame buffer compression
-         support.
-         This is a power optimisation feature, check its availability
-         on your hardware before enabling this option.
-
-
 config DRM_AMD_DC_DCN1_0
        bool "DCN 1.0 Raven family"
        depends on DRM_AMD_DC && X86
+       default y
        help
          Choose this option if you want to have
          RV family for display engine
index 3a8d635..66bd3cc 100644
@@ -347,7 +347,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
        drm_kms_helper_hotplug_event(dev);
 }
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 {
@@ -388,7 +387,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
        }
 
 }
-#endif
 
 
 /* Init display KMS
@@ -3426,12 +3424,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
        struct edid *edid = amdgpu_dm_connector->edid;
 
        encoder = helper->best_encoder(connector);
-       amdgpu_dm_connector_ddc_get_modes(connector, edid);
-       amdgpu_dm_connector_add_common_modes(encoder, connector);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+       if (!edid || !drm_edid_is_valid(edid)) {
+               drm_add_modes_noedid(connector, 640, 480);
+       } else {
+               amdgpu_dm_connector_ddc_get_modes(connector, edid);
+               amdgpu_dm_connector_add_common_modes(encoder, connector);
+       }
        amdgpu_dm_fbc_init(connector);
-#endif
+
        return amdgpu_dm_connector->num_modes;
 }
 
@@ -3914,8 +3915,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 
        /* Flip */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
-       /* update crtc fb */
-       crtc->primary->fb = fb;
 
        WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
        WARN_ON(!acrtc_state->stream);
index d5aa89a..a29dc35 100644
@@ -72,13 +72,11 @@ struct irq_list_head {
        struct work_struct work;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
 struct dm_comressor_info {
        void *cpu_addr;
        struct amdgpu_bo *bo_ptr;
        uint64_t gpu_addr;
 };
-#endif
 
 
 struct amdgpu_display_manager {
@@ -129,9 +127,8 @@ struct amdgpu_display_manager {
         * Caches device atomic state for suspend/resume
         */
        struct drm_atomic_state *cached_state;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
        struct dm_comressor_info compressor;
-#endif
 };
 
 struct amdgpu_dm_connector {
index ec304b1..b19dc4c 100644
@@ -497,6 +497,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
                DRM_ERROR("EDID err: %d, on connector: %s",
                                edid_status,
                                aconnector->base.name);
+       if (link->aux_mode) {
+               union test_request test_request = {0};
+               union test_response test_response = {0};
+
+               dm_helpers_dp_read_dpcd(ctx,
+                                       link,
+                                       DP_TEST_REQUEST,
+                                       &test_request.raw,
+                                       sizeof(union test_request));
+
+               if (!test_request.bits.EDID_READ)
+                       return edid_status;
+
+               test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+               dm_helpers_dp_write_dpcd(ctx,
+                                       link,
+                                       DP_TEST_EDID_CHECKSUM,
+                                       &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+                                       1);
+
+               dm_helpers_dp_write_dpcd(ctx,
+                                       link,
+                                       DP_TEST_RESPONSE,
+                                       &test_response.raw,
+                                       sizeof(test_response));
+
+       }
 
        return edid_status;
 }
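
The byte written back to DP_TEST_EDID_CHECKSUM is simply the last byte of the EDID data, which per the EDID specification makes each 128-byte block sum to zero mod 256. A minimal sketch of how such a checksum byte is derived (hypothetical helper, not part of the patch):

        static uint8_t edid_block_checksum(const uint8_t block[128])
        {
                uint8_t sum = 0;
                int i;

                /* pick the byte that makes the whole block sum to 0 */
                for (i = 0; i < 127; i++)
                        sum += block[i];
                return (uint8_t)(0x100 - sum);
        }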
index 5a33461..e861929 100644
 #include "amdgpu_dm_irq.h"
 #include "amdgpu_pm.h"
 
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
-{
-       struct timespec64 time;
-
-       getrawmonotonic64(&time);
-       return timespec64_to_ns(&time);
-}
-
 unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
                unsigned long long current_time_stamp,
                unsigned long long last_time_stamp)
index 0214515..f6c00a5 100644
@@ -78,6 +78,8 @@ void dc_conn_log(struct dc_context *ctx,
        if (i == NUM_ELEMENTS(signal_type_info_tbl))
                goto fail;
 
+       dm_logger_append_heading(&entry);
+
        dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
                        signal_type_info_tbl[i].name,
                        link->link_index);
index 0866874..a3c56cd 100644
@@ -32,8 +32,9 @@
 static const struct dc_log_type_info log_type_info_tbl[] = {
                {LOG_ERROR,                 "Error"},
                {LOG_WARNING,               "Warning"},
-               {LOG_DEBUG,                 "Debug"},
+               {LOG_DEBUG,                 "Debug"},
                {LOG_DC,                    "DC_Interface"},
+               {LOG_DTN,                   "DTN"},
                {LOG_SURFACE,               "Surface"},
                {LOG_HW_HOTPLUG,            "HW_Hotplug"},
                {LOG_HW_LINK_TRAINING,      "HW_LKTN"},
@@ -60,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
                {LOG_EVENT_LINK_LOSS,       "LinkLoss"},
                {LOG_EVENT_UNDERFLOW,       "Underflow"},
                {LOG_IF_TRACE,              "InterfaceTrace"},
-               {LOG_DTN,                   "DTN"},
+               {LOG_PERF_TRACE,            "PerfTrace"},
                {LOG_DISPLAYSTATS,          "DisplayStats"}
 };
 
@@ -128,8 +129,45 @@ uint32_t dal_logger_destroy(struct dal_logger **logger)
 }
 
 /* ------------------------------------------------------------------------ */
+void dm_logger_append_heading(struct log_entry *entry)
+{
+       int j;
+
+       for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
+               const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
+               if (info->type == entry->type)
+                       dm_logger_append(entry, "[%s]\t", info->name);
+       }
+}
+
+
+/* Print everything unread in log_buffer to the debug console */
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
+{
+       char *string_start = &logger->log_buffer[logger->buffer_read_offset];
+
+       if (should_warn)
+               dm_output_to_console(
+                       "---------------- FLUSHING LOG BUFFER ----------------\n");
+       while (logger->buffer_read_offset < logger->buffer_write_offset) {
+
+               if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
+                       dm_output_to_console("%s", string_start);
+                       string_start = logger->log_buffer + logger->buffer_read_offset + 1;
+               }
+               logger->buffer_read_offset++;
+       }
+       if (should_warn)
+               dm_output_to_console(
+                       "-------------- END FLUSHING LOG BUFFER --------------\n\n");
+}
+/* ------------------------------------------------------------------------ */
+
+/* Warning: 'msg' must be null-terminated, and its total size, including the
+ * '\0', must be less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256)
+ */
 static bool dal_logger_should_log(
        struct dal_logger *logger,
        enum dc_log_type log_type)
@@ -159,26 +197,6 @@ static void log_to_debug_console(struct log_entry *entry)
        }
 }
 
-/* Print everything unread existing in log_buffer to debug console*/
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
-{
-       char *string_start = &logger->log_buffer[logger->buffer_read_offset];
-
-       if (should_warn)
-               dm_output_to_console(
-                       "---------------- FLUSHING LOG BUFFER ----------------\n");
-       while (logger->buffer_read_offset < logger->buffer_write_offset) {
-
-               if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
-                       dm_output_to_console("%s", string_start);
-                       string_start = logger->log_buffer + logger->buffer_read_offset + 1;
-               }
-               logger->buffer_read_offset++;
-       }
-       if (should_warn)
-               dm_output_to_console(
-                       "-------------- END FLUSHING LOG BUFFER --------------\n\n");
-}
 
 static void log_to_internal_buffer(struct log_entry *entry)
 {
@@ -229,19 +247,6 @@ static void log_to_internal_buffer(struct log_entry *entry)
        }
 }
 
-static void log_heading(struct log_entry *entry)
-{
-       int j;
-
-       for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
-
-               const struct dc_log_type_info *info = &log_type_info_tbl[j];
-
-               if (info->type == entry->type)
-                       dm_logger_append(entry, "[%s]\t", info->name);
-       }
-}
-
 static void append_entry(
                struct log_entry *entry,
                char *buffer,
@@ -259,11 +264,7 @@ static void append_entry(
        entry->buf_offset += buf_size;
 }
 
-/* ------------------------------------------------------------------------ */
 
-/* Warning: Be careful that 'msg' is null terminated and the total size is
- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
- */
 void dm_logger_write(
        struct dal_logger *logger,
        enum dc_log_type log_type,
@@ -287,7 +288,7 @@ void dm_logger_write(
 
                entry.type = log_type;
 
-               log_heading(&entry);
+               dm_logger_append_heading(&entry);
 
                size = dm_log_to_buffer(
                        buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
@@ -372,7 +373,7 @@ void dm_logger_open(
 
        logger->open_count++;
 
-       log_heading(entry);
+       dm_logger_append_heading(entry);
 }
 
 void dm_logger_close(struct log_entry *entry)
index 644b218..53ce7fa 100644
@@ -169,6 +169,22 @@ failed_alloc:
        return false;
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_stream_adjust_vmin_vmax
+ *
+ *  @brief
+ *     Looks up the pipe context of each dc_stream_state and updates the
+ *     vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
+ *     Rate), a power-saving feature that reduces the panel refresh rate
+ *     while the screen is static
+ *
+ *  @param [in] dc: dc reference
+ *  @param [in] streams: array of dc stream states to adjust
+ *  @param [in] num_streams: number of entries in streams
+ *  @param [in] vmin: new vertical_total_min value
+ *  @param [in] vmax: new vertical_total_max value
+ *****************************************************************************
+ */
 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                int vmin, int vmax)
@@ -465,6 +481,7 @@ static bool construct(struct dc *dc,
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
+       dc_ctx->dc_sink_id_count = 0;
        dc->ctx = dc_ctx;
 
        dc->current_state = dc_create_state();
@@ -1548,7 +1565,7 @@ struct dc_sink *dc_link_add_remote_sink(
        struct dc_sink *dc_sink;
        enum dc_edid_status edid_status;
 
-       if (len > MAX_EDID_BUFFER_SIZE) {
+       if (len > DC_MAX_EDID_BUFFER_SIZE) {
                dm_error("Max EDID buffer size breached!\n");
                return NULL;
        }
index 2fa5218..08b7ee5 100644
@@ -1861,28 +1861,6 @@ static enum dc_status enable_link(
                break;
        }
 
-       if (pipe_ctx->stream_res.audio && status == DC_OK) {
-               struct dc *core_dc = pipe_ctx->stream->ctx->dc;
-               /* notify audio driver for audio modes of monitor */
-               struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
-               unsigned int i, num_audio = 1;
-               for (i = 0; i < MAX_PIPES; i++) {
-                       /*current_state not updated yet*/
-                       if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
-                               num_audio++;
-               }
-
-               pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
-
-               if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
-                       /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
-                       pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
-               /* un-mute audio */
-               /* TODO: audio should be per stream rather than per link */
-               pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                       pipe_ctx->stream_res.stream_enc, false);
-       }
-
        return status;
 }
 
@@ -2415,6 +2393,8 @@ void core_link_enable_stream(
                        }
        }
 
+       core_dc->hwss.enable_audio_stream(pipe_ctx);
+
        /* turn off otg test pattern if enable */
        pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
                        CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
@@ -2453,6 +2433,22 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
        core_dc->hwss.set_avmute(pipe_ctx, enable);
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_link_enable_hpd_filter
+ *
+ *  @brief
+ *     If enable is true, programs HPD filter on associated HPD line using
+ *     delay_on_disconnect/delay_on_connect values dependent on
+ *     link->connector_signal
+ *
+ *     If enable is false, programs HPD filter on associated HPD line with no
+ *     delays on connect or disconnect
+ *
+ *  @param [in] link: pointer to the dc link
+ *  @param [in] enable: boolean specifying whether to enable the HPD filter
+ *****************************************************************************
+ */
 void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
 {
        struct gpio *hpd;
index 7857cb4..509f265 100644
@@ -1647,22 +1647,26 @@ static enum dc_status read_hpd_rx_irq_data(
                        irq_data->raw,
                        sizeof(union hpd_irq_data));
        else {
-               /* Read 2 bytes at this location,... */
+               /* Read 14 bytes in a single read and then copy only the required fields.
+                * This is more efficient than doing it in two separate AUX reads. */
+
+               uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
                retval = core_link_read_dpcd(
                        link,
                        DP_SINK_COUNT_ESI,
-                       irq_data->raw,
-                       2);
+                       tmp,
+                       sizeof(tmp));
 
                if (retval != DC_OK)
                        return retval;
 
-               /* ... then read remaining 4 at the other location */
-               retval = core_link_read_dpcd(
-                       link,
-                       DP_LANE0_1_STATUS_ESI,
-                       &irq_data->raw[2],
-                       4);
+               irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+               irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
        }
 
        return retval;
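
The single read works because the ESI registers form one contiguous DPCD range. A compile-time guard along these lines (hypothetical, not part of the patch) would document that assumption, taking the DPCD addresses from drm_dp_helper.h:

        static inline void check_esi_layout(void)
        {
                /* DP_SINK_COUNT_ESI (0x2002) .. DP_SINK_STATUS_ESI (0x200f) */
                BUILD_BUG_ON(DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1 != 14);
        }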
@@ -2305,6 +2309,7 @@ static bool retrieve_link_cap(struct dc_link *link)
 {
        uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
 
+       struct dp_device_vendor_id sink_id;
        union down_stream_port_count down_strm_port_count;
        union edp_configuration_cap edp_config_cap;
        union dp_downstream_port_present ds_port = { 0 };
@@ -2391,6 +2396,17 @@ static bool retrieve_link_cap(struct dc_link *link)
                        &link->dpcd_caps.sink_count.raw,
                        sizeof(link->dpcd_caps.sink_count.raw));
 
+       /* read sink ieee oui */
+       core_link_read_dpcd(link,
+                       DP_SINK_OUI,
+                       (uint8_t *)(&sink_id),
+                       sizeof(sink_id));
+
+       link->dpcd_caps.sink_dev_id =
+                       (sink_id.ieee_oui[0] << 16) +
+                       (sink_id.ieee_oui[1] << 8) +
+                       (sink_id.ieee_oui[2]);
+
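
A worked example of the fold: a sink reporting ieee_oui = {0x00, 0x1c, 0xf8} ends up with sink_dev_id == 0x001cf8, the three DPCD bytes packed big-endian into the low 24 bits.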
        /* Connectivity log: detection */
        CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
 
index 751f3ac..fca2255 100644
@@ -522,13 +522,12 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
        }
 }
 
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        const struct dc_stream_state *stream = pipe_ctx->stream;
        struct rect surf_src = plane_state->src_rect;
        struct rect surf_clip = plane_state->clip_rect;
-       int recout_full_x, recout_full_y;
        bool pri_split = pipe_ctx->bottom_pipe &&
                        pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
        bool sec_split = pipe_ctx->top_pipe &&
@@ -597,20 +596,22 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
                }
        }
        /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
-        *                              * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
-        *                              ratio)
+        *                      * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+        *                      ratio)
         */
-       recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+       recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
                                        * stream->dst.width / stream->src.width -
                        surf_src.x * plane_state->dst_rect.width / surf_src.width
                                        * stream->dst.width / stream->src.width;
-       recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+       recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
                                        * stream->dst.height / stream->src.height -
                        surf_src.y * plane_state->dst_rect.height / surf_src.height
                                        * stream->dst.height / stream->src.height;
 
-       recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
-       recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+       recout_full->width = plane_state->dst_rect.width
+                                       * stream->dst.width / stream->src.width;
+       recout_full->height = plane_state->dst_rect.height
+                                       * stream->dst.height / stream->src.height;
 }
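
A quick sanity check of the new recout_full fields, assuming identity scaling (stream src and dst the same size, plane dst_rect the same size as surf_src): all the width/height ratios are 1, so recout_full->x reduces to stream->dst.x + plane_state->dst_rect.x - stream->src.x - surf_src.x, and recout_full->width to plane_state->dst_rect.width. The recout_skip view this replaces carried only the delta between the clipped recout and that full rectangle; the adjusted code below recomputes the same delta on demand as data->recout.x - recout_full->x.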
 
 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -662,7 +663,7 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
 }
 
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
 {
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
        struct rect src = pipe_ctx->plane_state->src_rect;
@@ -680,15 +681,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
                flip_vert_scan_dir = true;
        else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
                flip_horz_scan_dir = true;
-       if (pipe_ctx->plane_state->horizontal_mirror)
-               flip_horz_scan_dir = !flip_horz_scan_dir;
 
        if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
                        pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
                rect_swap_helper(&src);
                rect_swap_helper(&data->viewport_c);
                rect_swap_helper(&data->viewport);
-       }
+       } else if (pipe_ctx->plane_state->horizontal_mirror)
+                       flip_horz_scan_dir = !flip_horz_scan_dir;
 
        /*
         * Init calculated according to formula:
@@ -708,127 +708,286 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
        data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
                        dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
 
+       if (!flip_horz_scan_dir) {
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+                       int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h, data->ratios.horz));
 
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+               }
+               if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+                       int vp_clip = (src.x + src.width) / vpc_div -
+                                       data->viewport_c.width - data->viewport_c.x;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+               }
 
-       /* Adjust for viewport end clip-off */
-       if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
-               int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
-       }
-       if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
-               int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
-       }
-       if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
-               int vp_clip = (src.x + src.width) / vpc_div -
-                               data->viewport_c.width - data->viewport_c.x;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
-       }
-       if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
-               int vp_clip = (src.y + src.height) / vpc_div -
-                               data->viewport_c.height - data->viewport_c.y;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
-       }
-
-       /* Adjust for non-0 viewport offset */
-       if (data->viewport.x && !flip_horz_scan_dir) {
-               int int_part;
-
-               data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
-                               data->ratios.horz, recout_skip->width));
-               int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
-               if (int_part < data->taps.h_taps) {
-                       int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
-                                               (data->taps.h_taps - int_part) : data->viewport.x;
-                       data->viewport.x -= int_adj;
-                       data->viewport.width += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.h_taps) {
-                       data->viewport.x += int_part - data->taps.h_taps;
-                       data->viewport.width -= int_part - data->taps.h_taps;
-                       int_part = data->taps.h_taps;
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.x) {
+                       int int_part;
+
+                       data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+                                       data->ratios.horz, data->recout.x - recout_full->x));
+                       int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+                       if (int_part < data->taps.h_taps) {
+                               int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+                                                       (data->taps.h_taps - int_part) : data->viewport.x;
+                               data->viewport.x -= int_adj;
+                               data->viewport.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps) {
+                               data->viewport.x += int_part - data->taps.h_taps;
+                               data->viewport.width -= int_part - data->taps.h_taps;
+                               int_part = data->taps.h_taps;
+                       }
+                       data->inits.h.value &= 0xffffffff;
+                       data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
                }
-               data->inits.h.value &= 0xffffffff;
-               data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
-       }
-
-       if (data->viewport_c.x && !flip_horz_scan_dir) {
-               int int_part;
-
-               data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
-                               data->ratios.horz_c, recout_skip->width));
-               int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
-               if (int_part < data->taps.h_taps_c) {
-                       int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
-                                       (data->taps.h_taps_c - int_part) : data->viewport_c.x;
-                       data->viewport_c.x -= int_adj;
-                       data->viewport_c.width += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.h_taps_c) {
-                       data->viewport_c.x += int_part - data->taps.h_taps_c;
-                       data->viewport_c.width -= int_part - data->taps.h_taps_c;
-                       int_part = data->taps.h_taps_c;
+
+               if (data->viewport_c.x) {
+                       int int_part;
+
+                       data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+                                       data->ratios.horz_c, data->recout.x - recout_full->x));
+                       int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+                       if (int_part < data->taps.h_taps_c) {
+                               int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+                                               (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+                               data->viewport_c.x -= int_adj;
+                               data->viewport_c.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps_c) {
+                               data->viewport_c.x += int_part - data->taps.h_taps_c;
+                               data->viewport_c.width -= int_part - data->taps.h_taps_c;
+                               int_part = data->taps.h_taps_c;
+                       }
+                       data->inits.h_c.value &= 0xffffffff;
+                       data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
                }
-               data->inits.h_c.value &= 0xffffffff;
-               data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
-       }
-
-       if (data->viewport.y && !flip_vert_scan_dir) {
-               int int_part;
-
-               data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
-                               data->ratios.vert, recout_skip->height));
-               int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
-               if (int_part < data->taps.v_taps) {
-                       int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
-                                               (data->taps.v_taps - int_part) : data->viewport.y;
-                       data->viewport.y -= int_adj;
-                       data->viewport.height += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.v_taps) {
-                       data->viewport.y += int_part - data->taps.v_taps;
-                       data->viewport.height -= int_part - data->taps.v_taps;
-                       int_part = data->taps.v_taps;
+       } else {
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.x) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
+                       data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
+               }
+               if (data->viewport_c.x) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+                       data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
                }
-               data->inits.v.value &= 0xffffffff;
-               data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
-       }
-
-       if (data->viewport_c.y && !flip_vert_scan_dir) {
-               int int_part;
-
-               data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
-                               data->ratios.vert_c, recout_skip->height));
-               int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
-               if (int_part < data->taps.v_taps_c) {
-                       int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
-                                       (data->taps.v_taps_c - int_part) : data->viewport_c.y;
-                       data->viewport_c.y -= int_adj;
-                       data->viewport_c.height += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.v_taps_c) {
-                       data->viewport_c.y += int_part - data->taps.v_taps_c;
-                       data->viewport_c.height -= int_part - data->taps.v_taps_c;
-                       int_part = data->taps.v_taps_c;
+
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+                       int int_part;
+                       int end_offset = src.x + src.width
+                                       - data->viewport.x - data->viewport.width;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * right side of vp due to scan direction
+                        */
+                       data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+                                       data->ratios.horz, data->recout.x - recout_full->x));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.h) - end_offset;
+                       if (int_part < data->taps.h_taps) {
+                               int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
+                                                       (data->taps.h_taps - int_part) : end_offset;
+                               data->viewport.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps) {
+                               data->viewport.width += int_part - data->taps.h_taps;
+                               int_part = data->taps.h_taps;
+                       }
+                       data->inits.h.value &= 0xffffffff;
+                       data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+               }
+
+               if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+                       int int_part;
+                       int end_offset = (src.x + src.width) / vpc_div
+                                       - data->viewport_c.x - data->viewport_c.width;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * right side of vp due to scan direction
+                        */
+                       data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+                                       data->ratios.horz_c, data->recout.x - recout_full->x));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
+                       if (int_part < data->taps.h_taps_c) {
+                               int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
+                                                       (data->taps.h_taps_c - int_part) : end_offset;
+                               data->viewport_c.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps_c) {
+                               data->viewport_c.width += int_part - data->taps.h_taps_c;
+                               int_part = data->taps.h_taps_c;
+                       }
+                       data->inits.h_c.value &= 0xffffffff;
+                       data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+               }
+
+       }
+       if (!flip_vert_scan_dir) {
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+                       int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+               }
+               if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+                       int vp_clip = (src.y + src.height) / vpc_div -
+                                       data->viewport_c.height - data->viewport_c.y;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+               }
+
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.y) {
+                       int int_part;
+
+                       data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+                                       data->ratios.vert, data->recout.y - recout_full->y));
+                       int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+                       if (int_part < data->taps.v_taps) {
+                               int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+                                                       (data->taps.v_taps - int_part) : data->viewport.y;
+                               data->viewport.y -= int_adj;
+                               data->viewport.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps) {
+                               data->viewport.y += int_part - data->taps.v_taps;
+                               data->viewport.height -= int_part - data->taps.v_taps;
+                               int_part = data->taps.v_taps;
+                       }
+                       data->inits.v.value &= 0xffffffff;
+                       data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+               }
+
+               if (data->viewport_c.y) {
+                       int int_part;
+
+                       data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+                                       data->ratios.vert_c, data->recout.y - recout_full->y));
+                       int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+                       if (int_part < data->taps.v_taps_c) {
+                               int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+                                               (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+                               data->viewport_c.y -= int_adj;
+                               data->viewport_c.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps_c) {
+                               data->viewport_c.y += int_part - data->taps.v_taps_c;
+                               data->viewport_c.height -= int_part - data->taps.v_taps_c;
+                               int_part = data->taps.v_taps_c;
+                       }
+                       data->inits.v_c.value &= 0xffffffff;
+                       data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+               }
+       } else {
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.y) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
+                       data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
+               }
+               if (data->viewport_c.y) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+                       data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+               }
+
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+                       int int_part;
+                       int end_offset = src.y + src.height
+                                       - data->viewport.y - data->viewport.height;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * right side of vp due to scan direction
+                        */
+                       data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+                                       data->ratios.vert, data->recout.y - recout_full->y));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.v) - end_offset;
+                       if (int_part < data->taps.v_taps) {
+                               int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
+                                                       (data->taps.v_taps - int_part) : end_offset;
+                               data->viewport.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps) {
+                               data->viewport.height += int_part - data->taps.v_taps;
+                               int_part = data->taps.v_taps;
+                       }
+                       data->inits.v.value &= 0xffffffff;
+                       data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+               }
+
+               if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+                       int int_part;
+                       int end_offset = (src.y + src.height) / vpc_div
+                                       - data->viewport_c.y - data->viewport_c.height;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * right side of vp due to scan direction
+                        */
+                       data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+                                       data->ratios.vert_c, data->recout.y - recout_full->y));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
+                       if (int_part < data->taps.v_taps_c) {
+                               int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
+                                                       (data->taps.v_taps_c - int_part) : end_offset;
+                               data->viewport_c.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps_c) {
+                               data->viewport_c.height += int_part - data->taps.v_taps_c;
+                               int_part = data->taps.v_taps_c;
+                       }
+                       data->inits.v_c.value &= 0xffffffff;
+                       data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
                }
-               data->inits.v_c.value &= 0xffffffff;
-               data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
        }
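[Editor's note] The paired "+=" / "-=" updates above follow a clamp idiom: an edge moves only by as much margin as actually exists. A minimal standalone sketch of the idiom in plain C; the function and variable names are illustrative, not driver API:

static void grow_viewport_top(int *vp_y, int *vp_height, int int_part)
{
	/* min(int_part, *vp_y): never move the origin past the top edge */
	int adj = int_part < *vp_y ? int_part : *vp_y;

	*vp_height += adj;	/* extend the visible region */
	*vp_y -= adj;		/* pull the start up by the same amount */
}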
 
        /* Interlaced inits based on final vert inits */
@@ -846,7 +1005,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
-       struct view recout_skip = { 0 };
+       struct rect recout_full = { 0 };
        bool res = false;
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
        /* Important: scaling ratio calculation requires pixel format,
@@ -866,7 +1025,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
                return false;
 
-       calculate_recout(pipe_ctx, &recout_skip);
+       calculate_recout(pipe_ctx, &recout_full);
 
        /**
         * Setting line buffer pixel depth to 24bpp yields banding
@@ -910,7 +1069,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
        if (res)
                /* May need to re-check lb size after this in some obscure scenario */
-               calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+               calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
 
        DC_LOG_SCALER(
                                "%s: Viewport:\nheight:%d width:%d x:%d "
@@ -2347,7 +2506,8 @@ static void set_hdr_static_info_packet(
 {
        /* HDR Static Metadata info packet for HDR10 */
 
-       if (!stream->hdr_static_metadata.valid)
+       if (!stream->hdr_static_metadata.valid ||
+                       stream->use_dynamic_meta)
                return;
 
        *info_packet = stream->hdr_static_metadata;
index 25fae38..9971b51 100644 (file)
@@ -53,6 +53,10 @@ static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init
        sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
        sink->converter_disable_audio = init_params->converter_disable_audio;
        sink->dc_container_id = NULL;
+       sink->sink_id = init_params->link->ctx->dc_sink_id_count;
+       // Increment dc_sink_id_count so that two distinct sinks never end up
+       // with the same ID.
+       init_params->link->ctx->dc_sink_id_count++;
 
        return true;
 }
index 68a71ad..815dfb5 100644 (file)
@@ -84,6 +84,17 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
        return plane_state;
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_plane_get_status
+ *
+ *  @brief
+ *     Looks up the pipe context of plane_state, updates that pipe context's
+ *     pending status, and then returns plane_state->status.
+ *
+ *  @param [in] plane_state: pointer to the plane_state to get the status of
+ *****************************************************************************
+ */
 const struct dc_plane_status *dc_plane_get_status(
                const struct dc_plane_state *plane_state)
 {
index 9cfde0c..7ebce76 100644 (file)
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.44"
+#define DC_VER "3.1.47"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
@@ -68,6 +68,7 @@ struct dc_caps {
        uint32_t max_planes;
        uint32_t max_downscale_ratio;
        uint32_t i2c_speed_in_khz;
+       uint32_t dmdata_alloc_size;
        unsigned int max_cursor_size;
        unsigned int max_video_width;
        int linear_pitch_alignment;
@@ -288,9 +289,7 @@ struct dc {
        bool apply_edp_fast_boot_optimization;
 
        /* FBC compressor */
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        struct compressor *fbc_compressor;
-#endif
 };
 
 enum frame_buffer_mode {
@@ -358,6 +357,7 @@ enum dc_transfer_func_type {
        TF_TYPE_PREDEFINED,
        TF_TYPE_DISTRIBUTED_POINTS,
        TF_TYPE_BYPASS,
+       TF_TYPE_HWPWL
 };
 
 struct dc_transfer_func_distributed_points {
@@ -377,16 +377,21 @@ enum dc_transfer_func_predefined {
        TRANSFER_FUNCTION_PQ,
        TRANSFER_FUNCTION_LINEAR,
        TRANSFER_FUNCTION_UNITY,
+       TRANSFER_FUNCTION_HLG,
+       TRANSFER_FUNCTION_HLG12
 };
 
 struct dc_transfer_func {
        struct kref refcount;
-       struct dc_transfer_func_distributed_points tf_pts;
        enum dc_transfer_func_type type;
        enum dc_transfer_func_predefined tf;
        /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
        uint32_t sdr_ref_white_level;
        struct dc_context *ctx;
+       union {
+               struct pwl_params pwl;
+               struct dc_transfer_func_distributed_points tf_pts;
+       };
 };
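[Editor's note] Since pwl and tf_pts now share storage in an anonymous union, only the member selected by type is meaningful at any time. A hedged sketch of the access discipline this implies; active_curve is illustrative, not an existing dc helper:

static const void *active_curve(const struct dc_transfer_func *tf)
{
	/* Dispatch on type before touching the union: pwl and tf_pts
	 * alias the same bytes, so reading the wrong one yields garbage.
	 */
	switch (tf->type) {
	case TF_TYPE_HWPWL:
		return &tf->pwl;	/* hardware PWL parameters */
	case TF_TYPE_DISTRIBUTED_POINTS:
		return &tf->tf_pts;	/* sampled curve points */
	default:
		return NULL;		/* PREDEFINED/BYPASS carry no curve payload */
	}
}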
 
 /*
@@ -661,9 +666,13 @@ struct dc_sink {
        struct dc_link *link;
        struct dc_context *ctx;
 
+       uint32_t sink_id;
+
        /* private to dc_sink.c */
+       // refcount must be the last member in dc_sink, since we want the
+       // sink structure to be logically cloneable up to (but not including)
+       // refcount
        struct kref refcount;
-
 };
 
 void dc_sink_retain(struct dc_sink *sink);
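[Editor's note] The comments above encode a layout contract: every field before refcount can be copied wholesale, and sink_id must then be re-issued from dc_sink_id_count. A hedged sketch of what that permits, assuming the usual <linux/kref.h>/<linux/string.h> helpers; dc_sink_clone_fields is hypothetical, not an existing dc API:

static void dc_sink_clone_fields(struct dc_sink *dst, const struct dc_sink *src)
{
	/* Bulk-copy everything up to (but not including) refcount... */
	memcpy(dst, src, offsetof(struct dc_sink, refcount));
	/* ...then give the clone its own lifetime and a fresh ID, so
	 * two distinct sinks never share a sink_id.
	 */
	kref_init(&dst->refcount);
	dst->sink_id = dst->ctx->dc_sink_id_count++;
}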
index e1affeb..05c8c31 100644 (file)
 #ifndef DC_DDC_TYPES_H_
 #define DC_DDC_TYPES_H_
 
+enum aux_transaction_type {
+       AUX_TRANSACTION_TYPE_DP,
+       AUX_TRANSACTION_TYPE_I2C
+};
+
+
+enum i2caux_transaction_action {
+       I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+       I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+       I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+       I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+       I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+       I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+enum aux_channel_operation_result {
+       AUX_CHANNEL_OPERATION_SUCCEEDED,
+       AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+       AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+       AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
+       AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+};
+
+
+struct aux_request_transaction_data {
+       enum aux_transaction_type type;
+       enum i2caux_transaction_action action;
+       /* 20-bit AUX channel transaction address */
+       uint32_t address;
+       /* delay, in 100-microsecond units */
+       uint8_t delay;
+       uint32_t length;
+       uint8_t *data;
+};
+
+enum aux_transaction_reply {
+       AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+       AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+       AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+       AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+       AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+       AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+       AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
+
+       AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+       enum aux_transaction_reply status;
+       uint32_t length;
+       uint8_t *data;
+};
+
 struct i2c_payload {
        bool write;
        uint8_t address;
@@ -109,7 +168,7 @@ struct ddc_service {
 
        uint32_t address;
        uint32_t edid_buf_len;
-       uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+       uint8_t edid_buf[DC_MAX_EDID_BUFFER_SIZE];
 };
 
 #endif /* DC_DDC_TYPES_H_ */
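[Editor's note] With these AUX types now in dc_ddc_types.h, a transaction can be described without including the i2caux engine headers. A hedged sketch filling a native AUX read of one byte from DPCD address 0x00000 (DPCD_REV); the helper and caller-owned buffer are illustrative:

static void fill_dpcd_rev_read(struct aux_request_transaction_data *req,
			       uint8_t *buf)
{
	req->type = AUX_TRANSACTION_TYPE_DP;		/* native AUX, not I2C-over-AUX */
	req->action = I2CAUX_TRANSACTION_ACTION_DP_READ;
	req->address = 0x00000;				/* 20-bit DPCD address */
	req->delay = 0;					/* in 100-microsecond units */
	req->length = 1;				/* one byte requested */
	req->data = buf;				/* reply lands here */
}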
index 90bccd5..da93ab4 100644 (file)
@@ -430,7 +430,7 @@ union test_request {
        struct {
        uint8_t LINK_TRAINING         :1;
        uint8_t LINK_TEST_PATTRN      :1;
-       uint8_t EDID_REAT             :1;
+       uint8_t EDID_READ             :1;
        uint8_t PHY_TEST_PATTERN      :1;
        uint8_t AUDIO_TEST_PATTERN    :1;
        uint8_t RESERVED              :1;
@@ -443,7 +443,8 @@ union test_response {
        struct {
                uint8_t ACK         :1;
                uint8_t NO_ACK      :1;
-               uint8_t RESERVED    :6;
+               uint8_t EDID_CHECKSUM_WRITE:1;
+               uint8_t RESERVED    :5;
        } bits;
        uint8_t raw;
 };
index b1f7057..d31023d 100644 (file)
@@ -567,25 +567,25 @@ struct scaling_taps {
 };
 
 enum dc_timing_standard {
-       TIMING_STANDARD_UNDEFINED,
-       TIMING_STANDARD_DMT,
-       TIMING_STANDARD_GTF,
-       TIMING_STANDARD_CVT,
-       TIMING_STANDARD_CVT_RB,
-       TIMING_STANDARD_CEA770,
-       TIMING_STANDARD_CEA861,
-       TIMING_STANDARD_HDMI,
-       TIMING_STANDARD_TV_NTSC,
-       TIMING_STANDARD_TV_NTSC_J,
-       TIMING_STANDARD_TV_PAL,
-       TIMING_STANDARD_TV_PAL_M,
-       TIMING_STANDARD_TV_PAL_CN,
-       TIMING_STANDARD_TV_SECAM,
-       TIMING_STANDARD_EXPLICIT,
+       DC_TIMING_STANDARD_UNDEFINED,
+       DC_TIMING_STANDARD_DMT,
+       DC_TIMING_STANDARD_GTF,
+       DC_TIMING_STANDARD_CVT,
+       DC_TIMING_STANDARD_CVT_RB,
+       DC_TIMING_STANDARD_CEA770,
+       DC_TIMING_STANDARD_CEA861,
+       DC_TIMING_STANDARD_HDMI,
+       DC_TIMING_STANDARD_TV_NTSC,
+       DC_TIMING_STANDARD_TV_NTSC_J,
+       DC_TIMING_STANDARD_TV_PAL,
+       DC_TIMING_STANDARD_TV_PAL_M,
+       DC_TIMING_STANDARD_TV_PAL_CN,
+       DC_TIMING_STANDARD_TV_SECAM,
+       DC_TIMING_STANDARD_EXPLICIT,
        /*!< For explicit timings from EDID, VBIOS, etc.*/
-       TIMING_STANDARD_USER_OVERRIDE,
+       DC_TIMING_STANDARD_USER_OVERRIDE,
        /*!< For mode timing override by user*/
-       TIMING_STANDARD_MAX
+       DC_TIMING_STANDARD_MAX
 };
 
 enum dc_color_depth {
index d7e6d53..af503e0 100644 (file)
@@ -59,6 +59,9 @@ struct dc_stream_state {
        struct freesync_context freesync_ctx;
 
        struct dc_info_packet hdr_static_metadata;
+       PHYSICAL_ADDRESS_LOC dmdata_address;
+       bool   use_dynamic_meta;
+
        struct dc_transfer_func *out_transfer_func;
        struct colorspace_transform gamut_remap_matrix;
        struct dc_csc_transform csc_color_matrix;
@@ -299,9 +302,4 @@ bool dc_stream_get_crtc_position(struct dc *dc,
                                 unsigned int *v_pos,
                                 unsigned int *nom_v_pos);
 
-void dc_stream_set_static_screen_events(struct dc *dc,
-                                       struct dc_stream_state **stream,
-                                       int num_streams,
-                                       const struct dc_static_screen_events *events);
-
 #endif /* DC_STREAM_H_ */
index 76df253..c96e526 100644 (file)
@@ -92,13 +92,12 @@ struct dc_context {
        bool created_bios;
        struct gpio_service *gpio_service;
        struct i2caux *i2caux;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+       uint32_t dc_sink_id_count;
        uint64_t fbc_gpu_addr;
-#endif
 };
 
 
-#define MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 512
 #define EDID_BLOCK_SIZE 128
 #define MAX_SURFACE_NUM 4
 #define NUM_PIXEL_FORMATS 10
@@ -137,13 +136,13 @@ enum plane_stereo_format {
  */
 
 enum dc_edid_connector_type {
-       EDID_CONNECTOR_UNKNOWN = 0,
-       EDID_CONNECTOR_ANALOG = 1,
-       EDID_CONNECTOR_DIGITAL = 10,
-       EDID_CONNECTOR_DVI = 11,
-       EDID_CONNECTOR_HDMIA = 12,
-       EDID_CONNECTOR_MDDI = 14,
-       EDID_CONNECTOR_DISPLAYPORT = 15
+       DC_EDID_CONNECTOR_UNKNOWN = 0,
+       DC_EDID_CONNECTOR_ANALOG = 1,
+       DC_EDID_CONNECTOR_DIGITAL = 10,
+       DC_EDID_CONNECTOR_DVI = 11,
+       DC_EDID_CONNECTOR_HDMIA = 12,
+       DC_EDID_CONNECTOR_MDDI = 14,
+       DC_EDID_CONNECTOR_DISPLAYPORT = 15
 };
 
 enum dc_edid_status {
@@ -169,7 +168,7 @@ struct dc_cea_audio_mode {
 
 struct dc_edid {
        uint32_t length;
-       uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+       uint8_t raw_edid[DC_MAX_EDID_BUFFER_SIZE];
 };
 
 /* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
index c0e813c..91642e6 100644 (file)
@@ -289,11 +289,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 
        struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-       if (REG(DP_DB_CNTL))
-               REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-#endif
-
        /* set pixel encoding */
        switch (crtc_timing->pixel_encoding) {
        case PIXEL_ENCODING_YCBCR422:
index e2994d3..df02701 100644 (file)
@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
        struct dce110_compressor *cp110,
        bool enabled)
 {
-       uint8_t counter = 0;
+       uint16_t counter = 0;
        uint32_t addr = mmFBC_STATUS;
        uint32_t value;
 
@@ -551,9 +551,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
        compressor->base.lpt_channels_num = 0;
        compressor->base.attached_inst = 0;
        compressor->base.is_enabled = false;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        compressor->base.funcs = &dce110_compressor_funcs;
 
-#endif
 }
 
index c29052b..353ffcb 100644 (file)
@@ -34,9 +34,7 @@
 #include "dce/dce_hwseq.h"
 #include "gpio_service_interface.h"
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
 #include "dce110_compressor.h"
-#endif
 
 #include "bios/bios_parser_helper.h"
 #include "timing_generator.h"
@@ -667,16 +665,25 @@ static enum dc_status bios_parser_crtc_source_select(
 
 void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
 {
+       bool is_hdmi;
+       bool is_dp;
+
        ASSERT(pipe_ctx->stream);
 
        if (pipe_ctx->stream_res.stream_enc == NULL)
                return;  /* this is not the root pipe */
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+       is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+       is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+       if (!is_hdmi && !is_dp)
+               return;
+
+       if (is_hdmi)
                pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
                        pipe_ctx->stream_res.stream_enc,
                        &pipe_ctx->stream_res.encoder_info_frame);
-       else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+       else
                pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
                        pipe_ctx->stream_res.stream_enc,
                        &pipe_ctx->stream_res.encoder_info_frame);
@@ -972,19 +979,35 @@ void hwss_edp_backlight_control(
                edp_receiver_ready_T9(link);
 }
 
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 {
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       struct dc_link *link = stream->sink->link;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+       /* notify the audio driver of the monitor's audio modes */
+       struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+       unsigned int i, num_audio = 1;
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
+       if (pipe_ctx->stream_res.audio) {
+               for (i = 0; i < MAX_PIPES; i++) {
+                       /*current_state not updated yet*/
+                       if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+                               num_audio++;
+               }
 
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
+               pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+               if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+                       /* this is the first audio stream; apply the PME workaround to wake AZ from D3 */
+                       pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+               /* un-mute audio */
+               /* TODO: audio should be per stream rather than per link */
+               pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+                       pipe_ctx->stream_res.stream_enc, false);
+       }
+}
+
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
 
        pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                        pipe_ctx->stream_res.stream_enc, true);
@@ -1015,7 +1038,23 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
                 * stream->stream_engine_id);
                 */
        }
+}
 
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+       struct dc_stream_state *stream = pipe_ctx->stream;
+       struct dc_link *link = stream->sink->link;
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       dc->hwss.disable_audio_stream(pipe_ctx, option);
 
        link->link_enc->funcs->connect_dig_be_to_fe(
                        link->link_enc,
@@ -1298,6 +1337,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
        struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
                        pipe_ctx[pipe_ctx->pipe_idx];
 
+       if (pipe_ctx->stream_res.audio != NULL) {
+               struct audio_output audio_output;
+
+               build_audio_output(context, pipe_ctx, &audio_output);
+
+               if (dc_is_dp_signal(pipe_ctx->stream->signal))
+                       pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+                                       pipe_ctx->stream_res.stream_enc,
+                                       pipe_ctx->stream_res.audio->inst,
+                                       &pipe_ctx->stream->audio_info);
+               else
+                       pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+                                       pipe_ctx->stream_res.stream_enc,
+                                       pipe_ctx->stream_res.audio->inst,
+                                       &pipe_ctx->stream->audio_info,
+                                       &audio_output.crtc_info);
+
+               pipe_ctx->stream_res.audio->funcs->az_configure(
+                               pipe_ctx->stream_res.audio,
+                               pipe_ctx->stream->signal,
+                               &audio_output.crtc_info,
+                               &pipe_ctx->stream->audio_info);
+       }
+
        /*  */
        dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
 
@@ -1441,10 +1504,8 @@ static void power_down_all_hw_blocks(struct dc *dc)
 
        power_down_clock_sources(dc);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        if (dc->fbc_compressor)
                dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
 }
 
 static void disable_vga_and_power_gate_all_controllers(
@@ -1686,9 +1747,7 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
        if (events->force_trigger)
                value |= 0x1;
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        value |= 0x84;
-#endif
 
        for (i = 0; i < num_pipes; i++)
                pipe_ctx[i]->stream_res.tg->funcs->
@@ -1816,8 +1875,6 @@ static void apply_min_clocks(
        }
 }
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
-
 /*
  *  Check if FBC can be enabled
  */
@@ -1896,7 +1953,6 @@ static void enable_fbc(struct dc *dc,
                compr->funcs->enable_fbc(compr, &params);
        }
 }
-#endif
 
 static void dce110_reset_hw_ctx_wrap(
                struct dc *dc,
@@ -1949,6 +2005,86 @@ static void dce110_reset_hw_ctx_wrap(
        }
 }
 
+static void dce110_setup_audio_dto(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       int i;
+
+       /* Program the audio wall clock (the audio rate clock source). Use
+        * HDMI as the clock source if any HDMI audio is active; otherwise use
+        * DP. First loop to find an HDMI audio pipe; if none is found, loop
+        * again looking for DP audio.
+        *
+        * Issue:
+        * Audio lagged on a DP monitor after an HDMI monitor was unplugged.
+        *
+        * Cause:
+        * With both DP and HDMI connected, or HDMI only, DCCG_AUDIO_DTO_SEL
+        * may be set to either dto0 or dto1 and audio works fine. With only
+        * DP connected, DCCG_AUDIO_DTO_SEL must be dto1; selecting dto0
+        * causes audio lag.
+        *
+        * Solution:
+        * The audio wall DTO setup is not yet optimized. On mode set, iterate
+        * over pipe_ctx, find the first available pipe with audio, and set up
+        * the audio wall DTO per topology instead of per pipe.
+        */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == NULL)
+                       continue;
+
+               if (pipe_ctx->top_pipe)
+                       continue;
+
+               if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+                       continue;
+
+               if (pipe_ctx->stream_res.audio != NULL) {
+                       struct audio_output audio_output;
+
+                       build_audio_output(context, pipe_ctx, &audio_output);
+
+                       pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+                               pipe_ctx->stream_res.audio,
+                               pipe_ctx->stream->signal,
+                               &audio_output.crtc_info,
+                               &audio_output.pll_info);
+                       break;
+               }
+       }
+
+       /* no HDMI audio is found, try DP audio */
+       if (i == dc->res_pool->pipe_count) {
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->stream == NULL)
+                               continue;
+
+                       if (pipe_ctx->top_pipe)
+                               continue;
+
+                       if (!dc_is_dp_signal(pipe_ctx->stream->signal))
+                               continue;
+
+                       if (pipe_ctx->stream_res.audio != NULL) {
+                               struct audio_output audio_output;
+
+                               build_audio_output(context, pipe_ctx, &audio_output);
+
+                               pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+                                       pipe_ctx->stream_res.audio,
+                                       pipe_ctx->stream->signal,
+                                       &audio_output.crtc_info,
+                                       &audio_output.pll_info);
+                               break;
+                       }
+               }
+       }
+}
 
 enum dc_status dce110_apply_ctx_to_hw(
                struct dc *dc,
@@ -1993,10 +2129,9 @@ enum dc_status dce110_apply_ctx_to_hw(
 
        set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        if (dc->fbc_compressor)
                dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
+
        /*TODO: when pplib works*/
        apply_min_clocks(dc, context, &clocks_state, true);
 
@@ -2040,79 +2175,8 @@ enum dc_status dce110_apply_ctx_to_hw(
                                dc->res_pool->display_clock,
                                context->bw.dce.dispclk_khz * 115 / 100);
        }
-       /* program audio wall clock. use HDMI as clock source if HDMI
-        * audio active. Otherwise, use DP as clock source
-        * first, loop to find any HDMI audio, if not, loop find DP audio
-        */
-       /* Setup audio rate clock source */
-       /* Issue:
-       * Audio lag happened on DP monitor when unplug a HDMI monitor
-       *
-       * Cause:
-       * In case of DP and HDMI connected or HDMI only, DCCG_AUDIO_DTO_SEL
-       * is set to either dto0 or dto1, audio should work fine.
-       * In case of DP connected only, DCCG_AUDIO_DTO_SEL should be dto1,
-       * set to dto0 will cause audio lag.
-       *
-       * Solution:
-       * Not optimized audio wall dto setup. When mode set, iterate pipe_ctx,
-       * find first available pipe with audio, setup audio wall DTO per topology
-       * instead of per pipe.
-       */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe_ctx->stream == NULL)
-                       continue;
-
-               if (pipe_ctx->top_pipe)
-                       continue;
-
-               if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
-                       continue;
-
-               if (pipe_ctx->stream_res.audio != NULL) {
-                       struct audio_output audio_output;
-
-                       build_audio_output(context, pipe_ctx, &audio_output);
-
-                       pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
-                               pipe_ctx->stream_res.audio,
-                               pipe_ctx->stream->signal,
-                               &audio_output.crtc_info,
-                               &audio_output.pll_info);
-                       break;
-               }
-       }
 
-       /* no HDMI audio is found, try DP audio */
-       if (i == dc->res_pool->pipe_count) {
-               for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-                       if (pipe_ctx->stream == NULL)
-                               continue;
-
-                       if (pipe_ctx->top_pipe)
-                               continue;
-
-                       if (!dc_is_dp_signal(pipe_ctx->stream->signal))
-                               continue;
-
-                       if (pipe_ctx->stream_res.audio != NULL) {
-                               struct audio_output audio_output;
-
-                               build_audio_output(context, pipe_ctx, &audio_output);
-
-                               pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
-                                       pipe_ctx->stream_res.audio,
-                                       pipe_ctx->stream->signal,
-                                       &audio_output.crtc_info,
-                                       &audio_output.pll_info);
-                               break;
-                       }
-               }
-       }
+       dce110_setup_audio_dto(dc, context);
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx_old =
@@ -2131,31 +2195,6 @@ enum dc_status dce110_apply_ctx_to_hw(
                if (pipe_ctx->top_pipe)
                        continue;
 
-               if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
-
-                       struct audio_output audio_output;
-
-                       build_audio_output(context, pipe_ctx, &audio_output);
-
-                       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-                               pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
-                                               pipe_ctx->stream_res.stream_enc,
-                                               pipe_ctx->stream_res.audio->inst,
-                                               &pipe_ctx->stream->audio_info);
-                       else
-                               pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
-                                               pipe_ctx->stream_res.stream_enc,
-                                               pipe_ctx->stream_res.audio->inst,
-                                               &pipe_ctx->stream->audio_info,
-                                               &audio_output.crtc_info);
-
-                       pipe_ctx->stream_res.audio->funcs->az_configure(
-                                       pipe_ctx->stream_res.audio,
-                                       pipe_ctx->stream->signal,
-                                       &audio_output.crtc_info,
-                                       &pipe_ctx->stream->audio_info);
-               }
-
                status = apply_single_controller_ctx_to_hw(
                                pipe_ctx,
                                context,
@@ -2170,12 +2209,9 @@ enum dc_status dce110_apply_ctx_to_hw(
 
        dcb->funcs->set_scratch_critical_state(dcb, false);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        if (dc->fbc_compressor)
                enable_fbc(dc, context);
 
-#endif
-
        return DC_OK;
 }
 
@@ -2490,10 +2526,9 @@ static void init_hw(struct dc *dc)
                abm->funcs->init_backlight(abm);
                abm->funcs->abm_init(abm);
        }
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
        if (dc->fbc_compressor)
                dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
-#endif
 
 }
 
@@ -2679,9 +2714,7 @@ static void dce110_program_front_end_for_pipe(
        struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct xfm_grph_csc_adjustment adjust;
        struct out_csc_color_matrix tbl_entry;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
-#endif
        unsigned int i;
        DC_LOGGER_INIT();
        memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2722,7 +2755,6 @@ static void dce110_program_front_end_for_pipe(
 
        program_scaler(dc, pipe_ctx);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        /* fbc not applicable on Underlay pipe */
        if (dc->fbc_compressor && old_pipe->stream &&
            pipe_ctx->pipe_idx != underlay_idx) {
@@ -2731,7 +2763,6 @@ static void dce110_program_front_end_for_pipe(
                else
                        enable_fbc(dc, dc->current_state);
        }
-#endif
 
        mi->funcs->mem_input_program_surface_config(
                        mi,
@@ -2968,6 +2999,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .disable_stream = dce110_disable_stream,
        .unblank_stream = dce110_unblank_stream,
        .blank_stream = dce110_blank_stream,
+       .enable_audio_stream = dce110_enable_audio_stream,
+       .disable_audio_stream = dce110_disable_audio_stream,
        .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
        .enable_display_power_gating = dce110_enable_display_power_gating,
        .disable_plane = dce110_power_down_fe,
index 5d7e9f5..f48d5a6 100644 (file)
@@ -49,6 +49,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
                struct dc_link_settings *link_settings);
 
 void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
+
 void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
index ee33786..20c0290 100644 (file)
@@ -54,9 +54,8 @@
 
 #define DC_LOGGER \
                dc->ctx->logger
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
 #include "dce110/dce110_compressor.h"
-#endif
 
 #include "reg_helper.h"
 
@@ -1267,12 +1266,8 @@ static bool construct(
                }
        }
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        dc->fbc_compressor = dce110_compressor_create(ctx);
 
-
-
-#endif
        if (!underlay_create(ctx, &pool->base))
                goto res_create_fail;
 
index f8e0576..03eb736 100644 (file)
@@ -719,19 +719,7 @@ static void reset_back_end_for_pipe(
                if (!pipe_ctx->stream->dpms_off)
                        core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
                else if (pipe_ctx->stream_res.audio) {
-                       /*
-                        * if stream is already disabled outside of commit streams path,
-                        * audio disable was skipped. Need to do it here
-                        */
-                       pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
-                       if (dc->caps.dynamic_audio == true) {
-                               /*we have to dynamic arbitrate the audio endpoints*/
-                               pipe_ctx->stream_res.audio = NULL;
-                               /*we free the resource, need reset is_audio_acquired*/
-                               update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
-                       }
-
+                       dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
                }
 
        }
@@ -2063,12 +2051,13 @@ static void update_dchubp_dpp(
 
 static void dcn10_blank_pixel_data(
                struct dc *dc,
-               struct stream_resource *stream_res,
-               struct dc_stream_state *stream,
+               struct pipe_ctx *pipe_ctx,
                bool blank)
 {
        enum dc_color_space color_space;
        struct tg_color black_color = {0};
+       struct stream_resource *stream_res = &pipe_ctx->stream_res;
+       struct dc_stream_state *stream = pipe_ctx->stream;
 
        /* program otg blank color */
        color_space = stream->output_color_space;
@@ -2127,8 +2116,7 @@ static void program_all_pipe_in_tree(
                pipe_ctx->stream_res.tg->funcs->program_global_sync(
                                pipe_ctx->stream_res.tg);
 
-               dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
-                               pipe_ctx->stream, blank);
+               dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
        }
 
        if (pipe_ctx->plane_state != NULL) {
@@ -2247,7 +2235,7 @@ static void dcn10_apply_ctx_for_surface(
 
        if (num_planes == 0) {
                /* OTG blank before remove all front end */
-               dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+               dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
        }
 
        /* Disconnect unused mpcc */
@@ -2778,6 +2766,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .disable_stream = dce110_disable_stream,
        .unblank_stream = dce110_unblank_stream,
        .blank_stream = dce110_blank_stream,
+       .enable_audio_stream = dce110_enable_audio_stream,
+       .disable_audio_stream = dce110_disable_audio_stream,
        .enable_display_power_gating = dcn10_dummy_display_power_gating,
        .disable_plane = dcn10_disable_plane,
        .blank_pixel_data = dcn10_blank_pixel_data,
index f2fbce0..e6a3ade 100644 (file)
@@ -1257,6 +1257,37 @@ void optc1_read_otg_state(struct optc *optc1,
                        OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
 }
 
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+               uint32_t *otg_active_width,
+               uint32_t *otg_active_height)
+{
+       uint32_t otg_enabled;
+       uint32_t v_blank_start;
+       uint32_t v_blank_end;
+       uint32_t h_blank_start;
+       uint32_t h_blank_end;
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_GET(OTG_CONTROL,
+                       OTG_MASTER_EN, &otg_enabled);
+
+       if (otg_enabled == 0)
+               return false;
+
+       REG_GET_2(OTG_V_BLANK_START_END,
+                       OTG_V_BLANK_START, &v_blank_start,
+                       OTG_V_BLANK_END, &v_blank_end);
+
+       REG_GET_2(OTG_H_BLANK_START_END,
+                       OTG_H_BLANK_START, &h_blank_start,
+                       OTG_H_BLANK_END, &h_blank_end);
+
+       *otg_active_width = h_blank_start - h_blank_end;
+       *otg_active_height = v_blank_start - v_blank_end;
+       return true;
+}
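[Editor's note] Because the OTG may be disabled, the boolean return has to gate any use of the out-parameters. A hedged usage sketch; the wrapper function is illustrative, though DC_LOG_SCALER and the funcs hook match the declarations elsewhere in this patch:

static void log_otg_active_size(struct timing_generator *tg)
{
	uint32_t width = 0, height = 0;

	/* Only trust width/height when the OTG master is enabled. */
	if (tg->funcs->get_otg_active_size &&
	    tg->funcs->get_otg_active_size(tg, &width, &height))
		DC_LOG_SCALER("OTG active size: %ux%u\n", width, height);
}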
+
 void optc1_clear_optc_underflow(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1305,6 +1336,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
                .get_position = optc1_get_position,
                .get_frame_count = optc1_get_vblank_counter,
                .get_scanoutpos = optc1_get_crtc_scanoutpos,
+               .get_otg_active_size = optc1_get_otg_active_size,
                .set_early_control = optc1_set_early_control,
                /* used by enable_timing_synchronization. Not need for FPGA */
                .wait_for_state = optc1_wait_for_state,
index c62052f..59ed272 100644 (file)
@@ -507,4 +507,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
 
 void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
 
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+               uint32_t *otg_active_width,
+               uint32_t *otg_active_height);
+
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
index df5cb2d..2da325c 100644 (file)
@@ -417,6 +417,7 @@ static const struct dce110_clk_src_mask cs_mask = {
 
 static const struct resource_caps res_cap = {
                .num_timing_generator = 4,
+               .num_opp = 4,
                .num_video_plane = 4,
                .num_audio = 4,
                .num_stream_encoder = 4,
@@ -1004,7 +1005,8 @@ static bool construct(
 
        ctx->dc_bios->regs = &bios_regs;
 
        pool->base.res_cap = &res_cap;
+
        pool->base.funcs = &dcn10_res_pool_funcs;
 
        /*
index c928ee4..6f9078f 100644 (file)
@@ -257,20 +257,18 @@ void enc1_stream_encoder_dp_set_stream_attribute(
        uint8_t colorimetry_bpc;
        uint8_t dynamic_range_rgb = 0; /*full range*/
        uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+       uint8_t dp_pixel_encoding = 0;
+       uint8_t dp_component_depth = 0;
 
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-       REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-
        /* set pixel encoding */
        switch (crtc_timing->pixel_encoding) {
        case PIXEL_ENCODING_YCBCR422:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_YCBCR422);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
                break;
        case PIXEL_ENCODING_YCBCR444:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_YCBCR444);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
 
                if (crtc_timing->flags.Y_ONLY)
                        if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -278,8 +276,8 @@ void enc1_stream_encoder_dp_set_stream_attribute(
                                 * Color depth of Y-only could be
                                 * 8, 10, 12, 16 bits
                                 */
-                               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                                               DP_PIXEL_ENCODING_TYPE_Y_ONLY);
+                               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
+
                /* Note: DP_MSA_MISC1 bit 7 is the indicator
                 * of Y-only mode.
                 * This bit is set in HW if register
@@ -287,48 +285,55 @@ void enc1_stream_encoder_dp_set_stream_attribute(
                 */
                break;
        case PIXEL_ENCODING_YCBCR420:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_YCBCR420);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
                REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
                break;
        default:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_RGB444);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
                break;
        }
 
        misc1 = REG_READ(DP_MSA_MISC);
+       /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+        * When MISC1, bit 6, is set to 1, a Source device uses a VSC SDP to indicate the
+        * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+        * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+        */
+       if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+                       (output_color_space == COLOR_SPACE_2020_YCBCR) ||
+                       (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
+                       (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+               misc1 = misc1 | 0x40;
+       else
+               misc1 = misc1 & ~0x40;
 
        /* set color depth */
-
        switch (crtc_timing->display_color_depth) {
        case COLOR_DEPTH_666:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               0);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
                break;
        case COLOR_DEPTH_888:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_8BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
                break;
        case COLOR_DEPTH_101010:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_10BPC);
-
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
                break;
        case COLOR_DEPTH_121212:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_12BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
                break;
        case COLOR_DEPTH_161616:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_16BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
                break;
        default:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_6BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
                break;
        }
 
+       /* Set DP pixel encoding and component depth */
+       REG_UPDATE_2(DP_PIXEL_FORMAT,
+                       DP_PIXEL_ENCODING, dp_pixel_encoding,
+                       DP_COMPONENT_DEPTH, dp_component_depth);
+
        /* set dynamic range and YCbCr range */
 
        switch (crtc_timing->display_color_depth) {
@@ -354,7 +359,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 
        switch (output_color_space) {
        case COLOR_SPACE_SRGB:
-               misc0 = misc0 | 0x0;
                misc1 = misc1 & ~0x80; /* bit7 = 0*/
                dynamic_range_rgb = 0; /*full range*/
                break;
@@ -1087,27 +1091,6 @@ static union audio_cea_channels speakers_to_channels(
        return cea_channels;
 }
 
-static uint32_t calc_max_audio_packets_per_line(
-       const struct audio_crtc_info *crtc_info)
-{
-       uint32_t max_packets_per_line;
-
-       max_packets_per_line =
-               crtc_info->h_total - crtc_info->h_active;
-
-       if (crtc_info->pixel_repetition)
-               max_packets_per_line *= crtc_info->pixel_repetition;
-
-       /* for other hdmi features */
-       max_packets_per_line -= 58;
-       /* for Control Period */
-       max_packets_per_line -= 16;
-       /* Number of Audio Packets per Line */
-       max_packets_per_line /= 32;
-
-       return max_packets_per_line;
-}
-
 static void get_audio_clock_info(
        enum dc_color_depth color_depth,
        uint32_t crtc_pixel_clock_in_khz,
@@ -1201,16 +1184,9 @@ static void enc1_se_setup_hdmi_audio(
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
        struct audio_clock_info audio_clock_info = {0};
-       uint32_t max_packets_per_line;
-
-       /* For now still do calculation, although this field is ignored when
-        * above HDMI_PACKET_GEN_VERSION set to 1
-        */
-       max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
 
        /* HDMI_AUDIO_PACKET_CONTROL */
-       REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
-                       HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+       REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
                        HDMI_AUDIO_DELAY_EN, 1);
 
        /* AFMT_AUDIO_PACKET_CONTROL */
index 4ff9b2b..eb5ab39 100644 (file)
@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
 #define dm_log_to_buffer(buffer, size, fmt, args)\
        vsnprintf(buffer, size, fmt, args)
 
-unsigned long long dm_get_timestamp(struct dc_context *ctx);
+static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+       return ktime_get_raw_ns();
+}
 
 unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
                unsigned long long current_time_stamp,
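[Editor's note] A hedged sketch of the intended pairing, assuming dm_get_elapse_time_in_ns() takes the earlier timestamp as the final parameter of the declaration cut off above; the probe function and log macro choice are illustrative:

static void probe_duration(struct dc_context *ctx)
{
	/* dm_get_timestamp() now returns raw monotonic nanoseconds. */
	unsigned long long start = dm_get_timestamp(ctx);

	/* ... work being measured ... */

	DC_LOG_WARNING("took %llu ns\n",
		       dm_get_elapse_time_in_ns(ctx, dm_get_timestamp(ctx), start));
}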
index bb526ad..1d73096 100644 (file)
@@ -157,6 +157,10 @@ static void process_read_reply(
                        ctx->operation_succeeded = false;
                }
        break;
+       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
@@ -215,6 +219,10 @@ static void process_read_request(
                         * so we should not wait here */
                }
        break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
@@ -370,6 +378,10 @@ static void process_write_reply(
                        ctx->operation_succeeded = false;
                }
        break;
+       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
@@ -422,6 +434,10 @@ static void process_write_request(
                         * so we should not wait here */
                }
        break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
index 8e71324..b01488f 100644 (file)
 #ifndef __DAL_AUX_ENGINE_H__
 #define __DAL_AUX_ENGINE_H__
 
-enum aux_transaction_type {
-       AUX_TRANSACTION_TYPE_DP,
-       AUX_TRANSACTION_TYPE_I2C
-};
-
-struct aux_request_transaction_data {
-       enum aux_transaction_type type;
-       enum i2caux_transaction_action action;
-       /* 20-bit AUX channel transaction address */
-       uint32_t address;
-       /* delay, in 100-microsecond units */
-       uint8_t delay;
-       uint32_t length;
-       uint8_t *data;
-};
-
-enum aux_transaction_reply {
-       AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
-       AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
-       AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
-
-       AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
-       AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
-       AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
-
-       AUX_TRANSACTION_REPLY_INVALID = 0xFF
-};
-
-struct aux_reply_transaction_data {
-       enum aux_transaction_reply status;
-       uint32_t length;
-       uint8_t *data;
-};
-
-enum aux_channel_operation_result {
-       AUX_CHANNEL_OPERATION_SUCCEEDED,
-       AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
-       AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
-       AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
-};
+#include "dc_ddc_types.h"
 
 struct aux_engine;
 
index 5f47f6c..2b927f2 100644 (file)
@@ -198,27 +198,27 @@ static void submit_channel_request(
                ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
                ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
                 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+       if (REG(AUXN_IMPCAL)) {
+               /* clear_aux_error */
+               REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+                               1,
+                               0);
 
-       /* clear_aux_error */
-       REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
-                       1,
-                       0);
-
-       REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
-                       1,
-                       0);
-
-       /* force_default_calibrate */
-       REG_UPDATE_1BY1_2(AUXN_IMPCAL,
-                       AUXN_IMPCAL_ENABLE, 1,
-                       AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+                               1,
+                               0);
 
-       /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+               /* force_default_calibrate */
+               REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+                               AUXN_IMPCAL_ENABLE, 1,
+                               AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
 
-       REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
-                       1,
-                       0);
+               /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP only toggles OVERRIDE? */
 
+               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+                               1,
+                               0);
+       }
        /* set the delay and the number of bytes to write */
 
        /* The length include
@@ -291,6 +291,12 @@ static void process_channel_reply(
        value = REG_GET(AUX_SW_STATUS,
                        AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
 
+       /* in case HPD is LOW, exit AUX transaction */
+       if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+               reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
+               return;
+       }
+
        if (bytes_replied) {
                uint32_t reply_result;
 
@@ -347,8 +353,10 @@ static void process_channel_reply(
                 * because there was surely an error asserted that should
                 * have been handled;
                 * for the hot-plug case, this can happen */
-               if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+               if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+                       reply->status = AUX_TRANSACTION_REPLY_INVALID;
                        ASSERT_CRITICAL(false);
+               }
        }
 }
 
@@ -371,6 +379,10 @@ static enum aux_channel_operation_result get_channel_status(
        value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
                                10, aux110->timeout_period/10);
 
+       /* in case HPD is LOW, exit AUX transaction */
+       if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+               return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
        /* Note that the following bits are set in 'status.bits'
         * during CTS 4.2.1.2 (FW 3.3.1):
         * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
@@ -402,10 +414,10 @@ static enum aux_channel_operation_result get_channel_status(
                        return AUX_CHANNEL_OPERATION_SUCCEEDED;
                }
        } else {
-               /*time_elapsed >= aux_engine->timeout_period */
-               if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
-                       ASSERT_CRITICAL(false);
-
+               /* time_elapsed >= aux_engine->timeout_period;
+                * AUX_SW_STATUS__AUX_SW_HPD_DISCON was already handled above,
+                * so an unconditional assert is safe at this point.
+                */
+               ASSERT_CRITICAL(false);
                return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
        }
 }
index 33de8a8..1e8a158 100644 (file)
@@ -26,6 +26,8 @@
 #ifndef __DAL_ENGINE_H__
 #define __DAL_ENGINE_H__
 
+#include "dc_ddc_types.h"
+
 enum i2caux_transaction_operation {
        I2CAUX_TRANSACTION_READ,
        I2CAUX_TRANSACTION_WRITE
@@ -53,7 +55,8 @@ enum i2caux_transaction_status {
        I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
        I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
        I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
-       I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+       I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+       I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
 };
 
 struct i2caux_transaction_request {
@@ -75,19 +78,6 @@ enum i2c_default_speed {
        I2CAUX_DEFAULT_I2C_SW_SPEED = 50
 };
 
-enum i2caux_transaction_action {
-       I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
-       I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
-       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
-
-       I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
-       I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
-       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
-
-       I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
-       I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
-};
-
 struct engine;
 
 struct engine_funcs {
index a94942d..4beddca 100644 (file)
@@ -148,6 +148,7 @@ struct resource_pool {
        unsigned int underlay_pipe_index;
        unsigned int stream_enc_count;
        unsigned int ref_clock_inKhz;
+       unsigned int dentist_vco_freq_khz;
        unsigned int timing_generator_count;
 
        /*
index 69cb0a1..af700c7 100644 (file)
@@ -156,6 +156,9 @@ struct timing_generator_funcs {
                uint32_t *v_blank_end,
                uint32_t *h_position,
                uint32_t *v_position);
+       bool (*get_otg_active_size)(struct timing_generator *optc,
+                       uint32_t *otg_active_width,
+                       uint32_t *otg_active_height);
        void (*set_early_control)(struct timing_generator *tg,
                                                           uint32_t early_cntl);
        void (*wait_for_state)(struct timing_generator *tg,
index 63fc6c4..a71770e 100644 (file)
@@ -154,14 +154,18 @@ struct hw_sequencer_funcs {
                        struct dc_link_settings *link_settings);
 
        void (*blank_stream)(struct pipe_ctx *pipe_ctx);
+
+       void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+       void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
+
        void (*pipe_control_lock)(
                                struct dc *dc,
                                struct pipe_ctx *pipe,
                                bool lock);
        void (*blank_pixel_data)(
                        struct dc *dc,
-                       struct stream_resource *stream_res,
-                       struct dc_stream_state *stream,
+                       struct pipe_ctx *pipe_ctx,
                        bool blank);
 
        void (*set_bandwidth)(
index 640a647..e92facb 100644 (file)
@@ -38,6 +38,7 @@ enum dce_version resource_parse_asic_id(
 
 struct resource_caps {
        int num_timing_generator;
+       int num_opp;
        int num_video_plane;
        int num_audio;
        int num_stream_encoder;
index 019e7a0..d968956 100644 (file)
@@ -40,7 +40,8 @@ enum ddc_result {
        DDC_RESULT_FAILED_INCOMPLETE,
        DDC_RESULT_FAILED_OPERATION,
        DDC_RESULT_FAILED_INVALID_OPERATION,
-       DDC_RESULT_FAILED_BUFFER_OVERFLOW
+       DDC_RESULT_FAILED_BUFFER_OVERFLOW,
+       DDC_RESULT_FAILED_HPD_DISCON
 };
 
 enum ddc_service_type {
index a981b3e..52a7333 100644 (file)
 #ifndef __DAL_FIXED31_32_H__
 #define __DAL_FIXED31_32_H__
 
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
 #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
 #ifndef LLONG_MIN
 #define LLONG_MIN (1LL<<63)
index dc98d6d..0f10ed7 100644 (file)
@@ -62,6 +62,8 @@ void dm_logger_append_va(
                const char *msg,
                va_list args);
 
+void dm_logger_append_heading(struct log_entry *entry);
+
 void dm_logger_open(
                struct dal_logger *logger,
                struct log_entry *entry,
index eee0dfa..98edaef 100644 (file)
@@ -131,6 +131,63 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
                        dc_fixpt_div(dc_fixpt_one, m1));
 
 }
+
+/* degamma: nonlinear to linear */
+static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+       struct fixed31_32 a;
+       struct fixed31_32 b;
+       struct fixed31_32 c;
+       struct fixed31_32 threshold;
+       struct fixed31_32 reference_white_level;
+
+       a = dc_fixpt_from_fraction(17883277, 100000000);
+       if (is_light0_12) {
+               /*light 0-12*/
+               b = dc_fixpt_from_fraction(28466892, 100000000);
+               c = dc_fixpt_from_fraction(55991073, 100000000);
+               threshold = dc_fixpt_one;
+               reference_white_level = dc_fixpt_half;
+       } else {
+               /*light 0-1*/
+               b = dc_fixpt_from_fraction(2372241, 100000000);
+               c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+               threshold = dc_fixpt_from_fraction(1, 12);
+               reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
+       }
+       if (dc_fixpt_lt(threshold, in_x))
+               *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
+       else
+               *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
+}
+
+/* regamma: linear to nonlinear */
+static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+       struct fixed31_32 a;
+       struct fixed31_32 b;
+       struct fixed31_32 c;
+       struct fixed31_32 reference_white_level;
+
+       a = dc_fixpt_from_fraction(17883277, 100000000);
+       if (is_light0_12) {
+               /*light 0-12*/
+               b = dc_fixpt_from_fraction(28466892, 100000000);
+               c = dc_fixpt_from_fraction(55991073, 100000000);
+               reference_white_level = dc_fixpt_from_fraction(4, 1);
+       } else {
+               /*light 0-1*/
+               b = dc_fixpt_from_fraction(2372241, 100000000);
+               c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+               reference_white_level = dc_fixpt_from_fraction(1, 3);
+       }
+       if (dc_fixpt_lt(dc_fixpt_half, in_x))
+               *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
+       else
+               *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
+}
+
+
 /* one-time pre-compute PQ values - only for sdr_white_level 80 */
 void precompute_pq(void)
 {
@@ -691,6 +748,48 @@ static void build_degamma(struct pwl_float_data_ex *curve,
        }
 }
 
+static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
+               uint32_t hw_points_num,
+               const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+       uint32_t i;
+
+       struct pwl_float_data_ex *rgb = degamma;
+       const struct hw_x_point *coord_x = coordinate_x;
+
+       i = 0;
+
+       while (i != hw_points_num + 1) {
+               compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
+               rgb->g = rgb->r;
+               rgb->b = rgb->r;
+               ++coord_x;
+               ++rgb;
+               ++i;
+       }
+}
+
+static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
+               uint32_t hw_points_num,
+               const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+       uint32_t i;
+
+       struct pwl_float_data_ex *rgb = regamma;
+       const struct hw_x_point *coord_x = coordinate_x;
+
+       i = 0;
+
+       while (i != hw_points_num + 1) {
+               compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
+               rgb->g = rgb->r;
+               rgb->b = rgb->r;
+               ++coord_x;
+               ++rgb;
+               ++i;
+       }
+}
+
 static void scale_gamma(struct pwl_float_data *pwl_rgb,
                const struct dc_gamma *ramp,
                struct dividers dividers)
@@ -1622,6 +1721,25 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                ret = true;
 
                kvfree(rgb_regamma);
+       } else if (trans == TRANSFER_FUNCTION_HLG ||
+               trans == TRANSFER_FUNCTION_HLG12) {
+               rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+                                      (MAX_HW_POINTS + _EXTRA_POINTS),
+                                      GFP_KERNEL);
+               if (!rgb_regamma)
+                       goto rgb_regamma_alloc_fail;
+
+               build_hlg_regamma(rgb_regamma,
+                               MAX_HW_POINTS,
+                               coordinates_x,
+                               trans == TRANSFER_FUNCTION_HLG12);
+               for (i = 0; i <= MAX_HW_POINTS; i++) {
+                       points->red[i]    = rgb_regamma[i].r;
+                       points->green[i]  = rgb_regamma[i].g;
+                       points->blue[i]   = rgb_regamma[i].b;
+               }
+               ret = true;
+               kvfree(rgb_regamma);
        }
 rgb_regamma_alloc_fail:
        return ret;
@@ -1682,6 +1800,25 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                ret = true;
 
                kvfree(rgb_degamma);
+       } else if (trans == TRANSFER_FUNCTION_HLG ||
+               trans == TRANSFER_FUNCTION_HLG12) {
+               rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+                                      (MAX_HW_POINTS + _EXTRA_POINTS),
+                                      GFP_KERNEL);
+               if (!rgb_degamma)
+                       goto rgb_degamma_alloc_fail;
+
+               build_hlg_degamma(rgb_degamma,
+                               MAX_HW_POINTS,
+                               coordinates_x,
+                               trans == TRANSFER_FUNCTION_HLG12);
+               for (i = 0; i <= MAX_HW_POINTS; i++) {
+                       points->red[i]    = rgb_degamma[i].r;
+                       points->green[i]  = rgb_degamma[i].g;
+                       points->blue[i]   = rgb_degamma[i].b;
+               }
+               ret = true;
+               kvfree(rgb_degamma);
        }
        points->end_exponent = 0;
        points->x_point_at_y1_red = 1;
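
The fixed-point fractions in compute_hlg_oetf()/compute_hlg_eotf() above are the BT.2100 HLG coefficients a = 0.17883277, b = 0.28466892, c = 0.55991073 (the "light 0-1" branch pre-folds b and c to absorb the 12x rescale). A double-precision reference of the OETF, handy for sanity-checking the fixed-point path; a sketch, not driver code:

#include <math.h>

/* BT.2100 HLG OETF for normalized scene light e in [0, 1] */
static double hlg_oetf_ref(double e)
{
	const double a = 0.17883277;
	const double b = 0.28466892;
	const double c = 0.55991073;

	if (e <= 1.0 / 12.0)
		return sqrt(3.0 * e);		/* square-root segment */
	return a * log(12.0 * e - b) + c;	/* logarithmic segment */
}

The two segments meet continuously at e = 1/12 (both sides give 0.5) and the log segment reaches 1.0 at e = 1, matching the threshold and reference_white_level values used in the fixed-point code.
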
index 710852a..3d4c1b1 100644 (file)
@@ -29,7 +29,7 @@
 #include "core_types.h"
 
 #define DAL_STATS_ENABLE_REGKEY                        "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT                0x00000001
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT                0x00000000
 #define DAL_STATS_ENABLE_REGKEY_ENABLED                0x00000001
 
 #define DAL_STATS_ENTRIES_REGKEY               "DalStatsEntries"
@@ -238,7 +238,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
        for (int i = 0; i < core_stats->entry_id; i++) {
                if (event_index < core_stats->event_index &&
                                i == events[event_index].entry_id) {
-                       DISPLAY_STATS("%s\n", events[event_index].event_string);
+                       DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
                        event_index++;
                } else if (time_index < core_stats->index &&
                                i == time[time_index].entry_id) {
index 18a3247..fe0cbaa 100644 (file)
@@ -89,6 +89,8 @@
 #define mmUVD_JPEG_RB_SIZE_BASE_IDX                                                                    1
 #define mmUVD_JPEG_ADDR_CONFIG                                                                         0x021f
 #define mmUVD_JPEG_ADDR_CONFIG_BASE_IDX                                                                1
+#define mmUVD_JPEG_PITCH                                                                               0x0222
+#define mmUVD_JPEG_PITCH_BASE_IDX                                                                      1
 #define mmUVD_JPEG_GPCOM_CMD                                                                           0x022c
 #define mmUVD_JPEG_GPCOM_CMD_BASE_IDX                                                                  1
 #define mmUVD_JPEG_GPCOM_DATA0                                                                         0x022d
 #define mmUVD_RB_WPTR4_BASE_IDX                                                                        1
 #define mmUVD_JRBC_RB_RPTR                                                                             0x0457
 #define mmUVD_JRBC_RB_RPTR_BASE_IDX                                                                    1
+#define mmUVD_LMI_JPEG_VMID                                                                            0x045d
+#define mmUVD_LMI_JPEG_VMID_BASE_IDX                                                                   1
 #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH                                                            0x045e
 #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX                                                   1
 #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW                                                             0x045f
 #define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX                                                      1
 #define mmUVD_LMI_JRBC_IB_VMID                                                                         0x0507
 #define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX                                                                1
+#define mmUVD_LMI_JRBC_RB_VMID                                                                         0x0508
+#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX                                                                1
 #define mmUVD_JRBC_RB_WPTR                                                                             0x0509
 #define mmUVD_JRBC_RB_WPTR_BASE_IDX                                                                    1
 #define mmUVD_JRBC_RB_CNTL                                                                             0x050a
 #define mmUVD_JRBC_IB_SIZE_BASE_IDX                                                                    1
 #define mmUVD_JRBC_LMI_SWAP_CNTL                                                                       0x050d
 #define mmUVD_JRBC_LMI_SWAP_CNTL_BASE_IDX                                                              1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW                                                         0x050e
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX                                                1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH                                                        0x050f
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX                                               1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW                                                         0x0510
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX                                                1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH                                                        0x0511
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX                                               1
+#define mmUVD_JRBC_RB_REF_DATA                                                                         0x0512
+#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX                                                                1
+#define mmUVD_JRBC_RB_COND_RD_TIMER                                                                    0x0513
+#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX                                                           1
+#define mmUVD_JRBC_EXTERNAL_REG_BASE                                                                   0x0517
+#define mmUVD_JRBC_EXTERNAL_REG_BASE_BASE_IDX                                                          1
 #define mmUVD_JRBC_SOFT_RESET                                                                          0x0519
 #define mmUVD_JRBC_SOFT_RESET_BASE_IDX                                                                 1
 #define mmUVD_JRBC_STATUS                                                                              0x051a
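
For context on how these offset/BASE_IDX pairs get used: elsewhere in amdgpu the SOC15_REG_OFFSET() macro (soc15_common.h) combines them with the per-IP reg_offset tables, so the new mmUVD_JPEG_PITCH register would be addressed roughly as in the sketch below (the WREG32 line is illustrative, not part of this patch):

/* from soc15_common.h */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

/* illustrative use in a JPEG decode path: */
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), pitch);
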
index e63bc47..9b675d9 100644 (file)
@@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
                return -EINVAL;
 
        hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
-       hwmgr->power_source = PP_PowerSource_AC;
        hwmgr->pp_table_version = PP_TABLE_V1;
        hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -236,6 +235,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
        ret = hwmgr->hwmgr_func->backend_init(hwmgr);
        if (ret)
                goto err1;
+       /* make sure DC limits are valid */
+       if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+                       (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
+                       hwmgr->dyn_state.max_clock_voltage_on_dc =
+                                       hwmgr->dyn_state.max_clock_voltage_on_ac;
 
        ret = psm_init_power_state_table(hwmgr);
        if (ret)
index 6d72a56..4149562 100644 (file)
@@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
                        PPSMC_MSG_VCEDPM_Disable);
 }
 
-static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr, enable ?
-                       PPSMC_MSG_SAMUDPM_Enable :
-                       PPSMC_MSG_SAMUDPM_Disable);
-}
-
 static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
        if (!bgate)
@@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
        return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
 }
 
-static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       if (!bgate)
-               smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
-       return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
 int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_uvd_power_gating(hwmgr))
@@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SamuPowerGating))
-               return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SAMPowerOFF);
-       return 0;
-}
-
-static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SamuPowerGating))
-               return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SAMPowerON);
-       return 0;
-}
-
 int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        data->uvd_power_gated = false;
        data->vce_power_gated = false;
-       data->samu_power_gated = false;
 
        smu7_powerup_uvd(hwmgr);
        smu7_powerup_vce(hwmgr);
-       smu7_powerup_samu(hwmgr);
 
        return 0;
 }
@@ -195,26 +161,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
        }
 }
 
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->samu_power_gated == bgate)
-               return 0;
-
-       data->samu_power_gated = bgate;
-
-       if (bgate) {
-               smu7_update_samu_dpm(hwmgr, true);
-               smu7_powerdown_samu(hwmgr);
-       } else {
-               smu7_powerup_samu(hwmgr);
-               smu7_update_samu_dpm(hwmgr, false);
-       }
-
-       return 0;
-}
-
 int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                        const uint32_t *msg_id)
 {
index 1ddce02..be7f66d 100644 (file)
@@ -29,7 +29,6 @@
 void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
 void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
 int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
index f8e866c..b89d6fb 100644 (file)
@@ -885,6 +885,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
        data->odn_dpm_table.max_vddc = max_vddc;
 }
 
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i;
+
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+
+       if (table_info == NULL)
+               return;
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+                                       data->dpm_table.sclk_table.dpm_levels[i].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+                       break;
+               }
+       }
+
+       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+               if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+                                       data->dpm_table.mclk_table.dpm_levels[i].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+                       break;
+               }
+       }
+
+       dep_table = table_info->vdd_dep_on_mclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+                       return;
+               }
+       }
+
+       dep_table = table_info->vdd_dep_on_sclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+                       return;
+               }
+       }
+       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+               data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+       }
+}
+
 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -904,10 +958,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 
        /* initialize ODN table */
        if (hwmgr->od_enabled) {
-               smu7_setup_voltage_range_from_vbios(hwmgr);
-               smu7_odn_initial_default_setting(hwmgr);
+               if (data->odn_dpm_table.max_vddc) {
+                       smu7_check_dpm_table_updated(hwmgr);
+               } else {
+                       smu7_setup_voltage_range_from_vbios(hwmgr);
+                       smu7_odn_initial_default_setting(hwmgr);
+               }
        }
-
        return 0;
 }
 
@@ -2820,7 +2877,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state *request_ps,
                        const struct pp_power_state *current_ps)
 {
-
+       struct amdgpu_device *adev = hwmgr->adev;
        struct smu7_power_state *smu7_ps =
                                cast_phw_smu7_power_state(&request_ps->hardware);
        uint32_t sclk;
@@ -2843,12 +2900,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                 "VI should always have 2 performance levels",
                                );
 
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+       max_limits = adev->pm.ac_power ?
                        &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
                        &(hwmgr->dyn_state.max_clock_voltage_on_dc);
 
        /* Cap clock DPM tables at DC MAX if it is in DC. */
-       if (PP_PowerSource_DC == hwmgr->power_source) {
+       if (!adev->pm.ac_power) {
                for (i = 0; i < smu7_ps->performance_level_count; i++) {
                        if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
                                smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
@@ -3717,8 +3774,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
        uint32_t i;
 
        for (i = 0; i < dpm_table->count; i++) {
-               if ((dpm_table->dpm_levels[i].value < low_limit)
-               || (dpm_table->dpm_levels[i].value > high_limit))
+               /* skip the trim if OD is enabled */
+               if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+                       || dpm_table->dpm_levels[i].value > high_limit))
                        dpm_table->dpm_levels[i].enabled = false;
                else
                        dpm_table->dpm_levels[i].enabled = true;
@@ -3762,10 +3820,8 @@ static int smu7_generate_dpm_level_enable_mask(
        const struct smu7_power_state *smu7_ps =
                        cast_const_phw_smu7_power_state(states->pnew_state);
 
-       /*skip the trim if od is enabled*/
-       if (!hwmgr->od_enabled)
-               result = smu7_trim_dpm_states(hwmgr, smu7_ps);
 
+       result = smu7_trim_dpm_states(hwmgr, smu7_ps);
        if (result)
                return result;
 
@@ -4244,7 +4300,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
 
        data->uvd_power_gated = false;
        data->vce_power_gated = false;
-       data->samu_power_gated = false;
 
        return 0;
 }
@@ -4739,60 +4794,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
        return true;
 }
 
-static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i;
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
-       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-
-       if (table_info == NULL)
-               return;
-
-       for (i=0; i<data->dpm_table.sclk_table.count; i++) {
-               if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
-                                       data->dpm_table.sclk_table.dpm_levels[i].value) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-                       break;
-               }
-       }
-
-       for (i=0; i<data->dpm_table.mclk_table.count; i++) {
-               if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
-                                       data->dpm_table.mclk_table.dpm_levels[i].value) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-                       break;
-               }
-       }
-
-       dep_table = table_info->vdd_dep_on_mclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
-
-       for (i=0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
-                       return;
-               }
-       }
-
-       dep_table = table_info->vdd_dep_on_sclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
-       for (i=0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
-                       return;
-               }
-       }
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
-               data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
-       }
-}
-
 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size)
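
One detail in the relocated smu7_check_dpm_table_updated() worth spelling out: the function never leaves a bare DPMTABLE_OD_UPDATE_VDDC request pending; the VDDC bit is cleared and folded into full SCLK and MCLK table re-uploads. In isolation (the starting flag value is illustrative):

	uint32_t need_update = DPMTABLE_OD_UPDATE_VDDC;	/* set by the vddc compares */

	if (need_update & DPMTABLE_OD_UPDATE_VDDC) {
		need_update &= ~DPMTABLE_OD_UPDATE_VDDC;
		need_update |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
	/* need_update now requests both clock tables */
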
index c91e75d..3784ce6 100644 (file)
@@ -310,7 +310,6 @@ struct smu7_hwmgr {
        /* ---- Power Gating States ---- */
        bool                           uvd_power_gated;
        bool                           vce_power_gated;
-       bool                           samu_power_gated;
        bool                           need_long_memory_training;
 
        /* Application power optimization parameters */
index 05e680d..3b8d36d 100644 (file)
@@ -2414,6 +2414,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+       struct vega10_hwmgr *data = hwmgr->backend;
+       struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+       struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+       uint32_t i;
+
+       dep_table = table_info->vdd_dep_on_mclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+                       return;
+               }
+       }
+
+       dep_table = table_info->vdd_dep_on_sclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+                       return;
+               }
+       }
+
+       if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+               data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+               data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+       }
+}
+
 /**
 * Initializes the SMC table and uploads it
 *
@@ -2430,6 +2464,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
        PPTable_t *pp_table = &(data->smc_state_table.pp_table);
        struct pp_atomfwctrl_voltage_table voltage_table;
        struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
+       struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
 
        result = vega10_setup_default_dpm_tables(hwmgr);
        PP_ASSERT_WITH_CODE(!result,
@@ -2437,8 +2472,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
                        return result);
 
        /* initialize ODN table */
-       if (hwmgr->od_enabled)
-               vega10_odn_initial_default_setting(hwmgr);
+       if (hwmgr->od_enabled) {
+               if (odn_table->max_vddc) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+                       vega10_check_dpm_table_updated(hwmgr);
+               } else {
+                       vega10_odn_initial_default_setting(hwmgr);
+               }
+       }
 
        pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
                        VOLTAGE_OBJ_SVID2,  &voltage_table);
@@ -3061,6 +3102,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state  *request_ps,
                        const struct pp_power_state *current_ps)
 {
+       struct amdgpu_device *adev = hwmgr->adev;
        struct vega10_power_state *vega10_ps =
                                cast_phw_vega10_power_state(&request_ps->hardware);
        uint32_t sclk;
@@ -3086,12 +3128,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        if (vega10_ps->performance_level_count != 2)
                pr_info("VI should always have 2 performance levels");
 
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+       max_limits = adev->pm.ac_power ?
                        &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
                        &(hwmgr->dyn_state.max_clock_voltage_on_dc);
 
        /* Cap clock DPM tables at DC MAX if it is in DC. */
-       if (PP_PowerSource_DC == hwmgr->power_source) {
+       if (!adev->pm.ac_power) {
                for (i = 0; i < vega10_ps->performance_level_count; i++) {
                        if (vega10_ps->performance_levels[i].mem_clock >
                                max_limits->mclk)
@@ -4695,40 +4737,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
        return true;
 }
 
-static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
-       struct vega10_hwmgr *data = hwmgr->backend;
-       struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
-       struct phm_ppt_v2_information *table_info = hwmgr->pptable;
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
-       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-       uint32_t i;
-
-       dep_table = table_info->vdd_dep_on_mclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
-
-       for (i = 0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
-                       return;
-               }
-       }
-
-       dep_table = table_info->vdd_dep_on_sclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
-       for (i = 0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
-                       return;
-               }
-       }
-
-       if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
-               data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-               data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
-       }
-}
-
 static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
                                                enum PP_OD_DPM_TABLE_COMMAND type)
 {
index aadd6cb..339820d 100644 (file)
@@ -370,7 +370,6 @@ struct vega10_hwmgr {
        /* ---- Power Gating States ---- */
        bool                           uvd_power_gated;
        bool                           vce_power_gated;
-       bool                           samu_power_gated;
        bool                           need_long_memory_training;
 
        /* Internal settings to apply the application power optimization parameters */
index b99fb8a..40c98ca 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/seq_file.h>
 #include "amd_powerplay.h"
 #include "hardwaremanager.h"
-#include "pp_power_source.h"
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
 #include "hwmgr_ppt.h"
@@ -741,7 +740,6 @@ struct pp_hwmgr {
        const struct pp_table_func *pptable_func;
 
        struct pp_power_state    *ps;
-       enum pp_power_source  power_source;
        uint32_t num_ps;
        struct pp_thermal_controller_info thermal_controller;
        bool fan_ctrl_is_in_default_mode;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
deleted file mode 100644 (file)
index b43315c..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef PP_POWERSOURCE_H
-#define PP_POWERSOURCE_H
-
-enum pp_power_source {
-       PP_PowerSource_AC = 0,
-       PP_PowerSource_DC,
-       PP_PowerSource_LimitedPower,
-       PP_PowerSource_LimitedPower_2,
-       PP_PowerSource_Max
-};
-
-
-#endif
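
With pp_power_source.h and hwmgr->power_source gone, the AC/DC decision the enum used to carry is read straight from the base driver instead; the surviving idiom, repeated from the smu7/vega10 hunks above for clarity, is simply:

	struct amdgpu_device *adev = hwmgr->adev;

	max_limits = adev->pm.ac_power ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
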
index 6c22ed9..89dfbf5 100644 (file)
@@ -29,7 +29,6 @@
 enum SMU_TABLE {
        SMU_UVD_TABLE = 0,
        SMU_VCE_TABLE,
-       SMU_SAMU_TABLE,
        SMU_BIF_TABLE,
 };
 
@@ -47,7 +46,6 @@ enum SMU_MEMBER {
        UcodeLoadStatus,
        UvdBootLevel,
        VceBootLevel,
-       SamuBootLevel,
        LowSclkInterruptThreshold,
        DRAM_LOG_ADDR_H,
        DRAM_LOG_ADDR_L,
index 2d4ec8a..fbe3ef4 100644 (file)
@@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-                                       SMU7_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_samu_clock_voltage_dependency_table *samu_table =
-                               hwmgr->dyn_state.samu_clock_voltage_dependency_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(samu_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
-               table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
-               table->SamuLevel[count].MinPhases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int ci_populate_memory_timing_parameters(
                struct pp_hwmgr *hwmgr,
                uint32_t engine_clock,
@@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                "Failed to initialize ACP Level!", return result);
 
-       result = ci_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
        /* need to populate the  ARB settings for the initial state. */
        result = ci_program_memory_timing_parameters(hwmgr);
@@ -2881,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       struct smu7_hwmgr *data = hwmgr->backend;
+       struct ci_smumgr *smu_data = hwmgr->smu_backend;
+       struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+                       hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+       uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+       uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+                                               hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+       int32_t i;
+
+       if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
+               smu_data->smc_state_table.UvdBootLevel = 0;
+       else
+               smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+                               UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
+
+       data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+       for (i = uvd_table->count - 1; i >= 0; i--) {
+               if (uvd_table->entries[i].v <= max_vddc)
+                       data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+               if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
+                       break;
+       }
+       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+       return 0;
+}
+
+static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       struct smu7_hwmgr *data = hwmgr->backend;
+       struct phm_vce_clock_voltage_dependency_table *vce_table =
+                       hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+       uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+       uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+                                               hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+       int32_t i;
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+                               VceBootLevel, 0); /* temporarily hard-coded to level 0; VCE can set min evclk */
+
+       data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+
+       for (i = vce_table->count - 1; i >= 0; i--) {
+               if (vce_table->entries[i].v <= max_vddc)
+                       data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+               if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
+                       break;
+       }
+       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.vce_dpm_enable_mask);
+
+       return 0;
+}
+
+static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               ci_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               ci_update_vce_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
 const struct pp_smumgr_func ci_smu_funcs = {
        .smu_init = ci_smu_init,
        .smu_fini = ci_smu_fini,
@@ -2903,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
        .initialize_mc_reg_table = ci_initialize_mc_reg_table,
        .is_dpm_running = ci_is_dpm_running,
        .update_dpm_settings = ci_update_dpm_settings,
+       .update_smc_table = ci_update_smc_table,
 };
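
The new ci_update_smc_table() is reached through the generic smumgr wrapper rather than called directly; callers such as smu7_update_uvd_dpm() (visible earlier in this patch) go through:

	/* dispatches via hwmgr->smumgr_funcs->update_smc_table, which is
	 * now wired to ci_update_smc_table() on CI parts */
	smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
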
index 53df940..18048f8 100644 (file)
@@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
                int32_t eng_clock, int32_t mem_clock,
                struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                        "Failed to initialize ACP Level!", return result);
 
-       result = fiji_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point
         * (the other states are just copies of the boot state) we only
         * need to populate the  ARB settings for the initial state.
@@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
@@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
 static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
 {
        switch (type) {
@@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                fiji_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               fiji_update_samu_smc_table(hwmgr);
-               break;
        default:
                break;
        }
index 415f691..9299b93 100644 (file)
@@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-       SMU71_Discrete_DpmTable *table)
-{
-       return 0;
-}
-
 static int iceland_populate_memory_timing_parameters(
                struct pp_hwmgr *hwmgr,
                uint32_t engine_clock,
@@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                "Failed to initialize ACP Level!", return result;);
 
-       result = iceland_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize SAMU Level!", return result;);
-
        /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
        /* need to populate the  ARB settings for the initial state. */
        result = iceland_program_memory_timing_parameters(hwmgr);
index a8c6524..a4ce199 100644 (file)
@@ -1337,55 +1337,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU74_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
                int32_t eng_clock, int32_t mem_clock,
                SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -1865,10 +1816,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                        "Failed to initialize VCE Level!", return result);
 
-       result = polaris10_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point
         * (the other states are just copies of the boot state) we only
         * need to populate the  ARB settings for the initial state.
@@ -2222,34 +2169,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-
 static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
@@ -2276,9 +2195,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                polaris10_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               polaris10_update_samu_smc_table(hwmgr);
-               break;
        case SMU_BIF_TABLE:
                polaris10_update_bif_smc_table(hwmgr);
        default:
@@ -2357,8 +2273,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
index 782b19f..7dabc6c 100644 (file)
@@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                                                   pptable_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t) (mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].Frequency =
-                       pptable_info->mm_dep_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage.Vddc =
-                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
-                               mm_table->entries[count].vddc);
-               table->SamuLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
-                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->SamuLevel[count].MinVoltage.Vddci =
-                       phm_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               table->SamuLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                       table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((!result),
-                       "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-       }
-
-       return result;
-}
-
 static int tonga_populate_memory_timing_parameters(
                struct pp_hwmgr *hwmgr,
                uint32_t engine_clock,
@@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(!result,
                "Failed to initialize ACP Level !", return result);
 
-       result = tonga_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize SAMU Level !", return result);
-
        /* Since only the initial state is completely set up at this
        * point (the other states are just copies of the boot state) we only
        * need to populate the  ARB settings for the initial state.
@@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
@@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
 static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
 {
        switch (type) {
@@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                tonga_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               tonga_update_samu_smc_table(hwmgr);
-               break;
        default:
                break;
        }
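
The removed tonga_update_samu_smc_table() above, like its polaris10 and vegam counterparts, patches a single byte inside the SMC-resident DPM table using a dword-aligned read-modify-write. A minimal sketch of that pattern, with hypothetical smc_read32()/smc_write32() accessors standing in for cgs_read_ind_register()/cgs_write_ind_register():

#include <linux/types.h>

u32 smc_read32(u32 offset);            /* hypothetical accessor */
void smc_write32(u32 offset, u32 val); /* hypothetical accessor */

static void smc_update_boot_level_byte(u32 byte_offset, u8 level)
{
	/* round down to the containing dword; same as "/= 4; *= 4" above */
	u32 dword_offset = byte_offset & ~3u;
	u32 word = smc_read32(dword_offset);

	/* the boot-level byte sits in the low byte of its dword */
	word &= 0xFFFFFF00;
	word |= level;
	smc_write32(dword_offset, word);
}
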
index 2de4895..57420d7 100644 (file)
@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-
 static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
 {
        struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                vegam_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               vegam_update_samu_smc_table(hwmgr);
-               break;
        case SMU_BIF_TABLE:
                vegam_update_bif_smc_table(hwmgr);
                break;
@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU75_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
                int32_t eng_clock, int32_t mem_clock,
                SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(!result,
                        "Failed to initialize VCE Level!", return result);
 
-       result = vegam_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point
         * (the other states are just copies of the boot state) we only
         * need to populate the  ARB settings for the initial state.
@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
index 16903dc..c3349b8 100644 (file)
@@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
 {
        struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
 
-       if (!crtc->primary->fb)
-               return;
-
        clk_disable_unprepare(arcpgu->clk);
        arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
                              arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
index ac92bce..edd1512 100644 (file)
@@ -7,30 +7,15 @@
  */
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include "armada_drm.h"
 #include "armada_fb.h"
 #include "armada_gem.h"
 #include "armada_hw.h"
 
-static void armada_fb_destroy(struct drm_framebuffer *fb)
-{
-       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-
-       drm_framebuffer_cleanup(&dfb->fb);
-       drm_gem_object_put_unlocked(&dfb->obj->obj);
-       kfree(dfb);
-}
-
-static int armada_fb_create_handle(struct drm_framebuffer *fb,
-       struct drm_file *dfile, unsigned int *handle)
-{
-       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-       return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs armada_fb_funcs = {
-       .destroy        = armada_fb_destroy,
-       .create_handle  = armada_fb_create_handle,
+       .destroy        = drm_gem_fb_destroy,
+       .create_handle  = drm_gem_fb_create_handle,
 };
 
 struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
 
        dfb->fmt = format;
        dfb->mod = config;
-       dfb->obj = obj;
+       dfb->fb.obj[0] = &obj->obj;
 
        drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
 
index 48073c4..5c130ff 100644 (file)
 
 struct armada_framebuffer {
        struct drm_framebuffer  fb;
-       struct armada_gem_object *obj;
        uint8_t                 fmt;
        uint8_t                 mod;
 };
 #define drm_fb_to_armada_fb(dfb) \
        container_of(dfb, struct armada_framebuffer, fb)
-#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
 
 struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
        const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
index a97f509..3fb37c7 100644 (file)
@@ -490,8 +490,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
        .map_dma_buf    = armada_gem_prime_map_dma_buf,
        .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
        .release        = drm_gem_dmabuf_release,
-       .map_atomic     = armada_gem_dmabuf_no_kmap,
-       .unmap_atomic   = armada_gem_dmabuf_no_kunmap,
        .map            = armada_gem_dmabuf_no_kmap,
        .unmap          = armada_gem_dmabuf_no_kunmap,
        .mmap           = armada_gem_dmabuf_mmap,
index c1ea5c3..843cac2 100644 (file)
@@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
        drm_fb_cma_fbdev_fini(dev);
        flush_workqueue(dc->wq);
        drm_kms_helper_poll_fini(dev);
+       drm_atomic_helper_shutdown(dev);
        drm_mode_config_cleanup(dev);
 
        pm_runtime_get_sync(dev->dev);
index 47e0992..0444006 100644 (file)
@@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
                                    ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
 }
 
-static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
+                                         struct atmel_hlcdc_plane_state *state)
 {
-       struct drm_crtc *crtc = plane->base.crtc;
+       struct drm_crtc *crtc = state->base.crtc;
        struct drm_color_lut *lut;
        int idx;
 
@@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
        atmel_hlcdc_plane_update_pos_and_size(plane, state);
        atmel_hlcdc_plane_update_general_settings(plane, state);
        atmel_hlcdc_plane_update_format(plane, state);
-       atmel_hlcdc_plane_update_clut(plane);
+       atmel_hlcdc_plane_update_clut(plane, state);
        atmel_hlcdc_plane_update_buffers(plane, state);
        atmel_hlcdc_plane_update_disc_area(plane, state);
 
@@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
        atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
 }
 
-static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
-{
-       struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
-       if (plane->base.fb)
-               drm_framebuffer_put(plane->base.fb);
-
-       drm_plane_cleanup(p);
-}
-
 static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
 {
        const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
 static const struct drm_plane_funcs layer_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = atmel_hlcdc_plane_destroy,
+       .destroy = drm_plane_cleanup,
        .reset = atmel_hlcdc_plane_reset,
        .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
        .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
index fa2c799..bf6cad6 100644 (file)
@@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
 
 config DRM_SIL_SII8620
        tristate "Silicon Image SII8620 HDMI/MHL bridge"
-       depends on OF && RC_CORE
+       depends on OF
        select DRM_KMS_HELPER
        imply EXTCON
+       select INPUT
+       select RC_CORE
        help
          Silicon Image SII8620 HDMI/MHL bridge chip driver.
 
index c255fc3..f2d43f2 100644 (file)
@@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
        .transfer = cdns_dsi_transfer,
 };
 
-static int cdns_dsi_resume(struct device *dev)
+static int __maybe_unused cdns_dsi_resume(struct device *dev)
 {
        struct cdns_dsi *dsi = dev_get_drvdata(dev);
 
@@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
        return 0;
 }
 
-static int cdns_dsi_suspend(struct device *dev)
+static int __maybe_unused cdns_dsi_suspend(struct device *dev)
 {
        struct cdns_dsi *dsi = dev_get_drvdata(dev);
 
index be2d7e4..ce9db7a 100644 (file)
@@ -92,7 +92,6 @@
 
 #define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
 #define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
 
 struct cirrus_crtc {
        struct drm_crtc                 base;
@@ -117,11 +116,6 @@ struct cirrus_connector {
        struct drm_connector            base;
 };
 
-struct cirrus_framebuffer {
-       struct drm_framebuffer          base;
-       struct drm_gem_object *obj;
-};
-
 struct cirrus_mc {
        resource_size_t                 vram_size;
        resource_size_t                 vram_base;
@@ -152,7 +146,7 @@ struct cirrus_device {
 
 struct cirrus_fbdev {
        struct drm_fb_helper helper;
-       struct cirrus_framebuffer gfb;
+       struct drm_framebuffer gfb;
        void *sysram;
        int size;
        int x1, y1, x2, y2; /* dirty rect */
@@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
                       struct drm_mode_create_dumb *args);
 
 int cirrus_framebuffer_init(struct drm_device *dev,
-                          struct cirrus_framebuffer *gfb,
+                           struct drm_framebuffer *gfb,
                            const struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_gem_object *obj);
 
index 32fbfba..b643ac9 100644 (file)
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
        struct drm_gem_object *obj;
        struct cirrus_bo *bo;
        int src_offset, dst_offset;
-       int bpp = afbdev->gfb.base.format->cpp[0];
+       int bpp = afbdev->gfb.format->cpp[0];
        int ret = -EBUSY;
        bool unmap = false;
        bool store_for_later = false;
        int x2, y2;
        unsigned long flags;
 
-       obj = afbdev->gfb.obj;
+       obj = afbdev->gfb.obj[0];
        bo = gem_to_cirrus_bo(obj);
 
        /*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
        }
        for (i = y; i < y + height; i++) {
                /* assume equal stride for now */
-               src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+               src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
                memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
 
        }
@@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
        gfbdev->sysram = sysram;
        gfbdev->size = size;
 
-       fb = &gfbdev->gfb.base;
+       fb = &gfbdev->gfb;
        if (!fb) {
                DRM_INFO("fb is NULL\n");
                return -EINVAL;
@@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 static int cirrus_fbdev_destroy(struct drm_device *dev,
                                struct cirrus_fbdev *gfbdev)
 {
-       struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+       struct drm_framebuffer *gfb = &gfbdev->gfb;
 
        drm_fb_helper_unregister_fbi(&gfbdev->helper);
 
-       if (gfb->obj) {
-               drm_gem_object_put_unlocked(gfb->obj);
-               gfb->obj = NULL;
+       if (gfb->obj[0]) {
+               drm_gem_object_put_unlocked(gfb->obj[0]);
+               gfb->obj[0] = NULL;
        }
 
        vfree(gfbdev->sysram);
        drm_fb_helper_fini(&gfbdev->helper);
-       drm_framebuffer_unregister_private(&gfb->base);
-       drm_framebuffer_cleanup(&gfb->base);
+       drm_framebuffer_unregister_private(gfb);
+       drm_framebuffer_cleanup(gfb);
 
        return 0;
 }
index 26df1e8..60d54e1 100644 (file)
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "cirrus_drv.h"
 
-static int cirrus_create_handle(struct drm_framebuffer *fb,
-                               struct drm_file* file_priv,
-                               unsigned int* handle)
-{
-       struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
-       return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
-}
-
-static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
-       drm_gem_object_put_unlocked(cirrus_fb->obj);
-       drm_framebuffer_cleanup(fb);
-       kfree(fb);
-}
-
 static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
-       .create_handle = cirrus_create_handle,
-       .destroy = cirrus_user_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
 int cirrus_framebuffer_init(struct drm_device *dev,
-                           struct cirrus_framebuffer *gfb,
+                           struct drm_framebuffer *gfb,
                            const struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_gem_object *obj)
 {
        int ret;
 
-       drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
-       gfb->obj = obj;
-       ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+       drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
+       gfb->obj[0] = obj;
+       ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
        if (ret) {
                DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
                return ret;
@@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
 {
        struct cirrus_device *cdev = dev->dev_private;
        struct drm_gem_object *obj;
-       struct cirrus_framebuffer *cirrus_fb;
+       struct drm_framebuffer *fb;
        u32 bpp;
        int ret;
 
@@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
        if (obj == NULL)
                return ERR_PTR(-ENOENT);
 
-       cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
-       if (!cirrus_fb) {
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb) {
                drm_gem_object_put_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }
 
-       ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+       ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
        if (ret) {
                drm_gem_object_put_unlocked(obj);
-               kfree(cirrus_fb);
+               kfree(fb);
                return ERR_PTR(ret);
        }
-       return &cirrus_fb->base;
+       return fb;
 }
 
 static const struct drm_mode_config_funcs cirrus_mode_funcs = {
index c91b9b0..b529f8c 100644 (file)
@@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
                                int x, int y, int atomic)
 {
        struct cirrus_device *cdev = crtc->dev->dev_private;
-       struct drm_gem_object *obj;
-       struct cirrus_framebuffer *cirrus_fb;
        struct cirrus_bo *bo;
        int ret;
        u64 gpu_addr;
 
        /* push the previous fb to system ram */
        if (!atomic && fb) {
-               cirrus_fb = to_cirrus_framebuffer(fb);
-               obj = cirrus_fb->obj;
-               bo = gem_to_cirrus_bo(obj);
+               bo = gem_to_cirrus_bo(fb->obj[0]);
                ret = cirrus_bo_reserve(bo, false);
                if (ret)
                        return ret;
@@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
                cirrus_bo_unreserve(bo);
        }
 
-       cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
-       obj = cirrus_fb->obj;
-       bo = gem_to_cirrus_bo(obj);
+       bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
 
        ret = cirrus_bo_reserve(bo, false);
        if (ret)
@@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
                return ret;
        }
 
-       if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+       if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
                /* if pushing console in kmap it */
                ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
                if (ret)
index 895741e..1788423 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
 #include <linux/sync_file.h>
 
 #include "drm_crtc_internal.h"
@@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
        return fence_ptr;
 }
 
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+                                       struct drm_connector *connector,
+                                       s32 __user *fence_ptr)
+{
+       unsigned int index = drm_connector_index(connector);
+
+       if (!fence_ptr)
+               return 0;
+
+       if (put_user(-1, fence_ptr))
+               return -EFAULT;
+
+       state->connectors[index].out_fence_ptr = fence_ptr;
+
+       return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+                                              struct drm_connector *connector)
+{
+       unsigned int index = drm_connector_index(connector);
+       s32 __user *fence_ptr;
+
+       fence_ptr = state->connectors[index].out_fence_ptr;
+       state->connectors[index].out_fence_ptr = NULL;
+
+       return fence_ptr;
+}
+
 /**
  * drm_atomic_set_mode_for_crtc - set mode for CRTC
  * @state: the CRTC whose incoming state to update
@@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
                                 const struct drm_display_mode *mode)
 {
+       struct drm_crtc *crtc = state->crtc;
        struct drm_mode_modeinfo umode;
 
        /* Early return for no change. */
@@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
 
                drm_mode_copy(&state->mode, mode);
                state->enable = true;
-               DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
-                                mode->name, state);
+               DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+                                mode->name, crtc->base.id, crtc->name, state);
        } else {
                memset(&state->mode, 0, sizeof(state->mode));
                state->enable = false;
-               DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
-                                state);
+               DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+                                crtc->base.id, crtc->name, state);
        }
 
        return 0;
@@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
                                       struct drm_property_blob *blob)
 {
+       struct drm_crtc *crtc = state->crtc;
+
        if (blob == state->mode_blob)
                return 0;
 
@@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
        memset(&state->mode, 0, sizeof(state->mode));
 
        if (blob) {
-               if (blob->length != sizeof(struct drm_mode_modeinfo) ||
-                   drm_mode_convert_umode(state->crtc->dev, &state->mode,
-                                          blob->data))
+               int ret;
+
+               if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+                       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+                                        crtc->base.id, crtc->name,
+                                        blob->length);
                        return -EINVAL;
+               }
+
+               ret = drm_mode_convert_umode(crtc->dev,
+                                            &state->mode, blob->data);
+               if (ret) {
+                       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+                                        crtc->base.id, crtc->name,
+                                        ret, drm_get_mode_status_name(state->mode.status));
+                       drm_mode_debug_printmodeline(&state->mode);
+                       return -EINVAL;
+               }
 
                state->mode_blob = drm_property_blob_get(blob);
                state->enable = true;
-               DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
-                                state->mode.name, state);
+               DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+                                state->mode.name, crtc->base.id, crtc->name,
+                                state);
        } else {
                state->enable = false;
-               DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
-                                state);
+               DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+                                crtc->base.id, crtc->name, state);
        }
 
        return 0;
@@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                        return -EFAULT;
 
                set_out_fence_for_crtc(state->state, crtc, fence_ptr);
-       } else if (crtc->funcs->atomic_set_property)
+       } else if (crtc->funcs->atomic_set_property) {
                return crtc->funcs->atomic_set_property(crtc, state, property, val);
-       else
+       } else {
+               DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+                                crtc->base.id, crtc->name,
+                                property->base.id, property->name);
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -677,6 +729,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
 }
 
 /**
+ * drm_atomic_connector_check - check connector state
+ * @connector: connector to check
+ * @state: connector state to check
+ *
+ * Provides core sanity checks for connector state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_connector_check(struct drm_connector *connector,
+               struct drm_connector_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_writeback_job *writeback_job = state->writeback_job;
+
+       if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
+               return 0;
+
+       if (writeback_job->fb && !state->crtc) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+                                connector->base.id, connector->name);
+               return -EINVAL;
+       }
+
+       if (state->crtc)
+               crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+                                                               state->crtc);
+
+       if (writeback_job->fb && !crtc_state->active) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+                                connector->base.id, connector->name,
+                                state->crtc->base.id);
+               return -EINVAL;
+       }
+
+       if (writeback_job->out_fence && !writeback_job->fb) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+                                connector->base.id, connector->name);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
  * drm_atomic_get_plane_state - get plane state
  * @state: global atomic state object
  * @plane: plane to get state object for
@@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
 
        WARN_ON(!state->acquire_ctx);
 
+       /* the legacy pointers should never be set */
+       WARN_ON(plane->fb);
+       WARN_ON(plane->old_fb);
+       WARN_ON(plane->crtc);
+
        plane_state = drm_atomic_get_existing_plane_state(state, plane);
        if (plane_state)
                return plane_state;
@@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
        } else if (property == plane->alpha_property) {
                state->alpha = val;
        } else if (property == plane->rotation_property) {
-               if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
+               if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+                       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+                                        plane->base.id, plane->name, val);
                        return -EINVAL;
+               }
                state->rotation = val;
        } else if (property == plane->zpos_property) {
                state->zpos = val;
@@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
                return plane->funcs->atomic_set_property(plane, state,
                                property, val);
        } else {
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+                                plane->base.id, plane->name,
+                                property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 
        /* either *both* CRTC and FB must be set, or neither */
        if (state->crtc && !state->fb) {
-               DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
+                                plane->base.id, plane->name);
                return -EINVAL;
        } else if (state->fb && !state->crtc) {
-               DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
+                                plane->base.id, plane->name);
                return -EINVAL;
        }
 
@@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 
        /* Check whether this plane is usable on this CRTC */
        if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
-               DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
+               DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+                                state->crtc->base.id, state->crtc->name,
+                                plane->base.id, plane->name);
                return -EINVAL;
        }
 
@@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
                                           state->fb->modifier);
        if (ret) {
                struct drm_format_name_buf format_name;
-               DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+                                plane->base.id, plane->name,
                                 drm_get_format_name(state->fb->format->format,
                                                     &format_name),
                                 state->fb->modifier);
@@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
            state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
            state->crtc_h > INT_MAX ||
            state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
-               DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+                                plane->base.id, plane->name,
                                 state->crtc_w, state->crtc_h,
                                 state->crtc_x, state->crtc_y);
                return -ERANGE;
@@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
            state->src_x > fb_width - state->src_w ||
            state->src_h > fb_height ||
            state->src_y > fb_height - state->src_h) {
-               DRM_DEBUG_ATOMIC("Invalid source coordinates "
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
                                 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+                                plane->base.id, plane->name,
                                 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
                                 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
                                 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@@ -1120,6 +1235,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
        state->private_objs[index].old_state = obj->state;
        state->private_objs[index].new_state = obj_state;
        state->private_objs[index].ptr = obj;
+       obj_state->state = state;
 
        state->num_private_objs = num_objs;
 
@@ -1278,6 +1394,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
                        state->link_status = val;
        } else if (property == config->aspect_ratio_property) {
                state->picture_aspect_ratio = val;
+       } else if (property == config->content_type_property) {
+               state->content_type = val;
        } else if (property == connector->scaling_mode_property) {
                state->scaling_mode = val;
        } else if (property == connector->content_protection_property) {
@@ -1286,10 +1404,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
                        return -EINVAL;
                }
                state->content_protection = val;
+       } else if (property == config->writeback_fb_id_property) {
+               struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+               int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+               if (fb)
+                       drm_framebuffer_put(fb);
+               return ret;
+       } else if (property == config->writeback_out_fence_ptr_property) {
+               s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+               return set_out_fence_for_connector(state->state, connector,
+                                                  fence_ptr);
        } else if (connector->funcs->atomic_set_property) {
                return connector->funcs->atomic_set_property(connector,
                                state, property, val);
        } else {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
+                                connector->base.id, connector->name,
+                                property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -1363,10 +1495,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
                *val = state->link_status;
        } else if (property == config->aspect_ratio_property) {
                *val = state->picture_aspect_ratio;
+       } else if (property == config->content_type_property) {
+               *val = state->content_type;
        } else if (property == connector->scaling_mode_property) {
                *val = state->scaling_mode;
        } else if (property == connector->content_protection_property) {
                *val = state->content_protection;
+       } else if (property == config->writeback_fb_id_property) {
+               /* Writeback framebuffer is one-shot, write and forget */
+               *val = 0;
+       } else if (property == config->writeback_out_fence_ptr_property) {
+               *val = 0;
        } else if (connector->funcs->atomic_get_property) {
                return connector->funcs->atomic_get_property(connector,
                                state, property, val);
@@ -1456,11 +1595,12 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
        }
 
        if (crtc)
-               DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
-                                plane_state, crtc->base.id, crtc->name);
+               DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+                                plane->base.id, plane->name, plane_state,
+                                crtc->base.id, crtc->name);
        else
-               DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
-                                plane_state);
+               DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+                                plane->base.id, plane->name, plane_state);
 
        return 0;
 }
@@ -1480,12 +1620,15 @@ void
 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
                            struct drm_framebuffer *fb)
 {
+       struct drm_plane *plane = plane_state->plane;
+
        if (fb)
-               DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
-                                fb->base.id, plane_state);
-       else
-               DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+               DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+                                fb->base.id, plane->base.id, plane->name,
                                 plane_state);
+       else
+               DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+                                plane->base.id, plane->name, plane_state);
 
        drm_framebuffer_assign(&plane_state->fb, fb);
 }
@@ -1546,6 +1689,7 @@ int
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                                  struct drm_crtc *crtc)
 {
+       struct drm_connector *connector = conn_state->connector;
        struct drm_crtc_state *crtc_state;
 
        if (conn_state->crtc == crtc)
@@ -1573,10 +1717,12 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                drm_connector_get(conn_state->connector);
                conn_state->crtc = crtc;
 
-               DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+               DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+                                connector->base.id, connector->name,
                                 conn_state, crtc->base.id, crtc->name);
        } else {
-               DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+               DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+                                connector->base.id, connector->name,
                                 conn_state);
        }
 
@@ -1584,6 +1730,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 }
 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
 
+/*
+ * drm_atomic_get_writeback_job - return or allocate a writeback job
+ * @conn_state: Connector state to get the job for
+ *
+ * Writeback jobs have a different lifetime to the atomic state they are
+ * associated with. This convenience function takes care of allocating a job
+ * if there isn't yet one associated with the connector state, otherwise
+ * it just returns the existing job.
+ *
+ * Returns: The writeback job for the given connector state
+ */
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+       WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+       if (!conn_state->writeback_job)
+               conn_state->writeback_job =
+                       kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+       return conn_state->writeback_job;
+}
+
+/**
+ * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
+ * @conn_state: atomic state object for the connector
+ * @fb: fb to use for the connector
+ *
+ * This is used to set the framebuffer for a writeback connector, which outputs
+ * to a buffer instead of an actual physical connector.
+ * Changing the assigned framebuffer requires us to grab a reference to the new
+ * fb and drop the reference to the old fb, if there is one. This function
+ * takes care of all these details besides updating the pointer in the
+ * state object itself.
+ *
+ * Note: The only way conn_state can already have an fb set is if the commit
+ * sets the property more than once.
+ *
+ * See also: drm_writeback_connector_init()
+ *
+ * Returns: 0 on success
+ */
+int drm_atomic_set_writeback_fb_for_connector(
+               struct drm_connector_state *conn_state,
+               struct drm_framebuffer *fb)
+{
+       struct drm_writeback_job *job =
+               drm_atomic_get_writeback_job(conn_state);
+       if (!job)
+               return -ENOMEM;
+
+       drm_framebuffer_assign(&job->fb, fb);
+
+       if (fb)
+               DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+                                fb->base.id, conn_state);
+       else
+               DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+                                conn_state);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
+
 /**
  * drm_atomic_add_affected_connectors - add connectors for crtc
  * @state: atomic state
@@ -1672,6 +1882,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 
        WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
 
+       DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+                        crtc->base.id, crtc->name, state);
+
        drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
                struct drm_plane_state *plane_state =
                        drm_atomic_get_plane_state(state, plane);
@@ -1702,6 +1915,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
        struct drm_plane_state *plane_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
+       struct drm_connector *conn;
+       struct drm_connector_state *conn_state;
        int i, ret = 0;
 
        DRM_DEBUG_ATOMIC("checking %p\n", state);
@@ -1724,6 +1939,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
                }
        }
 
+       for_each_new_connector_in_state(state, conn, conn_state, i) {
+               ret = drm_atomic_connector_check(conn, conn_state);
+               if (ret) {
+                       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
+                                        conn->base.id, conn->name);
+                       return ret;
+               }
+       }
+
        if (config->funcs->atomic_check) {
                ret = config->funcs->atomic_check(state->dev, state);
 
@@ -2048,45 +2272,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 }
 
 /**
- * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
- *
- * @dev: drm device to check.
- * @plane_mask: plane mask for planes that were updated.
- * @ret: return value, can be -EDEADLK for a retry.
- *
- * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
- * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
- * is a common operation for each atomic update, so this call is split off as a
- * helper.
- */
-void drm_atomic_clean_old_fb(struct drm_device *dev,
-                            unsigned plane_mask,
-                            int ret)
-{
-       struct drm_plane *plane;
-
-       /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-        * locks (ie. while it is still safe to deref plane->state).  We
-        * need to do this here because the driver entry points cannot
-        * distinguish between legacy and atomic ioctls.
-        */
-       drm_for_each_plane_mask(plane, dev, plane_mask) {
-               if (ret == 0) {
-                       struct drm_framebuffer *new_fb = plane->state->fb;
-                       if (new_fb)
-                               drm_framebuffer_get(new_fb);
-                       plane->fb = new_fb;
-                       plane->crtc = plane->state->crtc;
-
-                       if (plane->old_fb)
-                               drm_framebuffer_put(plane->old_fb);
-               }
-               plane->old_fb = NULL;
-       }
-}
-EXPORT_SYMBOL(drm_atomic_clean_old_fb);
-
-/**
  * DOC: explicit fencing properties
  *
  * Explicit fencing allows userspace to control the buffer synchronization
@@ -2161,7 +2346,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
        return 0;
 }
 
-static int prepare_crtc_signaling(struct drm_device *dev,
+static int prepare_signaling(struct drm_device *dev,
                                  struct drm_atomic_state *state,
                                  struct drm_mode_atomic *arg,
                                  struct drm_file *file_priv,
@@ -2170,6 +2355,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
+       struct drm_connector *conn;
+       struct drm_connector_state *conn_state;
        int i, c = 0, ret;
 
        if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@@ -2235,6 +2422,43 @@ static int prepare_crtc_signaling(struct drm_device *dev,
                c++;
        }
 
+       for_each_new_connector_in_state(state, conn, conn_state, i) {
+               struct drm_writeback_job *job;
+               struct drm_out_fence_state *f;
+               struct dma_fence *fence;
+               s32 __user *fence_ptr;
+
+               fence_ptr = get_out_fence_for_connector(state, conn);
+               if (!fence_ptr)
+                       continue;
+
+               job = drm_atomic_get_writeback_job(conn_state);
+               if (!job)
+                       return -ENOMEM;
+
+               f = krealloc(*fence_state, sizeof(**fence_state) *
+                            (*num_fences + 1), GFP_KERNEL);
+               if (!f)
+                       return -ENOMEM;
+
+               memset(&f[*num_fences], 0, sizeof(*f));
+
+               f[*num_fences].out_fence_ptr = fence_ptr;
+               *fence_state = f;
+
+               fence = drm_writeback_get_out_fence((struct drm_writeback_connector *)conn);
+               if (!fence)
+                       return -ENOMEM;
+
+               ret = setup_out_fence(&f[(*num_fences)++], fence);
+               if (ret) {
+                       dma_fence_put(fence);
+                       return ret;
+               }
+
+               job->out_fence = fence;
+       }
+
        /*
         * Having this flag means user mode pends on event which will never
         * reach due to lack of at least one CRTC for signaling
@@ -2245,11 +2469,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
        return 0;
 }
 
-static void complete_crtc_signaling(struct drm_device *dev,
-                                   struct drm_atomic_state *state,
-                                   struct drm_out_fence_state *fence_state,
-                                   unsigned int num_fences,
-                                   bool install_fds)
+static void complete_signaling(struct drm_device *dev,
+                              struct drm_atomic_state *state,
+                              struct drm_out_fence_state *fence_state,
+                              unsigned int num_fences,
+                              bool install_fds)
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
@@ -2306,9 +2530,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        unsigned int copied_objs, copied_props;
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
-       struct drm_plane *plane;
        struct drm_out_fence_state *fence_state;
-       unsigned plane_mask;
        int ret = 0;
        unsigned int i, j, num_fences;
 
@@ -2348,7 +2570,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
-       plane_mask = 0;
        copied_objs = 0;
        copied_props = 0;
        fence_state = NULL;
@@ -2419,17 +2640,11 @@ retry:
                        copied_props++;
                }
 
-               if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
-                   !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
-                       plane = obj_to_plane(obj);
-                       plane_mask |= (1 << drm_plane_index(plane));
-                       plane->old_fb = plane->fb;
-               }
                drm_mode_object_put(obj);
        }
 
-       ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
-                                    &num_fences);
+       ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+                               &num_fences);
        if (ret)
                goto out;
 
@@ -2445,9 +2660,7 @@ retry:
        }
 
 out:
-       drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
-       complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
+       complete_signaling(dev, state, fence_state, num_fences, !ret);
 
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
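
From userspace, the writeback support added above is driven entirely through the two connector properties handled in drm_atomic_connector_set_property(). A hedged libdrm sketch of a one-shot writeback commit; the object, framebuffer and property IDs (for WRITEBACK_FB_ID and WRITEBACK_OUT_FENCE_PTR) are assumed to have been discovered beforehand, e.g. with drmModeObjectGetProperties():

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int queue_writeback(int fd, uint32_t conn_id, uint32_t fb_id,
		    uint32_t prop_wb_fb_id, uint32_t prop_wb_out_fence)
{
	int out_fence = -1;
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	/* WRITEBACK_FB_ID: one-shot target buffer, cleared after commit */
	drmModeAtomicAddProperty(req, conn_id, prop_wb_fb_id, fb_id);
	/* WRITEBACK_OUT_FENCE_PTR: kernel stores a sync_file fd in here */
	drmModeAtomicAddProperty(req, conn_id, prop_wb_out_fence,
				 (uint64_t)(uintptr_t)&out_fence);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);

	/* on success, out_fence signals once the writeback has landed */
	return ret ? ret : out_fence;
}
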
index 130da51..17baf50 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_writeback.h>
 #include <linux/dma-fence.h>
 
 #include "drm_crtc_helper_internal.h"
@@ -1172,6 +1173,25 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
 
+static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+                                               struct drm_atomic_state *old_state)
+{
+       struct drm_connector *connector;
+       struct drm_connector_state *new_conn_state;
+       int i;
+
+       for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+               const struct drm_connector_helper_funcs *funcs;
+
+               funcs = connector->helper_private;
+
+               if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+                       WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+                       funcs->atomic_commit(connector, new_conn_state->writeback_job);
+               }
+       }
+}
+
 /**
  * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
  * @dev: DRM device
@@ -1251,6 +1271,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 
                drm_bridge_enable(encoder->bridge);
        }
+
+       drm_atomic_helper_commit_writebacks(dev, old_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
 
@@ -2914,7 +2936,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
        struct drm_plane *plane;
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
-       unsigned plane_mask = 0;
        int ret, i;
 
        state = drm_atomic_state_alloc(dev);
@@ -2957,17 +2978,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
                        goto free;
 
                drm_atomic_set_fb_for_plane(plane_state, NULL);
-
-               if (clean_old_fbs) {
-                       plane->old_fb = plane->fb;
-                       plane_mask |= BIT(drm_plane_index(plane));
-               }
        }
 
        ret = drm_atomic_commit(state);
 free:
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
        drm_atomic_state_put(state);
        return ret;
 }
@@ -3129,13 +3143,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 
        state->acquire_ctx = ctx;
 
-       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               WARN_ON(plane->crtc != new_plane_state->crtc);
-               WARN_ON(plane->fb != new_plane_state->fb);
-               WARN_ON(plane->old_fb);
-
+       for_each_new_plane_in_state(state, plane, new_plane_state, i)
                state->planes[i].old_state = plane->state;
-       }
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
                state->crtcs[i].old_state = crtc->state;
@@ -3660,6 +3669,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
        if (state->crtc)
                drm_connector_get(connector);
        state->commit = NULL;
+
+       /* Don't copy over a writeback job, they are used only once */
+       state->writeback_job = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
 
index 9b9ba5d..2f9ebdd 100644 (file)
@@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
        { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
        { DRM_MODE_CONNECTOR_DSI, "DSI" },
        { DRM_MODE_CONNECTOR_DPI, "DPI" },
+       { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
 };
 
 void drm_connector_ida_init(void)
@@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
        struct ida *connector_ida =
                &drm_connector_enum_list[connector_type].ida;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+               (!funcs->atomic_destroy_state ||
+                !funcs->atomic_duplicate_state));
+
        ret = __drm_mode_object_add(dev, &connector->base,
                                    DRM_MODE_OBJECT_CONNECTOR,
                                    false, drm_connector_free);
@@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
        config->num_connector++;
        spin_unlock_irq(&config->connector_list_lock);
 
-       if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+       if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
+           connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
                drm_object_attach_property(&connector->base,
                                              config->edid_property,
                                              0);
@@ -720,6 +726,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
        { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
 };
 
+static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
+       { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
+       { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
+       { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
+       { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
+       { DRM_MODE_CONTENT_TYPE_GAME, "Game" },
+};
+
 static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
        { DRM_MODE_PANEL_ORIENTATION_NORMAL,    "Normal"        },
        { DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down"   },
@@ -997,6 +1011,84 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
 
 /**
+ * DOC: HDMI connector properties
+ *
+ * content type (HDMI specific):
+ *     Indicates the content type setting to be used in HDMI infoframes to
+ *     indicate the content type to the external device, so that it adjusts
+ *     its display settings accordingly.
+ *
+ *     The value of this property can be one of the following:
+ *
+ *     No Data:
+ *             Content type is unknown
+ *     Graphics:
+ *             Content type is graphics
+ *     Photo:
+ *             Content type is photo
+ *     Cinema:
+ *             Content type is cinema
+ *     Game:
+ *             Content type is game
+ *
+ *     Drivers can set up this property by calling
+ *     drm_connector_attach_content_type_property(). Decoding to
+ *     infoframe values is done through
+ *     drm_hdmi_get_content_type_from_property() and
+ *     drm_hdmi_get_itc_bit_from_property().
+ */
+
+/**
+ * drm_connector_attach_content_type_property - attach content-type property
+ * @connector: connector to attach content type property on.
+ *
+ * Called by a driver the first time an HDMI connector is created.
+ */
+int drm_connector_attach_content_type_property(struct drm_connector *connector)
+{
+       if (!drm_mode_create_content_type_property(connector->dev))
+               drm_object_attach_property(&connector->base,
+                                          connector->dev->mode_config.content_type_property,
+                                          DRM_MODE_CONTENT_TYPE_NO_DATA);
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_type_property);
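
A minimal usage sketch (foo_connector_funcs and the surrounding init path are assumptions, not part of this patch): the helper creates the property on first use and attaches it with "No Data" as the initial value.

    ret = drm_connector_init(dev, connector, &foo_connector_funcs,
                             DRM_MODE_CONNECTOR_HDMIA);
    if (ret)
            return ret;

    drm_connector_attach_content_type_property(connector);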
+
+/**
+ * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
+ *                                         content type information, based
+ *                                         on the corresponding DRM property.
+ * @frame: HDMI AVI infoframe
+ * @conn_state: DRM display connector state
+ */
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+                                        const struct drm_connector_state *conn_state)
+{
+       switch (conn_state->content_type) {
+       case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+               frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+               break;
+       case DRM_MODE_CONTENT_TYPE_CINEMA:
+               frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
+               break;
+       case DRM_MODE_CONTENT_TYPE_GAME:
+               frame->content_type = HDMI_CONTENT_TYPE_GAME;
+               break;
+       case DRM_MODE_CONTENT_TYPE_PHOTO:
+               frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
+               break;
+       default:
+               /* Graphics is the default (0) */
+               frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+       }
+
+       frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
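
Taken together with drm_hdmi_avi_infoframe_from_display_mode(), which now primes the frame with the Graphics/ITC-off defaults for non-atomic drivers, an atomic driver's encoder-enable path might look like this sketch (adjusted_mode and conn_state are assumed to come from the atomic enable hook; error handling trimmed):

    struct hdmi_avi_infoframe frame;

    /* Fills timing-derived fields plus the content-type/ITC defaults */
    drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode, false);

    /* Override the defaults from the connector's "content type" property */
    drm_hdmi_avi_infoframe_content_type(&frame, conn_state);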
+
+/**
  * drm_create_tv_properties - create TV specific connector properties
  * @dev: DRM device
  * @num_modes: number of different TV formats (modes) supported
@@ -1261,6 +1353,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
 
 /**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time the property is needed. The property must
+ * then be attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+       if (dev->mode_config.content_type_property)
+               return 0;
+
+       dev->mode_config.content_type_property =
+               drm_property_create_enum(dev, 0, "content type",
+                                        drm_content_type_enum_list,
+                                        ARRAY_SIZE(drm_content_type_enum_list));
+
+       if (dev->mode_config.content_type_property == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
+/**
  * drm_mode_create_suggested_offset_properties - create suggested offset properties
  * @dev: DRM device
  *
index 98a36e6..f45e7a8 100644 (file)
@@ -286,6 +286,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        if (WARN_ON(config->num_crtc >= 32))
                return -EINVAL;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+               (!funcs->atomic_destroy_state ||
+                !funcs->atomic_duplicate_state));
+
        crtc->dev = dev;
        crtc->funcs = funcs;
 
@@ -469,23 +473,32 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
         * connectors from it), hence we need to refcount the fbs across all
         * crtcs. Atomic modeset will have saner semantics ...
         */
-       drm_for_each_crtc(tmp, crtc->dev)
-               tmp->primary->old_fb = tmp->primary->fb;
+       drm_for_each_crtc(tmp, crtc->dev) {
+               struct drm_plane *plane = tmp->primary;
+
+               plane->old_fb = plane->fb;
+       }
 
        fb = set->fb;
 
        ret = crtc->funcs->set_config(set, ctx);
        if (ret == 0) {
-               crtc->primary->crtc = fb ? crtc : NULL;
-               crtc->primary->fb = fb;
+               struct drm_plane *plane = crtc->primary;
+
+               if (!plane->state) {
+                       plane->crtc = fb ? crtc : NULL;
+                       plane->fb = fb;
+               }
        }
 
        drm_for_each_crtc(tmp, crtc->dev) {
-               if (tmp->primary->fb)
-                       drm_framebuffer_get(tmp->primary->fb);
-               if (tmp->primary->old_fb)
-                       drm_framebuffer_put(tmp->primary->old_fb);
-               tmp->primary->old_fb = NULL;
+               struct drm_plane *plane = tmp->primary;
+
+               if (plane->fb)
+                       drm_framebuffer_get(plane->fb);
+               if (plane->old_fb)
+                       drm_framebuffer_put(plane->old_fb);
+               plane->old_fb = NULL;
        }
 
        return ret;
@@ -640,7 +653,9 @@ retry:
 
                ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
                if (ret) {
-                       DRM_DEBUG_KMS("Invalid mode\n");
+                       DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
+                                     ret, drm_get_mode_status_name(mode->status));
+                       drm_mode_debug_printmodeline(mode);
                        goto out;
                }
 
index 5d307b2..235d40f 100644 (file)
@@ -56,12 +56,21 @@ int drm_mode_setcrtc(struct drm_device *dev,
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
 
+/* drm_modes.c */
+const char *drm_get_mode_status_name(enum drm_mode_status status);
+
 /* IOCTLs */
 int drm_mode_getresources(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
 
 
 /* drm_dumb_buffers.c */
+int drm_mode_create_dumb(struct drm_device *dev,
+                        struct drm_mode_create_dumb *args,
+                        struct drm_file *file_priv);
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+                         struct drm_file *file_priv);
+
 /* IOCTLs */
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file_priv);
@@ -163,14 +172,19 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
                                     const struct drm_framebuffer *fb);
 void drm_fb_release(struct drm_file *file_priv);
 
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+                  struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+                 struct drm_file *file_priv);
+
 
 /* IOCTL */
-int drm_mode_addfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv);
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv);
 int drm_mode_addfb2(struct drm_device *dev,
                    void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
-                 void *data, struct drm_file *file_priv);
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv);
 int drm_mode_getfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv);
 int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
index 9e2ae02..81dfdd3 100644 (file)
  * a hardware-specific ioctl to allocate suitable buffer objects.
  */
 
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv)
+int drm_mode_create_dumb(struct drm_device *dev,
+                        struct drm_mode_create_dumb *args,
+                        struct drm_file *file_priv)
 {
-       struct drm_mode_create_dumb *args = data;
        u32 cpp, stride, size;
 
        if (!dev->driver->dumb_create)
@@ -92,6 +92,12 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
        return dev->driver->dumb_create(file_priv, dev, args);
 }
 
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       return drm_mode_create_dumb(dev, data, file_priv);
+}
+
 /**
  * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
  * @dev: DRM device
@@ -123,17 +129,22 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
                                               &args->offset);
 }
 
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
-                               void *data, struct drm_file *file_priv)
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+                         struct drm_file *file_priv)
 {
-       struct drm_mode_destroy_dumb *args = data;
-
        if (!dev->driver->dumb_create)
                return -ENOSYS;
 
        if (dev->driver->dumb_destroy)
-               return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+               return dev->driver->dumb_destroy(file_priv, dev, handle);
        else
-               return drm_gem_dumb_destroy(file_priv, dev, args->handle);
+               return drm_gem_dumb_destroy(file_priv, dev, handle);
 }
 
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_destroy_dumb *args = data;
+
+       return drm_mode_destroy_dumb(dev, args->handle, file_priv);
+}
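
The split keeps the ioctl entry points thin while letting in-kernel callers allocate and destroy dumb buffers directly. A sketch of such in-kernel use, assuming a caller that already holds dev and file_priv:

    struct drm_mode_create_dumb args = {
            .width  = 1024,
            .height = 768,
            .bpp    = 32,
    };
    int ret;

    ret = drm_mode_create_dumb(dev, &args, file_priv);
    if (ret)
            return ret;

    /* ... use args.handle, args.pitch and args.size ... */

    drm_mode_destroy_dumb(dev, args.handle, file_priv);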
index a580838..5dc742b 100644 (file)
@@ -163,8 +163,9 @@ static const struct edid_quirk {
        /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
        { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 
-       /* HTC Vive VR Headset */
+       /* HTC Vive and Vive Pro VR Headsets */
        { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+       { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
 
        /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
        { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@@ -687,562 +688,562 @@ static const struct minimode extra_modes[] = {
 static const struct drm_display_mode edid_cea_modes[] = {
        /* 0 - dummy, VICs start at 1 */
        { },
-       /* 1 - 640x480@60Hz */
+       /* 1 - 640x480@60Hz 4:3 */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 2 - 720x480@60Hz */
+       /* 2 - 720x480@60Hz 4:3 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 3 - 720x480@60Hz */
+       /* 3 - 720x480@60Hz 16:9 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 4 - 1280x720@60Hz */
+       /* 4 - 1280x720@60Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 5 - 1920x1080i@60Hz */
+       /* 5 - 1920x1080i@60Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 6 - 720(1440)x480i@60Hz */
+       /* 6 - 720(1440)x480i@60Hz 4:3 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 7 - 720(1440)x480i@60Hz */
+       /* 7 - 720(1440)x480i@60Hz 16:9 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 8 - 720(1440)x240@60Hz */
+       /* 8 - 720(1440)x240@60Hz 4:3 */
        { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 9 - 720(1440)x240@60Hz */
+       /* 9 - 720(1440)x240@60Hz 16:9 */
        { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 10 - 2880x480i@60Hz */
+       /* 10 - 2880x480i@60Hz 4:3 */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 11 - 2880x480i@60Hz */
+       /* 11 - 2880x480i@60Hz 16:9 */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 12 - 2880x240@60Hz */
+       /* 12 - 2880x240@60Hz 4:3 */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 13 - 2880x240@60Hz */
+       /* 13 - 2880x240@60Hz 16:9 */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 14 - 1440x480@60Hz */
+       /* 14 - 1440x480@60Hz 4:3 */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 15 - 1440x480@60Hz */
+       /* 15 - 1440x480@60Hz 16:9 */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 16 - 1920x1080@60Hz */
+       /* 16 - 1920x1080@60Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 17 - 720x576@50Hz */
+       /* 17 - 720x576@50Hz 4:3 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 18 - 720x576@50Hz */
+       /* 18 - 720x576@50Hz 16:9 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 19 - 1280x720@50Hz */
+       /* 19 - 1280x720@50Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 20 - 1920x1080i@50Hz */
+       /* 20 - 1920x1080i@50Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 21 - 720(1440)x576i@50Hz */
+       /* 21 - 720(1440)x576i@50Hz 4:3 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 22 - 720(1440)x576i@50Hz */
+       /* 22 - 720(1440)x576i@50Hz 16:9 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 23 - 720(1440)x288@50Hz */
+       /* 23 - 720(1440)x288@50Hz 4:3 */
        { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 24 - 720(1440)x288@50Hz */
+       /* 24 - 720(1440)x288@50Hz 16:9 */
        { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 25 - 2880x576i@50Hz */
+       /* 25 - 2880x576i@50Hz 4:3 */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 26 - 2880x576i@50Hz */
+       /* 26 - 2880x576i@50Hz 16:9 */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 27 - 2880x288@50Hz */
+       /* 27 - 2880x288@50Hz 4:3 */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 28 - 2880x288@50Hz */
+       /* 28 - 2880x288@50Hz 16:9 */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 29 - 1440x576@50Hz */
+       /* 29 - 1440x576@50Hz 4:3 */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 30 - 1440x576@50Hz */
+       /* 30 - 1440x576@50Hz 16:9 */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 31 - 1920x1080@50Hz */
+       /* 31 - 1920x1080@50Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 32 - 1920x1080@24Hz */
+       /* 32 - 1920x1080@24Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
                   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 33 - 1920x1080@25Hz */
+       /* 33 - 1920x1080@25Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 34 - 1920x1080@30Hz */
+       /* 34 - 1920x1080@30Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 35 - 2880x480@60Hz */
+       /* 35 - 2880x480@60Hz 4:3 */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 36 - 2880x480@60Hz */
+       /* 36 - 2880x480@60Hz 16:9 */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 37 - 2880x576@50Hz */
+       /* 37 - 2880x576@50Hz 4:3 */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 38 - 2880x576@50Hz */
+       /* 38 - 2880x576@50Hz 16:9 */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 39 - 1920x1080i@50Hz */
+       /* 39 - 1920x1080i@50Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
                   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 40 - 1920x1080i@100Hz */
+       /* 40 - 1920x1080i@100Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 41 - 1280x720@100Hz */
+       /* 41 - 1280x720@100Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 42 - 720x576@100Hz */
+       /* 42 - 720x576@100Hz 4:3 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 43 - 720x576@100Hz */
+       /* 43 - 720x576@100Hz 16:9 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 44 - 720(1440)x576i@100Hz */
+       /* 44 - 720(1440)x576i@100Hz 4:3 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 45 - 720(1440)x576i@100Hz */
+       /* 45 - 720(1440)x576i@100Hz 16:9 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 46 - 1920x1080i@120Hz */
+       /* 46 - 1920x1080i@120Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 47 - 1280x720@120Hz */
+       /* 47 - 1280x720@120Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 48 - 720x480@120Hz */
+       /* 48 - 720x480@120Hz 4:3 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 49 - 720x480@120Hz */
+       /* 49 - 720x480@120Hz 16:9 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 50 - 720(1440)x480i@120Hz */
+       /* 50 - 720(1440)x480i@120Hz 4:3 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 51 - 720(1440)x480i@120Hz */
+       /* 51 - 720(1440)x480i@120Hz 16:9 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 52 - 720x576@200Hz */
+       /* 52 - 720x576@200Hz 4:3 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 53 - 720x576@200Hz */
+       /* 53 - 720x576@200Hz 16:9 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 54 - 720(1440)x576i@200Hz */
+       /* 54 - 720(1440)x576i@200Hz 4:3 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 55 - 720(1440)x576i@200Hz */
+       /* 55 - 720(1440)x576i@200Hz 16:9 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 56 - 720x480@240Hz */
+       /* 56 - 720x480@240Hz 4:3 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 57 - 720x480@240Hz */
+       /* 57 - 720x480@240Hz 16:9 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 58 - 720(1440)x480i@240Hz */
+       /* 58 - 720(1440)x480i@240Hz 4:3 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 59 - 720(1440)x480i@240Hz */
+       /* 59 - 720(1440)x480i@240Hz 16:9 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 60 - 1280x720@24Hz */
+       /* 60 - 1280x720@24Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 61 - 1280x720@25Hz */
+       /* 61 - 1280x720@25Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
                   3740, 3960, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 62 - 1280x720@30Hz */
+       /* 62 - 1280x720@30Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 63 - 1920x1080@120Hz */
+       /* 63 - 1920x1080@120Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 64 - 1920x1080@100Hz */
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+       /* 64 - 1920x1080@100Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 65 - 1280x720@24Hz */
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+       /* 65 - 1280x720@24Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 66 - 1280x720@25Hz */
+       /* 66 - 1280x720@25Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
                   3740, 3960, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 67 - 1280x720@30Hz */
+       /* 67 - 1280x720@30Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 68 - 1280x720@50Hz */
+       /* 68 - 1280x720@50Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 69 - 1280x720@60Hz */
+       /* 69 - 1280x720@60Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 70 - 1280x720@100Hz */
+       /* 70 - 1280x720@100Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 71 - 1280x720@120Hz */
+       /* 71 - 1280x720@120Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 72 - 1920x1080@24Hz */
+       /* 72 - 1920x1080@24Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
                   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 73 - 1920x1080@25Hz */
+       /* 73 - 1920x1080@25Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 74 - 1920x1080@30Hz */
+       /* 74 - 1920x1080@30Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 75 - 1920x1080@50Hz */
+       /* 75 - 1920x1080@50Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 76 - 1920x1080@60Hz */
+       /* 76 - 1920x1080@60Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 77 - 1920x1080@100Hz */
+       /* 77 - 1920x1080@100Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 78 - 1920x1080@120Hz */
+       /* 78 - 1920x1080@120Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 79 - 1680x720@24Hz */
+       /* 79 - 1680x720@24Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 80 - 1680x720@25Hz */
+       /* 80 - 1680x720@25Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
                   2948, 3168, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 81 - 1680x720@30Hz */
+       /* 81 - 1680x720@30Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
                   2420, 2640, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 82 - 1680x720@50Hz */
+       /* 82 - 1680x720@50Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
                   1980, 2200, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 83 - 1680x720@60Hz */
+       /* 83 - 1680x720@60Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
                   1980, 2200, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 84 - 1680x720@100Hz */
+       /* 84 - 1680x720@100Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
                   1780, 2000, 0, 720, 725, 730, 825, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 85 - 1680x720@120Hz */
+       /* 85 - 1680x720@120Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
                   1780, 2000, 0, 720, 725, 730, 825, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 86 - 2560x1080@24Hz */
+       /* 86 - 2560x1080@24Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
                   3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 87 - 2560x1080@25Hz */
+       /* 87 - 2560x1080@25Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
                   3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 88 - 2560x1080@30Hz */
+       /* 88 - 2560x1080@30Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
                   3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 89 - 2560x1080@50Hz */
+       /* 89 - 2560x1080@50Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
                   3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 90 - 2560x1080@60Hz */
+       /* 90 - 2560x1080@60Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
                   2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 91 - 2560x1080@100Hz */
+       /* 91 - 2560x1080@100Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
                   2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 92 - 2560x1080@120Hz */
+       /* 92 - 2560x1080@120Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
                   3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 93 - 3840x2160p@24Hz 16:9 */
+       /* 93 - 3840x2160@24Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
                   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 94 - 3840x2160p@25Hz 16:9 */
+       /* 94 - 3840x2160@25Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 95 - 3840x2160p@30Hz 16:9 */
+       /* 95 - 3840x2160@30Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 96 - 3840x2160p@50Hz 16:9 */
+       /* 96 - 3840x2160@50Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 97 - 3840x2160p@60Hz 16:9 */
+       /* 97 - 3840x2160@60Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 98 - 4096x2160p@24Hz 256:135 */
+       /* 98 - 4096x2160@24Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
                   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 99 - 4096x2160p@25Hz 256:135 */
+       /* 99 - 4096x2160@25Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
                   5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 100 - 4096x2160p@30Hz 256:135 */
+       /* 100 - 4096x2160@30Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
                   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 101 - 4096x2160p@50Hz 256:135 */
+       /* 101 - 4096x2160@50Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
                   5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 102 - 4096x2160p@60Hz 256:135 */
+       /* 102 - 4096x2160@60Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
                   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 103 - 3840x2160p@24Hz 64:27 */
+       /* 103 - 3840x2160@24Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
                   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 104 - 3840x2160p@25Hz 64:27 */
+       /* 104 - 3840x2160@25Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 105 - 3840x2160p@30Hz 64:27 */
+       /* 105 - 3840x2160@30Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 106 - 3840x2160p@50Hz 64:27 */
+       /* 106 - 3840x2160@50Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 107 - 3840x2160p@60Hz 64:27 */
+       /* 107 - 3840x2160@60Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
@@ -4874,6 +4875,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
        frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
        /*
+        * As some drivers don't support atomic, we can't use the connector
+        * state here. Instead, initialize the frame with default values, the
+        * same way it's done for the other properties here.
+        */
+       frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+       frame->itc = 0;
+
+       /*
         * Populate picture aspect ratio from either
         * user input (if specified) or from the CEA mode list.
         */
index 2ee1eaa..cab14f2 100644 (file)
@@ -368,7 +368,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
        struct drm_plane *plane;
        struct drm_atomic_state *state;
        int i, ret;
-       unsigned int plane_mask;
        struct drm_modeset_acquire_ctx ctx;
 
        drm_modeset_acquire_init(&ctx, 0);
@@ -381,7 +380,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
 
        state->acquire_ctx = &ctx;
 retry:
-       plane_mask = 0;
        drm_for_each_plane(plane, dev) {
                plane_state = drm_atomic_get_plane_state(state, plane);
                if (IS_ERR(plane_state)) {
@@ -391,9 +389,6 @@ retry:
 
                plane_state->rotation = DRM_MODE_ROTATE_0;
 
-               plane->old_fb = plane->fb;
-               plane_mask |= 1 << drm_plane_index(plane);
-
                /* disable non-primary: */
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                        continue;
@@ -430,8 +425,6 @@ retry:
        ret = drm_atomic_commit(state);
 
 out_state:
-       drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
        if (ret == -EDEADLK)
                goto backoff;
 
@@ -1164,7 +1157,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
  * @info: fbdev registered by the helper
  * @rect: info about rectangle to fill
  *
- * A wrapper around cfb_imageblit implemented by fbdev core
+ * A wrapper around cfb_fillrect implemented by fbdev core
  */
 void drm_fb_helper_cfb_fillrect(struct fb_info *info,
                                const struct fb_fillrect *rect)
index 6d9b945..66bb403 100644 (file)
@@ -101,6 +101,166 @@ DEFINE_MUTEX(drm_global_mutex);
 
 static int drm_open_helper(struct file *filp, struct drm_minor *minor);
 
+/**
+ * drm_file_alloc - allocate file context
+ * @minor: minor to allocate on
+ *
+ * This allocates a new DRM file context. It is not linked into any context and
+ * can be used by the caller freely. Note that the context keeps a pointer to
+ * @minor, so it must be freed before @minor is.
+ *
+ * RETURNS:
+ * Pointer to newly allocated context, ERR_PTR on failure.
+ */
+struct drm_file *drm_file_alloc(struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct drm_file *file;
+       int ret;
+
+       file = kzalloc(sizeof(*file), GFP_KERNEL);
+       if (!file)
+               return ERR_PTR(-ENOMEM);
+
+       file->pid = get_pid(task_pid(current));
+       file->minor = minor;
+
+       /* for compatibility root is always authenticated */
+       file->authenticated = capable(CAP_SYS_ADMIN);
+       file->lock_count = 0;
+
+       INIT_LIST_HEAD(&file->lhead);
+       INIT_LIST_HEAD(&file->fbs);
+       mutex_init(&file->fbs_lock);
+       INIT_LIST_HEAD(&file->blobs);
+       INIT_LIST_HEAD(&file->pending_event_list);
+       INIT_LIST_HEAD(&file->event_list);
+       init_waitqueue_head(&file->event_wait);
+       file->event_space = 4096; /* set aside 4k for event buffer */
+
+       mutex_init(&file->event_read_lock);
+
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               drm_gem_open(dev, file);
+
+       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+               drm_syncobj_open(file);
+
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_prime_init_file_private(&file->prime);
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, file);
+               if (ret < 0)
+                       goto out_prime_destroy;
+       }
+
+       return file;
+
+out_prime_destroy:
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_prime_destroy_file_private(&file->prime);
+       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+               drm_syncobj_release(file);
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               drm_gem_release(dev, file);
+       put_pid(file->pid);
+       kfree(file);
+
+       return ERR_PTR(ret);
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e, *et;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Unlink pending events */
+       list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+                                pending_link) {
+               list_del(&e->pending_link);
+               e->file_priv = NULL;
+       }
+
+       /* Remove unconsumed events */
+       list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+               list_del(&e->link);
+               kfree(e);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_file_free - free file context
+ * @file: context to free, or NULL
+ *
+ * This destroys and deallocates a DRM file context previously allocated via
+ * drm_file_alloc(). The caller must make sure to unlink it from any contexts
+ * before calling this.
+ *
+ * If NULL is passed, this is a no-op.
+ */
+void drm_file_free(struct drm_file *file)
+{
+       struct drm_device *dev;
+
+       if (!file)
+               return;
+
+       dev = file->minor->dev;
+
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file->minor->kdev->devt),
+                 dev->open_count);
+
+       if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
+           dev->driver->preclose)
+               dev->driver->preclose(dev, file);
+
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
+               drm_legacy_lock_release(dev, file->filp);
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               drm_legacy_reclaim_buffers(dev, file);
+
+       drm_events_release(file);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_fb_release(file);
+               drm_property_destroy_user_blobs(dev, file);
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+               drm_syncobj_release(file);
+
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               drm_gem_release(dev, file);
+
+       drm_legacy_ctxbitmap_flush(dev, file);
+
+       if (drm_is_primary_client(file))
+               drm_master_release(file);
+
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, file);
+
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_prime_destroy_file_private(&file->prime);
+
+       WARN_ON(!list_empty(&file->event_list));
+
+       put_pid(file->pid);
+       kfree(file);
+}
+
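
Taken together, the two helpers above let the DRM core build a file context without an open(2) on the device node. A minimal sketch of the intended pairing, assuming a made-up caller that already holds a struct drm_minor; both helpers are core-private, declared in drm_internal.h later in this series:

#include <linux/err.h>
#include "drm_internal.h"	/* drm_file_alloc() / drm_file_free() */

/* Illustrative caller, not part of this patch. */
static int example_kernel_client(struct drm_minor *minor)
{
	struct drm_file *file = drm_file_alloc(minor);

	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... use the context: GEM handles, events, ... */

	/* Context must already be unlinked; passing NULL is a no-op. */
	drm_file_free(file);
	return 0;
}
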
 static int drm_setup(struct drm_device * dev)
 {
        int ret;
@@ -207,52 +367,22 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 
        DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       filp->private_data = priv;
-       filp->f_mode |= FMODE_UNSIGNED_OFFSET;
-       priv->filp = filp;
-       priv->pid = get_pid(task_pid(current));
-       priv->minor = minor;
-
-       /* for compatibility root is always authenticated */
-       priv->authenticated = capable(CAP_SYS_ADMIN);
-       priv->lock_count = 0;
-
-       INIT_LIST_HEAD(&priv->lhead);
-       INIT_LIST_HEAD(&priv->fbs);
-       mutex_init(&priv->fbs_lock);
-       INIT_LIST_HEAD(&priv->blobs);
-       INIT_LIST_HEAD(&priv->pending_event_list);
-       INIT_LIST_HEAD(&priv->event_list);
-       init_waitqueue_head(&priv->event_wait);
-       priv->event_space = 4096; /* set aside 4k for event buffer */
-
-       mutex_init(&priv->event_read_lock);
-
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_open(dev, priv);
-
-       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
-               drm_syncobj_open(priv);
-
-       if (drm_core_check_feature(dev, DRIVER_PRIME))
-               drm_prime_init_file_private(&priv->prime);
-
-       if (dev->driver->open) {
-               ret = dev->driver->open(dev, priv);
-               if (ret < 0)
-                       goto out_prime_destroy;
-       }
+       priv = drm_file_alloc(minor);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
        if (drm_is_primary_client(priv)) {
                ret = drm_master_open(priv);
-               if (ret)
-                       goto out_close;
+               if (ret) {
+                       drm_file_free(priv);
+                       return ret;
+               }
        }
 
+       filp->private_data = priv;
+       filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+       priv->filp = filp;
+
        mutex_lock(&dev->filelist_mutex);
        list_add(&priv->lhead, &dev->filelist);
        mutex_unlock(&dev->filelist_mutex);
@@ -278,45 +408,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 #endif
 
        return 0;
-
-out_close:
-       if (dev->driver->postclose)
-               dev->driver->postclose(dev, priv);
-out_prime_destroy:
-       if (drm_core_check_feature(dev, DRIVER_PRIME))
-               drm_prime_destroy_file_private(&priv->prime);
-       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
-               drm_syncobj_release(priv);
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_release(dev, priv);
-       put_pid(priv->pid);
-       kfree(priv);
-       filp->private_data = NULL;
-       return ret;
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_pending_event *e, *et;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-
-       /* Unlink pending events */
-       list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
-                                pending_link) {
-               list_del(&e->pending_link);
-               e->file_priv = NULL;
-       }
-
-       /* Remove unconsumed events */
-       list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
-               list_del(&e->link);
-               kfree(e);
-       }
-
-       spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
 static void drm_legacy_dev_reinit(struct drm_device *dev)
@@ -383,57 +474,7 @@ int drm_release(struct inode *inode, struct file *filp)
        list_del(&file_priv->lhead);
        mutex_unlock(&dev->filelist_mutex);
 
-       if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
-           dev->driver->preclose)
-               dev->driver->preclose(dev, file_priv);
-
-       /* ========================================================
-        * Begin inline drm_release
-        */
-
-       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
-                 task_pid_nr(current),
-                 (long)old_encode_dev(file_priv->minor->kdev->devt),
-                 dev->open_count);
-
-       if (drm_core_check_feature(dev, DRIVER_LEGACY))
-               drm_legacy_lock_release(dev, filp);
-
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-               drm_legacy_reclaim_buffers(dev, file_priv);
-
-       drm_events_release(file_priv);
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               drm_fb_release(file_priv);
-               drm_property_destroy_user_blobs(dev, file_priv);
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
-               drm_syncobj_release(file_priv);
-
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_release(dev, file_priv);
-
-       drm_legacy_ctxbitmap_flush(dev, file_priv);
-
-       if (drm_is_primary_client(file_priv))
-               drm_master_release(file_priv);
-
-       if (dev->driver->postclose)
-               dev->driver->postclose(dev, file_priv);
-
-       if (drm_core_check_feature(dev, DRIVER_PRIME))
-               drm_prime_destroy_file_private(&file_priv->prime);
-
-       WARN_ON(!list_empty(&file_priv->event_list));
-
-       put_pid(file_priv->pid);
-       kfree(file_priv);
-
-       /* ========================================================
-        * End inline drm_release
-        */
+       drm_file_free(file_priv);
 
        if (!--dev->open_count) {
                drm_lastclose(dev);
index bfedcef..ed90974 100644 (file)
@@ -95,21 +95,20 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
  * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @or: pointer to request structure
+ * @file_priv: drm file
  *
  * Add a new FB to the specified CRTC, given a user request. This is the
  * original addfb ioctl which only supported RGB formats.
  *
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_addfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+                  struct drm_file *file_priv)
 {
-       struct drm_mode_fb_cmd *or = data;
        struct drm_mode_fb_cmd2 r = {};
        int ret;
 
@@ -134,6 +133,12 @@ int drm_mode_addfb(struct drm_device *dev,
        return 0;
 }
 
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv)
+{
+       return drm_mode_addfb(dev, data, file_priv);
+}
+
 static int fb_plane_width(int width,
                          const struct drm_format_info *format, int plane)
 {
@@ -367,29 +372,28 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
 
 /**
  * drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @dev: drm device
+ * @fb_id: id of framebuffer to remove
+ * @file_priv: drm file
  *
- * Remove the FB specified by the user.
+ * Remove the specified FB.
  *
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_rmfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+                 struct drm_file *file_priv)
 {
        struct drm_framebuffer *fb = NULL;
        struct drm_framebuffer *fbl = NULL;
-       uint32_t *id = data;
        int found = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       fb = drm_framebuffer_lookup(dev, file_priv, *id);
+       fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
        if (!fb)
                return -ENOENT;
 
@@ -435,6 +439,14 @@ fail_unref:
        return -ENOENT;
 }
 
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv)
+{
+       uint32_t *fb_id = data;
+
+       return drm_mode_rmfb(dev, *fb_id, file_priv);
+}
+
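
Splitting drm_mode_addfb()/drm_mode_rmfb() from their thin *_ioctl() wrappers lets in-kernel clients reach the real implementation without faking an ioctl argument buffer. A hedged sketch of such a caller (the function itself is invented for illustration):

/* Illustrative in-kernel caller, not part of this patch. */
static int example_drop_fb(struct drm_device *dev, u32 fb_id,
			   struct drm_file *file_priv)
{
	/* Same path as DRM_IOCTL_MODE_RMFB, minus the userspace copy. */
	return drm_mode_rmfb(dev, fb_id, file_priv);
}
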
 /**
  * drm_mode_getfb - get FB info
  * @dev: drm device for the ioctl
@@ -836,8 +848,6 @@ retry:
                        goto unlock;
 
                plane_mask |= BIT(drm_plane_index(plane));
-
-               plane->old_fb = plane->fb;
        }
 
        /* This list is only filled when disable_crtcs is set. */
@@ -852,9 +862,6 @@ retry:
                ret = drm_atomic_commit(state);
 
 unlock:
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
index acfbc06..2810d41 100644 (file)
@@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
        struct dma_buf *dma_buf;
        struct dma_fence *fence;
 
-       if (plane->state->fb == state->fb || !state->fb)
+       if (!state->fb)
                return 0;
 
        dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
index b72242e..40179c5 100644 (file)
@@ -26,6 +26,8 @@
 
 /* drm_file.c */
 extern struct mutex drm_global_mutex;
+struct drm_file *drm_file_alloc(struct drm_minor *minor);
+void drm_file_free(struct drm_file *file);
 void drm_lastclose(struct drm_device *dev);
 
 /* drm_pci.c */
index 0d4cfb2..3c12504 100644 (file)
@@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
                        return -EINVAL;
                file_priv->aspect_ratio_allowed = req->value;
                break;
+       case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+               if (!file_priv->atomic)
+                       return -EINVAL;
+               if (req->value > 1)
+                       return -EINVAL;
+               file_priv->writeback_connectors = req->value;
+               break;
        default:
                return -EINVAL;
        }
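
The new writeback client cap can only be set by clients that have already enabled atomic, mirroring the check above. A minimal userspace sketch, assuming libdrm headers recent enough to define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:

#include <xf86drm.h>

/* Writeback connectors are only exposed to atomic-aware clients. */
static int example_enable_writeback(int fd)
{
	int ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);

	if (ret)
		return ret;
	return drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
}
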
@@ -637,9 +644,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
index 3166026..3cc5fbd 100644 (file)
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+       return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+                            struct drm_mm_node *node)
+{
+       struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+       u64 x = node->hole_size;
+       bool first = true;
+
+       while (*link) {
+               rb = *link;
+               if (x > rb_to_hole_size(rb)) {
+                       link = &rb->rb_left;
+               } else {
+                       link = &rb->rb_right;
+                       first = false;
+               }
+       }
+
+       rb_link_node(&node->rb_hole_size, rb, link);
+       rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
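
insert_hole_size() orders the tree by hole size with the largest holes to the left, and tells rb_insert_color_cached() via @first whether the new node became the leftmost. The cached leftmost entry is therefore always the largest free hole, which the -ENOSPC fast path in a later hunk relies on. A sketch of reading that invariant (not code from this patch):

#include <drm/drm_mm.h>

/* Largest free hole in O(1): the cached leftmost node of holes_size. */
static u64 example_largest_hole(struct drm_mm *mm)
{
	struct rb_node *rb = rb_first_cached(&mm->holes_size);

	return rb ? rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size
		  : 0;
}
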
 static void add_hole(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
                __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-       RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+       insert_hole_size(&mm->holes_size, node);
        RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
        list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
        list_del(&node->hole_stack);
-       rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+       rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
        rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
        node->hole_size = 0;
 
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
 
 static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 {
-       struct rb_node *best = NULL;
-       struct rb_node **link = &mm->holes_size.rb_node;
+       struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+       struct drm_mm_node *best = NULL;
 
-       while (*link) {
-               struct rb_node *rb = *link;
+       do {
+               struct drm_mm_node *node =
+                       rb_entry(rb, struct drm_mm_node, rb_hole_size);
 
-               if (size <= rb_hole_size(rb)) {
-                       link = &rb->rb_left;
-                       best = rb;
+               if (size <= node->hole_size) {
+                       best = node;
+                       rb = rb->rb_right;
                } else {
-                       link = &rb->rb_right;
+                       rb = rb->rb_left;
                }
-       }
+       } while (rb);
 
-       return rb_hole_size_to_node(best);
+       return best;
 }
 
 static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
 {
+       struct rb_node *rb = mm->holes_addr.rb_node;
        struct drm_mm_node *node = NULL;
-       struct rb_node **link = &mm->holes_addr.rb_node;
 
-       while (*link) {
+       while (rb) {
                u64 hole_start;
 
-               node = rb_hole_addr_to_node(*link);
+               node = rb_hole_addr_to_node(rb);
                hole_start = __drm_mm_hole_node_start(node);
 
                if (addr < hole_start)
-                       link = &node->rb_hole_addr.rb_left;
+                       rb = node->rb_hole_addr.rb_left;
                else if (addr > hole_start + node->hole_size)
-                       link = &node->rb_hole_addr.rb_right;
+                       rb = node->rb_hole_addr.rb_right;
                else
                        break;
        }
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
           u64 start, u64 end, u64 size,
           enum drm_mm_insert_mode mode)
 {
-       if (RB_EMPTY_ROOT(&mm->holes_size))
-               return NULL;
-
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
-               return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+               return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
        case DRM_MM_INSERT_LOW:
                return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+       return rb ? rb_to_hole_size(rb) : 0;
+}
+
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 {
        struct drm_mm_node *hole;
        u64 remainder_mask;
+       bool once;
 
        DRM_MM_BUG_ON(range_start >= range_end);
 
        if (unlikely(size == 0 || range_end - range_start < size))
                return -ENOSPC;
 
+       if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+               return -ENOSPC;
+
        if (alignment <= 1)
                alignment = 0;
 
+       once = mode & DRM_MM_INSERT_ONCE;
+       mode &= ~DRM_MM_INSERT_ONCE;
+
        remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
-       for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
-            hole = next_hole(mm, hole, mode)) {
+       for (hole = first_hole(mm, range_start, range_end, size, mode);
+            hole;
+            hole = once ? NULL : next_hole(mm, hole, mode)) {
                u64 hole_start = __drm_mm_hole_node_start(hole);
                u64 hole_end = hole_start + hole->hole_size;
                u64 adj_start, adj_end;
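
DRM_MM_INSERT_ONCE is stripped from the mode bits before the walk and simply terminates the loop after the first candidate hole, trading completeness for a bounded search. A hedged usage sketch (the node, size and range variables are illustrative):

/* Probe only the best-placed hole; fail fast instead of walking them all. */
err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
				  range_start, range_end,
				  DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE);
/* -ENOSPC here only means the single best candidate did not fit. */
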
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 
        if (drm_mm_hole_follows(old)) {
                list_replace(&old->hole_stack, &new->hole_stack);
-               rb_replace_node(&old->rb_hole_size,
-                               &new->rb_hole_size,
-                               &mm->holes_size);
+               rb_replace_node_cached(&old->rb_hole_size,
+                                      &new->rb_hole_size,
+                                      &mm->holes_size);
                rb_replace_node(&old->rb_hole_addr,
                                &new->rb_hole_addr,
                                &mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 
        INIT_LIST_HEAD(&mm->hole_stack);
        mm->interval_tree = RB_ROOT_CACHED;
-       mm->holes_size = RB_ROOT;
+       mm->holes_size = RB_ROOT_CACHED;
        mm->holes_addr = RB_ROOT;
 
        /* Clever trick to avoid a special case in the free hole tracking. */
index e5c6533..21e353b 100644 (file)
@@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        count = 0;
        connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
        drm_for_each_connector_iter(connector, &conn_iter) {
+               /* only expose writeback connectors if userspace understands them */
+               if (!file_priv->writeback_connectors &&
+                   (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
+                       continue;
+
                if (drm_lease_held(file_priv, connector->base.id)) {
                        if (count < card_res->count_connectors &&
                            put_user(connector->base.id, connector_id + count)) {
index c78ca0e..7f552d5 100644 (file)
@@ -1257,7 +1257,7 @@ static const char * const drm_mode_status_names[] = {
 
 #undef MODE_STATUS
 
-static const char *drm_get_mode_status_name(enum drm_mode_status status)
+const char *drm_get_mode_status_name(enum drm_mode_status status)
 {
        int index = status + 3;
 
index 1fe1224..3b8c7a6 100644 (file)
@@ -15,15 +15,15 @@ static void drm_release_of(struct device *dev, void *data)
 }
 
 /**
- * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * drm_of_crtc_port_mask - find the mask of a registered CRTC by port OF node
  * @dev: DRM device
  * @port: port OF node
  *
  * Given a port OF node, return the possible mask of the corresponding
  * CRTC within a device's list of CRTCs.  Returns zero if not found.
  */
-static uint32_t drm_crtc_port_mask(struct drm_device *dev,
-                                  struct device_node *port)
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+                           struct device_node *port)
 {
        unsigned int index = 0;
        struct drm_crtc *tmp;
@@ -37,6 +37,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
 
        return 0;
 }
+EXPORT_SYMBOL(drm_of_crtc_port_mask);
 
 /**
  * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
@@ -62,7 +63,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
                        return 0;
                }
 
-               possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+               possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
 
                of_node_put(remote_port);
        }
index 308d442..965530a 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 
+#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
 
@@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
  *
  * An error is returned if the panel is already attached to another connector.
  *
+ * When unloading, the driver should detach from the panel by calling
+ * drm_panel_detach().
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
        if (panel->connector)
                return -EBUSY;
 
+       panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
+       if (!panel->link) {
+               dev_err(panel->dev, "failed to link panel to %s\n",
+                       dev_name(connector->dev->dev));
+               return -EINVAL;
+       }
+
        panel->connector = connector;
        panel->drm = connector->dev;
 
@@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
  * Detaches a panel from the connector it is attached to. If a panel is not
  * attached to any connector this is effectively a no-op.
  *
+ * This function should not be called by the panel driver itself. It is
+ * meant for the DRM driver that called drm_panel_attach().
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
+       device_link_del(panel->link);
+
        panel->connector = NULL;
        panel->drm = NULL;
 
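
With the device link in place, unbinding the panel device now drags down the DRM device that consumes it, so attach and detach must be paired by the consuming DRM driver. A sketch of the expected call sites (function names invented):

/* In the DRM driver's bind path (illustrative): */
static int example_bind(struct drm_panel *panel,
			struct drm_connector *connector)
{
	return drm_panel_attach(panel, connector);	/* creates the link */
}

/* In the matching unbind path; never from the panel driver itself: */
static void example_unbind(struct drm_panel *panel)
{
	drm_panel_detach(panel);			/* deletes the link */
}
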
index 0350544..df0b4eb 100644 (file)
@@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
        if (WARN_ON(config->num_total_plane >= 32))
                return -EINVAL;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+               (!funcs->atomic_destroy_state ||
+                !funcs->atomic_duplicate_state));
+
        ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
        if (ret)
                return ret;
@@ -561,19 +565,20 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
        if (i == plane->format_count)
                return -EINVAL;
 
-       if (!plane->modifier_count)
-               return 0;
+       if (plane->funcs->format_mod_supported) {
+               if (!plane->funcs->format_mod_supported(plane, format, modifier))
+                       return -EINVAL;
+       } else {
+               if (!plane->modifier_count)
+                       return 0;
 
-       for (i = 0; i < plane->modifier_count; i++) {
-               if (modifier == plane->modifiers[i])
-                       break;
+               for (i = 0; i < plane->modifier_count; i++) {
+                       if (modifier == plane->modifiers[i])
+                               break;
+               }
+               if (i == plane->modifier_count)
+                       return -EINVAL;
        }
-       if (i == plane->modifier_count)
-               return -EINVAL;
-
-       if (plane->funcs->format_mod_supported &&
-           !plane->funcs->format_mod_supported(plane, format, modifier))
-               return -EINVAL;
 
        return 0;
 }
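
After this reordering, a driver that implements &drm_plane_funcs.format_mod_supported takes over format/modifier validation entirely, including the implicit linear case, instead of only refining the static modifier list. A hedged sketch of such a callback (the supported set is made up):

#include <drm/drm_fourcc.h>

static bool example_format_mod_supported(struct drm_plane *plane,
					 u32 format, u64 modifier)
{
	/* This imaginary plane only scans out linear XRGB8888. */
	return format == DRM_FORMAT_XRGB8888 &&
	       modifier == DRM_FORMAT_MOD_LINEAR;
}
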
@@ -650,9 +655,11 @@ static int __setplane_internal(struct drm_plane *plane,
                                         crtc_x, crtc_y, crtc_w, crtc_h,
                                         src_x, src_y, src_w, src_h, ctx);
        if (!ret) {
-               plane->crtc = crtc;
-               plane->fb = fb;
-               drm_framebuffer_get(plane->fb);
+               if (!plane->state) {
+                       plane->crtc = crtc;
+                       plane->fb = fb;
+                       drm_framebuffer_get(plane->fb);
+               }
        } else {
                plane->old_fb = NULL;
        }
@@ -1092,8 +1099,10 @@ retry:
                /* Keep the old fb, don't unref it. */
                plane->old_fb = NULL;
        } else {
-               plane->fb = fb;
-               drm_framebuffer_get(fb);
+               if (!plane->state) {
+                       plane->fb = fb;
+                       drm_framebuffer_get(fb);
+               }
        }
 
 out:
index f88f681..2010794 100644 (file)
@@ -502,6 +502,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
 int drm_plane_helper_disable(struct drm_plane *plane)
 {
        struct drm_plane_state *plane_state;
+       struct drm_framebuffer *old_fb;
 
        /* crtc helpers love to call disable functions for already disabled hw
         * functions. So cope with that. */
@@ -521,8 +522,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
        plane_state->plane = plane;
 
        plane_state->crtc = NULL;
+       old_fb = plane_state->fb;
        drm_atomic_set_fb_for_plane(plane_state, NULL);
 
-       return drm_plane_helper_commit(plane, plane_state, plane->fb);
+       return drm_plane_helper_commit(plane, plane_state, old_fb);
 }
 EXPORT_SYMBOL(drm_plane_helper_disable);
index 397b46b..186db2e 100644 (file)
@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
 /**
  * drm_gem_map_attach - dma_buf attach implementation for GEM
  * @dma_buf: buffer to attach device to
- * @target_dev: not used
  * @attach: buffer attachment data
  *
  * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
  *
  * Returns 0 on success, negative error code on failure.
  */
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach)
 {
        struct drm_prime_attachment *prime_attach;
@@ -435,35 +434,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
 /**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-                                unsigned long page_num)
-{
-       return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-                                 unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
-/**
  * drm_gem_dmabuf_kmap - map implementation for GEM
  * @dma_buf: buffer to be mapped
  * @page_num: page number within the buffer
@@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = drm_gem_dmabuf_kmap,
-       .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
-       .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
index 2660543..c330104 100644 (file)
@@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if IS_ENABLED(CONFIG_AGP)
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
@@ -173,7 +173,7 @@ vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
 }
 #else
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
 {
        return VM_FAULT_SIGBUS;
 }
@@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static int drm_vm_shm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
@@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
-static int drm_vm_dma_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
@@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
-static int drm_vm_sg_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
new file mode 100644 (file)
index 0000000..8273950
--- /dev/null
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_property.h>
+#include <drm/drm_writeback.h>
+#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: overview
+ *
+ * Writeback connectors are used to expose hardware which can write the output
+ * from a CRTC to a memory buffer. They are used and act similarly to other
+ * types of connectors, with some important differences:
+ *  - Writeback connectors don't provide a way to output visually to the user.
+ *  - Writeback connectors should always report as "disconnected" (so that
+ *    clients which don't understand them will ignore them).
+ *  - Writeback connectors don't have EDID.
+ *
+ * A framebuffer may only be attached to a writeback connector when the
+ * connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
+ * framebuffer applies only to a single commit (see below). A framebuffer may
+ * not be attached while the CRTC is off.
+ *
+ * Unlike with planes, when a writeback framebuffer is removed by userspace, DRM
+ * makes no attempt to remove it from active use by the connector. This is
+ * because no method is provided to abort a writeback operation, and in any
+ * case making a new commit whilst a writeback is ongoing is undefined (see
+ * WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
+ * the framebuffer will automatically no longer be in active use. As it will
+ * also have already been removed from the framebuffer list, there will be no
+ * way for any userspace application to retrieve a reference to it in the
+ * intervening period.
+ *
+ * Writeback connectors have some additional properties, which userspace
+ * can use to query and control them:
+ *
+ *  "WRITEBACK_FB_ID":
+ *     Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
+ *     framebuffer to be written by the writeback connector. This property is
+ *     similar to the FB_ID property on planes, but will always read as zero
+ *     and is not preserved across commits.
+ *     Userspace must set this property to an output buffer every time it
+ *     wishes the buffer to get filled.
+ *
+ *  "WRITEBACK_PIXEL_FORMATS":
+ *     Immutable blob property to store the supported pixel formats table. The
+ *     data is an array of u32 DRM_FORMAT_* fourcc values.
+ *     Userspace can use this blob to find out what pixel formats are supported
+ *     by the connector's writeback engine.
+ *
+ *  "WRITEBACK_OUT_FENCE_PTR":
+ *     Userspace can use this property to provide a pointer for the kernel to
+ *     fill with a sync_file file descriptor, which will signal once the
+ *     writeback is finished. The value should be the address of a 32-bit
+ *     signed integer, cast to a u64.
+ *     Userspace should wait for this fence to signal before making another
+ *     commit affecting any of the same CRTCs, Planes or Connectors.
+ *     **Failure to do so will result in undefined behaviour.**
+ *     For this reason it is strongly recommended that all userspace
+ *     applications making use of writeback connectors *always* retrieve an
+ *     out-fence for the commit and use it appropriately.
+ *     From userspace, this property will always read as zero.
+ */
+
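
The property contract above maps onto libdrm's atomic API roughly as follows. All IDs are assumed to have been looked up beforehand (e.g. via drmModeObjectGetProperties()); none of the names below come from this patch:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_writeback_commit(int fd, uint32_t wb_conn,
				    uint32_t crtc_id, uint32_t fb_id,
				    uint32_t prop_crtc_id,
				    uint32_t prop_wb_fb_id,
				    uint32_t prop_out_fence)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int32_t out_fence = -1;
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, wb_conn, prop_crtc_id, crtc_id);
	drmModeAtomicAddProperty(req, wb_conn, prop_wb_fb_id, fb_id);
	drmModeAtomicAddProperty(req, wb_conn, prop_out_fence,
				 (uint64_t)(uintptr_t)&out_fence);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	if (ret)
		return ret;

	/* Wait on the returned sync_file (e.g. poll()) before reusing fb_id
	 * or committing again to the same CRTC/connector. */
	close(out_fence);
	return 0;
}
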
+#define fence_to_wb_connector(x) container_of(x->lock, \
+                                             struct drm_writeback_connector, \
+                                             fence_lock)
+
+static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
+{
+       struct drm_writeback_connector *wb_connector =
+               fence_to_wb_connector(fence);
+
+       return wb_connector->base.dev->driver->name;
+}
+
+static const char *
+drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
+{
+       struct drm_writeback_connector *wb_connector =
+               fence_to_wb_connector(fence);
+
+       return wb_connector->timeline_name;
+}
+
+static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
+{
+       return true;
+}
+
+static const struct dma_fence_ops drm_writeback_fence_ops = {
+       .get_driver_name = drm_writeback_fence_get_driver_name,
+       .get_timeline_name = drm_writeback_fence_get_timeline_name,
+       .enable_signaling = drm_writeback_fence_enable_signaling,
+       .wait = dma_fence_default_wait,
+};
+
+static int create_writeback_properties(struct drm_device *dev)
+{
+       struct drm_property *prop;
+
+       if (!dev->mode_config.writeback_fb_id_property) {
+               prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+                                                 "WRITEBACK_FB_ID",
+                                                 DRM_MODE_OBJECT_FB);
+               if (!prop)
+                       return -ENOMEM;
+               dev->mode_config.writeback_fb_id_property = prop;
+       }
+
+       if (!dev->mode_config.writeback_pixel_formats_property) {
+               prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                          DRM_MODE_PROP_ATOMIC |
+                                          DRM_MODE_PROP_IMMUTABLE,
+                                          "WRITEBACK_PIXEL_FORMATS", 0);
+               if (!prop)
+                       return -ENOMEM;
+               dev->mode_config.writeback_pixel_formats_property = prop;
+       }
+
+       if (!dev->mode_config.writeback_out_fence_ptr_property) {
+               prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                                                "WRITEBACK_OUT_FENCE_PTR", 0,
+                                                U64_MAX);
+               if (!prop)
+                       return -ENOMEM;
+               dev->mode_config.writeback_out_fence_ptr_property = prop;
+       }
+
+       return 0;
+}
+
+static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * drm_writeback_connector_init - Initialize a writeback connector and its properties
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values. It will also create an internal encoder associated with the
+ * drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
+ * the encoder helper.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init(struct drm_device *dev,
+                                struct drm_writeback_connector *wb_connector,
+                                const struct drm_connector_funcs *con_funcs,
+                                const struct drm_encoder_helper_funcs *enc_helper_funcs,
+                                const u32 *formats, int n_formats)
+{
+       struct drm_property_blob *blob;
+       struct drm_connector *connector = &wb_connector->base;
+       struct drm_mode_config *config = &dev->mode_config;
+       int ret = create_writeback_properties(dev);
+
+       if (ret != 0)
+               return ret;
+
+       blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+                                       formats);
+       if (IS_ERR(blob))
+               return PTR_ERR(blob);
+
+       drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+       ret = drm_encoder_init(dev, &wb_connector->encoder,
+                              &drm_writeback_encoder_funcs,
+                              DRM_MODE_ENCODER_VIRTUAL, NULL);
+       if (ret)
+               goto fail;
+
+       connector->interlace_allowed = 0;
+
+       ret = drm_connector_init(dev, connector, con_funcs,
+                                DRM_MODE_CONNECTOR_WRITEBACK);
+       if (ret)
+               goto connector_fail;
+
+       ret = drm_mode_connector_attach_encoder(connector,
+                                               &wb_connector->encoder);
+       if (ret)
+               goto attach_fail;
+
+       INIT_LIST_HEAD(&wb_connector->job_queue);
+       spin_lock_init(&wb_connector->job_lock);
+
+       wb_connector->fence_context = dma_fence_context_alloc(1);
+       spin_lock_init(&wb_connector->fence_lock);
+       snprintf(wb_connector->timeline_name,
+                sizeof(wb_connector->timeline_name),
+                "CONNECTOR:%d-%s", connector->base.id, connector->name);
+
+       drm_object_attach_property(&connector->base,
+                                  config->writeback_out_fence_ptr_property, 0);
+
+       drm_object_attach_property(&connector->base,
+                                  config->writeback_fb_id_property, 0);
+
+       drm_object_attach_property(&connector->base,
+                                  config->writeback_pixel_formats_property,
+                                  blob->base.id);
+       wb_connector->pixel_formats_blob_ptr = blob;
+
+       return 0;
+
+attach_fail:
+       drm_connector_cleanup(connector);
+connector_fail:
+       drm_encoder_cleanup(&wb_connector->encoder);
+fail:
+       drm_property_blob_put(blob);
+       return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
+
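
A hedged sketch of a driver using the helper; struct example_priv, the format table and the two vtables stand in for whatever the driver already has:

static int example_create_writeback(struct drm_device *dev,
				    struct example_priv *priv)
{
	static const u32 formats[] = { DRM_FORMAT_XRGB8888 }; /* placeholder */

	return drm_writeback_connector_init(dev, &priv->wb_connector,
					    &example_connector_funcs,
					    &example_encoder_helper_funcs,
					    formats, ARRAY_SIZE(formats));
}
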
+/**
+ * drm_writeback_queue_job - Queue a writeback job for later signalling
+ * @wb_connector: The writeback connector to queue a job on
+ * @job: The job to queue
+ *
+ * This function adds a job to the job_queue for a writeback connector. It
+ * takes ownership of the writeback job, so any other references to the job
+ * must be cleared after calling this function.
+ *
+ * Drivers must ensure that for a given writeback connector, jobs are queued in
+ * exactly the same order as they will be completed by the hardware (and
+ * signaled via drm_writeback_signal_completion).
+ *
+ * For every call to drm_writeback_queue_job() there must be exactly one call to
+ * drm_writeback_signal_completion().
+ *
+ * See also: drm_writeback_signal_completion()
+ */
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+                            struct drm_writeback_job *job)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&wb_connector->job_lock, flags);
+       list_add_tail(&job->list_entry, &wb_connector->job_queue);
+       spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+EXPORT_SYMBOL(drm_writeback_queue_job);
+
+/*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+ * The job cannot be cleaned up directly in drm_writeback_signal_completion,
+ * because it may be called in interrupt context. Dropping the framebuffer
+ * reference can sleep, and so the cleanup is deferred to a workqueue.
+ */
+static void cleanup_work(struct work_struct *work)
+{
+       struct drm_writeback_job *job = container_of(work,
+                                                    struct drm_writeback_job,
+                                                    cleanup_work);
+       drm_framebuffer_put(job->fb);
+       kfree(job);
+}
+
+/**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+ * @wb_connector: The writeback connector whose job is complete
+ * @status: Status code to set in the writeback out_fence (0 for success)
+ *
+ * Drivers should call this to signal the completion of a previously queued
+ * writeback job. It should be called as soon as possible after the hardware
+ * has finished writing, and may be called from interrupt context.
+ * It is the driver's responsibility to ensure that for a given connector, the
+ * hardware completes writeback jobs in the same order as they are queued.
+ *
+ * Unless the driver is holding its own reference to the framebuffer, it must
+ * not be accessed after calling this function.
+ *
+ * See also: drm_writeback_queue_job()
+ */
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+                               int status)
+{
+       unsigned long flags;
+       struct drm_writeback_job *job;
+
+       spin_lock_irqsave(&wb_connector->job_lock, flags);
+       job = list_first_entry_or_null(&wb_connector->job_queue,
+                                      struct drm_writeback_job,
+                                      list_entry);
+       if (job) {
+               list_del(&job->list_entry);
+               if (job->out_fence) {
+                       if (status)
+                               dma_fence_set_error(job->out_fence, status);
+                       dma_fence_signal(job->out_fence);
+                       dma_fence_put(job->out_fence);
+               }
+       }
+       spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+       if (WARN_ON(!job))
+               return;
+
+       INIT_WORK(&job->cleanup_work, cleanup_work);
+       queue_work(system_long_wq, &job->cleanup_work);
+}
+EXPORT_SYMBOL(drm_writeback_signal_completion);
+
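
Queue and completion form a strict per-connector FIFO: exactly one signal per queued job, in submission order. A sketch of the expected pairing in a driver, with all names hypothetical:

/* Atomic commit path, once the hardware has been pointed at job->fb: */
drm_writeback_queue_job(&priv->wb_connector, job);

/* Writeback-done interrupt handler (safe in IRQ context): */
drm_writeback_signal_completion(&priv->wb_connector, 0);
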
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
+{
+       struct dma_fence *fence;
+
+       if (WARN_ON(wb_connector->base.connector_type !=
+                   DRM_MODE_CONNECTOR_WRITEBACK))
+               return NULL;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               return NULL;
+
+       dma_fence_init(fence, &drm_writeback_fence_ops,
+                      &wb_connector->fence_lock, wb_connector->fence_context,
+                      ++wb_connector->fence_seqno);
+
+       return fence;
+}
+EXPORT_SYMBOL(drm_writeback_get_out_fence);
index 38a2a7f..eb9915d 100644 (file)
@@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
        if (!state->crtc)
                return;
 
-       plane->crtc = state->crtc;
-
        if (exynos_crtc->ops->update_plane)
                exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
 }
index c51d925..204c8e4 100644 (file)
@@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
        if (!fb)
                return;
 
-       offset = psbfb->gtt->offset;
+       offset = to_gtt_range(fb->obj[0])->offset;
        stride = fb->pitches[0];
 
        switch (fb->format->depth) {
index cb0a2ae..2f00a37 100644 (file)
@@ -33,6 +33,7 @@
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "psb_drv.h"
 #include "psb_intel_reg.h"
 #include "framebuffer.h"
 #include "gtt.h"
 
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                             struct drm_file *file_priv,
-                                             unsigned int *handle);
-
 static const struct drm_framebuffer_funcs psb_fb_funcs = {
-       .destroy = psb_user_framebuffer_destroy,
-       .create_handle = psb_user_framebuffer_create_handle,
+       .destroy = drm_gem_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
 };
 
 #define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@@ -96,17 +92,18 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
        struct psb_fbdev *fbdev = info->par;
        struct psb_framebuffer *psbfb = &fbdev->pfb;
        struct drm_device *dev = psbfb->base.dev;
+       struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
 
        /*
         *      We have to poke our nose in here. The core fb code assumes
         *      panning is part of the hardware that can be invoked before
         *      the actual fb is mapped. In our case that isn't quite true.
         */
-       if (psbfb->gtt->npage) {
+       if (gtt->npage) {
                /* GTT roll shifts in 4K pages, we need to shift the right
                   number of pages */
                int pages = info->fix.line_length >> 12;
-               psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+               psb_gtt_roll(dev, gtt, var->yoffset * pages);
        }
         return 0;
 }
@@ -117,13 +114,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
        struct psb_framebuffer *psbfb = vma->vm_private_data;
        struct drm_device *dev = psbfb->base.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
+       struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
        int page_num;
        int i;
        unsigned long address;
        int ret;
        unsigned long pfn;
        unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
-                                 psbfb->gtt->offset;
+                                 gtt->offset;
 
        page_num = vma_pages(vma);
        address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -246,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
                return -EINVAL;
 
        drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
-       fb->gtt = gt;
+       fb->base.obj[0] = &gt->gem;
        ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
        if (ret) {
                dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@@ -518,8 +516,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
        drm_framebuffer_unregister_private(&psbfb->base);
        drm_framebuffer_cleanup(&psbfb->base);
 
-       if (psbfb->gtt)
-               drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
+       if (psbfb->base.obj[0])
+               drm_gem_object_put_unlocked(psbfb->base.obj[0]);
        return 0;
 }
 
@@ -576,44 +574,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
        dev_priv->fbdev = NULL;
 }
 
-/**
- *     psb_user_framebuffer_create_handle - add handle to a framebuffer
- *     @fb: framebuffer
- *     @file_priv: our DRM file
- *     @handle: returned handle
- *
- *     Our framebuffer object is a GTT range which also contains a GEM
- *     object. We need to turn it into a handle for userspace. GEM will do
- *     the work for us
- */
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                             struct drm_file *file_priv,
-                                             unsigned int *handle)
-{
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
-       struct gtt_range *r = psbfb->gtt;
-       return drm_gem_handle_create(file_priv, &r->gem, handle);
-}
-
-/**
- *     psb_user_framebuffer_destroy    -       destruct user created fb
- *     @fb: framebuffer
- *
- *     User framebuffers are backed by GEM objects so all we have to do is
- *     clean up a bit and drop the reference, GEM will handle the fallout
- */
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
-       struct gtt_range *r = psbfb->gtt;
-
-       /* Let DRM do its clean up */
-       drm_framebuffer_cleanup(fb);
-       /*  We are no longer using the resource in GEM */
-       drm_gem_object_unreference_unlocked(&r->gem);
-       kfree(fb);
-}
-
 static const struct drm_mode_config_funcs psb_mode_funcs = {
        .fb_create = psb_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
index 395f20b..23dc3c5 100644 (file)
@@ -31,7 +31,6 @@ struct psb_framebuffer {
        struct drm_framebuffer base;
        struct address_space *addr_space;
        struct fb_info *fbdev;
-       struct gtt_range *gtt;
 };
 
 struct psb_fbdev {
index 1312397..913bf4c 100644 (file)
@@ -93,7 +93,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
                return ret;
        }
        /* We have the initial and handle reference but need only one now */
-       drm_gem_object_unreference_unlocked(&r->gem);
+       drm_gem_object_put_unlocked(&r->gem);
        *handlep = handle;
        return 0;
 }
index f3c48a2..08f17f8 100644 (file)
@@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct gtt_range *gtt;
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
@@ -76,12 +76,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                goto gma_pipe_cleaner;
        }
 
+       gtt = to_gtt_range(fb->obj[0]);
+
        /* We are displaying this buffer, make sure it is actually loaded
           into the GTT */
-       ret = psb_gtt_pin(psbfb->gtt);
+       ret = psb_gtt_pin(gtt);
        if (ret < 0)
                goto gma_pipe_set_base_exit;
-       start = psbfb->gtt->offset;
+       start = gtt->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];
 
        REG_WRITE(map->stride, fb->pitches[0]);
@@ -129,7 +131,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 gma_pipe_cleaner:
        /* If there was a previous display we can now unpin it */
        if (old_fb)
-               psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+               psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
 
 gma_pipe_set_base_exit:
        gma_power_end(dev);
@@ -353,7 +355,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
                        gt = container_of(gma_crtc->cursor_obj,
                                          struct gtt_range, gem);
                        psb_gtt_unpin(gt);
-                       drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+                       drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
                        gma_crtc->cursor_obj = NULL;
                }
                return 0;
@@ -429,7 +431,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
        if (gma_crtc->cursor_obj) {
                gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
                psb_gtt_unpin(gt);
-               drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+               drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
        }
 
        gma_crtc->cursor_obj = obj;
@@ -437,7 +439,7 @@ unlock:
        return ret;
 
 unref_cursor:
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
        return ret;
 }
 
@@ -491,7 +493,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
        if (crtc->primary->fb) {
-               gt = to_psb_fb(crtc->primary->fb)->gtt;
+               gt = to_gtt_range(crtc->primary->fb->obj[0]);
                psb_gtt_unpin(gt);
        }
 }
index cdbb350..cb0c3a2 100644 (file)
@@ -53,6 +53,8 @@ struct gtt_range {
        int roll;                       /* Roll applied to the GTT entries */
 };
 
+#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+
 extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
                                             const char *name, int backed,
                                             u32 align);
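
to_gtt_range() is the usual container_of() downcast from the embedded struct drm_gem_object back to the enclosing struct gtt_range. A self-contained illustration of the idiom, with hypothetical type names:

        #include <stddef.h>

        /* the kernel's container_of(), simplified */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct gem   { int handle; };
        struct range { unsigned long offset; struct gem gem; };

        /* given a pointer to the embedded member, recover its parent */
        static struct range *to_range(struct gem *obj)
        {
                return container_of(obj, struct range, gem);
        }
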
index 5c06644..2b9fa01 100644 (file)
@@ -167,7 +167,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
@@ -196,7 +195,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        if (!gma_power_begin(dev, true))
                return 0;
 
-       start = psbfb->gtt->offset;
+       start = to_gtt_range(fb->obj[0])->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];
 
        REG_WRITE(map->stride, fb->pitches[0]);
index 0fff269..1b7fd6a 100644 (file)
@@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
@@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
        if (!gma_power_begin(dev, true))
                return 0;
 
-       start = psbfb->gtt->offset;
+       start = to_gtt_range(fb->obj[0])->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];
 
        REG_WRITE(map->stride, fb->pitches[0]);
index f2ee6aa..1d40746 100644 (file)
@@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
        "Scaling not supported"
 };
 
+#define MAX_ARG_LEN 32
+
 static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
                                 const void *args, int args_len)
 {
-       u8 buf[args_len*2 + 2], status;
-       struct i2c_msg msgs[args_len + 3];
+       u8 buf[MAX_ARG_LEN*2 + 2], status;
+       struct i2c_msg msgs[MAX_ARG_LEN + 3];
        int i, ret;
 
+       if (args_len > MAX_ARG_LEN) {
+               DRM_ERROR("Need to increase arg length\n");
+               return false;
+       }
+
        psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
 
        for (i = 0; i < args_len; i++) {
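
This hunk, like the tda998x one further down, removes a variable-length array in favor of a fixed worst-case buffer plus an explicit bounds check, the standard pattern for making code -Wvla clean. The shape of the transformation, with hypothetical names:

        #define MAX_LEN 32

        static bool write_cmd(const u8 *args, int args_len)
        {
                u8 buf[MAX_LEN * 2 + 2];  /* was: u8 buf[args_len * 2 + 2], a VLA */

                if (args_len > MAX_LEN)   /* reject rather than overflow */
                        return false;
                /* ... fill buf from args and transmit ... */
                return true;
        }
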
index 2269be9..bb77420 100644 (file)
@@ -859,7 +859,6 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
                return PTR_ERR(crtc_state);
 
        if (src_w != crtc_w || src_h != crtc_h) {
-               DRM_ERROR("Scale not support!!!\n");
                return -EINVAL;
        }
 
index 6ebd884..0038c97 100644 (file)
@@ -589,13 +589,22 @@ out:
        return ret;
 }
 
+#define MAX_WRITE_RANGE_BUF 32
+
 static void
 reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
 {
        struct i2c_client *client = priv->hdmi;
-       u8 buf[cnt+1];
+       /* Sized for the largest cnt we accept; enforced by the check below */
+       u8 buf[MAX_WRITE_RANGE_BUF + 1];
        int ret;
 
+       if (cnt > MAX_WRITE_RANGE_BUF) {
+               dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
+                               MAX_WRITE_RANGE_BUF);
+               return;
+       }
+
        buf[0] = REG2ADDR(reg);
        memcpy(&buf[1], p, cnt);
 
@@ -805,7 +814,7 @@ static void
 tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
                 union hdmi_infoframe *frame)
 {
-       u8 buf[32];
+       u8 buf[MAX_WRITE_RANGE_BUF];
        ssize_t len;
 
        len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
index 80b3e16..caac994 100644 (file)
 #define CH7017_BANG_LIMIT_CONTROL      0x7f
 
 struct ch7017_priv {
-       uint8_t dummy;
+       u8 dummy;
 };
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
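
The uint8_t/uint16_t -> u8/u16 conversions running through the dvo drivers below are a style cleanup with no functional effect: in kernel code the short forms from <linux/types.h> are preferred, and both spellings name the same fixed-width types, essentially:

        typedef unsigned char  u8;
        typedef unsigned short u16;
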
@@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 
 static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
 {
-       uint8_t buf[2] = { addr, val };
+       u8 buf[2] = { addr, val };
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
                            const struct drm_display_mode *mode,
                            const struct drm_display_mode *adjusted_mode)
 {
-       uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
-       uint8_t outputs_enable, lvds_control_2, lvds_power_down;
-       uint8_t horizontal_active_pixel_input;
-       uint8_t horizontal_active_pixel_output, vertical_active_line_output;
-       uint8_t active_input_line_output;
+       u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+       u8 outputs_enable, lvds_control_2, lvds_power_down;
+       u8 horizontal_active_pixel_input;
+       u8 horizontal_active_pixel_output, vertical_active_line_output;
+       u8 active_input_line_output;
 
        DRM_DEBUG_KMS("Registers before mode setting\n");
        ch7017_dump_regs(dvo);
@@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 /* set the CH7017 power state */
 static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-       uint8_t val;
+       u8 val;
 
        ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
 
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
 
 static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
 {
-       uint8_t val;
+       u8 val;
 
        ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
 
@@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint8_t val;
+       u8 val;
 
 #define DUMP(reg)                                      \
 do {                                                   \
index 7aeeffd..397ac52 100644 (file)
@@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
 static struct ch7xxx_id_struct {
-       uint8_t vid;
+       u8 vid;
        char *name;
 } ch7xxx_ids[] = {
        { CH7011_VID, "CH7011" },
@@ -96,7 +96,7 @@ static struct ch7xxx_id_struct {
 };
 
 static struct ch7xxx_did_struct {
-       uint8_t did;
+       u8 did;
        char *name;
 } ch7xxx_dids[] = {
        { CH7xxx_DID, "CH7XXX" },
@@ -107,7 +107,7 @@ struct ch7xxx_priv {
        bool quiet;
 };
 
-static char *ch7xxx_get_id(uint8_t vid)
+static char *ch7xxx_get_id(u8 vid)
 {
        int i;
 
@@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid)
        return NULL;
 }
 
-static char *ch7xxx_get_did(uint8_t did)
+static char *ch7xxx_get_did(u8 did)
 {
        int i;
 
@@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did)
 }
 
 /** Reads an 8 bit register */
-static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 }
 
 /** Writes an 8 bit register */
-static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 {
        /* this will detect the CH7xxx chip on the specified i2c bus */
        struct ch7xxx_priv *ch7xxx;
-       uint8_t vendor, device;
+       u8 vendor, device;
        char *name, *devid;
 
        ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
@@ -244,7 +244,7 @@ out:
 
 static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
 {
-       uint8_t cdet, orig_pm, pm;
+       u8 cdet, orig_pm, pm;
 
        ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
 
@@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
                            const struct drm_display_mode *mode,
                            const struct drm_display_mode *adjusted_mode)
 {
-       uint8_t tvco, tpcp, tpd, tlpf, idf;
+       u8 tvco, tpcp, tpd, tlpf, idf;
 
        if (mode->clock <= 65000) {
                tvco = 0x23;
@@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
        int i;
 
        for (i = 0; i < CH7xxx_NUM_REGS; i++) {
-               uint8_t val;
+               u8 val;
                if ((i % 8) == 0)
                        DRM_DEBUG_KMS("\n %02X: ", i);
                ch7xxx_readb(dvo, i, &val);
index c73aff1..24278cc 100644 (file)
  * instead. The following list contains all registers that
  * require saving.
  */
-static const uint16_t backup_addresses[] = {
+static const u16 backup_addresses[] = {
        0x11, 0x12,
        0x18, 0x19, 0x1a, 0x1f,
        0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
@@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = {
 struct ivch_priv {
        bool quiet;
 
-       uint16_t width, height;
+       u16 width, height;
 
        /* Register backup */
 
-       uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+       u16 reg_backup[ARRAY_SIZE(backup_addresses)];
 };
 
 
@@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
  *
  * Each of the 256 registers are 16 bits long.
  */
-static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
 {
        struct ivch_priv *priv = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 }
 
 /* Writes a 16-bit register on the ivch */
-static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
 {
        struct ivch_priv *priv = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
                      struct i2c_adapter *adapter)
 {
        struct ivch_priv *priv;
-       uint16_t temp;
+       u16 temp;
        int i;
 
        priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
@@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
        int i;
-       uint16_t vr01, vr30, backlight;
+       u16 vr01, vr30, backlight;
 
        ivch_reset(dvo);
 
@@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 
 static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
 {
-       uint16_t vr01;
+       u16 vr01;
 
        ivch_reset(dvo);
 
@@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
                          const struct drm_display_mode *adjusted_mode)
 {
        struct ivch_priv *priv = dvo->dev_priv;
-       uint16_t vr40 = 0;
-       uint16_t vr01 = 0;
-       uint16_t vr10;
+       u16 vr40 = 0;
+       u16 vr01 = 0;
+       u16 vr10;
 
        ivch_reset(dvo);
 
@@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 
        if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
            mode->vdisplay != adjusted_mode->crtc_vdisplay) {
-               uint16_t x_ratio, y_ratio;
+               u16 x_ratio, y_ratio;
 
                vr01 |= VR01_PANEL_FIT_ENABLE;
                vr40 |= VR40_CLOCK_GATING_ENABLE;
@@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint16_t val;
+       u16 val;
 
        ivch_read(dvo, VR00, &val);
        DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
index 2379c33..c584e01 100644 (file)
@@ -191,8 +191,8 @@ enum {
 };
 
 struct ns2501_reg {
-        uint8_t offset;
-        uint8_t value;
+       u8 offset;
+       u8 value;
 };
 
 /*
@@ -202,23 +202,23 @@ struct ns2501_reg {
  * read all this with a grain of salt.
  */
 struct ns2501_configuration {
-       uint8_t sync;           /* configuration of the C0 register */
-       uint8_t conf;           /* configuration register 8 */
-       uint8_t syncb;          /* configuration register 41 */
-       uint8_t dither;         /* configuration of the dithering */
-       uint8_t pll_a;          /* PLL configuration, register A, 1B */
-       uint16_t pll_b;         /* PLL configuration, register B, 1C/1D */
-       uint16_t hstart;        /* horizontal start, registers C1/C2 */
-       uint16_t hstop;         /* horizontal total, registers C3/C4 */
-       uint16_t vstart;        /* vertical start, registers C5/C6 */
-       uint16_t vstop;         /* vertical total, registers C7/C8 */
-       uint16_t vsync;         /* manual vertical sync start, 80/81 */
-       uint16_t vtotal;        /* number of lines generated, 82/83 */
-       uint16_t hpos;          /* horizontal position + 256, 98/99  */
-       uint16_t vpos;          /* vertical position, 8e/8f */
-       uint16_t voffs;         /* vertical output offset, 9c/9d */
-       uint16_t hscale;        /* horizontal scaling factor, b8/b9 */
-       uint16_t vscale;        /* vertical scaling factor, 10/11 */
+       u8 sync;                /* configuration of the C0 register */
+       u8 conf;                /* configuration register 8 */
+       u8 syncb;               /* configuration register 41 */
+       u8 dither;              /* configuration of the dithering */
+       u8 pll_a;               /* PLL configuration, register A, 1B */
+       u16 pll_b;              /* PLL configuration, register B, 1C/1D */
+       u16 hstart;             /* horizontal start, registers C1/C2 */
+       u16 hstop;              /* horizontal total, registers C3/C4 */
+       u16 vstart;             /* vertical start, registers C5/C6 */
+       u16 vstop;              /* vertical total, registers C7/C8 */
+       u16 vsync;              /* manual vertical sync start, 80/81 */
+       u16 vtotal;             /* number of lines generated, 82/83 */
+       u16 hpos;               /* horizontal position + 256, 98/99  */
+       u16 vpos;               /* vertical position, 8e/8f */
+       u16 voffs;              /* vertical output offset, 9c/9d */
+       u16 hscale;             /* horizontal scaling factor, b8/b9 */
+       u16 vscale;             /* vertical scaling factor, 10/11 */
 };
 
 /*
@@ -389,7 +389,7 @@ struct ns2501_priv {
 ** If it returns false, it might be wise to enable the
 ** DVO with the above function.
 */
-static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct ns2501_priv *ns = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
 ** If it returns false, it might be wise to enable the
 ** DVO with the above function.
 */
-static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct ns2501_priv *ns = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
 
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
index 1c1a067..4ae5d8f 100644 (file)
@@ -65,7 +65,7 @@ struct sil164_priv {
 
 #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
 
-static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct sil164_priv *sil = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        return false;
 }
 
-static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct sil164_priv *sil = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -173,7 +173,7 @@ out:
 
 static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
 {
-       uint8_t reg9;
+       u8 reg9;
 
        sil164_readb(dvo, SIL164_REG9, &reg9);
 
@@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
 
 static void sil164_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint8_t val;
+       u8 val;
 
        sil164_readb(dvo, SIL164_FREQ_LO, &val);
        DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
index 31e181d..d603bc2 100644 (file)
@@ -90,7 +90,7 @@ struct tfp410_priv {
        bool quiet;
 };
 
-static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        return false;
 }
 
-static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 
 static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
 {
-       uint8_t ch1, ch2;
+       u8 ch1, ch2;
 
        if (tfp410_readb(dvo, addr+0, &ch1) &&
            tfp410_readb(dvo, addr+1, &ch2))
@@ -203,7 +203,7 @@ out:
 static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
 {
        enum drm_connector_status ret = connector_status_disconnected;
-       uint8_t ctl2;
+       u8 ctl2;
 
        if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
                if (ctl2 & TFP410_CTL_2_RSEN)
@@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
 /* set the tfp410 power state */
 static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-       uint8_t ctl1;
+       u8 ctl1;
 
        if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
                return;
@@ -251,7 +251,7 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
 
 static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
 {
-       uint8_t ctl1;
+       u8 ctl1;
 
        if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
                return false;
@@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
 
 static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint8_t val, val2;
+       u8 val, val2;
 
        tfp410_readb(dvo, TFP410_REV, &val);
        DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
index 7c9ec4f..380eeb2 100644 (file)
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
        }
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+       ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
                                  size, I915_GTT_PAGE_SIZE,
                                  I915_COLOR_UNEVICTABLE,
                                  start, end, flags);
index b51c05d..0651e63 100644 (file)
@@ -172,6 +172,7 @@ struct decode_info {
 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
 #define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
 #define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)
 
 #define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
 #define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -1256,7 +1257,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
        if (!info->async_flip)
                return 0;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
                                GENMASK(12, 10)) >> 10;
@@ -1284,7 +1287,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 
        set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
                      info->surf_val << 12);
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
                              info->stride_val);
                set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1308,7 +1313,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 
        if (IS_BROADWELL(dev_priv))
                return gen8_decode_mi_display_flip(s, info);
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv))
                return skl_decode_mi_display_flip(s, info);
 
        return -ENODEV;
@@ -1317,26 +1324,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 static int check_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
 {
-       struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-       if (IS_BROADWELL(dev_priv)
-               || IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv))
-               return gen8_check_mi_display_flip(s, info);
-       return -ENODEV;
+       return gen8_check_mi_display_flip(s, info);
 }
 
 static int update_plane_mmio_from_mi_display_flip(
                struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
 {
-       struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-       if (IS_BROADWELL(dev_priv)
-               || IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv))
-               return gen8_update_plane_mmio_from_mi_display_flip(s, info);
-       return -ENODEV;
+       return gen8_update_plane_mmio_from_mi_display_flip(s, info);
 }
 
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1615,15 +1610,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
  */
 static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
-       struct intel_gvt *gvt = s->vgpu->gvt;
-
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               /* BDW decides privilege based on address space */
-               if (cmd_val(s, 0) & (1 << 8) &&
+       /*
+        * Decide privilege based on address space: bit 8 of
+        * MI_BATCH_BUFFER_START selects the PPGTT, i.e. a non-privileged
+        * batch, which is only scanned if scan_nonprivbb asks for it.
+        */
+       if (cmd_val(s, 0) & (1 << 8) &&
                        !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
-                       return 0;
-       }
+               return 0;
        return 1;
 }
 
@@ -2349,6 +2339,9 @@ static struct cmd_info cmd_info[] = {
        {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
                0, 16, NULL},
 
+       {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+               0, 16, NULL},
+
        {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
 
        {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
index 6d8180e..6ee50cb 100644 (file)
@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int pipe;
 
+       if (IS_BROXTON(dev_priv)) {
+               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+                       BXT_DE_PORT_HP_DDIB |
+                       BXT_DE_PORT_HP_DDIC);
+
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                               BXT_DE_PORT_HP_DDIA;
+               }
+
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                               BXT_DE_PORT_HP_DDIB;
+               }
+
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                               BXT_DE_PORT_HP_DDIC;
+               }
+
+               return;
+       }
+
        vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
@@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        for_each_pipe(dev_priv, pipe) {
                vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
                vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
-               vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
-               vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+               vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+               vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
        }
 
        vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
        struct intel_gvt_irq *irq = &gvt->irq;
        struct intel_vgpu *vgpu;
        int pipe, id;
+       bool found = false;
 
-       if (WARN_ON(!mutex_is_locked(&gvt->lock)))
-               return;
-
+       mutex_lock(&gvt->lock);
        for_each_active_vgpu(gvt, vgpu, id) {
                for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
-                       if (pipe_is_enabled(vgpu, pipe))
-                               goto out;
+                       if (pipe_is_enabled(vgpu, pipe)) {
+                               found = true;
+                               break;
+                       }
                }
+               if (found)
+                       break;
        }
 
        /* all the pipes are disabled */
-       hrtimer_cancel(&irq->vblank_timer.timer);
-       return;
-
-out:
-       hrtimer_start(&irq->vblank_timer.timer,
-               ktime_add_ns(ktime_get(), irq->vblank_timer.period),
-               HRTIMER_MODE_ABS);
-
+       if (!found)
+               hrtimer_cancel(&irq->vblank_timer.timer);
+       else
+               hrtimer_start(&irq->vblank_timer.timer,
+                       ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+                       HRTIMER_MODE_ABS);
+       mutex_unlock(&gvt->lock);
 }
 
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
 {
        int pipe;
 
+       mutex_lock(&vgpu->vgpu_lock);
        for_each_pipe(vgpu->gvt->dev_priv, pipe)
                emulate_vblank_on_pipe(vgpu, pipe);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
        struct intel_vgpu *vgpu;
        int id;
 
-       if (WARN_ON(!mutex_is_locked(&gvt->lock)))
-               return;
-
+       mutex_lock(&gvt->lock);
        for_each_active_vgpu(gvt, vgpu, id)
                emulate_vblank(vgpu);
+       mutex_unlock(&gvt->lock);
 }
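
The locking added in these display.c hunks establishes the nesting order the rest of the series relies on: the global gvt->lock is taken first, and the per-vGPU vgpu_lock (see emulate_vblank() above) nests inside it, roughly:

        mutex_lock(&gvt->lock);          /* global, outermost */
        mutex_lock(&vgpu->vgpu_lock);    /* per-vGPU, nested inside */
        /* ... per-vGPU work ... */
        mutex_unlock(&vgpu->vgpu_lock);
        mutex_unlock(&gvt->lock);
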
 
 /**
index 6f4f8e9..6e3f566 100644 (file)
@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
        return obj;
 }
 
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+       return c && c->x_hot <= c->width && c->y_hot <= c->height;
+}
+
 static int vgpu_get_plane_info(struct drm_device *dev,
                struct intel_vgpu *vgpu,
                struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;
 
-               /* The invalid cursor hotspot value is delivered to host
-                * until we find a way to get the cursor hotspot info of
-                * guest OS.
-                */
-               info->x_hot = UINT_MAX;
-               info->y_hot = UINT_MAX;
+               if (validate_hotspot(&c)) {
+                       info->x_hot = c.x_hot;
+                       info->y_hot = c.y_hot;
+               } else {
+                       info->x_hot = UINT_MAX;
+                       info->y_hot = UINT_MAX;
+               }
+
                info->size = (((info->stride * c.height * c.bpp) / 8)
                                + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
index f613376..4b98539 100644 (file)
@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
        return chr;
 }
 
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+       int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+       int port = -EINVAL;
+
+       if (port_select == 1)
+               port = PORT_B;
+       else if (port_select == 2)
+               port = PORT_C;
+       else if (port_select == 3)
+               port = PORT_D;
+       return port;
+}
+
 static inline int get_port_from_gmbus0(u32 gmbus0)
 {
        int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
                        unsigned int offset, void *p_data, unsigned int bytes)
 {
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int port, pin_select;
 
        memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
        if (pin_select == 0)
                return 0;
 
-       port = get_port_from_gmbus0(pin_select);
+       if (IS_BROXTON(dev_priv))
+               port = bxt_get_port_from_gmbus0(pin_select);
+       else
+               port = get_port_from_gmbus0(pin_select);
        if (WARN_ON(port < 0))
                return 0;
 
index 427e40e..714d709 100644 (file)
@@ -146,14 +146,11 @@ struct execlist_ring_context {
        u32 nop4;
        u32 lri_cmd_2;
        struct execlist_mmio_pair ctx_timestamp;
-       struct execlist_mmio_pair pdp3_UDW;
-       struct execlist_mmio_pair pdp3_LDW;
-       struct execlist_mmio_pair pdp2_UDW;
-       struct execlist_mmio_pair pdp2_LDW;
-       struct execlist_mmio_pair pdp1_UDW;
-       struct execlist_mmio_pair pdp1_LDW;
-       struct execlist_mmio_pair pdp0_UDW;
-       struct execlist_mmio_pair pdp0_LDW;
+       /*
+        * pdps[8] = { pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+        *             pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW }
+        */
+       struct execlist_mmio_pair pdps[8];
 };
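
Collapsing the eight named pdp fields into a pdps[8] array keeps the layout spelled out in the comment; an accessor for PDP n would index it as in this sketch (helper name hypothetical):

        /* pdps[] holds pdp3_UDW, pdp3_LDW, ..., pdp0_UDW, pdp0_LDW in order */
        static inline struct execlist_mmio_pair *
        pdp_pair(struct execlist_ring_context *ctx, int n, bool udw)
        {
                return &ctx->pdps[(3 - n) * 2 + (udw ? 0 : 1)];
        }
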
 
 struct intel_vgpu_elsp_dwords {
index 1c12068..face664 100644 (file)
@@ -36,6 +36,7 @@
 #include <uapi/drm/drm_fourcc.h>
 #include "i915_drv.h"
 #include "gvt.h"
+#include "i915_pvinfo.h"
 
 #define PRIMARY_FORMAT_NUM     16
 struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
        u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
        u32 stride = stride_reg;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                switch (tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (!plane->enabled)
                return -ENODEV;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
                _PLANE_CTL_TILED_SHIFT;
                fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        }
 
        plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
-               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+               (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) ?
                        (_PRI_PLANE_STRIDE_MASK >> 6) :
                                _PRI_PLANE_STRIDE_MASK, plane->bpp);
 
@@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode)
        int cursor_pixel_formats_index = 4;
 
        switch (mode) {
-       case CURSOR_MODE_128_ARGB_AX:
+       case MCURSOR_MODE_128_ARGB_AX:
                cursor_pixel_formats_index = 0;
                break;
-       case CURSOR_MODE_256_ARGB_AX:
+       case MCURSOR_MODE_256_ARGB_AX:
                cursor_pixel_formats_index = 1;
                break;
-       case CURSOR_MODE_64_ARGB_AX:
+       case MCURSOR_MODE_64_ARGB_AX:
                cursor_pixel_formats_index = 2;
                break;
-       case CURSOR_MODE_64_32B_AX:
+       case MCURSOR_MODE_64_32B_AX:
                cursor_pixel_formats_index = 3;
                break;
 
@@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
                return -ENODEV;
 
        val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
-       mode = val & CURSOR_MODE;
-       plane->enabled = (mode != CURSOR_MODE_DISABLE);
+       mode = val & MCURSOR_MODE;
+       plane->enabled = (mode != MCURSOR_MODE_DISABLE);
        if (!plane->enabled)
                return -ENODEV;
 
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
        plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
        plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
 
+       plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+       plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
        return 0;
 }
 
index a73e1d4..4ac18b4 100644 (file)
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 
        h = (struct gvt_firmware_header *)fw->data;
 
-       crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+       crc32_start = offsetofend(struct gvt_firmware_header, crc32);
        mem = fw->data + crc32_start;
 
 #define VERIFY(s, a, b) do { \
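
offsetofend() states the intent directly: start checksumming right after the crc32 field, instead of hard-coding that field's 4-byte size. The kernel's definition is essentially:

        #define offsetofend(TYPE, MEMBER) \
                (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

so the two expressions are equivalent here, but the new one survives a change to the field's type.
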
index 2329654..642e216 100644 (file)
@@ -1973,7 +1973,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
         * is GTT_TYPE_PPGTT_PTE_PT, fully filled with the scratch page mfn.
         */
-       if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+       if (type > GTT_TYPE_PPGTT_PTE_PT) {
                struct intel_gvt_gtt_entry se;
 
                memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2257,13 +2257,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 
        gvt_dbg_core("init gtt\n");
 
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
-               gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
-       } else {
-               return -ENODEV;
-       }
+       gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+       gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
 
        page = (void *)get_zeroed_page(GFP_KERNEL);
        if (!page) {
index 61bd14f..4e65266 100644 (file)
@@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt)
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               info->max_support_vgpus = 8;
-               info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
-               info->mmio_size = 2 * 1024 * 1024;
-               info->mmio_bar = 0;
-               info->gtt_start_offset = 8 * 1024 * 1024;
-               info->gtt_entry_size = 8;
-               info->gtt_entry_size_shift = 3;
-               info->gmadr_bytes_in_cmd = 8;
-               info->max_surface_size = 36 * 1024 * 1024;
-       }
+       info->max_support_vgpus = 8;
+       info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+       info->mmio_size = 2 * 1024 * 1024;
+       info->mmio_bar = 0;
+       info->gtt_start_offset = 8 * 1024 * 1024;
+       info->gtt_entry_size = 8;
+       info->gtt_entry_size_shift = 3;
+       info->gmadr_bytes_in_cmd = 8;
+       info->max_surface_size = 36 * 1024 * 1024;
        info->msi_cap_offset = pdev->msi_cap;
 }
 
@@ -271,11 +268,8 @@ static int gvt_service_thread(void *data)
                        continue;
 
                if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
-                                       (void *)&gvt->service_request)) {
-                       mutex_lock(&gvt->lock);
+                                       (void *)&gvt->service_request))
                        intel_gvt_emulate_vblank(gvt);
-                       mutex_unlock(&gvt->lock);
-               }
 
                if (test_bit(INTEL_GVT_REQUEST_SCHED,
                                (void *)&gvt->service_request) ||
@@ -379,6 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
        idr_init(&gvt->vgpu_idr);
        spin_lock_init(&gvt->scheduler.mmio_context_lock);
        mutex_init(&gvt->lock);
+       mutex_init(&gvt->sched_lock);
        gvt->dev_priv = dev_priv;
 
        init_device_info(gvt);
index 05d15a0..de2a3a2 100644 (file)
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
 
 struct intel_vgpu {
        struct intel_gvt *gvt;
+       struct mutex vgpu_lock;
        int id;
        unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
        bool active;
        bool pv_notified;
        bool failsafe;
        unsigned int resetting_eng;
+
+       /* Both sched_data and sched_ctl can be seen as part of the global
+        * gvt scheduler structure, so the two fields below are protected
+        * by sched_lock, not vgpu_lock.
+        */
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;
 
@@ -294,7 +300,13 @@ struct intel_vgpu_type {
 };
 
 struct intel_gvt {
+       /* GVT-scope lock: protects GVT itself and any resource not yet
+        * covered by the finer-grained locks (vgpu_lock and sched_lock).
+        */
        struct mutex lock;
+       /* scheduler-scope lock: protects gvt and vgpu scheduling data */
+       struct mutex sched_lock;
+
        struct drm_i915_private *dev_priv;
        struct idr vgpu_idr;    /* vGPU IDR pool */
 
@@ -314,6 +326,10 @@ struct intel_gvt {
 
        struct task_struct *service_thread;
        wait_queue_head_t service_thread_wq;
+
+       /* service_request is only accessed with atomic bit operations,
+        * so the big gvt lock is not needed for it.
+        */
        unsigned long service_request;
 
        struct {
@@ -361,9 +377,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_aperture_sz(gvt)     (gvt->dev_priv->ggtt.mappable_end)
 #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 
-#define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.vm.total)
 #define gvt_ggtt_sz(gvt) \
-       ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+       ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
 #define gvt_hidden_sz(gvt)       (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 
 #define gvt_aperture_gmadr_base(gvt) (0)
index bcbc47a..e39492a 100644 (file)
@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
                return D_SKL;
        else if (IS_KABYLAKE(gvt->dev_priv))
                return D_KBL;
+       else if (IS_BROXTON(gvt->dev_priv))
+               return D_BXT;
 
        return 0;
 }
@@ -255,7 +257,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+               || IS_KABYLAKE(vgpu->gvt->dev_priv)
+               || IS_BROXTON(vgpu->gvt->dev_priv)) {
                switch (offset) {
                case FORCEWAKE_RENDER_GEN9_REG:
                        ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +319,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                }
        }
 
+       /* vgpu_lock is already held by the MMIO r/w emulation path */
        intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
        /* sw will wait for the device to ack the reset request */
@@ -420,7 +424,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
        else
                vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+       /* vgpu_lock is already held by the MMIO r/w emulation path; drop it
+        * so intel_gvt_check_vblank_emulation() can take gvt->lock without
+        * inverting the lock order.
+        */
+       mutex_unlock(&vgpu->vgpu_lock);
        intel_gvt_check_vblank_emulation(vgpu->gvt);
+       mutex_lock(&vgpu->vgpu_lock);
        return 0;
 }
 
@@ -857,7 +864,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        data = vgpu_vreg(vgpu, offset);
 
        if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv))
+               || IS_KABYLAKE(vgpu->gvt->dev_priv)
+               || IS_BROXTON(vgpu->gvt->dev_priv))
                && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
                /* SKL DPB/C/D aux ctl register changed */
                return 0;
@@ -1209,8 +1217,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                ret = handle_g2v_notification(vgpu, data);
                break;
        /* add xhot and yhot to handled list to avoid error log */
-       case 0x78830:
-       case 0x78834:
+       case _vgtif_reg(cursor_x_hot):
+       case _vgtif_reg(cursor_y_hot):
        case _vgtif_reg(pdp[0].lo):
        case _vgtif_reg(pdp[0].hi):
        case _vgtif_reg(pdp[1].lo):
@@ -1369,6 +1377,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
                                *data0 = 0x1e1a1100;
                        else
                                *data0 = 0x61514b3d;
+               } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+                       /**
+                        * "Read memory latency" command on gen9.
+                        * Below memory latency values are read
+                        * from Broxton MRB.
+                        */
+                       if (!*data0)
+                               *data0 = 0x16080707;
+                       else
+                               *data0 = 0x16161616;
                }
                break;
        case SKL_PCODE_CDCLK_CONTROL:
@@ -1426,8 +1444,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 {
        u32 v = *(u32 *)p_data;
 
-       v &= (1 << 31) | (1 << 29) | (1 << 9) |
-            (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+       if (IS_BROXTON(vgpu->gvt->dev_priv))
+               v &= (1 << 31) | (1 << 29);
+       else
+               v &= (1 << 31) | (1 << 29) | (1 << 9) |
+                       (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
        v |= (v >> 1);
 
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1447,6 +1468,102 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (v & BXT_DE_PLL_PLL_ENABLE)
+               v |= BXT_DE_PLL_LOCK;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
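
bxt_de_pll_enable_write(), and bxt_port_pll_enable_write() after it, emulate a PLL that locks instantly: reflecting the LOCK bit back as soon as the guest sets the enable bit lets the guest's poll loop complete without timing out. The guest-side pattern this satisfies looks roughly like (a sketch, not the literal i915 code):

        I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
        if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
                DRM_ERROR("timeout waiting for DE PLL lock\n");
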
+
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (v & PORT_PLL_ENABLE)
+               v |= PORT_PLL_LOCK;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+       u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
+       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = vgpu_vreg(vgpu, offset);
+
+       v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+               vgpu_vreg(vgpu, offset - 0x600) = v;
+               vgpu_vreg(vgpu, offset - 0x800) = v;
+       } else {
+               vgpu_vreg(vgpu, offset - 0x400) = v;
+               vgpu_vreg(vgpu, offset - 0x600) = v;
+       }
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (v & BIT(0)) {
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+                       ~PHY_RESERVED;
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+                       PHY_POWER_GOOD;
+       }
+
+       if (v & BIT(1)) {
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+                       ~PHY_RESERVED;
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+                       PHY_POWER_GOOD;
+       }
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -2670,17 +2787,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
        MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
        MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
-       MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
-       MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
-       MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
-       MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+       MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+       MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+       MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+       MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
 
        MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
        MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2805,53 +2922,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
        MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
        MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
 
-       MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
-       MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+       MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+               NULL, NULL);
+       MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+               NULL, NULL);
 
        MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
        MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
        MMIO_D(RC6_LOCATION, D_SKL_PLUS);
        MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
 
        /* TRTT */
-       MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
-       MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+       MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+               NULL, gen9_trtte_write);
+       MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-       MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
        MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
        MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
        MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
-       MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
 
        MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
        MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2869,11 +2990,185 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+       MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
 
        MMIO_D(_MMIO(0x4ab8), D_KBL);
-       MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+       MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+
+       return 0;
+}
+
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       int ret;
+
+       MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+       MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+       MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+       MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+       MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+       MMIO_D(ERROR_GEN6, D_BXT);
+       MMIO_D(DONE_REG, D_BXT);
+       MMIO_D(EIR, D_BXT);
+       MMIO_D(PGTBL_ER, D_BXT);
+       MMIO_D(_MMIO(0x4194), D_BXT);
+       MMIO_D(_MMIO(0x4294), D_BXT);
+       MMIO_D(_MMIO(0x4494), D_BXT);
+
+       MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+       MMIO_RING_D(RING_DMA_FADD, D_BXT);
+       MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+       MMIO_RING_D(RING_IPEHR, D_BXT);
+       MMIO_RING_D(RING_INSTPS, D_BXT);
+       MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+       MMIO_RING_D(RING_BBSTATE, D_BXT);
+       MMIO_RING_D(RING_IPEIR, D_BXT);
+
+       MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+       MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+       MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+       MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+               NULL, bxt_phy_ctl_family_write);
+       MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+               NULL, bxt_phy_ctl_family_write);
+       MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+       MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+       MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+       MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+               NULL, bxt_port_pll_enable_write);
+       MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+               NULL, bxt_port_pll_enable_write);
+       MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+               bxt_port_pll_enable_write);
+
+       MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+       MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+       MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+               NULL, bxt_pcs_dw12_grp_write);
+       MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+               bxt_port_tx_dw3_read, NULL);
+       MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+       MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+               NULL, bxt_pcs_dw12_grp_write);
+       MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+               bxt_port_tx_dw3_read, NULL);
+       MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+       MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+               NULL, bxt_pcs_dw12_grp_write);
+       MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+               bxt_port_tx_dw3_read, NULL);
+       MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+       MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+       MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+       MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+       MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+       MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+       MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+       MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+       MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+       MMIO_D(RC6_CTX_BASE, D_BXT);
+
+       MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+       MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+       MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+       MMIO_D(GEN6_GFXPAUSE, D_BXT);
+       MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+       MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
 
        return 0;
 }
@@ -2965,6 +3260,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                ret = init_skl_mmio_info(gvt);
                if (ret)
                        goto err;
+       } else if (IS_BROXTON(dev_priv)) {
+               ret = init_broadwell_mmio_info(gvt);
+               if (ret)
+                       goto err;
+               ret = init_skl_mmio_info(gvt);
+               if (ret)
+                       goto err;
+               ret = init_bxt_mmio_info(gvt);
+               if (ret)
+                       goto err;
        }
 
        gvt->mmio.mmio_block = mmio_blocks;
index 7a041b3..5daa23a 100644 (file)
@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
                        clear_bits |= (1 << bit);
        }
 
-       WARN_ON(!up_irq_info);
+       if (WARN_ON(!up_irq_info))
+               return;
 
        if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
                u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
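
The WARN_ON change above is more than cosmetic: WARN_ON() evaluates to the truth value of its condition, so `if (WARN_ON(!up_irq_info)) return;` both logs the anomaly and guards the up_irq_info dereference that follows, where the old bare WARN_ON logged and then fell through. A minimal userspace sketch of the idiom (warn_on() here is an illustrative stand-in, not the kernel macro):

#include <stdio.h>

struct irq_info {
	int group;
};

/* Illustrative stand-in for the kernel's WARN_ON(): it logs when the
 * condition is true and also returns it, so one statement can both
 * complain and bail out. */
static int warn_on(int cond, const char *what)
{
	if (cond)
		fprintf(stderr, "WARNING: %s\n", what);
	return cond;
}

static void report_group(struct irq_info *info)
{
	if (warn_on(!info, "no upstream irq info"))
		return;	/* without this return, info->group would fault */
	printf("group = %d\n", info->group);
}

int main(void)
{
	report_group(NULL);	/* warns and returns instead of crashing */
	return 0;
}
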
@@ -580,7 +581,9 @@ static void gen8_init_irq(
 
                SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
                SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-       } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+       } else if (IS_SKYLAKE(gvt->dev_priv)
+                       || IS_KABYLAKE(gvt->dev_priv)
+                       || IS_BROXTON(gvt->dev_priv)) {
                SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
 
        gvt_dbg_core("init irq framework\n");
 
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               irq->ops = &gen8_irq_ops;
-               irq->irq_map = gen8_irq_map;
-       } else {
-               WARN_ON(1);
-               return -ENODEV;
-       }
+       irq->ops = &gen8_irq_ops;
+       irq->irq_map = gen8_irq_map;
 
        /* common event initialization */
        init_events(irq);
index b31eb36..9943660 100644 (file)
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                return;
 
        gvt = vgpu->gvt;
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
        if (reg_is_mmio(gvt, offset)) {
                if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                        memcpy(pt, p_data, bytes);
 
        }
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
                return 0;
        }
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
@@ -156,7 +156,7 @@ err:
        gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
                        offset, bytes);
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
 
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                return 0;
        }
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
@@ -220,7 +220,7 @@ err:
        gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
                     bytes);
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
 
index 71b6208..e474188 100644 (file)
@@ -42,15 +42,16 @@ struct intel_vgpu;
 #define D_BDW   (1 << 0)
 #define D_SKL  (1 << 1)
 #define D_KBL  (1 << 2)
+#define D_BXT  (1 << 3)
 
-#define D_GEN9PLUS     (D_SKL | D_KBL)
-#define D_GEN8PLUS     (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS     (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS     (D_BDW | D_SKL | D_KBL | D_BXT)
 
-#define D_SKL_PLUS     (D_SKL | D_KBL)
-#define D_BDW_PLUS     (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS     (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS     (D_BDW | D_SKL | D_KBL | D_BXT)
 
 #define D_PRE_SKL      (D_BDW)
-#define D_ALL          (D_BDW | D_SKL | D_KBL)
+#define D_ALL          (D_BDW | D_SKL | D_KBL | D_BXT)
 
 typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
                             unsigned int);
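
The D_* scheme above is a plain one-bit-per-platform mask: each tracked MMIO entry carries the set of platforms it applies to, and the *_PLUS aggregates are bitwise ORs, so adding Broxton support reduces to OR-ing D_BXT into every aggregate that should now match. A self-contained sketch of the check, assuming nothing beyond the flag values shown in the hunk (entry_applies() and the main() driver are illustrative, not the kernel's API):

#include <stdio.h>

/* One bit per platform, mirroring the D_* flags in the hunk above. */
#define D_BDW  (1 << 0)
#define D_SKL  (1 << 1)
#define D_KBL  (1 << 2)
#define D_BXT  (1 << 3)

/* Aggregates are plain bitwise ORs of the platform bits. */
#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)

/* A tracked MMIO entry applies to a device when the masks intersect. */
static int entry_applies(unsigned int entry_mask, unsigned int device)
{
	return (entry_mask & device) != 0;
}

int main(void)
{
	unsigned int device = D_BXT;	/* pretend we run on Broxton */

	/* Prints 1 after this patch; 0 before, when D_SKL_PLUS lacked D_BXT. */
	printf("D_SKL_PLUS matches BXT: %d\n",
	       entry_applies(D_SKL_PLUS, device));
	return 0;
}
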
index 0f94955..20be9a9 100644 (file)
@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
-       if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+       if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+                       IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
                fw |= FORCEWAKE_RENDER;
 
        intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
 
-       if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+       if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
                return;
 
        if (!pre && !gen9_render_mocs.initialized)
@@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 #define CTX_CONTEXT_CONTROL_VAL        0x03
 
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
 {
-       u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+       const u32 *reg_state = ce->lrc_reg_state;
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
        u32 old_v, new_v;
 
        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv))
                switch_mocs(pre, next, ring_id);
 
        for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
                 * state image on Kabylake; it is initialized by an LRI command
                 * and saved/restored together with the context.
                 */
-               if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+               if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+                       && mmio->in_context)
                        continue;
 
                /* save */
@@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre,
                         * itself.
                         */
                        if (mmio->in_context &&
-                           !is_inhibit_context(s->shadow_ctx, ring_id))
+                           !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
                                continue;
 
                        if (mmio->mask)
@@ -574,7 +578,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
        struct engine_mmio *mmio;
 
-       if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+       if (IS_SKYLAKE(gvt->dev_priv) ||
+               IS_KABYLAKE(gvt->dev_priv) ||
+               IS_BROXTON(gvt->dev_priv))
                gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
        else
                gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
index 0439eb8..5c3b9ff 100644 (file)
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
 
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
index 53e2bd7..256d0db 100644 (file)
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
 int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
                void *data, unsigned int bytes)
 {
-       struct intel_gvt *gvt = vgpu->gvt;
        struct intel_vgpu_page_track *page_track;
        int ret = 0;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
        if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
        }
 
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
index d053cbe..09d7bb7 100644 (file)
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
        struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
        ktime_t cur_time;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&gvt->sched_lock);
        cur_time = ktime_get();
 
        if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
        vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
        tbs_sched_func(sched_data);
 
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&gvt->sched_lock);
 }
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
 {
+       int ret;
+
+       mutex_lock(&gvt->sched_lock);
        gvt->scheduler.sched_ops = &tbs_schedule_ops;
+       ret = gvt->scheduler.sched_ops->init(gvt);
+       mutex_unlock(&gvt->sched_lock);
 
-       return gvt->scheduler.sched_ops->init(gvt);
+       return ret;
 }
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
 {
+       mutex_lock(&gvt->sched_lock);
        gvt->scheduler.sched_ops->clean(gvt);
+       mutex_unlock(&gvt->sched_lock);
 }
 
+/* For the per-vgpu scheduler policy there are two pieces of per-vgpu
+ * data: sched_data and sched_ctl. We treat them as part of the global
+ * scheduler, protected by gvt->sched_lock. Callers must decide for
+ * themselves whether vgpu_lock should also be held outside.
+ */
+
 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
 {
-       return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+       int ret;
+
+       mutex_lock(&vgpu->gvt->sched_lock);
+       ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+       mutex_unlock(&vgpu->gvt->sched_lock);
+
+       return ret;
 }
 
 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->gvt->sched_lock);
        vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+       mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
+       mutex_lock(&vgpu->gvt->sched_lock);
        if (!vgpu_data->active) {
                gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
                vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
        }
+       mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
 {
+       mutex_lock(&gvt->sched_lock);
        intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+       mutex_unlock(&gvt->sched_lock);
 }
 
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 
        gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
+       mutex_lock(&vgpu->gvt->sched_lock);
        scheduler->sched_ops->stop_schedule(vgpu);
 
        if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
+       mutex_unlock(&vgpu->gvt->sched_lock);
 }
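
Each hunk in this file has the same shape: take gvt->sched_lock, call through the scheduler ops table, drop the lock, so the policy callbacks stay serialized against each other now that they no longer ride on the old global gvt->lock. A compact userspace sketch of that wrapping, with a pthread mutex standing in for the kernel mutex (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

struct sched_ops {
	int (*init)(void *priv);
};

struct gvt {
	pthread_mutex_t sched_lock;	/* stands in for gvt->sched_lock */
	const struct sched_ops *ops;
};

static int tbs_init(void *priv)
{
	(void)priv;
	puts("time-based scheduler initialized");
	return 0;
}

static const struct sched_ops tbs_ops = { .init = tbs_init };

/* Mirrors intel_gvt_init_sched_policy(): install the ops table and
 * call into it only while sched_lock is held, so no other policy
 * operation can interleave. */
static int init_sched_policy(struct gvt *g)
{
	int ret;

	pthread_mutex_lock(&g->sched_lock);
	g->ops = &tbs_ops;
	ret = g->ops->init(g);
	pthread_mutex_unlock(&g->sched_lock);

	return ret;
}

int main(void)
{
	struct gvt g = { .sched_lock = PTHREAD_MUTEX_INITIALIZER };

	return init_sched_policy(&g);
}
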
index c2d183b..928818f 100644 (file)
@@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
 {
-       struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;
 
        for (i = 0; i < 8; i++)
-               pdp_pair[i].val = pdp[7 - i];
+               ring_context->pdps[i].val = pdp[7 - i];
 }
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-       struct intel_vgpu *vgpu = workload->vgpu;
-       int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
+               workload->req->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
 
@@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
+               workload->req->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
@@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 static inline bool is_gvt_request(struct i915_request *req)
 {
-       return i915_gem_context_force_single_submission(req->ctx);
+       return i915_gem_context_force_single_submission(req->gem_context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
-               struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
        u64 desc = 0;
 
        desc = ce->lrc_desc;
@@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
-       desc |= ctx->desc_template & ((1ULL << 12) - 1);
+       desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
 
        ce->lrc_desc = desc;
 }
@@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
+       struct i915_request *req = workload->req;
        void *shadow_ring_buffer_va;
        u32 *cs;
-       struct i915_request *req = workload->req;
 
-       if (IS_KABYLAKE(req->i915) &&
-           is_inhibit_context(req->ctx, req->engine->id))
+       if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+               && is_inhibit_context(req->hw_context))
                intel_vgpu_restore_inhibit_context(vgpu, req);
 
        /* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       int ring_id = workload->ring_id;
-       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct intel_ring *ring;
+       struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+       struct intel_context *ce;
+       struct i915_request *rq;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       if (workload->shadowed)
+       if (workload->req)
                return 0;
 
+       /* GVT pins the shadow context itself even though i915 will pin it
+        * again when allocating the request. GVT updates the guest context
+        * from the shadow context once the workload completes, and by that
+        * point i915 may already have unpinned the shadow context, leaving
+        * the shadow_ctx pages invalid. So GVT needs its own pin; after
+        * updating the guest context it can unpin the shadow_ctx safely.
+        */
+       ce = intel_context_pin(shadow_ctx, engine);
+       if (IS_ERR(ce)) {
+               gvt_vgpu_err("fail to pin shadow context\n");
+               return PTR_ERR(ce);
+       }
+
        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
-               shadow_context_descriptor_update(shadow_ctx,
-                                       dev_priv->engine[ring_id]);
+       if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+               shadow_context_descriptor_update(ce);
 
        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
-               goto err_scan;
+               goto err_unpin;
 
        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
-                       goto err_scan;
+                       goto err_shadow;
        }
 
-       /* pin shadow context by gvt even the shadow context will be pinned
-        * when i915 alloc request. That is because gvt will update the guest
-        * context from shadow context when workload is completed, and at that
-        * moment, i915 may already unpined the shadow context to make the
-        * shadow_ctx pages invalid. So gvt need to pin itself. After update
-        * the guest context, gvt can unpin the shadow_ctx safely.
-        */
-       ring = intel_context_pin(shadow_ctx, engine);
-       if (IS_ERR(ring)) {
-               ret = PTR_ERR(ring);
-               gvt_vgpu_err("fail to pin shadow context\n");
+       rq = i915_request_alloc(engine, shadow_ctx);
+       if (IS_ERR(rq)) {
+               gvt_vgpu_err("fail to allocate gem request\n");
+               ret = PTR_ERR(rq);
                goto err_shadow;
        }
+       workload->req = i915_request_get(rq);
 
        ret = populate_shadow_context(workload);
        if (ret)
-               goto err_unpin;
-       workload->shadowed = true;
-       return 0;
+               goto err_req;
 
-err_unpin:
-       intel_context_unpin(shadow_ctx, engine);
+       return 0;
+err_req:
+       rq = fetch_and_zero(&workload->req);
+       i915_request_put(rq);
 err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
-       return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
-       int ring_id = workload->ring_id;
-       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct i915_request *rq;
-       struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_submission *s = &vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-       int ret;
-
-       rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
-       if (IS_ERR(rq)) {
-               gvt_vgpu_err("fail to allocate gem request\n");
-               ret = PTR_ERR(rq);
-               goto err_unpin;
-       }
-
-       gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
-       workload->req = i915_request_get(rq);
-       ret = copy_workload_to_ring_buffer(workload);
-       if (ret)
-               goto err_unpin;
-       return 0;
-
 err_unpin:
-       intel_context_unpin(shadow_ctx, engine);
-       release_shadow_wa_ctx(&workload->wa_ctx);
+       intel_context_unpin(ce);
        return ret;
 }
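
The reworked intel_gvt_scan_and_shadow_workload() is a classic goto unwind: resources are acquired in order (pin the context, shadow the ring buffer and wa_ctx, allocate the request) and the error labels release them in reverse, so each failure point jumps to exactly the depth already acquired. A standalone sketch of the shape, with placeholder resources rather than the real i915 objects:

#include <stdio.h>
#include <stdlib.h>

/* Placeholder resources standing in for the pinned context, the
 * shadowed ring buffer and the request in the function above. */
static void *acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return NULL;
	}
	printf("acquired %s\n", what);
	return malloc(1);
}

static void release(const char *what, void *p)
{
	printf("released %s\n", what);
	free(p);
}

/* Mirrors the unwind shape: take resources in order, and on failure
 * jump to a label that releases exactly what was already taken,
 * in reverse order. */
static int shadow_workload(int fail_request)
{
	void *ce, *shadow, *req;
	int ret = -1;

	ce = acquire("pinned context", 0);
	if (!ce)
		return ret;

	shadow = acquire("shadow ring buffer", 0);
	if (!shadow)
		goto err_unpin;

	req = acquire("request", fail_request);
	if (!req)
		goto err_shadow;

	/* Success: the real function hands these to the caller; the
	 * demo just cleans them up. */
	release("request", req);
	release("shadow ring buffer", shadow);
	release("pinned context", ce);
	return 0;

err_shadow:
	release("shadow ring buffer", shadow);
err_unpin:
	release("pinned context", ce);
	return ret;
}

int main(void)
{
	shadow_workload(1);	/* exercise the unwind path */
	return 0;
}
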
 
@@ -517,21 +485,13 @@ err:
        return ret;
 }
 
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-       struct intel_vgpu_workload *workload = container_of(wa_ctx,
-                                       struct intel_vgpu_workload,
-                                       wa_ctx);
-       int ring_id = workload->ring_id;
-       struct intel_vgpu_submission *s = &workload->vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-       struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
-       struct execlist_ring_context *shadow_ring_context;
-       struct page *page;
-
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap_atomic(page);
+       struct intel_vgpu_workload *workload =
+               container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+       struct i915_request *rq = workload->req;
+       struct execlist_ring_context *shadow_ring_context =
+               (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
 
        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +499,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-       kunmap_atomic(shadow_ring_context);
-       return 0;
 }
 
 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -633,7 +590,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
                goto err_unpin_mm;
        }
 
-       ret = intel_gvt_generate_request(workload);
+       ret = copy_workload_to_ring_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to generate request\n");
                goto err_unpin_mm;
@@ -670,16 +627,14 @@ err_unpin_mm:
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_submission *s = &vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
-       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       int ret = 0;
+       int ret;
 
        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);
 
+       mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
        ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -687,10 +642,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
                goto out;
 
        ret = prepare_workload(workload);
-       if (ret) {
-               intel_context_unpin(shadow_ctx, engine);
-               goto out;
-       }
 
 out:
        if (ret)
@@ -704,6 +655,7 @@ out:
        }
 
        mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
 
@@ -713,7 +665,7 @@ static struct intel_vgpu_workload *pick_next_workload(
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&gvt->sched_lock);
 
        /*
         * no current vgpu / will be scheduled out / no workload
@@ -759,33 +711,29 @@ static struct intel_vgpu_workload *pick_next_workload(
 
        atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&gvt->sched_lock);
        return workload;
 }
 
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
+       struct i915_request *rq = workload->req;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
-       struct intel_vgpu_submission *s = &vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-       int ring_id = workload->ring_id;
-       struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
+       struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;
 
-       gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
-                       workload->ctx_desc.lrca);
-
-       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+       gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+                     workload->ctx_desc.lrca);
 
+       context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
 
-       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+       if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
                context_page_num = 19;
 
        i = 2;
@@ -858,19 +806,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                scheduler->current_workload[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_request *rq = workload->req;
        int event;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
+       mutex_lock(&gvt->sched_lock);
 
        /* For a workload with a request, wait for the context
         * switch to make sure the request has completed.
         * For a workload without a request, complete it directly.
         */
-       if (workload->req) {
-               struct drm_i915_private *dev_priv =
-                       workload->vgpu->gvt->dev_priv;
-               struct intel_engine_cs *engine =
-                       dev_priv->engine[workload->ring_id];
+       if (rq) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -886,8 +832,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                workload->status = 0;
                }
 
-               i915_request_put(fetch_and_zero(&workload->req));
-
                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);
@@ -896,10 +840,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
-               mutex_lock(&dev_priv->drm.struct_mutex);
+
                /* unpin shadow ctx as the shadow_ctx update is done */
-               intel_context_unpin(s->shadow_ctx, engine);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
+               mutex_lock(&rq->i915->drm.struct_mutex);
+               intel_context_unpin(rq->hw_context);
+               mutex_unlock(&rq->i915->drm.struct_mutex);
+
+               i915_request_put(fetch_and_zero(&workload->req));
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -939,7 +886,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
 
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&gvt->sched_lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 struct workload_thread_param {
@@ -957,7 +905,8 @@ static int workload_thread(void *priv)
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-                       || IS_KABYLAKE(gvt->dev_priv);
+                       || IS_KABYLAKE(gvt->dev_priv)
+                       || IS_BROXTON(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        kfree(p);
@@ -991,9 +940,7 @@ static int workload_thread(void *priv)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);
 
-               mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
-               mutex_unlock(&gvt->lock);
 
                if (ret) {
                        vgpu = workload->vgpu;
@@ -1270,7 +1217,6 @@ alloc_workload(struct intel_vgpu *vgpu)
        atomic_set(&workload->shadow_ctx_active, 0);
 
        workload->status = -EINPROGRESS;
-       workload->shadowed = false;
        workload->vgpu = vgpu;
 
        return workload;
@@ -1285,7 +1231,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
        u64 gpa;
        int i;
 
-       gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+       gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
 
        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
index 6c64478..21eddab 100644 (file)
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
        struct i915_request *req;
        /* if this workload has been dispatched to i915? */
        bool dispatched;
-       bool shadowed;
        int status;
 
        struct intel_vgpu_mm *shadow_mm;
index 572a18c..83a4397 100644 (file)
@@ -58,6 +58,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 
        vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
 
+       vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+       vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
        gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
        gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
                vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -223,22 +226,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
-       struct intel_gvt *gvt = vgpu->gvt;
-
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        vgpu->active = false;
 
        if (atomic_read(&vgpu->submission.running_workload_num)) {
-               mutex_unlock(&gvt->lock);
+               mutex_unlock(&vgpu->vgpu_lock);
                intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&gvt->lock);
+               mutex_lock(&vgpu->vgpu_lock);
        }
 
        intel_vgpu_stop_schedule(vgpu);
        intel_vgpu_dmabuf_cleanup(vgpu);
 
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -252,14 +253,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        WARN(vgpu->active, "vGPU is still active!\n");
 
        intel_gvt_debugfs_remove_vgpu(vgpu);
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
-       if (idr_is_empty(&gvt->vgpu_idr))
-               intel_gvt_clean_irq(gvt);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_submission(vgpu);
        intel_vgpu_clean_display(vgpu);
@@ -269,10 +267,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_free_resource(vgpu);
        intel_vgpu_clean_mmio(vgpu);
        intel_vgpu_dmabuf_cleanup(vgpu);
-       vfree(vgpu);
+       mutex_unlock(&vgpu->vgpu_lock);
 
+       mutex_lock(&gvt->lock);
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
+       if (idr_is_empty(&gvt->vgpu_idr))
+               intel_gvt_clean_irq(gvt);
        intel_gvt_update_vgpu_types(gvt);
        mutex_unlock(&gvt->lock);
+
+       vfree(vgpu);
 }
 
 #define IDLE_VGPU_IDR 0
@@ -298,6 +302,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 
        vgpu->id = IDLE_VGPU_IDR;
        vgpu->gvt = gvt;
+       mutex_init(&vgpu->vgpu_lock);
 
        for (i = 0; i < I915_NUM_ENGINES; i++)
                INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +329,10 @@ out_free_vgpu:
  */
 void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->vgpu_lock);
        intel_vgpu_clean_sched_policy(vgpu);
+       mutex_unlock(&vgpu->vgpu_lock);
+
        vfree(vgpu);
 }
 
@@ -342,8 +350,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (!vgpu)
                return ERR_PTR(-ENOMEM);
 
-       mutex_lock(&gvt->lock);
-
        ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
                GFP_KERNEL);
        if (ret < 0)
@@ -353,6 +359,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->handle = param->handle;
        vgpu->gvt = gvt;
        vgpu->sched_ctl.weight = param->weight;
+       mutex_init(&vgpu->vgpu_lock);
        INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
        INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
        idr_init(&vgpu->object_idr);
@@ -400,8 +407,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
-       mutex_unlock(&gvt->lock);
-
        return vgpu;
 
 out_clean_sched_policy:
@@ -424,7 +429,6 @@ out_clean_idr:
        idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
        vfree(vgpu);
-       mutex_unlock(&gvt->lock);
        return ERR_PTR(ret);
 }
 
@@ -456,12 +460,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
        param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
 
+       mutex_lock(&gvt->lock);
        vgpu = __intel_gvt_create_vgpu(gvt, &param);
-       if (IS_ERR(vgpu))
-               return vgpu;
-
-       /* calculate left instance change for types */
-       intel_gvt_update_vgpu_types(gvt);
+       if (!IS_ERR(vgpu))
+               /* recalculate how many instances of each type remain */
+               intel_gvt_update_vgpu_types(gvt);
+       mutex_unlock(&gvt->lock);
 
        return vgpu;
 }
@@ -473,7 +477,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  * @engine_mask: engines to reset for GT reset
  *
  * This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
  *
  * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
  * the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +517,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
         * scheduler when the reset is triggered by current vgpu.
         */
        if (scheduler->current_vgpu == NULL) {
-               mutex_unlock(&gvt->lock);
+               mutex_unlock(&vgpu->vgpu_lock);
                intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&gvt->lock);
+               mutex_lock(&vgpu->vgpu_lock);
        }
 
        intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +559,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
-       mutex_lock(&vgpu->gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
        intel_gvt_reset_vgpu_locked(vgpu, true, 0);
-       mutex_unlock(&vgpu->gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
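
Taken together, these vgpu.c hunks replace the one global gvt->lock with a per-instance vgpu_lock plus a narrower gvt->lock kept only for the idr and type bookkeeping; note how intel_gvt_destroy_vgpu() now takes the two locks strictly one after the other instead of nesting them. A toy sketch of that split using pthread mutexes (names illustrative):

#include <pthread.h>
#include <stdio.h>

/* Global bookkeeping lock, the slimmed-down role of gvt->lock. */
static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

struct vgpu {
	int id;
	pthread_mutex_t vgpu_lock;	/* protects this instance only */
};

static void destroy_vgpu(struct vgpu *v)
{
	/* Per-instance teardown under the per-instance lock... */
	pthread_mutex_lock(&v->vgpu_lock);
	printf("cleaning vGPU %d state\n", v->id);
	pthread_mutex_unlock(&v->vgpu_lock);

	/* ...then global bookkeeping under the global lock. Taking the
	 * locks one after the other, never nested, means no ordering
	 * rule between them can be violated. */
	pthread_mutex_lock(&gvt_lock);
	printf("removing vGPU %d from the global table\n", v->id);
	pthread_mutex_unlock(&gvt_lock);
}

int main(void)
{
	struct vgpu v = { .id = 1, .vgpu_lock = PTHREAD_MUTEX_INITIALIZER };

	destroy_vgpu(&v);
	return 0;
}
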
index 13e7b9e..c400f42 100644 (file)
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
                } else {
                        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 
-                       if (ppgtt->base.file != stats->file_priv)
+                       if (ppgtt->vm.file != stats->file_priv)
                                continue;
                }
 
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                   dpy_count, dpy_size);
 
        seq_printf(m, "%llu [%pa] gtt total\n",
-                  ggtt->base.total, &ggtt->mappable_end);
+                  ggtt->vm.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                                   struct i915_request,
                                                   client_link);
                rcu_read_lock();
-               task = pid_task(request && request->ctx->pid ?
-                               request->ctx->pid : file->pid,
+               task = pid_task(request && request->gem_context->pid ?
+                               request->gem_context->pid : file->pid,
                                PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
-               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-                       pm_ier = I915_READ(GEN6_PMIER);
-                       pm_imr = I915_READ(GEN6_PMIMR);
-                       pm_isr = I915_READ(GEN6_PMISR);
-                       pm_iir = I915_READ(GEN6_PMIIR);
-                       pm_mask = I915_READ(GEN6_PMINTRMSK);
-               } else {
+               if (INTEL_GEN(dev_priv) >= 11) {
+                       pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+                       pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+                       /*
+                        * The equivalents of the PM ISR and IIR cannot be read
+                        * without affecting the current state of the system.
+                        */
+                       pm_isr = 0;
+                       pm_iir = 0;
+               } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
-                       pm_mask = I915_READ(GEN6_PMINTRMSK);
+               } else {
+                       pm_ier = I915_READ(GEN6_PMIER);
+                       pm_imr = I915_READ(GEN6_PMIMR);
+                       pm_isr = I915_READ(GEN6_PMISR);
+                       pm_iir = I915_READ(GEN6_PMIIR);
                }
+               pm_mask = I915_READ(GEN6_PMINTRMSK);
+
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));
-               seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
-                          pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+               seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+                          pm_ier, pm_imr, pm_mask);
+               if (INTEL_GEN(dev_priv) <= 10)
+                       seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+                                  pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1346,11 +1359,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
                           engine->hangcheck.seqno, seqno[id],
                           intel_engine_last_submit(engine));
-               seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
+               seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)),
-                          yesno(engine->hangcheck.stalled));
+                          yesno(engine->hangcheck.stalled),
+                          yesno(engine->hangcheck.wedged));
 
                spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -1895,7 +1909,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fbdev_fb->base.format->cpp[0] * 8,
                           fbdev_fb->base.modifier,
                           drm_framebuffer_read_refcount(&fbdev_fb->base));
-               describe_obj(m, fbdev_fb->obj);
+               describe_obj(m, intel_fb_obj(&fbdev_fb->base));
                seq_putc(m, '\n');
        }
 #endif
@@ -1913,7 +1927,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fb->base.format->cpp[0] * 8,
                           fb->base.modifier,
                           drm_framebuffer_read_refcount(&fb->base));
-               describe_obj(m, fb->obj);
+               describe_obj(m, intel_fb_obj(&fb->base));
                seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
@@ -2523,7 +2537,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
        if (!USES_GUC(dev_priv))
                return -ENODEV;
 
-       *val = intel_guc_log_level_get(&dev_priv->guc.log);
+       *val = intel_guc_log_get_level(&dev_priv->guc.log);
 
        return 0;
 }
@@ -2535,7 +2549,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
        if (!USES_GUC(dev_priv))
                return -ENODEV;
 
-       return intel_guc_log_level_set(&dev_priv->guc.log, val);
+       return intel_guc_log_set_level(&dev_priv->guc.log, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2630,8 +2644,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 psrperf = 0;
-       u32 stat[3];
-       enum pipe pipe;
        bool enabled = false;
        bool sink_support;
 
@@ -2649,50 +2661,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
        seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
                   dev_priv->psr.busy_frontbuffer_bits);
-       seq_printf(m, "Re-enable work scheduled: %s\n",
-                  yesno(work_busy(&dev_priv->psr.work.work)));
-
-       if (HAS_DDI(dev_priv)) {
-               if (dev_priv->psr.psr2_enabled)
-                       enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
-               else
-                       enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
-       } else {
-               for_each_pipe(dev_priv, pipe) {
-                       enum transcoder cpu_transcoder =
-                               intel_pipe_to_cpu_transcoder(dev_priv, pipe);
-                       enum intel_display_power_domain power_domain;
 
-                       power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-                       if (!intel_display_power_get_if_enabled(dev_priv,
-                                                               power_domain))
-                               continue;
-
-                       stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
-                               VLV_EDP_PSR_CURR_STATE_MASK;
-                       if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-                           (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-                               enabled = true;
-
-                       intel_display_power_put(dev_priv, power_domain);
-               }
-       }
+       if (dev_priv->psr.psr2_enabled)
+               enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+       else
+               enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 
        seq_printf(m, "Main link in standby mode: %s\n",
                   yesno(dev_priv->psr.link_standby));
 
-       seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
-       if (!HAS_DDI(dev_priv))
-               for_each_pipe(dev_priv, pipe) {
-                       if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-                           (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-                               seq_printf(m, " pipe %c", pipe_name(pipe));
-               }
-       seq_puts(m, "\n");
+       seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
 
        /*
-        * VLV/CHV PSR has no kind of performance counter
         * SKL+ Perf counter is reset to 0 every time a DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3398,28 +3378,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct i915_workarounds *workarounds = &dev_priv->workarounds;
+       struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
        int i;
 
-       intel_runtime_pm_get(dev_priv);
-
-       seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-       for (i = 0; i < workarounds->count; ++i) {
-               i915_reg_t addr;
-               u32 mask, value, read;
-               bool ok;
-
-               addr = workarounds->reg[i].addr;
-               mask = workarounds->reg[i].mask;
-               value = workarounds->reg[i].value;
-               read = I915_READ(addr);
-               ok = (value & mask) == (read & mask);
-               seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
-                          i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
-       }
-
-       intel_runtime_pm_put(dev_priv);
+       seq_printf(m, "Workarounds applied: %d\n", wa->count);
+       for (i = 0; i < wa->count; ++i)
+               seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+                          wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
 
        return 0;
 }
@@ -4245,8 +4210,13 @@ i915_drop_caches_set(void *data, u64 val)
                i915_gem_shrink_all(dev_priv);
        fs_reclaim_release(GFP_KERNEL);
 
-       if (val & DROP_IDLE)
-               drain_delayed_work(&dev_priv->gt.idle_work);
+       if (val & DROP_IDLE) {
+               do {
+                       if (READ_ONCE(dev_priv->gt.active_requests))
+                               flush_delayed_work(&dev_priv->gt.retire_work);
+                       drain_delayed_work(&dev_priv->gt.idle_work);
+               } while (READ_ONCE(dev_priv->gt.awake));
+       }
 
        if (val & DROP_FREED)
                i915_gem_drain_freed_objects(dev_priv);
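
The new DROP_IDLE loop is there because retiring requests can itself queue more work, so one drain pass is not enough: flush the retire work while requests remain, drain the idle work, and repeat until the device stops reporting itself awake. A tiny sketch of that retry-until-quiescent shape (the two state variables stand in for gt.active_requests and gt.awake; the real code reads them with READ_ONCE):

#include <stdio.h>

/* Stand-ins for dev_priv->gt.active_requests and dev_priv->gt.awake. */
static int active_requests = 3;
static int awake = 1;

static void flush_retire_work(void)
{
	if (active_requests)
		active_requests--;	/* retiring may also queue new work */
}

static void drain_idle_work(void)
{
	if (!active_requests)
		awake = 0;	/* the idle handler finally parks the device */
}

int main(void)
{
	/* Same shape as the DROP_IDLE hunk: keep flushing and draining
	 * until the device is genuinely idle. */
	do {
		if (active_requests)
			flush_retire_work();
		drain_idle_work();
	} while (awake);

	printf("device idle\n");
	return 0;
}
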
index 9c449b8..beb0951 100644 (file)
@@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line)
        if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
                DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
                         i915_modparams.inject_load_failure, func, line);
+               i915_modparams.inject_load_failure = 0;
                return true;
        }
 
        return false;
 }
+
+bool i915_error_injected(void)
+{
+       return i915_load_fail_count && !i915_modparams.inject_load_failure;
+}
+
 #endif
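
The injection logic above is a one-shot checkpoint counter: a module parameter picks checkpoint N, each probe increments the counter, the Nth probe reports a failure and clears the trigger, and the new i915_error_injected() reads true precisely when the count is non-zero while the trigger is zero. A userspace sketch of the same mechanism (illustrative, not the kernel API):

#include <stdio.h>

/* "Module parameter": inject a failure at the Nth checkpoint. */
static unsigned int inject_load_failure = 3;
static unsigned int load_fail_count;

static int inject_failure(const char *func, int line)
{
	if (!inject_load_failure)
		return 0;
	if (++load_fail_count == inject_load_failure) {
		fprintf(stderr, "injecting failure at checkpoint %u [%s:%d]\n",
			load_fail_count, func, line);
		inject_load_failure = 0;	/* one-shot, as in the patch */
		return 1;
	}
	return 0;
}

/* True once a failure has fired: the count advanced and the trigger
 * was cleared, matching the new i915_error_injected(). */
static int error_injected(void)
{
	return load_fail_count && !inject_load_failure;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		if (inject_failure(__func__, __LINE__))
			break;

	printf("error injected: %d\n", error_injected());
	return 0;
}
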
 
 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
@@ -114,21 +121,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
        va_end(args);
 }
 
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-       return i915_modparams.inject_load_failure &&
-              i915_load_fail_count == i915_modparams.inject_load_failure;
-#else
-       return false;
-#endif
-}
-
-#define i915_load_error(dev_priv, fmt, ...)                                 \
-       __i915_printk(dev_priv,                                              \
-                     i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
-                     fmt, ##__VA_ARGS__)
-
 /* Map PCH device id to PCH type, or PCH_NONE if unknown. */
 static enum intel_pch
 intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -233,6 +225,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
                id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
        else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
                id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+       else if (IS_ICELAKE(dev_priv))
+               id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
 
        if (id)
                DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -246,14 +240,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pch = NULL;
 
-       /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
-        * (which really amounts to a PCH but no South Display).
-        */
-       if (INTEL_INFO(dev_priv)->num_pipes == 0) {
-               dev_priv->pch_type = PCH_NOP;
-               return;
-       }
-
        /*
         * The reason to probe ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for VMM, that only
@@ -282,18 +268,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
                } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
                                         pch->subsystem_device)) {
                        id = intel_virt_detect_pch(dev_priv);
-                       if (id) {
-                               pch_type = intel_pch_type(dev_priv, id);
-                               if (WARN_ON(pch_type == PCH_NONE))
-                                       pch_type = PCH_NOP;
-                       } else {
-                               pch_type = PCH_NOP;
-                       }
+                       pch_type = intel_pch_type(dev_priv, id);
+
+                       /* Sanity check virtual PCH id */
+                       if (WARN_ON(id && pch_type == PCH_NONE))
+                               id = 0;
+
                        dev_priv->pch_type = pch_type;
                        dev_priv->pch_id = id;
                        break;
                }
        }
+
+       /*
+        * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+        * display.
+        */
+       if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+               DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+               dev_priv->pch_type = PCH_NOP;
+               dev_priv->pch_id = 0;
+       }
+
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");
 
@@ -634,26 +630,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .can_switch = i915_switcheroo_can_switch,
 };
 
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
-       /* Flush any outstanding unpin_work. */
-       i915_gem_drain_workqueue(dev_priv);
-
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_uc_fini_hw(dev_priv);
-       intel_uc_fini(dev_priv);
-       i915_gem_cleanup_engines(dev_priv);
-       i915_gem_contexts_fini(dev_priv);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-
-       intel_uc_fini_misc(dev_priv);
-       i915_gem_cleanup_userptr(dev_priv);
-
-       i915_gem_drain_freed_objects(dev_priv);
-
-       WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
 static int i915_load_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1553,12 +1529,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
        return false;
 }
 
+static int i915_drm_prepare(struct drm_device *dev)
+{
+       struct drm_i915_private *i915 = to_i915(dev);
+       int err;
+
+       /*
+        * NB intel_display_suspend() may issue new requests after we've
+        * ostensibly marked the GPU as ready-to-sleep here. We need to
+        * split out that work and pull it forward so that after point,
+        * the GPU is not woken again.
+        */
+       err = i915_gem_suspend(i915);
+       if (err)
+               dev_err(&i915->drm.pdev->dev,
+                       "GEM idle failed, suspend/resume might fail\n");
+
+       return err;
+}
+
 static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        pci_power_t opregion_target_state;
-       int error;
 
        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
@@ -1575,13 +1569,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        pci_save_state(pdev);
 
-       error = i915_gem_suspend(dev_priv);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "GEM idle failed, resume might fail\n");
-               goto out;
-       }
-
        intel_display_suspend(dev);
 
        intel_dp_mst_suspend(dev);
@@ -1600,7 +1587,6 @@ static int i915_drm_suspend(struct drm_device *dev)
        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
        intel_opregion_notify_adapter(dev_priv, opregion_target_state);
 
-       intel_uncore_suspend(dev_priv);
        intel_opregion_unregister(dev_priv);
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1595,9 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        intel_csr_ucode_suspend(dev_priv);
 
-out:
        enable_rpm_wakeref_asserts(dev_priv);
 
-       return error;
+       return 0;
 }
 
 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1608,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        disable_rpm_wakeref_asserts(dev_priv);
 
+       i915_gem_suspend_late(dev_priv);
+
        intel_display_set_init_power(dev_priv, false);
+       intel_uncore_suspend(dev_priv);
 
        /*
         * In case of firmware assisted context save/restore don't manually
@@ -1710,6 +1698,8 @@ static int i915_drm_resume(struct drm_device *dev)
        disable_rpm_wakeref_asserts(dev_priv);
        intel_sanitize_gt_powersave(dev_priv);
 
+       i915_gem_sanitize(dev_priv);
+
        ret = i915_ggtt_enable_hw(dev_priv);
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");
@@ -1851,7 +1841,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        else
                intel_display_set_init_power(dev_priv, true);
 
-       i915_gem_sanitize(dev_priv);
+       intel_engines_sanitize(dev_priv);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2081,6 +2071,22 @@ out:
        return ret;
 }
 
+static int i915_pm_prepare(struct device *kdev)
+{
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       if (!dev) {
+               dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
+
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
+
+       return i915_drm_prepare(dev);
+}
+
 static int i915_pm_suspend(struct device *kdev)
 {
        struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2737,7 @@ const struct dev_pm_ops i915_pm_ops = {
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
         * PMSG_RESUME]
         */
+       .prepare = i915_pm_prepare,
        .suspend = i915_pm_suspend,
        .suspend_late = i915_pm_suspend_late,
        .resume_early = i915_pm_resume_early,
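
The new .prepare hook relies on the PM core's ordering guarantee: .prepare runs for the device before any .suspend callback in the tree, which is what lets GEM be idled before intel_display_suspend() can race new requests in. A skeletal sketch of the wiring (the demo_* names are hypothetical, not the i915 callbacks):

#include <linux/device.h>
#include <linux/pm.h>

/* Called first: quiesce the GPU so nothing below wakes it again. */
static int demo_prepare(struct device *dev)
{
        return 0; /* e.g. flush outstanding work and park the GPU */
}

/* Called after every .prepare has run: safe to tear down display. */
static int demo_suspend(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        .prepare = demo_prepare,
        .suspend = demo_suspend,
};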
index 52f3b91..f4751b3 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/hash.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/mm_types.h>
 #include <linux/perf_event.h>
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
@@ -85,8 +86,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20180514"
-#define DRIVER_TIMESTAMP       1526300884
+#define DRIVER_DATE            "20180620"
+#define DRIVER_TIMESTAMP       1529529048
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
        I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
 bool __i915_inject_load_failure(const char *func, int line);
 #define i915_inject_load_failure() \
        __i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
 #else
+
 #define i915_inject_load_failure() false
+#define i915_error_injected() false
+
 #endif
 
+#define i915_load_error(i915, fmt, ...)                                         \
+       __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+                     fmt, ##__VA_ARGS__)
+
 typedef struct {
        uint32_t val;
 } uint_fixed_16_16_t;
@@ -608,13 +620,12 @@ struct i915_psr {
        bool sink_support;
        struct intel_dp *enabled;
        bool active;
-       struct delayed_work work;
+       struct work_struct work;
        unsigned busy_frontbuffer_bits;
        bool sink_psr2_support;
        bool link_standby;
        bool colorimetry_support;
        bool alpm;
-       bool has_hw_tracking;
        bool psr2_enabled;
        u8 sink_sync_latency;
        bool debug;
@@ -639,7 +650,7 @@ enum intel_pch {
        PCH_KBP,        /* Kaby Lake PCH */
        PCH_CNP,        /* Cannon Lake PCH */
        PCH_ICP,        /* Ice Lake PCH */
-       PCH_NOP,
+       PCH_NOP,        /* PCH without south display */
 };
 
 enum intel_sbi_destination {
@@ -1002,6 +1013,8 @@ struct i915_gem_mm {
 #define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
 #define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
 
+#define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
+
 enum modeset_restore {
        MODESET_ON_LID_OPEN,
        MODESET_DONE,
@@ -1012,6 +1025,7 @@ enum modeset_restore {
 #define DP_AUX_B 0x10
 #define DP_AUX_C 0x20
 #define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
 #define DP_AUX_F 0x60
 
 #define DDC_PIN_B  0x05
@@ -1056,9 +1070,9 @@ struct intel_vbt_data {
        /* Feature bits */
        unsigned int int_tv_support:1;
        unsigned int lvds_dither:1;
-       unsigned int lvds_vbt:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
+       unsigned int int_lvds_support:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
        unsigned int panel_type:4;
@@ -1074,7 +1088,6 @@ struct intel_vbt_data {
                int vswing;
                bool low_vswing;
                bool initialized;
-               bool support;
                int bpp;
                struct edp_power_seq pps;
        } edp;
@@ -1085,8 +1098,8 @@ struct intel_vbt_data {
                bool require_aux_wakeup;
                int idle_frames;
                enum psr_lines_to_wait lines_to_wait;
-               int tp1_wakeup_time;
-               int tp2_tp3_wakeup_time;
+               int tp1_wakeup_time_us;
+               int tp2_tp3_wakeup_time_us;
        } psr;
 
        struct {
@@ -1299,7 +1312,7 @@ struct i915_frontbuffer_tracking {
 };
 
 struct i915_wa_reg {
-       i915_reg_t addr;
+       u32 addr;
        u32 value;
        /* bitmask representing WA bits */
        u32 mask;
@@ -1850,6 +1863,7 @@ struct drm_i915_private {
                 */
                struct ida hw_ida;
 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
 #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
        } contexts;
 
@@ -1957,7 +1971,9 @@ struct drm_i915_private {
                         */
                        struct i915_perf_stream *exclusive_stream;
 
+                       struct intel_context *pinned_ctx;
                        u32 specific_ctx_id;
+                       u32 specific_ctx_id_mask;
 
                        struct hrtimer poll_check_timer;
                        wait_queue_head_t poll_wq;
@@ -2747,6 +2763,8 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
 int intel_engines_init(struct drm_i915_private *dev_priv);
 
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
 /* intel_hotplug.c */
 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask);
@@ -3168,12 +3186,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
                           unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout,
@@ -3212,7 +3232,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-       return container_of(vm, struct i915_hw_ppgtt, base);
+       return container_of(vm, struct i915_hw_ppgtt, vm);
 }
 
 /* i915_gem_fence_reg.c */
@@ -3677,14 +3697,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
         return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
 }
 
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
-       unsigned long j = timespec_to_jiffies(value);
-
-       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
 /*
  * If you need to wait X milliseconds between events A and B, but event B
  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
index d44ad7b..858d188 100644 (file)
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
        memset(node, 0, sizeof(*node));
-       return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+       return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                           size, 0, I915_COLOR_UNEVICTABLE,
                                           0, ggtt->mappable_end,
                                           DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 
 static u32 __i915_gem_park(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(i915->gt.active_requests);
        GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 void i915_gem_park(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(i915->gt.active_requests);
 
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
 
 void i915_gem_unpark(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(!i915->gt.active_requests);
 
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        struct i915_vma *vma;
        u64 pinned;
 
-       pinned = ggtt->base.reserved;
+       pinned = ggtt->vm.reserved;
        mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+       list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
-       list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+       list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = ggtt->base.total;
+       args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -1217,9 +1223,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb();
-                       ggtt->base.insert_page(&ggtt->base,
-                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-                                              node.start, I915_CACHE_NONE, 0);
+                       ggtt->vm.insert_page(&ggtt->vm,
+                                            i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                            node.start, I915_CACHE_NONE, 0);
                        wmb();
                } else {
                        page_base += offset & PAGE_MASK;
@@ -1240,8 +1246,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 out_unpin:
        if (node.allocated) {
                wmb();
-               ggtt->base.clear_range(&ggtt->base,
-                                      node.start, node.size);
+               ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
@@ -1420,9 +1425,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb(); /* flush the write before we modify the GGTT */
-                       ggtt->base.insert_page(&ggtt->base,
-                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-                                              node.start, I915_CACHE_NONE, 0);
+                       ggtt->vm.insert_page(&ggtt->vm,
+                                            i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                            node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
@@ -1449,8 +1454,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
        if (node.allocated) {
                wmb();
-               ggtt->base.clear_range(&ggtt->base,
-                                      node.start, node.size);
+               ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
@@ -1991,9 +1995,9 @@ compute_partial_view(struct drm_i915_gem_object *obj,
  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 {
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
        struct vm_area_struct *area = vmf->vma;
        struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
        struct drm_device *dev = obj->base.dev;
@@ -2108,10 +2112,8 @@ err:
                 * fail). But any other -EIO isn't ours (e.g. swap in failure)
                 * and so needs to be reported.
                 */
-               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-                       ret = VM_FAULT_SIGBUS;
-                       break;
-               }
+               if (!i915_terminally_wedged(&dev_priv->gpu_error))
+                       return VM_FAULT_SIGBUS;
        case -EAGAIN:
                /*
                 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -2126,21 +2128,16 @@ err:
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
-               ret = VM_FAULT_NOPAGE;
-               break;
+               return VM_FAULT_NOPAGE;
        case -ENOMEM:
-               ret = VM_FAULT_OOM;
-               break;
+               return VM_FAULT_OOM;
        case -ENOSPC:
        case -EFAULT:
-               ret = VM_FAULT_SIGBUS;
-               break;
+               return VM_FAULT_SIGBUS;
        default:
                WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-               ret = VM_FAULT_SIGBUS;
-               break;
+               return VM_FAULT_SIGBUS;
        }
-       return ret;
 }
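
The error path above now returns the typed vm_fault_t straight from each errno case, part of the tree-wide vm_fault_t conversion (note the matching <linux/mm_types.h> include added earlier). The mapping, pulled out into a stand-alone helper for clarity (a sketch; the real handler keeps this inline in its switch):

#include <linux/errno.h>
#include <linux/mm.h>

/* Translate an internal errno into the typed fault code the mm core
 * expects; anything unrecognised degrades to SIGBUS. */
static vm_fault_t errno_to_vm_fault(int err)
{
        switch (err) {
        case 0:
        case -EBUSY:
                return VM_FAULT_NOPAGE; /* another thread did the work */
        case -ENOMEM:
                return VM_FAULT_OOM;
        case -ENOSPC:
        case -EFAULT:
        default:
                return VM_FAULT_SIGBUS;
        }
}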
 
 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
@@ -2404,29 +2401,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
        rcu_read_unlock();
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-                                enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
 
-       if (i915_gem_object_has_pinned_pages(obj))
-               return;
-
-       GEM_BUG_ON(obj->bind_count);
-       if (!i915_gem_object_has_pages(obj))
-               return;
-
-       /* May be called by shrinker from within get_pages() (on another bo) */
-       mutex_lock_nested(&obj->mm.lock, subclass);
-       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-               goto unlock;
-
-       /* ->put_pages might need to allocate memory for the bit17 swizzle
-        * array, hence protect them from being reaped by removing them from gtt
-        * lists early. */
        pages = fetch_and_zero(&obj->mm.pages);
-       GEM_BUG_ON(!pages);
+       if (!pages)
+               return NULL;
 
        spin_lock(&i915->mm.obj_lock);
        list_del(&obj->mm.link);
@@ -2445,12 +2428,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        }
 
        __i915_gem_object_reset_page_iter(obj);
+       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+       return pages;
+}
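
Splitting __i915_gem_object_unset_pages() out gives put_pages and i915_gem_object_attach_phys() (further down) a single place that detaches the sg_table and resets the page-size tracking. It leans on i915's fetch_and_zero() exchange-with-zero idiom, roughly (a sketch close to, but not literally, the i915 macro):

/* Take the current value and leave zero behind, so ownership of the
 * pages moves to the caller exactly once (callers hold obj->mm.lock;
 * the macro is not atomic by itself). */
#define fetch_and_zero(ptr) ({                   \
        __typeof__(*(ptr)) __val = *(ptr);       \
        *(ptr) = (__typeof__(*(ptr)))0;          \
        __val;                                   \
})

/* usage, as in the hunk above:
 *   pages = fetch_and_zero(&obj->mm.pages);
 *   if (!pages)
 *           return NULL;
 */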
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                                enum i915_mm_subclass subclass)
+{
+       struct sg_table *pages;
+
+       if (i915_gem_object_has_pinned_pages(obj))
+               return;
+
+       GEM_BUG_ON(obj->bind_count);
+       if (!i915_gem_object_has_pages(obj))
+               return;
+
+       /* May be called by shrinker from within get_pages() (on another bo) */
+       mutex_lock_nested(&obj->mm.lock, subclass);
+       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+               goto unlock;
 
+       /*
+        * ->put_pages might need to allocate memory for the bit17 swizzle
+        * array, hence protect them from being reaped by removing them from gtt
+        * lists early.
+        */
+       pages = __i915_gem_object_unset_pages(obj);
        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);
 
-       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
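
The mutex_lock_nested() here matters because put_pages can be entered from the shrinker while get_pages for a different object already holds a lock of the same class; the subclass tells lockdep the nesting is intentional. Reduced to the pattern (demo names hypothetical):

#include <linux/mutex.h>

enum demo_mm_subclass {
        DEMO_MM_NORMAL = 0,
        DEMO_MM_SHRINKER,   /* re-entered from the shrinker */
};

static void demo_put_pages(struct mutex *obj_lock,
                           enum demo_mm_subclass subclass)
{
        /* Two objects share one lock class; annotate the nesting so
         * lockdep does not report a false deadlock. */
        mutex_lock_nested(obj_lock, subclass);
        /* ... release backing storage ... */
        mutex_unlock(obj_lock);
}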
@@ -2968,16 +2976,16 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
        score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
        banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
 
-       DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
-                        ctx->name, atomic_read(&ctx->guilty_count),
-                        score, yesno(banned && bannable));
-
        /* Cool contexts don't accumulate client ban score */
        if (!bannable)
                return;
 
-       if (banned)
+       if (banned) {
+               DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+                                ctx->name, atomic_read(&ctx->guilty_count),
+                                score);
                i915_gem_context_set_banned(ctx);
+       }
 
        if (!IS_ERR_OR_NULL(ctx->file_priv))
                i915_gem_client_mark_guilty(ctx->file_priv, ctx);
@@ -3025,7 +3033,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
-       struct i915_request *request = NULL;
+       struct i915_request *request;
 
        /*
         * During the reset sequence, we must prevent the engine from
@@ -3036,52 +3044,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         */
        intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
 
-       /*
-        * Prevent the signaler thread from updating the request
-        * state (by calling dma_fence_signal) as we are processing
-        * the reset. The write from the GPU of the seqno is
-        * asynchronous and the signaler thread may see a different
-        * value to us and declare the request complete, even though
-        * the reset routine have picked that request as the active
-        * (incomplete) request. This conflict is not handled
-        * gracefully!
-        */
-       kthread_park(engine->breadcrumbs.signaler);
-
-       /*
-        * Prevent request submission to the hardware until we have
-        * completed the reset in i915_gem_reset_finish(). If a request
-        * is completed by one engine, it may then queue a request
-        * to a second via its execlists->tasklet *just* as we are
-        * calling engine->init_hw() and also writing the ELSP.
-        * Turning off the execlists->tasklet until the reset is over
-        * prevents the race.
-        *
-        * Note that this needs to be a single atomic operation on the
-        * tasklet (flush existing tasks, prevent new tasks) to prevent
-        * a race between reset and set-wedged. It is not, so we do the best
-        * we can atm and make sure we don't lock the machine up in the more
-        * common case of recursively being called from set-wedged from inside
-        * i915_reset.
-        */
-       if (!atomic_read(&engine->execlists.tasklet.count))
-               tasklet_kill(&engine->execlists.tasklet);
-       tasklet_disable(&engine->execlists.tasklet);
-
-       /*
-        * We're using worker to queue preemption requests from the tasklet in
-        * GuC submission mode.
-        * Even though tasklet was disabled, we may still have a worker queued.
-        * Let's make sure that all workers scheduled before disabling the
-        * tasklet are completed before continuing with the reset.
-        */
-       if (engine->i915->guc.preempt_wq)
-               flush_workqueue(engine->i915->guc.preempt_wq);
-
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
-
-       request = i915_gem_find_active_request(engine);
+       request = engine->reset.prepare(engine);
        if (request && request->fence.error == -EIO)
                request = ERR_PTR(-EIO); /* Previous reset failed! */
 
@@ -3133,7 +3096,7 @@ static void skip_request(struct i915_request *request)
 static void engine_skip_context(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       struct i915_gem_context *hung_ctx = request->ctx;
+       struct i915_gem_context *hung_ctx = request->gem_context;
        struct i915_timeline *timeline = request->timeline;
        unsigned long flags;
 
@@ -3143,7 +3106,7 @@ static void engine_skip_context(struct i915_request *request)
        spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
 
        list_for_each_entry_continue(request, &engine->timeline.requests, link)
-               if (request->ctx == hung_ctx)
+               if (request->gem_context == hung_ctx)
                        skip_request(request);
 
        list_for_each_entry(request, &timeline->requests, link)
@@ -3189,11 +3152,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
        }
 
        if (stalled) {
-               i915_gem_context_mark_guilty(request->ctx);
+               i915_gem_context_mark_guilty(request->gem_context);
                skip_request(request);
 
                /* If this context is now banned, skip all pending requests. */
-               if (i915_gem_context_is_banned(request->ctx))
+               if (i915_gem_context_is_banned(request->gem_context))
                        engine_skip_context(request);
        } else {
                /*
@@ -3203,15 +3166,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
                 */
                request = i915_gem_find_active_request(engine);
                if (request) {
-                       i915_gem_context_mark_innocent(request->ctx);
+                       unsigned long flags;
+
+                       i915_gem_context_mark_innocent(request->gem_context);
                        dma_fence_set_error(&request->fence, -EAGAIN);
 
                        /* Rewind the engine to replay the incomplete rq */
-                       spin_lock_irq(&engine->timeline.lock);
+                       spin_lock_irqsave(&engine->timeline.lock, flags);
                        request = list_prev_entry(request, link);
                        if (&request->link == &engine->timeline.requests)
                                request = NULL;
-                       spin_unlock_irq(&engine->timeline.lock);
+                       spin_unlock_irqrestore(&engine->timeline.lock, flags);
                }
        }
 
@@ -3232,13 +3197,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
        if (request)
                request = i915_gem_reset_request(engine, request, stalled);
 
-       if (request) {
-               DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-                                engine->name, request->global_seqno);
-       }
-
        /* Setup the CS to resume from the breadcrumb of the hung request */
-       engine->reset_hw(engine, request);
+       engine->reset.reset(engine, request);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3252,14 +3212,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
        i915_retire_requests(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
-               struct i915_gem_context *ctx;
+               struct intel_context *ce;
 
                i915_gem_reset_engine(engine,
                                      engine->hangcheck.active_request,
                                      stalled_mask & ENGINE_MASK(id));
-               ctx = fetch_and_zero(&engine->last_retired_context);
-               if (ctx)
-                       intel_context_unpin(ctx, engine);
+               ce = fetch_and_zero(&engine->last_retired_context);
+               if (ce)
+                       intel_context_unpin(ce);
 
                /*
                 * Ostensibly, we always want a context loaded for powersaving,
@@ -3277,7 +3237,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
                        rq = i915_request_alloc(engine,
                                                dev_priv->kernel_context);
                        if (!IS_ERR(rq))
-                               __i915_request_add(rq, false);
+                               i915_request_add(rq);
                }
        }
 
@@ -3286,8 +3246,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-       tasklet_enable(&engine->execlists.tasklet);
-       kthread_unpark(engine->breadcrumbs.signaler);
+       engine->reset.finish(engine);
 
        intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
 }
@@ -3565,6 +3524,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
                work_pending(&i915->gt.idle_work.work));
 }
 
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return;
+
+       GEM_BUG_ON(i915->gt.active_requests);
+       for_each_engine(engine, i915, id) {
+               GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+               GEM_BUG_ON(engine->last_retired_context !=
+                          to_intel_context(i915->kernel_context, engine));
+       }
+}
+
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
@@ -3576,6 +3551,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
        if (!READ_ONCE(dev_priv->gt.awake))
                return;
 
+       if (READ_ONCE(dev_priv->gt.active_requests))
+               return;
+
+       /*
+        * Flush out the last user context, leaving only the pinned
+        * kernel context resident. When we are idling on the kernel_context,
+        * no more new requests (with a context switch) are emitted and we
+        * can finally rest. A consequence is that the idle work handler is
+        * always called at least twice before idling (and if the system is
+        * idle that implies a round trip through the retire worker).
+        */
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i915_gem_switch_to_kernel_context(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+                 READ_ONCE(dev_priv->gt.active_requests));
+
        /*
         * Wait for last execlists context complete, but bail out in case a
         * new request is submitted. As we don't trust the hardware, we
@@ -3609,6 +3602,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
 
        epoch = __i915_gem_park(dev_priv);
 
+       assert_kernel_context_is_current(dev_priv);
+
        rearm_hangcheck = false;
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3757,7 +3752,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
 {
-       return i915_gem_active_wait(&tl->last_request, flags);
+       struct i915_request *rq;
+       long ret;
+
+       rq = i915_gem_active_get_unlocked(&tl->last_request);
+       if (!rq)
+               return 0;
+
+       /*
+        * "Race-to-idle".
+        *
+        * Switching to the kernel context is often used as a synchronous
+        * step prior to idling, e.g. in suspend for flushing all
+        * current operations to memory before sleeping. These we
+        * want to complete as quickly as possible to avoid prolonged
+        * stalls, so allow the gpu to boost to maximum clocks.
+        */
+       if (flags & I915_WAIT_FOR_IDLE_BOOST)
+               gen6_rps_boost(rq, NULL);
+
+       ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT);
+       i915_request_put(rq);
+
+       return ret < 0 ? ret : 0;
 }
 
 static int wait_for_engines(struct drm_i915_private *i915)
@@ -3775,6 +3792,9 @@ static int wait_for_engines(struct drm_i915_private *i915)
 
 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
+       GEM_TRACE("flags=%x (%s)\n",
+                 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked");
+
        /* If the device is asleep, we have no requests outstanding */
        if (!READ_ONCE(i915->gt.awake))
                return 0;
@@ -3791,6 +3811,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
                                return err;
                }
                i915_retire_requests(i915);
+               GEM_BUG_ON(i915->gt.active_requests);
 
                return wait_for_engines(i915);
        } else {
@@ -4379,7 +4400,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         u64 flags)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_address_space *vm = &dev_priv->ggtt.base;
+       struct i915_address_space *vm = &dev_priv->ggtt.vm;
        struct i915_vma *vma;
        int ret;
 
@@ -4967,25 +4988,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
                i915_gem_object_put(obj);
 }
 
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *kernel_context = i915->kernel_context;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       int err;
 
-       for_each_engine(engine, i915, id) {
-               GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
-               GEM_BUG_ON(engine->last_retired_context != kernel_context);
-       }
-}
+       GEM_TRACE("\n");
 
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
-       if (i915_terminally_wedged(&i915->gpu_error)) {
-               mutex_lock(&i915->drm.struct_mutex);
+       mutex_lock(&i915->drm.struct_mutex);
+
+       intel_runtime_pm_get(i915);
+       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+       /*
+        * As we have just resumed the machine and woken the device up from
+        * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+        * back to defaults, recovering from whatever wedged state we left it
+        * in and so worth trying to use the device once more.
+        */
+       if (i915_terminally_wedged(&i915->gpu_error))
                i915_gem_unset_wedged(i915);
-               mutex_unlock(&i915->drm.struct_mutex);
-       }
 
        /*
         * If we inherit context state from the BIOS or earlier occupants
@@ -4995,8 +5016,17 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
         * it may impact the display and we are uncertain about the stability
         * of the reset, so this could be applied to even earlier gen.
         */
+       err = -ENODEV;
        if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
-               WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+               err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+       if (!err)
+               intel_engines_sanitize(i915);
+
+       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+       intel_runtime_pm_put(i915);
+
+       i915_gem_contexts_lost(i915);
+       mutex_unlock(&i915->drm.struct_mutex);
 }
 
 int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -5004,6 +5034,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        struct drm_device *dev = &dev_priv->drm;
        int ret;
 
+       GEM_TRACE("\n");
+
        intel_runtime_pm_get(dev_priv);
        intel_suspend_gt_powersave(dev_priv);
 
@@ -5024,13 +5056,13 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 
                ret = i915_gem_wait_for_idle(dev_priv,
                                             I915_WAIT_INTERRUPTIBLE |
-                                            I915_WAIT_LOCKED);
+                                            I915_WAIT_LOCKED |
+                                            I915_WAIT_FOR_IDLE_BOOST);
                if (ret && ret != -EIO)
                        goto err_unlock;
 
                assert_kernel_context_is_current(dev_priv);
        }
-       i915_gem_contexts_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        intel_uc_suspend(dev_priv);
@@ -5050,6 +5082,24 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        if (WARN_ON(!intel_engines_are_idle(dev_priv)))
                i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
 
+       intel_runtime_pm_put(dev_priv);
+       return 0;
+
+err_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       intel_runtime_pm_put(dev_priv);
+       return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+       struct drm_i915_gem_object *obj;
+       struct list_head *phases[] = {
+               &i915->mm.unbound_list,
+               &i915->mm.bound_list,
+               NULL
+       }, **phase;
+
        /*
         * Neither the BIOS, ourselves, nor any other kernel
         * expects the system to be in execlists mode on startup,
@@ -5069,20 +5119,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
         * machines is a good idea, we don't - just in case it leaves the
         * machine in an unusable condition.
         */
-       intel_uc_sanitize(dev_priv);
-       i915_gem_sanitize(dev_priv);
 
-       intel_runtime_pm_put(dev_priv);
-       return 0;
+       mutex_lock(&i915->drm.struct_mutex);
+       for (phase = phases; *phase; phase++) {
+               list_for_each_entry(obj, *phase, mm.link)
+                       WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+       }
+       mutex_unlock(&i915->drm.struct_mutex);
 
-err_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       intel_runtime_pm_put(dev_priv);
-       return ret;
+       intel_uc_sanitize(i915);
+       i915_gem_sanitize(i915);
 }
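
i915_gem_suspend_late() walks both object lists with a NULL-terminated phases[] array, the same idiom i915_gem_freeze_late() is converted to below. The idiom in isolation (the demo types and flush_object() are hypothetical):

#include <linux/list.h>

struct demo_obj { struct list_head link; };
struct demo { struct list_head unbound_list, bound_list; };

static void flush_object(struct demo_obj *obj) { /* per-object work */ }

static void walk_phases(struct demo *d)
{
        /* One loop body covers both populations; NULL terminates. */
        struct list_head *phases[] = {
                &d->unbound_list,
                &d->bound_list,
                NULL
        }, **phase;
        struct demo_obj *obj;

        for (phase = phases; *phase; phase++)
                list_for_each_entry(obj, *phase, link)
                        flush_object(obj);
}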
 
 void i915_gem_resume(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        WARN_ON(i915->gt.awake);
 
        mutex_lock(&i915->drm.struct_mutex);
@@ -5256,9 +5308,15 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 
        /* Only when the HW is re-initialised, can we replay the requests */
        ret = __i915_gem_restart_engines(dev_priv);
+       if (ret)
+               goto cleanup_uc;
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
+
+cleanup_uc:
+       intel_uc_fini_hw(dev_priv);
+       goto out;
 }
 
 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
@@ -5294,7 +5352,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
                if (engine->init_context)
                        err = engine->init_context(rq);
 
-               __i915_request_add(rq, true);
+               i915_request_add(rq);
                if (err)
                        goto err_active;
        }
@@ -5379,12 +5437,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       /*
-        * We need to fallback to 4K pages since gvt gtt handling doesn't
-        * support huge page entries - we will need to check either hypervisor
-        * mm can support huge guest page or just do emulation in gvt.
-        */
-       if (intel_vgpu_active(dev_priv))
+       /* We need to fall back to 4K pages if the host doesn't support huge gtt. */
+       if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                mkwrite_device_info(dev_priv)->page_sizes =
                        I915_GTT_PAGE_SIZE_4K;
 
@@ -5484,8 +5538,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
         * driver doesn't explode during runtime.
         */
 err_init_hw:
-       i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
-       i915_gem_contexts_lost(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       WARN_ON(i915_gem_suspend(dev_priv));
+       i915_gem_suspend_late(dev_priv);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uc_fini_hw(dev_priv);
 err_uc_init:
        intel_uc_fini(dev_priv);
@@ -5514,7 +5572,8 @@ err_unlock:
                 * for all other failure, such as an allocation failure, bail.
                 */
                if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-                       DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+                       i915_load_error(dev_priv,
+                                       "Failed to initialize GPU, declaring it wedged!\n");
                        i915_gem_set_wedged(dev_priv);
                }
                ret = 0;
@@ -5524,6 +5583,28 @@ err_unlock:
        return ret;
 }
 
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+       i915_gem_suspend_late(dev_priv);
+
+       /* Flush any outstanding unpin_work. */
+       i915_gem_drain_workqueue(dev_priv);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       intel_uc_fini_hw(dev_priv);
+       intel_uc_fini(dev_priv);
+       i915_gem_cleanup_engines(dev_priv);
+       i915_gem_contexts_fini(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       intel_uc_fini_misc(dev_priv);
+       i915_gem_cleanup_userptr(dev_priv);
+
+       i915_gem_drain_freed_objects(dev_priv);
+
+       WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
 void i915_gem_init_mmio(struct drm_i915_private *i915)
 {
        i915_gem_sanitize(i915);
@@ -5688,16 +5769,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
 {
        struct drm_i915_gem_object *obj;
        struct list_head *phases[] = {
-               &dev_priv->mm.unbound_list,
-               &dev_priv->mm.bound_list,
+               &i915->mm.unbound_list,
+               &i915->mm.bound_list,
                NULL
-       }, **p;
+       }, **phase;
 
-       /* Called just before we write the hibernation image.
+       /*
+        * Called just before we write the hibernation image.
         *
         * We need to update the domain tracking to reflect that the CPU
         * will be accessing all the pages to create and restore from the
@@ -5711,15 +5793,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
         * the objects as well, see i915_gem_freeze()
         */
 
-       i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
-       i915_gem_drain_freed_objects(dev_priv);
+       i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+       i915_gem_drain_freed_objects(i915);
 
-       spin_lock(&dev_priv->mm.obj_lock);
-       for (p = phases; *p; p++) {
-               list_for_each_entry(obj, *p, mm.link)
-                       __start_cpu_write(obj);
+       mutex_lock(&i915->drm.struct_mutex);
+       for (phase = phases; *phase; phase++) {
+               list_for_each_entry(obj, *phase, mm.link)
+                       WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
        }
-       spin_unlock(&dev_priv->mm.obj_lock);
+       mutex_unlock(&i915->drm.struct_mutex);
 
        return 0;
 }
@@ -6039,16 +6121,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
                goto err_unlock;
        }
 
-       pages = fetch_and_zero(&obj->mm.pages);
-       if (pages) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-               __i915_gem_object_reset_page_iter(obj);
-
-               spin_lock(&i915->mm.obj_lock);
-               list_del(&obj->mm.link);
-               spin_unlock(&i915->mm.obj_lock);
-       }
+       pages = __i915_gem_object_unset_pages(obj);
 
        obj->ops = &i915_gem_phys_ops;
 
@@ -6066,7 +6139,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 
 err_xfer:
        obj->ops = &i915_gem_object_ops;
-       obj->mm.pages = pages;
+       if (!IS_ERR_OR_NULL(pages)) {
+               unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+               __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+       }
 err_unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
index 5259204..261da57 100644 (file)
@@ -26,6 +26,7 @@
 #define __I915_GEM_H__
 
 #include <linux/bug.h>
+#include <linux/interrupt.h>
 
 struct drm_i915_private;
 
@@ -62,9 +63,12 @@ struct drm_i915_private;
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
 #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
 #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+       do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
 #else
 #define GEM_TRACE(...) do { } while (0)
 #define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
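
When tracing is compiled out, GEM_TRACE_DUMP_ON(expr) becomes BUILD_BUG_ON_INVALID(expr): the expression is still parsed and type-checked but emits no object code. The same zero-cost-when-disabled shape, stripped down (the DEMO_* names are hypothetical):

#include <linux/build_bug.h>
#include <linux/printk.h>

#ifdef DEMO_TRACE
#define DEMO_DUMP_ON(expr) \
        do { if (expr) dump_stack(); } while (0)
#else
/* Compiles expr away while keeping it visible to the compiler, so
 * disabled builds still catch bitrot in the condition. */
#define DEMO_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif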
 
 #define I915_NUM_ENGINES 8
@@ -72,4 +76,16 @@ struct drm_i915_private;
 void i915_gem_park(struct drm_i915_private *i915);
 void i915_gem_unpark(struct drm_i915_private *i915);
 
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+       if (atomic_inc_return(&t->count) == 1)
+               tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+       if (atomic_dec_return(&t->count) == 0)
+               tasklet_kill(t);
+}
+
 #endif /* __I915_GEM_H__ */
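
The *_sync_once helpers make tasklet disable/enable safe to nest across overlapping resets: only the 0-to-1 disable waits for a concurrently running tasklet, and only the final enable kills off runs deferred while disabled. A usage sketch around an engine reset (the demo_* wrappers are hypothetical; the helpers are the ones added above):

#include <linux/interrupt.h>

static void demo_reset_prepare(struct tasklet_struct *t)
{
        /* First caller waits out a running tasklet; nested callers
         * merely bump the disable count. */
        __tasklet_disable_sync_once(t);
}

static void demo_reset_finish(struct tasklet_struct *t)
{
        /* Last caller re-enables and flushes anything scheduled
         * while the tasklet was disabled. */
        __tasklet_enable_sync_once(t);
}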
index 060335d..ccf463a 100644 (file)
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];
 
-               if (!ce->state)
-                       continue;
-
-               WARN_ON(ce->pin_count);
-               if (ce->ring)
-                       intel_ring_free(ce->ring);
-
-               __i915_gem_object_release_unless_active(ce->state->obj);
+               if (ce->ops)
+                       ce->ops->destroy(ce);
        }
 
        kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
         */
        lut_close(ctx);
        if (ctx->ppgtt)
-               i915_ppgtt_close(&ctx->ppgtt->base);
+               i915_ppgtt_close(&ctx->ppgtt->vm);
 
        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
        int ret;
        unsigned int max;
 
-       if (INTEL_GEN(dev_priv) >= 11)
+       if (INTEL_GEN(dev_priv) >= 11) {
                max = GEN11_MAX_CONTEXT_HW_ID;
-       else
-               max = MAX_CONTEXT_HW_ID;
+       } else {
+               /*
+                * When using GuC in proxy submission, GuC consumes the
+                * highest bit in the context id to indicate proxy submission.
+                */
+               if (USES_GUC_SUBMISSION(dev_priv))
+                       max = MAX_GUC_CONTEXT_HW_ID;
+               else
+                       max = MAX_CONTEXT_HW_ID;
+       }
 
        ret = ida_simple_get(&dev_priv->contexts.hw_ida,
                             0, max, GFP_KERNEL);
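
With GuC proxy submission the top bit of the hardware context id belongs to the GuC, so the usable ID space halves from 1 << 21 to 1 << 20. The selection above, flattened into one helper (a sketch, not an i915 function):

#include <stdbool.h>

#define MAX_CONTEXT_HW_ID       (1 << 21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID   (1 << 20) /* top bit flags proxy submission */
#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */

static unsigned int max_hw_id(int gen, bool guc_submission)
{
        if (gen >= 11)
                return GEN11_MAX_CONTEXT_HW_ID;
        return guc_submission ? MAX_GUC_CONTEXT_HW_ID
                              : MAX_CONTEXT_HW_ID;
}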
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
        desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 
        address_mode = INTEL_LEGACY_32B_CONTEXT;
-       if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+       if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv)
 {
        struct i915_gem_context *ctx;
+       unsigned int n;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        ctx->i915 = dev_priv;
        ctx->sched.priority = I915_PRIORITY_NORMAL;
 
+       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+               struct intel_context *ce = &ctx->__engine[n];
+
+               ce->gem_context = ctx;
+       }
+
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
 
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       for_each_engine(engine, dev_priv, id) {
-               engine->legacy_active_context = NULL;
-               engine->legacy_active_ppgtt = NULL;
-
-               if (!engine->last_retired_context)
-                       continue;
-
-               intel_context_unpin(engine->last_retired_context, engine);
-               engine->last_retired_context = NULL;
-       }
+       for_each_engine(engine, dev_priv, id)
+               intel_engine_lost_context(engine);
 }
 
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline,
 {
        struct i915_request *rq;
 
-       if (timeline == &engine->timeline)
-               return NULL;
+       GEM_BUG_ON(timeline == &engine->timeline);
 
        rq = i915_gem_active_raw(&timeline->last_request,
                                 &engine->i915->drm.struct_mutex);
-       if (rq && rq->engine == engine)
+       if (rq && rq->engine == engine) {
+               GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+                         timeline->name, engine->name,
+                         rq->fence.context, rq->fence.seqno);
+               GEM_BUG_ON(rq->timeline != timeline);
                return rq;
+       }
 
        return NULL;
 }
 
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
 {
-       struct i915_timeline *timeline;
+       struct drm_i915_private *i915 = engine->i915;
+       const struct intel_context * const ce =
+               to_intel_context(i915->kernel_context, engine);
+       struct i915_timeline *barrier = ce->ring->timeline;
+       struct intel_ring *ring;
+       bool any_active = false;
 
-       list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-               if (last_request_on_engine(timeline, engine))
+       lockdep_assert_held(&i915->drm.struct_mutex);
+       list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+               struct i915_request *rq;
+
+               rq = last_request_on_engine(ring->timeline, engine);
+               if (!rq)
+                       continue;
+
+               any_active = true;
+
+               if (rq->hw_context == ce)
+                       continue;
+
+               /*
+                * Was this request submitted after the previous
+                * switch-to-kernel-context?
+                */
+               if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+                       GEM_TRACE("%s needs barrier for %llx:%d\n",
+                                 ring->timeline->name,
+                                 rq->fence.context,
+                                 rq->fence.seqno);
                        return false;
+               }
+
+               GEM_TRACE("%s has barrier after %llx:%d\n",
+                         ring->timeline->name,
+                         rq->fence.context,
+                         rq->fence.seqno);
        }
 
-       return intel_engine_has_kernel_context(engine);
+       /*
+        * If any other timeline was still active and behind the last barrier,
+        * then our last switch-to-kernel-context must still be queued and
+        * will run last (leaving the engine in the kernel context when it
+        * eventually idles).
+        */
+       if (any_active)
+               return true;
+
+       /* The engine is idle; check that it is idling in the kernel context. */
+       return engine->last_retired_context == ce;
 }
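
The check above pairs with the emission side in the switch-to-kernel-context hunk below: i915_timeline_sync_set() records the barrier fence on the request's timeline, and i915_timeline_sync_is_later() later asks whether a given request predates it. The pairing, isolated (a fragment lifted from the surrounding hunks, not new API):

/* Check side: the barrier timeline remembers every fence recorded via
 * i915_timeline_sync_set(); a request it has not seen was submitted
 * after the last switch-to-kernel-context and still needs a barrier. */
static bool covered_by_barrier(struct i915_timeline *barrier,
                               const struct i915_request *rq)
{
        return i915_timeline_sync_is_later(barrier, &rq->fence);
}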
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
-       struct i915_timeline *timeline;
        enum intel_engine_id id;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
 
-       i915_retire_requests(dev_priv);
+       lockdep_assert_held(&i915->drm.struct_mutex);
+       GEM_BUG_ON(!i915->kernel_context);
+
+       i915_retire_requests(i915);
 
-       for_each_engine(engine, dev_priv, id) {
+       for_each_engine(engine, i915, id) {
+               struct intel_ring *ring;
                struct i915_request *rq;
 
-               if (engine_has_idle_kernel_context(engine))
+               GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+               if (engine_has_kernel_context_barrier(engine))
                        continue;
 
-               rq = i915_request_alloc(engine, dev_priv->kernel_context);
+               GEM_TRACE("emit barrier on %s\n", engine->name);
+
+               rq = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);
 
                /* Queue this switch after all other activity */
-               list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+               list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
                        struct i915_request *prev;
 
-                       prev = last_request_on_engine(timeline, engine);
-                       if (prev)
-                               i915_sw_fence_await_sw_fence_gfp(&rq->submit,
-                                                                &prev->submit,
-                                                                I915_FENCE_GFP);
+                       prev = last_request_on_engine(ring->timeline, engine);
+                       if (!prev)
+                               continue;
+
+                       if (prev->gem_context == i915->kernel_context)
+                               continue;
+
+                       GEM_TRACE("add barrier on %s for %llx:%d\n",
+                                 engine->name,
+                                 prev->fence.context,
+                                 prev->fence.seqno);
+                       i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+                                                        &prev->submit,
+                                                        I915_FENCE_GFP);
+                       i915_timeline_sync_set(rq->timeline, &prev->fence);
                }
 
-               /*
-                * Force a flush after the switch to ensure that all rendering
-                * and operations prior to switching to the kernel context hits
-                * memory. This should be guaranteed by the previous request,
-                * but an extra layer of paranoia before we declare the system
-                * idle (on suspend etc) is advisable!
-                */
-               __i915_request_add(rq, true);
+               i915_request_add(rq);
        }
 
        return 0;
@@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                break;
        case I915_CONTEXT_PARAM_GTT_SIZE:
                if (ctx->ppgtt)
-                       args->value = ctx->ppgtt->base.total;
+                       args->value = ctx->ppgtt->vm.total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
-                       args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+                       args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
                else
-                       args->value = to_i915(dev)->ggtt.base.total;
+                       args->value = to_i915(dev)->ggtt.vm.total;
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                args->value = i915_gem_context_no_error_capture(ctx);
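
The rewritten idle check above replaces the simple has-kernel-context test with an explicit per-timeline barrier walk. A rough, self-contained sketch of that decision follows; all names and types are illustrative stand-ins, not the kernel code:

#include <stdbool.h>
#include <stddef.h>

enum { KERNEL_CTX = 0 };

struct timeline {
	int last_ctx;      /* context id of the most recent request */
	bool barrier_seen; /* kernel context already awaited this timeline */
};

static bool has_kernel_context_barrier(const struct timeline *tl, size_t n,
				       int last_retired_ctx)
{
	bool any_active = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (tl[i].last_ctx == KERNEL_CTX)
			continue;      /* that work is the barrier itself */
		if (!tl[i].barrier_seen)
			return false;  /* unsynced user work: emit a barrier */
		any_active = true;
	}

	/*
	 * Either the queued barrier runs after all active work, or the
	 * engine has gone idle and must have retired into the kernel context.
	 */
	return any_active || last_retired_ctx == KERNEL_CTX;
}
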
index ace3b12..b116e49 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/radix-tree.h>
 
 #include "i915_gem.h"
+#include "i915_scheduler.h"
 
 struct pid;
 
@@ -45,6 +46,13 @@ struct intel_ring;
 
 #define DEFAULT_CONTEXT_HANDLE 0
 
+struct intel_context;
+
+struct intel_context_ops {
+       void (*unpin)(struct intel_context *ce);
+       void (*destroy)(struct intel_context *ce);
+};
+
 /**
  * struct i915_gem_context - client state
  *
@@ -144,11 +152,14 @@ struct i915_gem_context {
 
        /** engine: per-engine logical HW state */
        struct intel_context {
+               struct i915_gem_context *gem_context;
                struct i915_vma *state;
                struct intel_ring *ring;
                u32 *lrc_reg_state;
                u64 lrc_desc;
                int pin_count;
+
+               const struct intel_context_ops *ops;
        } __engine[I915_NUM_ENGINES];
 
        /** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
        return &ctx->__engine[engine->id];
 }
 
-static inline struct intel_ring *
+static inline struct intel_context *
 intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 {
        return engine->context_pin(engine, ctx);
 }
 
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
-                                      const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
-
        GEM_BUG_ON(!ce->pin_count);
        ce->pin_count++;
 }
 
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
-                                      struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
 {
-       engine->context_unpin(engine, ctx);
+       GEM_BUG_ON(!ce->pin_count);
+       if (--ce->pin_count)
+               return;
+
+       GEM_BUG_ON(!ce->ops);
+       ce->ops->unpin(ce);
 }
 
 /* i915_gem_context.c */
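
The header change routes the final context unpin through a per-backend ops vtable instead of an engine callback, mirroring the new intel_context_unpin() above. A minimal sketch of that refcount-plus-vtable pattern, with stand-in names:

#include <assert.h>

struct ctx;

struct ctx_ops {
	void (*unpin)(struct ctx *c);
};

struct ctx {
	int pin_count;
	const struct ctx_ops *ops;
};

static void ctx_unpin(struct ctx *c)
{
	assert(c->pin_count > 0);
	if (--c->pin_count)
		return;            /* still pinned by other users */

	assert(c->ops && c->ops->unpin);
	c->ops->unpin(c);          /* final unpin: backend releases HW state */
}
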
index 69a7aec..82e2ca1 100644 (file)
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
        i915_gem_object_unpin_map(obj);
 }
 
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-       return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = i915_gem_dmabuf_kmap,
-       .map_atomic = i915_gem_dmabuf_kmap_atomic,
        .unmap = i915_gem_dmabuf_kunmap,
-       .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
index 22df17c..60dc2a8 100644 (file)
@@ -723,7 +723,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
                return -ENOENT;
 
        eb->ctx = ctx;
-       eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+       eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
 
        eb->context_flags = 0;
        if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -921,7 +921,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
        i915_gem_object_unpin_map(cache->rq->batch->obj);
        i915_gem_chipset_flush(cache->rq->i915);
 
-       __i915_request_add(cache->rq, true);
+       i915_request_add(cache->rq);
        cache->rq = NULL;
 }
 
@@ -948,9 +948,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
                if (cache->node.allocated) {
                        struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
-                       ggtt->base.clear_range(&ggtt->base,
-                                              cache->node.start,
-                                              cache->node.size);
+                       ggtt->vm.clear_range(&ggtt->vm,
+                                            cache->node.start,
+                                            cache->node.size);
                        drm_mm_remove_node(&cache->node);
                } else {
                        i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1021,7 +1021,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                if (IS_ERR(vma)) {
                        memset(&cache->node, 0, sizeof(cache->node));
                        err = drm_mm_insert_node_in_range
-                               (&ggtt->base.mm, &cache->node,
+                               (&ggtt->vm.mm, &cache->node,
                                 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                 0, ggtt->mappable_end,
                                 DRM_MM_INSERT_LOW);
@@ -1042,9 +1042,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
        offset = cache->node.start;
        if (cache->node.allocated) {
                wmb();
-               ggtt->base.insert_page(&ggtt->base,
-                                      i915_gem_object_get_dma_address(obj, page),
-                                      offset, I915_CACHE_NONE, 0);
+               ggtt->vm.insert_page(&ggtt->vm,
+                                    i915_gem_object_get_dma_address(obj, page),
+                                    offset, I915_CACHE_NONE, 0);
        } else {
                offset += page << PAGE_SHIFT;
        }
@@ -2438,7 +2438,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        trace_i915_request_queue(eb.request, eb.batch_flags);
        err = eb_submit(&eb);
 err_request:
-       __i915_request_add(eb.request, err == 0);
+       i915_request_add(eb.request);
        add_to_client(eb.request, file);
 
        if (fences)
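
Alongside folding the flush flag into i915_request_add(), the execbuffer hunks apply the tree-wide spelling change from ppgtt->base to ppgtt->vm. The underlying pattern is the usual embedded-struct upcast; a hypothetical sketch with illustrative types:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space {
	unsigned long long total;
};

struct hw_ppgtt {
	struct address_space vm; /* was spelled ->base before this series */
};

static struct hw_ppgtt *to_ppgtt(struct address_space *vm)
{
	return container_of(vm, struct hw_ppgtt, vm);
}
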
index 996ab2a..c6aa761 100644 (file)
@@ -42,7 +42,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
 /**
  * DOC: Global GTT views
@@ -195,13 +195,13 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
                          u32 unused)
 {
        u32 pte_flags;
-       int ret;
+       int err;
 
        if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
-               ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
-                                                vma->size);
-               if (ret)
-                       return ret;
+               err = vma->vm->allocate_va_range(vma->vm,
+                                                vma->node.start, vma->size);
+               if (err)
+                       return err;
        }
 
        /* Currently applicable only to VLV */
@@ -489,7 +489,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
                            struct i915_page_dma *p,
                            gfp_t gfp)
 {
-       p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+       p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
        if (unlikely(!p->page))
                return -ENOMEM;
 
@@ -506,7 +506,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
 static int setup_page_dma(struct i915_address_space *vm,
                          struct i915_page_dma *p)
 {
-       return __setup_page_dma(vm, p, I915_GFP_DMA);
+       return __setup_page_dma(vm, p, __GFP_HIGHMEM);
 }
 
 static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +520,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
 
 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
 
 static void fill_page_dma(struct i915_address_space *vm,
                          struct i915_page_dma *p,
@@ -614,7 +614,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 {
        struct i915_page_table *pt;
 
-       pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+       pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pt))
                return ERR_PTR(-ENOMEM);
 
@@ -640,18 +640,17 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
 }
 
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
                               struct i915_page_table *pt)
 {
-       fill32_px(vm, pt,
-                 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+       fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 {
        struct i915_page_directory *pd;
 
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+       pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);
 
@@ -685,7 +684,7 @@ static int __pdp_init(struct i915_address_space *vm,
        const unsigned int pdpes = i915_pdpes_per_pdp(vm);
 
        pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
-                                           GFP_KERNEL | __GFP_NOWARN);
+                                           I915_GFP_ALLOW_FAIL);
        if (unlikely(!pdp->page_directory))
                return -ENOMEM;
 
@@ -765,53 +764,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
        memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
 }
 
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
-                         unsigned entry,
-                         dma_addr_t addr)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 *cs;
-
-       BUG_ON(entry >= 4);
-
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(1);
-       *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
-       *cs++ = upper_32_bits(addr);
-       *cs++ = MI_LOAD_REGISTER_IMM(1);
-       *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
-       *cs++ = lower_32_bits(addr);
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
-                              struct i915_request *rq)
-{
-       int i, ret;
-
-       for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
-               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
-               ret = gen8_write_pdp(rq, i, pd_daddr);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
-                              struct i915_request *rq)
-{
-       return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
  * the page table structures, we mark them dirty so that
  * context switching/execlist queuing code takes extra steps
@@ -819,7 +771,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
  */
 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 {
-       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
 }
 
 /* Removes entries from a single page table, releasing it if it's empty.
@@ -1012,7 +964,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
        gen8_pte_t *vaddr;
        bool ret;
 
-       GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+       GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
        pd = pdp->page_directory[idx->pdpe];
        vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
        do {
@@ -1043,7 +995,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
                                        break;
                                }
 
-                               GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+                               GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
                                pd = pdp->page_directory[idx->pdpe];
                        }
 
@@ -1229,7 +1181,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 {
        int ret;
 
-       ret = setup_scratch_page(vm, I915_GFP_DMA);
+       ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;
 
@@ -1272,7 +1224,7 @@ free_scratch_page:
 
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        struct drm_i915_private *dev_priv = vm->i915;
        enum vgt_g2v_type msg;
        int i;
@@ -1333,13 +1285,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
        int i;
 
        for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
-               if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+               if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
                        continue;
 
-               gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+               gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
        }
 
-       cleanup_px(&ppgtt->base, &ppgtt->pml4);
+       cleanup_px(&ppgtt->vm, &ppgtt->pml4);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1305,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
        if (use_4lvl(vm))
                gen8_ppgtt_cleanup_4lvl(ppgtt);
        else
-               gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+               gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
 
        gen8_free_scratch(vm);
 }
@@ -1489,7 +1441,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                          gen8_pte_t scratch_pte,
                          struct seq_file *m)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory *pd;
        u32 pdpe;
 
@@ -1499,7 +1451,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                u64 pd_start = start;
                u32 pde;
 
-               if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+               if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
                        continue;
 
                seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1459,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                        u32 pte;
                        gen8_pte_t *pt_vaddr;
 
-                       if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+                       if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
                                continue;
 
                        pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1492,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        const gen8_pte_t scratch_pte =
                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
-       u64 start = 0, length = ppgtt->base.total;
+       u64 start = 0, length = ppgtt->vm.total;
 
        if (use_4lvl(vm)) {
                u64 pml4e;
@@ -1551,7 +1503,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
                struct i915_page_directory_pointer *pdp;
 
                gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-                       if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+                       if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
                                continue;
 
                        seq_printf(m, "    PML4E #%llu\n", pml4e);
@@ -1564,10 +1516,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 
 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
        struct i915_page_directory *pd;
-       u64 start = 0, length = ppgtt->base.total;
+       u64 start = 0, length = ppgtt->vm.total;
        u64 from = start;
        unsigned int pdpe;
 
@@ -1601,211 +1553,142 @@ unwind:
  * space.
  *
  */
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 {
-       struct i915_address_space *vm = &ppgtt->base;
-       struct drm_i915_private *dev_priv = vm->i915;
-       int ret;
+       struct i915_hw_ppgtt *ppgtt;
+       int err;
 
-       ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
+
+       ppgtt->vm.i915 = i915;
+       ppgtt->vm.dma = &i915->drm.pdev->dev;
+
+       ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
                1ULL << 48 :
                1ULL << 32;
 
        /* There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play safe for now.
         */
-       if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
-               ppgtt->base.pt_kmap_wc = true;
+       if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
+               ppgtt->vm.pt_kmap_wc = true;
 
-       ret = gen8_init_scratch(&ppgtt->base);
-       if (ret) {
-               ppgtt->base.total = 0;
-               return ret;
-       }
+       err = gen8_init_scratch(&ppgtt->vm);
+       if (err)
+               goto err_free;
 
-       if (use_4lvl(vm)) {
-               ret = setup_px(&ppgtt->base, &ppgtt->pml4);
-               if (ret)
-                       goto free_scratch;
+       if (use_4lvl(&ppgtt->vm)) {
+               err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+               if (err)
+                       goto err_scratch;
 
-               gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+               gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
 
-               ppgtt->switch_mm = gen8_mm_switch_4lvl;
-               ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
-               ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
-               ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+               ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+               ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+               ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
        } else {
-               ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
-               if (ret)
-                       goto free_scratch;
+               err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+               if (err)
+                       goto err_scratch;
 
-               if (intel_vgpu_active(dev_priv)) {
-                       ret = gen8_preallocate_top_level_pdp(ppgtt);
-                       if (ret) {
+               if (intel_vgpu_active(i915)) {
+                       err = gen8_preallocate_top_level_pdp(ppgtt);
+                       if (err) {
                                __pdp_fini(&ppgtt->pdp);
-                               goto free_scratch;
+                               goto err_scratch;
                        }
                }
 
-               ppgtt->switch_mm = gen8_mm_switch_3lvl;
-               ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
-               ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
-               ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+               ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+               ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+               ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
        }
 
-       if (intel_vgpu_active(dev_priv))
+       if (intel_vgpu_active(i915))
                gen8_ppgtt_notify_vgt(ppgtt, true);
 
-       ppgtt->base.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->base.unbind_vma = ppgtt_unbind_vma;
-       ppgtt->base.bind_vma = ppgtt_bind_vma;
-       ppgtt->base.set_pages = ppgtt_set_pages;
-       ppgtt->base.clear_pages = clear_pages;
+       ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
        ppgtt->debug_dump = gen8_dump_ppgtt;
 
-       return 0;
+       ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
+       ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
 
-free_scratch:
-       gen8_free_scratch(&ppgtt->base);
-       return ret;
+       return ppgtt;
+
+err_scratch:
+       gen8_free_scratch(&ppgtt->vm);
+err_free:
+       kfree(ppgtt);
+       return ERR_PTR(err);
 }
 
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
-       struct i915_address_space *vm = &ppgtt->base;
-       struct i915_page_table *unused;
-       gen6_pte_t scratch_pte;
-       u32 pd_entry, pte, pde;
-       u32 start = 0, length = ppgtt->base.total;
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+       struct i915_page_table *pt;
+       u32 pte, pde;
 
-       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-                                    I915_CACHE_LLC, 0);
+       gen6_for_all_pdes(pt, &base->pd, pde) {
+               gen6_pte_t *vaddr;
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
-               u32 expected;
-               gen6_pte_t *pt_vaddr;
-               const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
-               pd_entry = readl(ppgtt->pd_addr + pde);
-               expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
-
-               if (pd_entry != expected)
-                       seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
-                                  pde,
-                                  pd_entry,
-                                  expected);
-               seq_printf(m, "\tPDE: %x\n", pd_entry);
-
-               pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
-
-               for (pte = 0; pte < GEN6_PTES; pte+=4) {
-                       unsigned long va =
-                               (pde * PAGE_SIZE * GEN6_PTES) +
-                               (pte * PAGE_SIZE);
+               if (pt == base->vm.scratch_pt)
+                       continue;
+
+               if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+                       u32 expected =
+                               GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
+                               GEN6_PDE_VALID;
+                       u32 pd_entry = readl(ppgtt->pd_addr + pde);
+
+                       if (pd_entry != expected)
+                               seq_printf(m,
+                                          "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+                                          pde,
+                                          pd_entry,
+                                          expected);
+
+                       seq_printf(m, "\tPDE: %x\n", pd_entry);
+               }
+
+               vaddr = kmap_atomic_px(base->pd.page_table[pde]);
+               for (pte = 0; pte < GEN6_PTES; pte += 4) {
                        int i;
-                       bool found = false;
+
                        for (i = 0; i < 4; i++)
-                               if (pt_vaddr[pte + i] != scratch_pte)
-                                       found = true;
-                       if (!found)
+                               if (vaddr[pte + i] != scratch_pte)
+                                       break;
+                       if (i == 4)
                                continue;
 
-                       seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+                       seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+                                  pde, pte,
+                                  (pde * GEN6_PTES + pte) * PAGE_SIZE);
                        for (i = 0; i < 4; i++) {
-                               if (pt_vaddr[pte + i] != scratch_pte)
-                                       seq_printf(m, " %08x", pt_vaddr[pte + i]);
+                               if (vaddr[pte + i] != scratch_pte)
+                                       seq_printf(m, " %08x", vaddr[pte + i]);
                                else
-                                       seq_puts(m, "  SCRATCH ");
+                                       seq_puts(m, "  SCRATCH");
                        }
                        seq_puts(m, "\n");
                }
-               kunmap_atomic(pt_vaddr);
+               kunmap_atomic(vaddr);
        }
 }
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
                                  const unsigned int pde,
                                  const struct i915_page_table *pt)
 {
        /* Caller needs to make sure the write completes if necessary */
-       writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
-                      ppgtt->pd_addr + pde);
-}
-
-/* Write all the page tables found in the ppgtt structure to incrementing page
- * directories. */
-static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
-                                 u32 start, u32 length)
-{
-       struct i915_page_table *pt;
-       unsigned int pde;
-
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
-               gen6_write_pde(ppgtt, pde, pt);
-
-       mark_tlbs_dirty(ppgtt);
-       wmb();
-}
-
-static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
-{
-       GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
-       return ppgtt->pd.base.ggtt_offset << 10;
-}
-
-static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 *cs;
-
-       /* NB: TLBs must be flushed and invalidated before a switch */
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(2);
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
-       *cs++ = PP_DIR_DCLV_2G;
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-       *cs++ = get_pd_offset(ppgtt);
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
-static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 *cs;
-
-       /* NB: TLBs must be flushed and invalidated before a switch */
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(2);
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
-       *cs++ = PP_DIR_DCLV_2G;
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-       *cs++ = get_pd_offset(ppgtt);
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
-static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       struct drm_i915_private *dev_priv = rq->i915;
-
-       I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
-       I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-       return 0;
+       iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+                 ppgtt->pd_addr + pde);
 }
 
 static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
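
gen6_write_pde() now writes the descriptor with iowrite32 through the GGTT io mapping. A sketch of the update it performs, using placeholder encodings (the real GEN6_PDE_ADDR_ENCODE() differs):

#include <stdint.h>

#define PDE_VALID 0x1u
/* placeholder encoding: keep the page-aligned address bits only */
#define PDE_ADDR_ENCODE(addr) ((uint32_t)(((addr) >> 12) << 12))

static inline void write_pde(volatile uint32_t *pd_base, unsigned int pde,
			     uint64_t pt_dma)
{
	/* caller must order the write and invalidate the GGTT afterwards */
	pd_base[pde] = PDE_ADDR_ENCODE(pt_dma) | PDE_VALID;
}
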
@@ -1867,22 +1750,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   u64 start, u64 length)
 {
-       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
        unsigned int first_entry = start >> PAGE_SHIFT;
        unsigned int pde = first_entry / GEN6_PTES;
        unsigned int pte = first_entry % GEN6_PTES;
        unsigned int num_entries = length >> PAGE_SHIFT;
-       gen6_pte_t scratch_pte =
-               vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
 
        while (num_entries) {
-               struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
-               unsigned int end = min(pte + num_entries, GEN6_PTES);
+               struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+               const unsigned int end = min(pte + num_entries, GEN6_PTES);
+               const unsigned int count = end - pte;
                gen6_pte_t *vaddr;
 
-               num_entries -= end - pte;
+               GEM_BUG_ON(pt == vm->scratch_pt);
+
+               num_entries -= count;
+
+               GEM_BUG_ON(count > pt->used_ptes);
+               pt->used_ptes -= count;
+               if (!pt->used_ptes)
+                       ppgtt->scan_for_unused_pt = true;
 
-               /* Note that the hw doesn't support removing PDE on the fly
+               /*
+                * Note that the hw doesn't support removing a PDE on the fly
                 * (they are cached inside the context with no means to
                 * invalidate the cache), so we can only reset the PTE
                 * entries back to scratch.
@@ -1911,6 +1802,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
        struct sgt_dma iter = sgt_dma(vma);
        gen6_pte_t *vaddr;
 
+       GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+
        vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
        do {
                vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1939,194 +1832,277 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 static int gen6_alloc_va_range(struct i915_address_space *vm,
                               u64 start, u64 length)
 {
-       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
        struct i915_page_table *pt;
        u64 from = start;
        unsigned int pde;
        bool flush = false;
 
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
+       gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+               const unsigned int count = gen6_pte_count(start, length);
+
                if (pt == vm->scratch_pt) {
                        pt = alloc_pt(vm);
                        if (IS_ERR(pt))
                                goto unwind_out;
 
-                       gen6_initialize_pt(vm, pt);
-                       ppgtt->pd.page_table[pde] = pt;
-                       gen6_write_pde(ppgtt, pde, pt);
-                       flush = true;
+                       gen6_initialize_pt(ppgtt, pt);
+                       ppgtt->base.pd.page_table[pde] = pt;
+
+                       if (i915_vma_is_bound(ppgtt->vma,
+                                             I915_VMA_GLOBAL_BIND)) {
+                               gen6_write_pde(ppgtt, pde, pt);
+                               flush = true;
+                       }
+
+                       GEM_BUG_ON(pt->used_ptes);
                }
+
+               pt->used_ptes += count;
        }
 
        if (flush) {
-               mark_tlbs_dirty(ppgtt);
-               wmb();
+               mark_tlbs_dirty(&ppgtt->base);
+               gen6_ggtt_invalidate(ppgtt->base.vm.i915);
        }
 
        return 0;
 
 unwind_out:
-       gen6_ppgtt_clear_range(vm, from, start);
+       gen6_ppgtt_clear_range(vm, from, start - from);
        return -ENOMEM;
 }
 
-static int gen6_init_scratch(struct i915_address_space *vm)
+static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 {
+       struct i915_address_space * const vm = &ppgtt->base.vm;
+       struct i915_page_table *unused;
+       u32 pde;
        int ret;
 
-       ret = setup_scratch_page(vm, I915_GFP_DMA);
+       ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;
 
+       ppgtt->scratch_pte =
+               vm->pte_encode(vm->scratch_page.daddr,
+                              I915_CACHE_NONE, PTE_READ_ONLY);
+
        vm->scratch_pt = alloc_pt(vm);
        if (IS_ERR(vm->scratch_pt)) {
                cleanup_scratch_page(vm);
                return PTR_ERR(vm->scratch_pt);
        }
 
-       gen6_initialize_pt(vm, vm->scratch_pt);
+       gen6_initialize_pt(ppgtt, vm->scratch_pt);
+       gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
+               ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
        return 0;
 }
 
-static void gen6_free_scratch(struct i915_address_space *vm)
+static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
 {
        free_pt(vm, vm->scratch_pt);
        cleanup_scratch_page(vm);
 }
 
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
 {
-       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-       struct i915_page_directory *pd = &ppgtt->pd;
        struct i915_page_table *pt;
        u32 pde;
 
-       drm_mm_remove_node(&ppgtt->node);
+       gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+               if (pt != ppgtt->base.vm.scratch_pt)
+                       free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
 
-       gen6_for_all_pdes(pt, pd, pde)
-               if (pt != vm->scratch_pt)
-                       free_pt(vm, pt);
+       i915_vma_destroy(ppgtt->vma);
 
-       gen6_free_scratch(vm);
+       gen6_ppgtt_free_pd(ppgtt);
+       gen6_ppgtt_free_scratch(vm);
 }
 
-static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+static int pd_vma_set_pages(struct i915_vma *vma)
 {
-       struct i915_address_space *vm = &ppgtt->base;
-       struct drm_i915_private *dev_priv = ppgtt->base.i915;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       int ret;
+       vma->pages = ERR_PTR(-ENODEV);
+       return 0;
+}
 
-       /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
-        * allocator works in address space sizes, so it's multiplied by page
-        * size. We allocate at the top of the GTT to avoid fragmentation.
-        */
-       BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!vma->pages);
 
-       ret = gen6_init_scratch(vm);
-       if (ret)
-               return ret;
+       vma->pages = NULL;
+}
 
-       ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
-                                 GEN6_PD_SIZE, GEN6_PD_ALIGN,
-                                 I915_COLOR_UNEVICTABLE,
-                                 0, ggtt->base.total,
-                                 PIN_HIGH);
-       if (ret)
-               goto err_out;
+static int pd_vma_bind(struct i915_vma *vma,
+                      enum i915_cache_level cache_level,
+                      u32 unused)
+{
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+       struct gen6_hw_ppgtt *ppgtt = vma->private;
+       u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+       struct i915_page_table *pt;
+       unsigned int pde;
 
-       if (ppgtt->node.start < ggtt->mappable_end)
-               DRM_DEBUG("Forced to use aperture for PDEs\n");
+       ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+       ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
 
-       ppgtt->pd.base.ggtt_offset =
-               ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+       gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+               gen6_write_pde(ppgtt, pde, pt);
 
-       ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
-               ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+       mark_tlbs_dirty(&ppgtt->base);
+       gen6_ggtt_invalidate(ppgtt->base.vm.i915);
 
        return 0;
+}
 
-err_out:
-       gen6_free_scratch(vm);
-       return ret;
+static void pd_vma_unbind(struct i915_vma *vma)
+{
+       struct gen6_hw_ppgtt *ppgtt = vma->private;
+       struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+       struct i915_page_table *pt;
+       unsigned int pde;
+
+       if (!ppgtt->scan_for_unused_pt)
+               return;
+
+       /* Free any page tables that are no longer in use */
+       gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
+               if (pt->used_ptes || pt == scratch_pt)
+                       continue;
+
+               free_pt(&ppgtt->base.vm, pt);
+               ppgtt->base.pd.page_table[pde] = scratch_pt;
+       }
+
+       ppgtt->scan_for_unused_pt = false;
 }
 
-static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+static const struct i915_vma_ops pd_vma_ops = {
+       .set_pages = pd_vma_set_pages,
+       .clear_pages = pd_vma_clear_pages,
+       .bind_vma = pd_vma_bind,
+       .unbind_vma = pd_vma_unbind,
+};
+
+static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 {
-       return gen6_ppgtt_allocate_page_directories(ppgtt);
+       struct drm_i915_private *i915 = ppgtt->base.vm.i915;
+       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct i915_vma *vma;
+       int i;
+
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(size > ggtt->vm.total);
+
+       vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+       if (!vma)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+               init_request_active(&vma->last_read[i], NULL);
+       init_request_active(&vma->last_fence, NULL);
+
+       vma->vm = &ggtt->vm;
+       vma->ops = &pd_vma_ops;
+       vma->private = ppgtt;
+
+       vma->size = size;
+       vma->fence_size = size;
+       vma->flags = I915_VMA_GGTT;
+       vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
+
+       INIT_LIST_HEAD(&vma->obj_link);
+       list_add(&vma->vm_link, &vma->vm->unbound_list);
+
+       return vma;
 }
 
-static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
-                                 u64 start, u64 length)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
-       struct i915_page_table *unused;
-       u32 pde;
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
-               ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+       /*
+        * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+        * which will be pinned into every active context.
+        * (When vma->pin_count becomes atomic, I expect we will naturally
+        * need a larger, unpacked, type and kill this redundancy.)
+        */
+       if (ppgtt->pin_count++)
+               return 0;
+
+       /*
+        * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+        * allocator works in address space sizes, so it's multiplied by page
+        * size. We allocate at the top of the GTT to avoid fragmentation.
+        */
+       return i915_vma_pin(ppgtt->vma,
+                           0, GEN6_PD_ALIGN,
+                           PIN_GLOBAL | PIN_HIGH);
 }
 
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
 {
-       struct drm_i915_private *dev_priv = ppgtt->base.i915;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       int ret;
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
 
-       ppgtt->base.pte_encode = ggtt->base.pte_encode;
-       if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
-               ppgtt->switch_mm = gen6_mm_switch;
-       else if (IS_HASWELL(dev_priv))
-               ppgtt->switch_mm = hsw_mm_switch;
-       else if (IS_GEN7(dev_priv))
-               ppgtt->switch_mm = gen7_mm_switch;
-       else
-               BUG();
+       GEM_BUG_ON(!ppgtt->pin_count);
+       if (--ppgtt->pin_count)
+               return;
 
-       ret = gen6_ppgtt_alloc(ppgtt);
-       if (ret)
-               return ret;
+       i915_vma_unpin(ppgtt->vma);
+}
+
+static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
+{
+       struct i915_ggtt * const ggtt = &i915->ggtt;
+       struct gen6_hw_ppgtt *ppgtt;
+       int err;
+
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
 
-       ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+       ppgtt->base.vm.i915 = i915;
+       ppgtt->base.vm.dma = &i915->drm.pdev->dev;
 
-       gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
-       gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+       ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 
-       ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
-       if (ret) {
-               gen6_ppgtt_cleanup(&ppgtt->base);
-               return ret;
-       }
+       ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+       ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.debug_dump = gen6_dump_ppgtt;
 
-       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->base.unbind_vma = ppgtt_unbind_vma;
-       ppgtt->base.bind_vma = ppgtt_bind_vma;
-       ppgtt->base.set_pages = ppgtt_set_pages;
-       ppgtt->base.clear_pages = clear_pages;
-       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->debug_dump = gen6_dump_ppgtt;
+       ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
+       ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+       ppgtt->base.vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
 
-       DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
-                        ppgtt->node.size >> 20,
-                        ppgtt->node.start / PAGE_SIZE);
+       ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
 
-       DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
-                        ppgtt->pd.base.ggtt_offset << 10);
+       err = gen6_ppgtt_init_scratch(ppgtt);
+       if (err)
+               goto err_free;
 
-       return 0;
-}
+       ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+       if (IS_ERR(ppgtt->vma)) {
+               err = PTR_ERR(ppgtt->vma);
+               goto err_scratch;
+       }
 
-static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
-                          struct drm_i915_private *dev_priv)
-{
-       ppgtt->base.i915 = dev_priv;
-       ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+       return &ppgtt->base;
 
-       if (INTEL_GEN(dev_priv) < 8)
-               return gen6_ppgtt_init(ppgtt);
-       else
-               return gen8_ppgtt_init(ppgtt);
+err_scratch:
+       gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_free:
+       kfree(ppgtt);
+       return ERR_PTR(err);
 }
 
 static void i915_address_space_init(struct i915_address_space *vm,
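
gen6_ppgtt_pin()/gen6_ppgtt_unpin() above guard the page-directory binding with a simple pin count, so only the first user pays for the GGTT pin. Reduced to its essentials (illustrative stand-ins, no error-path subtleties):

#include <assert.h>

struct ppgtt {
	int pin_count;
};

static int bind_into_ggtt(struct ppgtt *p)    { (void)p; return 0; }
static void unbind_from_ggtt(struct ppgtt *p) { (void)p; }

static int ppgtt_pin(struct ppgtt *p)
{
	if (p->pin_count++)
		return 0;          /* page directory already bound */
	return bind_into_ggtt(p);  /* first pin pays the binding cost */
}

static void ppgtt_unpin(struct ppgtt *p)
{
	assert(p->pin_count > 0);
	if (--p->pin_count)
		return;
	unbind_from_ggtt(p);
}
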
@@ -2212,29 +2188,31 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
        return 0;
 }
 
+static struct i915_hw_ppgtt *
+__hw_ppgtt_create(struct drm_i915_private *i915)
+{
+       if (INTEL_GEN(i915) < 8)
+               return gen6_ppgtt_create(i915);
+       else
+               return gen8_ppgtt_create(i915);
+}
+
 struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *dev_priv,
+i915_ppgtt_create(struct drm_i915_private *i915,
                  struct drm_i915_file_private *fpriv,
                  const char *name)
 {
        struct i915_hw_ppgtt *ppgtt;
-       int ret;
-
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return ERR_PTR(-ENOMEM);
 
-       ret = __hw_ppgtt_init(ppgtt, dev_priv);
-       if (ret) {
-               kfree(ppgtt);
-               return ERR_PTR(ret);
-       }
+       ppgtt = __hw_ppgtt_create(i915);
+       if (IS_ERR(ppgtt))
+               return ppgtt;
 
        kref_init(&ppgtt->ref);
-       i915_address_space_init(&ppgtt->base, dev_priv, name);
-       ppgtt->base.file = fpriv;
+       i915_address_space_init(&ppgtt->vm, i915, name);
+       ppgtt->vm.file = fpriv;
 
-       trace_i915_ppgtt_create(&ppgtt->base);
+       trace_i915_ppgtt_create(&ppgtt->vm);
 
        return ppgtt;
 }
@@ -2268,16 +2246,16 @@ void i915_ppgtt_release(struct kref *kref)
        struct i915_hw_ppgtt *ppgtt =
                container_of(kref, struct i915_hw_ppgtt, ref);
 
-       trace_i915_ppgtt_release(&ppgtt->base);
+       trace_i915_ppgtt_release(&ppgtt->vm);
 
-       ppgtt_destroy_vma(&ppgtt->base);
+       ppgtt_destroy_vma(&ppgtt->vm);
 
-       GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
-       GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
-       GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
 
-       ppgtt->base.cleanup(&ppgtt->base);
-       i915_address_space_fini(&ppgtt->base);
+       ppgtt->vm.cleanup(&ppgtt->vm);
+       i915_address_space_fini(&ppgtt->vm);
        kfree(ppgtt);
 }
 
@@ -2373,7 +2351,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
 
        i915_check_and_clear_faults(dev_priv);
 
-       ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+       ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
        i915_ggtt_invalidate(dev_priv);
 }
@@ -2715,17 +2693,16 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
        if (flags & I915_VMA_LOCAL_BIND) {
                struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
-               if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
-                   appgtt->base.allocate_va_range) {
-                       ret = appgtt->base.allocate_va_range(&appgtt->base,
-                                                            vma->node.start,
-                                                            vma->size);
+               if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+                       ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+                                                          vma->node.start,
+                                                          vma->size);
                        if (ret)
                                return ret;
                }
 
-               appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
-                                           pte_flags);
+               appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+                                         pte_flags);
        }
 
        if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2725,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
        }
 
        if (vma->flags & I915_VMA_LOCAL_BIND) {
-               struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+               struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
 
                vm->clear_range(vm, vma->node.start, vma->size);
        }
@@ -2815,30 +2792,28 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);
 
-       if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+       if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
                err = -ENODEV;
                goto err_ppgtt;
        }
 
-       if (ppgtt->base.allocate_va_range) {
-               /* Note we only pre-allocate as far as the end of the global
-                * GTT. On 48b / 4-level page-tables, the difference is very,
-                * very significant! We have to preallocate as GVT/vgpu does
-                * not like the page directory disappearing.
-                */
-               err = ppgtt->base.allocate_va_range(&ppgtt->base,
-                                                   0, ggtt->base.total);
-               if (err)
-                       goto err_ppgtt;
-       }
+       /*
+        * Note we only pre-allocate as far as the end of the global
+        * GTT. On 48b / 4-level page-tables, the difference is very,
+        * very significant! We have to preallocate as GVT/vgpu does
+        * not like the page directory disappearing.
+        */
+       err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+       if (err)
+               goto err_ppgtt;
 
        i915->mm.aliasing_ppgtt = ppgtt;
 
-       GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
-       ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+       GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+       ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
 
-       GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
-       ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+       GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+       ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
 
        return 0;
 
@@ -2858,8 +2833,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
 
        i915_ppgtt_put(ppgtt);
 
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
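
Installing and removing the aliasing ppgtt now swaps entries in the per-vm vma_ops table rather than patching callbacks scattered across the address space. A self-contained sketch of that shape (all names are stand-ins):

struct vma;

struct vma_ops {
	int  (*bind_vma)(struct vma *v);
	void (*unbind_vma)(struct vma *v);
};

struct address_space {
	struct vma_ops vma_ops;
};

static int  ggtt_bind(struct vma *v)       { (void)v; return 0; }
static void ggtt_unbind(struct vma *v)     { (void)v; }
static int  aliasing_bind(struct vma *v)   { (void)v; return 0; }
static void aliasing_unbind(struct vma *v) { (void)v; }

static void enable_aliasing(struct address_space *ggtt_vm)
{
	/* mirrors i915_gem_init_aliasing_ppgtt(): swap the hooks in place */
	ggtt_vm->vma_ops.bind_vma   = aliasing_bind;
	ggtt_vm->vma_ops.unbind_vma = aliasing_unbind;
}

static void disable_aliasing(struct address_space *ggtt_vm)
{
	/* mirrors i915_gem_fini_aliasing_ppgtt(): restore the defaults */
	ggtt_vm->vma_ops.bind_vma   = ggtt_bind;
	ggtt_vm->vma_ops.unbind_vma = ggtt_unbind;
}
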
@@ -2883,7 +2858,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
                return ret;
 
        /* Reserve a mappable slot for our lockless error capture */
-       ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+       ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
                                          PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
@@ -2891,16 +2866,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
                return ret;
 
        /* Clear any non-preallocated blocks */
-       drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+       drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt->base.clear_range(&ggtt->base, hole_start,
-                                      hole_end - hole_start);
+               ggtt->vm.clear_range(&ggtt->vm, hole_start,
+                                    hole_end - hole_start);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt->base.clear_range(&ggtt->base,
-                              ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+       ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 
        if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
                ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,28 +2899,24 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        struct i915_vma *vma, *vn;
        struct pagevec *pvec;
 
-       ggtt->base.closed = true;
-
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
-       list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
-               WARN_ON(i915_vma_unbind(vma));
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-
-       i915_gem_cleanup_stolen(&dev_priv->drm);
+       ggtt->vm.closed = true;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_fini_aliasing_ppgtt(dev_priv);
 
+       GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+               WARN_ON(i915_vma_unbind(vma));
+
        if (drm_mm_node_allocated(&ggtt->error_capture))
                drm_mm_remove_node(&ggtt->error_capture);
 
-       if (drm_mm_initialized(&ggtt->base.mm)) {
+       if (drm_mm_initialized(&ggtt->vm.mm)) {
                intel_vgt_deballoon(dev_priv);
-               i915_address_space_fini(&ggtt->base);
+               i915_address_space_fini(&ggtt->vm);
        }
 
-       ggtt->base.cleanup(&ggtt->base);
+       ggtt->vm.cleanup(&ggtt->vm);
 
        pvec = &dev_priv->mm.wc_stash;
        if (pvec->nr) {
@@ -2958,6 +2928,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 
        arch_phys_wc_del(ggtt->mtrr);
        io_mapping_fini(&ggtt->iomap);
+
+       i915_gem_cleanup_stolen(&dev_priv->drm);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2996,7 +2968,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        phys_addr_t phys_addr;
        int ret;
@@ -3020,7 +2992,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
                return -ENOMEM;
        }
 
-       ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+       ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
        if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
@@ -3326,7 +3298,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
@@ -3350,29 +3322,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        else
                size = gen8_get_total_gtt_size(snb_gmch_ctl);
 
-       ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
-       ggtt->base.cleanup = gen6_gmch_remove;
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.insert_page = gen8_ggtt_insert_page;
-       ggtt->base.clear_range = nop_clear_range;
+       ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+       ggtt->vm.cleanup = gen6_gmch_remove;
+       ggtt->vm.insert_page = gen8_ggtt_insert_page;
+       ggtt->vm.clear_range = nop_clear_range;
        if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
-               ggtt->base.clear_range = gen8_ggtt_clear_range;
+               ggtt->vm.clear_range = gen8_ggtt_clear_range;
 
-       ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+       ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 
        /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
        if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
-               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
-               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
-               if (ggtt->base.clear_range != nop_clear_range)
-                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+               ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->vm.clear_range != nop_clear_range)
+                       ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
        }
 
        ggtt->invalidate = gen6_ggtt_invalidate;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        setup_private_pat(dev_priv);
 
        return ggtt_probe_common(ggtt, size);
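
The vm.total computation above falls straight out of the PTE-table size:
each 8-byte gen8 PTE maps one 4 KiB page. A minimal userspace sketch of
that arithmetic, with PAGE_SHIFT assumed to be 12:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages assumed */

    int main(void)
    {
            uint64_t gtt_bytes = 8ull << 20;        /* e.g. an 8 MiB PTE table */
            uint64_t pte_size  = sizeof(uint64_t);  /* one gen8 PTE is 8 bytes */
            uint64_t total     = (gtt_bytes / pte_size) << PAGE_SHIFT;

            printf("GGTT covers %llu MiB\n", (unsigned long long)(total >> 20));
            return 0;
    }
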
@@ -3380,7 +3353,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
@@ -3407,29 +3380,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
        size = gen6_get_total_gtt_size(snb_gmch_ctl);
-       ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+       ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
-       ggtt->base.clear_range = gen6_ggtt_clear_range;
-       ggtt->base.insert_page = gen6_ggtt_insert_page;
-       ggtt->base.insert_entries = gen6_ggtt_insert_entries;
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.cleanup = gen6_gmch_remove;
+       ggtt->vm.clear_range = gen6_ggtt_clear_range;
+       ggtt->vm.insert_page = gen6_ggtt_insert_page;
+       ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+       ggtt->vm.cleanup = gen6_gmch_remove;
 
        ggtt->invalidate = gen6_ggtt_invalidate;
 
        if (HAS_EDRAM(dev_priv))
-               ggtt->base.pte_encode = iris_pte_encode;
+               ggtt->vm.pte_encode = iris_pte_encode;
        else if (IS_HASWELL(dev_priv))
-               ggtt->base.pte_encode = hsw_pte_encode;
+               ggtt->vm.pte_encode = hsw_pte_encode;
        else if (IS_VALLEYVIEW(dev_priv))
-               ggtt->base.pte_encode = byt_pte_encode;
+               ggtt->vm.pte_encode = byt_pte_encode;
        else if (INTEL_GEN(dev_priv) >= 7)
-               ggtt->base.pte_encode = ivb_pte_encode;
+               ggtt->vm.pte_encode = ivb_pte_encode;
        else
-               ggtt->base.pte_encode = snb_pte_encode;
+               ggtt->vm.pte_encode = snb_pte_encode;
+
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
 
        return ggtt_probe_common(ggtt, size);
 }
@@ -3441,7 +3415,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        phys_addr_t gmadr_base;
        int ret;
 
@@ -3451,26 +3425,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
                return -EIO;
        }
 
-       intel_gtt_get(&ggtt->base.total,
-                     &gmadr_base,
-                     &ggtt->mappable_end);
+       intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
 
        ggtt->gmadr =
                (struct resource) DEFINE_RES_MEM(gmadr_base,
                                                 ggtt->mappable_end);
 
        ggtt->do_idle_maps = needs_idle_maps(dev_priv);
-       ggtt->base.insert_page = i915_ggtt_insert_page;
-       ggtt->base.insert_entries = i915_ggtt_insert_entries;
-       ggtt->base.clear_range = i915_ggtt_clear_range;
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.cleanup = i915_gmch_remove;
+       ggtt->vm.insert_page = i915_ggtt_insert_page;
+       ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+       ggtt->vm.clear_range = i915_ggtt_clear_range;
+       ggtt->vm.cleanup = i915_gmch_remove;
 
        ggtt->invalidate = gmch_ggtt_invalidate;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        if (unlikely(ggtt->do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
@@ -3486,8 +3459,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
-       ggtt->base.i915 = dev_priv;
-       ggtt->base.dma = &dev_priv->drm.pdev->dev;
+       ggtt->vm.i915 = dev_priv;
+       ggtt->vm.dma = &dev_priv->drm.pdev->dev;
 
        if (INTEL_GEN(dev_priv) <= 5)
                ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3477,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
         * restriction!
         */
        if (USES_GUC(dev_priv)) {
-               ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
-               ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+               ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+               ggtt->mappable_end =
+                       min_t(u64, ggtt->mappable_end, ggtt->vm.total);
        }
 
-       if ((ggtt->base.total - 1) >> 32) {
+       if ((ggtt->vm.total - 1) >> 32) {
                DRM_ERROR("We never expected a Global GTT with more than 32bits"
                          " of address space! Found %lldM!\n",
-                         ggtt->base.total >> 20);
-               ggtt->base.total = 1ULL << 32;
-               ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+                         ggtt->vm.total >> 20);
+               ggtt->vm.total = 1ULL << 32;
+               ggtt->mappable_end =
+                       min_t(u64, ggtt->mappable_end, ggtt->vm.total);
        }
 
-       if (ggtt->mappable_end > ggtt->base.total) {
+       if (ggtt->mappable_end > ggtt->vm.total) {
                DRM_ERROR("mappable aperture extends past end of GGTT,"
                          " aperture=%pa, total=%llx\n",
-                         &ggtt->mappable_end, ggtt->base.total);
-               ggtt->mappable_end = ggtt->base.total;
+                         &ggtt->mappable_end, ggtt->vm.total);
+               ggtt->mappable_end = ggtt->vm.total;
        }
 
        /* GMADR is the PCI mmio aperture into the global GTT. */
-       DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+       DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("DSM size = %lluM\n",
                         (u64)resource_size(&intel_graphics_stolen_res) >> 20);
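
A userspace rephrasing of the clamping above: GGTT offsets are 32-bit,
so any total larger than 4 GiB is clamped, and the mappable aperture is
clamped with it (function and variable names here are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t clamp_ggtt(uint64_t total, uint64_t *mappable_end)
    {
            if ((total - 1) >> 32) {        /* last byte above 4 GiB - 1? */
                    total = 1ull << 32;     /* GGTT offsets are 32-bit */
                    if (*mappable_end > total)
                            *mappable_end = total;
            }
            return total;
    }

    int main(void)
    {
            uint64_t mappable = 512ull << 20;
            uint64_t total = clamp_ggtt(8ull << 32, &mappable);

            printf("total=%lluM mappable=%lluM\n",
                   (unsigned long long)(total >> 20),
                   (unsigned long long)(mappable >> 20));
            return 0;
    }
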
@@ -3551,9 +3526,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
         * and beyond the end of the GTT if we do not provide a guard.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
-       i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+       i915_address_space_init(&ggtt->vm, dev_priv, "[global]");
        if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
-               ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+               ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3551,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        return 0;
 
 out_gtt_cleanup:
-       ggtt->base.cleanup(&ggtt->base);
+       ggtt->vm.cleanup(&ggtt->vm);
        return ret;
 }
 
@@ -3610,34 +3585,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_i915_gem_object *obj, *on;
+       struct i915_vma *vma, *vn;
 
        i915_check_and_clear_faults(dev_priv);
 
        /* First fill our portion of the GTT with scratch pages */
-       ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+       ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
-       ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+       ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
        /* clflush objects bound into the GGTT and rebind them. */
-       list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
-               bool ggtt_bound = false;
-               struct i915_vma *vma;
+       GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+               struct drm_i915_gem_object *obj = vma->obj;
 
-               for_each_ggtt_vma(vma, obj) {
-                       if (!i915_vma_unbind(vma))
-                               continue;
+               if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+                       continue;
 
-                       WARN_ON(i915_vma_bind(vma, obj->cache_level,
-                                             PIN_UPDATE));
-                       ggtt_bound = true;
-               }
+               if (!i915_vma_unbind(vma))
+                       continue;
 
-               if (ggtt_bound)
+               WARN_ON(i915_vma_bind(vma,
+                                     obj ? obj->cache_level : 0,
+                                     PIN_UPDATE));
+               if (obj)
                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
        }
 
-       ggtt->base.closed = false;
+       ggtt->vm.closed = false;
+       i915_ggtt_invalidate(dev_priv);
 
        if (INTEL_GEN(dev_priv) >= 8) {
                struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3646,23 +3622,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
                dev_priv->ppat.update_hw(dev_priv);
                return;
        }
-
-       if (USES_PPGTT(dev_priv)) {
-               struct i915_address_space *vm;
-
-               list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-                       struct i915_hw_ppgtt *ppgtt;
-
-                       if (i915_is_ggtt(vm))
-                               ppgtt = dev_priv->mm.aliasing_ppgtt;
-                       else
-                               ppgtt = i915_vm_to_ppgtt(vm);
-
-                       gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
-               }
-       }
-
-       i915_ggtt_invalidate(dev_priv);
 }
 
 static struct scatterlist *
@@ -3880,7 +3839,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(range_overflows(offset, size, vm->total));
-       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));
 
        node->size = size;
@@ -3977,7 +3936,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
        GEM_BUG_ON(start >= end);
        GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
-       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));
 
        if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +3947,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 
        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
-               mode = DRM_MM_INSERT_HIGH;
+               mode = DRM_MM_INSERT_HIGHEST;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;
 
@@ -4008,6 +3967,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
        if (err != -ENOSPC)
                return err;
 
+       if (mode & DRM_MM_INSERT_ONCE) {
+               err = drm_mm_insert_node_in_range(&vm->mm, node,
+                                                 size, alignment, color,
+                                                 start, end,
+                                                 DRM_MM_INSERT_BEST);
+               if (err != -ENOSPC)
+                       return err;
+       }
+
        if (flags & PIN_NOEVICT)
                return -ENOSPC;
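
DRM_MM_INSERT_HIGHEST implies DRM_MM_INSERT_ONCE (a single top-down
scan), so the new block retries with a full best-fit search before
giving up or evicting. A toy of that two-pass shape with the allocator
stubbed out:

    #include <stdio.h>

    enum insert_mode { INSERT_BEST, INSERT_HIGHEST_ONCE };

    /* stub allocator: pretend the single top-down pass finds nothing */
    static int try_insert(enum insert_mode mode, int *slot)
    {
            if (mode == INSERT_HIGHEST_ONCE)
                    return -1;              /* stands in for -ENOSPC */
            *slot = 42;
            return 0;
    }

    int main(void)
    {
            int slot = -1, err;

            err = try_insert(INSERT_HIGHEST_ONCE, &slot);   /* PIN_HIGH */
            if (err)
                    err = try_insert(INSERT_BEST, &slot);   /* retry before evicting */

            printf("err=%d slot=%d\n", err, slot);
            return 0;
    }
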
 
index aec4f73..9a4824c 100644
@@ -58,6 +58,7 @@
 
 struct drm_i915_file_private;
 struct drm_i915_fence_reg;
+struct i915_vma;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
 typedef u64 gen8_ppgtt_pdpe_t;
 typedef u64 gen8_ppgtt_pml4e_t;
 
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,21 @@ struct i915_pml4 {
        struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };
 
+struct i915_vma_ops {
+       /* Map an object into an address space with the given cache flags. */
+       int (*bind_vma)(struct i915_vma *vma,
+                       enum i915_cache_level cache_level,
+                       u32 flags);
+       /*
+        * Unmap an object from an address space. This usually consists of
+        * setting the valid PTE entries to a reserved scratch page.
+        */
+       void (*unbind_vma)(struct i915_vma *vma);
+
+       int (*set_pages)(struct i915_vma *vma);
+       void (*clear_pages)(struct i915_vma *vma);
+};
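
This hunk hoists the per-VMA hooks out of i915_address_space into a
shared ops table, so the GGTT probes earlier in this diff fill in one
struct instead of four scattered pointers. A minimal userspace sketch
of the pattern, using hypothetical names and just two hooks:

    #include <stdio.h>

    struct vma;

    struct vma_ops {                        /* one table instead of */
            int  (*bind)(struct vma *v);    /* per-vm pointer copies */
            void (*unbind)(struct vma *v);
    };

    struct address_space {
            const char *name;
            struct vma_ops vma_ops;         /* filled in once at probe */
    };

    struct vma {
            struct address_space *vm;
    };

    static int ggtt_bind(struct vma *v)
    {
            printf("bind into %s\n", v->vm->name);
            return 0;
    }

    static void ggtt_unbind(struct vma *v)
    {
            printf("unbind from %s\n", v->vm->name);
    }

    int main(void)
    {
            struct address_space ggtt = { .name = "[global]" };
            struct vma v = { .vm = &ggtt };

            ggtt.vma_ops.bind = ggtt_bind;     /* cf. vm.vma_ops.bind_vma */
            ggtt.vma_ops.unbind = ggtt_unbind;

            v.vm->vma_ops.bind(&v);
            v.vm->vma_ops.unbind(&v);
            return 0;
    }
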
+
 struct i915_address_space {
        struct drm_mm mm;
        struct drm_i915_private *i915;
@@ -331,15 +347,8 @@ struct i915_address_space {
                               enum i915_cache_level cache_level,
                               u32 flags);
        void (*cleanup)(struct i915_address_space *vm);
-       /** Unmap an object from an address space. This usually consists of
-        * setting the valid PTE entries to a reserved scratch page. */
-       void (*unbind_vma)(struct i915_vma *vma);
-       /* Map an object into an address space with the given cache flags. */
-       int (*bind_vma)(struct i915_vma *vma,
-                       enum i915_cache_level cache_level,
-                       u32 flags);
-       int (*set_pages)(struct i915_vma *vma);
-       void (*clear_pages)(struct i915_vma *vma);
+
+       struct i915_vma_ops vma_ops;
 
        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
        I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +376,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  * the spec.
  */
 struct i915_ggtt {
-       struct i915_address_space base;
+       struct i915_address_space vm;
 
        struct io_mapping iomap;        /* Mapping to our CPU mappable region */
        struct resource gmadr;          /* GMADR resource */
@@ -385,9 +394,9 @@ struct i915_ggtt {
 };
 
 struct i915_hw_ppgtt {
-       struct i915_address_space base;
+       struct i915_address_space vm;
        struct kref ref;
-       struct drm_mm_node node;
+
        unsigned long pd_dirty_rings;
        union {
                struct i915_pml4 pml4;          /* GEN8+ & 48b PPGTT */
@@ -395,13 +404,28 @@ struct i915_hw_ppgtt {
                struct i915_page_directory pd;          /* GEN6-7 */
        };
 
+       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+       struct i915_hw_ppgtt base;
+
+       struct i915_vma *vma;
        gen6_pte_t __iomem *pd_addr;
+       gen6_pte_t scratch_pte;
 
-       int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-                        struct i915_request *rq);
-       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+       unsigned int pin_count;
+       bool scan_for_unused_pt;
 };
 
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+       BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+       return __to_gen6_ppgtt(base);
+}
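
to_gen6_ppgtt() is the classic container_of() downcast, and the
BUILD_BUG_ON documents that base must stay at offset 0 so the cast is
free. A standalone sketch of the same idiom, with C11 _Static_assert
standing in for BUILD_BUG_ON:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hw_ppgtt { unsigned long long total; };

    struct gen6_hw_ppgtt {
            struct hw_ppgtt base;   /* kept first so the downcast is free */
            unsigned int pin_count;
    };

    static struct gen6_hw_ppgtt *to_gen6(struct hw_ppgtt *base)
    {
            /* BUILD_BUG_ON(offsetof(...)) in the kernel; same check here */
            _Static_assert(offsetof(struct gen6_hw_ppgtt, base) == 0,
                           "base must stay first");
            return container_of(base, struct gen6_hw_ppgtt, base);
    }

    int main(void)
    {
            struct gen6_hw_ppgtt ppgtt = { .base.total = 1ull << 31 };
            struct hw_ppgtt *vm = &ppgtt.base;      /* upcast is implicit */

            printf("pin_count=%u\n", to_gen6(vm)->pin_count);
            return 0;
    }
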
+
 /*
  * gen6_for_each_pde() iterates over every pde from start until start+length.
  * If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +464,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
        const u64 mask = ~((1ULL << pde_shift) - 1);
        u64 end;
 
-       WARN_ON(length == 0);
-       WARN_ON(offset_in_page(addr|length));
+       GEM_BUG_ON(length == 0);
+       GEM_BUG_ON(offset_in_page(addr | length));
 
        end = addr + length;
 
@@ -543,7 +567,7 @@ static inline struct i915_ggtt *
 i915_vm_to_ggtt(struct i915_address_space *vm)
 {
        GEM_BUG_ON(!i915_is_ggtt(vm));
-       return container_of(vm, struct i915_ggtt, base);
+       return container_of(vm, struct i915_ggtt, vm);
 }
 
 #define INTEL_MAX_PPAT_ENTRIES 8
@@ -605,6 +629,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
                kref_put(&ppgtt->ref, i915_ppgtt_release);
 }
 
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
index 1036e86..3210ced 100644
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
        if (IS_ERR(so.obj))
                return PTR_ERR(so.obj);
 
-       so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+       so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(so.vma)) {
                err = PTR_ERR(so.vma);
                goto err_obj;
index 5757fb7..55e84e7 100644
@@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
        /* We also want to clear any cached iomaps as they wrap vmap */
        list_for_each_entry_safe(vma, next,
-                                &i915->ggtt.base.inactive_list, vm_link) {
+                                &i915->ggtt.vm.inactive_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;
                if (vma->iomap && i915_vma_unbind(vma) == 0)
                        freed_pages += count;
index ad949cc..79a3472 100644
@@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
        if (ret)
                goto err;
 
-       vma = i915_vma_instance(obj, &ggtt->base, NULL);
+       vma = i915_vma_instance(obj, &ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_pages;
@@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
-       ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+       ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                   size, gtt_offset, obj->cache_level,
                                   0);
        if (ret) {
@@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
-       list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+       list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
 
        spin_lock(&dev_priv->mm.obj_lock);
        list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
index df234dc..df524c9 100644
@@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915,
                void __iomem *s;
                int ret;
 
-               ggtt->base.insert_page(&ggtt->base, dma, slot,
-                                      I915_CACHE_NONE, 0);
+               ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
                s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
@@ -993,7 +992,7 @@ unwind:
 
 out:
        compress_fini(&compress, dst);
-       ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
 }
 
@@ -1051,6 +1050,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
        int i = 0;
 
        list_for_each_entry(vma, head, vm_link) {
+               if (!vma->obj)
+                       continue;
+
                if (pinned_only && !i915_vma_is_pinned(vma))
                        continue;
 
@@ -1287,9 +1289,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(struct i915_request *request,
                           struct drm_i915_error_request *erq)
 {
-       erq->context = request->ctx->hw_id;
+       struct i915_gem_context *ctx = request->gem_context;
+
+       erq->context = ctx->hw_id;
        erq->sched_attr = request->sched.attr;
-       erq->ban_score = atomic_read(&request->ctx->ban_score);
+       erq->ban_score = atomic_read(&ctx->ban_score);
        erq->seqno = request->global_seqno;
        erq->jiffies = request->emitted_jiffies;
        erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1301,7 @@ static void record_request(struct i915_request *request,
        erq->tail = request->tail;
 
        rcu_read_lock();
-       erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+       erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
        rcu_read_unlock();
 }
 
@@ -1461,12 +1465,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                request = i915_gem_find_active_request(engine);
                if (request) {
+                       struct i915_gem_context *ctx = request->gem_context;
                        struct intel_ring *ring;
 
-                       ee->vm = request->ctx->ppgtt ?
-                               &request->ctx->ppgtt->base : &ggtt->base;
+                       ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
 
-                       record_context(&ee->context, request->ctx);
+                       record_context(&ee->context, ctx);
 
                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
@@ -1483,11 +1487,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                        ee->ctx =
                                i915_error_object_create(i915,
-                                                        to_intel_context(request->ctx,
-                                                                         engine)->state);
+                                                        request->hw_context->state);
 
                        error->simulated |=
-                               i915_gem_context_no_error_capture(request->ctx);
+                               i915_gem_context_no_error_capture(ctx);
 
                        ee->rq_head = request->head;
                        ee->rq_post = request->postfix;
@@ -1563,17 +1566,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
 
 static void capture_pinned_buffers(struct i915_gpu_state *error)
 {
-       struct i915_address_space *vm = &error->i915->ggtt.base;
+       struct i915_address_space *vm = &error->i915->ggtt.vm;
        struct drm_i915_error_buffer *bo;
        struct i915_vma *vma;
        int count_inactive, count_active;
 
        count_inactive = 0;
-       list_for_each_entry(vma, &vm->active_list, vm_link)
+       list_for_each_entry(vma, &vm->inactive_list, vm_link)
                count_inactive++;
 
        count_active = 0;
-       list_for_each_entry(vma, &vm->inactive_list, vm_link)
+       list_for_each_entry(vma, &vm->active_list, vm_link)
                count_active++;
 
        bo = NULL;
@@ -1667,7 +1670,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
        }
 
        /* 4: Everything else */
-       if (INTEL_GEN(dev_priv) >= 8) {
+       if (INTEL_GEN(dev_priv) >= 11) {
+               error->ier = I915_READ(GEN8_DE_MISC_IER);
+               error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+               error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+               error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+               error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+               error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+               error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+               error->ngtier = 6;
+       } else if (INTEL_GEN(dev_priv) >= 8) {
                error->ier = I915_READ(GEN8_DE_MISC_IER);
                for (i = 0; i < 4; i++)
                        error->gtier[i] = I915_READ(GEN8_GT_IER(i));
index dac0f8c..58910f1 100644
@@ -58,7 +58,7 @@ struct i915_gpu_state {
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
-       u32 gtier[4], ngtier;
+       u32 gtier[6], ngtier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
index 4a02747..46aaef5 100644
@@ -115,6 +115,13 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
 };
 
+static const u32 hpd_gen11[HPD_NUM_PINS] = {
+       [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+       [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+       [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+       [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+};
+
 /* IIR can theoretically queue up two events. Be paranoid. */
 #define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -1549,6 +1556,22 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
        }
 }
 
+static bool gen11_port_hotplug_long_detect(enum port port, u32 val)
+{
+       switch (port) {
+       case PORT_C:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+       case PORT_D:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+       case PORT_E:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+       case PORT_F:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+       default:
+               return false;
+       }
+}
+
 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
        switch (port) {
@@ -2598,6 +2621,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
+static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+       u32 pin_mask = 0, long_mask = 0;
+       u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
+       u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+
+       if (trigger_tc) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
+               I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+                                  dig_hotplug_reg, hpd_gen11,
+                                  gen11_port_hotplug_long_detect);
+       }
+
+       if (trigger_tbt) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+               I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+                                  dig_hotplug_reg, hpd_gen11,
+                                  gen11_port_hotplug_long_detect);
+       }
+
+       if (pin_mask)
+               intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+       else
+               DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+}
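
The TC and TBT branches above funnel into the same decode: every
triggered port sets its pin bit, and the latched control register
decides whether the pulse was long. A toy decode with an assumed
two-bits-per-port register layout:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed layout: one trigger bit per port in the IIR, and one
     * "long pulse" bit per port in the latched hotplug control */
    #define TRIGGER(port)      (1u << (port))
    #define LONG_DETECT(port)  (1u << ((port) * 2 + 1))

    int main(void)
    {
            uint32_t iir = TRIGGER(0) | TRIGGER(2); /* two ports fired */
            uint32_t ctl = LONG_DETECT(2);          /* one was a long pulse */
            uint32_t pin_mask = 0, long_mask = 0;
            unsigned int port;

            for (port = 0; port < 4; port++) {
                    if (!(iir & TRIGGER(port)))
                            continue;
                    pin_mask |= 1u << port;
                    if (ctl & LONG_DETECT(port))
                            long_mask |= 1u << port;
            }
            printf("pins=0x%x long=0x%x\n", pin_mask, long_mask);
            return 0;
    }
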
+
 static irqreturn_t
 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
@@ -2633,6 +2690,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
        }
 
+       if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+               iir = I915_READ(GEN11_DE_HPD_IIR);
+               if (iir) {
+                       I915_WRITE(GEN11_DE_HPD_IIR, iir);
+                       ret = IRQ_HANDLED;
+                       gen11_hpd_irq_handler(dev_priv, iir);
+               } else {
+                       DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
+               }
+       }
+
        if (master_ctl & GEN8_DE_PORT_IRQ) {
                iir = I915_READ(GEN8_DE_PORT_IIR);
                if (iir) {
@@ -2648,7 +2716,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                                            GEN9_AUX_CHANNEL_C |
                                            GEN9_AUX_CHANNEL_D;
 
-                       if (IS_CNL_WITH_PORT_F(dev_priv))
+                       if (INTEL_GEN(dev_priv) >= 11)
+                               tmp_mask |= ICL_AUX_CHANNEL_E;
+
+                       if (IS_CNL_WITH_PORT_F(dev_priv) ||
+                           INTEL_GEN(dev_priv) >= 11)
                                tmp_mask |= CNL_AUX_CHANNEL_F;
 
                        if (iir & tmp_mask) {
@@ -2950,11 +3022,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
        spin_unlock(&i915->irq_lock);
 }
 
+static void
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
+                     u32 *iir)
+{
+       void __iomem * const regs = dev_priv->regs;
+
+       if (!(master_ctl & GEN11_GU_MISC_IRQ))
+               return;
+
+       *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+       if (likely(*iir))
+               raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+}
+
+static void
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
+                         const u32 master_ctl, const u32 iir)
+{
+       if (!(master_ctl & GEN11_GU_MISC_IRQ))
+               return;
+
+       if (unlikely(!iir)) {
+               DRM_ERROR("GU_MISC iir blank!\n");
+               return;
+       }
+
+       if (iir & GEN11_GU_MISC_GSE)
+               intel_opregion_asle_intr(dev_priv);
+       else
+               DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
+}
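
The ack/handler split mirrors the flow in gen11_irq_handler() below:
IIR is read and cleared while the master interrupt is still disabled,
and the potentially slow handling runs after it is re-enabled. A sketch
of that two-phase shape, with the register reduced to a plain variable:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t iir_reg = 0x1;  /* stands in for GEN11_GU_MISC_IIR */

    /* phase 1: read-and-clear while the master interrupt is disabled */
    static uint32_t gu_misc_ack(void)
    {
            uint32_t iir = iir_reg;

            if (iir)
                    iir_reg = 0;    /* write-to-clear on real hardware */
            return iir;
    }

    /* phase 2: act on the saved bits after the master IRQ is re-enabled,
     * so slower work no longer extends the masked window */
    static void gu_misc_handle(uint32_t iir)
    {
            if (iir & 0x1)
                    printf("GSE event\n");
    }

    int main(void)
    {
            uint32_t saved = gu_misc_ack();
            /* ... master interrupt re-enabled here ... */
            gu_misc_handle(saved);
            return 0;
    }
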
+
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private * const i915 = to_i915(arg);
        void __iomem * const regs = i915->regs;
        u32 master_ctl;
+       u32 gu_misc_iir;
 
        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;
@@ -2983,9 +3088,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
                enable_rpm_wakeref_asserts(i915);
        }
 
+       gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+
        /* Acknowledge and enable interrupts. */
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
+       gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+
        return IRQ_HANDLED;
 }
 
@@ -3472,6 +3581,8 @@ static void gen11_irq_reset(struct drm_device *dev)
 
        GEN3_IRQ_RESET(GEN8_DE_PORT_);
        GEN3_IRQ_RESET(GEN8_DE_MISC_);
+       GEN3_IRQ_RESET(GEN11_DE_HPD_);
+       GEN3_IRQ_RESET(GEN11_GU_MISC_);
        GEN3_IRQ_RESET(GEN8_PCU_);
 }
 
@@ -3589,6 +3700,41 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
        ibx_hpd_detection_setup(dev_priv);
 }
 
+static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+       u32 hotplug;
+
+       hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+       hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+       I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+
+       hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+       hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+       I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+}
+
+static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+       u32 hotplug_irqs, enabled_irqs;
+       u32 val;
+
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+       hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+
+       val = I915_READ(GEN11_DE_HPD_IMR);
+       val &= ~hotplug_irqs;
+       I915_WRITE(GEN11_DE_HPD_IMR, val);
+       POSTING_READ(GEN11_DE_HPD_IMR);
+
+       gen11_hpd_detection_setup(dev_priv);
+}
+
 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
        u32 val, hotplug;
@@ -3915,9 +4061,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        uint32_t de_pipe_enables;
        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
        u32 de_port_enables;
-       u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
+       u32 de_misc_masked = GEN8_DE_EDP_PSR;
        enum pipe pipe;
 
+       if (INTEL_GEN(dev_priv) <= 10)
+               de_misc_masked |= GEN8_DE_MISC_GSE;
+
        if (INTEL_GEN(dev_priv) >= 9) {
                de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
                de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3928,7 +4077,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
                de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
        }
 
-       if (IS_CNL_WITH_PORT_F(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 11)
+               de_port_masked |= ICL_AUX_CHANNEL_E;
+
+       if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
                de_port_masked |= CNL_AUX_CHANNEL_F;
 
        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3956,10 +4108,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
        GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
 
-       if (IS_GEN9_LP(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 11) {
+               u32 de_hpd_masked = 0;
+               u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
+                                    GEN11_DE_TBT_HOTPLUG_MASK;
+
+               GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+               gen11_hpd_detection_setup(dev_priv);
+       } else if (IS_GEN9_LP(dev_priv)) {
                bxt_hpd_detection_setup(dev_priv);
-       else if (IS_BROADWELL(dev_priv))
+       } else if (IS_BROADWELL(dev_priv)) {
                ilk_hpd_detection_setup(dev_priv);
+       }
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4011,10 +4171,13 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 static int gen11_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 gu_misc_masked = GEN11_GU_MISC_GSE;
 
        gen11_gt_irq_postinstall(dev_priv);
        gen8_de_irq_postinstall(dev_priv);
 
+       GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
        I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
        I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
@@ -4478,7 +4641,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->irq_uninstall = gen11_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
-               dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+               dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
index 66ea355..49fcc46 100644
@@ -130,9 +130,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
 i915_param_named(disable_display, bool, 0400,
        "Disable display (default: false)");
 
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
-       "Enable command parsing (true=enabled [default], false=disabled)");
-
 i915_param_named(mmio_debug, int, 0600,
        "Enable the MMIO debug code for the first N failures (default: off). "
        "This may negatively affect performance.");
index 6684025..aebe046 100644
@@ -58,7 +58,6 @@ struct drm_printer;
        param(unsigned int, inject_load_failure, 0) \
        /* leave bools at the end to not create holes */ \
        param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
-       param(bool, enable_cmd_parser, true) \
        param(bool, enable_hangcheck, true) \
        param(bool, fastboot, false) \
        param(bool, prefault_disable, false) \
index 4364922..55543f1 100644
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
        GEN(7),
        .is_lp = 1,
        .num_pipes = 2,
-       .has_psr = 1,
        .has_runtime_pm = 1,
        .has_rc6 = 1,
        .has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
        .is_lp = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_64bit_reloc = 1,
-       .has_psr = 1,
        .has_runtime_pm = 1,
        .has_resource_streamer = 1,
        .has_rc6 = 1,
@@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
        INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+       INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
-       INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+       INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+       INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+       INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
        INTEL_CNL_IDS(&intel_cannonlake_info),
        INTEL_ICL_11_IDS(&intel_icelake_11_info),
        {0, 0, 0}
index 019bd2d..447407f 100644
@@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000;
  * code assumes all reports have a power-of-two size and ~(size - 1) can
  * be used as a mask to align the OA tail pointer.
  */
-static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A13]        = { 0, 64 },
        [I915_OA_FORMAT_A29]        = { 1, 128 },
        [I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
@@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_C4_B8]      = { 7, 64 },
 };
 
-static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A12]                = { 0, 64 },
        [I915_OA_FORMAT_A12_B8_C8]          = { 2, 128 },
        [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                        continue;
                }
 
-               /*
-                * XXX: Just keep the lower 21 bits for now since I'm not
-                * entirely sure if the HW touches any of the higher bits in
-                * this field
-                */
-               ctx_id = report32[2] & 0x1fffff;
+               ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
 
                /*
                 * Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
        return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
 }
 
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+                                           struct i915_gem_context *ctx)
+{
+       struct intel_engine_cs *engine = i915->engine[RCS];
+       struct intel_context *ce;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(&i915->drm);
+       if (ret)
+               return ERR_PTR(ret);
+
+       /*
+        * As the ID is the gtt offset of the context's vma, we
+        * pin the vma to ensure the ID remains fixed.
+        *
+        * NB: implied RCS engine...
+        */
+       ce = intel_context_pin(ctx, engine);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (IS_ERR(ce))
+               return ce;
+
+       i915->perf.oa.pinned_ctx = ce;
+
+       return ce;
+}
+
 /**
  * oa_get_render_ctx_id - determine and hold ctx hw id
  * @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
  */
 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 {
-       struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct drm_i915_private *i915 = stream->dev_priv;
+       struct intel_context *ce;
 
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
-       } else {
-               struct intel_engine_cs *engine = dev_priv->engine[RCS];
-               struct intel_ring *ring;
-               int ret;
-
-               ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-               if (ret)
-                       return ret;
+       ce = oa_pin_context(i915, stream->ctx);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
 
+       switch (INTEL_GEN(i915)) {
+       case 7: {
                /*
-                * As the ID is the gtt offset of the context's vma we
-                * pin the vma to ensure the ID remains fixed.
-                *
-                * NB: implied RCS engine...
+                * On Haswell we don't do any post processing of the reports
+                * and don't need to use the mask.
                 */
-               ring = intel_context_pin(stream->ctx, engine);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-               if (IS_ERR(ring))
-                       return PTR_ERR(ring);
+               i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+               i915->perf.oa.specific_ctx_id_mask = 0;
+               break;
+       }
 
+       case 8:
+       case 9:
+       case 10:
+               if (USES_GUC_SUBMISSION(i915)) {
+                       /*
+                        * When using GuC, the context descriptor we write in
+                        * i915 is read by GuC and rewritten before it's
+                        * actually written into the hardware. The LRCA is
+                        * what is put into the context id field of the
+                        * context descriptor by GuC. Because it's aligned to
+                        * a page, the lower 12 bits are always 0 and
+                        * dropped by GuC. They won't be part of the context
+                        * ID in the OA reports, so squash those lower bits.
+                        */
+                       i915->perf.oa.specific_ctx_id =
+                               lower_32_bits(ce->lrc_desc) >> 12;
 
-               /*
-                * Explicitly track the ID (instead of calling
-                * i915_ggtt_offset() on the fly) considering the difference
-                * with gen8+ and execlists
-                */
-               dev_priv->perf.oa.specific_ctx_id =
-                       i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+                       /*
+                        * GuC uses the top bit to signal proxy submission, so
+                        * ignore that bit.
+                        */
+                       i915->perf.oa.specific_ctx_id_mask =
+                               (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+               } else {
+                       i915->perf.oa.specific_ctx_id_mask =
+                               (1U << GEN8_CTX_ID_WIDTH) - 1;
+                       i915->perf.oa.specific_ctx_id =
+                               upper_32_bits(ce->lrc_desc);
+                       i915->perf.oa.specific_ctx_id &=
+                               i915->perf.oa.specific_ctx_id_mask;
+               }
+               break;
+
+       case 11: {
+               i915->perf.oa.specific_ctx_id_mask =
+                       ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+                       ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+                       ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+               i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
+               i915->perf.oa.specific_ctx_id &=
+                       i915->perf.oa.specific_ctx_id_mask;
+               break;
+       }
+
+       default:
+               MISSING_CASE(INTEL_GEN(i915));
        }
 
+       DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+                        i915->perf.oa.specific_ctx_id,
+                        i915->perf.oa.specific_ctx_id_mask);
+
        return 0;
 }
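
On gen11 the fields that make up the context ID live in the upper 32
bits of the LRC descriptor, which is why every mask term above is
rebased by -32. A standalone sketch of the composition; the width and
shift values mirror the GEN11_* constants used above but should be read
as assumptions here:

    #include <stdint.h>
    #include <stdio.h>

    #define SW_CTX_ID_SHIFT         37
    #define SW_CTX_ID_WIDTH         11
    #define ENGINE_INSTANCE_SHIFT   48
    #define ENGINE_INSTANCE_WIDTH    6
    #define ENGINE_CLASS_SHIFT      61
    #define ENGINE_CLASS_WIDTH       3

    static uint32_t field_mask(unsigned int width, unsigned int shift)
    {
            /* the OA report carries only the descriptor's upper 32 bits,
             * hence every shift is rebased by -32 */
            return ((1u << width) - 1) << (shift - 32);
    }

    int main(void)
    {
            uint32_t mask = field_mask(SW_CTX_ID_WIDTH, SW_CTX_ID_SHIFT) |
                            field_mask(ENGINE_INSTANCE_WIDTH,
                                       ENGINE_INSTANCE_SHIFT) |
                            field_mask(ENGINE_CLASS_WIDTH,
                                       ENGINE_CLASS_SHIFT);

            printf("ctx_id_mask = 0x%08x\n", mask);     /* 0xe03fffe0 */
            return 0;
    }
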
 
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 {
        struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct intel_context *ce;
 
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-       } else {
-               struct intel_engine_cs *engine = dev_priv->engine[RCS];
+       dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+       dev_priv->perf.oa.specific_ctx_id_mask = 0;
 
+       ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+       if (ce) {
                mutex_lock(&dev_priv->drm.struct_mutex);
-
-               dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-               intel_context_unpin(stream->ctx, engine);
-
+               intel_context_unpin(ce);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 }
index dc87797..c39541e 100644 (file)
@@ -127,6 +127,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
 {
        if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
                i915->pmu.timer_enabled = true;
+               i915->pmu.timer_last = ktime_get();
                hrtimer_start_range_ns(&i915->pmu.timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
@@ -155,12 +156,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
 }
 
 static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
 {
-       sample->cur += mul_u32_u32(val, unit);
+       sample->cur += val;
 }
 
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -182,8 +184,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
 
                val = !i915_seqno_passed(current_seqno, last_seqno);
 
-               update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
-                             PERIOD, val);
+               if (val)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+                                  period_ns);
 
                if (val && (engine->pmu.enable &
                    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -194,11 +197,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
                        val = 0;
                }
 
-               update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
-                             PERIOD, !!(val & RING_WAIT));
+               if (val & RING_WAIT)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+                                  period_ns);
 
-               update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
-                             PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+               if (val & RING_WAIT_SEMAPHORE)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+                                  period_ns);
        }
 
        if (fw)
@@ -207,7 +212,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
        intel_runtime_pm_put(dev_priv);
 }
 
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+       sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -221,15 +233,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
                        intel_runtime_pm_put(dev_priv);
                }
 
-               update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-                             1, intel_gpu_freq(dev_priv, val));
+               add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+                               intel_gpu_freq(dev_priv, val),
+                               period_ns / 1000);
        }
 
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-               update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
-                             intel_gpu_freq(dev_priv,
-                                            dev_priv->gt_pm.rps.cur_freq));
+               add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+                               intel_gpu_freq(dev_priv,
+                                              dev_priv->gt_pm.rps.cur_freq),
+                               period_ns / 1000);
        }
 }
 
@@ -237,14 +251,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 {
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);
+       unsigned int period_ns;
+       ktime_t now;
 
        if (!READ_ONCE(i915->pmu.timer_enabled))
                return HRTIMER_NORESTART;
 
-       engines_sample(i915);
-       frequency_sample(i915);
+       now = ktime_get();
+       period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+       i915->pmu.timer_last = now;
+
+       /*
+        * Strictly speaking, the passed-in period may not be 100% accurate
+        * for all internal calculations, since some time is spent grabbing
+        * forcewake. However, the potential error from timer callback delay
+        * greatly dominates, so we keep it simple.
+        */
+       engines_sample(i915, period_ns);
+       frequency_sample(i915, period_ns);
+
+       hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
 
-       hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
        return HRTIMER_RESTART;
 }
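
Rather than crediting a nominal PERIOD per tick, the sampler now
measures the real time elapsed since the previous callback and credits
that, which keeps the counters honest when the timer fires late. A
userspace sketch of the same bookkeeping around clock_gettime():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
            uint64_t last = now_ns(), busy_ns = 0;
            int i;

            for (i = 0; i < 5; i++) {
                    struct timespec tick = { 0, 10 * 1000 * 1000 }; /* ~10 ms */
                    uint64_t now, period_ns;

                    nanosleep(&tick, NULL);
                    now = now_ns();
                    period_ns = now - last;     /* measured, not nominal */
                    last = now;
                    busy_ns += period_ns;       /* pretend each tick was busy */
            }
            printf("busy ~%llu us\n", (unsigned long long)(busy_ns / 1000));
            return 0;
    }
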
 
@@ -519,12 +546,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                case I915_PMU_ACTUAL_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
-                                  FREQUENCY);
+                                  USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_REQUESTED_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
-                                  FREQUENCY);
+                                  USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_INTERRUPTS:
                        val = count_interrupts(i915);
index 2ba7352..7f164ca 100644
@@ -65,6 +65,14 @@ struct i915_pmu {
         * event types.
         */
        u64 enable;
+
+       /**
+        * @timer_last:
+        *
+        * Timestamp of the previous timer invocation.
+        */
+       ktime_t timer_last;
+
        /**
         * @enable_count: Reference counts for the enabled events.
         *
index 195203f..eeaa3d5 100644
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
  */
 #define VGT_CAPS_FULL_48BIT_PPGTT      BIT(2)
 #define VGT_CAPS_HWSP_EMULATION                BIT(3)
+#define VGT_CAPS_HUGE_GTT              BIT(4)
 
 struct vgt_if {
        u64 magic;              /* VGT_MAGIC */
@@ -93,7 +94,10 @@ struct vgt_if {
        u32 rsv5[4];
 
        u32 g2v_notify;
-       u32 rsv6[7];
+       u32 rsv6[5];
+
+       u32 cursor_x_hot;
+       u32 cursor_y_hot;
 
        struct {
                u32 lo;
index 7720569..4bfd7a9 100644
@@ -141,21 +141,22 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
 
-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))
 #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
 #define _PLANE(plane, a, b) _PIPE(plane, a, b)
 #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _TRANS(tran, a, b) ((a) + (tran) * ((b) - (a)))
 #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port) * ((b) - (a)))
 #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
 #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
 #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
+#define _PLL(pll, a, b) ((a) + (pll) * ((b) - (a)))
 #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
 #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
 #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
 
+#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({                                     \
        if (__builtin_constant_p(mask))                                    \
                BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
@@ -164,7 +165,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
        if (__builtin_constant_p(mask) && __builtin_constant_p(value))     \
                BUILD_BUG_ON_MSG((value) & ~(mask),                        \
                                 "Incorrect value for mask");              \
-       (mask) << 16 | (value); })
+       __MASKED_FIELD(mask, value); })
 #define _MASKED_BIT_ENABLE(a)  ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
 #define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
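
[Factoring out __MASKED_FIELD() keeps the build-time checks in _MASKED_FIELD() while exposing the raw encoding: masked registers take the write mask in the high 16 bits and the data in the low 16, so individual bits can be flipped without a read-modify-write. A usage sketch, with engine and I915_WRITE as in the surrounding driver:

    /* set GFX_REPLAY_MODE (bit 11) only; all other bits are untouched */
    /* _MASKED_BIT_ENABLE(GFX_REPLAY_MODE) == (1 << 27) | (1 << 11)    */
    I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
]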
 
@@ -270,19 +271,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 
 #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define  ILK_GRDOM_FULL                (0<<1)
-#define  ILK_GRDOM_RENDER      (1<<1)
-#define  ILK_GRDOM_MEDIA       (3<<1)
-#define  ILK_GRDOM_MASK                (3<<1)
-#define  ILK_GRDOM_RESET_ENABLE (1<<0)
+#define  ILK_GRDOM_FULL                (0 << 1)
+#define  ILK_GRDOM_RENDER      (1 << 1)
+#define  ILK_GRDOM_MEDIA       (3 << 1)
+#define  ILK_GRDOM_MASK                (3 << 1)
+#define  ILK_GRDOM_RESET_ENABLE (1 << 0)
 
 #define GEN6_MBCUNIT_SNPCR     _MMIO(0x900c) /* for LLC config */
 #define   GEN6_MBC_SNPCR_SHIFT 21
-#define   GEN6_MBC_SNPCR_MASK  (3<<21)
-#define   GEN6_MBC_SNPCR_MAX   (0<<21)
-#define   GEN6_MBC_SNPCR_MED   (1<<21)
-#define   GEN6_MBC_SNPCR_LOW   (2<<21)
-#define   GEN6_MBC_SNPCR_MIN   (3<<21) /* only 1/16th of the cache is shared */
+#define   GEN6_MBC_SNPCR_MASK  (3 << 21)
+#define   GEN6_MBC_SNPCR_MAX   (0 << 21)
+#define   GEN6_MBC_SNPCR_MED   (1 << 21)
+#define   GEN6_MBC_SNPCR_LOW   (2 << 21)
+#define   GEN6_MBC_SNPCR_MIN   (3 << 21) /* only 1/16th of the cache is shared */
 
 #define VLV_G3DCTL             _MMIO(0x9024)
 #define VLV_GSCKGCTL           _MMIO(0x9028)
@@ -314,13 +315,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN11_GRDOM_VECS              (1 << 13)
 #define  GEN11_GRDOM_VECS2             (1 << 14)
 
-#define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(engine)       _MMIO((engine)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base + 0x228)
+#define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base + 0x518)
+#define RING_PP_DIR_DCLV(engine)       _MMIO((engine)->mmio_base + 0x220)
 #define   PP_DIR_DCLV_2G               0xffffffff
 
-#define GEN8_RING_PDP_UDW(engine, n)   _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n)   _MMIO((engine)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n)   _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n)   _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
 
 #define GEN8_R_PWR_CLK_STATE           _MMIO(0x20C8)
 #define   GEN8_RPCS_ENABLE             (1 << 31)
@@ -358,25 +359,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN8_SELECTIVE_READ_ADDRESSING_ENABLE         (1 << 13)
 
 #define GAM_ECOCHK                     _MMIO(0x4090)
-#define   BDW_DISABLE_HDC_INVALIDATION (1<<25)
-#define   ECOCHK_SNB_BIT               (1<<10)
-#define   ECOCHK_DIS_TLB               (1<<8)
-#define   HSW_ECOCHK_ARB_PRIO_SOL      (1<<6)
-#define   ECOCHK_PPGTT_CACHE64B                (0x3<<3)
-#define   ECOCHK_PPGTT_CACHE4B         (0x0<<3)
-#define   ECOCHK_PPGTT_GFDT_IVB                (0x1<<4)
-#define   ECOCHK_PPGTT_LLC_IVB         (0x1<<3)
-#define   ECOCHK_PPGTT_UC_HSW          (0x1<<3)
-#define   ECOCHK_PPGTT_WT_HSW          (0x2<<3)
-#define   ECOCHK_PPGTT_WB_HSW          (0x3<<3)
+#define   BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define   ECOCHK_SNB_BIT               (1 << 10)
+#define   ECOCHK_DIS_TLB               (1 << 8)
+#define   HSW_ECOCHK_ARB_PRIO_SOL      (1 << 6)
+#define   ECOCHK_PPGTT_CACHE64B                (0x3 << 3)
+#define   ECOCHK_PPGTT_CACHE4B         (0x0 << 3)
+#define   ECOCHK_PPGTT_GFDT_IVB                (0x1 << 4)
+#define   ECOCHK_PPGTT_LLC_IVB         (0x1 << 3)
+#define   ECOCHK_PPGTT_UC_HSW          (0x1 << 3)
+#define   ECOCHK_PPGTT_WT_HSW          (0x2 << 3)
+#define   ECOCHK_PPGTT_WB_HSW          (0x3 << 3)
 
 #define GAC_ECO_BITS                   _MMIO(0x14090)
-#define   ECOBITS_SNB_BIT              (1<<13)
-#define   ECOBITS_PPGTT_CACHE64B       (3<<8)
-#define   ECOBITS_PPGTT_CACHE4B                (0<<8)
+#define   ECOBITS_SNB_BIT              (1 << 13)
+#define   ECOBITS_PPGTT_CACHE64B       (3 << 8)
+#define   ECOBITS_PPGTT_CACHE4B                (0 << 8)
 
 #define GAB_CTL                                _MMIO(0x24000)
-#define   GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define   GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
 
 #define GEN6_STOLEN_RESERVED           _MMIO(0x1082C0)
 #define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -404,15 +405,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define _VGA_MSR_WRITE _MMIO(0x3c2)
 #define VGA_MSR_WRITE 0x3c2
 #define VGA_MSR_READ 0x3cc
-#define   VGA_MSR_MEM_EN (1<<1)
-#define   VGA_MSR_CGA_MODE (1<<0)
+#define   VGA_MSR_MEM_EN (1 << 1)
+#define   VGA_MSR_CGA_MODE (1 << 0)
 
 #define VGA_SR_INDEX 0x3c4
 #define SR01                   1
 #define VGA_SR_DATA 0x3c5
 
 #define VGA_AR_INDEX 0x3c0
-#define   VGA_AR_VID_EN (1<<5)
+#define   VGA_AR_VID_EN (1 << 5)
 #define VGA_AR_DATA_WRITE 0x3c0
 #define VGA_AR_DATA_READ 0x3c1
 
@@ -445,8 +446,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define MI_PREDICATE_SRC1_UDW  _MMIO(0x2408 + 4)
 
 #define MI_PREDICATE_RESULT_2  _MMIO(0x2214)
-#define  LOWER_SLICE_ENABLED   (1<<0)
-#define  LOWER_SLICE_DISABLED  (0<<0)
+#define  LOWER_SLICE_ENABLED   (1 << 0)
+#define  LOWER_SLICE_DISABLED  (0 << 0)
 
 /*
  * Registers used only by the command parser
@@ -504,47 +505,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN7_OACONTROL_CTX_MASK           0xFFFFF000
 #define  GEN7_OACONTROL_TIMER_PERIOD_MASK   0x3F
 #define  GEN7_OACONTROL_TIMER_PERIOD_SHIFT  6
-#define  GEN7_OACONTROL_TIMER_ENABLE       (1<<5)
-#define  GEN7_OACONTROL_FORMAT_A13         (0<<2)
-#define  GEN7_OACONTROL_FORMAT_A29         (1<<2)
-#define  GEN7_OACONTROL_FORMAT_A13_B8_C8    (2<<2)
-#define  GEN7_OACONTROL_FORMAT_A29_B8_C8    (3<<2)
-#define  GEN7_OACONTROL_FORMAT_B4_C8       (4<<2)
-#define  GEN7_OACONTROL_FORMAT_A45_B8_C8    (5<<2)
-#define  GEN7_OACONTROL_FORMAT_B4_C8_A16    (6<<2)
-#define  GEN7_OACONTROL_FORMAT_C4_B8       (7<<2)
+#define  GEN7_OACONTROL_TIMER_ENABLE       (1 << 5)
+#define  GEN7_OACONTROL_FORMAT_A13         (0 << 2)
+#define  GEN7_OACONTROL_FORMAT_A29         (1 << 2)
+#define  GEN7_OACONTROL_FORMAT_A13_B8_C8    (2 << 2)
+#define  GEN7_OACONTROL_FORMAT_A29_B8_C8    (3 << 2)
+#define  GEN7_OACONTROL_FORMAT_B4_C8       (4 << 2)
+#define  GEN7_OACONTROL_FORMAT_A45_B8_C8    (5 << 2)
+#define  GEN7_OACONTROL_FORMAT_B4_C8_A16    (6 << 2)
+#define  GEN7_OACONTROL_FORMAT_C4_B8       (7 << 2)
 #define  GEN7_OACONTROL_FORMAT_SHIFT       2
-#define  GEN7_OACONTROL_PER_CTX_ENABLE     (1<<1)
-#define  GEN7_OACONTROL_ENABLE             (1<<0)
+#define  GEN7_OACONTROL_PER_CTX_ENABLE     (1 << 1)
+#define  GEN7_OACONTROL_ENABLE             (1 << 0)
 
 #define GEN8_OACTXID _MMIO(0x2364)
 
 #define GEN8_OA_DEBUG _MMIO(0x2B04)
-#define  GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS    (1<<5)
-#define  GEN9_OA_DEBUG_INCLUDE_CLK_RATIO           (1<<6)
-#define  GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS      (1<<2)
-#define  GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS   (1<<1)
+#define  GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS    (1 << 5)
+#define  GEN9_OA_DEBUG_INCLUDE_CLK_RATIO           (1 << 6)
+#define  GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS      (1 << 2)
+#define  GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS   (1 << 1)
 
 #define GEN8_OACONTROL _MMIO(0x2B00)
-#define  GEN8_OA_REPORT_FORMAT_A12         (0<<2)
-#define  GEN8_OA_REPORT_FORMAT_A12_B8_C8    (2<<2)
-#define  GEN8_OA_REPORT_FORMAT_A36_B8_C8    (5<<2)
-#define  GEN8_OA_REPORT_FORMAT_C4_B8       (7<<2)
+#define  GEN8_OA_REPORT_FORMAT_A12         (0 << 2)
+#define  GEN8_OA_REPORT_FORMAT_A12_B8_C8    (2 << 2)
+#define  GEN8_OA_REPORT_FORMAT_A36_B8_C8    (5 << 2)
+#define  GEN8_OA_REPORT_FORMAT_C4_B8       (7 << 2)
 #define  GEN8_OA_REPORT_FORMAT_SHIFT       2
-#define  GEN8_OA_SPECIFIC_CONTEXT_ENABLE    (1<<1)
-#define  GEN8_OA_COUNTER_ENABLE             (1<<0)
+#define  GEN8_OA_SPECIFIC_CONTEXT_ENABLE    (1 << 1)
+#define  GEN8_OA_COUNTER_ENABLE             (1 << 0)
 
 #define GEN8_OACTXCONTROL _MMIO(0x2360)
 #define  GEN8_OA_TIMER_PERIOD_MASK         0x3F
 #define  GEN8_OA_TIMER_PERIOD_SHIFT        2
-#define  GEN8_OA_TIMER_ENABLE              (1<<1)
-#define  GEN8_OA_COUNTER_RESUME                    (1<<0)
+#define  GEN8_OA_TIMER_ENABLE              (1 << 1)
+#define  GEN8_OA_COUNTER_RESUME                    (1 << 0)
 
 #define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
-#define  GEN7_OABUFFER_OVERRUN_DISABLE     (1<<3)
-#define  GEN7_OABUFFER_EDGE_TRIGGER        (1<<2)
-#define  GEN7_OABUFFER_STOP_RESUME_ENABLE   (1<<1)
-#define  GEN7_OABUFFER_RESUME              (1<<0)
+#define  GEN7_OABUFFER_OVERRUN_DISABLE     (1 << 3)
+#define  GEN7_OABUFFER_EDGE_TRIGGER        (1 << 2)
+#define  GEN7_OABUFFER_STOP_RESUME_ENABLE   (1 << 1)
+#define  GEN7_OABUFFER_RESUME              (1 << 0)
 
 #define GEN8_OABUFFER_UDW _MMIO(0x23b4)
 #define GEN8_OABUFFER _MMIO(0x2b14)
@@ -552,33 +553,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define GEN7_OASTATUS1 _MMIO(0x2364)
 #define  GEN7_OASTATUS1_TAIL_MASK          0xffffffc0
-#define  GEN7_OASTATUS1_COUNTER_OVERFLOW    (1<<2)
-#define  GEN7_OASTATUS1_OABUFFER_OVERFLOW   (1<<1)
-#define  GEN7_OASTATUS1_REPORT_LOST        (1<<0)
+#define  GEN7_OASTATUS1_COUNTER_OVERFLOW    (1 << 2)
+#define  GEN7_OASTATUS1_OABUFFER_OVERFLOW   (1 << 1)
+#define  GEN7_OASTATUS1_REPORT_LOST        (1 << 0)
 
 #define GEN7_OASTATUS2 _MMIO(0x2368)
 #define  GEN7_OASTATUS2_HEAD_MASK           0xffffffc0
 #define  GEN7_OASTATUS2_MEM_SELECT_GGTT     (1 << 0) /* 0: PPGTT, 1: GGTT */
 
 #define GEN8_OASTATUS _MMIO(0x2b08)
-#define  GEN8_OASTATUS_OVERRUN_STATUS      (1<<3)
-#define  GEN8_OASTATUS_COUNTER_OVERFLOW     (1<<2)
-#define  GEN8_OASTATUS_OABUFFER_OVERFLOW    (1<<1)
-#define  GEN8_OASTATUS_REPORT_LOST         (1<<0)
+#define  GEN8_OASTATUS_OVERRUN_STATUS      (1 << 3)
+#define  GEN8_OASTATUS_COUNTER_OVERFLOW     (1 << 2)
+#define  GEN8_OASTATUS_OABUFFER_OVERFLOW    (1 << 1)
+#define  GEN8_OASTATUS_REPORT_LOST         (1 << 0)
 
 #define GEN8_OAHEADPTR _MMIO(0x2B0C)
 #define GEN8_OAHEADPTR_MASK    0xffffffc0
 #define GEN8_OATAILPTR _MMIO(0x2B10)
 #define GEN8_OATAILPTR_MASK    0xffffffc0
 
-#define OABUFFER_SIZE_128K  (0<<3)
-#define OABUFFER_SIZE_256K  (1<<3)
-#define OABUFFER_SIZE_512K  (2<<3)
-#define OABUFFER_SIZE_1M    (3<<3)
-#define OABUFFER_SIZE_2M    (4<<3)
-#define OABUFFER_SIZE_4M    (5<<3)
-#define OABUFFER_SIZE_8M    (6<<3)
-#define OABUFFER_SIZE_16M   (7<<3)
+#define OABUFFER_SIZE_128K  (0 << 3)
+#define OABUFFER_SIZE_256K  (1 << 3)
+#define OABUFFER_SIZE_512K  (2 << 3)
+#define OABUFFER_SIZE_1M    (3 << 3)
+#define OABUFFER_SIZE_2M    (4 << 3)
+#define OABUFFER_SIZE_4M    (5 << 3)
+#define OABUFFER_SIZE_8M    (6 << 3)
+#define OABUFFER_SIZE_16M   (7 << 3)
 
 /*
  * Flexible, Aggregate EU Counter Registers.
@@ -601,35 +602,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OASTARTTRIG1_THRESHOLD_MASK          0xffff
 
 #define OASTARTTRIG2 _MMIO(0x2714)
-#define OASTARTTRIG2_INVERT_A_0 (1<<0)
-#define OASTARTTRIG2_INVERT_A_1 (1<<1)
-#define OASTARTTRIG2_INVERT_A_2 (1<<2)
-#define OASTARTTRIG2_INVERT_A_3 (1<<3)
-#define OASTARTTRIG2_INVERT_A_4 (1<<4)
-#define OASTARTTRIG2_INVERT_A_5 (1<<5)
-#define OASTARTTRIG2_INVERT_A_6 (1<<6)
-#define OASTARTTRIG2_INVERT_A_7 (1<<7)
-#define OASTARTTRIG2_INVERT_A_8 (1<<8)
-#define OASTARTTRIG2_INVERT_A_9 (1<<9)
-#define OASTARTTRIG2_INVERT_A_10 (1<<10)
-#define OASTARTTRIG2_INVERT_A_11 (1<<11)
-#define OASTARTTRIG2_INVERT_A_12 (1<<12)
-#define OASTARTTRIG2_INVERT_A_13 (1<<13)
-#define OASTARTTRIG2_INVERT_A_14 (1<<14)
-#define OASTARTTRIG2_INVERT_A_15 (1<<15)
-#define OASTARTTRIG2_INVERT_B_0 (1<<16)
-#define OASTARTTRIG2_INVERT_B_1 (1<<17)
-#define OASTARTTRIG2_INVERT_B_2 (1<<18)
-#define OASTARTTRIG2_INVERT_B_3 (1<<19)
-#define OASTARTTRIG2_INVERT_C_0 (1<<20)
-#define OASTARTTRIG2_INVERT_C_1 (1<<21)
-#define OASTARTTRIG2_INVERT_D_0 (1<<22)
-#define OASTARTTRIG2_THRESHOLD_ENABLE      (1<<23)
-#define OASTARTTRIG2_START_TRIG_FLAG_MBZ    (1<<24)
-#define OASTARTTRIG2_EVENT_SELECT_0  (1<<28)
-#define OASTARTTRIG2_EVENT_SELECT_1  (1<<29)
-#define OASTARTTRIG2_EVENT_SELECT_2  (1<<30)
-#define OASTARTTRIG2_EVENT_SELECT_3  (1<<31)
+#define OASTARTTRIG2_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG2_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG2_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG2_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG2_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG2_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG2_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG2_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG2_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG2_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG2_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG2_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG2_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG2_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG2_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG2_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG2_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG2_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG2_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG2_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG2_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG2_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG2_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE      (1 << 23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ    (1 << 24)
+#define OASTARTTRIG2_EVENT_SELECT_0  (1 << 28)
+#define OASTARTTRIG2_EVENT_SELECT_1  (1 << 29)
+#define OASTARTTRIG2_EVENT_SELECT_2  (1 << 30)
+#define OASTARTTRIG2_EVENT_SELECT_3  (1 << 31)
 
 #define OASTARTTRIG3 _MMIO(0x2718)
 #define OASTARTTRIG3_NOA_SELECT_MASK      0xf
@@ -658,35 +659,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OASTARTTRIG5_THRESHOLD_MASK          0xffff
 
 #define OASTARTTRIG6 _MMIO(0x2724)
-#define OASTARTTRIG6_INVERT_A_0 (1<<0)
-#define OASTARTTRIG6_INVERT_A_1 (1<<1)
-#define OASTARTTRIG6_INVERT_A_2 (1<<2)
-#define OASTARTTRIG6_INVERT_A_3 (1<<3)
-#define OASTARTTRIG6_INVERT_A_4 (1<<4)
-#define OASTARTTRIG6_INVERT_A_5 (1<<5)
-#define OASTARTTRIG6_INVERT_A_6 (1<<6)
-#define OASTARTTRIG6_INVERT_A_7 (1<<7)
-#define OASTARTTRIG6_INVERT_A_8 (1<<8)
-#define OASTARTTRIG6_INVERT_A_9 (1<<9)
-#define OASTARTTRIG6_INVERT_A_10 (1<<10)
-#define OASTARTTRIG6_INVERT_A_11 (1<<11)
-#define OASTARTTRIG6_INVERT_A_12 (1<<12)
-#define OASTARTTRIG6_INVERT_A_13 (1<<13)
-#define OASTARTTRIG6_INVERT_A_14 (1<<14)
-#define OASTARTTRIG6_INVERT_A_15 (1<<15)
-#define OASTARTTRIG6_INVERT_B_0 (1<<16)
-#define OASTARTTRIG6_INVERT_B_1 (1<<17)
-#define OASTARTTRIG6_INVERT_B_2 (1<<18)
-#define OASTARTTRIG6_INVERT_B_3 (1<<19)
-#define OASTARTTRIG6_INVERT_C_0 (1<<20)
-#define OASTARTTRIG6_INVERT_C_1 (1<<21)
-#define OASTARTTRIG6_INVERT_D_0 (1<<22)
-#define OASTARTTRIG6_THRESHOLD_ENABLE      (1<<23)
-#define OASTARTTRIG6_START_TRIG_FLAG_MBZ    (1<<24)
-#define OASTARTTRIG6_EVENT_SELECT_4  (1<<28)
-#define OASTARTTRIG6_EVENT_SELECT_5  (1<<29)
-#define OASTARTTRIG6_EVENT_SELECT_6  (1<<30)
-#define OASTARTTRIG6_EVENT_SELECT_7  (1<<31)
+#define OASTARTTRIG6_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG6_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG6_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG6_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG6_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG6_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG6_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG6_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG6_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG6_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG6_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG6_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG6_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG6_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG6_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG6_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG6_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG6_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG6_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG6_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG6_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG6_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG6_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE      (1 << 23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ    (1 << 24)
+#define OASTARTTRIG6_EVENT_SELECT_4  (1 << 28)
+#define OASTARTTRIG6_EVENT_SELECT_5  (1 << 29)
+#define OASTARTTRIG6_EVENT_SELECT_6  (1 << 30)
+#define OASTARTTRIG6_EVENT_SELECT_7  (1 << 31)
 
 #define OASTARTTRIG7 _MMIO(0x2728)
 #define OASTARTTRIG7_NOA_SELECT_MASK      0xf
@@ -715,31 +716,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0  (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1  (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2  (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3  (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4  (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5  (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6  (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7  (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8  (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9  (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0  (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1  (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2  (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3  (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0  (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1  (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0  (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE     (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG2_INVERT_A_0  (1 << 0)
+#define OAREPORTTRIG2_INVERT_A_1  (1 << 1)
+#define OAREPORTTRIG2_INVERT_A_2  (1 << 2)
+#define OAREPORTTRIG2_INVERT_A_3  (1 << 3)
+#define OAREPORTTRIG2_INVERT_A_4  (1 << 4)
+#define OAREPORTTRIG2_INVERT_A_5  (1 << 5)
+#define OAREPORTTRIG2_INVERT_A_6  (1 << 6)
+#define OAREPORTTRIG2_INVERT_A_7  (1 << 7)
+#define OAREPORTTRIG2_INVERT_A_8  (1 << 8)
+#define OAREPORTTRIG2_INVERT_A_9  (1 << 9)
+#define OAREPORTTRIG2_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG2_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG2_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG2_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG2_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG2_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG2_INVERT_B_0  (1 << 16)
+#define OAREPORTTRIG2_INVERT_B_1  (1 << 17)
+#define OAREPORTTRIG2_INVERT_B_2  (1 << 18)
+#define OAREPORTTRIG2_INVERT_B_3  (1 << 19)
+#define OAREPORTTRIG2_INVERT_C_0  (1 << 20)
+#define OAREPORTTRIG2_INVERT_C_1  (1 << 21)
+#define OAREPORTTRIG2_INVERT_D_0  (1 << 22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE     (1 << 23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31)
 
 #define OAREPORTTRIG3 _MMIO(0x2748)
 #define OAREPORTTRIG3_NOA_SELECT_MASK      0xf
@@ -768,31 +769,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0  (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1  (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2  (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3  (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4  (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5  (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6  (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7  (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8  (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9  (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0  (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1  (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2  (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3  (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0  (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1  (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0  (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE     (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG6_INVERT_A_0  (1 << 0)
+#define OAREPORTTRIG6_INVERT_A_1  (1 << 1)
+#define OAREPORTTRIG6_INVERT_A_2  (1 << 2)
+#define OAREPORTTRIG6_INVERT_A_3  (1 << 3)
+#define OAREPORTTRIG6_INVERT_A_4  (1 << 4)
+#define OAREPORTTRIG6_INVERT_A_5  (1 << 5)
+#define OAREPORTTRIG6_INVERT_A_6  (1 << 6)
+#define OAREPORTTRIG6_INVERT_A_7  (1 << 7)
+#define OAREPORTTRIG6_INVERT_A_8  (1 << 8)
+#define OAREPORTTRIG6_INVERT_A_9  (1 << 9)
+#define OAREPORTTRIG6_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG6_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG6_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG6_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG6_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG6_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG6_INVERT_B_0  (1 << 16)
+#define OAREPORTTRIG6_INVERT_B_1  (1 << 17)
+#define OAREPORTTRIG6_INVERT_B_2  (1 << 18)
+#define OAREPORTTRIG6_INVERT_B_3  (1 << 19)
+#define OAREPORTTRIG6_INVERT_C_0  (1 << 20)
+#define OAREPORTTRIG6_INVERT_C_1  (1 << 21)
+#define OAREPORTTRIG6_INVERT_D_0  (1 << 22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE     (1 << 23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31)
 
 #define OAREPORTTRIG7 _MMIO(0x2758)
 #define OAREPORTTRIG7_NOA_SELECT_MASK      0xf
@@ -828,9 +829,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OACEC_COMPARE_VALUE_MASK    0xffff
 #define OACEC_COMPARE_VALUE_SHIFT   3
 
-#define OACEC_SELECT_NOA       (0<<19)
-#define OACEC_SELECT_PREV      (1<<19)
-#define OACEC_SELECT_BOOLEAN   (2<<19)
+#define OACEC_SELECT_NOA       (0 << 19)
+#define OACEC_SELECT_PREV      (1 << 19)
+#define OACEC_SELECT_BOOLEAN   (2 << 19)
 
 /* CECX_1 */
 #define OACEC_MASK_MASK                    0xffff
@@ -948,9 +949,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Reset registers
  */
 #define DEBUG_RESET_I830               _MMIO(0x6070)
-#define  DEBUG_RESET_FULL              (1<<7)
-#define  DEBUG_RESET_RENDER            (1<<8)
-#define  DEBUG_RESET_DISPLAY           (1<<9)
+#define  DEBUG_RESET_FULL              (1 << 7)
+#define  DEBUG_RESET_RENDER            (1 << 8)
+#define  DEBUG_RESET_DISPLAY           (1 << 9)
 
 /*
  * IOSF sideband
@@ -961,7 +962,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   IOSF_PORT_SHIFT                      8
 #define   IOSF_BYTE_ENABLES_SHIFT              4
 #define   IOSF_BAR_SHIFT                       1
-#define   IOSF_SB_BUSY                         (1<<0)
+#define   IOSF_SB_BUSY                         (1 << 0)
 #define   IOSF_PORT_BUNIT                      0x03
 #define   IOSF_PORT_PUNIT                      0x04
 #define   IOSF_PORT_NC                         0x11
@@ -1098,8 +1099,8 @@ enum i915_power_well_id {
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define PUNIT_REG_GPU_FREQ_STS                 0xd8
-#define   GPLLENABLE                           (1<<4)
-#define   GENFREQSTATUS                                (1<<0)
+#define   GPLLENABLE                           (1 << 4)
+#define   GENFREQSTATUS                                (1 << 0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ         0xdc
 #define PUNIT_REG_CZ_TIMESTAMP                 0xce
 
@@ -1141,11 +1142,11 @@ enum i915_power_well_id {
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT           27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK            0xf8000000
 
-#define VLV_TURBO_SOC_OVERRIDE 0x04
-#define        VLV_OVERRIDE_EN 1
-#define        VLV_SOC_TDP_EN  (1 << 1)
-#define        VLV_BIAS_CPU_125_SOC_875 (6 << 2)
-#define        CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+#define VLV_TURBO_SOC_OVERRIDE         0x04
+#define   VLV_OVERRIDE_EN              1
+#define   VLV_SOC_TDP_EN               (1 << 1)
+#define   VLV_BIAS_CPU_125_SOC_875     (6 << 2)
+#define   CHV_BIAS_CPU_50_SOC_50       (3 << 2)
 
 /* vlv2 north clock has */
 #define CCK_FUSE_REG                           0x8
@@ -1194,10 +1195,10 @@ enum i915_power_well_id {
 #define DPIO_DEVFN                     0
 
 #define DPIO_CTL                       _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define  DPIO_MODSEL1                  (1<<3) /* if ref clk b == 27 */
-#define  DPIO_MODSEL0                  (1<<2) /* if ref clk a == 27 */
-#define  DPIO_SFR_BYPASS               (1<<1)
-#define  DPIO_CMNRST                   (1<<0)
+#define  DPIO_MODSEL1                  (1 << 3) /* if ref clk b == 27 */
+#define  DPIO_MODSEL0                  (1 << 2) /* if ref clk a == 27 */
+#define  DPIO_SFR_BYPASS               (1 << 1)
+#define  DPIO_CMNRST                   (1 << 0)
 
 #define DPIO_PHY(pipe)                 ((pipe) >> 1)
 #define DPIO_PHY_IOSF_PORT(phy)                (dev_priv->dpio_phy_iosf_port[phy])
@@ -1215,7 +1216,7 @@ enum i915_power_well_id {
 #define   DPIO_P1_SHIFT                        (21) /* 3 bits */
 #define   DPIO_P2_SHIFT                        (16) /* 5 bits */
 #define   DPIO_N_SHIFT                 (12) /* 4 bits */
-#define   DPIO_ENABLE_CALIBRATION      (1<<11)
+#define   DPIO_ENABLE_CALIBRATION      (1 << 11)
 #define   DPIO_M1DIV_SHIFT             (8) /* 3 bits */
 #define   DPIO_M2DIV_MASK              0xff
 #define _VLV_PLL_DW3_CH1               0x802c
@@ -1264,10 +1265,10 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW0_CH0               0x8200
 #define _VLV_PCS_DW0_CH1               0x8400
-#define   DPIO_PCS_TX_LANE2_RESET      (1<<16)
-#define   DPIO_PCS_TX_LANE1_RESET      (1<<7)
-#define   DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
-#define   DPIO_RIGHT_TXFIFO_RST_MASTER2        (1<<3)
+#define   DPIO_PCS_TX_LANE2_RESET      (1 << 16)
+#define   DPIO_PCS_TX_LANE1_RESET      (1 << 7)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER2        (1 << 3)
 #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
 #define _VLV_PCS01_DW0_CH0             0x200
@@ -1279,11 +1280,11 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW1_CH0               0x8204
 #define _VLV_PCS_DW1_CH1               0x8404
-#define   CHV_PCS_REQ_SOFTRESET_EN     (1<<23)
-#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN        (1<<22)
-#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define   CHV_PCS_REQ_SOFTRESET_EN     (1 << 23)
+#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN        (1 << 22)
+#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
 #define   DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define   DPIO_PCS_CLK_SOFT_RESET      (1<<5)
+#define   DPIO_PCS_CLK_SOFT_RESET      (1 << 5)
 #define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
 
 #define _VLV_PCS01_DW1_CH0             0x204
@@ -1308,12 +1309,12 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW9_CH0               0x8224
 #define _VLV_PCS_DW9_CH1               0x8424
-#define   DPIO_PCS_TX2MARGIN_MASK      (0x7<<13)
-#define   DPIO_PCS_TX2MARGIN_000       (0<<13)
-#define   DPIO_PCS_TX2MARGIN_101       (1<<13)
-#define   DPIO_PCS_TX1MARGIN_MASK      (0x7<<10)
-#define   DPIO_PCS_TX1MARGIN_000       (0<<10)
-#define   DPIO_PCS_TX1MARGIN_101       (1<<10)
+#define   DPIO_PCS_TX2MARGIN_MASK      (0x7 << 13)
+#define   DPIO_PCS_TX2MARGIN_000       (0 << 13)
+#define   DPIO_PCS_TX2MARGIN_101       (1 << 13)
+#define   DPIO_PCS_TX1MARGIN_MASK      (0x7 << 10)
+#define   DPIO_PCS_TX1MARGIN_000       (0 << 10)
+#define   DPIO_PCS_TX1MARGIN_101       (1 << 10)
 #define        VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
 #define _VLV_PCS01_DW9_CH0             0x224
@@ -1325,14 +1326,14 @@ enum i915_power_well_id {
 
 #define _CHV_PCS_DW10_CH0              0x8228
 #define _CHV_PCS_DW10_CH1              0x8428
-#define   DPIO_PCS_SWING_CALC_TX0_TX2  (1<<30)
-#define   DPIO_PCS_SWING_CALC_TX1_TX3  (1<<31)
-#define   DPIO_PCS_TX2DEEMP_MASK       (0xf<<24)
-#define   DPIO_PCS_TX2DEEMP_9P5                (0<<24)
-#define   DPIO_PCS_TX2DEEMP_6P0                (2<<24)
-#define   DPIO_PCS_TX1DEEMP_MASK       (0xf<<16)
-#define   DPIO_PCS_TX1DEEMP_9P5                (0<<16)
-#define   DPIO_PCS_TX1DEEMP_6P0                (2<<16)
+#define   DPIO_PCS_SWING_CALC_TX0_TX2  (1 << 30)
+#define   DPIO_PCS_SWING_CALC_TX1_TX3  (1 << 31)
+#define   DPIO_PCS_TX2DEEMP_MASK       (0xf << 24)
+#define   DPIO_PCS_TX2DEEMP_9P5                (0 << 24)
+#define   DPIO_PCS_TX2DEEMP_6P0                (2 << 24)
+#define   DPIO_PCS_TX1DEEMP_MASK       (0xf << 16)
+#define   DPIO_PCS_TX1DEEMP_9P5                (0 << 16)
+#define   DPIO_PCS_TX1DEEMP_6P0                (2 << 16)
 #define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
 
 #define _VLV_PCS01_DW10_CH0            0x0228
@@ -1344,10 +1345,10 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW11_CH0              0x822c
 #define _VLV_PCS_DW11_CH1              0x842c
-#define   DPIO_TX2_STAGGER_MASK(x)     ((x)<<24)
-#define   DPIO_LANEDESKEW_STRAP_OVRD   (1<<3)
-#define   DPIO_LEFT_TXFIFO_RST_MASTER  (1<<1)
-#define   DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
+#define   DPIO_TX2_STAGGER_MASK(x)     ((x) << 24)
+#define   DPIO_LANEDESKEW_STRAP_OVRD   (1 << 3)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER  (1 << 1)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
 #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
 #define _VLV_PCS01_DW11_CH0            0x022c
@@ -1366,11 +1367,11 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW12_CH0              0x8230
 #define _VLV_PCS_DW12_CH1              0x8430
-#define   DPIO_TX2_STAGGER_MULT(x)     ((x)<<20)
-#define   DPIO_TX1_STAGGER_MULT(x)     ((x)<<16)
-#define   DPIO_TX1_STAGGER_MASK(x)     ((x)<<8)
-#define   DPIO_LANESTAGGER_STRAP_OVRD  (1<<6)
-#define   DPIO_LANESTAGGER_STRAP(x)    ((x)<<0)
+#define   DPIO_TX2_STAGGER_MULT(x)     ((x) << 20)
+#define   DPIO_TX1_STAGGER_MULT(x)     ((x) << 16)
+#define   DPIO_TX1_STAGGER_MASK(x)     ((x) << 8)
+#define   DPIO_LANESTAGGER_STRAP_OVRD  (1 << 6)
+#define   DPIO_LANESTAGGER_STRAP(x)    ((x) << 0)
 #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
 
 #define _VLV_PCS_DW14_CH0              0x8238
@@ -1391,7 +1392,7 @@ enum i915_power_well_id {
 #define _VLV_TX_DW3_CH0                        0x828c
 #define _VLV_TX_DW3_CH1                        0x848c
 /* The following bit for CHV phy */
-#define   DPIO_TX_UNIQ_TRANS_SCALE_EN  (1<<27)
+#define   DPIO_TX_UNIQ_TRANS_SCALE_EN  (1 << 27)
 #define   DPIO_SWING_MARGIN101_SHIFT   16
 #define   DPIO_SWING_MARGIN101_MASK    (0xff << DPIO_SWING_MARGIN101_SHIFT)
 #define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
@@ -1410,7 +1411,7 @@ enum i915_power_well_id {
 
 #define _VLV_TX_DW5_CH0                        0x8294
 #define _VLV_TX_DW5_CH1                        0x8494
-#define   DPIO_TX_OCALINIT_EN          (1<<31)
+#define   DPIO_TX_OCALINIT_EN          (1 << 31)
 #define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
 
 #define _VLV_TX_DW11_CH0               0x82ac
@@ -1640,10 +1641,10 @@ enum i915_power_well_id {
 #define  PORT_PLL_LOCK_THRESHOLD_SHIFT 1
 #define  PORT_PLL_LOCK_THRESHOLD_MASK  (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
 /* PORT_PLL_10_A */
-#define  PORT_PLL_DCO_AMP_OVR_EN_H     (1<<27)
+#define  PORT_PLL_DCO_AMP_OVR_EN_H     (1 << 27)
 #define  PORT_PLL_DCO_AMP_DEFAULT      15
 #define  PORT_PLL_DCO_AMP_MASK         0x3c00
-#define  PORT_PLL_DCO_AMP(x)           ((x)<<10)
+#define  PORT_PLL_DCO_AMP(x)           ((x) << 10)
 #define _PORT_PLL_BASE(phy, ch)                _BXT_PHY_CH(phy, ch, \
                                                    _PORT_PLL_0_B, \
                                                    _PORT_PLL_0_C)
@@ -1745,7 +1746,7 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_D_GRP_OFFSET, \
                                               _CNL_PORT_TX_AE_GRP_OFFSET, \
                                               _CNL_PORT_TX_F_GRP_OFFSET) + \
-                                              4*(dw))
+                                              4 * (dw))
 #define _CNL_PORT_TX_DW_LN0(port, dw)  (_PICK((port), \
                                               _CNL_PORT_TX_AE_LN0_OFFSET, \
                                               _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1753,7 +1754,7 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_D_LN0_OFFSET, \
                                               _CNL_PORT_TX_AE_LN0_OFFSET, \
                                               _CNL_PORT_TX_F_LN0_OFFSET) + \
-                                              4*(dw))
+                                              4 * (dw))
 
 #define CNL_PORT_TX_DW2_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
 #define CNL_PORT_TX_DW2_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
@@ -1779,7 +1780,7 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
 #define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
 #define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
-                                            (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+                                          ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
                                                    _CNL_PORT_TX_DW4_LN0_AE)))
 #define _ICL_PORT_TX_DW4_GRP_A         0x162690
 #define _ICL_PORT_TX_DW4_GRP_B         0x6C690
@@ -1792,8 +1793,8 @@ enum i915_power_well_id {
 #define ICL_PORT_TX_DW4_LN(port, ln)   _MMIO(_PORT(port, \
                                                   _ICL_PORT_TX_DW4_LN0_A, \
                                                   _ICL_PORT_TX_DW4_LN0_B) + \
-                                             (ln * (_ICL_PORT_TX_DW4_LN1_A - \
-                                                    _ICL_PORT_TX_DW4_LN0_A)))
+                                            ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+                                                     _ICL_PORT_TX_DW4_LN0_A)))
 #define   LOADGEN_SELECT               (1 << 31)
 #define   POST_CURSOR_1(x)             ((x) << 12)
 #define   POST_CURSOR_1_MASK           (0x3F << 12)
@@ -1990,6 +1991,11 @@ enum i915_power_well_id {
                                                   _ICL_PORT_COMP_DW10_A, \
                                                   _ICL_PORT_COMP_DW10_B)
 
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1            _MMIO(0x1638C0)
+#define   DFLEXDPMLE1_DPMLETC_MASK(n)  (0xf << (4 * (n)))
+#define   DFLEXDPMLE1_DPMLETC(n, x)    ((x) << (4 * (n)))
+
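+/*
+ * The DFLEX macros carve one 4-bit field per instance n out of
+ * PORT_TX_DFLEXDPMLE1. Worked values for n == 2 (bits 11:8):
+ *   DFLEXDPMLE1_DPMLETC_MASK(2) == 0xf << 8 == 0x00000f00
+ *   DFLEXDPMLE1_DPMLETC(2, 0x3) == 0x3 << 8 == 0x00000300
+ */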
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A                        0x16218C
 #define _PORT_REF_DW3_BC               0x6C18C
@@ -2134,8 +2140,8 @@ enum i915_power_well_id {
 /* SKL balance leg register */
 #define DISPIO_CR_TX_BMU_CR0           _MMIO(0x6C00C)
 /* I_boost values */
-#define BALANCE_LEG_SHIFT(port)                (8+3*(port))
-#define BALANCE_LEG_MASK(port)         (7<<(8+3*(port)))
+#define BALANCE_LEG_SHIFT(port)                (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port)         (7 << (8 + 3 * (port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT      23
 #define BALANCE_LEG_DISABLE(port)      (1 << (23 + (port)))
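
[The reformatted I_boost helpers pack one 3-bit balance-leg field per port starting at bit 8. Assuming the usual enum port numbering (PORT_A == 0, PORT_B == 1, ...):

    BALANCE_LEG_SHIFT(PORT_B)   == 8 + 3 * 1 == 11
    BALANCE_LEG_MASK(PORT_B)    == 7 << 11   == 0x3800
    BALANCE_LEG_DISABLE(PORT_B) == 1 << 24
]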
@@ -2155,10 +2161,10 @@ enum i915_power_well_id {
 #define   I830_FENCE_TILING_Y_SHIFT    12
 #define   I830_FENCE_SIZE_BITS(size)   ((ffs((size) >> 19) - 1) << 8)
 #define   I830_FENCE_PITCH_SHIFT       4
-#define   I830_FENCE_REG_VALID         (1<<0)
+#define   I830_FENCE_REG_VALID         (1 << 0)
 #define   I915_FENCE_MAX_PITCH_VAL     4
 #define   I830_FENCE_MAX_PITCH_VAL     6
-#define   I830_FENCE_MAX_SIZE_VAL      (1<<8)
+#define   I830_FENCE_MAX_SIZE_VAL      (1 << 8)
 
 #define   I915_FENCE_START_MASK                0x0ff00000
 #define   I915_FENCE_SIZE_BITS(size)   ((ffs((size) >> 20) - 1) << 8)
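
[The fence size field stores log2 of the size in units of the minimum fence, via ffs(): 1 MiB granularity for the I915 variant (size >> 20), 512 KiB for the I830 variant (size >> 19). For example:

    I915_FENCE_SIZE_BITS(4 << 20) == (ffs(4) - 1) << 8 == 2 << 8 == 0x200
    I830_FENCE_SIZE_BITS(1 << 20) == (ffs(2) - 1) << 8 == 1 << 8 == 0x100
]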
@@ -2167,7 +2173,7 @@ enum i915_power_well_id {
 #define FENCE_REG_965_HI(i)            _MMIO(0x03000 + (i) * 8 + 4)
 #define   I965_FENCE_PITCH_SHIFT       2
 #define   I965_FENCE_TILING_Y_SHIFT    1
-#define   I965_FENCE_REG_VALID         (1<<0)
+#define   I965_FENCE_REG_VALID         (1 << 0)
 #define   I965_FENCE_MAX_PITCH_VAL     0x0400
 
 #define FENCE_REG_GEN6_LO(i)           _MMIO(0x100000 + (i) * 8)
@@ -2190,13 +2196,13 @@ enum i915_power_well_id {
 #define   PGTBL_ADDRESS_LO_MASK        0xfffff000 /* bits [31:12] */
 #define   PGTBL_ADDRESS_HI_MASK        0x000000f0 /* bits [35:32] (gen4) */
 #define PGTBL_ER       _MMIO(0x02024)
-#define PRB0_BASE      (0x2030-0x30)
-#define PRB1_BASE      (0x2040-0x30) /* 830,gen3 */
-#define PRB2_BASE      (0x2050-0x30) /* gen3 */
-#define SRB0_BASE      (0x2100-0x30) /* gen2 */
-#define SRB1_BASE      (0x2110-0x30) /* gen2 */
-#define SRB2_BASE      (0x2120-0x30) /* 830 */
-#define SRB3_BASE      (0x2130-0x30) /* 830 */
+#define PRB0_BASE      (0x2030 - 0x30)
+#define PRB1_BASE      (0x2040 - 0x30) /* 830,gen3 */
+#define PRB2_BASE      (0x2050 - 0x30) /* gen3 */
+#define SRB0_BASE      (0x2100 - 0x30) /* gen2 */
+#define SRB1_BASE      (0x2110 - 0x30) /* gen2 */
+#define SRB2_BASE      (0x2120 - 0x30) /* 830 */
+#define SRB3_BASE      (0x2130 - 0x30) /* 830 */
 #define RENDER_RING_BASE       0x02000
 #define BSD_RING_BASE          0x04000
 #define GEN6_BSD_RING_BASE     0x12000
@@ -2209,14 +2215,14 @@ enum i915_power_well_id {
 #define GEN11_VEBOX_RING_BASE          0x1c8000
 #define GEN11_VEBOX2_RING_BASE         0x1d8000
 #define BLT_RING_BASE          0x22000
-#define RING_TAIL(base)                _MMIO((base)+0x30)
-#define RING_HEAD(base)                _MMIO((base)+0x34)
-#define RING_START(base)       _MMIO((base)+0x38)
-#define RING_CTL(base)         _MMIO((base)+0x3c)
+#define RING_TAIL(base)                _MMIO((base) + 0x30)
+#define RING_HEAD(base)                _MMIO((base) + 0x34)
+#define RING_START(base)       _MMIO((base) + 0x38)
+#define RING_CTL(base)         _MMIO((base) + 0x3c)
 #define   RING_CTL_SIZE(size)  ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_SYNC_0(base)      _MMIO((base)+0x40)
-#define RING_SYNC_1(base)      _MMIO((base)+0x44)
-#define RING_SYNC_2(base)      _MMIO((base)+0x48)
+#define RING_SYNC_0(base)      _MMIO((base) + 0x40)
+#define RING_SYNC_1(base)      _MMIO((base) + 0x44)
+#define RING_SYNC_2(base)      _MMIO((base) + 0x48)
 #define GEN6_RVSYNC    (RING_SYNC_0(RENDER_RING_BASE))
 #define GEN6_RBSYNC    (RING_SYNC_1(RENDER_RING_BASE))
 #define GEN6_RVESYNC   (RING_SYNC_2(RENDER_RING_BASE))
@@ -2230,21 +2236,22 @@ enum i915_power_well_id {
 #define GEN6_VERSYNC   (RING_SYNC_1(VEBOX_RING_BASE))
 #define GEN6_VEVSYNC   (RING_SYNC_2(VEBOX_RING_BASE))
 #define GEN6_NOSYNC    INVALID_MMIO_REG
-#define RING_PSMI_CTL(base)    _MMIO((base)+0x50)
-#define RING_MAX_IDLE(base)    _MMIO((base)+0x54)
-#define RING_HWS_PGA(base)     _MMIO((base)+0x80)
-#define RING_HWS_PGA_GEN6(base)        _MMIO((base)+0x2080)
-#define RING_RESET_CTL(base)   _MMIO((base)+0xd0)
+#define RING_PSMI_CTL(base)    _MMIO((base) + 0x50)
+#define RING_MAX_IDLE(base)    _MMIO((base) + 0x54)
+#define RING_HWS_PGA(base)     _MMIO((base) + 0x80)
+#define RING_HWS_PGA_GEN6(base)        _MMIO((base) + 0x2080)
+#define RING_RESET_CTL(base)   _MMIO((base) + 0xd0)
 #define   RESET_CTL_REQUEST_RESET  (1 << 0)
 #define   RESET_CTL_READY_TO_RESET (1 << 1)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
 
 #define HSW_GTT_CACHE_EN       _MMIO(0x4024)
 #define   GTT_CACHE_EN_ALL     0xF0007FFF
 #define GEN7_WR_WATERMARK      _MMIO(0x4028)
 #define GEN7_GFX_PRIO_CTRL     _MMIO(0x402C)
 #define ARB_MODE               _MMIO(0x4030)
-#define   ARB_MODE_SWIZZLE_SNB (1<<4)
-#define   ARB_MODE_SWIZZLE_IVB (1<<5)
+#define   ARB_MODE_SWIZZLE_SNB (1 << 4)
+#define   ARB_MODE_SWIZZLE_IVB (1 << 5)
 #define GEN7_GFX_PEND_TLB0     _MMIO(0x4034)
 #define GEN7_GFX_PEND_TLB1     _MMIO(0x4038)
 /* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
@@ -2254,30 +2261,30 @@ enum i915_power_well_id {
 #define GEN7_GFX_MAX_REQ_COUNT         _MMIO(0x4074)
 
 #define GAMTARBMODE            _MMIO(0x04a08)
-#define   ARB_MODE_BWGTLB_DISABLE (1<<9)
-#define   ARB_MODE_SWIZZLE_BDW (1<<1)
+#define   ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define   ARB_MODE_SWIZZLE_BDW (1 << 1)
 #define RENDER_HWS_PGA_GEN7    _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
 #define GEN8_RING_FAULT_REG    _MMIO(0x4094)
 #define   GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_GTTSEL_MASK (1 << 11)
 #define   RING_FAULT_SRCID(x)  (((x) >> 3) & 0xff)
 #define   RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define   RING_FAULT_VALID     (1<<0)
+#define   RING_FAULT_VALID     (1 << 0)
 #define DONE_REG               _MMIO(0x40b0)
 #define GEN8_PRIVATE_PAT_LO    _MMIO(0x40e0)
 #define GEN8_PRIVATE_PAT_HI    _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
 #define BSD_HWS_PGA_GEN7       _MMIO(0x04180)
 #define BLT_HWS_PGA_GEN7       _MMIO(0x04280)
 #define VEBOX_HWS_PGA_GEN7     _MMIO(0x04380)
-#define RING_ACTHD(base)       _MMIO((base)+0x74)
-#define RING_ACTHD_UDW(base)   _MMIO((base)+0x5c)
-#define RING_NOPID(base)       _MMIO((base)+0x94)
-#define RING_IMR(base)         _MMIO((base)+0xa8)
-#define RING_HWSTAM(base)      _MMIO((base)+0x98)
-#define RING_TIMESTAMP(base)           _MMIO((base)+0x358)
-#define RING_TIMESTAMP_UDW(base)       _MMIO((base)+0x358 + 4)
+#define RING_ACTHD(base)       _MMIO((base) + 0x74)
+#define RING_ACTHD_UDW(base)   _MMIO((base) + 0x5c)
+#define RING_NOPID(base)       _MMIO((base) + 0x94)
+#define RING_IMR(base)         _MMIO((base) + 0xa8)
+#define RING_HWSTAM(base)      _MMIO((base) + 0x98)
+#define RING_TIMESTAMP(base)           _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base)       _MMIO((base) + 0x358 + 4)
 #define   TAIL_ADDR            0x001FFFF8
 #define   HEAD_WRAP_COUNT      0xFFE00000
 #define   HEAD_WRAP_ONE                0x00200000
@@ -2290,24 +2297,25 @@ enum i915_power_well_id {
 #define   RING_VALID_MASK      0x00000001
 #define   RING_VALID           0x00000001
 #define   RING_INVALID         0x00000000
-#define   RING_WAIT_I8XX       (1<<0) /* gen2, PRBx_HEAD */
-#define   RING_WAIT            (1<<11) /* gen3+, PRBx_CTL */
-#define   RING_WAIT_SEMAPHORE  (1<<10) /* gen6+ */
+#define   RING_WAIT_I8XX       (1 << 0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT            (1 << 11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE  (1 << 10) /* gen6+ */
 
-#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
 #define   RING_MAX_NONPRIV_SLOTS  12
 
 #define GEN7_TLB_RD_ADDR       _MMIO(0x4700)
 
 #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1<<18)
+#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1 << 18)
 
 #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
 #define   GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
 
 #define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
-#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1<<28)
-#define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT       (1<<24)
+#define   GAMT_CHKN_DISABLE_L3_COH_PIPE                        (1 << 31)
+#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1 << 28)
+#define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT       (1 << 24)
 
 #if 0
 #define PRB0_TAIL      _MMIO(0x2030)
@@ -2333,19 +2341,19 @@ enum i915_power_well_id {
 #define   GEN11_MCR_SLICE_MASK         GEN11_MCR_SLICE(0xf)
 #define   GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
 #define   GEN11_MCR_SUBSLICE_MASK      GEN11_MCR_SUBSLICE(0x7)
-#define RING_IPEIR(base)       _MMIO((base)+0x64)
-#define RING_IPEHR(base)       _MMIO((base)+0x68)
+#define RING_IPEIR(base)       _MMIO((base) + 0x64)
+#define RING_IPEHR(base)       _MMIO((base) + 0x68)
 /*
  * On GEN4, only the render ring INSTDONE exists and has a different
  * layout than the GEN7+ version.
  * The GEN2 counterpart of this register is GEN2_INSTDONE.
  */
-#define RING_INSTDONE(base)    _MMIO((base)+0x6c)
-#define RING_INSTPS(base)      _MMIO((base)+0x70)
-#define RING_DMA_FADD(base)    _MMIO((base)+0x78)
-#define RING_DMA_FADD_UDW(base)        _MMIO((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base)      _MMIO((base)+0xc0)
-#define RING_MI_MODE(base)     _MMIO((base)+0x9c)
+#define RING_INSTDONE(base)    _MMIO((base) + 0x6c)
+#define RING_INSTPS(base)      _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base)    _MMIO((base) + 0x78)
+#define RING_DMA_FADD_UDW(base)        _MMIO((base) + 0x60) /* gen8+ */
+#define RING_INSTPM(base)      _MMIO((base) + 0xc0)
+#define RING_MI_MODE(base)     _MMIO((base) + 0x9c)
 #define INSTPS         _MMIO(0x2070) /* 965+ only */
 #define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
 #define ACTHD_I965     _MMIO(0x2074)
@@ -2353,37 +2361,37 @@ enum i915_power_well_id {
 #define HWS_ADDRESS_MASK       0xfffff000
 #define HWS_START_ADDRESS_SHIFT        4
 #define PWRCTXA                _MMIO(0x2088) /* 965GM+ only */
-#define   PWRCTX_EN    (1<<0)
+#define   PWRCTX_EN    (1 << 0)
 #define IPEIR          _MMIO(0x2088)
 #define IPEHR          _MMIO(0x208c)
 #define GEN2_INSTDONE  _MMIO(0x2090)
 #define NOPID          _MMIO(0x2094)
 #define HWSTAM         _MMIO(0x2098)
 #define DMA_FADD_I8XX  _MMIO(0x20d0)
-#define RING_BBSTATE(base)     _MMIO((base)+0x110)
+#define RING_BBSTATE(base)     _MMIO((base) + 0x110)
 #define   RING_BB_PPGTT                (1 << 5)
-#define RING_SBBADDR(base)     _MMIO((base)+0x114) /* hsw+ */
-#define RING_SBBSTATE(base)    _MMIO((base)+0x118) /* hsw+ */
-#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
-#define RING_BBADDR(base)      _MMIO((base)+0x140)
-#define RING_BBADDR_UDW(base)  _MMIO((base)+0x168) /* gen8+ */
-#define RING_BB_PER_CTX_PTR(base)      _MMIO((base)+0x1c0) /* gen8+ */
-#define RING_INDIRECT_CTX(base)                _MMIO((base)+0x1c4) /* gen8+ */
-#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
-#define RING_CTX_TIMESTAMP(base)       _MMIO((base)+0x3a8) /* gen8+ */
+#define RING_SBBADDR(base)     _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base)    _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base)      _MMIO((base) + 0x140)
+#define RING_BBADDR_UDW(base)  _MMIO((base) + 0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base)      _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base)                _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base)       _MMIO((base) + 0x3a8) /* gen8+ */
 
 #define ERROR_GEN6     _MMIO(0x40a0)
 #define GEN7_ERR_INT   _MMIO(0x44040)
-#define   ERR_INT_POISON               (1<<31)
-#define   ERR_INT_MMIO_UNCLAIMED       (1<<13)
-#define   ERR_INT_PIPE_CRC_DONE_C      (1<<8)
-#define   ERR_INT_FIFO_UNDERRUN_C      (1<<6)
-#define   ERR_INT_PIPE_CRC_DONE_B      (1<<5)
-#define   ERR_INT_FIFO_UNDERRUN_B      (1<<3)
-#define   ERR_INT_PIPE_CRC_DONE_A      (1<<2)
-#define   ERR_INT_PIPE_CRC_DONE(pipe)  (1<<(2 + (pipe)*3))
-#define   ERR_INT_FIFO_UNDERRUN_A      (1<<0)
-#define   ERR_INT_FIFO_UNDERRUN(pipe)  (1<<((pipe)*3))
+#define   ERR_INT_POISON               (1 << 31)
+#define   ERR_INT_MMIO_UNCLAIMED       (1 << 13)
+#define   ERR_INT_PIPE_CRC_DONE_C      (1 << 8)
+#define   ERR_INT_FIFO_UNDERRUN_C      (1 << 6)
+#define   ERR_INT_PIPE_CRC_DONE_B      (1 << 5)
+#define   ERR_INT_FIFO_UNDERRUN_B      (1 << 3)
+#define   ERR_INT_PIPE_CRC_DONE_A      (1 << 2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)  (1 << (2 + (pipe) * 3))
+#define   ERR_INT_FIFO_UNDERRUN_A      (1 << 0)
+#define   ERR_INT_FIFO_UNDERRUN(pipe)  (1 << ((pipe) * 3))
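+/*
+ * The parameterised forms encode the 3-bit-per-pipe stride of GEN7_ERR_INT
+ * and can be checked against the fixed definitions above (PIPE_C == 2 as
+ * in enum pipe):
+ *   ERR_INT_FIFO_UNDERRUN(PIPE_C) == 1 << (2 * 3)     == 1 << 6  (== _C)
+ *   ERR_INT_PIPE_CRC_DONE(PIPE_C) == 1 << (2 + 2 * 3) == 1 << 8  (== _C)
+ */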
 
 #define GEN8_FAULT_TLB_DATA0           _MMIO(0x4b10)
 #define GEN8_FAULT_TLB_DATA1           _MMIO(0x4b14)
@@ -2391,7 +2399,7 @@ enum i915_power_well_id {
 #define   FAULT_GTT_SEL                        (1 << 4)
 
 #define FPGA_DBG               _MMIO(0x42300)
-#define   FPGA_DBG_RM_NOCLAIM  (1<<31)
+#define   FPGA_DBG_RM_NOCLAIM  (1 << 31)
 
 #define CLAIM_ER               _MMIO(VLV_DISPLAY_BASE + 0x2028)
 #define   CLAIM_ER_CLR         (1 << 31)
@@ -2400,22 +2408,22 @@ enum i915_power_well_id {
 
 #define DERRMR         _MMIO(0x44050)
 /* Note that HBLANK events are reserved on bdw+ */
-#define   DERRMR_PIPEA_SCANLINE                (1<<0)
-#define   DERRMR_PIPEA_PRI_FLIP_DONE   (1<<1)
-#define   DERRMR_PIPEA_SPR_FLIP_DONE   (1<<2)
-#define   DERRMR_PIPEA_VBLANK          (1<<3)
-#define   DERRMR_PIPEA_HBLANK          (1<<5)
-#define   DERRMR_PIPEB_SCANLINE        (1<<8)
-#define   DERRMR_PIPEB_PRI_FLIP_DONE   (1<<9)
-#define   DERRMR_PIPEB_SPR_FLIP_DONE   (1<<10)
-#define   DERRMR_PIPEB_VBLANK          (1<<11)
-#define   DERRMR_PIPEB_HBLANK          (1<<13)
+#define   DERRMR_PIPEA_SCANLINE                (1 << 0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE   (1 << 1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE   (1 << 2)
+#define   DERRMR_PIPEA_VBLANK          (1 << 3)
+#define   DERRMR_PIPEA_HBLANK          (1 << 5)
+#define   DERRMR_PIPEB_SCANLINE                (1 << 8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE   (1 << 9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE   (1 << 10)
+#define   DERRMR_PIPEB_VBLANK          (1 << 11)
+#define   DERRMR_PIPEB_HBLANK          (1 << 13)
 /* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define   DERRMR_PIPEC_SCANLINE                (1<<14)
-#define   DERRMR_PIPEC_PRI_FLIP_DONE   (1<<15)
-#define   DERRMR_PIPEC_SPR_FLIP_DONE   (1<<20)
-#define   DERRMR_PIPEC_VBLANK          (1<<21)
-#define   DERRMR_PIPEC_HBLANK          (1<<22)
+#define   DERRMR_PIPEC_SCANLINE                (1 << 14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE   (1 << 15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE   (1 << 20)
+#define   DERRMR_PIPEC_VBLANK          (1 << 21)
+#define   DERRMR_PIPEC_HBLANK          (1 << 22)
 
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
@@ -2439,7 +2447,7 @@ enum i915_power_well_id {
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE       (1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
-#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)      ((x)<<1) /* gen8+ */
+#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)      ((x) << 1) /* gen8+ */
 #define  _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH  (1 << 1) /* gen6 */
 
 #define MI_MODE                _MMIO(0x209c)
@@ -2478,22 +2486,22 @@ enum i915_power_well_id {
 
 #define GFX_MODE       _MMIO(0x2520)
 #define GFX_MODE_GEN7  _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
-#define   GFX_RUN_LIST_ENABLE          (1<<15)
-#define   GFX_INTERRUPT_STEERING       (1<<14)
-#define   GFX_TLB_INVALIDATE_EXPLICIT  (1<<13)
-#define   GFX_SURFACE_FAULT_ENABLE     (1<<12)
-#define   GFX_REPLAY_MODE              (1<<11)
-#define   GFX_PSMI_GRANULARITY         (1<<10)
-#define   GFX_PPGTT_ENABLE             (1<<9)
-#define   GEN8_GFX_PPGTT_48B           (1<<7)
-
-#define   GFX_FORWARD_VBLANK_MASK      (3<<5)
-#define   GFX_FORWARD_VBLANK_NEVER     (0<<5)
-#define   GFX_FORWARD_VBLANK_ALWAYS    (1<<5)
-#define   GFX_FORWARD_VBLANK_COND      (2<<5)
-
-#define   GEN11_GFX_DISABLE_LEGACY_MODE        (1<<3)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define   GFX_RUN_LIST_ENABLE          (1 << 15)
+#define   GFX_INTERRUPT_STEERING       (1 << 14)
+#define   GFX_TLB_INVALIDATE_EXPLICIT  (1 << 13)
+#define   GFX_SURFACE_FAULT_ENABLE     (1 << 12)
+#define   GFX_REPLAY_MODE              (1 << 11)
+#define   GFX_PSMI_GRANULARITY         (1 << 10)
+#define   GFX_PPGTT_ENABLE             (1 << 9)
+#define   GEN8_GFX_PPGTT_48B           (1 << 7)
+
+#define   GFX_FORWARD_VBLANK_MASK      (3 << 5)
+#define   GFX_FORWARD_VBLANK_NEVER     (0 << 5)
+#define   GFX_FORWARD_VBLANK_ALWAYS    (1 << 5)
+#define   GFX_FORWARD_VBLANK_COND      (2 << 5)
+
+#define   GEN11_GFX_DISABLE_LEGACY_MODE        (1 << 3)
 
 #define VLV_DISPLAY_BASE 0x180000
 #define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -2507,8 +2515,8 @@ enum i915_power_well_id {
 #define IMR            _MMIO(0x20a8)
 #define ISR            _MMIO(0x20ac)
 #define VLV_GUNIT_CLOCK_GATE   _MMIO(VLV_DISPLAY_BASE + 0x2060)
-#define   GINT_DIS             (1<<22)
-#define   GCFG_DIS             (1<<8)
+#define   GINT_DIS             (1 << 22)
+#define   GCFG_DIS             (1 << 8)
 #define VLV_GUNIT_CLOCK_GATE2  _MMIO(VLV_DISPLAY_BASE + 0x2064)
 #define VLV_IIR_RW     _MMIO(VLV_DISPLAY_BASE + 0x2084)
 #define VLV_IER                _MMIO(VLV_DISPLAY_BASE + 0x20a0)
@@ -2518,35 +2526,35 @@ enum i915_power_well_id {
 #define VLV_PCBR       _MMIO(VLV_DISPLAY_BASE + 0x2120)
 #define VLV_PCBR_ADDR_SHIFT    12
 
-#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
 #define EIR            _MMIO(0x20b0)
 #define EMR            _MMIO(0x20b4)
 #define ESR            _MMIO(0x20b8)
-#define   GM45_ERROR_PAGE_TABLE                                (1<<5)
-#define   GM45_ERROR_MEM_PRIV                          (1<<4)
-#define   I915_ERROR_PAGE_TABLE                                (1<<4)
-#define   GM45_ERROR_CP_PRIV                           (1<<3)
-#define   I915_ERROR_MEMORY_REFRESH                    (1<<1)
-#define   I915_ERROR_INSTRUCTION                       (1<<0)
+#define   GM45_ERROR_PAGE_TABLE                                (1 << 5)
+#define   GM45_ERROR_MEM_PRIV                          (1 << 4)
+#define   I915_ERROR_PAGE_TABLE                                (1 << 4)
+#define   GM45_ERROR_CP_PRIV                           (1 << 3)
+#define   I915_ERROR_MEMORY_REFRESH                    (1 << 1)
+#define   I915_ERROR_INSTRUCTION                       (1 << 0)
 #define INSTPM         _MMIO(0x20c0)
-#define   INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define   INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+#define   INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define   INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
                                        will not assert AGPBUSY# and will only
                                        be delivered when out of C3. */
-#define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
-#define   INSTPM_TLB_INVALIDATE        (1<<9)
-#define   INSTPM_SYNC_FLUSH    (1<<5)
+#define   INSTPM_FORCE_ORDERING                                (1 << 7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE        (1 << 9)
+#define   INSTPM_SYNC_FLUSH    (1 << 5)
 #define ACTHD          _MMIO(0x20c8)
 #define MEM_MODE       _MMIO(0x20cc)
-#define   MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
-#define   MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
-#define   MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define   MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
+#define   MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
+#define   MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
 #define FW_BLC         _MMIO(0x20d8)
 #define FW_BLC2                _MMIO(0x20dc)
 #define FW_BLC_SELF    _MMIO(0x20e0) /* 915+ only */
-#define   FW_BLC_SELF_EN_MASK      (1<<31)
-#define   FW_BLC_SELF_FIFO_MASK    (1<<16) /* 945 only */
-#define   FW_BLC_SELF_EN           (1<<15) /* 945 only */
+#define   FW_BLC_SELF_EN_MASK      (1 << 31)
+#define   FW_BLC_SELF_FIFO_MASK    (1 << 16) /* 945 only */
+#define   FW_BLC_SELF_EN           (1 << 15) /* 945 only */
 #define MM_BURST_LENGTH     0x00700000
 #define MM_FIFO_WATERMARK   0x0001F000
 #define LM_BURST_LENGTH     0x00000700
@@ -2645,37 +2653,40 @@ enum i915_power_well_id {
 #define   MI_AGPBUSY_830_MODE                  (1 << 0) /* 85x only */
 
 #define CACHE_MODE_0   _MMIO(0x2120) /* 915+ only */
-#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
-#define   CM0_IZ_OPT_DISABLE      (1<<6)
-#define   CM0_ZR_OPT_DISABLE      (1<<5)
-#define          CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
-#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define   CM0_COLOR_EVICT_DISABLE (1<<3)
-#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define   CM0_IZ_OPT_DISABLE      (1 << 6)
+#define   CM0_ZR_OPT_DISABLE      (1 << 5)
+#define          CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define   CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define   CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define   CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define   CM0_RC_OP_FLUSH_DISABLE (1 << 0)
 #define GFX_FLSH_CNTL  _MMIO(0x2170) /* 915+ only */
 #define GFX_FLSH_CNTL_GEN6     _MMIO(0x101008)
-#define   GFX_FLSH_CNTL_EN     (1<<0)
+#define   GFX_FLSH_CNTL_EN     (1 << 0)
 #define ECOSKPD                _MMIO(0x21d0)
-#define   ECO_GATING_CX_ONLY   (1<<3)
-#define   ECO_FLIP_DONE                (1<<0)
+#define   ECO_GATING_CX_ONLY   (1 << 3)
+#define   ECO_FLIP_DONE                (1 << 0)
 
 #define CACHE_MODE_0_GEN7      _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1<<0)
-#define   HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define   HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
 #define CACHE_MODE_1           _MMIO(0x7004) /* IVB+ */
-#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    (1<<6)
-#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    (1<<6)
-#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   (1<<1)
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    (1 << 6)
+#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    (1 << 6)
+#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   (1 << 1)
+
+#define GEN10_CACHE_MODE_SS                    _MMIO(0xe420)
+#define   FLOAT_BLEND_OPTIMIZATION_ENABLE      (1 << 4)
 
 #define GEN6_BLITTER_ECOSKPD   _MMIO(0x221d0)
 #define   GEN6_BLITTER_LOCK_SHIFT                      16
-#define   GEN6_BLITTER_FBC_NOTIFY                      (1<<3)
+#define   GEN6_BLITTER_FBC_NOTIFY                      (1 << 3)
 
 #define GEN6_RC_SLEEP_PSMI_CONTROL     _MMIO(0x2050)
 #define   GEN6_PSMI_SLEEP_MSG_DISABLE  (1 << 0)
 #define   GEN8_RC_SEMA_IDLE_MSG_DISABLE        (1 << 12)
-#define   GEN8_FF_DOP_CLOCK_GATE_DISABLE       (1<<10)
+#define   GEN8_FF_DOP_CLOCK_GATE_DISABLE       (1 << 10)
 
 #define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
 #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
@@ -2714,6 +2725,10 @@ enum i915_power_well_id {
 #define   GEN10_F2_SS_DIS_SHIFT                18
 #define   GEN10_F2_SS_DIS_MASK         (0xf << GEN10_F2_SS_DIS_SHIFT)
 
+#define        GEN10_MIRROR_FUSE3              _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT     4
+#define GEN10_L3BANK_MASK   0x0F
+
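The new GEN10_MIRROR_FUSE3 macros above are enough to sketch how the number of usable L3 bank pairs could be derived from the fuse. A minimal user-space sketch, not driver code; the fuse readback value and the reading of a set bit as "pair disabled" are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define GEN10_L3BANK_PAIR_COUNT 4
#define GEN10_L3BANK_MASK       0x0F

int main(void)
{
        uint32_t fuse = 0x0000000a; /* hypothetical GEN10_MIRROR_FUSE3 readback */
        uint32_t disabled = fuse & GEN10_L3BANK_MASK; /* assumed: one bit per disabled pair */
        int pairs_enabled = 0;

        for (int i = 0; i < GEN10_L3BANK_PAIR_COUNT; i++)
                if (!(disabled & (1 << i)))
                        pairs_enabled++;

        printf("enabled L3 bank pairs: %d\n", pairs_enabled);
        return 0;
}
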
 #define GEN8_EU_DISABLE0               _MMIO(0x9134)
 #define   GEN8_EU_DIS0_S0_MASK         0xffffff
 #define   GEN8_EU_DIS0_S1_SHIFT                24
@@ -2727,7 +2742,7 @@ enum i915_power_well_id {
 #define GEN8_EU_DISABLE2               _MMIO(0x913c)
 #define   GEN8_EU_DIS2_S2_MASK         0xff
 
-#define GEN9_EU_DISABLE(slice)         _MMIO(0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice)         _MMIO(0x9134 + (slice) * 0x4)
 
 #define GEN10_EU_DISABLE3              _MMIO(0x9140)
 #define   GEN10_EU_DIS_SS_MASK         0xff
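GEN9_EU_DISABLE(slice) folds the per-slice register stride into the macro itself, which is what the added spaces around the multiply make readable. A small sketch of the offset arithmetic, modelling _MMIO() as a plain numeric offset rather than the driver's register type:

#include <stdio.h>

/* Model _MMIO() as a plain offset for illustration. */
#define GEN9_EU_DISABLE(slice)  (0x9134 + (slice) * 0x4)

int main(void)
{
        for (int slice = 0; slice < 3; slice++)
                printf("slice %d -> reg 0x%x\n", slice, GEN9_EU_DISABLE(slice));
        return 0; /* prints 0x9134, 0x9138, 0x913c */
}
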
@@ -2784,44 +2799,44 @@ enum i915_power_well_id {
         (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
 
 /* These are all the "old" interrupts */
-#define ILK_BSD_USER_INTERRUPT                         (1<<5)
-
-#define I915_PM_INTERRUPT                              (1<<31)
-#define I915_ISP_INTERRUPT                             (1<<22)
-#define I915_LPE_PIPE_B_INTERRUPT                      (1<<21)
-#define I915_LPE_PIPE_A_INTERRUPT                      (1<<20)
-#define I915_MIPIC_INTERRUPT                           (1<<19)
-#define I915_MIPIA_INTERRUPT                           (1<<18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT             (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT                    (1<<17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT           (1<<16)
-#define I915_MASTER_ERROR_INTERRUPT                    (1<<15)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT     (1<<15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT           (1<<14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT       (1<<14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT           (1<<13)
-#define I915_HWB_OOM_INTERRUPT                         (1<<13)
-#define I915_LPE_PIPE_C_INTERRUPT                      (1<<12)
-#define I915_SYNC_STATUS_INTERRUPT                     (1<<12)
-#define I915_MISC_INTERRUPT                            (1<<11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT    (1<<11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT           (1<<10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT    (1<<10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT            (1<<9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT      (1<<9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT             (1<<8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT    (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT           (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT            (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT           (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT            (1<<4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT             (1<<3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT             (1<<2)
-#define I915_DEBUG_INTERRUPT                           (1<<2)
-#define I915_WINVALID_INTERRUPT                                (1<<1)
-#define I915_USER_INTERRUPT                            (1<<1)
-#define I915_ASLE_INTERRUPT                            (1<<0)
-#define I915_BSD_USER_INTERRUPT                                (1<<25)
+#define ILK_BSD_USER_INTERRUPT                         (1 << 5)
+
+#define I915_PM_INTERRUPT                              (1 << 31)
+#define I915_ISP_INTERRUPT                             (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT                      (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT                      (1 << 20)
+#define I915_MIPIC_INTERRUPT                           (1 << 19)
+#define I915_MIPIA_INTERRUPT                           (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT             (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT                    (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT           (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT                    (1 << 15)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT     (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT           (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT       (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT           (1 << 13)
+#define I915_HWB_OOM_INTERRUPT                         (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT                      (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT                     (1 << 12)
+#define I915_MISC_INTERRUPT                            (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT    (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT           (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT    (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT            (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT      (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT             (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT    (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT           (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT            (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT           (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT            (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT             (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT             (1 << 2)
+#define I915_DEBUG_INTERRUPT                           (1 << 2)
+#define I915_WINVALID_INTERRUPT                                (1 << 1)
+#define I915_USER_INTERRUPT                            (1 << 1)
+#define I915_ASLE_INTERRUPT                            (1 << 0)
+#define I915_BSD_USER_INTERRUPT                                (1 << 25)
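These single-bit interrupt masks are meant to be OR-ed together into enable masks and AND-ed against status reads. A minimal sketch of that usage; the IIR value here is a made-up example, not a real register dump:

#include <stdint.h>
#include <stdio.h>

#define I915_DISPLAY_PORT_INTERRUPT             (1 << 17)
#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT    (1 << 7)
#define I915_USER_INTERRUPT                     (1 << 1)

int main(void)
{
        uint32_t enable = I915_DISPLAY_PORT_INTERRUPT |
                          I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                          I915_USER_INTERRUPT;
        uint32_t iir = 0x00020082; /* hypothetical IIR readback: bits 17, 7, 1 */

        if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
                printf("pipe A vblank fired\n");
        if (iir & enable & I915_USER_INTERRUPT)
                printf("user interrupt fired\n");
        return 0;
}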
 
 #define I915_HDMI_LPE_AUDIO_BASE       (VLV_DISPLAY_BASE + 0x65000)
 #define I915_HDMI_LPE_AUDIO_SIZE       0x1000
@@ -2844,19 +2859,19 @@ enum i915_power_well_id {
 #define GEN7_FF_THREAD_MODE            _MMIO(0x20a0)
 #define   GEN7_FF_SCHED_MASK           0x0077070
 #define   GEN8_FF_DS_REF_CNT_FFME      (1 << 19)
-#define   GEN7_FF_TS_SCHED_HS1         (0x5<<16)
-#define   GEN7_FF_TS_SCHED_HS0         (0x3<<16)
-#define   GEN7_FF_TS_SCHED_LOAD_BALANCE        (0x1<<16)
-#define   GEN7_FF_TS_SCHED_HW          (0x0<<16) /* Default */
+#define   GEN7_FF_TS_SCHED_HS1         (0x5 << 16)
+#define   GEN7_FF_TS_SCHED_HS0         (0x3 << 16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE        (0x1 << 16)
+#define   GEN7_FF_TS_SCHED_HW          (0x0 << 16) /* Default */
 #define   GEN7_FF_VS_REF_CNT_FFME      (1 << 15)
-#define   GEN7_FF_VS_SCHED_HS1         (0x5<<12)
-#define   GEN7_FF_VS_SCHED_HS0         (0x3<<12)
-#define   GEN7_FF_VS_SCHED_LOAD_BALANCE        (0x1<<12) /* Default */
-#define   GEN7_FF_VS_SCHED_HW          (0x0<<12)
-#define   GEN7_FF_DS_SCHED_HS1         (0x5<<4)
-#define   GEN7_FF_DS_SCHED_HS0         (0x3<<4)
-#define   GEN7_FF_DS_SCHED_LOAD_BALANCE        (0x1<<4)  /* Default */
-#define   GEN7_FF_DS_SCHED_HW          (0x0<<4)
+#define   GEN7_FF_VS_SCHED_HS1         (0x5 << 12)
+#define   GEN7_FF_VS_SCHED_HS0         (0x3 << 12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE        (0x1 << 12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW          (0x0 << 12)
+#define   GEN7_FF_DS_SCHED_HS1         (0x5 << 4)
+#define   GEN7_FF_DS_SCHED_HS0         (0x3 << 4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE        (0x1 << 4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW          (0x0 << 4)
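The fixed-function scheduler modes are 3-bit fields at shifts 16, 12, and 4, all covered by GEN7_FF_SCHED_MASK. A sketch of the usual read-modify-write; the starting register value is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define GEN7_FF_SCHED_MASK              0x0077070
#define GEN7_FF_TS_SCHED_HW             (0x0 << 16)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE   (0x1 << 12)
#define GEN7_FF_DS_SCHED_LOAD_BALANCE   (0x1 << 4)

int main(void)
{
        uint32_t val = 0x00533050; /* hypothetical GEN7_FF_THREAD_MODE readback */

        val &= ~GEN7_FF_SCHED_MASK; /* clear all three scheduler fields */
        val |= GEN7_FF_TS_SCHED_HW | GEN7_FF_VS_SCHED_LOAD_BALANCE |
               GEN7_FF_DS_SCHED_LOAD_BALANCE;
        printf("GEN7_FF_THREAD_MODE = 0x%08x\n", val);
        return 0;
}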
 
 /*
  * Framebuffer compression (915+ only)
@@ -2865,51 +2880,51 @@ enum i915_power_well_id {
 #define FBC_CFB_BASE           _MMIO(0x3200) /* 4k page aligned */
 #define FBC_LL_BASE            _MMIO(0x3204) /* 4k page aligned */
 #define FBC_CONTROL            _MMIO(0x3208)
-#define   FBC_CTL_EN           (1<<31)
-#define   FBC_CTL_PERIODIC     (1<<30)
+#define   FBC_CTL_EN           (1 << 31)
+#define   FBC_CTL_PERIODIC     (1 << 30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
-#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define   FBC_CTL_C3_IDLE      (1<<13)
+#define   FBC_CTL_UNCOMPRESSIBLE (1 << 14)
+#define   FBC_CTL_C3_IDLE      (1 << 13)
 #define   FBC_CTL_STRIDE_SHIFT (5)
 #define   FBC_CTL_FENCENO_SHIFT        (0)
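FBC_CONTROL mixes single-bit flags with multi-bit fields placed via the *_SHIFT values above. A sketch composing one value; the interval, stride, and fence number are made-up inputs, not recommended settings:

#include <stdint.h>
#include <stdio.h>

#define FBC_CTL_EN              (1u << 31)
#define FBC_CTL_PERIODIC        (1u << 30)
#define FBC_CTL_INTERVAL_SHIFT  (16)
#define FBC_CTL_STRIDE_SHIFT    (5)
#define FBC_CTL_FENCENO_SHIFT   (0)

int main(void)
{
        uint32_t interval = 500, cfb_stride = 32, fence = 2; /* hypothetical */
        uint32_t fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC |
                           (interval << FBC_CTL_INTERVAL_SHIFT) |
                           (cfb_stride << FBC_CTL_STRIDE_SHIFT) |
                           (fence << FBC_CTL_FENCENO_SHIFT);

        printf("FBC_CONTROL = 0x%08x\n", fbc_ctl);
        return 0;
}
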
 #define FBC_COMMAND            _MMIO(0x320c)
-#define   FBC_CMD_COMPRESS     (1<<0)
+#define   FBC_CMD_COMPRESS     (1 << 0)
 #define FBC_STATUS             _MMIO(0x3210)
-#define   FBC_STAT_COMPRESSING (1<<31)
-#define   FBC_STAT_COMPRESSED  (1<<30)
-#define   FBC_STAT_MODIFIED    (1<<29)
+#define   FBC_STAT_COMPRESSING (1 << 31)
+#define   FBC_STAT_COMPRESSED  (1 << 30)
+#define   FBC_STAT_MODIFIED    (1 << 29)
 #define   FBC_STAT_CURRENT_LINE_SHIFT  (0)
 #define FBC_CONTROL2           _MMIO(0x3214)
-#define   FBC_CTL_FENCE_DBL    (0<<4)
-#define   FBC_CTL_IDLE_IMM     (0<<2)
-#define   FBC_CTL_IDLE_FULL    (1<<2)
-#define   FBC_CTL_IDLE_LINE    (2<<2)
-#define   FBC_CTL_IDLE_DEBUG   (3<<2)
-#define   FBC_CTL_CPU_FENCE    (1<<1)
-#define   FBC_CTL_PLANE(plane) ((plane)<<0)
+#define   FBC_CTL_FENCE_DBL    (0 << 4)
+#define   FBC_CTL_IDLE_IMM     (0 << 2)
+#define   FBC_CTL_IDLE_FULL    (1 << 2)
+#define   FBC_CTL_IDLE_LINE    (2 << 2)
+#define   FBC_CTL_IDLE_DEBUG   (3 << 2)
+#define   FBC_CTL_CPU_FENCE    (1 << 1)
+#define   FBC_CTL_PLANE(plane) ((plane) << 0)
 #define FBC_FENCE_OFF          _MMIO(0x3218) /* BSpec typo has 321Bh */
 #define FBC_TAG(i)             _MMIO(0x3300 + (i) * 4)
 
 #define FBC_LL_SIZE            (1536)
 
 #define FBC_LLC_READ_CTRL      _MMIO(0x9044)
-#define   FBC_LLC_FULLY_OPEN   (1<<30)
+#define   FBC_LLC_FULLY_OPEN   (1 << 30)
 
 /* Framebuffer compression for GM45+ */
 #define DPFC_CB_BASE           _MMIO(0x3200)
 #define DPFC_CONTROL           _MMIO(0x3208)
-#define   DPFC_CTL_EN          (1<<31)
-#define   DPFC_CTL_PLANE(plane)        ((plane)<<30)
-#define   IVB_DPFC_CTL_PLANE(plane)    ((plane)<<29)
-#define   DPFC_CTL_FENCE_EN    (1<<29)
-#define   IVB_DPFC_CTL_FENCE_EN        (1<<28)
-#define   DPFC_CTL_PERSISTENT_MODE     (1<<25)
-#define   DPFC_SR_EN           (1<<10)
-#define   DPFC_CTL_LIMIT_1X    (0<<6)
-#define   DPFC_CTL_LIMIT_2X    (1<<6)
-#define   DPFC_CTL_LIMIT_4X    (2<<6)
+#define   DPFC_CTL_EN          (1 << 31)
+#define   DPFC_CTL_PLANE(plane)        ((plane) << 30)
+#define   IVB_DPFC_CTL_PLANE(plane)    ((plane) << 29)
+#define   DPFC_CTL_FENCE_EN    (1 << 29)
+#define   IVB_DPFC_CTL_FENCE_EN        (1 << 28)
+#define   DPFC_CTL_PERSISTENT_MODE     (1 << 25)
+#define   DPFC_SR_EN           (1 << 10)
+#define   DPFC_CTL_LIMIT_1X    (0 << 6)
+#define   DPFC_CTL_LIMIT_2X    (1 << 6)
+#define   DPFC_CTL_LIMIT_4X    (2 << 6)
 #define DPFC_RECOMP_CTL                _MMIO(0x320c)
-#define   DPFC_RECOMP_STALL_EN (1<<27)
+#define   DPFC_RECOMP_STALL_EN (1 << 27)
 #define   DPFC_RECOMP_STALL_WM_SHIFT (16)
 #define   DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
 #define   DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
@@ -2922,12 +2937,12 @@ enum i915_power_well_id {
 #define DPFC_STATUS2           _MMIO(0x3214)
 #define DPFC_FENCE_YOFF                _MMIO(0x3218)
 #define DPFC_CHICKEN           _MMIO(0x3224)
-#define   DPFC_HT_MODIFY       (1<<31)
+#define   DPFC_HT_MODIFY       (1 << 31)
 
 /* Framebuffer compression for Ironlake */
 #define ILK_DPFC_CB_BASE       _MMIO(0x43200)
 #define ILK_DPFC_CONTROL       _MMIO(0x43208)
-#define   FBC_CTL_FALSE_COLOR  (1<<10)
+#define   FBC_CTL_FALSE_COLOR  (1 << 10)
 /* Bits 28-8 are reserved */
 #define   DPFC_RESERVED                (0x1FFFFF00)
 #define ILK_DPFC_RECOMP_CTL    _MMIO(0x4320c)
@@ -2938,15 +2953,15 @@ enum i915_power_well_id {
 #define  BDW_FBC_COMP_SEG_MASK 0xfff
 #define ILK_DPFC_FENCE_YOFF    _MMIO(0x43218)
 #define ILK_DPFC_CHICKEN       _MMIO(0x43224)
-#define   ILK_DPFC_DISABLE_DUMMY0 (1<<8)
-#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1<<23)
+#define   ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1 << 23)
 #define ILK_FBC_RT_BASE                _MMIO(0x2128)
-#define   ILK_FBC_RT_VALID     (1<<0)
-#define   SNB_FBC_FRONT_BUFFER (1<<1)
+#define   ILK_FBC_RT_VALID     (1 << 0)
+#define   SNB_FBC_FRONT_BUFFER (1 << 1)
 
 #define ILK_DISPLAY_CHICKEN1   _MMIO(0x42000)
-#define   ILK_FBCQ_DIS         (1<<22)
-#define          ILK_PABSTRETCH_DIS    (1<<21)
+#define   ILK_FBCQ_DIS         (1 << 22)
+#define          ILK_PABSTRETCH_DIS    (1 << 21)
 
 
 /*
@@ -2955,7 +2970,7 @@ enum i915_power_well_id {
  * The following two registers are of type GTTMMADR
  */
 #define SNB_DPFC_CTL_SA                _MMIO(0x100100)
-#define   SNB_CPU_FENCE_ENABLE (1<<29)
+#define   SNB_CPU_FENCE_ENABLE (1 << 29)
 #define DPFC_CPU_FENCE_OFFSET  _MMIO(0x100104)
 
 /* Framebuffer compression for Ivybridge */
@@ -2965,8 +2980,8 @@ enum i915_power_well_id {
 #define   IPS_ENABLE   (1 << 31)
 
 #define MSG_FBC_REND_STATE     _MMIO(0x50380)
-#define   FBC_REND_NUKE                (1<<2)
-#define   FBC_REND_CACHE_CLEAN (1<<1)
+#define   FBC_REND_NUKE                (1 << 2)
+#define   FBC_REND_CACHE_CLEAN (1 << 1)
 
 /*
  * GPIO regs
@@ -2979,6 +2994,10 @@ enum i915_power_well_id {
 #define GPIOF                  _MMIO(0x5024)
 #define GPIOG                  _MMIO(0x5028)
 #define GPIOH                  _MMIO(0x502c)
+#define GPIOJ                  _MMIO(0x5034)
+#define GPIOK                  _MMIO(0x5038)
+#define GPIOL                  _MMIO(0x503C)
+#define GPIOM                  _MMIO(0x5040)
 # define GPIO_CLOCK_DIR_MASK           (1 << 0)
 # define GPIO_CLOCK_DIR_IN             (0 << 1)
 # define GPIO_CLOCK_DIR_OUT            (1 << 1)
@@ -2995,12 +3014,12 @@ enum i915_power_well_id {
 # define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
 
 #define GMBUS0                 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define   GMBUS_AKSV_SELECT    (1<<11)
-#define   GMBUS_RATE_100KHZ    (0<<8)
-#define   GMBUS_RATE_50KHZ     (1<<8)
-#define   GMBUS_RATE_400KHZ    (2<<8) /* reserved on Pineview */
-#define   GMBUS_RATE_1MHZ      (3<<8) /* reserved on Pineview */
-#define   GMBUS_HOLD_EXT       (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_AKSV_SELECT    (1 << 11)
+#define   GMBUS_RATE_100KHZ    (0 << 8)
+#define   GMBUS_RATE_50KHZ     (1 << 8)
+#define   GMBUS_RATE_400KHZ    (2 << 8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ      (3 << 8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT       (1 << 7) /* 300ns hold time, rsvd on Pineview */
 #define   GMBUS_PIN_DISABLED   0
 #define   GMBUS_PIN_SSC                1
 #define   GMBUS_PIN_VGADDC     2
@@ -3021,36 +3040,36 @@ enum i915_power_well_id {
 
 #define   GMBUS_NUM_PINS       13 /* including 0 */
 #define GMBUS1                 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define   GMBUS_SW_CLR_INT     (1<<31)
-#define   GMBUS_SW_RDY         (1<<30)
-#define   GMBUS_ENT            (1<<29) /* enable timeout */
-#define   GMBUS_CYCLE_NONE     (0<<25)
-#define   GMBUS_CYCLE_WAIT     (1<<25)
-#define   GMBUS_CYCLE_INDEX    (2<<25)
-#define   GMBUS_CYCLE_STOP     (4<<25)
+#define   GMBUS_SW_CLR_INT     (1 << 31)
+#define   GMBUS_SW_RDY         (1 << 30)
+#define   GMBUS_ENT            (1 << 29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE     (0 << 25)
+#define   GMBUS_CYCLE_WAIT     (1 << 25)
+#define   GMBUS_CYCLE_INDEX    (2 << 25)
+#define   GMBUS_CYCLE_STOP     (4 << 25)
 #define   GMBUS_BYTE_COUNT_SHIFT 16
 #define   GMBUS_BYTE_COUNT_MAX   256U
 #define   GMBUS_SLAVE_INDEX_SHIFT 8
 #define   GMBUS_SLAVE_ADDR_SHIFT 1
-#define   GMBUS_SLAVE_READ     (1<<0)
-#define   GMBUS_SLAVE_WRITE    (0<<0)
+#define   GMBUS_SLAVE_READ     (1 << 0)
+#define   GMBUS_SLAVE_WRITE    (0 << 0)
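GMBUS1 packs a whole I2C transaction into one write: cycle type, byte count, 7-bit slave address, and the read/write bit. A hedged user-space sketch composing a hypothetical 128-byte EDID read from address 0x50; the cycle-flag combination is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define GMBUS_SW_RDY            (1u << 30)
#define GMBUS_CYCLE_WAIT        (1u << 25)
#define GMBUS_CYCLE_STOP        (4u << 25)
#define GMBUS_BYTE_COUNT_SHIFT  16
#define GMBUS_SLAVE_ADDR_SHIFT  1
#define GMBUS_SLAVE_READ        (1u << 0)

int main(void)
{
        uint32_t addr = 0x50, len = 128; /* hypothetical EDID read */
        uint32_t cmd = GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
                       (len << GMBUS_BYTE_COUNT_SHIFT) |
                       (addr << GMBUS_SLAVE_ADDR_SHIFT) |
                       GMBUS_SLAVE_READ;

        printf("GMBUS1 = 0x%08x\n", cmd);
        return 0;
}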
 #define GMBUS2                 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define   GMBUS_INUSE          (1<<15)
-#define   GMBUS_HW_WAIT_PHASE  (1<<14)
-#define   GMBUS_STALL_TIMEOUT  (1<<13)
-#define   GMBUS_INT            (1<<12)
-#define   GMBUS_HW_RDY         (1<<11)
-#define   GMBUS_SATOER         (1<<10)
-#define   GMBUS_ACTIVE         (1<<9)
+#define   GMBUS_INUSE          (1 << 15)
+#define   GMBUS_HW_WAIT_PHASE  (1 << 14)
+#define   GMBUS_STALL_TIMEOUT  (1 << 13)
+#define   GMBUS_INT            (1 << 12)
+#define   GMBUS_HW_RDY         (1 << 11)
+#define   GMBUS_SATOER         (1 << 10)
+#define   GMBUS_ACTIVE         (1 << 9)
 #define GMBUS3                 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
 #define GMBUS4                 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-#define   GMBUS_NAK_EN         (1<<3)
-#define   GMBUS_IDLE_EN                (1<<2)
-#define   GMBUS_HW_WAIT_EN     (1<<1)
-#define   GMBUS_HW_RDY_EN      (1<<0)
+#define   GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define   GMBUS_NAK_EN         (1 << 3)
+#define   GMBUS_IDLE_EN                (1 << 2)
+#define   GMBUS_HW_WAIT_EN     (1 << 1)
+#define   GMBUS_HW_RDY_EN      (1 << 0)
 #define GMBUS5                 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define   GMBUS_2BYTE_INDEX_EN (1<<31)
+#define   GMBUS_2BYTE_INDEX_EN (1 << 31)
 
 /*
  * Clock control & power management
@@ -3088,10 +3107,10 @@ enum i915_power_well_id {
 #define   DPLL_P2_CLOCK_DIV_MASK       0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK  0x00ff0000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define   DPLL_LOCK_VLV                        (1<<15)
-#define   DPLL_INTEGRATED_CRI_CLK_VLV  (1<<14)
-#define   DPLL_INTEGRATED_REF_CLK_VLV  (1<<13)
-#define   DPLL_SSC_REF_CLK_CHV         (1<<13)
+#define   DPLL_LOCK_VLV                        (1 << 15)
+#define   DPLL_INTEGRATED_CRI_CLK_VLV  (1 << 14)
+#define   DPLL_INTEGRATED_REF_CLK_VLV  (1 << 13)
+#define   DPLL_SSC_REF_CLK_CHV         (1 << 13)
 #define   DPLL_PORTC_READY_MASK                (0xf << 4)
 #define   DPLL_PORTB_READY_MASK                (0xf)
 
@@ -3101,20 +3120,20 @@ enum i915_power_well_id {
 #define DPIO_PHY_STATUS                        _MMIO(VLV_DISPLAY_BASE + 0x6240)
 #define   DPLL_PORTD_READY_MASK                (0xf)
 #define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define   PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)   (1 << (2*(phy)+(ch)+27))
+#define   PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)   (1 << (2 * (phy) + (ch) + 27))
 #define   PHY_LDO_DELAY_0NS                    0x0
 #define   PHY_LDO_DELAY_200NS                  0x1
 #define   PHY_LDO_DELAY_600NS                  0x2
-#define   PHY_LDO_SEQ_DELAY(delay, phy)                ((delay) << (2*(phy)+23))
-#define   PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)        ((mask) << (8*(phy)+4*(ch)+11))
+#define   PHY_LDO_SEQ_DELAY(delay, phy)                ((delay) << (2 * (phy) + 23))
+#define   PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)        ((mask) << (8 * (phy) + 4 * (ch) + 11))
 #define   PHY_CH_SU_PSR                                0x1
 #define   PHY_CH_DEEP_PSR                      0x7
-#define   PHY_CH_POWER_MODE(mode, phy, ch)     ((mode) << (6*(phy)+3*(ch)+2))
+#define   PHY_CH_POWER_MODE(mode, phy, ch)     ((mode) << (6 * (phy) + 3 * (ch) + 2))
 #define   PHY_COM_LANE_RESET_DEASSERT(phy)     (1 << (phy))
 #define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define   PHY_POWERGOOD(phy)   (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define   PHY_STATUS_CMN_LDO(phy, ch)                   (1 << (6-(6*(phy)+3*(ch))))
-#define   PHY_STATUS_SPLINE_LDO(phy, ch, spline)        (1 << (8-(6*(phy)+3*(ch)+(spline))))
+#define   PHY_POWERGOOD(phy)   (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define   PHY_STATUS_CMN_LDO(phy, ch)                   (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define   PHY_STATUS_SPLINE_LDO(phy, ch, spline)        (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
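PHY_CH_POWER_DOWN_OVRD_EN() and friends compute bit positions directly from the (phy, ch) pair, which is why the spaced-out arithmetic matters here. A worked sketch: phy 1, channel 0 lands on bit 29 for the override enable and on bits 10:8 for the 3-bit power-mode field:

#include <stdio.h>

#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)      (1 << (2 * (phy) + (ch) + 27))
#define PHY_CH_POWER_MODE(mode, phy, ch)        ((mode) << (6 * (phy) + 3 * (ch) + 2))
#define PHY_CH_DEEP_PSR                         0x7

int main(void)
{
        /* 2 * 1 + 0 + 27 = 29; 6 * 1 + 3 * 0 + 2 = 8 */
        printf("OVRD_EN(1, 0)    = 0x%08x\n", PHY_CH_POWER_DOWN_OVRD_EN(1, 0));
        printf("POWER_MODE(deep) = 0x%08x\n",
               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, 1, 0));
        return 0;
}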
 
 /*
  * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3135,7 +3154,7 @@ enum i915_power_well_id {
 /* Ironlake */
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)       (((x)-1) << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)       (((x) - 1) << 9)
 # define DPLL_FPA1_P1_POST_DIV_SHIFT            0
 # define DPLL_FPA1_P1_POST_DIV_MASK             0xff
 
@@ -3224,10 +3243,10 @@ enum i915_power_well_id {
 #define   DPLLA_TEST_M_BYPASS          (1 << 2)
 #define   DPLLA_INPUT_BUFFER_ENABLE    (1 << 0)
 #define D_STATE                _MMIO(0x6104)
-#define  DSTATE_GFX_RESET_I830                 (1<<6)
-#define  DSTATE_PLL_D3_OFF                     (1<<3)
-#define  DSTATE_GFX_CLOCK_GATING               (1<<1)
-#define  DSTATE_DOT_CLOCK_GATING               (1<<0)
+#define  DSTATE_GFX_RESET_I830                 (1 << 6)
+#define  DSTATE_PLL_D3_OFF                     (1 << 3)
+#define  DSTATE_GFX_CLOCK_GATING               (1 << 1)
+#define  DSTATE_DOT_CLOCK_GATING               (1 << 0)
 #define DSPCLK_GATE_D  _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE           (1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE             (1 << 29) /* 965 */
@@ -3344,7 +3363,7 @@ enum i915_power_well_id {
 #define DEUC                   _MMIO(0x6214)          /* CRL only */
 
 #define FW_BLC_SELF_VLV                _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define  FW_CSPWRDWNEN         (1<<15)
+#define  FW_CSPWRDWNEN         (1 << 15)
 
 #define MI_ARB_VLV             _MMIO(VLV_DISPLAY_BASE + 0x6504)
 
@@ -3469,7 +3488,7 @@ enum i915_power_well_id {
 #define HPLLVCO_MOBILE          _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
 
 #define TSC1                   _MMIO(0x11001)
-#define   TSE                  (1<<0)
+#define   TSE                  (1 << 0)
 #define TR1                    _MMIO(0x11006)
 #define TSFS                   _MMIO(0x11020)
 #define   TSFS_SLOPE_MASK      0x0000ff00
@@ -3515,23 +3534,23 @@ enum i915_power_well_id {
 #define   MEMCTL_CMD_CHVID     3
 #define   MEMCTL_CMD_VMMOFF    4
 #define   MEMCTL_CMD_VMMON     5
-#define   MEMCTL_CMD_STS       (1<<12) /* write 1 triggers command, clears
+#define   MEMCTL_CMD_STS       (1 << 12) /* write 1 triggers command, clears
                                           when command complete */
 #define   MEMCTL_FREQ_MASK     0x0f00 /* jitter, from 0-15 */
 #define   MEMCTL_FREQ_SHIFT    8
-#define   MEMCTL_SFCAVM                (1<<7)
+#define   MEMCTL_SFCAVM                (1 << 7)
 #define   MEMCTL_TGT_VID_MASK  0x007f
 #define MEMIHYST               _MMIO(0x1117c)
 #define MEMINTREN              _MMIO(0x11180) /* 16 bits */
-#define   MEMINT_RSEXIT_EN     (1<<8)
-#define   MEMINT_CX_SUPR_EN    (1<<7)
-#define   MEMINT_CONT_BUSY_EN  (1<<6)
-#define   MEMINT_AVG_BUSY_EN   (1<<5)
-#define   MEMINT_EVAL_CHG_EN   (1<<4)
-#define   MEMINT_MON_IDLE_EN   (1<<3)
-#define   MEMINT_UP_EVAL_EN    (1<<2)
-#define   MEMINT_DOWN_EVAL_EN  (1<<1)
-#define   MEMINT_SW_CMD_EN     (1<<0)
+#define   MEMINT_RSEXIT_EN     (1 << 8)
+#define   MEMINT_CX_SUPR_EN    (1 << 7)
+#define   MEMINT_CONT_BUSY_EN  (1 << 6)
+#define   MEMINT_AVG_BUSY_EN   (1 << 5)
+#define   MEMINT_EVAL_CHG_EN   (1 << 4)
+#define   MEMINT_MON_IDLE_EN   (1 << 3)
+#define   MEMINT_UP_EVAL_EN    (1 << 2)
+#define   MEMINT_DOWN_EVAL_EN  (1 << 1)
+#define   MEMINT_SW_CMD_EN     (1 << 0)
 #define MEMINTRSTR             _MMIO(0x11182) /* 16 bits */
 #define   MEM_RSEXIT_MASK      0xc000
 #define   MEM_RSEXIT_SHIFT     14
@@ -3553,26 +3572,26 @@ enum i915_power_well_id {
 #define   MEM_INT_STEER_SMI    2
 #define   MEM_INT_STEER_SCI    3
 #define MEMINTRSTS             _MMIO(0x11184)
-#define   MEMINT_RSEXIT                (1<<7)
-#define   MEMINT_CONT_BUSY     (1<<6)
-#define   MEMINT_AVG_BUSY      (1<<5)
-#define   MEMINT_EVAL_CHG      (1<<4)
-#define   MEMINT_MON_IDLE      (1<<3)
-#define   MEMINT_UP_EVAL       (1<<2)
-#define   MEMINT_DOWN_EVAL     (1<<1)
-#define   MEMINT_SW_CMD                (1<<0)
+#define   MEMINT_RSEXIT                (1 << 7)
+#define   MEMINT_CONT_BUSY     (1 << 6)
+#define   MEMINT_AVG_BUSY      (1 << 5)
+#define   MEMINT_EVAL_CHG      (1 << 4)
+#define   MEMINT_MON_IDLE      (1 << 3)
+#define   MEMINT_UP_EVAL       (1 << 2)
+#define   MEMINT_DOWN_EVAL     (1 << 1)
+#define   MEMINT_SW_CMD                (1 << 0)
 #define MEMMODECTL             _MMIO(0x11190)
-#define   MEMMODE_BOOST_EN     (1<<31)
+#define   MEMMODE_BOOST_EN     (1 << 31)
 #define   MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
 #define   MEMMODE_BOOST_FREQ_SHIFT 24
 #define   MEMMODE_IDLE_MODE_MASK 0x00030000
 #define   MEMMODE_IDLE_MODE_SHIFT 16
 #define   MEMMODE_IDLE_MODE_EVAL 0
 #define   MEMMODE_IDLE_MODE_CONT 1
-#define   MEMMODE_HWIDLE_EN    (1<<15)
-#define   MEMMODE_SWMODE_EN    (1<<14)
-#define   MEMMODE_RCLK_GATE    (1<<13)
-#define   MEMMODE_HW_UPDATE    (1<<12)
+#define   MEMMODE_HWIDLE_EN    (1 << 15)
+#define   MEMMODE_SWMODE_EN    (1 << 14)
+#define   MEMMODE_RCLK_GATE    (1 << 13)
+#define   MEMMODE_HW_UPDATE    (1 << 12)
 #define   MEMMODE_FSTART_MASK  0x00000f00 /* starting jitter, 0-15 */
 #define   MEMMODE_FSTART_SHIFT 8
 #define   MEMMODE_FMAX_MASK    0x000000f0 /* max jitter, 0-15 */
@@ -3586,8 +3605,8 @@ enum i915_power_well_id {
 #define   SWMEMCMD_TARVID      (3 << 13)
 #define   SWMEMCMD_VRM_OFF     (4 << 13)
 #define   SWMEMCMD_VRM_ON      (5 << 13)
-#define   CMDSTS               (1<<12)
-#define   SFCAVM               (1<<11)
+#define   CMDSTS               (1 << 12)
+#define   SFCAVM               (1 << 11)
 #define   SWFREQ_MASK          0x0380 /* P0-7 */
 #define   SWFREQ_SHIFT         7
 #define   TARVID_MASK          0x001f
@@ -3596,49 +3615,49 @@ enum i915_power_well_id {
 #define RCUPEI                 _MMIO(0x111b0)
 #define RCDNEI                 _MMIO(0x111b4)
 #define RSTDBYCTL              _MMIO(0x111b8)
-#define   RS1EN                        (1<<31)
-#define   RS2EN                        (1<<30)
-#define   RS3EN                        (1<<29)
-#define   D3RS3EN              (1<<28) /* Display D3 imlies RS3 */
-#define   SWPROMORSX           (1<<27) /* RSx promotion timers ignored */
-#define   RCWAKERW             (1<<26) /* Resetwarn from PCH causes wakeup */
-#define   DPRSLPVREN           (1<<25) /* Fast voltage ramp enable */
-#define   GFXTGHYST            (1<<24) /* Hysteresis to allow trunk gating */
-#define   RCX_SW_EXIT          (1<<23) /* Leave RSx and prevent re-entry */
-#define   RSX_STATUS_MASK      (7<<20)
-#define   RSX_STATUS_ON                (0<<20)
-#define   RSX_STATUS_RC1       (1<<20)
-#define   RSX_STATUS_RC1E      (2<<20)
-#define   RSX_STATUS_RS1       (3<<20)
-#define   RSX_STATUS_RS2       (4<<20) /* aka rc6 */
-#define   RSX_STATUS_RSVD      (5<<20) /* deep rc6 unsupported on ilk */
-#define   RSX_STATUS_RS3       (6<<20) /* rs3 unsupported on ilk */
-#define   RSX_STATUS_RSVD2     (7<<20)
-#define   UWRCRSXE             (1<<19) /* wake counter limit prevents rsx */
-#define   RSCRP                        (1<<18) /* rs requests control on rs1/2 reqs */
-#define   JRSC                 (1<<17) /* rsx coupled to cpu c-state */
-#define   RS2INC0              (1<<16) /* allow rs2 in cpu c0 */
-#define   RS1CONTSAV_MASK      (3<<14)
-#define   RS1CONTSAV_NO_RS1    (0<<14) /* rs1 doesn't save/restore context */
-#define   RS1CONTSAV_RSVD      (1<<14)
-#define   RS1CONTSAV_SAVE_RS1  (2<<14) /* rs1 saves context */
-#define   RS1CONTSAV_FULL_RS1  (3<<14) /* rs1 saves and restores context */
-#define   NORMSLEXLAT_MASK     (3<<12)
-#define   SLOW_RS123           (0<<12)
-#define   SLOW_RS23            (1<<12)
-#define   SLOW_RS3             (2<<12)
-#define   NORMAL_RS123         (3<<12)
-#define   RCMODE_TIMEOUT       (1<<11) /* 0 is eval interval method */
-#define   IMPROMOEN            (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define   RCENTSYNC            (1<<9) /* rs coupled to cpu c-state (3/6/7) */
-#define   STATELOCK            (1<<7) /* locked to rs_cstate if 0 */
-#define   RS_CSTATE_MASK       (3<<4)
-#define   RS_CSTATE_C367_RS1   (0<<4)
-#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
-#define   RS_CSTATE_RSVD       (2<<4)
-#define   RS_CSTATE_C367_RS2   (3<<4)
-#define   REDSAVES             (1<<3) /* no context save if was idle during rs0 */
-#define   REDRESTORES          (1<<2) /* no restore if was idle during rs0 */
+#define   RS1EN                        (1 << 31)
+#define   RS2EN                        (1 << 30)
+#define   RS3EN                        (1 << 29)
+#define   D3RS3EN              (1 << 28) /* Display D3 implies RS3 */
+#define   SWPROMORSX           (1 << 27) /* RSx promotion timers ignored */
+#define   RCWAKERW             (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN           (1 << 25) /* Fast voltage ramp enable */
+#define   GFXTGHYST            (1 << 24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT          (1 << 23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK      (7 << 20)
+#define   RSX_STATUS_ON                (0 << 20)
+#define   RSX_STATUS_RC1       (1 << 20)
+#define   RSX_STATUS_RC1E      (2 << 20)
+#define   RSX_STATUS_RS1       (3 << 20)
+#define   RSX_STATUS_RS2       (4 << 20) /* aka rc6 */
+#define   RSX_STATUS_RSVD      (5 << 20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3       (6 << 20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2     (7 << 20)
+#define   UWRCRSXE             (1 << 19) /* wake counter limit prevents rsx */
+#define   RSCRP                        (1 << 18) /* rs requests control on rs1/2 reqs */
+#define   JRSC                 (1 << 17) /* rsx coupled to cpu c-state */
+#define   RS2INC0              (1 << 16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK      (3 << 14)
+#define   RS1CONTSAV_NO_RS1    (0 << 14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD      (1 << 14)
+#define   RS1CONTSAV_SAVE_RS1  (2 << 14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1  (3 << 14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK     (3 << 12)
+#define   SLOW_RS123           (0 << 12)
+#define   SLOW_RS23            (1 << 12)
+#define   SLOW_RS3             (2 << 12)
+#define   NORMAL_RS123         (3 << 12)
+#define   RCMODE_TIMEOUT       (1 << 11) /* 0 is eval interval method */
+#define   IMPROMOEN            (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC            (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK            (1 << 7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK       (3 << 4)
+#define   RS_CSTATE_C367_RS1   (0 << 4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define   RS_CSTATE_RSVD       (2 << 4)
+#define   RS_CSTATE_C367_RS2   (3 << 4)
+#define   REDSAVES             (1 << 3) /* no context save if was idle during rs0 */
+#define   REDRESTORES          (1 << 2) /* no restore if was idle during rs0 */
 #define VIDCTL                 _MMIO(0x111c0)
 #define VIDSTS                 _MMIO(0x111c8)
 #define VIDSTART               _MMIO(0x111cc) /* 8 bits */
@@ -3647,7 +3666,7 @@ enum i915_power_well_id {
 #define   MEMSTAT_VID_SHIFT    8
 #define   MEMSTAT_PSTATE_MASK  0x00f8
 #define   MEMSTAT_PSTATE_SHIFT  3
-#define   MEMSTAT_MON_ACTV     (1<<2)
+#define   MEMSTAT_MON_ACTV     (1 << 2)
 #define   MEMSTAT_SRC_CTL_MASK 0x0003
 #define   MEMSTAT_SRC_CTL_CORE 0
 #define   MEMSTAT_SRC_CTL_TRB  1
@@ -3656,7 +3675,7 @@ enum i915_power_well_id {
 #define RCPREVBSYTUPAVG                _MMIO(0x113b8)
 #define RCPREVBSYTDNAVG                _MMIO(0x113bc)
 #define PMMISC                 _MMIO(0x11214)
-#define   MCPPCE_EN            (1<<0) /* enable PM_MSG from PCH->MPC */
+#define   MCPPCE_EN            (1 << 0) /* enable PM_MSG from PCH->MPC */
 #define SDEW                   _MMIO(0x1124c)
 #define CSIEW0                 _MMIO(0x11250)
 #define CSIEW1                 _MMIO(0x11254)
@@ -3673,8 +3692,8 @@ enum i915_power_well_id {
 #define RPPREVBSYTUPAVG                _MMIO(0x113b8)
 #define RPPREVBSYTDNAVG                _MMIO(0x113bc)
 #define ECR                    _MMIO(0x11600)
-#define   ECR_GPFE             (1<<31)
-#define   ECR_IMONE            (1<<30)
+#define   ECR_GPFE             (1 << 31)
+#define   ECR_IMONE            (1 << 30)
 #define   ECR_CAP_MASK         0x0000001f /* Event range, 0-31 */
 #define OGW0                   _MMIO(0x11608)
 #define OGW1                   _MMIO(0x1160c)
@@ -3781,11 +3800,11 @@ enum {
        FAULT_AND_CONTINUE /* Unsupported */
 };
 
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
 #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
 
 #define GEN8_CTX_ID_SHIFT 32
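The GEN8_CTX_* bits and shifts describe a 64-bit execlists context descriptor. A sketch of packing one; the addressing-mode encoding and the context id are assumptions for illustration, not values taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_VALID                  (1 << 0)
#define GEN8_CTX_PRIVILEGE              (1 << 8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT  3
#define GEN8_CTX_ID_SHIFT               32

int main(void)
{
        uint64_t addressing_mode = 3; /* hypothetical addressing-mode encoding */
        uint64_t ctx_id = 42;         /* hypothetical context id */
        uint64_t desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE |
                        (addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT) |
                        (ctx_id << GEN8_CTX_ID_SHIFT);

        printf("context descriptor = 0x%016llx\n", (unsigned long long)desc);
        return 0;
}
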
@@ -3807,7 +3826,7 @@ enum {
 
 #define OVADD                  _MMIO(0x30000)
 #define DOVSTA                 _MMIO(0x30008)
-#define OC_BUF                 (0x3<<20)
+#define OC_BUF                 (0x3 << 20)
 #define OGAMC5                 _MMIO(0x30010)
 #define OGAMC4                 _MMIO(0x30014)
 #define OGAMC3                 _MMIO(0x30018)
@@ -3975,64 +3994,64 @@ enum {
 /* VLV eDP PSR registers */
 #define _PSRCTLA                               (VLV_DISPLAY_BASE + 0x60090)
 #define _PSRCTLB                               (VLV_DISPLAY_BASE + 0x61090)
-#define  VLV_EDP_PSR_ENABLE                    (1<<0)
-#define  VLV_EDP_PSR_RESET                     (1<<1)
-#define  VLV_EDP_PSR_MODE_MASK                 (7<<2)
-#define  VLV_EDP_PSR_MODE_HW_TIMER             (1<<3)
-#define  VLV_EDP_PSR_MODE_SW_TIMER             (1<<2)
-#define  VLV_EDP_PSR_SINGLE_FRAME_UPDATE       (1<<7)
-#define  VLV_EDP_PSR_ACTIVE_ENTRY              (1<<8)
-#define  VLV_EDP_PSR_SRC_TRANSMITTER_STATE     (1<<9)
-#define  VLV_EDP_PSR_DBL_FRAME                 (1<<10)
-#define  VLV_EDP_PSR_FRAME_COUNT_MASK          (0xff<<16)
+#define  VLV_EDP_PSR_ENABLE                    (1 << 0)
+#define  VLV_EDP_PSR_RESET                     (1 << 1)
+#define  VLV_EDP_PSR_MODE_MASK                 (7 << 2)
+#define  VLV_EDP_PSR_MODE_HW_TIMER             (1 << 3)
+#define  VLV_EDP_PSR_MODE_SW_TIMER             (1 << 2)
+#define  VLV_EDP_PSR_SINGLE_FRAME_UPDATE       (1 << 7)
+#define  VLV_EDP_PSR_ACTIVE_ENTRY              (1 << 8)
+#define  VLV_EDP_PSR_SRC_TRANSMITTER_STATE     (1 << 9)
+#define  VLV_EDP_PSR_DBL_FRAME                 (1 << 10)
+#define  VLV_EDP_PSR_FRAME_COUNT_MASK          (0xff << 16)
 #define  VLV_EDP_PSR_IDLE_FRAME_SHIFT          16
 #define VLV_PSRCTL(pipe)       _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
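VLV_PSRCTL() shows the _MMIO_PIPE() pattern: one macro picks between per-pipe register instances. A sketch that models _MMIO_PIPE() as a simple pipe-indexed choice; the VLV_DISPLAY_BASE value is assumed here purely so the arithmetic prints something concrete:

#include <stdio.h>

#define VLV_DISPLAY_BASE        0x180000 /* assumed for illustration */
#define _PSRCTLA                (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB                (VLV_DISPLAY_BASE + 0x61090)

/* Model _MMIO_PIPE() as picking between the two offsets by pipe index. */
#define VLV_PSRCTL(pipe)        ((pipe) == 0 ? _PSRCTLA : _PSRCTLB)

int main(void)
{
        printf("pipe A PSR ctl: 0x%x\n", VLV_PSRCTL(0));
        printf("pipe B PSR ctl: 0x%x\n", VLV_PSRCTL(1));
        return 0;
}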
 
 #define _VSCSDPA                       (VLV_DISPLAY_BASE + 0x600a0)
 #define _VSCSDPB                       (VLV_DISPLAY_BASE + 0x610a0)
-#define  VLV_EDP_PSR_SDP_FREQ_MASK     (3<<30)
-#define  VLV_EDP_PSR_SDP_FREQ_ONCE     (1<<31)
-#define  VLV_EDP_PSR_SDP_FREQ_EVFRAME  (1<<30)
+#define  VLV_EDP_PSR_SDP_FREQ_MASK     (3 << 30)
+#define  VLV_EDP_PSR_SDP_FREQ_ONCE     (1 << 31)
+#define  VLV_EDP_PSR_SDP_FREQ_EVFRAME  (1 << 30)
 #define VLV_VSCSDP(pipe)       _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
 
 #define _PSRSTATA                      (VLV_DISPLAY_BASE + 0x60094)
 #define _PSRSTATB                      (VLV_DISPLAY_BASE + 0x61094)
-#define  VLV_EDP_PSR_LAST_STATE_MASK   (7<<3)
+#define  VLV_EDP_PSR_LAST_STATE_MASK   (7 << 3)
 #define  VLV_EDP_PSR_CURR_STATE_MASK   7
-#define  VLV_EDP_PSR_DISABLED          (0<<0)
-#define  VLV_EDP_PSR_INACTIVE          (1<<0)
-#define  VLV_EDP_PSR_IN_TRANS_TO_ACTIVE        (2<<0)
-#define  VLV_EDP_PSR_ACTIVE_NORFB_UP   (3<<0)
-#define  VLV_EDP_PSR_ACTIVE_SF_UPDATE  (4<<0)
-#define  VLV_EDP_PSR_EXIT              (5<<0)
-#define  VLV_EDP_PSR_IN_TRANS          (1<<7)
+#define  VLV_EDP_PSR_DISABLED          (0 << 0)
+#define  VLV_EDP_PSR_INACTIVE          (1 << 0)
+#define  VLV_EDP_PSR_IN_TRANS_TO_ACTIVE        (2 << 0)
+#define  VLV_EDP_PSR_ACTIVE_NORFB_UP   (3 << 0)
+#define  VLV_EDP_PSR_ACTIVE_SF_UPDATE  (4 << 0)
+#define  VLV_EDP_PSR_EXIT              (5 << 0)
+#define  VLV_EDP_PSR_IN_TRANS          (1 << 7)
 #define VLV_PSRSTAT(pipe)      _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
 
 /* HSW+ eDP PSR registers */
 #define HSW_EDP_PSR_BASE       0x64800
 #define BDW_EDP_PSR_BASE       0x6f800
 #define EDP_PSR_CTL                            _MMIO(dev_priv->psr_mmio_base + 0)
-#define   EDP_PSR_ENABLE                       (1<<31)
-#define   BDW_PSR_SINGLE_FRAME                 (1<<30)
-#define   EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK  (1<<29) /* SW can't modify */
-#define   EDP_PSR_LINK_STANDBY                 (1<<27)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES  (0<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES  (1<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES  (2<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES  (3<<25)
+#define   EDP_PSR_ENABLE                       (1 << 31)
+#define   BDW_PSR_SINGLE_FRAME                 (1 << 30)
+#define   EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK  (1 << 29) /* SW can't modify */
+#define   EDP_PSR_LINK_STANDBY                 (1 << 27)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES  (0 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES  (1 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES  (2 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES  (3 << 25)
 #define   EDP_PSR_MAX_SLEEP_TIME_SHIFT         20
-#define   EDP_PSR_SKIP_AUX_EXIT                        (1<<12)
-#define   EDP_PSR_TP1_TP2_SEL                  (0<<11)
-#define   EDP_PSR_TP1_TP3_SEL                  (1<<11)
-#define   EDP_PSR_TP2_TP3_TIME_500us           (0<<8)
-#define   EDP_PSR_TP2_TP3_TIME_100us           (1<<8)
-#define   EDP_PSR_TP2_TP3_TIME_2500us          (2<<8)
-#define   EDP_PSR_TP2_TP3_TIME_0us             (3<<8)
-#define   EDP_PSR_TP1_TIME_500us               (0<<4)
-#define   EDP_PSR_TP1_TIME_100us               (1<<4)
-#define   EDP_PSR_TP1_TIME_2500us              (2<<4)
-#define   EDP_PSR_TP1_TIME_0us                 (3<<4)
+#define   EDP_PSR_SKIP_AUX_EXIT                        (1 << 12)
+#define   EDP_PSR_TP1_TP2_SEL                  (0 << 11)
+#define   EDP_PSR_TP1_TP3_SEL                  (1 << 11)
+#define   EDP_PSR_TP2_TP3_TIME_500us           (0 << 8)
+#define   EDP_PSR_TP2_TP3_TIME_100us           (1 << 8)
+#define   EDP_PSR_TP2_TP3_TIME_2500us          (2 << 8)
+#define   EDP_PSR_TP2_TP3_TIME_0us             (3 << 8)
+#define   EDP_PSR_TP1_TIME_500us               (0 << 4)
+#define   EDP_PSR_TP1_TIME_100us               (1 << 4)
+#define   EDP_PSR_TP1_TIME_2500us              (2 << 4)
+#define   EDP_PSR_TP1_TIME_0us                 (3 << 4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT             0
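EDP_PSR_CTL combines enable bits, TP training-pattern timing selects, and shifted counters. A sketch composing one value; the sleep-time and idle-frame numbers are hypothetical tuning inputs:

#include <stdint.h>
#include <stdio.h>

#define EDP_PSR_ENABLE                  (1u << 31)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT    20
#define EDP_PSR_TP1_TP3_SEL             (1u << 11)
#define EDP_PSR_TP2_TP3_TIME_100us      (1u << 8)
#define EDP_PSR_TP1_TIME_100us          (1u << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT        0

int main(void)
{
        uint32_t max_sleep = 5, idle_frames = 2; /* hypothetical tuning */
        uint32_t val = EDP_PSR_ENABLE |
                       (max_sleep << EDP_PSR_MAX_SLEEP_TIME_SHIFT) |
                       EDP_PSR_TP1_TP3_SEL |
                       EDP_PSR_TP2_TP3_TIME_100us | EDP_PSR_TP1_TIME_100us |
                       (idle_frames << EDP_PSR_IDLE_FRAME_SHIFT);

        printf("EDP_PSR_CTL = 0x%08x\n", val);
        return 0;
}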
 
 /* Bspec claims those aren't shifted but stay at 0x64800 */
@@ -4052,55 +4071,55 @@ enum {
 #define EDP_PSR_AUX_DATA(i)                    _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
 
 #define EDP_PSR_STATUS                         _MMIO(dev_priv->psr_mmio_base + 0x40)
-#define   EDP_PSR_STATUS_STATE_MASK            (7<<29)
-#define   EDP_PSR_STATUS_STATE_IDLE            (0<<29)
-#define   EDP_PSR_STATUS_STATE_SRDONACK                (1<<29)
-#define   EDP_PSR_STATUS_STATE_SRDENT          (2<<29)
-#define   EDP_PSR_STATUS_STATE_BUFOFF          (3<<29)
-#define   EDP_PSR_STATUS_STATE_BUFON           (4<<29)
-#define   EDP_PSR_STATUS_STATE_AUXACK          (5<<29)
-#define   EDP_PSR_STATUS_STATE_SRDOFFACK       (6<<29)
-#define   EDP_PSR_STATUS_LINK_MASK             (3<<26)
-#define   EDP_PSR_STATUS_LINK_FULL_OFF         (0<<26)
-#define   EDP_PSR_STATUS_LINK_FULL_ON          (1<<26)
-#define   EDP_PSR_STATUS_LINK_STANDBY          (2<<26)
+#define   EDP_PSR_STATUS_STATE_MASK            (7 << 29)
+#define   EDP_PSR_STATUS_STATE_IDLE            (0 << 29)
+#define   EDP_PSR_STATUS_STATE_SRDONACK                (1 << 29)
+#define   EDP_PSR_STATUS_STATE_SRDENT          (2 << 29)
+#define   EDP_PSR_STATUS_STATE_BUFOFF          (3 << 29)
+#define   EDP_PSR_STATUS_STATE_BUFON           (4 << 29)
+#define   EDP_PSR_STATUS_STATE_AUXACK          (5 << 29)
+#define   EDP_PSR_STATUS_STATE_SRDOFFACK       (6 << 29)
+#define   EDP_PSR_STATUS_LINK_MASK             (3 << 26)
+#define   EDP_PSR_STATUS_LINK_FULL_OFF         (0 << 26)
+#define   EDP_PSR_STATUS_LINK_FULL_ON          (1 << 26)
+#define   EDP_PSR_STATUS_LINK_STANDBY          (2 << 26)
 #define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
 #define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK  0x1f
 #define   EDP_PSR_STATUS_COUNT_SHIFT           16
 #define   EDP_PSR_STATUS_COUNT_MASK            0xf
-#define   EDP_PSR_STATUS_AUX_ERROR             (1<<15)
-#define   EDP_PSR_STATUS_AUX_SENDING           (1<<12)
-#define   EDP_PSR_STATUS_SENDING_IDLE          (1<<9)
-#define   EDP_PSR_STATUS_SENDING_TP2_TP3       (1<<8)
-#define   EDP_PSR_STATUS_SENDING_TP1           (1<<4)
+#define   EDP_PSR_STATUS_AUX_ERROR             (1 << 15)
+#define   EDP_PSR_STATUS_AUX_SENDING           (1 << 12)
+#define   EDP_PSR_STATUS_SENDING_IDLE          (1 << 9)
+#define   EDP_PSR_STATUS_SENDING_TP2_TP3       (1 << 8)
+#define   EDP_PSR_STATUS_SENDING_TP1           (1 << 4)
 #define   EDP_PSR_STATUS_IDLE_MASK             0xf
 
 #define EDP_PSR_PERF_CNT               _MMIO(dev_priv->psr_mmio_base + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK                0xffffff
 
 #define EDP_PSR_DEBUG                          _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
-#define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1<<28)
-#define   EDP_PSR_DEBUG_MASK_LPSP              (1<<27)
-#define   EDP_PSR_DEBUG_MASK_MEMUP             (1<<26)
-#define   EDP_PSR_DEBUG_MASK_HPD               (1<<25)
-#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1<<16)
-#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
+#define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1 << 28)
+#define   EDP_PSR_DEBUG_MASK_LPSP              (1 << 27)
+#define   EDP_PSR_DEBUG_MASK_MEMUP             (1 << 26)
+#define   EDP_PSR_DEBUG_MASK_HPD               (1 << 25)
+#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16)
+#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
 
 #define EDP_PSR2_CTL                   _MMIO(0x6f900)
-#define   EDP_PSR2_ENABLE              (1<<31)
-#define   EDP_SU_TRACK_ENABLE          (1<<30)
-#define   EDP_Y_COORDINATE_VALID       (1<<26) /* GLK and CNL+ */
-#define   EDP_Y_COORDINATE_ENABLE      (1<<25) /* GLK and CNL+ */
-#define   EDP_MAX_SU_DISABLE_TIME(t)   ((t)<<20)
-#define   EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define   EDP_PSR2_TP2_TIME_500                (0<<8)
-#define   EDP_PSR2_TP2_TIME_100                (1<<8)
-#define   EDP_PSR2_TP2_TIME_2500       (2<<8)
-#define   EDP_PSR2_TP2_TIME_50         (3<<8)
-#define   EDP_PSR2_TP2_TIME_MASK       (3<<8)
+#define   EDP_PSR2_ENABLE              (1 << 31)
+#define   EDP_SU_TRACK_ENABLE          (1 << 30)
+#define   EDP_Y_COORDINATE_VALID       (1 << 26) /* GLK and CNL+ */
+#define   EDP_Y_COORDINATE_ENABLE      (1 << 25) /* GLK and CNL+ */
+#define   EDP_MAX_SU_DISABLE_TIME(t)   ((t) << 20)
+#define   EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define   EDP_PSR2_TP2_TIME_500us      (0 << 8)
+#define   EDP_PSR2_TP2_TIME_100us      (1 << 8)
+#define   EDP_PSR2_TP2_TIME_2500us     (2 << 8)
+#define   EDP_PSR2_TP2_TIME_50us       (3 << 8)
+#define   EDP_PSR2_TP2_TIME_MASK       (3 << 8)
 #define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define   EDP_PSR2_FRAME_BEFORE_SU_MASK        (0xf<<4)
-#define   EDP_PSR2_FRAME_BEFORE_SU(a)  ((a)<<4)
+#define   EDP_PSR2_FRAME_BEFORE_SU_MASK        (0xf << 4)
+#define   EDP_PSR2_FRAME_BEFORE_SU(a)  ((a) << 4)
 #define   EDP_PSR2_IDLE_FRAME_MASK     0xf
 #define   EDP_PSR2_IDLE_FRAME_SHIFT    0
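EDP_PSR2_CTL follows the same pattern, with a parameterized setter for the frames-before-selective-update field. A sketch; the frame counts are made-up values:

#include <stdint.h>
#include <stdio.h>

#define EDP_PSR2_ENABLE                 (1u << 31)
#define EDP_SU_TRACK_ENABLE             (1u << 30)
#define EDP_PSR2_TP2_TIME_100us         (1u << 8)
#define EDP_PSR2_FRAME_BEFORE_SU(a)     ((a) << 4)

int main(void)
{
        uint32_t frames_before_su = 2, idle_frames = 6; /* hypothetical */
        uint32_t val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE |
                       EDP_PSR2_TP2_TIME_100us |
                       EDP_PSR2_FRAME_BEFORE_SU(frames_before_su) |
                       idle_frames; /* idle frame count lives in bits 3:0 */

        printf("EDP_PSR2_CTL = 0x%08x\n", val);
        return 0;
}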
 
@@ -4128,7 +4147,7 @@ enum {
 #define  PSR_EVENT_PSR_DISABLE                 (1 << 0)
 
 #define EDP_PSR2_STATUS                        _MMIO(0x6f940)
-#define EDP_PSR2_STATUS_STATE_MASK     (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_MASK     (0xf << 28)
 #define EDP_PSR2_STATUS_STATE_SHIFT    28
 
 /* VGA port control */
@@ -4136,47 +4155,48 @@ enum {
 #define PCH_ADPA                _MMIO(0xe1100)
 #define VLV_ADPA               _MMIO(VLV_DISPLAY_BASE + 0x61100)
 
-#define   ADPA_DAC_ENABLE      (1<<31)
+#define   ADPA_DAC_ENABLE      (1 << 31)
 #define   ADPA_DAC_DISABLE     0
-#define   ADPA_PIPE_SELECT_MASK        (1<<30)
-#define   ADPA_PIPE_A_SELECT   0
-#define   ADPA_PIPE_B_SELECT   (1<<30)
-#define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_PIPE_SEL_SHIFT          30
+#define   ADPA_PIPE_SEL_MASK           (1 << 30)
+#define   ADPA_PIPE_SEL(pipe)          ((pipe) << 30)
+#define   ADPA_PIPE_SEL_SHIFT_CPT      29
+#define   ADPA_PIPE_SEL_MASK_CPT       (3 << 29)
+#define   ADPA_PIPE_SEL_CPT(pipe)      ((pipe) << 29)
 #define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
-#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
-#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
-#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
-#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
-#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
-#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
-#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
-#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
-#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
-#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
-#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
-#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
-#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0 << 24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3 << 24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2 << 24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1 << 23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0 << 22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1 << 22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0 << 21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1 << 21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0 << 20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1 << 20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0 << 17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1 << 17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
+#define   ADPA_USE_VGA_HVPOLARITY (1 << 15)
 #define   ADPA_SETS_HVPOLARITY 0
-#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_VSYNC_CNTL_DISABLE (1 << 10)
 #define   ADPA_VSYNC_CNTL_ENABLE 0
-#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_HSYNC_CNTL_DISABLE (1 << 11)
 #define   ADPA_HSYNC_CNTL_ENABLE 0
-#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define   ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
 #define   ADPA_VSYNC_ACTIVE_LOW        0
-#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define   ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
 #define   ADPA_HSYNC_ACTIVE_LOW        0
-#define   ADPA_DPMS_MASK       (~(3<<10))
-#define   ADPA_DPMS_ON         (0<<10)
-#define   ADPA_DPMS_SUSPEND    (1<<10)
-#define   ADPA_DPMS_STANDBY    (2<<10)
-#define   ADPA_DPMS_OFF                (3<<10)
+#define   ADPA_DPMS_MASK       (~(3 << 10))
+#define   ADPA_DPMS_ON         (0 << 10)
+#define   ADPA_DPMS_SUSPEND    (1 << 10)
+#define   ADPA_DPMS_STANDBY    (2 << 10)
+#define   ADPA_DPMS_OFF                (3 << 10)
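The rewritten ADPA pipe-select macros replace the fixed pipe-B bit with a shift/mask/setter trio, mirroring the CPT layout that uses two bits at shift 29. A sketch of the clear-then-set pattern the trio enables; adpa_for_pipe() and has_cpt_pch are illustrative names, not driver API:

#include <stdint.h>
#include <stdio.h>

#define ADPA_DAC_ENABLE         (1u << 31)
#define ADPA_PIPE_SEL_MASK      (1u << 30)
#define ADPA_PIPE_SEL(pipe)     ((uint32_t)(pipe) << 30)
#define ADPA_PIPE_SEL_MASK_CPT  (3u << 29)
#define ADPA_PIPE_SEL_CPT(pipe) ((uint32_t)(pipe) << 29)

/* Route the CRT DAC to a pipe; has_cpt_pch picks which field layout applies. */
static uint32_t adpa_for_pipe(uint32_t adpa, int pipe, int has_cpt_pch)
{
        if (has_cpt_pch) {
                adpa &= ~ADPA_PIPE_SEL_MASK_CPT;
                adpa |= ADPA_PIPE_SEL_CPT(pipe);
        } else {
                adpa &= ~ADPA_PIPE_SEL_MASK;
                adpa |= ADPA_PIPE_SEL(pipe);
        }
        return adpa;
}

int main(void)
{
        printf("pre-CPT, pipe B: 0x%08x\n", adpa_for_pipe(ADPA_DAC_ENABLE, 1, 0));
        printf("CPT, pipe B:     0x%08x\n", adpa_for_pipe(ADPA_DAC_ENABLE, 1, 1));
        return 0;
}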
 
 
 /* Hotplug control (945+ only) */
@@ -4301,9 +4321,9 @@ enum {
 
 /* Gen 3 SDVO bits: */
 #define   SDVO_ENABLE                          (1 << 31)
-#define   SDVO_PIPE_SEL(pipe)                  ((pipe) << 30)
+#define   SDVO_PIPE_SEL_SHIFT                  30
 #define   SDVO_PIPE_SEL_MASK                   (1 << 30)
-#define   SDVO_PIPE_B_SELECT                   (1 << 30)
+#define   SDVO_PIPE_SEL(pipe)                  ((pipe) << 30)
 #define   SDVO_STALL_SELECT                    (1 << 29)
 #define   SDVO_INTERRUPT_ENABLE                        (1 << 26)
 /*
@@ -4343,12 +4363,14 @@ enum {
 #define   SDVOB_HOTPLUG_ENABLE                 (1 << 23) /* SDVO only */
 
 /* Gen 6 (CPT) SDVO/HDMI bits: */
-#define   SDVO_PIPE_SEL_CPT(pipe)              ((pipe) << 29)
+#define   SDVO_PIPE_SEL_SHIFT_CPT              29
 #define   SDVO_PIPE_SEL_MASK_CPT               (3 << 29)
+#define   SDVO_PIPE_SEL_CPT(pipe)              ((pipe) << 29)
 
 /* CHV SDVO/HDMI bits: */
-#define   SDVO_PIPE_SEL_CHV(pipe)              ((pipe) << 24)
+#define   SDVO_PIPE_SEL_SHIFT_CHV              24
 #define   SDVO_PIPE_SEL_MASK_CHV               (3 << 24)
+#define   SDVO_PIPE_SEL_CHV(pipe)              ((pipe) << 24)
 
 
 /* DVO port control */
@@ -4359,7 +4381,9 @@ enum {
 #define _DVOC                  0x61160
 #define DVOC                   _MMIO(_DVOC)
 #define   DVO_ENABLE                   (1 << 31)
-#define   DVO_PIPE_B_SELECT            (1 << 30)
+#define   DVO_PIPE_SEL_SHIFT           30
+#define   DVO_PIPE_SEL_MASK            (1 << 30)
+#define   DVO_PIPE_SEL(pipe)           ((pipe) << 30)
 #define   DVO_PIPE_STALL_UNUSED                (0 << 28)
 #define   DVO_PIPE_STALL               (1 << 28)
 #define   DVO_PIPE_STALL_TV            (2 << 28)
@@ -4381,7 +4405,7 @@ enum {
 #define   DVO_BLANK_ACTIVE_HIGH                (1 << 2)
 #define   DVO_OUTPUT_CSTATE_PIXELS     (1 << 1)        /* SDG only */
 #define   DVO_OUTPUT_SOURCE_SIZE_PIXELS        (1 << 0)        /* SDG only */
-#define   DVO_PRESERVE_MASK            (0x7<<24)
+#define   DVO_PRESERVE_MASK            (0x7 << 24)
 #define DVOA_SRCDIM            _MMIO(0x61124)
 #define DVOB_SRCDIM            _MMIO(0x61144)
 #define DVOC_SRCDIM            _MMIO(0x61164)
@@ -4396,9 +4420,12 @@ enum {
  */
 #define   LVDS_PORT_EN                 (1 << 31)
 /* Selects pipe B for LVDS data.  Must be set on pre-965. */
-#define   LVDS_PIPEB_SELECT            (1 << 30)
-#define   LVDS_PIPE_MASK               (1 << 30)
-#define   LVDS_PIPE(pipe)              ((pipe) << 30)
+#define   LVDS_PIPE_SEL_SHIFT          30
+#define   LVDS_PIPE_SEL_MASK           (1 << 30)
+#define   LVDS_PIPE_SEL(pipe)          ((pipe) << 30)
+#define   LVDS_PIPE_SEL_SHIFT_CPT      29
+#define   LVDS_PIPE_SEL_MASK_CPT       (3 << 29)
+#define   LVDS_PIPE_SEL_CPT(pipe)      ((pipe) << 29)
 /* LVDS dithering flag on 965/g4x platform */
 #define   LVDS_ENABLE_DITHER           (1 << 25)
 /* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4695,7 +4722,9 @@ enum {
 /* Enables the TV encoder */
 # define TV_ENC_ENABLE                 (1 << 31)
 /* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT           (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT         30
+# define TV_ENC_PIPE_SEL_MASK          (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe)         ((pipe) << 30)
 /* Outputs composite video (DAC A only) */
 # define TV_ENC_OUTPUT_COMPOSITE       (0 << 28)
 /* Outputs SVideo video (DAC B/C) */
@@ -5177,10 +5206,15 @@ enum {
 #define CHV_DP_D               _MMIO(VLV_DISPLAY_BASE + 0x64300)
 
 #define   DP_PORT_EN                   (1 << 31)
-#define   DP_PIPEB_SELECT              (1 << 30)
-#define   DP_PIPE_MASK                 (1 << 30)
-#define   DP_PIPE_SELECT_CHV(pipe)     ((pipe) << 16)
-#define   DP_PIPE_MASK_CHV             (3 << 16)
+#define   DP_PIPE_SEL_SHIFT            30
+#define   DP_PIPE_SEL_MASK             (1 << 30)
+#define   DP_PIPE_SEL(pipe)            ((pipe) << 30)
+#define   DP_PIPE_SEL_SHIFT_IVB                29
+#define   DP_PIPE_SEL_MASK_IVB         (3 << 29)
+#define   DP_PIPE_SEL_IVB(pipe)                ((pipe) << 29)
+#define   DP_PIPE_SEL_SHIFT_CHV                16
+#define   DP_PIPE_SEL_MASK_CHV         (3 << 16)
+#define   DP_PIPE_SEL_CHV(pipe)                ((pipe) << 16)
 
 /* Link training mode - select a suitable mode for each stage */
 #define   DP_LINK_TRAIN_PAT_1          (0 << 28)
@@ -5287,6 +5321,13 @@ enum {
 #define _DPD_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64320)
 #define _DPD_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64324)
 
+#define _DPE_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64410)
+#define _DPE_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64414)
+#define _DPE_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64418)
+#define _DPE_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6441c)
+#define _DPE_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64420)
+#define _DPE_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64424)
+
 #define _DPF_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64510)
 #define _DPF_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64514)
 #define _DPF_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64518)
@@ -5342,7 +5383,7 @@ enum {
 #define _PIPEB_DATA_M_G4X      0x71050
 
 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
+#define  TU_SIZE(x)             (((x) - 1) << 25) /* default size 64 */
 #define  TU_SIZE_SHIFT         25
 #define  TU_SIZE_MASK           (0x3f << 25)
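TU_SIZE() stores size-minus-one, so the 6-bit field's maximum encoding 0x3f corresponds to the default TU of 64. A round-trip sketch; the M value in the low bits is made up:

#include <stdint.h>
#include <stdio.h>

#define TU_SIZE(x)      (((x) - 1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT   25
#define TU_SIZE_MASK    (0x3f << 25)

int main(void)
{
        uint32_t data_m = TU_SIZE(64) | 0x1234; /* hypothetical M in bits 23:0 */
        uint32_t tu = ((data_m & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

        printf("TU encoded as 0x%x, decodes back to %u\n",
               (data_m & TU_SIZE_MASK) >> TU_SIZE_SHIFT, tu);
        return 0;
}
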
 
@@ -5384,18 +5425,18 @@ enum {
 #define   DSL_LINEMASK_GEN2    0x00000fff
 #define   DSL_LINEMASK_GEN3    0x00001fff
 #define _PIPEACONF             0x70008
-#define   PIPECONF_ENABLE      (1<<31)
+#define   PIPECONF_ENABLE      (1 << 31)
 #define   PIPECONF_DISABLE     0
-#define   PIPECONF_DOUBLE_WIDE (1<<30)
-#define   I965_PIPECONF_ACTIVE (1<<30)
-#define   PIPECONF_DSI_PLL_LOCKED      (1<<29) /* vlv & pipe A only */
-#define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define   PIPECONF_DOUBLE_WIDE (1 << 30)
+#define   I965_PIPECONF_ACTIVE (1 << 30)
+#define   PIPECONF_DSI_PLL_LOCKED      (1 << 29) /* vlv & pipe A only */
+#define   PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
 #define   PIPECONF_SINGLE_WIDE 0
 #define   PIPECONF_PIPE_UNLOCKED 0
-#define   PIPECONF_PIPE_LOCKED (1<<25)
+#define   PIPECONF_PIPE_LOCKED (1 << 25)
 #define   PIPECONF_PALETTE     0
-#define   PIPECONF_GAMMA               (1<<24)
-#define   PIPECONF_FORCE_BORDER        (1<<25)
+#define   PIPECONF_GAMMA               (1 << 24)
+#define   PIPECONF_FORCE_BORDER        (1 << 25)
 #define   PIPECONF_INTERLACE_MASK      (7 << 21)
 #define   PIPECONF_INTERLACE_MASK_HSW  (3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5414,67 +5455,67 @@ enum {
 #define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK  (5 << 21) /* ilk/snb only */
 #define   PIPECONF_INTERLACE_MODE_MASK         (7 << 21)
 #define   PIPECONF_EDP_RR_MODE_SWITCH          (1 << 20)
-#define   PIPECONF_CXSR_DOWNCLOCK      (1<<16)
+#define   PIPECONF_CXSR_DOWNCLOCK      (1 << 16)
 #define   PIPECONF_EDP_RR_MODE_SWITCH_VLV      (1 << 14)
 #define   PIPECONF_COLOR_RANGE_SELECT  (1 << 13)
 #define   PIPECONF_BPC_MASK    (0x7 << 5)
-#define   PIPECONF_8BPC                (0<<5)
-#define   PIPECONF_10BPC       (1<<5)
-#define   PIPECONF_6BPC                (2<<5)
-#define   PIPECONF_12BPC       (3<<5)
-#define   PIPECONF_DITHER_EN   (1<<4)
+#define   PIPECONF_8BPC                (0 << 5)
+#define   PIPECONF_10BPC       (1 << 5)
+#define   PIPECONF_6BPC                (2 << 5)
+#define   PIPECONF_12BPC       (3 << 5)
+#define   PIPECONF_DITHER_EN   (1 << 4)
 #define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-#define   PIPECONF_DITHER_TYPE_SP (0<<2)
-#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
-#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
-#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define   PIPECONF_DITHER_TYPE_SP (0 << 2)
+#define   PIPECONF_DITHER_TYPE_ST1 (1 << 2)
+#define   PIPECONF_DITHER_TYPE_ST2 (2 << 2)
+#define   PIPECONF_DITHER_TYPE_TEMP (3 << 2)
 #define _PIPEASTAT             0x70024
-#define   PIPE_FIFO_UNDERRUN_STATUS            (1UL<<31)
-#define   SPRITE1_FLIP_DONE_INT_EN_VLV         (1UL<<30)
-#define   PIPE_CRC_ERROR_ENABLE                        (1UL<<29)
-#define   PIPE_CRC_DONE_ENABLE                 (1UL<<28)
-#define   PERF_COUNTER2_INTERRUPT_EN           (1UL<<27)
-#define   PIPE_GMBUS_EVENT_ENABLE              (1UL<<27)
-#define   PLANE_FLIP_DONE_INT_EN_VLV           (1UL<<26)
-#define   PIPE_HOTPLUG_INTERRUPT_ENABLE                (1UL<<26)
-#define   PIPE_VSYNC_INTERRUPT_ENABLE          (1UL<<25)
-#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE     (1UL<<24)
-#define   PIPE_DPST_EVENT_ENABLE               (1UL<<23)
-#define   SPRITE0_FLIP_DONE_INT_EN_VLV         (1UL<<22)
-#define   PIPE_LEGACY_BLC_EVENT_ENABLE         (1UL<<22)
-#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE      (1UL<<21)
-#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE     (1UL<<20)
-#define   PIPE_B_PSR_INTERRUPT_ENABLE_VLV      (1UL<<19)
-#define   PERF_COUNTER_INTERRUPT_EN            (1UL<<19)
-#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE     (1UL<<18) /* pre-965 */
-#define   PIPE_START_VBLANK_INTERRUPT_ENABLE   (1UL<<18) /* 965 or later */
-#define   PIPE_FRAMESTART_INTERRUPT_ENABLE     (1UL<<17)
-#define   PIPE_VBLANK_INTERRUPT_ENABLE         (1UL<<17)
-#define   PIPEA_HBLANK_INT_EN_VLV              (1UL<<16)
-#define   PIPE_OVERLAY_UPDATED_ENABLE          (1UL<<16)
-#define   SPRITE1_FLIP_DONE_INT_STATUS_VLV     (1UL<<15)
-#define   SPRITE0_FLIP_DONE_INT_STATUS_VLV     (1UL<<14)
-#define   PIPE_CRC_ERROR_INTERRUPT_STATUS      (1UL<<13)
-#define   PIPE_CRC_DONE_INTERRUPT_STATUS       (1UL<<12)
-#define   PERF_COUNTER2_INTERRUPT_STATUS       (1UL<<11)
-#define   PIPE_GMBUS_INTERRUPT_STATUS          (1UL<<11)
-#define   PLANE_FLIP_DONE_INT_STATUS_VLV       (1UL<<10)
-#define   PIPE_HOTPLUG_INTERRUPT_STATUS                (1UL<<10)
-#define   PIPE_VSYNC_INTERRUPT_STATUS          (1UL<<9)
-#define   PIPE_DISPLAY_LINE_COMPARE_STATUS     (1UL<<8)
-#define   PIPE_DPST_EVENT_STATUS               (1UL<<7)
-#define   PIPE_A_PSR_STATUS_VLV                        (1UL<<6)
-#define   PIPE_LEGACY_BLC_EVENT_STATUS         (1UL<<6)
-#define   PIPE_ODD_FIELD_INTERRUPT_STATUS      (1UL<<5)
-#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS     (1UL<<4)
-#define   PIPE_B_PSR_STATUS_VLV                        (1UL<<3)
-#define   PERF_COUNTER_INTERRUPT_STATUS                (1UL<<3)
-#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS     (1UL<<2) /* pre-965 */
-#define   PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL<<2) /* 965 or later */
-#define   PIPE_FRAMESTART_INTERRUPT_STATUS     (1UL<<1)
-#define   PIPE_VBLANK_INTERRUPT_STATUS         (1UL<<1)
-#define   PIPE_HBLANK_INT_STATUS               (1UL<<0)
-#define   PIPE_OVERLAY_UPDATED_STATUS          (1UL<<0)
+#define   PIPE_FIFO_UNDERRUN_STATUS            (1UL << 31)
+#define   SPRITE1_FLIP_DONE_INT_EN_VLV         (1UL << 30)
+#define   PIPE_CRC_ERROR_ENABLE                        (1UL << 29)
+#define   PIPE_CRC_DONE_ENABLE                 (1UL << 28)
+#define   PERF_COUNTER2_INTERRUPT_EN           (1UL << 27)
+#define   PIPE_GMBUS_EVENT_ENABLE              (1UL << 27)
+#define   PLANE_FLIP_DONE_INT_EN_VLV           (1UL << 26)
+#define   PIPE_HOTPLUG_INTERRUPT_ENABLE                (1UL << 26)
+#define   PIPE_VSYNC_INTERRUPT_ENABLE          (1UL << 25)
+#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE     (1UL << 24)
+#define   PIPE_DPST_EVENT_ENABLE               (1UL << 23)
+#define   SPRITE0_FLIP_DONE_INT_EN_VLV         (1UL << 22)
+#define   PIPE_LEGACY_BLC_EVENT_ENABLE         (1UL << 22)
+#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE      (1UL << 21)
+#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE     (1UL << 20)
+#define   PIPE_B_PSR_INTERRUPT_ENABLE_VLV      (1UL << 19)
+#define   PERF_COUNTER_INTERRUPT_EN            (1UL << 19)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE     (1UL << 18) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_ENABLE   (1UL << 18) /* 965 or later */
+#define   PIPE_FRAMESTART_INTERRUPT_ENABLE     (1UL << 17)
+#define   PIPE_VBLANK_INTERRUPT_ENABLE         (1UL << 17)
+#define   PIPEA_HBLANK_INT_EN_VLV              (1UL << 16)
+#define   PIPE_OVERLAY_UPDATED_ENABLE          (1UL << 16)
+#define   SPRITE1_FLIP_DONE_INT_STATUS_VLV     (1UL << 15)
+#define   SPRITE0_FLIP_DONE_INT_STATUS_VLV     (1UL << 14)
+#define   PIPE_CRC_ERROR_INTERRUPT_STATUS      (1UL << 13)
+#define   PIPE_CRC_DONE_INTERRUPT_STATUS       (1UL << 12)
+#define   PERF_COUNTER2_INTERRUPT_STATUS       (1UL << 11)
+#define   PIPE_GMBUS_INTERRUPT_STATUS          (1UL << 11)
+#define   PLANE_FLIP_DONE_INT_STATUS_VLV       (1UL << 10)
+#define   PIPE_HOTPLUG_INTERRUPT_STATUS                (1UL << 10)
+#define   PIPE_VSYNC_INTERRUPT_STATUS          (1UL << 9)
+#define   PIPE_DISPLAY_LINE_COMPARE_STATUS     (1UL << 8)
+#define   PIPE_DPST_EVENT_STATUS               (1UL << 7)
+#define   PIPE_A_PSR_STATUS_VLV                        (1UL << 6)
+#define   PIPE_LEGACY_BLC_EVENT_STATUS         (1UL << 6)
+#define   PIPE_ODD_FIELD_INTERRUPT_STATUS      (1UL << 5)
+#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS     (1UL << 4)
+#define   PIPE_B_PSR_STATUS_VLV                        (1UL << 3)
+#define   PERF_COUNTER_INTERRUPT_STATUS                (1UL << 3)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS     (1UL << 2) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL << 2) /* 965 or later */
+#define   PIPE_FRAMESTART_INTERRUPT_STATUS     (1UL << 1)
+#define   PIPE_VBLANK_INTERRUPT_STATUS         (1UL << 1)
+#define   PIPE_HBLANK_INT_STATUS               (1UL << 0)
+#define   PIPE_OVERLAY_UPDATED_STATUS          (1UL << 0)
 
 #define PIPESTAT_INT_ENABLE_MASK               0x7fff0000
 #define PIPESTAT_INT_STATUS_MASK               0x0000ffff
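
For most PIPESTAT bits the enable bit sits exactly 16 above its status bit, which is what the two masks above encode. A sketch of that relationship; note that a few bits (e.g. the VLV sprite flip-done pairs) do not follow it:

u32 status_mask = PIPE_VBLANK_INTERRUPT_STATUS;	/* bit 1 */
u32 enable_mask = status_mask << 16;		/* == PIPE_VBLANK_INTERRUPT_ENABLE, bit 17 */
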
@@ -5503,67 +5544,67 @@ enum {
 
 #define _PIPE_MISC_A                   0x70030
 #define _PIPE_MISC_B                   0x71030
-#define   PIPEMISC_YUV420_ENABLE       (1<<27)
-#define   PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
-#define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1<<11)
-#define   PIPEMISC_DITHER_BPC_MASK     (7<<5)
-#define   PIPEMISC_DITHER_8_BPC                (0<<5)
-#define   PIPEMISC_DITHER_10_BPC       (1<<5)
-#define   PIPEMISC_DITHER_6_BPC                (2<<5)
-#define   PIPEMISC_DITHER_12_BPC       (3<<5)
-#define   PIPEMISC_DITHER_ENABLE       (1<<4)
-#define   PIPEMISC_DITHER_TYPE_MASK    (3<<2)
-#define   PIPEMISC_DITHER_TYPE_SP      (0<<2)
+#define   PIPEMISC_YUV420_ENABLE       (1 << 27)
+#define   PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
+#define   PIPEMISC_DITHER_BPC_MASK     (7 << 5)
+#define   PIPEMISC_DITHER_8_BPC                (0 << 5)
+#define   PIPEMISC_DITHER_10_BPC       (1 << 5)
+#define   PIPEMISC_DITHER_6_BPC                (2 << 5)
+#define   PIPEMISC_DITHER_12_BPC       (3 << 5)
+#define   PIPEMISC_DITHER_ENABLE       (1 << 4)
+#define   PIPEMISC_DITHER_TYPE_MASK    (3 << 2)
+#define   PIPEMISC_DITHER_TYPE_SP      (0 << 2)
 #define PIPEMISC(pipe)                 _MMIO_PIPE2(pipe, _PIPE_MISC_A)
 
 #define VLV_DPFLIPSTAT                         _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define   PIPEB_LINE_COMPARE_INT_EN            (1<<29)
-#define   PIPEB_HLINE_INT_EN                   (1<<28)
-#define   PIPEB_VBLANK_INT_EN                  (1<<27)
-#define   SPRITED_FLIP_DONE_INT_EN             (1<<26)
-#define   SPRITEC_FLIP_DONE_INT_EN             (1<<25)
-#define   PLANEB_FLIP_DONE_INT_EN              (1<<24)
-#define   PIPE_PSR_INT_EN                      (1<<22)
-#define   PIPEA_LINE_COMPARE_INT_EN            (1<<21)
-#define   PIPEA_HLINE_INT_EN                   (1<<20)
-#define   PIPEA_VBLANK_INT_EN                  (1<<19)
-#define   SPRITEB_FLIP_DONE_INT_EN             (1<<18)
-#define   SPRITEA_FLIP_DONE_INT_EN             (1<<17)
-#define   PLANEA_FLIPDONE_INT_EN               (1<<16)
-#define   PIPEC_LINE_COMPARE_INT_EN            (1<<13)
-#define   PIPEC_HLINE_INT_EN                   (1<<12)
-#define   PIPEC_VBLANK_INT_EN                  (1<<11)
-#define   SPRITEF_FLIPDONE_INT_EN              (1<<10)
-#define   SPRITEE_FLIPDONE_INT_EN              (1<<9)
-#define   PLANEC_FLIPDONE_INT_EN               (1<<8)
+#define   PIPEB_LINE_COMPARE_INT_EN            (1 << 29)
+#define   PIPEB_HLINE_INT_EN                   (1 << 28)
+#define   PIPEB_VBLANK_INT_EN                  (1 << 27)
+#define   SPRITED_FLIP_DONE_INT_EN             (1 << 26)
+#define   SPRITEC_FLIP_DONE_INT_EN             (1 << 25)
+#define   PLANEB_FLIP_DONE_INT_EN              (1 << 24)
+#define   PIPE_PSR_INT_EN                      (1 << 22)
+#define   PIPEA_LINE_COMPARE_INT_EN            (1 << 21)
+#define   PIPEA_HLINE_INT_EN                   (1 << 20)
+#define   PIPEA_VBLANK_INT_EN                  (1 << 19)
+#define   SPRITEB_FLIP_DONE_INT_EN             (1 << 18)
+#define   SPRITEA_FLIP_DONE_INT_EN             (1 << 17)
+#define   PLANEA_FLIPDONE_INT_EN               (1 << 16)
+#define   PIPEC_LINE_COMPARE_INT_EN            (1 << 13)
+#define   PIPEC_HLINE_INT_EN                   (1 << 12)
+#define   PIPEC_VBLANK_INT_EN                  (1 << 11)
+#define   SPRITEF_FLIPDONE_INT_EN              (1 << 10)
+#define   SPRITEE_FLIPDONE_INT_EN              (1 << 9)
+#define   PLANEC_FLIPDONE_INT_EN               (1 << 8)
 
 #define DPINVGTT                               _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define   SPRITEF_INVALID_GTT_INT_EN           (1<<27)
-#define   SPRITEE_INVALID_GTT_INT_EN           (1<<26)
-#define   PLANEC_INVALID_GTT_INT_EN            (1<<25)
-#define   CURSORC_INVALID_GTT_INT_EN           (1<<24)
-#define   CURSORB_INVALID_GTT_INT_EN           (1<<23)
-#define   CURSORA_INVALID_GTT_INT_EN           (1<<22)
-#define   SPRITED_INVALID_GTT_INT_EN           (1<<21)
-#define   SPRITEC_INVALID_GTT_INT_EN           (1<<20)
-#define   PLANEB_INVALID_GTT_INT_EN            (1<<19)
-#define   SPRITEB_INVALID_GTT_INT_EN           (1<<18)
-#define   SPRITEA_INVALID_GTT_INT_EN           (1<<17)
-#define   PLANEA_INVALID_GTT_INT_EN            (1<<16)
+#define   SPRITEF_INVALID_GTT_INT_EN           (1 << 27)
+#define   SPRITEE_INVALID_GTT_INT_EN           (1 << 26)
+#define   PLANEC_INVALID_GTT_INT_EN            (1 << 25)
+#define   CURSORC_INVALID_GTT_INT_EN           (1 << 24)
+#define   CURSORB_INVALID_GTT_INT_EN           (1 << 23)
+#define   CURSORA_INVALID_GTT_INT_EN           (1 << 22)
+#define   SPRITED_INVALID_GTT_INT_EN           (1 << 21)
+#define   SPRITEC_INVALID_GTT_INT_EN           (1 << 20)
+#define   PLANEB_INVALID_GTT_INT_EN            (1 << 19)
+#define   SPRITEB_INVALID_GTT_INT_EN           (1 << 18)
+#define   SPRITEA_INVALID_GTT_INT_EN           (1 << 17)
+#define   PLANEA_INVALID_GTT_INT_EN            (1 << 16)
 #define   DPINVGTT_EN_MASK                     0xff0000
 #define   DPINVGTT_EN_MASK_CHV                 0xfff0000
-#define   SPRITEF_INVALID_GTT_STATUS           (1<<11)
-#define   SPRITEE_INVALID_GTT_STATUS           (1<<10)
-#define   PLANEC_INVALID_GTT_STATUS            (1<<9)
-#define   CURSORC_INVALID_GTT_STATUS           (1<<8)
-#define   CURSORB_INVALID_GTT_STATUS           (1<<7)
-#define   CURSORA_INVALID_GTT_STATUS           (1<<6)
-#define   SPRITED_INVALID_GTT_STATUS           (1<<5)
-#define   SPRITEC_INVALID_GTT_STATUS           (1<<4)
-#define   PLANEB_INVALID_GTT_STATUS            (1<<3)
-#define   SPRITEB_INVALID_GTT_STATUS           (1<<2)
-#define   SPRITEA_INVALID_GTT_STATUS           (1<<1)
-#define   PLANEA_INVALID_GTT_STATUS            (1<<0)
+#define   SPRITEF_INVALID_GTT_STATUS           (1 << 11)
+#define   SPRITEE_INVALID_GTT_STATUS           (1 << 10)
+#define   PLANEC_INVALID_GTT_STATUS            (1 << 9)
+#define   CURSORC_INVALID_GTT_STATUS           (1 << 8)
+#define   CURSORB_INVALID_GTT_STATUS           (1 << 7)
+#define   CURSORA_INVALID_GTT_STATUS           (1 << 6)
+#define   SPRITED_INVALID_GTT_STATUS           (1 << 5)
+#define   SPRITEC_INVALID_GTT_STATUS           (1 << 4)
+#define   PLANEB_INVALID_GTT_STATUS            (1 << 3)
+#define   SPRITEB_INVALID_GTT_STATUS           (1 << 2)
+#define   SPRITEA_INVALID_GTT_STATUS           (1 << 1)
+#define   PLANEA_INVALID_GTT_STATUS            (1 << 0)
 #define   DPINVGTT_STATUS_MASK                 0xff
 #define   DPINVGTT_STATUS_MASK_CHV             0xfff
 
@@ -5604,149 +5645,149 @@ enum {
 /* pnv/gen4/g4x/vlv/chv */
 #define DSPFW1         _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
 #define   DSPFW_SR_SHIFT               23
-#define   DSPFW_SR_MASK                        (0x1ff<<23)
+#define   DSPFW_SR_MASK                        (0x1ff << 23)
 #define   DSPFW_CURSORB_SHIFT          16
-#define   DSPFW_CURSORB_MASK           (0x3f<<16)
+#define   DSPFW_CURSORB_MASK           (0x3f << 16)
 #define   DSPFW_PLANEB_SHIFT           8
-#define   DSPFW_PLANEB_MASK            (0x7f<<8)
-#define   DSPFW_PLANEB_MASK_VLV                (0xff<<8) /* vlv/chv */
+#define   DSPFW_PLANEB_MASK            (0x7f << 8)
+#define   DSPFW_PLANEB_MASK_VLV                (0xff << 8) /* vlv/chv */
 #define   DSPFW_PLANEA_SHIFT           0
-#define   DSPFW_PLANEA_MASK            (0x7f<<0)
-#define   DSPFW_PLANEA_MASK_VLV                (0xff<<0) /* vlv/chv */
+#define   DSPFW_PLANEA_MASK            (0x7f << 0)
+#define   DSPFW_PLANEA_MASK_VLV                (0xff << 0) /* vlv/chv */
 #define DSPFW2         _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
-#define   DSPFW_FBC_SR_EN              (1<<31)   /* g4x */
+#define   DSPFW_FBC_SR_EN              (1 << 31)         /* g4x */
 #define   DSPFW_FBC_SR_SHIFT           28
-#define   DSPFW_FBC_SR_MASK            (0x7<<28) /* g4x */
+#define   DSPFW_FBC_SR_MASK            (0x7 << 28) /* g4x */
 #define   DSPFW_FBC_HPLL_SR_SHIFT      24
-#define   DSPFW_FBC_HPLL_SR_MASK       (0xf<<24) /* g4x */
+#define   DSPFW_FBC_HPLL_SR_MASK       (0xf << 24) /* g4x */
 #define   DSPFW_SPRITEB_SHIFT          (16)
-#define   DSPFW_SPRITEB_MASK           (0x7f<<16) /* g4x */
-#define   DSPFW_SPRITEB_MASK_VLV       (0xff<<16) /* vlv/chv */
+#define   DSPFW_SPRITEB_MASK           (0x7f << 16) /* g4x */
+#define   DSPFW_SPRITEB_MASK_VLV       (0xff << 16) /* vlv/chv */
 #define   DSPFW_CURSORA_SHIFT          8
-#define   DSPFW_CURSORA_MASK           (0x3f<<8)
+#define   DSPFW_CURSORA_MASK           (0x3f << 8)
 #define   DSPFW_PLANEC_OLD_SHIFT       0
-#define   DSPFW_PLANEC_OLD_MASK                (0x7f<<0) /* pre-gen4 sprite C */
+#define   DSPFW_PLANEC_OLD_MASK                (0x7f << 0) /* pre-gen4 sprite C */
 #define   DSPFW_SPRITEA_SHIFT          0
-#define   DSPFW_SPRITEA_MASK           (0x7f<<0) /* g4x */
-#define   DSPFW_SPRITEA_MASK_VLV       (0xff<<0) /* vlv/chv */
+#define   DSPFW_SPRITEA_MASK           (0x7f << 0) /* g4x */
+#define   DSPFW_SPRITEA_MASK_VLV       (0xff << 0) /* vlv/chv */
 #define DSPFW3         _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
-#define   DSPFW_HPLL_SR_EN             (1<<31)
-#define   PINEVIEW_SELF_REFRESH_EN     (1<<30)
+#define   DSPFW_HPLL_SR_EN             (1 << 31)
+#define   PINEVIEW_SELF_REFRESH_EN     (1 << 30)
 #define   DSPFW_CURSOR_SR_SHIFT                24
-#define   DSPFW_CURSOR_SR_MASK         (0x3f<<24)
+#define   DSPFW_CURSOR_SR_MASK         (0x3f << 24)
 #define   DSPFW_HPLL_CURSOR_SHIFT      16
-#define   DSPFW_HPLL_CURSOR_MASK       (0x3f<<16)
+#define   DSPFW_HPLL_CURSOR_MASK       (0x3f << 16)
 #define   DSPFW_HPLL_SR_SHIFT          0
-#define   DSPFW_HPLL_SR_MASK           (0x1ff<<0)
+#define   DSPFW_HPLL_SR_MASK           (0x1ff << 0)
 
 /* vlv/chv */
 #define DSPFW4         _MMIO(VLV_DISPLAY_BASE + 0x70070)
 #define   DSPFW_SPRITEB_WM1_SHIFT      16
-#define   DSPFW_SPRITEB_WM1_MASK       (0xff<<16)
+#define   DSPFW_SPRITEB_WM1_MASK       (0xff << 16)
 #define   DSPFW_CURSORA_WM1_SHIFT      8
-#define   DSPFW_CURSORA_WM1_MASK       (0x3f<<8)
+#define   DSPFW_CURSORA_WM1_MASK       (0x3f << 8)
 #define   DSPFW_SPRITEA_WM1_SHIFT      0
-#define   DSPFW_SPRITEA_WM1_MASK       (0xff<<0)
+#define   DSPFW_SPRITEA_WM1_MASK       (0xff << 0)
 #define DSPFW5         _MMIO(VLV_DISPLAY_BASE + 0x70074)
 #define   DSPFW_PLANEB_WM1_SHIFT       24
-#define   DSPFW_PLANEB_WM1_MASK                (0xff<<24)
+#define   DSPFW_PLANEB_WM1_MASK                (0xff << 24)
 #define   DSPFW_PLANEA_WM1_SHIFT       16
-#define   DSPFW_PLANEA_WM1_MASK                (0xff<<16)
+#define   DSPFW_PLANEA_WM1_MASK                (0xff << 16)
 #define   DSPFW_CURSORB_WM1_SHIFT      8
-#define   DSPFW_CURSORB_WM1_MASK       (0x3f<<8)
+#define   DSPFW_CURSORB_WM1_MASK       (0x3f << 8)
 #define   DSPFW_CURSOR_SR_WM1_SHIFT    0
-#define   DSPFW_CURSOR_SR_WM1_MASK     (0x3f<<0)
+#define   DSPFW_CURSOR_SR_WM1_MASK     (0x3f << 0)
 #define DSPFW6         _MMIO(VLV_DISPLAY_BASE + 0x70078)
 #define   DSPFW_SR_WM1_SHIFT           0
-#define   DSPFW_SR_WM1_MASK            (0x1ff<<0)
+#define   DSPFW_SR_WM1_MASK            (0x1ff << 0)
 #define DSPFW7         _MMIO(VLV_DISPLAY_BASE + 0x7007c)
 #define DSPFW7_CHV     _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
 #define   DSPFW_SPRITED_WM1_SHIFT      24
-#define   DSPFW_SPRITED_WM1_MASK       (0xff<<24)
+#define   DSPFW_SPRITED_WM1_MASK       (0xff << 24)
 #define   DSPFW_SPRITED_SHIFT          16
-#define   DSPFW_SPRITED_MASK_VLV       (0xff<<16)
+#define   DSPFW_SPRITED_MASK_VLV       (0xff << 16)
 #define   DSPFW_SPRITEC_WM1_SHIFT      8
-#define   DSPFW_SPRITEC_WM1_MASK       (0xff<<8)
+#define   DSPFW_SPRITEC_WM1_MASK       (0xff << 8)
 #define   DSPFW_SPRITEC_SHIFT          0
-#define   DSPFW_SPRITEC_MASK_VLV       (0xff<<0)
+#define   DSPFW_SPRITEC_MASK_VLV       (0xff << 0)
 #define DSPFW8_CHV     _MMIO(VLV_DISPLAY_BASE + 0x700b8)
 #define   DSPFW_SPRITEF_WM1_SHIFT      24
-#define   DSPFW_SPRITEF_WM1_MASK       (0xff<<24)
+#define   DSPFW_SPRITEF_WM1_MASK       (0xff << 24)
 #define   DSPFW_SPRITEF_SHIFT          16
-#define   DSPFW_SPRITEF_MASK_VLV       (0xff<<16)
+#define   DSPFW_SPRITEF_MASK_VLV       (0xff << 16)
 #define   DSPFW_SPRITEE_WM1_SHIFT      8
-#define   DSPFW_SPRITEE_WM1_MASK       (0xff<<8)
+#define   DSPFW_SPRITEE_WM1_MASK       (0xff << 8)
 #define   DSPFW_SPRITEE_SHIFT          0
-#define   DSPFW_SPRITEE_MASK_VLV       (0xff<<0)
+#define   DSPFW_SPRITEE_MASK_VLV       (0xff << 0)
 #define DSPFW9_CHV     _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
 #define   DSPFW_PLANEC_WM1_SHIFT       24
-#define   DSPFW_PLANEC_WM1_MASK                (0xff<<24)
+#define   DSPFW_PLANEC_WM1_MASK                (0xff << 24)
 #define   DSPFW_PLANEC_SHIFT           16
-#define   DSPFW_PLANEC_MASK_VLV                (0xff<<16)
+#define   DSPFW_PLANEC_MASK_VLV                (0xff << 16)
 #define   DSPFW_CURSORC_WM1_SHIFT      8
-#define   DSPFW_CURSORC_WM1_MASK       (0x3f<<16)
+#define   DSPFW_CURSORC_WM1_MASK       (0x3f << 16)
 #define   DSPFW_CURSORC_SHIFT          0
-#define   DSPFW_CURSORC_MASK           (0x3f<<0)
+#define   DSPFW_CURSORC_MASK           (0x3f << 0)
 
 /* vlv/chv high order bits */
 #define DSPHOWM                _MMIO(VLV_DISPLAY_BASE + 0x70064)
 #define   DSPFW_SR_HI_SHIFT            24
-#define   DSPFW_SR_HI_MASK             (3<<24) /* 2 bits for chv, 1 for vlv */
+#define   DSPFW_SR_HI_MASK             (3 << 24) /* 2 bits for chv, 1 for vlv */
 #define   DSPFW_SPRITEF_HI_SHIFT       23
-#define   DSPFW_SPRITEF_HI_MASK                (1<<23)
+#define   DSPFW_SPRITEF_HI_MASK                (1 << 23)
 #define   DSPFW_SPRITEE_HI_SHIFT       22
-#define   DSPFW_SPRITEE_HI_MASK                (1<<22)
+#define   DSPFW_SPRITEE_HI_MASK                (1 << 22)
 #define   DSPFW_PLANEC_HI_SHIFT                21
-#define   DSPFW_PLANEC_HI_MASK         (1<<21)
+#define   DSPFW_PLANEC_HI_MASK         (1 << 21)
 #define   DSPFW_SPRITED_HI_SHIFT       20
-#define   DSPFW_SPRITED_HI_MASK                (1<<20)
+#define   DSPFW_SPRITED_HI_MASK                (1 << 20)
 #define   DSPFW_SPRITEC_HI_SHIFT       16
-#define   DSPFW_SPRITEC_HI_MASK                (1<<16)
+#define   DSPFW_SPRITEC_HI_MASK                (1 << 16)
 #define   DSPFW_PLANEB_HI_SHIFT                12
-#define   DSPFW_PLANEB_HI_MASK         (1<<12)
+#define   DSPFW_PLANEB_HI_MASK         (1 << 12)
 #define   DSPFW_SPRITEB_HI_SHIFT       8
-#define   DSPFW_SPRITEB_HI_MASK                (1<<8)
+#define   DSPFW_SPRITEB_HI_MASK                (1 << 8)
 #define   DSPFW_SPRITEA_HI_SHIFT       4
-#define   DSPFW_SPRITEA_HI_MASK                (1<<4)
+#define   DSPFW_SPRITEA_HI_MASK                (1 << 4)
 #define   DSPFW_PLANEA_HI_SHIFT                0
-#define   DSPFW_PLANEA_HI_MASK         (1<<0)
+#define   DSPFW_PLANEA_HI_MASK         (1 << 0)
 #define DSPHOWM1       _MMIO(VLV_DISPLAY_BASE + 0x70068)
 #define   DSPFW_SR_WM1_HI_SHIFT                24
-#define   DSPFW_SR_WM1_HI_MASK         (3<<24) /* 2 bits for chv, 1 for vlv */
+#define   DSPFW_SR_WM1_HI_MASK         (3 << 24) /* 2 bits for chv, 1 for vlv */
 #define   DSPFW_SPRITEF_WM1_HI_SHIFT   23
-#define   DSPFW_SPRITEF_WM1_HI_MASK    (1<<23)
+#define   DSPFW_SPRITEF_WM1_HI_MASK    (1 << 23)
 #define   DSPFW_SPRITEE_WM1_HI_SHIFT   22
-#define   DSPFW_SPRITEE_WM1_HI_MASK    (1<<22)
+#define   DSPFW_SPRITEE_WM1_HI_MASK    (1 << 22)
 #define   DSPFW_PLANEC_WM1_HI_SHIFT    21
-#define   DSPFW_PLANEC_WM1_HI_MASK     (1<<21)
+#define   DSPFW_PLANEC_WM1_HI_MASK     (1 << 21)
 #define   DSPFW_SPRITED_WM1_HI_SHIFT   20
-#define   DSPFW_SPRITED_WM1_HI_MASK    (1<<20)
+#define   DSPFW_SPRITED_WM1_HI_MASK    (1 << 20)
 #define   DSPFW_SPRITEC_WM1_HI_SHIFT   16
-#define   DSPFW_SPRITEC_WM1_HI_MASK    (1<<16)
+#define   DSPFW_SPRITEC_WM1_HI_MASK    (1 << 16)
 #define   DSPFW_PLANEB_WM1_HI_SHIFT    12
-#define   DSPFW_PLANEB_WM1_HI_MASK     (1<<12)
+#define   DSPFW_PLANEB_WM1_HI_MASK     (1 << 12)
 #define   DSPFW_SPRITEB_WM1_HI_SHIFT   8
-#define   DSPFW_SPRITEB_WM1_HI_MASK    (1<<8)
+#define   DSPFW_SPRITEB_WM1_HI_MASK    (1 << 8)
 #define   DSPFW_SPRITEA_WM1_HI_SHIFT   4
-#define   DSPFW_SPRITEA_WM1_HI_MASK    (1<<4)
+#define   DSPFW_SPRITEA_WM1_HI_MASK    (1 << 4)
 #define   DSPFW_PLANEA_WM1_HI_SHIFT    0
-#define   DSPFW_PLANEA_WM1_HI_MASK     (1<<0)
+#define   DSPFW_PLANEA_WM1_HI_MASK     (1 << 0)
 
 /* drain latency register values */
 #define VLV_DDL(pipe)                  _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
 #define DDL_CURSOR_SHIFT               24
-#define DDL_SPRITE_SHIFT(sprite)       (8+8*(sprite))
+#define DDL_SPRITE_SHIFT(sprite)       (8 + 8 * (sprite))
 #define DDL_PLANE_SHIFT                        0
-#define DDL_PRECISION_HIGH             (1<<7)
-#define DDL_PRECISION_LOW              (0<<7)
+#define DDL_PRECISION_HIGH             (1 << 7)
+#define DDL_PRECISION_LOW              (0 << 7)
 #define DRAIN_LATENCY_MASK             0x7f
 
 #define CBR1_VLV                       _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define  CBR_PND_DEADLINE_DISABLE      (1<<31)
-#define  CBR_PWM_CLOCK_MUX_SELECT      (1<<30)
+#define  CBR_PND_DEADLINE_DISABLE      (1 << 31)
+#define  CBR_PWM_CLOCK_MUX_SELECT      (1 << 30)
 
 #define CBR4_VLV                       _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define  CBR_DPLLBMD_PIPE(pipe)                (1<<(7+(pipe)*11)) /* pipes B and C */
+#define  CBR_DPLLBMD_PIPE(pipe)                (1 << (7 + (pipe) * 11)) /* pipes B and C */
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE     64
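
Each DSPFW field above is a shift/mask pair, so reads decode the same way everywhere. A minimal sketch, assuming the vlv/chv plane A layout and the I915_READ() helper:

u32 dspfw1 = I915_READ(DSPFW1);
int wm_planea = (dspfw1 & DSPFW_PLANEA_MASK_VLV) >> DSPFW_PLANEA_SHIFT;
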
@@ -5818,32 +5859,32 @@ enum {
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK          _MMIO(0x45100)
-#define  WM0_PIPE_PLANE_MASK   (0xffff<<16)
+#define  WM0_PIPE_PLANE_MASK   (0xffff << 16)
 #define  WM0_PIPE_PLANE_SHIFT  16
-#define  WM0_PIPE_SPRITE_MASK  (0xff<<8)
+#define  WM0_PIPE_SPRITE_MASK  (0xff << 8)
 #define  WM0_PIPE_SPRITE_SHIFT 8
 #define  WM0_PIPE_CURSOR_MASK  (0xff)
 
 #define WM0_PIPEB_ILK          _MMIO(0x45104)
 #define WM0_PIPEC_IVB          _MMIO(0x45200)
 #define WM1_LP_ILK             _MMIO(0x45108)
-#define  WM1_LP_SR_EN          (1<<31)
+#define  WM1_LP_SR_EN          (1 << 31)
 #define  WM1_LP_LATENCY_SHIFT  24
-#define  WM1_LP_LATENCY_MASK   (0x7f<<24)
-#define  WM1_LP_FBC_MASK       (0xf<<20)
+#define  WM1_LP_LATENCY_MASK   (0x7f << 24)
+#define  WM1_LP_FBC_MASK       (0xf << 20)
 #define  WM1_LP_FBC_SHIFT      20
 #define  WM1_LP_FBC_SHIFT_BDW  19
-#define  WM1_LP_SR_MASK                (0x7ff<<8)
+#define  WM1_LP_SR_MASK                (0x7ff << 8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0xff)
 #define WM2_LP_ILK             _MMIO(0x4510c)
-#define  WM2_LP_EN             (1<<31)
+#define  WM2_LP_EN             (1 << 31)
 #define WM3_LP_ILK             _MMIO(0x45110)
-#define  WM3_LP_EN             (1<<31)
+#define  WM3_LP_EN             (1 << 31)
 #define WM1S_LP_ILK            _MMIO(0x45120)
 #define WM2S_LP_IVB            _MMIO(0x45124)
 #define WM3S_LP_IVB            _MMIO(0x45128)
-#define  WM1S_LP_EN            (1<<31)
+#define  WM1S_LP_EN            (1 << 31)
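
The LP watermark fields can also be packed by hand from the shift/mask defines above. A sketch with illustrative values, not tuned numbers:

u32 wm1 = WM1_LP_SR_EN |
	  (12 << WM1_LP_LATENCY_SHIFT) |	/* latency, 7-bit field */
	  (0x80 << WM1_LP_SR_SHIFT) |		/* self-refresh watermark, 11-bit field */
	  0x20;					/* cursor watermark, low byte */
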
 
 #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
        (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
@@ -5900,8 +5941,7 @@ enum {
 #define   CURSOR_ENABLE                0x80000000
 #define   CURSOR_GAMMA_ENABLE  0x40000000
 #define   CURSOR_STRIDE_SHIFT  28
-#define   CURSOR_STRIDE(x)     ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define   CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define   CURSOR_STRIDE(x)     ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
 #define   CURSOR_FORMAT_SHIFT  24
 #define   CURSOR_FORMAT_MASK   (0x07 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_2C     (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5910,18 +5950,21 @@ enum {
 #define   CURSOR_FORMAT_ARGB   (0x04 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_XRGB   (0x05 << CURSOR_FORMAT_SHIFT)
 /* New style CUR*CNTR flags */
-#define   CURSOR_MODE          0x27
-#define   CURSOR_MODE_DISABLE   0x00
-#define   CURSOR_MODE_128_32B_AX 0x02
-#define   CURSOR_MODE_256_32B_AX 0x03
-#define   CURSOR_MODE_64_32B_AX 0x07
-#define   CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define   CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_MODE         0x27
+#define   MCURSOR_MODE_DISABLE   0x00
+#define   MCURSOR_MODE_128_32B_AX 0x02
+#define   MCURSOR_MODE_256_32B_AX 0x03
+#define   MCURSOR_MODE_64_32B_AX 0x07
+#define   MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define   MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define   MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define   MCURSOR_PIPE_SELECT_MASK     (0x3 << 28)
+#define   MCURSOR_PIPE_SELECT_SHIFT    28
 #define   MCURSOR_PIPE_SELECT(pipe)    ((pipe) << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
-#define   CURSOR_ROTATE_180    (1<<15)
-#define   CURSOR_TRICKLE_FEED_DISABLE  (1 << 14)
+#define   MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define   MCURSOR_ROTATE_180   (1 << 15)
+#define   MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
 #define _CURABASE              0x70084
 #define _CURAPOS               0x70088
 #define   CURSOR_POS_MASK       0x007FF
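
For the old-style cursor control, ffs() maps the four legal strides onto the 2-bit field: ffs(256) - 9 == 0, ffs(512) - 9 == 1, ffs(1024) - 9 == 2, ffs(2048) - 9 == 3. The new-style register uses the renamed MCURSOR_* flags instead; a sketch of an illustrative value:

u32 cntl = MCURSOR_MODE_64_ARGB_AX |
	   MCURSOR_PIPE_SELECT(PIPE_B) |
	   MCURSOR_GAMMA_ENABLE;
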
@@ -5958,41 +6001,41 @@ enum {
 
 /* Display A control */
 #define _DSPACNTR                              0x70180
-#define   DISPLAY_PLANE_ENABLE                 (1<<31)
+#define   DISPLAY_PLANE_ENABLE                 (1 << 31)
 #define   DISPLAY_PLANE_DISABLE                        0
-#define   DISPPLANE_GAMMA_ENABLE               (1<<30)
+#define   DISPPLANE_GAMMA_ENABLE               (1 << 30)
 #define   DISPPLANE_GAMMA_DISABLE              0
-#define   DISPPLANE_PIXFORMAT_MASK             (0xf<<26)
-#define   DISPPLANE_YUV422                     (0x0<<26)
-#define   DISPPLANE_8BPP                       (0x2<<26)
-#define   DISPPLANE_BGRA555                    (0x3<<26)
-#define   DISPPLANE_BGRX555                    (0x4<<26)
-#define   DISPPLANE_BGRX565                    (0x5<<26)
-#define   DISPPLANE_BGRX888                    (0x6<<26)
-#define   DISPPLANE_BGRA888                    (0x7<<26)
-#define   DISPPLANE_RGBX101010                 (0x8<<26)
-#define   DISPPLANE_RGBA101010                 (0x9<<26)
-#define   DISPPLANE_BGRX101010                 (0xa<<26)
-#define   DISPPLANE_RGBX161616                 (0xc<<26)
-#define   DISPPLANE_RGBX888                    (0xe<<26)
-#define   DISPPLANE_RGBA888                    (0xf<<26)
-#define   DISPPLANE_STEREO_ENABLE              (1<<25)
+#define   DISPPLANE_PIXFORMAT_MASK             (0xf << 26)
+#define   DISPPLANE_YUV422                     (0x0 << 26)
+#define   DISPPLANE_8BPP                       (0x2 << 26)
+#define   DISPPLANE_BGRA555                    (0x3 << 26)
+#define   DISPPLANE_BGRX555                    (0x4 << 26)
+#define   DISPPLANE_BGRX565                    (0x5 << 26)
+#define   DISPPLANE_BGRX888                    (0x6 << 26)
+#define   DISPPLANE_BGRA888                    (0x7 << 26)
+#define   DISPPLANE_RGBX101010                 (0x8 << 26)
+#define   DISPPLANE_RGBA101010                 (0x9 << 26)
+#define   DISPPLANE_BGRX101010                 (0xa << 26)
+#define   DISPPLANE_RGBX161616                 (0xc << 26)
+#define   DISPPLANE_RGBX888                    (0xe << 26)
+#define   DISPPLANE_RGBA888                    (0xf << 26)
+#define   DISPPLANE_STEREO_ENABLE              (1 << 25)
 #define   DISPPLANE_STEREO_DISABLE             0
-#define   DISPPLANE_PIPE_CSC_ENABLE            (1<<24)
+#define   DISPPLANE_PIPE_CSC_ENABLE            (1 << 24)
 #define   DISPPLANE_SEL_PIPE_SHIFT             24
-#define   DISPPLANE_SEL_PIPE_MASK              (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define   DISPPLANE_SEL_PIPE(pipe)             ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
-#define   DISPPLANE_SRC_KEY_ENABLE             (1<<22)
+#define   DISPPLANE_SEL_PIPE_MASK              (3 << DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SEL_PIPE(pipe)             ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SRC_KEY_ENABLE             (1 << 22)
 #define   DISPPLANE_SRC_KEY_DISABLE            0
-#define   DISPPLANE_LINE_DOUBLE                        (1<<20)
+#define   DISPPLANE_LINE_DOUBLE                        (1 << 20)
 #define   DISPPLANE_NO_LINE_DOUBLE             0
 #define   DISPPLANE_STEREO_POLARITY_FIRST      0
-#define   DISPPLANE_STEREO_POLARITY_SECOND     (1<<18)
-#define   DISPPLANE_ALPHA_PREMULTIPLY          (1<<16) /* CHV pipe B */
-#define   DISPPLANE_ROTATE_180                 (1<<15)
-#define   DISPPLANE_TRICKLE_FEED_DISABLE       (1<<14) /* Ironlake */
-#define   DISPPLANE_TILED                      (1<<10)
-#define   DISPPLANE_MIRROR                     (1<<8) /* CHV pipe B */
+#define   DISPPLANE_STEREO_POLARITY_SECOND     (1 << 18)
+#define   DISPPLANE_ALPHA_PREMULTIPLY          (1 << 16) /* CHV pipe B */
+#define   DISPPLANE_ROTATE_180                 (1 << 15)
+#define   DISPPLANE_TRICKLE_FEED_DISABLE       (1 << 14) /* Ironlake */
+#define   DISPPLANE_TILED                      (1 << 10)
+#define   DISPPLANE_MIRROR                     (1 << 8) /* CHV pipe B */
 #define _DSPAADDR                              0x70184
 #define _DSPASTRIDE                            0x70188
 #define _DSPAPOS                               0x7018C /* reserved */
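
A sketch of a primary-plane control value built from the fields above (gen4-style, pixel format plus source pipe); the combination is illustrative:

u32 dspcntr = DISPLAY_PLANE_ENABLE |
	      DISPPLANE_BGRX888 |
	      DISPPLANE_SEL_PIPE(PIPE_B);
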
@@ -6015,15 +6058,15 @@ enum {
 
 /* CHV pipe B blender and primary plane */
 #define _CHV_BLEND_A           0x60a00
-#define   CHV_BLEND_LEGACY             (0<<30)
-#define   CHV_BLEND_ANDROID            (1<<30)
-#define   CHV_BLEND_MPO                        (2<<30)
-#define   CHV_BLEND_MASK               (3<<30)
+#define   CHV_BLEND_LEGACY             (0 << 30)
+#define   CHV_BLEND_ANDROID            (1 << 30)
+#define   CHV_BLEND_MPO                        (2 << 30)
+#define   CHV_BLEND_MASK               (3 << 30)
 #define _CHV_CANVAS_A          0x60a04
 #define _PRIMPOS_A             0x60a08
 #define _PRIMSIZE_A            0x60a0c
 #define _PRIMCNSTALPHA_A       0x60a10
-#define   PRIM_CONST_ALPHA_ENABLE      (1<<31)
+#define   PRIM_CONST_ALPHA_ENABLE      (1 << 31)
 
 #define CHV_BLEND(pipe)                _MMIO_TRANS2(pipe, _CHV_BLEND_A)
 #define CHV_CANVAS(pipe)       _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
@@ -6033,8 +6076,8 @@ enum {
 
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK     (0xfffff000)
-#define I915_LO_DISPBASE(val)  (val & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val)  (val & DISP_BASEADDR_MASK)
+#define I915_LO_DISPBASE(val)  ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val)  ((val) & DISP_BASEADDR_MASK)
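
Worked example of the base-address split: with DISP_BASEADDR_MASK == 0xfffff000, I915_HI_DISPBASE() keeps the page-aligned base and I915_LO_DISPBASE() the offset within the page.

u32 addr = 0x12345678;
u32 base = I915_HI_DISPBASE(addr);	/* 0x12345000 */
u32 low  = I915_LO_DISPBASE(addr);	/* 0x00000678 */
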
 
 /*
  * VBIOS flags
@@ -6064,7 +6107,7 @@ enum {
 
 /* Display B control */
 #define _DSPBCNTR              (dev_priv->info.display_mmio_offset + 0x71180)
-#define   DISPPLANE_ALPHA_TRANS_ENABLE         (1<<15)
+#define   DISPPLANE_ALPHA_TRANS_ENABLE         (1 << 15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE                0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY       0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY       (1)
@@ -6079,27 +6122,27 @@ enum {
 
 /* Sprite A control */
 #define _DVSACNTR              0x72180
-#define   DVS_ENABLE           (1<<31)
-#define   DVS_GAMMA_ENABLE     (1<<30)
-#define   DVS_YUV_RANGE_CORRECTION_DISABLE     (1<<27)
-#define   DVS_PIXFORMAT_MASK   (3<<25)
-#define   DVS_FORMAT_YUV422    (0<<25)
-#define   DVS_FORMAT_RGBX101010        (1<<25)
-#define   DVS_FORMAT_RGBX888   (2<<25)
-#define   DVS_FORMAT_RGBX161616        (3<<25)
-#define   DVS_PIPE_CSC_ENABLE   (1<<24)
-#define   DVS_SOURCE_KEY       (1<<22)
-#define   DVS_RGB_ORDER_XBGR   (1<<20)
-#define   DVS_YUV_FORMAT_BT709 (1<<18)
-#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define   DVS_YUV_ORDER_YUYV   (0<<16)
-#define   DVS_YUV_ORDER_UYVY   (1<<16)
-#define   DVS_YUV_ORDER_YVYU   (2<<16)
-#define   DVS_YUV_ORDER_VYUY   (3<<16)
-#define   DVS_ROTATE_180       (1<<15)
-#define   DVS_DEST_KEY         (1<<2)
-#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define   DVS_TILED            (1<<10)
+#define   DVS_ENABLE           (1 << 31)
+#define   DVS_GAMMA_ENABLE     (1 << 30)
+#define   DVS_YUV_RANGE_CORRECTION_DISABLE     (1 << 27)
+#define   DVS_PIXFORMAT_MASK   (3 << 25)
+#define   DVS_FORMAT_YUV422    (0 << 25)
+#define   DVS_FORMAT_RGBX101010        (1 << 25)
+#define   DVS_FORMAT_RGBX888   (2 << 25)
+#define   DVS_FORMAT_RGBX161616        (3 << 25)
+#define   DVS_PIPE_CSC_ENABLE   (1 << 24)
+#define   DVS_SOURCE_KEY       (1 << 22)
+#define   DVS_RGB_ORDER_XBGR   (1 << 20)
+#define   DVS_YUV_FORMAT_BT709 (1 << 18)
+#define   DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define   DVS_YUV_ORDER_YUYV   (0 << 16)
+#define   DVS_YUV_ORDER_UYVY   (1 << 16)
+#define   DVS_YUV_ORDER_YVYU   (2 << 16)
+#define   DVS_YUV_ORDER_VYUY   (3 << 16)
+#define   DVS_ROTATE_180       (1 << 15)
+#define   DVS_DEST_KEY         (1 << 2)
+#define   DVS_TRICKLE_FEED_DISABLE (1 << 14)
+#define   DVS_TILED            (1 << 10)
 #define _DVSALINOFF            0x72184
 #define _DVSASTRIDE            0x72188
 #define _DVSAPOS               0x7218c
@@ -6111,13 +6154,13 @@ enum {
 #define _DVSATILEOFF           0x721a4
 #define _DVSASURFLIVE          0x721ac
 #define _DVSASCALE             0x72204
-#define   DVS_SCALE_ENABLE     (1<<31)
-#define   DVS_FILTER_MASK      (3<<29)
-#define   DVS_FILTER_MEDIUM    (0<<29)
-#define   DVS_FILTER_ENHANCING (1<<29)
-#define   DVS_FILTER_SOFTENING (2<<29)
-#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define   DVS_SCALE_ENABLE     (1 << 31)
+#define   DVS_FILTER_MASK      (3 << 29)
+#define   DVS_FILTER_MEDIUM    (0 << 29)
+#define   DVS_FILTER_ENHANCING (1 << 29)
+#define   DVS_FILTER_SOFTENING (2 << 29)
+#define   DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define   DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
 #define _DVSAGAMC              0x72300
 
 #define _DVSBCNTR              0x73180
@@ -6148,31 +6191,31 @@ enum {
 #define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 
 #define _SPRA_CTL              0x70280
-#define   SPRITE_ENABLE                        (1<<31)
-#define   SPRITE_GAMMA_ENABLE          (1<<30)
-#define   SPRITE_YUV_RANGE_CORRECTION_DISABLE  (1<<28)
-#define   SPRITE_PIXFORMAT_MASK                (7<<25)
-#define   SPRITE_FORMAT_YUV422         (0<<25)
-#define   SPRITE_FORMAT_RGBX101010     (1<<25)
-#define   SPRITE_FORMAT_RGBX888                (2<<25)
-#define   SPRITE_FORMAT_RGBX161616     (3<<25)
-#define   SPRITE_FORMAT_YUV444         (4<<25)
-#define   SPRITE_FORMAT_XR_BGR101010   (5<<25) /* Extended range */
-#define   SPRITE_PIPE_CSC_ENABLE       (1<<24)
-#define   SPRITE_SOURCE_KEY            (1<<22)
-#define   SPRITE_RGB_ORDER_RGBX                (1<<20) /* only for 888 and 161616 */
-#define   SPRITE_YUV_TO_RGB_CSC_DISABLE        (1<<19)
-#define   SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709   (1<<18) /* 0 is BT601 */
-#define   SPRITE_YUV_BYTE_ORDER_MASK   (3<<16)
-#define   SPRITE_YUV_ORDER_YUYV                (0<<16)
-#define   SPRITE_YUV_ORDER_UYVY                (1<<16)
-#define   SPRITE_YUV_ORDER_YVYU                (2<<16)
-#define   SPRITE_YUV_ORDER_VYUY                (3<<16)
-#define   SPRITE_ROTATE_180            (1<<15)
-#define   SPRITE_TRICKLE_FEED_DISABLE  (1<<14)
-#define   SPRITE_INT_GAMMA_ENABLE      (1<<13)
-#define   SPRITE_TILED                 (1<<10)
-#define   SPRITE_DEST_KEY              (1<<2)
+#define   SPRITE_ENABLE                        (1 << 31)
+#define   SPRITE_GAMMA_ENABLE          (1 << 30)
+#define   SPRITE_YUV_RANGE_CORRECTION_DISABLE  (1 << 28)
+#define   SPRITE_PIXFORMAT_MASK                (7 << 25)
+#define   SPRITE_FORMAT_YUV422         (0 << 25)
+#define   SPRITE_FORMAT_RGBX101010     (1 << 25)
+#define   SPRITE_FORMAT_RGBX888                (2 << 25)
+#define   SPRITE_FORMAT_RGBX161616     (3 << 25)
+#define   SPRITE_FORMAT_YUV444         (4 << 25)
+#define   SPRITE_FORMAT_XR_BGR101010   (5 << 25) /* Extended range */
+#define   SPRITE_PIPE_CSC_ENABLE       (1 << 24)
+#define   SPRITE_SOURCE_KEY            (1 << 22)
+#define   SPRITE_RGB_ORDER_RGBX                (1 << 20) /* only for 888 and 161616 */
+#define   SPRITE_YUV_TO_RGB_CSC_DISABLE        (1 << 19)
+#define   SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709   (1 << 18) /* 0 is BT601 */
+#define   SPRITE_YUV_BYTE_ORDER_MASK   (3 << 16)
+#define   SPRITE_YUV_ORDER_YUYV                (0 << 16)
+#define   SPRITE_YUV_ORDER_UYVY                (1 << 16)
+#define   SPRITE_YUV_ORDER_YVYU                (2 << 16)
+#define   SPRITE_YUV_ORDER_VYUY                (3 << 16)
+#define   SPRITE_ROTATE_180            (1 << 15)
+#define   SPRITE_TRICKLE_FEED_DISABLE  (1 << 14)
+#define   SPRITE_INT_GAMMA_ENABLE      (1 << 13)
+#define   SPRITE_TILED                 (1 << 10)
+#define   SPRITE_DEST_KEY              (1 << 2)
 #define _SPRA_LINOFF           0x70284
 #define _SPRA_STRIDE           0x70288
 #define _SPRA_POS              0x7028c
@@ -6185,13 +6228,13 @@ enum {
 #define _SPRA_OFFSET           0x702a4
 #define _SPRA_SURFLIVE         0x702ac
 #define _SPRA_SCALE            0x70304
-#define   SPRITE_SCALE_ENABLE  (1<<31)
-#define   SPRITE_FILTER_MASK   (3<<29)
-#define   SPRITE_FILTER_MEDIUM (0<<29)
-#define   SPRITE_FILTER_ENHANCING      (1<<29)
-#define   SPRITE_FILTER_SOFTENING      (2<<29)
-#define   SPRITE_VERTICAL_OFFSET_HALF  (1<<28) /* must be enabled below */
-#define   SPRITE_VERTICAL_OFFSET_ENABLE        (1<<27)
+#define   SPRITE_SCALE_ENABLE  (1 << 31)
+#define   SPRITE_FILTER_MASK   (3 << 29)
+#define   SPRITE_FILTER_MEDIUM (0 << 29)
+#define   SPRITE_FILTER_ENHANCING      (1 << 29)
+#define   SPRITE_FILTER_SOFTENING      (2 << 29)
+#define   SPRITE_VERTICAL_OFFSET_HALF  (1 << 28) /* must be enabled below */
+#define   SPRITE_VERTICAL_OFFSET_ENABLE        (1 << 27)
 #define _SPRA_GAMC             0x70400
 
 #define _SPRB_CTL              0x71280
@@ -6225,28 +6268,28 @@ enum {
 #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
 #define _SPACNTR               (VLV_DISPLAY_BASE + 0x72180)
-#define   SP_ENABLE                    (1<<31)
-#define   SP_GAMMA_ENABLE              (1<<30)
-#define   SP_PIXFORMAT_MASK            (0xf<<26)
-#define   SP_FORMAT_YUV422             (0<<26)
-#define   SP_FORMAT_BGR565             (5<<26)
-#define   SP_FORMAT_BGRX8888           (6<<26)
-#define   SP_FORMAT_BGRA8888           (7<<26)
-#define   SP_FORMAT_RGBX1010102                (8<<26)
-#define   SP_FORMAT_RGBA1010102                (9<<26)
-#define   SP_FORMAT_RGBX8888           (0xe<<26)
-#define   SP_FORMAT_RGBA8888           (0xf<<26)
-#define   SP_ALPHA_PREMULTIPLY         (1<<23) /* CHV pipe B */
-#define   SP_SOURCE_KEY                        (1<<22)
-#define   SP_YUV_FORMAT_BT709          (1<<18)
-#define   SP_YUV_BYTE_ORDER_MASK       (3<<16)
-#define   SP_YUV_ORDER_YUYV            (0<<16)
-#define   SP_YUV_ORDER_UYVY            (1<<16)
-#define   SP_YUV_ORDER_YVYU            (2<<16)
-#define   SP_YUV_ORDER_VYUY            (3<<16)
-#define   SP_ROTATE_180                        (1<<15)
-#define   SP_TILED                     (1<<10)
-#define   SP_MIRROR                    (1<<8) /* CHV pipe B */
+#define   SP_ENABLE                    (1 << 31)
+#define   SP_GAMMA_ENABLE              (1 << 30)
+#define   SP_PIXFORMAT_MASK            (0xf << 26)
+#define   SP_FORMAT_YUV422             (0 << 26)
+#define   SP_FORMAT_BGR565             (5 << 26)
+#define   SP_FORMAT_BGRX8888           (6 << 26)
+#define   SP_FORMAT_BGRA8888           (7 << 26)
+#define   SP_FORMAT_RGBX1010102                (8 << 26)
+#define   SP_FORMAT_RGBA1010102                (9 << 26)
+#define   SP_FORMAT_RGBX8888           (0xe << 26)
+#define   SP_FORMAT_RGBA8888           (0xf << 26)
+#define   SP_ALPHA_PREMULTIPLY         (1 << 23) /* CHV pipe B */
+#define   SP_SOURCE_KEY                        (1 << 22)
+#define   SP_YUV_FORMAT_BT709          (1 << 18)
+#define   SP_YUV_BYTE_ORDER_MASK       (3 << 16)
+#define   SP_YUV_ORDER_YUYV            (0 << 16)
+#define   SP_YUV_ORDER_UYVY            (1 << 16)
+#define   SP_YUV_ORDER_YVYU            (2 << 16)
+#define   SP_YUV_ORDER_VYUY            (3 << 16)
+#define   SP_ROTATE_180                        (1 << 15)
+#define   SP_TILED                     (1 << 10)
+#define   SP_MIRROR                    (1 << 8) /* CHV pipe B */
 #define _SPALINOFF             (VLV_DISPLAY_BASE + 0x72184)
 #define _SPASTRIDE             (VLV_DISPLAY_BASE + 0x72188)
 #define _SPAPOS                        (VLV_DISPLAY_BASE + 0x7218c)
@@ -6257,7 +6300,7 @@ enum {
 #define _SPAKEYMAXVAL          (VLV_DISPLAY_BASE + 0x721a0)
 #define _SPATILEOFF            (VLV_DISPLAY_BASE + 0x721a4)
 #define _SPACONSTALPHA         (VLV_DISPLAY_BASE + 0x721a8)
-#define   SP_CONST_ALPHA_ENABLE                (1<<31)
+#define   SP_CONST_ALPHA_ENABLE                (1 << 31)
 #define _SPACLRC0              (VLV_DISPLAY_BASE + 0x721d0)
 #define   SP_CONTRAST(x)               ((x) << 18) /* u3.6 */
 #define   SP_BRIGHTNESS(x)             ((x) & 0xff) /* s8 */
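
The sprite color-correction fields are fixed point: contrast is unsigned u3.6 (so 1.0 == 1 << 6) and brightness is a signed 8-bit offset. A sketch of the identity setting:

u32 clrc0 = SP_CONTRAST(1 << 6) | SP_BRIGHTNESS(0);	/* contrast 1.0, no offset */
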
@@ -6349,40 +6392,40 @@ enum {
  * correctly map to the same formats in ICL, as long as bit 23 is set to 0
  */
 #define   PLANE_CTL_FORMAT_MASK                        (0xf << 24)
-#define   PLANE_CTL_FORMAT_YUV422              (  0 << 24)
-#define   PLANE_CTL_FORMAT_NV12                        (  1 << 24)
-#define   PLANE_CTL_FORMAT_XRGB_2101010                (  2 << 24)
-#define   PLANE_CTL_FORMAT_XRGB_8888           (  4 << 24)
-#define   PLANE_CTL_FORMAT_XRGB_16161616F      (  6 << 24)
-#define   PLANE_CTL_FORMAT_AYUV                        (  8 << 24)
-#define   PLANE_CTL_FORMAT_INDEXED             ( 12 << 24)
-#define   PLANE_CTL_FORMAT_RGB_565             ( 14 << 24)
+#define   PLANE_CTL_FORMAT_YUV422              (0 << 24)
+#define   PLANE_CTL_FORMAT_NV12                        (1 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_2101010                (2 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_8888           (4 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_16161616F      (6 << 24)
+#define   PLANE_CTL_FORMAT_AYUV                        (8 << 24)
+#define   PLANE_CTL_FORMAT_INDEXED             (12 << 24)
+#define   PLANE_CTL_FORMAT_RGB_565             (14 << 24)
 #define   ICL_PLANE_CTL_FORMAT_MASK            (0x1f << 23)
 #define   PLANE_CTL_PIPE_CSC_ENABLE            (1 << 23) /* Pre-GLK */
 #define   PLANE_CTL_KEY_ENABLE_MASK            (0x3 << 21)
-#define   PLANE_CTL_KEY_ENABLE_SOURCE          (  1 << 21)
-#define   PLANE_CTL_KEY_ENABLE_DESTINATION     (  2 << 21)
+#define   PLANE_CTL_KEY_ENABLE_SOURCE          (1 << 21)
+#define   PLANE_CTL_KEY_ENABLE_DESTINATION     (2 << 21)
 #define   PLANE_CTL_ORDER_BGRX                 (0 << 20)
 #define   PLANE_CTL_ORDER_RGBX                 (1 << 20)
 #define   PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709        (1 << 18)
 #define   PLANE_CTL_YUV422_ORDER_MASK          (0x3 << 16)
-#define   PLANE_CTL_YUV422_YUYV                        (  0 << 16)
-#define   PLANE_CTL_YUV422_UYVY                        (  1 << 16)
-#define   PLANE_CTL_YUV422_YVYU                        (  2 << 16)
-#define   PLANE_CTL_YUV422_VYUY                        (  3 << 16)
+#define   PLANE_CTL_YUV422_YUYV                        (0 << 16)
+#define   PLANE_CTL_YUV422_UYVY                        (1 << 16)
+#define   PLANE_CTL_YUV422_YVYU                        (2 << 16)
+#define   PLANE_CTL_YUV422_VYUY                        (3 << 16)
 #define   PLANE_CTL_DECOMPRESSION_ENABLE       (1 << 15)
 #define   PLANE_CTL_TRICKLE_FEED_DISABLE       (1 << 14)
 #define   PLANE_CTL_PLANE_GAMMA_DISABLE                (1 << 13) /* Pre-GLK */
 #define   PLANE_CTL_TILED_MASK                 (0x7 << 10)
-#define   PLANE_CTL_TILED_LINEAR               (  0 << 10)
-#define   PLANE_CTL_TILED_X                    (  1 << 10)
-#define   PLANE_CTL_TILED_Y                    (  4 << 10)
-#define   PLANE_CTL_TILED_YF                   (  5 << 10)
-#define   PLANE_CTL_FLIP_HORIZONTAL            (  1 << 8)
+#define   PLANE_CTL_TILED_LINEAR               (0 << 10)
+#define   PLANE_CTL_TILED_X                    (1 << 10)
+#define   PLANE_CTL_TILED_Y                    (4 << 10)
+#define   PLANE_CTL_TILED_YF                   (5 << 10)
+#define   PLANE_CTL_FLIP_HORIZONTAL            (1 << 8)
 #define   PLANE_CTL_ALPHA_MASK                 (0x3 << 4) /* Pre-GLK */
-#define   PLANE_CTL_ALPHA_DISABLE              (  0 << 4)
-#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY       (  2 << 4)
-#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY       (  3 << 4)
+#define   PLANE_CTL_ALPHA_DISABLE              (0 << 4)
+#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY       (2 << 4)
+#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY       (3 << 4)
 #define   PLANE_CTL_ROTATE_MASK                        0x3
 #define   PLANE_CTL_ROTATE_0                   0x0
 #define   PLANE_CTL_ROTATE_90                  0x1
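
A sketch of a SKL+ universal-plane control value composed from the fields above (X-tiled XRGB8888, no keying, no rotation); the plane-enable bit is defined outside this hunk and omitted here:

u32 plane_ctl = PLANE_CTL_FORMAT_XRGB_8888 |
		PLANE_CTL_ORDER_BGRX |
		PLANE_CTL_TILED_X |
		PLANE_CTL_ALPHA_DISABLE |
		PLANE_CTL_ROTATE_0;
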
@@ -6610,7 +6653,7 @@ enum {
 # define VFMUNIT_CLOCK_GATE_DISABLE            (1 << 11)
 
 #define FDI_PLL_FREQ_CTL        _MMIO(0x46030)
-#define  FDI_PLL_FREQ_CHANGE_REQUEST    (1<<24)
+#define  FDI_PLL_FREQ_CHANGE_REQUEST    (1 << 24)
 #define  FDI_PLL_FREQ_LOCK_LIMIT_MASK   0xfff00
 #define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK  0xff
 
@@ -6659,14 +6702,14 @@ enum {
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define _PFA_CTL_1               0x68080
 #define _PFB_CTL_1               0x68880
-#define  PF_ENABLE              (1<<31)
-#define  PF_PIPE_SEL_MASK_IVB  (3<<29)
-#define  PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
-#define  PF_FILTER_MASK                (3<<23)
-#define  PF_FILTER_PROGRAMMED  (0<<23)
-#define  PF_FILTER_MED_3x3     (1<<23)
-#define  PF_FILTER_EDGE_ENHANCE        (2<<23)
-#define  PF_FILTER_EDGE_SOFTEN (3<<23)
+#define  PF_ENABLE              (1 << 31)
+#define  PF_PIPE_SEL_MASK_IVB  (3 << 29)
+#define  PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define  PF_FILTER_MASK                (3 << 23)
+#define  PF_FILTER_PROGRAMMED  (0 << 23)
+#define  PF_FILTER_MED_3x3     (1 << 23)
+#define  PF_FILTER_EDGE_ENHANCE        (2 << 23)
+#define  PF_FILTER_EDGE_SOFTEN (3 << 23)
 #define _PFA_WIN_SZ            0x68074
 #define _PFB_WIN_SZ            0x68874
 #define _PFA_WIN_POS           0x68070
@@ -6684,7 +6727,7 @@ enum {
 
 #define _PSA_CTL               0x68180
 #define _PSB_CTL               0x68980
-#define PS_ENABLE              (1<<31)
+#define PS_ENABLE              (1 << 31)
 #define _PSA_WIN_SZ            0x68174
 #define _PSB_WIN_SZ            0x68974
 #define _PSA_WIN_POS           0x68170
@@ -6769,6 +6812,10 @@ enum {
 #define _PS_VPHASE_1B       0x68988
 #define _PS_VPHASE_2B       0x68A88
 #define _PS_VPHASE_1C       0x69188
+#define  PS_Y_PHASE(x)         ((x) << 16)
+#define  PS_UV_RGB_PHASE(x)    ((x) << 0)
+#define   PS_PHASE_MASK        (0x7fff << 1) /* u2.13 */
+#define   PS_PHASE_TRIP        (1 << 0)
 
 #define _PS_HPHASE_1A       0x68194
 #define _PS_HPHASE_2A       0x68294
@@ -6782,7 +6829,7 @@ enum {
 #define _PS_ECC_STAT_2B     0x68AD0
 #define _PS_ECC_STAT_1C     0x691D0
 
-#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define _ID(id, a, b) ((a) + (id) * ((b) - (a)))
 #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe,        \
                        _ID(id, _PS_1A_CTRL, _PS_2A_CTRL),       \
                        _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
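
_ID() linearly interpolates from the instance-0 and instance-1 addresses, so higher ids keep the same stride. Hypothetical compile-time checks (inside any function):

BUILD_BUG_ON(_ID(0, 0x100, 0x180) != 0x100);
BUILD_BUG_ON(_ID(1, 0x100, 0x180) != 0x180);
BUILD_BUG_ON(_ID(2, 0x100, 0x180) != 0x200);	/* b + (b - a) */
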
@@ -6863,37 +6910,37 @@ enum {
 #define DE_PIPEB_CRC_DONE      (1 << 10)
 #define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
 #define DE_PIPEA_VBLANK         (1 << 7)
-#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8*(pipe)))
+#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8 * (pipe)))
 #define DE_PIPEA_EVEN_FIELD     (1 << 6)
 #define DE_PIPEA_ODD_FIELD      (1 << 5)
 #define DE_PIPEA_LINE_COMPARE   (1 << 4)
 #define DE_PIPEA_VSYNC          (1 << 3)
 #define DE_PIPEA_CRC_DONE      (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8*(pipe)))
+#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8 * (pipe)))
 
 /* More Ivybridge lolz */
-#define DE_ERR_INT_IVB                 (1<<30)
-#define DE_GSE_IVB                     (1<<29)
-#define DE_PCH_EVENT_IVB               (1<<28)
-#define DE_DP_A_HOTPLUG_IVB            (1<<27)
-#define DE_AUX_CHANNEL_A_IVB           (1<<26)
-#define DE_EDP_PSR_INT_HSW             (1<<19)
-#define DE_SPRITEC_FLIP_DONE_IVB       (1<<14)
-#define DE_PLANEC_FLIP_DONE_IVB                (1<<13)
-#define DE_PIPEC_VBLANK_IVB            (1<<10)
-#define DE_SPRITEB_FLIP_DONE_IVB       (1<<9)
-#define DE_PLANEB_FLIP_DONE_IVB                (1<<8)
-#define DE_PIPEB_VBLANK_IVB            (1<<5)
-#define DE_SPRITEA_FLIP_DONE_IVB       (1<<4)
-#define DE_PLANEA_FLIP_DONE_IVB                (1<<3)
-#define DE_PLANE_FLIP_DONE_IVB(plane)  (1<< (3 + 5*(plane)))
-#define DE_PIPEA_VBLANK_IVB            (1<<0)
+#define DE_ERR_INT_IVB                 (1 << 30)
+#define DE_GSE_IVB                     (1 << 29)
+#define DE_PCH_EVENT_IVB               (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB            (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB           (1 << 26)
+#define DE_EDP_PSR_INT_HSW             (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB       (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB                (1 << 13)
+#define DE_PIPEC_VBLANK_IVB            (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB       (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB                (1 << 8)
+#define DE_PIPEB_VBLANK_IVB            (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB       (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB                (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane)  (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB            (1 << 0)
 #define DE_PIPE_VBLANK_IVB(pipe)       (1 << ((pipe) * 5))
 
 #define VLV_MASTER_IER                 _MMIO(0x4400c) /* Gunit master IER */
-#define   MASTER_INTERRUPT_ENABLE      (1<<31)
+#define   MASTER_INTERRUPT_ENABLE      (1 << 31)
 
 #define DEISR   _MMIO(0x44000)
 #define DEIMR   _MMIO(0x44004)
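
The per-pipe ILK display-engine bits repeat every 8 bit positions, which is what the parameterised macros encode: DE_PIPE_VBLANK(PIPE_A) is bit 7 and DE_PIPE_VBLANK(PIPE_B) bit 15. A sketch of a combined per-pipe mask:

u32 de_mask = DE_PIPE_VBLANK(pipe) |
	      DE_PIPE_CRC_DONE(pipe) |
	      DE_PIPE_FIFO_UNDERRUN(pipe);
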
@@ -6906,37 +6953,37 @@ enum {
 #define GTIER   _MMIO(0x4401c)
 
 #define GEN8_MASTER_IRQ                        _MMIO(0x44200)
-#define  GEN8_MASTER_IRQ_CONTROL       (1<<31)
-#define  GEN8_PCU_IRQ                  (1<<30)
-#define  GEN8_DE_PCH_IRQ               (1<<23)
-#define  GEN8_DE_MISC_IRQ              (1<<22)
-#define  GEN8_DE_PORT_IRQ              (1<<20)
-#define  GEN8_DE_PIPE_C_IRQ            (1<<18)
-#define  GEN8_DE_PIPE_B_IRQ            (1<<17)
-#define  GEN8_DE_PIPE_A_IRQ            (1<<16)
-#define  GEN8_DE_PIPE_IRQ(pipe)                (1<<(16+(pipe)))
-#define  GEN8_GT_VECS_IRQ              (1<<6)
-#define  GEN8_GT_GUC_IRQ               (1<<5)
-#define  GEN8_GT_PM_IRQ                        (1<<4)
-#define  GEN8_GT_VCS2_IRQ              (1<<3)
-#define  GEN8_GT_VCS1_IRQ              (1<<2)
-#define  GEN8_GT_BCS_IRQ               (1<<1)
-#define  GEN8_GT_RCS_IRQ               (1<<0)
+#define  GEN8_MASTER_IRQ_CONTROL       (1 << 31)
+#define  GEN8_PCU_IRQ                  (1 << 30)
+#define  GEN8_DE_PCH_IRQ               (1 << 23)
+#define  GEN8_DE_MISC_IRQ              (1 << 22)
+#define  GEN8_DE_PORT_IRQ              (1 << 20)
+#define  GEN8_DE_PIPE_C_IRQ            (1 << 18)
+#define  GEN8_DE_PIPE_B_IRQ            (1 << 17)
+#define  GEN8_DE_PIPE_A_IRQ            (1 << 16)
+#define  GEN8_DE_PIPE_IRQ(pipe)                (1 << (16 + (pipe)))
+#define  GEN8_GT_VECS_IRQ              (1 << 6)
+#define  GEN8_GT_GUC_IRQ               (1 << 5)
+#define  GEN8_GT_PM_IRQ                        (1 << 4)
+#define  GEN8_GT_VCS2_IRQ              (1 << 3)
+#define  GEN8_GT_VCS1_IRQ              (1 << 2)
+#define  GEN8_GT_BCS_IRQ               (1 << 1)
+#define  GEN8_GT_RCS_IRQ               (1 << 0)
 
 #define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
 #define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
 #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
 #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
 
-#define GEN9_GUC_TO_HOST_INT_EVENT     (1<<31)
-#define GEN9_GUC_EXEC_ERROR_EVENT      (1<<30)
-#define GEN9_GUC_DISPLAY_EVENT         (1<<29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT     (1<<28)
-#define GEN9_GUC_IOMMU_MSG_EVENT       (1<<27)
-#define GEN9_GUC_DB_RING_EVENT         (1<<26)
-#define GEN9_GUC_DMA_DONE_EVENT                (1<<25)
-#define GEN9_GUC_FATAL_ERROR_EVENT     (1<<24)
-#define GEN9_GUC_NOTIFICATION_EVENT    (1<<23)
+#define GEN9_GUC_TO_HOST_INT_EVENT     (1 << 31)
+#define GEN9_GUC_EXEC_ERROR_EVENT      (1 << 30)
+#define GEN9_GUC_DISPLAY_EVENT         (1 << 29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT     (1 << 28)
+#define GEN9_GUC_IOMMU_MSG_EVENT       (1 << 27)
+#define GEN9_GUC_DB_RING_EVENT         (1 << 26)
+#define GEN9_GUC_DMA_DONE_EVENT                (1 << 25)
+#define GEN9_GUC_FATAL_ERROR_EVENT     (1 << 24)
+#define GEN9_GUC_NOTIFICATION_EVENT    (1 << 23)
 
 #define GEN8_RCS_IRQ_SHIFT 0
 #define GEN8_BCS_IRQ_SHIFT 16
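
The four GEN8 GT interrupt banks sit 0x10 apart, and within a bank each engine's bits live at a fixed shift. A sketch reading the blitter bits, assuming bank 0 carries RCS and BCS as the shifts above suggest:

u32 iir = I915_READ(GEN8_GT_IIR(0));
u32 bcs = (iir >> GEN8_BCS_IRQ_SHIFT) & 0xffff;	/* BCS bits 31:16 */
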
@@ -6985,6 +7032,7 @@ enum {
 #define GEN8_DE_PORT_IMR _MMIO(0x44444)
 #define GEN8_DE_PORT_IIR _MMIO(0x44448)
 #define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define  ICL_AUX_CHANNEL_E             (1 << 29)
 #define  CNL_AUX_CHANNEL_F             (1 << 28)
 #define  GEN9_AUX_CHANNEL_D            (1 << 27)
 #define  GEN9_AUX_CHANNEL_C            (1 << 26)
@@ -7011,9 +7059,16 @@ enum {
 #define GEN8_PCU_IIR _MMIO(0x444e8)
 #define GEN8_PCU_IER _MMIO(0x444ec)
 
+#define GEN11_GU_MISC_ISR      _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR      _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR      _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER      _MMIO(0x444fc)
+#define  GEN11_GU_MISC_GSE     (1 << 27)
+
 #define GEN11_GFX_MSTR_IRQ             _MMIO(0x190010)
 #define  GEN11_MASTER_IRQ              (1 << 31)
 #define  GEN11_PCU_IRQ                 (1 << 30)
+#define  GEN11_GU_MISC_IRQ             (1 << 29)
 #define  GEN11_DISPLAY_IRQ             (1 << 16)
 #define  GEN11_GT_DW_IRQ(x)            (1 << (x))
 #define  GEN11_GT_DW1_IRQ              (1 << 1)
@@ -7024,11 +7079,40 @@ enum {
 #define  GEN11_AUDIO_CODEC_IRQ         (1 << 24)
 #define  GEN11_DE_PCH_IRQ              (1 << 23)
 #define  GEN11_DE_MISC_IRQ             (1 << 22)
+#define  GEN11_DE_HPD_IRQ              (1 << 21)
 #define  GEN11_DE_PORT_IRQ             (1 << 20)
 #define  GEN11_DE_PIPE_C               (1 << 18)
 #define  GEN11_DE_PIPE_B               (1 << 17)
 #define  GEN11_DE_PIPE_A               (1 << 16)
 
+#define GEN11_DE_HPD_ISR               _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR               _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR               _MMIO(0x44478)
+#define GEN11_DE_HPD_IER               _MMIO(0x4447c)
+#define  GEN11_TC4_HOTPLUG                     (1 << 19)
+#define  GEN11_TC3_HOTPLUG                     (1 << 18)
+#define  GEN11_TC2_HOTPLUG                     (1 << 17)
+#define  GEN11_TC1_HOTPLUG                     (1 << 16)
+#define  GEN11_DE_TC_HOTPLUG_MASK              (GEN11_TC4_HOTPLUG | \
+                                                GEN11_TC3_HOTPLUG | \
+                                                GEN11_TC2_HOTPLUG | \
+                                                GEN11_TC1_HOTPLUG)
+#define  GEN11_TBT4_HOTPLUG                    (1 << 3)
+#define  GEN11_TBT3_HOTPLUG                    (1 << 2)
+#define  GEN11_TBT2_HOTPLUG                    (1 << 1)
+#define  GEN11_TBT1_HOTPLUG                    (1 << 0)
+#define  GEN11_DE_TBT_HOTPLUG_MASK             (GEN11_TBT4_HOTPLUG | \
+                                                GEN11_TBT3_HOTPLUG | \
+                                                GEN11_TBT2_HOTPLUG | \
+                                                GEN11_TBT1_HOTPLUG)
+
+#define GEN11_TBT_HOTPLUG_CTL                          _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL                           _MMIO(0x44038)
+#define  GEN11_HOTPLUG_CTL_ENABLE(tc_port)             (8 << (tc_port) * 4)
+#define  GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port)                (2 << (tc_port) * 4)
+#define  GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port)       (1 << (tc_port) * 4)
+#define  GEN11_HOTPLUG_CTL_NO_DETECT(tc_port)          (0 << (tc_port) * 4)
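 /* Illustrative sketch, not part of this patch: GEN11_TC_HOTPLUG_CTL packs a
  * 4-bit control field per Type-C port, hence the (tc_port) * 4 shifts above.
  * Assuming the driver's usual I915_READ/I915_WRITE accessors, enabling
  * detection on one port would look roughly like:
  *
  *     u32 val = I915_READ(GEN11_TC_HOTPLUG_CTL);
  *     val |= GEN11_HOTPLUG_CTL_ENABLE(tc_port);
  *     I915_WRITE(GEN11_TC_HOTPLUG_CTL, val);
  */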
+
 #define GEN11_GT_INTR_DW0              _MMIO(0x190018)
 #define  GEN11_CSME                    (31)
 #define  GEN11_GUNIT                   (28)
@@ -7043,7 +7127,7 @@ enum {
 #define  GEN11_VECS(x)                 (31 - (x))
 #define  GEN11_VCS(x)                  (x)
 
-#define GEN11_GT_INTR_DW(x)            _MMIO(0x190018 + (x * 4))
+#define GEN11_GT_INTR_DW(x)            _MMIO(0x190018 + ((x) * 4))
 
 #define GEN11_INTR_IDENTITY_REG0       _MMIO(0x190060)
 #define GEN11_INTR_IDENTITY_REG1       _MMIO(0x190064)
@@ -7052,12 +7136,12 @@ enum {
 #define  GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
 #define  GEN11_INTR_ENGINE_INTR(x)     ((x) & 0xffff)
 
-#define GEN11_INTR_IDENTITY_REG(x)     _MMIO(0x190060 + (x * 4))
+#define GEN11_INTR_IDENTITY_REG(x)     _MMIO(0x190060 + ((x) * 4))
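 /* Illustrative sketch, not part of this patch: a dword read back from
  * GEN11_INTR_IDENTITY_REG() is decoded with the helpers above, roughly:
  *
  *     u32 ident = I915_READ(GEN11_INTR_IDENTITY_REG(bank));
  *     u8 instance = GEN11_INTR_ENGINE_INSTANCE(ident);
  *     u16 intr = GEN11_INTR_ENGINE_INTR(ident);
  */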
 
 #define GEN11_IIR_REG0_SELECTOR                _MMIO(0x190070)
 #define GEN11_IIR_REG1_SELECTOR                _MMIO(0x190074)
 
-#define GEN11_IIR_REG_SELECTOR(x)      _MMIO(0x190070 + (x * 4))
+#define GEN11_IIR_REG_SELECTOR(x)      _MMIO(0x190070 + ((x) * 4))
 
 #define GEN11_RENDER_COPY_INTR_ENABLE  _MMIO(0x190030)
 #define GEN11_VCS_VECS_INTR_ENABLE     _MMIO(0x190034)
@@ -7079,8 +7163,8 @@ enum {
 #define ILK_DISPLAY_CHICKEN2   _MMIO(0x42004)
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define  ILK_ELPIN_409_SELECT  (1 << 25)
-#define  ILK_DPARB_GATE        (1<<22)
-#define  ILK_VSDPFD_FULL       (1<<21)
+#define  ILK_DPARB_GATE        (1 << 22)
+#define  ILK_VSDPFD_FULL       (1 << 21)
 #define FUSE_STRAP                     _MMIO(0x42014)
 #define  ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
 #define  ILK_INTERNAL_DISPLAY_DISABLE  (1 << 30)
@@ -7130,31 +7214,31 @@ enum {
 #define CHICKEN_TRANS_A         0x420c0
 #define CHICKEN_TRANS_B         0x420c4
 #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define  VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
-#define  DDI_TRAINING_OVERRIDE_ENABLE  (1<<19)
-#define  DDI_TRAINING_OVERRIDE_VALUE   (1<<18)
-#define  DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
-#define  DDIE_TRAINING_OVERRIDE_VALUE  (1<<16) /* CHICKEN_TRANS_A only */
-#define  PSR2_ADD_VERTICAL_LINE_COUNT   (1<<15)
-#define  PSR2_VSC_ENABLE_PROG_HEADER    (1<<12)
+#define  VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
+#define  DDI_TRAINING_OVERRIDE_ENABLE  (1 << 19)
+#define  DDI_TRAINING_OVERRIDE_VALUE   (1 << 18)
+#define  DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
+#define  DDIE_TRAINING_OVERRIDE_VALUE  (1 << 16) /* CHICKEN_TRANS_A only */
+#define  PSR2_ADD_VERTICAL_LINE_COUNT   (1 << 15)
+#define  PSR2_VSC_ENABLE_PROG_HEADER    (1 << 12)
 
 #define DISP_ARB_CTL   _MMIO(0x45000)
-#define  DISP_FBC_MEMORY_WAKE          (1<<31)
-#define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
-#define  DISP_FBC_WM_DIS               (1<<15)
+#define  DISP_FBC_MEMORY_WAKE          (1 << 31)
+#define  DISP_TILE_SURFACE_SWIZZLING   (1 << 13)
+#define  DISP_FBC_WM_DIS               (1 << 15)
 #define DISP_ARB_CTL2  _MMIO(0x45004)
-#define  DISP_DATA_PARTITION_5_6       (1<<6)
-#define  DISP_IPC_ENABLE               (1<<3)
+#define  DISP_DATA_PARTITION_5_6       (1 << 6)
+#define  DISP_IPC_ENABLE               (1 << 3)
 #define DBUF_CTL       _MMIO(0x45008)
 #define DBUF_CTL_S1    _MMIO(0x45008)
 #define DBUF_CTL_S2    _MMIO(0x44FE8)
-#define  DBUF_POWER_REQUEST            (1<<31)
-#define  DBUF_POWER_STATE              (1<<30)
+#define  DBUF_POWER_REQUEST            (1 << 31)
+#define  DBUF_POWER_STATE              (1 << 30)
 #define GEN7_MSG_CTL   _MMIO(0x45010)
-#define  WAIT_FOR_PCH_RESET_ACK                (1<<1)
-#define  WAIT_FOR_PCH_FLR_ACK          (1<<0)
+#define  WAIT_FOR_PCH_RESET_ACK                (1 << 1)
+#define  WAIT_FOR_PCH_FLR_ACK          (1 << 0)
 #define HSW_NDE_RSTWRN_OPT     _MMIO(0x46408)
-#define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
+#define  RESET_PCH_HANDSHAKE_ENABLE    (1 << 4)
 
 #define GEN8_CHICKEN_DCPR_1            _MMIO(0x46430)
 #define   SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
@@ -7179,16 +7263,16 @@ enum {
 #define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz      (2 << 29)
 
 #define GEN7_FF_SLICE_CS_CHICKEN1      _MMIO(0x20e0)
-#define   GEN9_FFSC_PERCTX_PREEMPT_CTRL        (1<<14)
+#define   GEN9_FFSC_PERCTX_PREEMPT_CTRL        (1 << 14)
 
 #define FF_SLICE_CS_CHICKEN2                   _MMIO(0x20e4)
-#define  GEN9_TSG_BARRIER_ACK_DISABLE          (1<<8)
-#define  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE  (1<<10)
+#define  GEN9_TSG_BARRIER_ACK_DISABLE          (1 << 8)
+#define  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE  (1 << 10)
 
 #define GEN9_CS_DEBUG_MODE1            _MMIO(0x20ec)
 #define GEN9_CTX_PREEMPT_REG           _MMIO(0x2248)
 #define GEN8_CS_CHICKEN1               _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL           (1<<0)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL           (1 << 0)
 #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo)       (((hi) << 2) | ((lo) << 1))
 #define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL    GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
 #define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL  GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
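 /* Editorial note: e.g. GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL expands to
  * GEN9_PREEMPT_GPGPU_LEVEL(0, 1) == (0 << 2) | (1 << 1) == 0x2. */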
@@ -7197,22 +7281,27 @@ enum {
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1             _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE                (1<<14)
-#define COMMON_SLICE_CHICKEN2                  _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION    (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE  (1<<0)
+  #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC    ((1 << 10) | (1 << 26))
+  #define GEN9_RHWO_OPTIMIZATION_DISABLE       (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2                                  _MMIO(0x7014)
+  #define GEN9_PBE_COMPRESSED_HASH_SELECTION                   (1 << 13)
+  #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE       (1 << 12)
+  #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION             (1 << 8)
+  #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE                 (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3            _MMIO(0x7304)
+  #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC   (1 << 11)
 
 #define HIZ_CHICKEN                                    _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X                         (1<<15)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE   (1<<3)
+# define CHV_HZ_8X8_MODE_IN_1X                         (1 << 15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE   (1 << 3)
 
 #define GEN9_SLICE_COMMON_ECO_CHICKEN0         _MMIO(0x7308)
-#define  DISABLE_PIXEL_MASK_CAMMING            (1<<14)
+#define  DISABLE_PIXEL_MASK_CAMMING            (1 << 14)
 
 #define GEN9_SLICE_COMMON_ECO_CHICKEN1         _MMIO(0x731c)
+#define   GEN11_STATE_CACHE_REDIRECT_TO_CS     (1 << 11)
 
 #define GEN7_L3SQCREG1                         _MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
@@ -7230,7 +7319,7 @@ enum {
 
 #define GEN7_L3CNTLREG1                                _MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
-#define  GEN7_L3AGDIS                          (1<<19)
+#define  GEN7_L3AGDIS                          (1 << 19)
 #define GEN7_L3CNTLREG2                                _MMIO(0xB020)
 #define GEN7_L3CNTLREG3                                _MMIO(0xB024)
 
@@ -7240,7 +7329,7 @@ enum {
 #define   GEN11_I2M_WRITE_DISABLE              (1 << 28)
 
 #define GEN7_L3SQCREG4                         _MMIO(0xb034)
-#define  L3SQ_URB_READ_CAM_MATCH_DISABLE       (1<<27)
+#define  L3SQ_URB_READ_CAM_MATCH_DISABLE       (1 << 27)
 
 #define GEN8_L3SQCREG4                         _MMIO(0xb118)
 #define  GEN11_LQSC_CLEAN_EVICT_DISABLE                (1 << 6)
@@ -7251,12 +7340,12 @@ enum {
 #define HDC_CHICKEN0                           _MMIO(0x7300)
 #define CNL_HDC_CHICKEN0                       _MMIO(0xE5F0)
 #define ICL_HDC_MODE                           _MMIO(0xE5F4)
-#define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE        (1<<15)
-#define  HDC_FENCE_DEST_SLM_DISABLE            (1<<14)
-#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED       (1<<11)
-#define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT   (1<<5)
-#define  HDC_FORCE_NON_COHERENT                        (1<<4)
-#define  HDC_BARRIER_PERFORMANCE_DISABLE       (1<<10)
+#define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE        (1 << 15)
+#define  HDC_FENCE_DEST_SLM_DISABLE            (1 << 14)
+#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED       (1 << 11)
+#define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT   (1 << 5)
+#define  HDC_FORCE_NON_COHERENT                        (1 << 4)
+#define  HDC_BARRIER_PERFORMANCE_DISABLE       (1 << 10)
 
 #define GEN8_HDC_CHICKEN1                      _MMIO(0x7304)
 
@@ -7269,13 +7358,13 @@ enum {
 
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         _MMIO(0x9030)
-#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
+#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1 << 11)
 
 #define HSW_SCRATCH1                           _MMIO(0xb038)
-#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1<<27)
+#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1 << 27)
 
 #define BDW_SCRATCH1                                   _MMIO(0xb11c)
-#define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE     (1<<2)
+#define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE     (1 << 2)
 
 /* PCH */
 
@@ -7374,8 +7463,8 @@ enum {
 #define SDEIER  _MMIO(0xc400c)
 
 #define SERR_INT                       _MMIO(0xc4040)
-#define  SERR_INT_POISON               (1<<31)
-#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1<<((pipe)*3))
+#define  SERR_INT_POISON               (1 << 31)
+#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1 << ((pipe) * 3))
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG               _MMIO(0xc4030)  /* SHOTPLUG_CTL */
@@ -7444,46 +7533,46 @@ enum {
 
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
-#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
 #define _PCH_FPA0                0xc6040
-#define  FP_CB_TUNE            (0x3<<22)
+#define  FP_CB_TUNE            (0x3 << 22)
 #define _PCH_FPA1                0xc6044
 #define _PCH_FPB0                0xc6048
 #define _PCH_FPB1                0xc604c
-#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
 #define PCH_DPLL_TEST           _MMIO(0xc606c)
 
 #define PCH_DREF_CONTROL        _MMIO(0xC6200)
 #define  DREF_CONTROL_MASK      0x7fc3
-#define  DREF_CPU_SOURCE_OUTPUT_DISABLE         (0<<13)
-#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD      (2<<13)
-#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD       (3<<13)
-#define  DREF_CPU_SOURCE_OUTPUT_MASK           (3<<13)
-#define  DREF_SSC_SOURCE_DISABLE                (0<<11)
-#define  DREF_SSC_SOURCE_ENABLE                 (2<<11)
-#define  DREF_SSC_SOURCE_MASK                  (3<<11)
-#define  DREF_NONSPREAD_SOURCE_DISABLE          (0<<9)
-#define  DREF_NONSPREAD_CK505_ENABLE           (1<<9)
-#define  DREF_NONSPREAD_SOURCE_ENABLE           (2<<9)
-#define  DREF_NONSPREAD_SOURCE_MASK            (3<<9)
-#define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0<<7)
-#define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2<<7)
-#define  DREF_SUPERSPREAD_SOURCE_MASK          (3<<7)
-#define  DREF_SSC4_DOWNSPREAD                   (0<<6)
-#define  DREF_SSC4_CENTERSPREAD                 (1<<6)
-#define  DREF_SSC1_DISABLE                      (0<<1)
-#define  DREF_SSC1_ENABLE                       (1<<1)
+#define  DREF_CPU_SOURCE_OUTPUT_DISABLE         (0 << 13)
+#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD      (2 << 13)
+#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD       (3 << 13)
+#define  DREF_CPU_SOURCE_OUTPUT_MASK           (3 << 13)
+#define  DREF_SSC_SOURCE_DISABLE                (0 << 11)
+#define  DREF_SSC_SOURCE_ENABLE                 (2 << 11)
+#define  DREF_SSC_SOURCE_MASK                  (3 << 11)
+#define  DREF_NONSPREAD_SOURCE_DISABLE          (0 << 9)
+#define  DREF_NONSPREAD_CK505_ENABLE           (1 << 9)
+#define  DREF_NONSPREAD_SOURCE_ENABLE           (2 << 9)
+#define  DREF_NONSPREAD_SOURCE_MASK            (3 << 9)
+#define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0 << 7)
+#define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2 << 7)
+#define  DREF_SUPERSPREAD_SOURCE_MASK          (3 << 7)
+#define  DREF_SSC4_DOWNSPREAD                   (0 << 6)
+#define  DREF_SSC4_CENTERSPREAD                 (1 << 6)
+#define  DREF_SSC1_DISABLE                      (0 << 1)
+#define  DREF_SSC1_ENABLE                       (1 << 1)
 #define  DREF_SSC4_DISABLE                      (0)
 #define  DREF_SSC4_ENABLE                       (1)
 
 #define PCH_RAWCLK_FREQ         _MMIO(0xc6204)
 #define  FDL_TP1_TIMER_SHIFT    12
-#define  FDL_TP1_TIMER_MASK     (3<<12)
+#define  FDL_TP1_TIMER_MASK     (3 << 12)
 #define  FDL_TP2_TIMER_SHIFT    10
-#define  FDL_TP2_TIMER_MASK     (3<<10)
+#define  FDL_TP2_TIMER_MASK     (3 << 10)
 #define  RAWCLK_FREQ_MASK       0x3ff
 #define  CNP_RAWCLK_DIV_MASK   (0x3ff << 16)
 #define  CNP_RAWCLK_DIV(div)   ((div) << 16)
@@ -7520,7 +7609,7 @@ enum {
 #define  TRANS_VBLANK_END_SHIFT                16
 #define  TRANS_VBLANK_START_SHIFT      0
 #define _PCH_TRANS_VSYNC_A             0xe0014
-#define  TRANS_VSYNC_END_SHIFT         16
+#define  TRANS_VSYNC_END_SHIFT         16
 #define  TRANS_VSYNC_START_SHIFT       0
 #define _PCH_TRANS_VSYNCSHIFT_A                0xe0028
 
@@ -7608,7 +7697,7 @@ enum {
 #define HSW_TVIDEO_DIP_VSC_DATA(trans, i)      _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
 
 #define _HSW_STEREO_3D_CTL_A           0x70020
-#define   S3D_ENABLE                   (1<<31)
+#define   S3D_ENABLE                   (1 << 31)
 #define _HSW_STEREO_3D_CTL_B           0x71020
 
 #define HSW_STEREO_3D_CTL(trans)       _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@@ -7651,156 +7740,156 @@ enum {
 #define _PCH_TRANSBCONF              0xf1008
 #define PCH_TRANSCONF(pipe)    _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
 #define LPT_TRANSCONF          PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define  TRANS_DISABLE          (0<<31)
-#define  TRANS_ENABLE           (1<<31)
-#define  TRANS_STATE_MASK       (1<<30)
-#define  TRANS_STATE_DISABLE    (0<<30)
-#define  TRANS_STATE_ENABLE     (1<<30)
-#define  TRANS_FSYNC_DELAY_HB1  (0<<27)
-#define  TRANS_FSYNC_DELAY_HB2  (1<<27)
-#define  TRANS_FSYNC_DELAY_HB3  (2<<27)
-#define  TRANS_FSYNC_DELAY_HB4  (3<<27)
-#define  TRANS_INTERLACE_MASK   (7<<21)
-#define  TRANS_PROGRESSIVE      (0<<21)
-#define  TRANS_INTERLACED       (3<<21)
-#define  TRANS_LEGACY_INTERLACED_ILK (2<<21)
-#define  TRANS_8BPC             (0<<5)
-#define  TRANS_10BPC            (1<<5)
-#define  TRANS_6BPC             (2<<5)
-#define  TRANS_12BPC            (3<<5)
+#define  TRANS_DISABLE          (0 << 31)
+#define  TRANS_ENABLE           (1 << 31)
+#define  TRANS_STATE_MASK       (1 << 30)
+#define  TRANS_STATE_DISABLE    (0 << 30)
+#define  TRANS_STATE_ENABLE     (1 << 30)
+#define  TRANS_FSYNC_DELAY_HB1  (0 << 27)
+#define  TRANS_FSYNC_DELAY_HB2  (1 << 27)
+#define  TRANS_FSYNC_DELAY_HB3  (2 << 27)
+#define  TRANS_FSYNC_DELAY_HB4  (3 << 27)
+#define  TRANS_INTERLACE_MASK   (7 << 21)
+#define  TRANS_PROGRESSIVE      (0 << 21)
+#define  TRANS_INTERLACED       (3 << 21)
+#define  TRANS_LEGACY_INTERLACED_ILK (2 << 21)
+#define  TRANS_8BPC             (0 << 5)
+#define  TRANS_10BPC            (1 << 5)
+#define  TRANS_6BPC             (2 << 5)
+#define  TRANS_12BPC            (3 << 5)
 
 #define _TRANSA_CHICKEN1        0xf0060
 #define _TRANSB_CHICKEN1        0xf1060
 #define TRANS_CHICKEN1(pipe)   _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE    (1<<10)
-#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE     (1<<4)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE    (1 << 10)
+#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE     (1 << 4)
 #define _TRANSA_CHICKEN2        0xf0064
 #define _TRANSB_CHICKEN2        0xf1064
 #define TRANS_CHICKEN2(pipe)   _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define  TRANS_CHICKEN2_TIMING_OVERRIDE                        (1<<31)
-#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED          (1<<29)
-#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK         (3<<27)
-#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER     (1<<26)
-#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH  (1<<25)
+#define  TRANS_CHICKEN2_TIMING_OVERRIDE                        (1 << 31)
+#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED          (1 << 29)
+#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK         (3 << 27)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER     (1 << 26)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH  (1 << 25)
 
 #define SOUTH_CHICKEN1         _MMIO(0xc2000)
 #define  FDIA_PHASE_SYNC_SHIFT_OVR     19
 #define  FDIA_PHASE_SYNC_SHIFT_EN      18
-#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
 #define  FDI_BC_BIFURCATION_SELECT     (1 << 12)
 #define  CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
 #define  CHASSIS_CLK_REQ_DURATION(x)   ((x) << 8)
-#define  SPT_PWM_GRANULARITY           (1<<0)
+#define  SPT_PWM_GRANULARITY           (1 << 0)
 #define SOUTH_CHICKEN2         _MMIO(0xc2004)
-#define  FDI_MPHY_IOSFSB_RESET_STATUS  (1<<13)
-#define  FDI_MPHY_IOSFSB_RESET_CTL     (1<<12)
-#define  LPT_PWM_GRANULARITY           (1<<5)
-#define  DPLS_EDP_PPS_FIX_DIS          (1<<0)
+#define  FDI_MPHY_IOSFSB_RESET_STATUS  (1 << 13)
+#define  FDI_MPHY_IOSFSB_RESET_CTL     (1 << 12)
+#define  LPT_PWM_GRANULARITY           (1 << 5)
+#define  DPLS_EDP_PPS_FIX_DIS          (1 << 0)
 
 #define _FDI_RXA_CHICKEN        0xc200c
 #define _FDI_RXB_CHICKEN        0xc2010
-#define  FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
-#define  FDI_RX_PHASE_SYNC_POINTER_EN  (1<<0)
+#define  FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define  FDI_RX_PHASE_SYNC_POINTER_EN  (1 << 0)
 #define FDI_RX_CHICKEN(pipe)   _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D    _MMIO(0xc2020)
-#define  PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
-#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
-#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define  CNP_PWM_CGE_GATING_DISABLE (1<<13)
-#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
+#define  PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define  CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1 << 12)
 
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL            0x60100
 #define _FDI_TXB_CTL            0x61100
 #define FDI_TX_CTL(pipe)       _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
-#define  FDI_TX_DISABLE         (0<<31)
-#define  FDI_TX_ENABLE          (1<<31)
-#define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
-#define  FDI_LINK_TRAIN_PATTERN_2       (1<<28)
-#define  FDI_LINK_TRAIN_PATTERN_IDLE    (2<<28)
-#define  FDI_LINK_TRAIN_NONE            (3<<28)
-#define  FDI_LINK_TRAIN_VOLTAGE_0_4V    (0<<25)
-#define  FDI_LINK_TRAIN_VOLTAGE_0_6V    (1<<25)
-#define  FDI_LINK_TRAIN_VOLTAGE_0_8V    (2<<25)
-#define  FDI_LINK_TRAIN_VOLTAGE_1_2V    (3<<25)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+#define  FDI_TX_DISABLE         (0 << 31)
+#define  FDI_TX_ENABLE          (1 << 31)
+#define  FDI_LINK_TRAIN_PATTERN_1       (0 << 28)
+#define  FDI_LINK_TRAIN_PATTERN_2       (1 << 28)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE    (2 << 28)
+#define  FDI_LINK_TRAIN_NONE            (3 << 28)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_4V    (0 << 25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_6V    (1 << 25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_8V    (2 << 25)
+#define  FDI_LINK_TRAIN_VOLTAGE_1_2V    (3 << 25)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2 << 22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3 << 22)
 /* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
    SNB has different settings. */
 /* SNB A-stepping */
-#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A                (0x38<<22)
-#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A                (0x02<<22)
-#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01<<22)
-#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A                (0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A                (0x38 << 22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A                (0x02 << 22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01 << 22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A                (0x0 << 22)
 /* SNB B-stepping */
-#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B                (0x0<<22)
-#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B                (0x3a<<22)
-#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B      (0x39<<22)
-#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B                (0x38<<22)
-#define  FDI_LINK_TRAIN_VOL_EMP_MASK           (0x3f<<22)
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B                (0x0 << 22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B                (0x3a << 22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B      (0x39 << 22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B                (0x38 << 22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK           (0x3f << 22)
 #define  FDI_DP_PORT_WIDTH_SHIFT               19
 #define  FDI_DP_PORT_WIDTH_MASK                        (7 << FDI_DP_PORT_WIDTH_SHIFT)
 #define  FDI_DP_PORT_WIDTH(width)           (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
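 /* Editorial note: the field stores lane count minus one, so e.g.
  * FDI_DP_PORT_WIDTH(4) programs the value 3 at bit 19. */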
-#define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
+#define  FDI_TX_ENHANCE_FRAME_ENABLE    (1 << 18)
 /* Ironlake: hardwired to 1 */
-#define  FDI_TX_PLL_ENABLE              (1<<14)
+#define  FDI_TX_PLL_ENABLE              (1 << 14)
 
 /* Ivybridge uses different bit positions */
-#define  FDI_LINK_TRAIN_PATTERN_1_IVB       (0<<8)
-#define  FDI_LINK_TRAIN_PATTERN_2_IVB       (1<<8)
-#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB    (2<<8)
-#define  FDI_LINK_TRAIN_NONE_IVB            (3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_1_IVB       (0 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_2_IVB       (1 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB    (2 << 8)
+#define  FDI_LINK_TRAIN_NONE_IVB            (3 << 8)
 
 /* both Tx and Rx */
-#define  FDI_COMPOSITE_SYNC            (1<<11)
-#define  FDI_LINK_TRAIN_AUTO           (1<<10)
-#define  FDI_SCRAMBLING_ENABLE          (0<<7)
-#define  FDI_SCRAMBLING_DISABLE         (1<<7)
+#define  FDI_COMPOSITE_SYNC            (1 << 11)
+#define  FDI_LINK_TRAIN_AUTO           (1 << 10)
+#define  FDI_SCRAMBLING_ENABLE          (0 << 7)
+#define  FDI_SCRAMBLING_DISABLE         (1 << 7)
 
 /* FDI_RX: FDI_X is hard-wired to Transcoder_X */
 #define _FDI_RXA_CTL             0xf000c
 #define _FDI_RXB_CTL             0xf100c
 #define FDI_RX_CTL(pipe)       _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
-#define  FDI_RX_ENABLE          (1<<31)
+#define  FDI_RX_ENABLE          (1 << 31)
 /* link training and DP port width bits are the same as for FDI_TX */
-#define  FDI_FS_ERRC_ENABLE            (1<<27)
-#define  FDI_FE_ERRC_ENABLE            (1<<26)
-#define  FDI_RX_POLARITY_REVERSED_LPT  (1<<16)
-#define  FDI_8BPC                       (0<<16)
-#define  FDI_10BPC                      (1<<16)
-#define  FDI_6BPC                       (2<<16)
-#define  FDI_12BPC                      (3<<16)
-#define  FDI_RX_LINK_REVERSAL_OVERRIDE  (1<<15)
-#define  FDI_DMI_LINK_REVERSE_MASK      (1<<14)
-#define  FDI_RX_PLL_ENABLE              (1<<13)
-#define  FDI_FS_ERR_CORRECT_ENABLE      (1<<11)
-#define  FDI_FE_ERR_CORRECT_ENABLE      (1<<10)
-#define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
-#define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
-#define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
-#define  FDI_PCDCLK                    (1<<4)
+#define  FDI_FS_ERRC_ENABLE            (1 << 27)
+#define  FDI_FE_ERRC_ENABLE            (1 << 26)
+#define  FDI_RX_POLARITY_REVERSED_LPT  (1 << 16)
+#define  FDI_8BPC                       (0 << 16)
+#define  FDI_10BPC                      (1 << 16)
+#define  FDI_6BPC                       (2 << 16)
+#define  FDI_12BPC                      (3 << 16)
+#define  FDI_RX_LINK_REVERSAL_OVERRIDE  (1 << 15)
+#define  FDI_DMI_LINK_REVERSE_MASK      (1 << 14)
+#define  FDI_RX_PLL_ENABLE              (1 << 13)
+#define  FDI_FS_ERR_CORRECT_ENABLE      (1 << 11)
+#define  FDI_FE_ERR_CORRECT_ENABLE      (1 << 10)
+#define  FDI_FS_ERR_REPORT_ENABLE       (1 << 9)
+#define  FDI_FE_ERR_REPORT_ENABLE       (1 << 8)
+#define  FDI_RX_ENHANCE_FRAME_ENABLE    (1 << 6)
+#define  FDI_PCDCLK                    (1 << 4)
 /* CPT */
-#define  FDI_AUTO_TRAINING                     (1<<10)
-#define  FDI_LINK_TRAIN_PATTERN_1_CPT          (0<<8)
-#define  FDI_LINK_TRAIN_PATTERN_2_CPT          (1<<8)
-#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT       (2<<8)
-#define  FDI_LINK_TRAIN_NORMAL_CPT             (3<<8)
-#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT       (3<<8)
+#define  FDI_AUTO_TRAINING                     (1 << 10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT          (0 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT          (1 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT       (2 << 8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT             (3 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT       (3 << 8)
 
 #define _FDI_RXA_MISC                  0xf0010
 #define _FDI_RXB_MISC                  0xf1010
-#define  FDI_RX_PWRDN_LANE1_MASK       (3<<26)
-#define  FDI_RX_PWRDN_LANE1_VAL(x)     ((x)<<26)
-#define  FDI_RX_PWRDN_LANE0_MASK       (3<<24)
-#define  FDI_RX_PWRDN_LANE0_VAL(x)     ((x)<<24)
-#define  FDI_RX_TP1_TO_TP2_48          (2<<20)
-#define  FDI_RX_TP1_TO_TP2_64          (3<<20)
-#define  FDI_RX_FDI_DELAY_90           (0x90<<0)
+#define  FDI_RX_PWRDN_LANE1_MASK       (3 << 26)
+#define  FDI_RX_PWRDN_LANE1_VAL(x)     ((x) << 26)
+#define  FDI_RX_PWRDN_LANE0_MASK       (3 << 24)
+#define  FDI_RX_PWRDN_LANE0_VAL(x)     ((x) << 24)
+#define  FDI_RX_TP1_TO_TP2_48          (2 << 20)
+#define  FDI_RX_TP1_TO_TP2_64          (3 << 20)
+#define  FDI_RX_FDI_DELAY_90           (0x90 << 0)
 #define FDI_RX_MISC(pipe)      _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 
 #define _FDI_RXA_TUSIZE1        0xf0030
@@ -7811,17 +7900,17 @@ enum {
 #define FDI_RX_TUSIZE2(pipe)   _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
 /* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN         (1<<10)
-#define FDI_RX_SYMBOL_LOCK              (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK                 (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL     (1<<7)
-#define FDI_RX_FS_CODE_ERR              (1<<6)
-#define FDI_RX_FE_CODE_ERR              (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE    (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL           (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW      (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW     (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1<<0)
+#define FDI_RX_INTER_LANE_ALIGN         (1 << 10)
+#define FDI_RX_SYMBOL_LOCK              (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK                 (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL     (1 << 7)
+#define FDI_RX_FS_CODE_ERR              (1 << 6)
+#define FDI_RX_FE_CODE_ERR              (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE    (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL           (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW      (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW     (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1 << 0)
 
 #define _FDI_RXA_IIR            0xf0014
 #define _FDI_RXA_IMR            0xf0018
@@ -7867,71 +7956,58 @@ enum {
 #define PCH_DP_AUX_CH_DATA(aux_ch, i)  _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
 /* CPT */
-#define  PORT_TRANS_A_SEL_CPT  0
-#define  PORT_TRANS_B_SEL_CPT  (1<<29)
-#define  PORT_TRANS_C_SEL_CPT  (2<<29)
-#define  PORT_TRANS_SEL_MASK   (3<<29)
-#define  PORT_TRANS_SEL_CPT(pipe)      ((pipe) << 29)
-#define  PORT_TO_PIPE(val)     (((val) & (1<<30)) >> 30)
-#define  PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define  SDVO_PORT_TO_PIPE_CHV(val)    (((val) & (3<<24)) >> 24)
-#define  DP_PORT_TO_PIPE_CHV(val)      (((val) & (3<<16)) >> 16)
-
 #define _TRANS_DP_CTL_A                0xe0300
 #define _TRANS_DP_CTL_B                0xe1300
 #define _TRANS_DP_CTL_C                0xe2300
 #define TRANS_DP_CTL(pipe)     _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define  TRANS_DP_OUTPUT_ENABLE        (1<<31)
-#define  TRANS_DP_PORT_SEL_B   (0<<29)
-#define  TRANS_DP_PORT_SEL_C   (1<<29)
-#define  TRANS_DP_PORT_SEL_D   (2<<29)
-#define  TRANS_DP_PORT_SEL_NONE        (3<<29)
-#define  TRANS_DP_PORT_SEL_MASK        (3<<29)
-#define  TRANS_DP_PIPE_TO_PORT(val)    ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
-#define  TRANS_DP_AUDIO_ONLY   (1<<26)
-#define  TRANS_DP_ENH_FRAMING  (1<<18)
-#define  TRANS_DP_8BPC         (0<<9)
-#define  TRANS_DP_10BPC                (1<<9)
-#define  TRANS_DP_6BPC         (2<<9)
-#define  TRANS_DP_12BPC                (3<<9)
-#define  TRANS_DP_BPC_MASK     (3<<9)
-#define  TRANS_DP_VSYNC_ACTIVE_HIGH    (1<<4)
+#define  TRANS_DP_OUTPUT_ENABLE        (1 << 31)
+#define  TRANS_DP_PORT_SEL_MASK                (3 << 29)
+#define  TRANS_DP_PORT_SEL_NONE                (3 << 29)
+#define  TRANS_DP_PORT_SEL(port)       (((port) - PORT_B) << 29)
+#define  TRANS_DP_AUDIO_ONLY   (1 << 26)
+#define  TRANS_DP_ENH_FRAMING  (1 << 18)
+#define  TRANS_DP_8BPC         (0 << 9)
+#define  TRANS_DP_10BPC                (1 << 9)
+#define  TRANS_DP_6BPC         (2 << 9)
+#define  TRANS_DP_12BPC                (3 << 9)
+#define  TRANS_DP_BPC_MASK     (3 << 9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH    (1 << 4)
 #define  TRANS_DP_VSYNC_ACTIVE_LOW     0
-#define  TRANS_DP_HSYNC_ACTIVE_HIGH    (1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH    (1 << 3)
 #define  TRANS_DP_HSYNC_ACTIVE_LOW     0
-#define  TRANS_DP_SYNC_MASK    (3<<3)
+#define  TRANS_DP_SYNC_MASK    (3 << 3)
 
 /* SNB eDP training params */
 /* SNB A-stepping */
-#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A                (0x38<<22)
-#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A                (0x02<<22)
-#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01<<22)
-#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A                (0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A                (0x38 << 22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A                (0x02 << 22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01 << 22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A                (0x0 << 22)
 /* SNB B-stepping */
-#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B    (0x0<<22)
-#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B      (0x1<<22)
-#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B    (0x3a<<22)
-#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B  (0x39<<22)
-#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38<<22)
-#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B    (0x0 << 22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B      (0x1 << 22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B    (0x3a << 22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B  (0x39 << 22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38 << 22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f << 22)
 
 /* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x3e <<22)
+#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x3e << 22)
 
 /* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 <<22)
+#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 << 22)
 
-#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f << 22)
 
 #define  VLV_PMWGICZ                           _MMIO(0x1300a4)
 
@@ -7978,7 +8054,7 @@ enum {
 #define   FORCEWAKE_KERNEL_FALLBACK            BIT(15)
 #define  FORCEWAKE_MT_ACK                      _MMIO(0x130040)
 #define  ECOBUS                                        _MMIO(0xa180)
-#define    FORCEWAKE_MT_ENABLE                 (1<<5)
+#define    FORCEWAKE_MT_ENABLE                 (1 << 5)
 #define  VLV_SPAREG2H                          _MMIO(0xA194)
 #define  GEN9_PWRGT_DOMAIN_STATUS              _MMIO(0xA2A0)
 #define   GEN9_PWRGT_MEDIA_STATUS_MASK         (1 << 0)
@@ -7987,13 +8063,13 @@ enum {
 #define  GTFIFODBG                             _MMIO(0x120000)
 #define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV   (0x1f << 20)
 #define    GT_FIFO_FREE_ENTRIES_CHV            (0x7f << 13)
-#define    GT_FIFO_SBDROPERR                   (1<<6)
-#define    GT_FIFO_BLOBDROPERR                 (1<<5)
-#define    GT_FIFO_SB_READ_ABORTERR            (1<<4)
-#define    GT_FIFO_DROPERR                     (1<<3)
-#define    GT_FIFO_OVFERR                      (1<<2)
-#define    GT_FIFO_IAWRERR                     (1<<1)
-#define    GT_FIFO_IARDERR                     (1<<0)
+#define    GT_FIFO_SBDROPERR                   (1 << 6)
+#define    GT_FIFO_BLOBDROPERR                 (1 << 5)
+#define    GT_FIFO_SB_READ_ABORTERR            (1 << 4)
+#define    GT_FIFO_DROPERR                     (1 << 3)
+#define    GT_FIFO_OVFERR                      (1 << 2)
+#define    GT_FIFO_IAWRERR                     (1 << 1)
+#define    GT_FIFO_IARDERR                     (1 << 0)
 
 #define  GTFIFOCTL                             _MMIO(0x120008)
 #define    GT_FIFO_FREE_ENTRIES_MASK           0x7f
@@ -8027,37 +8103,37 @@ enum {
 # define GEN6_OACSUNIT_CLOCK_GATE_DISABLE              (1 << 20)
 
 #define GEN7_UCGCTL4                           _MMIO(0x940c)
-#define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1<<25)
-#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE     (1<<14)
+#define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1 << 25)
+#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE     (1 << 14)
 
 #define GEN6_RCGCTL1                           _MMIO(0x9410)
 #define GEN6_RCGCTL2                           _MMIO(0x9414)
 #define GEN6_RSTCTL                            _MMIO(0x9420)
 
 #define GEN8_UCGCTL6                           _MMIO(0x9430)
-#define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE     (1<<24)
-#define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE      (1<<14)
-#define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+#define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE     (1 << 24)
+#define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE      (1 << 14)
+#define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
 
 #define GEN6_GFXPAUSE                          _MMIO(0xA000)
 #define GEN6_RPNSWREQ                          _MMIO(0xA008)
-#define   GEN6_TURBO_DISABLE                   (1<<31)
-#define   GEN6_FREQUENCY(x)                    ((x)<<25)
-#define   HSW_FREQUENCY(x)                     ((x)<<24)
-#define   GEN9_FREQUENCY(x)                    ((x)<<23)
-#define   GEN6_OFFSET(x)                       ((x)<<19)
-#define   GEN6_AGGRESSIVE_TURBO                        (0<<15)
+#define   GEN6_TURBO_DISABLE                   (1 << 31)
+#define   GEN6_FREQUENCY(x)                    ((x) << 25)
+#define   HSW_FREQUENCY(x)                     ((x) << 24)
+#define   GEN9_FREQUENCY(x)                    ((x) << 23)
+#define   GEN6_OFFSET(x)                       ((x) << 19)
+#define   GEN6_AGGRESSIVE_TURBO                        (0 << 15)
 #define GEN6_RC_VIDEO_FREQ                     _MMIO(0xA00C)
 #define GEN6_RC_CONTROL                                _MMIO(0xA090)
-#define   GEN6_RC_CTL_RC6pp_ENABLE             (1<<16)
-#define   GEN6_RC_CTL_RC6p_ENABLE              (1<<17)
-#define   GEN6_RC_CTL_RC6_ENABLE               (1<<18)
-#define   GEN6_RC_CTL_RC1e_ENABLE              (1<<20)
-#define   GEN6_RC_CTL_RC7_ENABLE               (1<<22)
-#define   VLV_RC_CTL_CTX_RST_PARALLEL          (1<<24)
-#define   GEN7_RC_CTL_TO_MODE                  (1<<28)
-#define   GEN6_RC_CTL_EI_MODE(x)               ((x)<<27)
-#define   GEN6_RC_CTL_HW_ENABLE                        (1<<31)
+#define   GEN6_RC_CTL_RC6pp_ENABLE             (1 << 16)
+#define   GEN6_RC_CTL_RC6p_ENABLE              (1 << 17)
+#define   GEN6_RC_CTL_RC6_ENABLE               (1 << 18)
+#define   GEN6_RC_CTL_RC1e_ENABLE              (1 << 20)
+#define   GEN6_RC_CTL_RC7_ENABLE               (1 << 22)
+#define   VLV_RC_CTL_CTX_RST_PARALLEL          (1 << 24)
+#define   GEN7_RC_CTL_TO_MODE                  (1 << 28)
+#define   GEN6_RC_CTL_EI_MODE(x)               ((x) << 27)
+#define   GEN6_RC_CTL_HW_ENABLE                        (1 << 31)
 #define GEN6_RP_DOWN_TIMEOUT                   _MMIO(0xA010)
 #define GEN6_RP_INTERRUPT_LIMITS               _MMIO(0xA014)
 #define GEN6_RPSTAT1                           _MMIO(0xA01C)
@@ -8068,19 +8144,19 @@ enum {
 #define   HSW_CAGF_MASK                                (0x7f << HSW_CAGF_SHIFT)
 #define   GEN9_CAGF_MASK                       (0x1ff << GEN9_CAGF_SHIFT)
 #define GEN6_RP_CONTROL                                _MMIO(0xA024)
-#define   GEN6_RP_MEDIA_TURBO                  (1<<11)
-#define   GEN6_RP_MEDIA_MODE_MASK              (3<<9)
-#define   GEN6_RP_MEDIA_HW_TURBO_MODE          (3<<9)
-#define   GEN6_RP_MEDIA_HW_NORMAL_MODE         (2<<9)
-#define   GEN6_RP_MEDIA_HW_MODE                        (1<<9)
-#define   GEN6_RP_MEDIA_SW_MODE                        (0<<9)
-#define   GEN6_RP_MEDIA_IS_GFX                 (1<<8)
-#define   GEN6_RP_ENABLE                       (1<<7)
-#define   GEN6_RP_UP_IDLE_MIN                  (0x1<<3)
-#define   GEN6_RP_UP_BUSY_AVG                  (0x2<<3)
-#define   GEN6_RP_UP_BUSY_CONT                 (0x4<<3)
-#define   GEN6_RP_DOWN_IDLE_AVG                        (0x2<<0)
-#define   GEN6_RP_DOWN_IDLE_CONT               (0x1<<0)
+#define   GEN6_RP_MEDIA_TURBO                  (1 << 11)
+#define   GEN6_RP_MEDIA_MODE_MASK              (3 << 9)
+#define   GEN6_RP_MEDIA_HW_TURBO_MODE          (3 << 9)
+#define   GEN6_RP_MEDIA_HW_NORMAL_MODE         (2 << 9)
+#define   GEN6_RP_MEDIA_HW_MODE                        (1 << 9)
+#define   GEN6_RP_MEDIA_SW_MODE                        (0 << 9)
+#define   GEN6_RP_MEDIA_IS_GFX                 (1 << 8)
+#define   GEN6_RP_ENABLE                       (1 << 7)
+#define   GEN6_RP_UP_IDLE_MIN                  (0x1 << 3)
+#define   GEN6_RP_UP_BUSY_AVG                  (0x2 << 3)
+#define   GEN6_RP_UP_BUSY_CONT                 (0x4 << 3)
+#define   GEN6_RP_DOWN_IDLE_AVG                        (0x2 << 0)
+#define   GEN6_RP_DOWN_IDLE_CONT               (0x1 << 0)
 #define GEN6_RP_UP_THRESHOLD                   _MMIO(0xA02C)
 #define GEN6_RP_DOWN_THRESHOLD                 _MMIO(0xA030)
 #define GEN6_RP_CUR_UP_EI                      _MMIO(0xA050)
@@ -8116,15 +8192,15 @@ enum {
 #define VLV_RCEDATA                            _MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD                   _MMIO(0xA0C0)
 #define GEN6_PMINTRMSK                         _MMIO(0xA168)
-#define   GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC  (1<<31)
-#define   ARAT_EXPIRED_INTRMSK                 (1<<9)
+#define   GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC  (1 << 31)
+#define   ARAT_EXPIRED_INTRMSK                 (1 << 9)
 #define GEN8_MISC_CTRL0                                _MMIO(0xA180)
 #define VLV_PWRDWNUPCTL                                _MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS          _MMIO(0xA0C4)
 #define GEN9_RENDER_PG_IDLE_HYSTERESIS         _MMIO(0xA0C8)
 #define GEN9_PG_ENABLE                         _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE                  (1<<0)
-#define GEN9_MEDIA_PG_ENABLE                   (1<<1)
+#define GEN9_RENDER_PG_ENABLE                  (1 << 0)
+#define GEN9_MEDIA_PG_ENABLE                   (1 << 1)
 #define GEN8_PUSHBUS_CONTROL                   _MMIO(0xA248)
 #define GEN8_PUSHBUS_ENABLE                    _MMIO(0xA250)
 #define GEN8_PUSHBUS_SHIFT                     _MMIO(0xA25C)
@@ -8137,13 +8213,13 @@ enum {
 #define GEN6_PMIMR                             _MMIO(0x44024) /* rps_lock */
 #define GEN6_PMIIR                             _MMIO(0x44028)
 #define GEN6_PMIER                             _MMIO(0x4402C)
-#define  GEN6_PM_MBOX_EVENT                    (1<<25)
-#define  GEN6_PM_THERMAL_EVENT                 (1<<24)
-#define  GEN6_PM_RP_DOWN_TIMEOUT               (1<<6)
-#define  GEN6_PM_RP_UP_THRESHOLD               (1<<5)
-#define  GEN6_PM_RP_DOWN_THRESHOLD             (1<<4)
-#define  GEN6_PM_RP_UP_EI_EXPIRED              (1<<2)
-#define  GEN6_PM_RP_DOWN_EI_EXPIRED            (1<<1)
+#define  GEN6_PM_MBOX_EVENT                    (1 << 25)
+#define  GEN6_PM_THERMAL_EVENT                 (1 << 24)
+#define  GEN6_PM_RP_DOWN_TIMEOUT               (1 << 6)
+#define  GEN6_PM_RP_UP_THRESHOLD               (1 << 5)
+#define  GEN6_PM_RP_DOWN_THRESHOLD             (1 << 4)
+#define  GEN6_PM_RP_UP_EI_EXPIRED              (1 << 2)
+#define  GEN6_PM_RP_DOWN_EI_EXPIRED            (1 << 1)
 #define  GEN6_PM_RPS_EVENTS                    (GEN6_PM_RP_UP_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
@@ -8152,16 +8228,16 @@ enum {
 #define GEN7_GT_SCRATCH_REG_NUM                        8
 
 #define VLV_GTLC_SURVIVABILITY_REG              _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT                 (1<<3)
-#define VLV_GFX_CLK_FORCE_ON_BIT               (1<<2)
+#define VLV_GFX_CLK_STATUS_BIT                 (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT               (1 << 2)
 
 #define GEN6_GT_GFX_RC6_LOCKED                 _MMIO(0x138104)
 #define VLV_COUNTER_CONTROL                    _MMIO(0x138104)
-#define   VLV_COUNT_RANGE_HIGH                 (1<<15)
-#define   VLV_MEDIA_RC0_COUNT_EN               (1<<5)
-#define   VLV_RENDER_RC0_COUNT_EN              (1<<4)
-#define   VLV_MEDIA_RC6_COUNT_EN               (1<<1)
-#define   VLV_RENDER_RC6_COUNT_EN              (1<<0)
+#define   VLV_COUNT_RANGE_HIGH                 (1 << 15)
+#define   VLV_MEDIA_RC0_COUNT_EN               (1 << 5)
+#define   VLV_RENDER_RC0_COUNT_EN              (1 << 4)
+#define   VLV_MEDIA_RC6_COUNT_EN               (1 << 1)
+#define   VLV_RENDER_RC6_COUNT_EN              (1 << 0)
 #define GEN6_GT_GFX_RC6                                _MMIO(0x138108)
 #define VLV_GT_RENDER_RC6                      _MMIO(0x138108)
 #define VLV_GT_MEDIA_RC6                       _MMIO(0x13810C)
@@ -8172,7 +8248,7 @@ enum {
 #define VLV_MEDIA_C0_COUNT                     _MMIO(0x13811C)
 
 #define GEN6_PCODE_MAILBOX                     _MMIO(0x138124)
-#define   GEN6_PCODE_READY                     (1<<31)
+#define   GEN6_PCODE_READY                     (1 << 31)
 #define   GEN6_PCODE_ERROR_MASK                        0xFF
 #define     GEN6_PCODE_SUCCESS                 0x0
 #define     GEN6_PCODE_ILLEGAL_CMD             0x1
@@ -8216,7 +8292,7 @@ enum {
 #define GEN6_PCODE_DATA1                       _MMIO(0x13812C)
 
 #define GEN6_GT_CORE_STATUS            _MMIO(0x138060)
-#define   GEN6_CORE_CPD_STATE_MASK     (7<<4)
+#define   GEN6_CORE_CPD_STATE_MASK     (7 << 4)
 #define   GEN6_RCn_MASK                        7
 #define   GEN6_RC0                     0
 #define   GEN6_RC3                     2
@@ -8228,26 +8304,26 @@ enum {
 
 #define CHV_POWER_SS0_SIG1             _MMIO(0xa720)
 #define CHV_POWER_SS1_SIG1             _MMIO(0xa728)
-#define   CHV_SS_PG_ENABLE             (1<<1)
-#define   CHV_EU08_PG_ENABLE           (1<<9)
-#define   CHV_EU19_PG_ENABLE           (1<<17)
-#define   CHV_EU210_PG_ENABLE          (1<<25)
+#define   CHV_SS_PG_ENABLE             (1 << 1)
+#define   CHV_EU08_PG_ENABLE           (1 << 9)
+#define   CHV_EU19_PG_ENABLE           (1 << 17)
+#define   CHV_EU210_PG_ENABLE          (1 << 25)
 
 #define CHV_POWER_SS0_SIG2             _MMIO(0xa724)
 #define CHV_POWER_SS1_SIG2             _MMIO(0xa72c)
-#define   CHV_EU311_PG_ENABLE          (1<<1)
+#define   CHV_EU311_PG_ENABLE          (1 << 1)
 
-#define GEN9_SLICE_PGCTL_ACK(slice)    _MMIO(0x804c + (slice)*0x4)
+#define GEN9_SLICE_PGCTL_ACK(slice)    _MMIO(0x804c + (slice) * 0x4)
 #define GEN10_SLICE_PGCTL_ACK(slice)   _MMIO(0x804c + ((slice) / 3) * 0x34 + \
                                              ((slice) % 3) * 0x4)
 #define   GEN9_PGCTL_SLICE_ACK         (1 << 0)
-#define   GEN9_PGCTL_SS_ACK(subslice)  (1 << (2 + (subslice)*2))
+#define   GEN9_PGCTL_SS_ACK(subslice)  (1 << (2 + (subslice) * 2))
 #define   GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
 
-#define GEN9_SS01_EU_PGCTL_ACK(slice)  _MMIO(0x805c + (slice)*0x8)
+#define GEN9_SS01_EU_PGCTL_ACK(slice)  _MMIO(0x805c + (slice) * 0x8)
 #define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
                                              ((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice)  _MMIO(0x8060 + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice)  _MMIO(0x8060 + (slice) * 0x8)
 #define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
                                              ((slice) % 3) * 0x8)
 #define   GEN9_PGCTL_SSA_EU08_ACK      (1 << 0)
@@ -8260,10 +8336,10 @@ enum {
 #define   GEN9_PGCTL_SSB_EU311_ACK     (1 << 14)
 
 #define GEN7_MISCCPCTL                         _MMIO(0x9424)
-#define   GEN7_DOP_CLOCK_GATE_ENABLE           (1<<0)
-#define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE     (1<<2)
-#define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE       (1<<4)
-#define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1<<6)
+#define   GEN7_DOP_CLOCK_GATE_ENABLE           (1 << 0)
+#define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE     (1 << 2)
+#define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE       (1 << 4)
+#define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1 << 6)
 
 #define GEN8_GARBCNTL                          _MMIO(0xB004)
 #define   GEN9_GAPS_TSV_CREDIT_DISABLE         (1 << 7)
@@ -8292,61 +8368,62 @@ enum {
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)         _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
-#define   GEN7_L3CDERRST1_ROW_MASK     (0x7ff<<14)
-#define   GEN7_PARITY_ERROR_VALID      (1<<13)
-#define   GEN7_L3CDERRST1_BANK_MASK    (3<<11)
-#define   GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define   GEN7_L3CDERRST1_ROW_MASK     (0x7ff << 14)
+#define   GEN7_PARITY_ERROR_VALID      (1 << 13)
+#define   GEN7_L3CDERRST1_BANK_MASK    (3 << 11)
+#define   GEN7_L3CDERRST1_SUBBANK_MASK (7 << 8)
 #define GEN7_PARITY_ERROR_ROW(reg) \
-               ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+               (((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
 #define GEN7_PARITY_ERROR_BANK(reg) \
-               ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+               (((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
 #define GEN7_PARITY_ERROR_SUBBANK(reg) \
-               ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
-#define   GEN7_L3CDERRST1_ENABLE       (1<<7)
+               (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define   GEN7_L3CDERRST1_ENABLE       (1 << 7)
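 /* Illustrative sketch, not part of this patch: on an L3 parity interrupt the
  * failing location is recovered from L3CDERRST1 with the helpers above:
  *
  *     u32 reg = I915_READ(GEN7_L3CDERRST1(slice));
  *     if (reg & GEN7_PARITY_ERROR_VALID) {
  *             int row = GEN7_PARITY_ERROR_ROW(reg);
  *             int bank = GEN7_PARITY_ERROR_BANK(reg);
  *             int subbank = GEN7_PARITY_ERROR_SUBBANK(reg);
  *     }
  */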
 
 #define GEN7_L3LOG(slice, i)           _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
 #define GEN7_L3LOG_SIZE                        0x80
 
 #define GEN7_HALF_SLICE_CHICKEN1       _MMIO(0xe100) /* IVB GT1 + VLV */
 #define GEN7_HALF_SLICE_CHICKEN1_GT2   _MMIO(0xf100)
-#define   GEN7_MAX_PS_THREAD_DEP               (8<<12)
-#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1<<10)
-#define   GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE      (1<<4)
-#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define   GEN7_MAX_PS_THREAD_DEP               (8 << 12)
+#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1 << 10)
+#define   GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE      (1 << 4)
+#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
 
 #define GEN9_HALF_SLICE_CHICKEN5       _MMIO(0xe188)
-#define   GEN9_DG_MIRROR_FIX_ENABLE    (1<<5)
-#define   GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
+#define   GEN9_DG_MIRROR_FIX_ENABLE    (1 << 5)
+#define   GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
 
 #define GEN8_ROW_CHICKEN               _MMIO(0xe4f0)
-#define   FLOW_CONTROL_ENABLE          (1<<15)
-#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1<<8)
-#define   STALL_DOP_GATING_DISABLE             (1<<5)
-#define   THROTTLE_12_5                                (7<<2)
-#define   DISABLE_EARLY_EOT                    (1<<1)
+#define   FLOW_CONTROL_ENABLE          (1 << 15)
+#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1 << 8)
+#define   STALL_DOP_GATING_DISABLE             (1 << 5)
+#define   THROTTLE_12_5                                (7 << 2)
+#define   DISABLE_EARLY_EOT                    (1 << 1)
 
 #define GEN7_ROW_CHICKEN2              _MMIO(0xe4f4)
 #define GEN7_ROW_CHICKEN2_GT2          _MMIO(0xf4f4)
-#define   DOP_CLOCK_GATING_DISABLE     (1<<0)
-#define   PUSH_CONSTANT_DEREF_DISABLE  (1<<8)
+#define   DOP_CLOCK_GATING_DISABLE     (1 << 0)
+#define   PUSH_CONSTANT_DEREF_DISABLE  (1 << 8)
+#define   GEN11_TDL_CLOCK_GATING_FIX_DISABLE   (1 << 1)
 
 #define HSW_ROW_CHICKEN3               _MMIO(0xe49c)
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
 
 #define HALF_SLICE_CHICKEN2            _MMIO(0xe180)
-#define   GEN8_ST_PO_DISABLE           (1<<13)
+#define   GEN8_ST_PO_DISABLE           (1 << 13)
 
 #define HALF_SLICE_CHICKEN3            _MMIO(0xe184)
-#define   HSW_SAMPLE_C_PERFORMANCE     (1<<9)
-#define   GEN8_CENTROID_PIXEL_OPT_DIS  (1<<8)
-#define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC  (1<<5)
-#define   CNL_FAST_ANISO_L1_BANKING_FIX        (1<<4)
-#define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1<<1)
+#define   HSW_SAMPLE_C_PERFORMANCE     (1 << 9)
+#define   GEN8_CENTROID_PIXEL_OPT_DIS  (1 << 8)
+#define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC  (1 << 5)
+#define   CNL_FAST_ANISO_L1_BANKING_FIX        (1 << 4)
+#define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1 << 1)
 
 #define GEN9_HALF_SLICE_CHICKEN7       _MMIO(0xe194)
-#define   GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR       (1<<8)
-#define   GEN9_ENABLE_YV12_BUGFIX      (1<<4)
-#define   GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
+#define   GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR       (1 << 8)
+#define   GEN9_ENABLE_YV12_BUGFIX      (1 << 4)
+#define   GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
 
 /* Audio */
 #define G4X_AUD_VID_DID                        _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -8499,9 +8576,9 @@ enum {
 #define   HSW_PWR_WELL_CTL_REQ(pw)             (1 << (_HSW_PW_SHIFT(pw) + 1))
 #define   HSW_PWR_WELL_CTL_STATE(pw)           (1 << _HSW_PW_SHIFT(pw))
 #define HSW_PWR_WELL_CTL5                      _MMIO(0x45410)
-#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP      (1<<31)
-#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE       (1<<20)
-#define   HSW_PWR_WELL_FORCE_ON                        (1<<19)
+#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP      (1 << 31)
+#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE       (1 << 20)
+#define   HSW_PWR_WELL_FORCE_ON                        (1 << 19)
 #define HSW_PWR_WELL_CTL6                      _MMIO(0x45414)
 
 /* SKL Fuse Status */
@@ -8512,7 +8589,7 @@ enum skl_power_gate {
 };
 
 #define SKL_FUSE_STATUS                                _MMIO(0x42000)
-#define  SKL_FUSE_DOWNLOAD_STATUS              (1<<31)
+#define  SKL_FUSE_DOWNLOAD_STATUS              (1 << 31)
 /* PG0 is HW-controlled, so it has no power well ID; PG1..PG2 map to SKL_DISP_PW1..SKL_DISP_PW2 */
 #define  SKL_PW_TO_PG(pw)                      ((pw) - SKL_DISP_PW_1 + SKL_PG1)
 #define  SKL_FUSE_PG_DIST_STATUS(pg)           (1 << (27 - (pg)))
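 /* Illustrative sketch, not part of this patch: assuming the driver's
  * wait_for() helper, power-gate distribution is polled as:
  *
  *     u8 pg = SKL_PW_TO_PG(pw);
  *     wait_for(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(pg), 1);
  */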
@@ -8527,8 +8604,8 @@ enum skl_power_gate {
                                                    _CNL_AUX_ANAOVRD1_C, \
                                                    _CNL_AUX_ANAOVRD1_D, \
                                                    _CNL_AUX_ANAOVRD1_F))
-#define   CNL_AUX_ANAOVRD1_ENABLE      (1<<16)
-#define   CNL_AUX_ANAOVRD1_LDO_BYPASS  (1<<23)
+#define   CNL_AUX_ANAOVRD1_ENABLE      (1 << 16)
+#define   CNL_AUX_ANAOVRD1_LDO_BYPASS  (1 << 23)
 
 /* HDCP Key Registers */
 #define HDCP_KEY_CONF                  _MMIO(0x66c00)
@@ -8573,7 +8650,7 @@ enum skl_power_gate {
 #define HDCP_SHA_V_PRIME_H2            _MMIO(0x66d0C)
 #define HDCP_SHA_V_PRIME_H3            _MMIO(0x66d10)
 #define HDCP_SHA_V_PRIME_H4            _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h)            _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_V_PRIME(h)            _MMIO((0x66d04 + (h) * 4))
 #define HDCP_SHA_TEXT                  _MMIO(0x66d18)
 
 /* HDCP Auth Registers */
@@ -8589,7 +8666,7 @@ enum skl_power_gate {
                                          _PORTC_HDCP_AUTHENC, \
                                          _PORTD_HDCP_AUTHENC, \
                                          _PORTE_HDCP_AUTHENC, \
-                                         _PORTF_HDCP_AUTHENC) + x)
+                                         _PORTF_HDCP_AUTHENC) + (x))
 #define PORT_HDCP_CONF(port)           _PORT_HDCP_AUTHENC(port, 0x0)
 #define  HDCP_CONF_CAPTURE_AN          BIT(0)
 #define  HDCP_CONF_AUTH_AND_ENC                (BIT(1) | BIT(0))
@@ -8610,7 +8687,7 @@ enum skl_power_gate {
 #define  HDCP_STATUS_R0_READY          BIT(18)
 #define  HDCP_STATUS_AN_READY          BIT(17)
 #define  HDCP_STATUS_CIPHER            BIT(16)
-#define  HDCP_STATUS_FRAME_CNT(x)      ((x >> 8) & 0xff)
+#define  HDCP_STATUS_FRAME_CNT(x)      (((x) >> 8) & 0xff)
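The two HDCP fixes above are not cosmetic: an unparenthesized macro parameter expands with the wrong precedence as soon as the argument is an expression. A self-contained illustration (macro names invented so as not to restate the header):

#define BAD(h)  (0x66d04 + h * 4)    /* BAD(i + 1)  -> 0x66d04 + i + 1*4 */
#define GOOD(h) (0x66d04 + (h) * 4)  /* GOOD(i + 1) -> 0x66d04 + (i+1)*4 */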
 
 /* Per-pipe DDI Function Control */
 #define _TRANS_DDI_FUNC_CTL_A          0x60400
@@ -8619,37 +8696,37 @@ enum skl_power_gate {
 #define _TRANS_DDI_FUNC_CTL_EDP                0x6F400
 #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
 
-#define  TRANS_DDI_FUNC_ENABLE         (1<<31)
+#define  TRANS_DDI_FUNC_ENABLE         (1 << 31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  TRANS_DDI_PORT_MASK           (7<<28)
+#define  TRANS_DDI_PORT_MASK           (7 << 28)
 #define  TRANS_DDI_PORT_SHIFT          28
-#define  TRANS_DDI_SELECT_PORT(x)      ((x)<<28)
-#define  TRANS_DDI_PORT_NONE           (0<<28)
-#define  TRANS_DDI_MODE_SELECT_MASK    (7<<24)
-#define  TRANS_DDI_MODE_SELECT_HDMI    (0<<24)
-#define  TRANS_DDI_MODE_SELECT_DVI     (1<<24)
-#define  TRANS_DDI_MODE_SELECT_DP_SST  (2<<24)
-#define  TRANS_DDI_MODE_SELECT_DP_MST  (3<<24)
-#define  TRANS_DDI_MODE_SELECT_FDI     (4<<24)
-#define  TRANS_DDI_BPC_MASK            (7<<20)
-#define  TRANS_DDI_BPC_8               (0<<20)
-#define  TRANS_DDI_BPC_10              (1<<20)
-#define  TRANS_DDI_BPC_6               (2<<20)
-#define  TRANS_DDI_BPC_12              (3<<20)
-#define  TRANS_DDI_PVSYNC              (1<<17)
-#define  TRANS_DDI_PHSYNC              (1<<16)
-#define  TRANS_DDI_EDP_INPUT_MASK      (7<<12)
-#define  TRANS_DDI_EDP_INPUT_A_ON      (0<<12)
-#define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4<<12)
-#define  TRANS_DDI_EDP_INPUT_B_ONOFF   (5<<12)
-#define  TRANS_DDI_EDP_INPUT_C_ONOFF   (6<<12)
-#define  TRANS_DDI_HDCP_SIGNALLING     (1<<9)
-#define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
-#define  TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
-#define  TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
-#define  TRANS_DDI_BFI_ENABLE          (1<<4)
-#define  TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
-#define  TRANS_DDI_HDMI_SCRAMBLING     (1<<0)
+#define  TRANS_DDI_SELECT_PORT(x)      ((x) << 28)
+#define  TRANS_DDI_PORT_NONE           (0 << 28)
+#define  TRANS_DDI_MODE_SELECT_MASK    (7 << 24)
+#define  TRANS_DDI_MODE_SELECT_HDMI    (0 << 24)
+#define  TRANS_DDI_MODE_SELECT_DVI     (1 << 24)
+#define  TRANS_DDI_MODE_SELECT_DP_SST  (2 << 24)
+#define  TRANS_DDI_MODE_SELECT_DP_MST  (3 << 24)
+#define  TRANS_DDI_MODE_SELECT_FDI     (4 << 24)
+#define  TRANS_DDI_BPC_MASK            (7 << 20)
+#define  TRANS_DDI_BPC_8               (0 << 20)
+#define  TRANS_DDI_BPC_10              (1 << 20)
+#define  TRANS_DDI_BPC_6               (2 << 20)
+#define  TRANS_DDI_BPC_12              (3 << 20)
+#define  TRANS_DDI_PVSYNC              (1 << 17)
+#define  TRANS_DDI_PHSYNC              (1 << 16)
+#define  TRANS_DDI_EDP_INPUT_MASK      (7 << 12)
+#define  TRANS_DDI_EDP_INPUT_A_ON      (0 << 12)
+#define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4 << 12)
+#define  TRANS_DDI_EDP_INPUT_B_ONOFF   (5 << 12)
+#define  TRANS_DDI_EDP_INPUT_C_ONOFF   (6 << 12)
+#define  TRANS_DDI_HDCP_SIGNALLING     (1 << 9)
+#define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define  TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define  TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define  TRANS_DDI_BFI_ENABLE          (1 << 4)
+#define  TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define  TRANS_DDI_HDMI_SCRAMBLING     (1 << 0)
 #define  TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
                                        | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
                                        | TRANS_DDI_HDMI_SCRAMBLING)
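Mask/value pairs like TRANS_DDI_BPC_* are meant for read-modify-write sequences; a minimal sketch, with read_reg()/write_reg() standing in for the driver's real register accessors:

/* Select 10 bpc on a transcoder's DDI function control register. */
u32 val = read_reg(TRANS_DDI_FUNC_CTL(tran));  /* illustrative helper */
val &= ~TRANS_DDI_BPC_MASK;                    /* clear bits 22:20    */
val |= TRANS_DDI_BPC_10;
write_reg(TRANS_DDI_FUNC_CTL(tran), val);      /* illustrative helper */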
@@ -8658,28 +8735,29 @@ enum skl_power_gate {
 #define _DP_TP_CTL_A                   0x64040
 #define _DP_TP_CTL_B                   0x64140
 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define  DP_TP_CTL_ENABLE                      (1<<31)
-#define  DP_TP_CTL_MODE_SST                    (0<<27)
-#define  DP_TP_CTL_MODE_MST                    (1<<27)
-#define  DP_TP_CTL_FORCE_ACT                   (1<<25)
-#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE       (1<<18)
-#define  DP_TP_CTL_FDI_AUTOTRAIN               (1<<15)
-#define  DP_TP_CTL_LINK_TRAIN_MASK             (7<<8)
-#define  DP_TP_CTL_LINK_TRAIN_PAT1             (0<<8)
-#define  DP_TP_CTL_LINK_TRAIN_PAT2             (1<<8)
-#define  DP_TP_CTL_LINK_TRAIN_PAT3             (4<<8)
-#define  DP_TP_CTL_LINK_TRAIN_IDLE             (2<<8)
-#define  DP_TP_CTL_LINK_TRAIN_NORMAL           (3<<8)
-#define  DP_TP_CTL_SCRAMBLE_DISABLE            (1<<7)
+#define  DP_TP_CTL_ENABLE                      (1 << 31)
+#define  DP_TP_CTL_MODE_SST                    (0 << 27)
+#define  DP_TP_CTL_MODE_MST                    (1 << 27)
+#define  DP_TP_CTL_FORCE_ACT                   (1 << 25)
+#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE       (1 << 18)
+#define  DP_TP_CTL_FDI_AUTOTRAIN               (1 << 15)
+#define  DP_TP_CTL_LINK_TRAIN_MASK             (7 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT1             (0 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT2             (1 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT3             (4 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT4             (5 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_IDLE             (2 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL           (3 << 8)
+#define  DP_TP_CTL_SCRAMBLE_DISABLE            (1 << 7)
 
 /* DisplayPort Transport Status */
 #define _DP_TP_STATUS_A                        0x64044
 #define _DP_TP_STATUS_B                        0x64144
 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define  DP_TP_STATUS_IDLE_DONE                        (1<<25)
-#define  DP_TP_STATUS_ACT_SENT                 (1<<24)
-#define  DP_TP_STATUS_MODE_STATUS_MST          (1<<23)
-#define  DP_TP_STATUS_AUTOTRAIN_DONE           (1<<12)
+#define  DP_TP_STATUS_IDLE_DONE                        (1 << 25)
+#define  DP_TP_STATUS_ACT_SENT                 (1 << 24)
+#define  DP_TP_STATUS_MODE_STATUS_MST          (1 << 23)
+#define  DP_TP_STATUS_AUTOTRAIN_DONE           (1 << 12)
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC2      (3 << 8)
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC1      (3 << 4)
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC0      (3 << 0)
@@ -8688,16 +8766,16 @@ enum skl_power_gate {
 #define _DDI_BUF_CTL_A                         0x64000
 #define _DDI_BUF_CTL_B                         0x64100
 #define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define  DDI_BUF_CTL_ENABLE                    (1<<31)
+#define  DDI_BUF_CTL_ENABLE                    (1 << 31)
 #define  DDI_BUF_TRANS_SELECT(n)       ((n) << 24)
-#define  DDI_BUF_EMP_MASK                      (0xf<<24)
-#define  DDI_BUF_PORT_REVERSAL                 (1<<16)
-#define  DDI_BUF_IS_IDLE                       (1<<7)
-#define  DDI_A_4_LANES                         (1<<4)
+#define  DDI_BUF_EMP_MASK                      (0xf << 24)
+#define  DDI_BUF_PORT_REVERSAL                 (1 << 16)
+#define  DDI_BUF_IS_IDLE                       (1 << 7)
+#define  DDI_A_4_LANES                         (1 << 4)
 #define  DDI_PORT_WIDTH(width)                 (((width) - 1) << 1)
 #define  DDI_PORT_WIDTH_MASK                   (7 << 1)
 #define  DDI_PORT_WIDTH_SHIFT                  1
-#define  DDI_INIT_DISPLAY_DETECTED             (1<<0)
+#define  DDI_INIT_DISPLAY_DETECTED             (1 << 0)
 
 /* DDI Buffer Translations */
 #define _DDI_BUF_TRANS_A               0x64E00
@@ -8712,95 +8790,99 @@ enum skl_power_gate {
 #define SBI_ADDR                       _MMIO(0xC6000)
 #define SBI_DATA                       _MMIO(0xC6004)
 #define SBI_CTL_STAT                   _MMIO(0xC6008)
-#define  SBI_CTL_DEST_ICLK             (0x0<<16)
-#define  SBI_CTL_DEST_MPHY             (0x1<<16)
-#define  SBI_CTL_OP_IORD               (0x2<<8)
-#define  SBI_CTL_OP_IOWR               (0x3<<8)
-#define  SBI_CTL_OP_CRRD               (0x6<<8)
-#define  SBI_CTL_OP_CRWR               (0x7<<8)
-#define  SBI_RESPONSE_FAIL             (0x1<<1)
-#define  SBI_RESPONSE_SUCCESS          (0x0<<1)
-#define  SBI_BUSY                      (0x1<<0)
-#define  SBI_READY                     (0x0<<0)
+#define  SBI_CTL_DEST_ICLK             (0x0 << 16)
+#define  SBI_CTL_DEST_MPHY             (0x1 << 16)
+#define  SBI_CTL_OP_IORD               (0x2 << 8)
+#define  SBI_CTL_OP_IOWR               (0x3 << 8)
+#define  SBI_CTL_OP_CRRD               (0x6 << 8)
+#define  SBI_CTL_OP_CRWR               (0x7 << 8)
+#define  SBI_RESPONSE_FAIL             (0x1 << 1)
+#define  SBI_RESPONSE_SUCCESS          (0x0 << 1)
+#define  SBI_BUSY                      (0x1 << 0)
+#define  SBI_READY                     (0x0 << 0)
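The SBI_* bits above imply a simple busy/response handshake; a hedged sketch of a sideband read (not the driver's exact sideband code: locking, timeout, and the failure path are omitted):

static u32 sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
        I915_WRITE(SBI_ADDR, (u32)reg << 16);
        I915_WRITE(SBI_CTL_STAT,
                   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);
        while (I915_READ(SBI_CTL_STAT) & SBI_BUSY)  /* wait for done */
                cpu_relax();
        /* a real caller would check SBI_RESPONSE_FAIL before using this */
        return I915_READ(SBI_DATA);
}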
 
 /* SBI offsets */
 #define  SBI_SSCDIVINTPHASE                    0x0200
 #define  SBI_SSCDIVINTPHASE6                   0x0600
 #define   SBI_SSCDIVINTPHASE_DIVSEL_SHIFT      1
-#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK       (0x7f<<1)
-#define   SBI_SSCDIVINTPHASE_DIVSEL(x)         ((x)<<1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK       (0x7f << 1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL(x)         ((x) << 1)
 #define   SBI_SSCDIVINTPHASE_INCVAL_SHIFT      8
-#define   SBI_SSCDIVINTPHASE_INCVAL_MASK       (0x7f<<8)
-#define   SBI_SSCDIVINTPHASE_INCVAL(x)         ((x)<<8)
-#define   SBI_SSCDIVINTPHASE_DIR(x)            ((x)<<15)
-#define   SBI_SSCDIVINTPHASE_PROPAGATE         (1<<0)
+#define   SBI_SSCDIVINTPHASE_INCVAL_MASK       (0x7f << 8)
+#define   SBI_SSCDIVINTPHASE_INCVAL(x)         ((x) << 8)
+#define   SBI_SSCDIVINTPHASE_DIR(x)            ((x) << 15)
+#define   SBI_SSCDIVINTPHASE_PROPAGATE         (1 << 0)
 #define  SBI_SSCDITHPHASE                      0x0204
 #define  SBI_SSCCTL                            0x020c
 #define  SBI_SSCCTL6                           0x060C
-#define   SBI_SSCCTL_PATHALT                   (1<<3)
-#define   SBI_SSCCTL_DISABLE                   (1<<0)
+#define   SBI_SSCCTL_PATHALT                   (1 << 3)
+#define   SBI_SSCCTL_DISABLE                   (1 << 0)
 #define  SBI_SSCAUXDIV6                                0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT     4
-#define   SBI_SSCAUXDIV_FINALDIV2SEL_MASK      (1<<4)
-#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x)<<4)
+#define   SBI_SSCAUXDIV_FINALDIV2SEL_MASK      (1 << 4)
+#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x) << 4)
 #define  SBI_DBUFF0                            0x2a00
 #define  SBI_GEN0                              0x1f00
-#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE      (1<<0)
+#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE      (1 << 0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE                    _MMIO(0xC6020)
-#define  PIXCLK_GATE_UNGATE            (1<<0)
-#define  PIXCLK_GATE_GATE              (0<<0)
+#define  PIXCLK_GATE_UNGATE            (1 << 0)
+#define  PIXCLK_GATE_GATE              (0 << 0)
 
 /* SPLL */
 #define SPLL_CTL                       _MMIO(0x46020)
-#define  SPLL_PLL_ENABLE               (1<<31)
-#define  SPLL_PLL_SSC                  (1<<28)
-#define  SPLL_PLL_NON_SSC              (2<<28)
-#define  SPLL_PLL_LCPLL                        (3<<28)
-#define  SPLL_PLL_REF_MASK             (3<<28)
-#define  SPLL_PLL_FREQ_810MHz          (0<<26)
-#define  SPLL_PLL_FREQ_1350MHz         (1<<26)
-#define  SPLL_PLL_FREQ_2700MHz         (2<<26)
-#define  SPLL_PLL_FREQ_MASK            (3<<26)
+#define  SPLL_PLL_ENABLE               (1 << 31)
+#define  SPLL_PLL_SSC                  (1 << 28)
+#define  SPLL_PLL_NON_SSC              (2 << 28)
+#define  SPLL_PLL_LCPLL                        (3 << 28)
+#define  SPLL_PLL_REF_MASK             (3 << 28)
+#define  SPLL_PLL_FREQ_810MHz          (0 << 26)
+#define  SPLL_PLL_FREQ_1350MHz         (1 << 26)
+#define  SPLL_PLL_FREQ_2700MHz         (2 << 26)
+#define  SPLL_PLL_FREQ_MASK            (3 << 26)
 
 /* WRPLL */
 #define _WRPLL_CTL1                    0x46040
 #define _WRPLL_CTL2                    0x46060
 #define WRPLL_CTL(pll)                 _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define  WRPLL_PLL_ENABLE              (1<<31)
-#define  WRPLL_PLL_SSC                 (1<<28)
-#define  WRPLL_PLL_NON_SSC             (2<<28)
-#define  WRPLL_PLL_LCPLL               (3<<28)
-#define  WRPLL_PLL_REF_MASK            (3<<28)
+#define  WRPLL_PLL_ENABLE              (1 << 31)
+#define  WRPLL_PLL_SSC                 (1 << 28)
+#define  WRPLL_PLL_NON_SSC             (2 << 28)
+#define  WRPLL_PLL_LCPLL               (3 << 28)
+#define  WRPLL_PLL_REF_MASK            (3 << 28)
 /* WRPLL divider programming */
-#define  WRPLL_DIVIDER_REFERENCE(x)    ((x)<<0)
+#define  WRPLL_DIVIDER_REFERENCE(x)    ((x) << 0)
 #define  WRPLL_DIVIDER_REF_MASK                (0xff)
-#define  WRPLL_DIVIDER_POST(x)         ((x)<<8)
-#define  WRPLL_DIVIDER_POST_MASK       (0x3f<<8)
+#define  WRPLL_DIVIDER_POST(x)         ((x) << 8)
+#define  WRPLL_DIVIDER_POST_MASK       (0x3f << 8)
 #define  WRPLL_DIVIDER_POST_SHIFT      8
-#define  WRPLL_DIVIDER_FEEDBACK(x)     ((x)<<16)
+#define  WRPLL_DIVIDER_FEEDBACK(x)     ((x) << 16)
 #define  WRPLL_DIVIDER_FB_SHIFT                16
-#define  WRPLL_DIVIDER_FB_MASK         (0xff<<16)
+#define  WRPLL_DIVIDER_FB_MASK         (0xff << 16)
 
 /* Port clock selection */
 #define _PORT_CLK_SEL_A                        0x46100
 #define _PORT_CLK_SEL_B                        0x46104
 #define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define  PORT_CLK_SEL_LCPLL_2700       (0<<29)
-#define  PORT_CLK_SEL_LCPLL_1350       (1<<29)
-#define  PORT_CLK_SEL_LCPLL_810                (2<<29)
-#define  PORT_CLK_SEL_SPLL             (3<<29)
-#define  PORT_CLK_SEL_WRPLL(pll)       (((pll)+4)<<29)
-#define  PORT_CLK_SEL_WRPLL1           (4<<29)
-#define  PORT_CLK_SEL_WRPLL2           (5<<29)
-#define  PORT_CLK_SEL_NONE             (7<<29)
-#define  PORT_CLK_SEL_MASK             (7<<29)
+#define  PORT_CLK_SEL_LCPLL_2700       (0 << 29)
+#define  PORT_CLK_SEL_LCPLL_1350       (1 << 29)
+#define  PORT_CLK_SEL_LCPLL_810                (2 << 29)
+#define  PORT_CLK_SEL_SPLL             (3 << 29)
+#define  PORT_CLK_SEL_WRPLL(pll)       (((pll) + 4) << 29)
+#define  PORT_CLK_SEL_WRPLL1           (4 << 29)
+#define  PORT_CLK_SEL_WRPLL2           (5 << 29)
+#define  PORT_CLK_SEL_NONE             (7 << 29)
+#define  PORT_CLK_SEL_MASK             (7 << 29)
 
 /* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
 #define DDI_CLK_SEL(port)              PORT_CLK_SEL(port)
 #define  DDI_CLK_SEL_NONE              (0x0 << 28)
 #define  DDI_CLK_SEL_MG                        (0x8 << 28)
+#define  DDI_CLK_SEL_TBT_162           (0xC << 28)
+#define  DDI_CLK_SEL_TBT_270           (0xD << 28)
+#define  DDI_CLK_SEL_TBT_540           (0xE << 28)
+#define  DDI_CLK_SEL_TBT_810           (0xF << 28)
 #define  DDI_CLK_SEL_MASK              (0xF << 28)
 
 /* Transcoder clock selection */
@@ -8808,8 +8890,8 @@ enum skl_power_gate {
 #define _TRANS_CLK_SEL_B               0x46144
 #define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
 /* For each transcoder, we need to select the corresponding port clock */
-#define  TRANS_CLK_SEL_DISABLED                (0x0<<29)
-#define  TRANS_CLK_SEL_PORT(x)         (((x)+1)<<29)
+#define  TRANS_CLK_SEL_DISABLED                (0x0 << 29)
+#define  TRANS_CLK_SEL_PORT(x)         (((x) + 1) << 29)
 
 #define CDCLK_FREQ                     _MMIO(0x46200)
 
@@ -8819,28 +8901,28 @@ enum skl_power_gate {
 #define _TRANS_EDP_MSA_MISC            0x6f410
 #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
 
-#define  TRANS_MSA_SYNC_CLK            (1<<0)
-#define  TRANS_MSA_6_BPC               (0<<5)
-#define  TRANS_MSA_8_BPC               (1<<5)
-#define  TRANS_MSA_10_BPC              (2<<5)
-#define  TRANS_MSA_12_BPC              (3<<5)
-#define  TRANS_MSA_16_BPC              (4<<5)
+#define  TRANS_MSA_SYNC_CLK            (1 << 0)
+#define  TRANS_MSA_6_BPC               (0 << 5)
+#define  TRANS_MSA_8_BPC               (1 << 5)
+#define  TRANS_MSA_10_BPC              (2 << 5)
+#define  TRANS_MSA_12_BPC              (3 << 5)
+#define  TRANS_MSA_16_BPC              (4 << 5)
 
 /* LCPLL Control */
 #define LCPLL_CTL                      _MMIO(0x130040)
-#define  LCPLL_PLL_DISABLE             (1<<31)
-#define  LCPLL_PLL_LOCK                        (1<<30)
-#define  LCPLL_CLK_FREQ_MASK           (3<<26)
-#define  LCPLL_CLK_FREQ_450            (0<<26)
-#define  LCPLL_CLK_FREQ_54O_BDW                (1<<26)
-#define  LCPLL_CLK_FREQ_337_5_BDW      (2<<26)
-#define  LCPLL_CLK_FREQ_675_BDW                (3<<26)
-#define  LCPLL_CD_CLOCK_DISABLE                (1<<25)
-#define  LCPLL_ROOT_CD_CLOCK_DISABLE   (1<<24)
-#define  LCPLL_CD2X_CLOCK_DISABLE      (1<<23)
-#define  LCPLL_POWER_DOWN_ALLOW                (1<<22)
-#define  LCPLL_CD_SOURCE_FCLK          (1<<21)
-#define  LCPLL_CD_SOURCE_FCLK_DONE     (1<<19)
+#define  LCPLL_PLL_DISABLE             (1 << 31)
+#define  LCPLL_PLL_LOCK                        (1 << 30)
+#define  LCPLL_CLK_FREQ_MASK           (3 << 26)
+#define  LCPLL_CLK_FREQ_450            (0 << 26)
+#define  LCPLL_CLK_FREQ_54O_BDW                (1 << 26)
+#define  LCPLL_CLK_FREQ_337_5_BDW      (2 << 26)
+#define  LCPLL_CLK_FREQ_675_BDW                (3 << 26)
+#define  LCPLL_CD_CLOCK_DISABLE                (1 << 25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE   (1 << 24)
+#define  LCPLL_CD2X_CLOCK_DISABLE      (1 << 23)
+#define  LCPLL_POWER_DOWN_ALLOW                (1 << 22)
+#define  LCPLL_CD_SOURCE_FCLK          (1 << 21)
+#define  LCPLL_CD_SOURCE_FCLK_DONE     (1 << 19)
 
 /*
  * SKL Clocks
@@ -8868,16 +8950,16 @@ enum skl_power_gate {
 /* LCPLL_CTL */
 #define LCPLL1_CTL             _MMIO(0x46010)
 #define LCPLL2_CTL             _MMIO(0x46014)
-#define  LCPLL_PLL_ENABLE      (1<<31)
+#define  LCPLL_PLL_ENABLE      (1 << 31)
 
 /* DPLL control1 */
 #define DPLL_CTRL1             _MMIO(0x6C058)
-#define  DPLL_CTRL1_HDMI_MODE(id)              (1<<((id)*6+5))
-#define  DPLL_CTRL1_SSC(id)                    (1<<((id)*6+4))
-#define  DPLL_CTRL1_LINK_RATE_MASK(id)         (7<<((id)*6+1))
-#define  DPLL_CTRL1_LINK_RATE_SHIFT(id)                ((id)*6+1)
-#define  DPLL_CTRL1_LINK_RATE(linkrate, id)    ((linkrate)<<((id)*6+1))
-#define  DPLL_CTRL1_OVERRIDE(id)               (1<<((id)*6))
+#define  DPLL_CTRL1_HDMI_MODE(id)              (1 << ((id) * 6 + 5))
+#define  DPLL_CTRL1_SSC(id)                    (1 << ((id) * 6 + 4))
+#define  DPLL_CTRL1_LINK_RATE_MASK(id)         (7 << ((id) * 6 + 1))
+#define  DPLL_CTRL1_LINK_RATE_SHIFT(id)                ((id) * 6 + 1)
+#define  DPLL_CTRL1_LINK_RATE(linkrate, id)    ((linkrate) << ((id) * 6 + 1))
+#define  DPLL_CTRL1_OVERRIDE(id)               (1 << ((id) * 6))
 #define  DPLL_CTRL1_LINK_RATE_2700             0
 #define  DPLL_CTRL1_LINK_RATE_1350             1
 #define  DPLL_CTRL1_LINK_RATE_810              2
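DPLL_CTRL1 packs one 6-bit block per DPLL id, which is why every macro above shifts by (id) * 6. A worked expansion for id = 1, straight from the definitions:

/* id = 1: the block occupies bits 11..6
 *   DPLL_CTRL1_HDMI_MODE(1)      == 1 << 11
 *   DPLL_CTRL1_SSC(1)            == 1 << 10
 *   DPLL_CTRL1_LINK_RATE_MASK(1) == 7 << 7   (bits 9..7)
 *   DPLL_CTRL1_OVERRIDE(1)       == 1 << 6
 */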
@@ -8887,43 +8969,43 @@ enum skl_power_gate {
 
 /* DPLL control2 */
 #define DPLL_CTRL2                             _MMIO(0x6C05C)
-#define  DPLL_CTRL2_DDI_CLK_OFF(port)          (1<<((port)+15))
-#define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)     (3<<((port)*3+1))
-#define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)    ((port)*3+1)
-#define  DPLL_CTRL2_DDI_CLK_SEL(clk, port)     ((clk)<<((port)*3+1))
-#define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)     (1<<((port)*3))
+#define  DPLL_CTRL2_DDI_CLK_OFF(port)          (1 << ((port) + 15))
+#define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)     (3 << ((port) * 3 + 1))
+#define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)    ((port) * 3 + 1)
+#define  DPLL_CTRL2_DDI_CLK_SEL(clk, port)     ((clk) << ((port) * 3 + 1))
+#define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)     (1 << ((port) * 3))
 
 /* DPLL Status */
 #define DPLL_STATUS    _MMIO(0x6C060)
-#define  DPLL_LOCK(id) (1<<((id)*8))
+#define  DPLL_LOCK(id) (1 << ((id) * 8))
 
 /* DPLL cfg */
 #define _DPLL1_CFGCR1  0x6C040
 #define _DPLL2_CFGCR1  0x6C048
 #define _DPLL3_CFGCR1  0x6C050
-#define  DPLL_CFGCR1_FREQ_ENABLE       (1<<31)
-#define  DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
-#define  DPLL_CFGCR1_DCO_FRACTION(x)   ((x)<<9)
+#define  DPLL_CFGCR1_FREQ_ENABLE       (1 << 31)
+#define  DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define  DPLL_CFGCR1_DCO_FRACTION(x)   ((x) << 9)
 #define  DPLL_CFGCR1_DCO_INTEGER_MASK  (0x1ff)
 
 #define _DPLL1_CFGCR2  0x6C044
 #define _DPLL2_CFGCR2  0x6C04C
 #define _DPLL3_CFGCR2  0x6C054
-#define  DPLL_CFGCR2_QDIV_RATIO_MASK   (0xff<<8)
-#define  DPLL_CFGCR2_QDIV_RATIO(x)     ((x)<<8)
-#define  DPLL_CFGCR2_QDIV_MODE(x)      ((x)<<7)
-#define  DPLL_CFGCR2_KDIV_MASK         (3<<5)
-#define  DPLL_CFGCR2_KDIV(x)           ((x)<<5)
-#define  DPLL_CFGCR2_KDIV_5 (0<<5)
-#define  DPLL_CFGCR2_KDIV_2 (1<<5)
-#define  DPLL_CFGCR2_KDIV_3 (2<<5)
-#define  DPLL_CFGCR2_KDIV_1 (3<<5)
-#define  DPLL_CFGCR2_PDIV_MASK         (7<<2)
-#define  DPLL_CFGCR2_PDIV(x)           ((x)<<2)
-#define  DPLL_CFGCR2_PDIV_1 (0<<2)
-#define  DPLL_CFGCR2_PDIV_2 (1<<2)
-#define  DPLL_CFGCR2_PDIV_3 (2<<2)
-#define  DPLL_CFGCR2_PDIV_7 (4<<2)
+#define  DPLL_CFGCR2_QDIV_RATIO_MASK   (0xff << 8)
+#define  DPLL_CFGCR2_QDIV_RATIO(x)     ((x) << 8)
+#define  DPLL_CFGCR2_QDIV_MODE(x)      ((x) << 7)
+#define  DPLL_CFGCR2_KDIV_MASK         (3 << 5)
+#define  DPLL_CFGCR2_KDIV(x)           ((x) << 5)
+#define  DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define  DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define  DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define  DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define  DPLL_CFGCR2_PDIV_MASK         (7 << 2)
+#define  DPLL_CFGCR2_PDIV(x)           ((x) << 2)
+#define  DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define  DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define  DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define  DPLL_CFGCR2_PDIV_7 (4 << 2)
 #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
 #define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
@@ -8935,9 +9017,9 @@ enum skl_power_gate {
 #define DPCLKA_CFGCR0                          _MMIO(0x6C200)
 #define DPCLKA_CFGCR0_ICL                      _MMIO(0x164280)
 #define  DPCLKA_CFGCR0_DDI_CLK_OFF(port)       (1 << ((port) ==  PORT_F ? 23 : \
-                                                     (port)+10))
+                                                     (port) + 10))
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
-                                               (port)*2)
+                                               (port) * 2)
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)  (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port)  ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
 
@@ -8950,6 +9032,8 @@ enum skl_power_gate {
 #define  PLL_POWER_STATE       (1 << 26)
 #define CNL_DPLL_ENABLE(pll)   _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
 
+#define TBT_PLL_ENABLE         _MMIO(0x46020)
+
 #define _MG_PLL1_ENABLE                0x46030
 #define _MG_PLL2_ENABLE                0x46034
 #define _MG_PLL3_ENABLE                0x46038
@@ -9105,13 +9189,16 @@ enum skl_power_gate {
 #define  DPLL_CFGCR1_QDIV_RATIO_MASK   (0xff << 10)
 #define  DPLL_CFGCR1_QDIV_RATIO_SHIFT  (10)
 #define  DPLL_CFGCR1_QDIV_RATIO(x)     ((x) << 10)
+#define  DPLL_CFGCR1_QDIV_MODE_SHIFT   (9)
 #define  DPLL_CFGCR1_QDIV_MODE(x)      ((x) << 9)
 #define  DPLL_CFGCR1_KDIV_MASK         (7 << 6)
+#define  DPLL_CFGCR1_KDIV_SHIFT                (6)
 #define  DPLL_CFGCR1_KDIV(x)           ((x) << 6)
 #define  DPLL_CFGCR1_KDIV_1            (1 << 6)
 #define  DPLL_CFGCR1_KDIV_2            (2 << 6)
 #define  DPLL_CFGCR1_KDIV_4            (4 << 6)
 #define  DPLL_CFGCR1_PDIV_MASK         (0xf << 2)
+#define  DPLL_CFGCR1_PDIV_SHIFT                (2)
 #define  DPLL_CFGCR1_PDIV(x)           ((x) << 2)
 #define  DPLL_CFGCR1_PDIV_2            (1 << 2)
 #define  DPLL_CFGCR1_PDIV_3            (2 << 2)
@@ -9145,22 +9232,22 @@ enum skl_power_gate {
 /* GEN9 DC */
 #define DC_STATE_EN                    _MMIO(0x45504)
 #define  DC_STATE_DISABLE              0
-#define  DC_STATE_EN_UPTO_DC5          (1<<0)
-#define  DC_STATE_EN_DC9               (1<<3)
-#define  DC_STATE_EN_UPTO_DC6          (2<<0)
+#define  DC_STATE_EN_UPTO_DC5          (1 << 0)
+#define  DC_STATE_EN_DC9               (1 << 3)
+#define  DC_STATE_EN_UPTO_DC6          (2 << 0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
 #define  DC_STATE_DEBUG                  _MMIO(0x45520)
-#define  DC_STATE_DEBUG_MASK_CORES     (1<<0)
-#define  DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+#define  DC_STATE_DEBUG_MASK_CORES     (1 << 0)
+#define  DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
 
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  * since on HSW we can't write to it using I915_WRITE. */
 #define D_COMP_HSW                     _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
 #define D_COMP_BDW                     _MMIO(0x138144)
-#define  D_COMP_RCOMP_IN_PROGRESS      (1<<9)
-#define  D_COMP_COMP_FORCE             (1<<8)
-#define  D_COMP_COMP_DISABLE           (1<<0)
+#define  D_COMP_RCOMP_IN_PROGRESS      (1 << 9)
+#define  D_COMP_COMP_FORCE             (1 << 8)
+#define  D_COMP_COMP_DISABLE           (1 << 0)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define _PIPE_WM_LINETIME_A            0x45270
@@ -9168,27 +9255,27 @@ enum skl_power_gate {
 #define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
 #define   PIPE_WM_LINETIME_MASK                        (0x1ff)
 #define   PIPE_WM_LINETIME_TIME(x)             ((x))
-#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK   (0x1ff<<16)
-#define   PIPE_WM_LINETIME_IPS_LINETIME(x)     ((x)<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK   (0x1ff << 16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)     ((x) << 16)
 
 /* SFUSE_STRAP */
 #define SFUSE_STRAP                    _MMIO(0xc2014)
-#define  SFUSE_STRAP_FUSE_LOCK         (1<<13)
-#define  SFUSE_STRAP_RAW_FREQUENCY     (1<<8)
-#define  SFUSE_STRAP_DISPLAY_DISABLED  (1<<7)
-#define  SFUSE_STRAP_CRT_DISABLED      (1<<6)
-#define  SFUSE_STRAP_DDIF_DETECTED     (1<<3)
-#define  SFUSE_STRAP_DDIB_DETECTED     (1<<2)
-#define  SFUSE_STRAP_DDIC_DETECTED     (1<<1)
-#define  SFUSE_STRAP_DDID_DETECTED     (1<<0)
+#define  SFUSE_STRAP_FUSE_LOCK         (1 << 13)
+#define  SFUSE_STRAP_RAW_FREQUENCY     (1 << 8)
+#define  SFUSE_STRAP_DISPLAY_DISABLED  (1 << 7)
+#define  SFUSE_STRAP_CRT_DISABLED      (1 << 6)
+#define  SFUSE_STRAP_DDIF_DETECTED     (1 << 3)
+#define  SFUSE_STRAP_DDIB_DETECTED     (1 << 2)
+#define  SFUSE_STRAP_DDIC_DETECTED     (1 << 1)
+#define  SFUSE_STRAP_DDID_DETECTED     (1 << 0)
 
 #define WM_MISC                                _MMIO(0x45260)
 #define  WM_MISC_DATA_PARTITION_5_6    (1 << 0)
 
 #define WM_DBG                         _MMIO(0x45280)
-#define  WM_DBG_DISALLOW_MULTIPLE_LP   (1<<0)
-#define  WM_DBG_DISALLOW_MAXFIFO       (1<<1)
-#define  WM_DBG_DISALLOW_SPRITE                (1<<2)
+#define  WM_DBG_DISALLOW_MULTIPLE_LP   (1 << 0)
+#define  WM_DBG_DISALLOW_MAXFIFO       (1 << 1)
+#define  WM_DBG_DISALLOW_SPRITE                (1 << 2)
 
 /* pipe CSC */
 #define _PIPE_A_CSC_COEFF_RY_GY        0x49010
@@ -9351,7 +9438,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
                                        BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
 #define  BXT_MIPI_TX_ESCLK_DIVIDER(port, val)  \
-               ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+               (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
 /* RX upper control divider to select actual RX clock output from 8x */
 #define  BXT_MIPI1_RX_ESCLK_UPPER_SHIFT                21
 #define  BXT_MIPI2_RX_ESCLK_UPPER_SHIFT                5
@@ -9364,7 +9451,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
                                        BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
 #define  BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val)    \
-               ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+               (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
 /* 8/3X divider to select the actual 8/3X clock output from 8x */
 #define  BXT_MIPI1_8X_BY3_SHIFT                19
 #define  BXT_MIPI2_8X_BY3_SHIFT                3
@@ -9377,7 +9464,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
                                                BXT_MIPI2_8X_BY3_DIVIDER_MASK)
 #define  BXT_MIPI_8X_BY3_DIVIDER(port, val)    \
-                       ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+                       (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
 /* RX lower control divider to select actual RX clock output from 8x */
 #define  BXT_MIPI1_RX_ESCLK_LOWER_SHIFT                16
 #define  BXT_MIPI2_RX_ESCLK_LOWER_SHIFT                0
@@ -9390,7 +9477,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
                                        BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
 #define  BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val)    \
-               ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+               (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
 
 #define RX_DIVIDER_BIT_1_2                     0x3
 #define RX_DIVIDER_BIT_3_4                     0xC
index 8928894..e1dbb54 100644
@@ -320,6 +320,7 @@ static void advance_ring(struct i915_request *request)
                 * is just about to be. Either works, if we miss the last two
                 * noops - they are safe to be replayed on a reset.
                 */
+               GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
                tail = READ_ONCE(request->tail);
                list_del(&ring->active_link);
        } else {
@@ -383,8 +384,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
         * the subsequent request.
         */
        if (engine->last_retired_context)
-               intel_context_unpin(engine->last_retired_context, engine);
-       engine->last_retired_context = rq->ctx;
+               intel_context_unpin(engine->last_retired_context);
+       engine->last_retired_context = rq->hw_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +456,8 @@ static void i915_request_retire(struct i915_request *request)
        i915_request_remove_from_client(request);
 
        /* Retirement decays the ban score as it is a sign of ctx progress */
-       atomic_dec_if_positive(&request->ctx->ban_score);
-       intel_context_unpin(request->ctx, request->engine);
+       atomic_dec_if_positive(&request->gem_context->ban_score);
+       intel_context_unpin(request->hw_context);
 
        __retire_engine_upto(request->engine, request);
 
@@ -657,7 +658,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
        struct drm_i915_private *i915 = engine->i915;
        struct i915_request *rq;
-       struct intel_ring *ring;
+       struct intel_context *ce;
        int ret;
 
        lockdep_assert_held(&i915->drm.struct_mutex);
@@ -681,22 +682,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
-       ring = intel_context_pin(ctx, engine);
-       if (IS_ERR(ring))
-               return ERR_CAST(ring);
-       GEM_BUG_ON(!ring);
+       ce = intel_context_pin(ctx, engine);
+       if (IS_ERR(ce))
+               return ERR_CAST(ce);
 
        ret = reserve_gt(i915);
        if (ret)
                goto err_unpin;
 
-       ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+       ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
        if (ret)
                goto err_unreserve;
 
        /* Move our oldest request to the slab-cache (if not in use!) */
-       rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
-       if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+       rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+       if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
            i915_request_completed(rq))
                i915_request_retire(rq);
 
@@ -760,9 +760,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        INIT_LIST_HEAD(&rq->active_list);
        rq->i915 = i915;
        rq->engine = engine;
-       rq->ctx = ctx;
-       rq->ring = ring;
-       rq->timeline = ring->timeline;
+       rq->gem_context = ctx;
+       rq->hw_context = ce;
+       rq->ring = ce->ring;
+       rq->timeline = ce->ring->timeline;
        GEM_BUG_ON(rq->timeline == &engine->timeline);
 
        spin_lock_init(&rq->lock);
@@ -814,14 +815,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
                goto err_unwind;
 
        /* Keep a second pin for the dual retirement along engine and ring */
-       __intel_context_pin(rq->ctx, engine);
+       __intel_context_pin(ce);
+
+       rq->infix = rq->ring->emit; /* end of header; start of user payload */
 
        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
        return rq;
 
 err_unwind:
-       rq->ring->emit = rq->head;
+       ce->ring->emit = rq->head;
 
        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&rq->active_list));
@@ -832,7 +835,7 @@ err_unwind:
 err_unreserve:
        unreserve_gt(i915);
 err_unpin:
-       intel_context_unpin(ctx, engine);
+       intel_context_unpin(ce);
        return ERR_PTR(ret);
 }
 
@@ -1015,14 +1018,13 @@ i915_request_await_object(struct i915_request *to,
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_request_add(struct i915_request *request, bool flush_caches)
+void i915_request_add(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       struct intel_ring *ring = request->ring;
        struct i915_timeline *timeline = request->timeline;
+       struct intel_ring *ring = request->ring;
        struct i915_request *prev;
        u32 *cs;
-       int err;
 
        GEM_TRACE("%s fence %llx:%d\n",
                  engine->name, request->fence.context, request->fence.seqno);
@@ -1043,20 +1045,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
         * know that it is time to use that space up.
         */
        request->reserved_space = 0;
-
-       /*
-        * Emit any outstanding flushes - execbuf can fail to emit the flush
-        * after having emitted the batchbuffer command. Hence we need to fix
-        * things up similar to emitting the lazy request. The difference here
-        * is that the flush _must_ happen before the next request, no matter
-        * what.
-        */
-       if (flush_caches) {
-               err = engine->emit_flush(request, EMIT_FLUSH);
-
-               /* Not allowed to fail! */
-               WARN(err, "engine->emit_flush() failed: %d!\n", err);
-       }
+       engine->emit_flush(request, EMIT_FLUSH);
 
        /*
         * Record the position of the start of the breadcrumb so that
@@ -1095,8 +1084,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
        i915_gem_active_set(&timeline->last_request, request);
 
        list_add_tail(&request->ring_link, &ring->request_list);
-       if (list_is_first(&request->ring_link, &ring->request_list))
+       if (list_is_first(&request->ring_link, &ring->request_list)) {
+               GEM_TRACE("marking %s as active\n", ring->timeline->name);
                list_add(&ring->active_link, &request->i915->gt.active_rings);
+       }
        request->emitted_jiffies = jiffies;
 
        /*
@@ -1113,7 +1104,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
        local_bh_disable();
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
        if (engine->schedule)
-               engine->schedule(request, &request->ctx->sched);
+               engine->schedule(request, &request->gem_context->sched);
        rcu_read_unlock();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
index eddbd42..7ee220d 100644
@@ -93,8 +93,9 @@ struct i915_request {
         * i915_request_free() will then decrement the refcount on the
         * context.
         */
-       struct i915_gem_context *ctx;
+       struct i915_gem_context *gem_context;
        struct intel_engine_cs *engine;
+       struct intel_context *hw_context;
        struct intel_ring *ring;
        struct i915_timeline *timeline;
        struct intel_signal_node signaling;
@@ -133,6 +134,9 @@ struct i915_request {
        /** Position in the ring of the start of the request */
        u32 head;
 
+       /** Position in the ring of the start of the user packets */
+       u32 infix;
+
        /**
         * Position in the ring of the start of the postfix.
         * This is required to calculate the maximum available ring space
@@ -249,9 +253,7 @@ int i915_request_await_object(struct i915_request *to,
 int i915_request_await_dma_fence(struct i915_request *rq,
                                 struct dma_fence *fence);
 
-void __i915_request_add(struct i915_request *rq, bool flush_caches);
-#define i915_request_add(rq) \
-       __i915_request_add(rq, false)
+void i915_request_add(struct i915_request *rq);
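For callers the change above means the flush_caches knob is gone and the flush is emitted unconditionally; a before/after sketch (call sites invented):

/* before */ __i915_request_add(rq, true);
/* after  */ i915_request_add(rq);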
 
 void __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
@@ -266,6 +268,7 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE        BIT(0)
 #define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
 #define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
index 8cc3a25..b50c6b8 100644
@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 
            TP_STRUCT__entry(
                             __field(u32, dev)
-                            __field(u32, sync_from)
-                            __field(u32, sync_to)
+                            __field(u32, from_class)
+                            __field(u32, from_instance)
+                            __field(u32, to_class)
+                            __field(u32, to_instance)
                             __field(u32, seqno)
                             ),
 
            TP_fast_assign(
                           __entry->dev = from->i915->drm.primary->index;
-                          __entry->sync_from = from->engine->id;
-                          __entry->sync_to = to->engine->id;
+                          __entry->from_class = from->engine->uabi_class;
+                          __entry->from_instance = from->engine->instance;
+                          __entry->to_class = to->engine->uabi_class;
+                          __entry->to_instance = to->engine->instance;
                           __entry->seqno = from->global_seqno;
                           ),
 
-           TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+           TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
                      __entry->dev,
-                     __entry->sync_from, __entry->sync_to,
+                     __entry->from_class, __entry->from_instance,
+                     __entry->to_class, __entry->to_instance,
                      __entry->seqno)
 );
 
@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, flags)
                             ),
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->flags = flags;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->flags)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->flags)
 );
 
 DECLARE_EVENT_CLASS(i915_request,
@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global)
                             ),
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global = rq->global_seqno;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->global)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->global)
 );
 
 DEFINE_EVENT(i915_request, i915_request_add,
@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global_seqno)
                             __field(u32, port)
@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global_seqno = rq->global_seqno;
@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
                           __entry->port = port;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->prio, __entry->global_seqno,
-                     __entry->port)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->prio, __entry->global_seqno, __entry->port)
 );
 
 TRACE_EVENT(i915_request_out,
@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global_seqno)
                             __field(u32, completed)
@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global_seqno = rq->global_seqno;
                           __entry->completed = i915_request_completed(rq);
                           ),
 
-                   TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
-                             __entry->dev, __entry->hw_id, __entry->ring,
-                             __entry->ctx, __entry->seqno,
+                   TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+                             __entry->dev, __entry->class, __entry->instance,
+                             __entry->hw_id, __entry->ctx, __entry->seqno,
                              __entry->global_seqno, __entry->completed)
 );
 
@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
 
            TP_STRUCT__entry(
                             __field(u32, dev)
-                            __field(u32, ring)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(bool, waiters)
                             ),
 
            TP_fast_assign(
                           __entry->dev = engine->i915->drm.primary->index;
-                          __entry->ring = engine->id;
+                          __entry->class = engine->uabi_class;
+                          __entry->instance = engine->instance;
                           __entry->seqno = intel_engine_get_seqno(engine);
                           __entry->waiters = waiters;
                           ),
 
-           TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
-                     __entry->dev, __entry->ring, __entry->seqno,
-                     __entry->waiters)
+           TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->seqno, __entry->waiters)
 );
 
 DEFINE_EVENT(i915_request, i915_request_retire,
@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global)
                             __field(unsigned int, flags)
@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
             */
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global = rq->global_seqno;
                           __entry->flags = flags;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->global,
-                     !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+                     __entry->flags)
 );
 
 DEFINE_EVENT(i915_request, i915_request_wait_end,
@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
                        __entry->dev = ctx->i915->drm.primary->index;
                        __entry->ctx = ctx;
                        __entry->hw_id = ctx->hw_id;
-                       __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+                       __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
        ),
 
        TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -953,36 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free,
        TP_ARGS(ctx)
 );
 
-/**
- * DOC: switch_mm tracepoint
- *
- * This tracepoint allows tracking of the mm switch, which is an important point
- * in the lifetime of the vm in the legacy submission path. This tracepoint is
- * called only if full ppgtt is enabled.
- */
-TRACE_EVENT(switch_mm,
-       TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
-
-       TP_ARGS(engine, to),
-
-       TP_STRUCT__entry(
-                       __field(u32, ring)
-                       __field(struct i915_gem_context *, to)
-                       __field(struct i915_address_space *, vm)
-                       __field(u32, dev)
-       ),
-
-       TP_fast_assign(
-                       __entry->ring = engine->id;
-                       __entry->to = to;
-                       __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-                       __entry->dev = engine->i915->drm.primary->index;
-       ),
-
-       TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
-                 __entry->dev, __entry->ring, __entry->to, __entry->vm)
-);
-
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
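With these tracepoint changes an engine is reported as a uabi class:instance pair and the context id widens to the 64-bit fence context. An illustrative line assembled from the new TP_printk format above (all field values invented):

i915_request_add: dev=0, engine=0:0, hw_id=2, ctx=9, seqno=4, global=311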
index 5fe9f3f..869cf4a 100644
@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
                         node->start + node->size,
                         node->size / 1024);
 
-       ggtt->base.reserved -= node->size;
+       ggtt->vm.reserved -= node->size;
        drm_mm_remove_node(node);
 }
 
@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
 
        DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
                 start, end, size / 1024);
-       ret = i915_gem_gtt_reserve(&ggtt->base, node,
+       ret = i915_gem_gtt_reserve(&ggtt->vm, node,
                                   size, start, I915_COLOR_UNEVICTABLE,
                                   0);
        if (!ret)
-               ggtt->base.reserved += size;
+               ggtt->vm.reserved += size;
 
        return ret;
 }
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
 int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       unsigned long ggtt_end = ggtt->base.total;
+       unsigned long ggtt_end = ggtt->vm.total;
 
        unsigned long mappable_base, mappable_size, mappable_end;
        unsigned long unmappable_base, unmappable_size, unmappable_end;
index bb83384..551acc3 100644
@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
        return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
 }
 
+static inline bool
+intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+       return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
 int intel_vgt_balloon(struct drm_i915_private *dev_priv);
 void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
 
index 9324d47..e82aa80 100644
@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj,
        int i;
 
        /* The aliasing_ppgtt should never be used directly! */
-       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
 
        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
@@ -95,6 +95,7 @@ vma_create(struct drm_i915_gem_object *obj,
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
+       vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
@@ -280,7 +281,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        GEM_BUG_ON(!vma->pages);
 
        trace_i915_vma_bind(vma, bind_flags);
-       ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+       ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
 
@@ -345,7 +346,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
 
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
        GEM_BUG_ON(vma->iomap == NULL);
 
@@ -365,6 +366,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
                return;
 
        obj = vma->obj;
+       GEM_BUG_ON(!obj);
 
        i915_vma_unpin(vma);
        i915_vma_close(vma);
@@ -459,6 +461,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
        return true;
 }
 
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+       /*
+        * Combine the assertion that the object is bound and that we have
+        * pinned its pages. But we should never have bound the object
+        * more than we have pinned its pages. (For complete accuracy, we
+        * assume that no one else is pinning the pages, but as a rough assertion
+        * that we will not run into problems later, this will do!)
+        */
+       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 /**
  * i915_vma_insert - finds a slot for the vma in its address space
  * @vma: the vma
@@ -477,7 +491,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
        struct drm_i915_private *dev_priv = vma->vm->i915;
-       struct drm_i915_gem_object *obj = vma->obj;
+       unsigned int cache_level;
        u64 start, end;
        int ret;
 
@@ -512,20 +526,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
         * attempt to find space.
         */
        if (size > end) {
-               DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-                         size, obj->base.size,
-                         flags & PIN_MAPPABLE ? "mappable" : "total",
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+                         size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }
 
-       ret = i915_gem_object_pin_pages(obj);
-       if (ret)
-               return ret;
+       if (vma->obj) {
+               ret = i915_gem_object_pin_pages(vma->obj);
+               if (ret)
+                       return ret;
+
+               cache_level = vma->obj->cache_level;
+       } else {
+               cache_level = 0;
+       }
 
        GEM_BUG_ON(vma->pages);
 
-       ret = vma->vm->set_pages(vma);
+       ret = vma->ops->set_pages(vma);
        if (ret)
                goto err_unpin;
 
@@ -538,7 +557,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                }
 
                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-                                          size, offset, obj->cache_level,
+                                          size, offset, cache_level,
                                           flags);
                if (ret)
                        goto err_clear;
@@ -577,7 +596,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                }
 
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-                                         size, alignment, obj->cache_level,
+                                         size, alignment, cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_clear;
@@ -586,23 +605,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
-       spin_lock(&dev_priv->mm.obj_lock);
-       list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-       obj->bind_count++;
-       spin_unlock(&dev_priv->mm.obj_lock);
+       if (vma->obj) {
+               struct drm_i915_gem_object *obj = vma->obj;
 
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+               spin_lock(&dev_priv->mm.obj_lock);
+               list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+               obj->bind_count++;
+               spin_unlock(&dev_priv->mm.obj_lock);
+
+               assert_bind_count(obj);
+       }
 
        return 0;
 
 err_clear:
-       vma->vm->clear_pages(vma);
+       vma->ops->clear_pages(vma);
 err_unpin:
-       i915_gem_object_unpin_pages(obj);
+       if (vma->obj)
+               i915_gem_object_unpin_pages(vma->obj);
        return ret;
 }
 
@@ -610,30 +634,35 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
-       struct drm_i915_gem_object *obj = vma->obj;
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-       vma->vm->clear_pages(vma);
+       vma->ops->clear_pages(vma);
 
        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-       /* Since the unbound list is global, only move to that list if
+       /*
+        * Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
-       spin_lock(&i915->mm.obj_lock);
-       if (--obj->bind_count == 0)
-               list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-       spin_unlock(&i915->mm.obj_lock);
-
-       /* And finally now the object is completely decoupled from this vma,
-        * we can drop its hold on the backing storage and allow it to be
-        * reaped by the shrinker.
-        */
-       i915_gem_object_unpin_pages(obj);
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+       if (vma->obj) {
+               struct drm_i915_gem_object *obj = vma->obj;
+
+               spin_lock(&i915->mm.obj_lock);
+               if (--obj->bind_count == 0)
+                       list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+               spin_unlock(&i915->mm.obj_lock);
+
+               /*
+                * Finally, now that the object is completely decoupled from this
+                * vma, we can drop its hold on the backing storage and allow
+                * it to be reaped by the shrinker.
+                */
+               i915_gem_object_unpin_pages(obj);
+               assert_bind_count(obj);
+       }
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -658,7 +687,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+       ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
        if (ret)
                goto err_remove;
 
@@ -715,6 +744,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
+       struct drm_i915_private *i915 = vma->vm->i915;
        int i;
 
        GEM_BUG_ON(vma->node.allocated);
@@ -726,12 +756,13 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
        list_del(&vma->obj_link);
        list_del(&vma->vm_link);
-       rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+       if (vma->obj)
+               rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-       kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+       kmem_cache_free(i915->vmas, vma);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -797,13 +828,13 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 
 int i915_vma_unbind(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = vma->obj;
        unsigned long active;
        int ret;
 
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-       /* First wait upon any activity as retiring the request may
+       /*
+        * First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        might_sleep();
@@ -811,7 +842,8 @@ int i915_vma_unbind(struct i915_vma *vma)
        if (active) {
                int idx;
 
-               /* When a closed VMA is retired, it is unbound - eek.
+               /*
+                * When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
@@ -849,9 +881,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        if (!drm_mm_node_allocated(&vma->node))
                return 0;
 
-       GEM_BUG_ON(obj->bind_count == 0);
-       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
        if (i915_vma_is_map_and_fenceable(vma)) {
                /*
                 * Check that we have flushed all writes through the GGTT
@@ -878,7 +907,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
-               vma->vm->unbind_vma(vma);
+               vma->ops->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
index fc4294c..66a2289 100644 (file)
@@ -49,10 +49,12 @@ struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
+       const struct i915_vma_ops *ops;
        struct drm_i915_fence_reg *fence;
        struct reservation_object *resv; /** Alias of obj->resv */
        struct sg_table *pages;
        void __iomem *iomap;
+       void *private; /* owned by creator */
        u64 size;
        u64 display_alignment;
        struct i915_page_sizes page_sizes;
@@ -339,6 +341,12 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
        __i915_vma_unpin(vma);
 }
 
+static inline bool i915_vma_is_bound(const struct i915_vma *vma,
+                                    unsigned int where)
+{
+       return vma->flags & where;
+}
+
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
  * @vma: VMA to iomap
@@ -407,7 +415,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+       /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
        if (vma->fence)
                __i915_vma_unpin_fence(vma);
 }
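
i915_vma_is_bound() above is a plain flags test against the bind bits. A hedged usage sketch (the guard itself is hypothetical, but I915_VMA_GLOBAL_BIND / I915_VMA_LOCAL_BIND are the driver's real bind flags):

/* Sketch: skip work when the VMA is already bound anywhere. */
static bool toy_needs_bind(const struct i915_vma *vma)
{
	return !i915_vma_is_bound(vma,
				  I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
}
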
index d1abf4b..6ba478e 100644 (file)
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
 #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
 
-static struct intel_dsm_priv {
-       acpi_handle dhandle;
-} intel_dsm_priv;
-
 static const guid_t intel_dsm_guid =
        GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
                  0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
@@ -72,12 +68,12 @@ static char *intel_dsm_mux_type(u8 type)
        }
 }
 
-static void intel_dsm_platform_mux_info(void)
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
 {
        int i;
        union acpi_object *pkg, *connector_count;
 
-       pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
+       pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
                        INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
                        NULL, ACPI_TYPE_PACKAGE);
        if (!pkg) {
@@ -107,41 +103,40 @@ static void intel_dsm_platform_mux_info(void)
        ACPI_FREE(pkg);
 }
 
-static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
 {
        acpi_handle dhandle;
 
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
-               return false;
+               return NULL;
 
        if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
                            1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
                DRM_DEBUG_KMS("no _DSM method for intel device\n");
-               return false;
+               return NULL;
        }
 
-       intel_dsm_priv.dhandle = dhandle;
-       intel_dsm_platform_mux_info();
+       intel_dsm_platform_mux_info(dhandle);
 
-       return true;
+       return dhandle;
 }
 
 static bool intel_dsm_detect(void)
 {
+       acpi_handle dhandle = NULL;
        char acpi_method_name[255] = { 0 };
        struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
        struct pci_dev *pdev = NULL;
-       bool has_dsm = false;
        int vga_count = 0;
 
        while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
                vga_count++;
-               has_dsm |= intel_dsm_pci_probe(pdev);
+               dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
        }
 
-       if (vga_count == 2 && has_dsm) {
-               acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+       if (vga_count == 2 && dhandle) {
+               acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
                DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
                                 acpi_method_name);
                return true;
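
The loop above relies on the GNU C `a ?: b` form: evaluate `a` once, use it if non-NULL, else keep `b`, so dhandle remembers the most recent device with a _DSM. A portable restatement as a sketch, using only calls already present above (toy_find_dsm_handle is an invented name):

/* Portable equivalent of the GNU `?:` probe loop. */
static acpi_handle toy_find_dsm_handle(int *vga_count)
{
	struct pci_dev *pdev = NULL;
	acpi_handle dhandle = NULL;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		acpi_handle h = intel_dsm_pci_probe(pdev);

		(*vga_count)++;
		if (h)
			dhandle = h;	/* keep the most recent hit */
	}
	return dhandle;
}
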
index 40285d1..b04952b 100644 (file)
@@ -59,7 +59,8 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
        else if (property == dev_priv->broadcast_rgb_property)
                *val = intel_conn_state->broadcast_rgb;
        else {
-               DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+               DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+                                property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -95,7 +96,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
                return 0;
        }
 
-       DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+       DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+                        property->base.id, property->name);
        return -EINVAL;
 }
 
@@ -124,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
        if (new_conn_state->force_audio != old_conn_state->force_audio ||
            new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
            new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+           new_conn_state->base.content_type != old_conn_state->base.content_type ||
            new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
                crtc_state->mode_changed = true;
 
index 6d06878..dcba645 100644 (file)
@@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
                &crtc_state->base.adjusted_mode;
        int ret;
 
-       /*
-        * Both crtc and plane->crtc could be NULL if we're updating a
-        * property while the plane is disabled.  We don't actually have
-        * anything driver-specific we need to test in that case, so
-        * just return success.
-        */
        if (!intel_state->base.crtc && !old_plane_state->base.crtc)
                return 0;
 
@@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        const struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
 
-       /*
-        * Both crtc and plane->crtc could be NULL if we're updating a
-        * property while the plane is disabled.  We don't actually have
-        * anything driver-specific we need to test in that case, so
-        * just return success.
-        */
        if (!crtc)
                return 0;
 
@@ -277,7 +265,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
                                struct drm_property *property,
                                uint64_t *val)
 {
-       DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+       DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+                     property->base.id, property->name);
        return -EINVAL;
 }
 
@@ -299,6 +288,7 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
                                struct drm_property *property,
                                uint64_t val)
 {
-       DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+       DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+                     property->base.id, property->name);
        return -EINVAL;
 }
index 3ea566f..bb94172 100644 (file)
@@ -59,6 +59,7 @@
  */
 
 /* DP N/M table */
+#define LC_810M        810000
 #define LC_540M        540000
 #define LC_270M        270000
 #define LC_162M        162000
@@ -99,6 +100,15 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
        { 128000, LC_540M, 4096, 33750 },
        { 176400, LC_540M, 3136, 18750 },
        { 192000, LC_540M, 2048, 11250 },
+       { 32000, LC_810M, 1024, 50625 },
+       { 44100, LC_810M, 784, 28125 },
+       { 48000, LC_810M, 512, 16875 },
+       { 64000, LC_810M, 2048, 50625 },
+       { 88200, LC_810M, 1568, 28125 },
+       { 96000, LC_810M, 1024, 16875 },
+       { 128000, LC_810M, 4096, 50625 },
+       { 176400, LC_810M, 3136, 28125 },
+       { 192000, LC_810M, 2048, 16875 },
 };
 
 static const struct dp_aud_n_m *
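
Each new LC_810M row can be checked against the DP audio timestamp relation Maud/Naud = 512 * fs / f_LS_Clk (an assumption drawn from the DP spec, not stated in the patch). For example, 512 * 32000 / 810000000 reduces to 1024 / 50625, matching the first 810M row. A cross-multiplied sanity-check sketch that avoids division:

/* Sketch: verify a dp_aud_n_m row; ls_clk is in kHz as in the table. */
static bool toy_aud_n_m_valid(u64 fs_hz, u64 ls_clk_khz, u64 m, u64 n)
{
	/* 512*fs / (ls_clk*1000) == m/n  <=>  512*fs*n == m*ls_clk*1000 */
	return 512 * fs_hz * n == m * ls_clk_khz * 1000;
}
/* toy_aud_n_m_valid(32000, 810000, 1024, 50625) == true */
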
@@ -198,13 +208,13 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
 }
 
 static bool intel_eld_uptodate(struct drm_connector *connector,
-                              i915_reg_t reg_eldv, uint32_t bits_eldv,
-                              i915_reg_t reg_elda, uint32_t bits_elda,
+                              i915_reg_t reg_eldv, u32 bits_eldv,
+                              i915_reg_t reg_elda, u32 bits_elda,
                               i915_reg_t reg_edid)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       uint8_t *eld = connector->eld;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 tmp;
        int i;
 
        tmp = I915_READ(reg_eldv);
@@ -218,7 +228,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
        I915_WRITE(reg_elda, tmp);
 
        for (i = 0; i < drm_eld_size(eld) / 4; i++)
-               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+               if (I915_READ(reg_edid) != *((const u32 *)eld + i))
                        return false;
 
        return true;
@@ -229,7 +239,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
                                    const struct drm_connector_state *old_conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       uint32_t eldv, tmp;
+       u32 eldv, tmp;
 
        DRM_DEBUG_KMS("Disable audio codec\n");
 
@@ -251,12 +261,12 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_connector *connector = conn_state->connector;
-       uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 eldv;
+       u32 tmp;
        int len, i;
 
-       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
 
        tmp = I915_READ(G4X_AUD_VID_DID);
        if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
@@ -278,7 +288,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
        len = min(drm_eld_size(eld) / 4, len);
        DRM_DEBUG_DRIVER("ELD size %d\n", len);
        for (i = 0; i < len; i++)
-               I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+               I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
 
        tmp = I915_READ(G4X_AUD_CNTL_ST);
        tmp |= eldv;
@@ -393,7 +403,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
-       uint32_t tmp;
+       u32 tmp;
 
        DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
 
@@ -426,8 +436,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_connector *connector = conn_state->connector;
        enum pipe pipe = crtc->pipe;
-       const uint8_t *eld = connector->eld;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 tmp;
        int len, i;
 
        DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
@@ -456,7 +466,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
        /* Up to 84 bytes of hw ELD buffer */
        len = min(drm_eld_size(eld), 84);
        for (i = 0; i < len / 4; i++)
-               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
 
        /* ELD valid */
        tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -477,7 +487,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
        enum port port = encoder->port;
-       uint32_t tmp, eldv;
+       u32 tmp, eldv;
        i915_reg_t aud_config, aud_cntrl_st2;
 
        DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
@@ -524,8 +534,8 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
        struct drm_connector *connector = conn_state->connector;
        enum pipe pipe = crtc->pipe;
        enum port port = encoder->port;
-       uint8_t *eld = connector->eld;
-       uint32_t tmp, eldv;
+       const u8 *eld = connector->eld;
+       u32 tmp, eldv;
        int len, i;
        i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
@@ -575,7 +585,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
        /* Up to 84 bytes of hw ELD buffer */
        len = min(drm_eld_size(eld), 84);
        for (i = 0; i < len / 4; i++)
-               I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+               I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
 
        /* ELD valid */
        tmp = I915_READ(aud_cntrl_st2);
index 54270bd..1faa494 100644 (file)
@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        if (!lvds_lfp_data_ptrs)
                return;
 
-       dev_priv->vbt.lvds_vbt = 1;
-
        panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
                                               lvds_lfp_data_ptrs,
                                               panel_type);
@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
        if (!driver)
                return;
 
-       if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
-               dev_priv->vbt.edp.support = 1;
+       if (INTEL_GEN(dev_priv) >= 5) {
+               /*
+                * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+                * to mean "eDP". The VBT spec doesn't agree with that
+                * interpretation, but real world VBTs seem to.
+                */
+               if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+                       dev_priv->vbt.int_lvds_support = 0;
+       } else {
+               /*
+                * FIXME it's not clear which BDB version has the LVDS config
+                * bits defined. Revision history in the VBT spec says:
+                * "0.92 | Add two definitions for VBT value of LVDS Active
+                *  Config (00b and 11b values defined) | 06/13/2005"
+                * but does not specify the BDB version.
+                *
+                * So far version 134 (on i945gm) is the oldest VBT observed
+                * in the wild with the bits correctly populated. Version
+                * 108 (on i85x) does not have the bits correctly populated.
+                */
+               if (bdb->version >= 134 &&
+                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+                       dev_priv->vbt.int_lvds_support = 0;
+       }
 
        DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
        /*
@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        int panel_type = dev_priv->vbt.panel_type;
 
        edp = find_section(bdb, BDB_EDP);
-       if (!edp) {
-               if (dev_priv->vbt.edp.support)
-                       DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+       if (!edp)
                return;
-       }
 
        switch ((edp->color_depth >> (panel_type * 2)) & 3) {
        case EDP_18BPP:
@@ -634,7 +652,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        }
 
        if (bdb->version >= 173) {
-               uint8_t vswing;
+               u8 vswing;
 
                /* Don't read from VBT if module parameter has a valid value */
                if (i915_modparams.edp_vswing) {
@@ -688,8 +706,54 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
                break;
        }
 
-       dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
-       dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+       /*
+        * New PSR options: 0=500us, 1=100us, 2=2500us, 3=0us.
+        * The old decimal value is the wakeup time in multiples of 100 us.
+        */
+       if (bdb->version >= 205 &&
+           (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
+            INTEL_GEN(dev_priv) >= 10)) {
+               switch (psr_table->tp1_wakeup_time) {
+               case 0:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+                       break;
+               case 1:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+                       break;
+               case 3:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+                                       psr_table->tp1_wakeup_time);
+                       /* fallthrough */
+               case 2:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+                       break;
+               }
+
+               switch (psr_table->tp2_tp3_wakeup_time) {
+               case 0:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+                       break;
+               case 1:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+                       break;
+               case 3:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+                                       psr_table->tp2_tp3_wakeup_time);
+                       /* fallthrough */
+               case 2:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+                       break;
+               }
+       } else {
+               dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+               dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+       }
 }
 
 static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
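
For bdb->version >= 205 on the listed platforms, the wakeup-time fields are 2-bit codes rather than multiples of 100 us. A table-driven sketch equivalent to the two switch statements above (toy_psr_wakeup_us is an invented helper):

/* Sketch: decode the 2-bit VBT code; out-of-range values (impossible for
 * two bits) fall back to the 2500us maximum, matching the fallthrough.
 */
static u16 toy_psr_wakeup_us(u8 code)
{
	static const u16 map[] = { 500, 100, 2500, 0 };

	return code < ARRAY_SIZE(map) ? map[code] : 2500;
}
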
@@ -902,7 +966,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
         * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
         * byte.
         */
-       size_of_sequence = *((const uint32_t *)(data + index));
+       size_of_sequence = *((const u32 *)(data + index));
        index += 4;
 
        seq_end = index + size_of_sequence;
@@ -1197,18 +1261,37 @@ static const u8 cnp_ddc_pin_map[] = {
        [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
 };
 
+static const u8 icp_ddc_pin_map[] = {
+       [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+       [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+       [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+       [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+       [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+       [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
-       if (HAS_PCH_CNP(dev_priv)) {
-               if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
-                       return cnp_ddc_pin_map[vbt_pin];
-               } else {
-                       DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
-                       return 0;
-               }
+       const u8 *ddc_pin_map;
+       int n_entries;
+
+       if (HAS_PCH_ICP(dev_priv)) {
+               ddc_pin_map = icp_ddc_pin_map;
+               n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+       } else if (HAS_PCH_CNP(dev_priv)) {
+               ddc_pin_map = cnp_ddc_pin_map;
+               n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+       } else {
+               /* Assuming direct map */
+               return vbt_pin;
        }
 
-       return vbt_pin;
+       if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+               return ddc_pin_map[vbt_pin];
+
+       DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+                     vbt_pin);
+       return 0;
 }
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
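
The refactored map_ddc_pin() follows the common sparse-lookup pattern: pick the platform's table, bounds-check the index, and treat a zero entry as "no mapping". A generic restatement, as a sketch:

/* Sketch: bounds-checked table lookup with 0 as the invalid sentinel. */
static u8 toy_map_pin(const u8 *table, int n_entries, u8 key)
{
	if (key < n_entries && table[key] != 0)
		return table[key];
	return 0;	/* caller logs and ignores the pin */
}
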
@@ -1504,7 +1587,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 
        /* LFP panel data */
        dev_priv->vbt.lvds_dither = 1;
-       dev_priv->vbt.lvds_vbt = 0;
 
        /* SDVO panel data */
        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
@@ -1513,6 +1595,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        dev_priv->vbt.int_tv_support = 1;
        dev_priv->vbt.int_crt_support = 1;
 
+       /* driver features */
+       dev_priv->vbt.int_lvds_support = 1;
+
        /* Default to using SSC */
        dev_priv->vbt.lvds_use_ssc = 1;
        /*
@@ -1636,7 +1721,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
        const struct bdb_header *bdb;
        u8 __iomem *bios = NULL;
 
-       if (HAS_PCH_NOP(dev_priv)) {
+       if (INTEL_INFO(dev_priv)->num_pipes == 0) {
                DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
                return;
        }
index 18e643d..86a987b 100644 (file)
@@ -846,8 +846,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned long flags;
 
-       spin_lock_irq(&b->irq_lock);
+       spin_lock_irqsave(&b->irq_lock, flags);
 
        /*
         * Leave the fake_irq timer enabled (if it is running), but clear the
@@ -871,7 +872,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
         */
        clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 
-       spin_unlock_irq(&b->irq_lock);
+       spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
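
Switching intel_engine_reset_breadcrumbs() to irqsave/irqrestore makes it safe to call with interrupts already disabled: spin_unlock_irq() would unconditionally re-enable them, while irqrestore returns to whatever state the caller had. Minimal illustration, with an invented toy_locked_reset():

/* Sketch: irqsave round-trips the caller's IRQ state, _irq does not. */
static void toy_locked_reset(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* safe in any context */
	/* ... clear breadcrumb state ... */
	spin_unlock_irqrestore(lock, flags);	/* restore, don't force-enable */
}
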
index 704ddb4..bf9433d 100644 (file)
@@ -991,6 +991,16 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
        u32 freq_select, cdclk_ctl;
        int ret;
 
+       /*
+        * Based on WA#1183, the 308 and 617 MHz CDCLK rates are
+        * unsupported on SKL. In theory this should never happen since only
+        * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
+        * supported on SKL either, see the above WA. WARN whenever trying to
+        * use the corresponding VCO freq as that always leads to using the
+        * minimum 308MHz CDCLK.
+        */
+       WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+
        mutex_lock(&dev_priv->pcu_lock);
        ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1861,11 +1871,35 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
                              skl_cdclk_decimal(cdclk));
 
        mutex_lock(&dev_priv->pcu_lock);
-       /* TODO: add proper DVFS support. */
-       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               cdclk_state->voltage_level);
        mutex_unlock(&dev_priv->pcu_lock);
 
        intel_update_cdclk(dev_priv);
+
+       /*
+        * Can't read out the voltage level :(
+        * Let's just assume everything is as expected.
+        */
+       dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+       switch (cdclk) {
+       case 50000:
+       case 307200:
+       case 312000:
+               return 0;
+       case 556800:
+       case 552000:
+               return 1;
+       default:
+               MISSING_CASE(cdclk);
+       case 652800:
+       case 648000:
+               return 2;
+       }
 }
 
 static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1899,7 +1933,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
                 */
                cdclk_state->vco = 0;
                cdclk_state->cdclk = cdclk_state->bypass;
-               return;
+               goto out;
        }
 
        cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
@@ -1908,6 +1942,14 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
        WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
 
        cdclk_state->cdclk = cdclk_state->vco / 2;
+
+out:
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               icl_calc_voltage_level(cdclk_state->cdclk);
 }
 
 /**
@@ -1950,6 +1992,8 @@ sanitize:
        sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
        sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
                                                     sanitized_state.cdclk);
+       sanitized_state.voltage_level =
+                               icl_calc_voltage_level(sanitized_state.cdclk);
 
        icl_set_cdclk(dev_priv, &sanitized_state);
 }
@@ -1967,6 +2011,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
 
        cdclk_state.cdclk = cdclk_state.bypass;
        cdclk_state.vco = 0;
+       cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
 
        icl_set_cdclk(dev_priv, &cdclk_state);
 }
@@ -2470,6 +2515,9 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
 
        intel_state->cdclk.logical.vco = vco;
        intel_state->cdclk.logical.cdclk = cdclk;
+       intel_state->cdclk.logical.voltage_level =
+               max(icl_calc_voltage_level(cdclk),
+                   cnl_compute_min_voltage_level(intel_state));
 
        if (!intel_state->active_crtcs) {
                cdclk = icl_calc_cdclk(0, ref);
@@ -2477,6 +2525,8 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
 
                intel_state->cdclk.actual.vco = vco;
                intel_state->cdclk.actual.cdclk = cdclk;
+               intel_state->cdclk.actual.voltage_level =
+                       icl_calc_voltage_level(cdclk);
        } else {
                intel_state->cdclk.actual = intel_state->cdclk.logical;
        }
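
The logical voltage level above is the max of what the CDCLK frequency itself demands and the per-port minimum from cnl_compute_min_voltage_level(); a toy restatement of that combination, reusing icl_calc_voltage_level() from earlier in this file:

/* Sketch: the DVFS level sent to pcode must satisfy both constraints. */
static u8 toy_required_voltage_level(int cdclk_khz, u8 port_min_level)
{
	return max(icl_calc_voltage_level(cdclk_khz), port_min_level);
}
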
index 072b326..0c6bf82 100644 (file)
@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
        return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
 
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+                           i915_reg_t adpa_reg, enum pipe *pipe)
+{
+       u32 val;
+
+       val = I915_READ(adpa_reg);
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (HAS_PCH_CPT(dev_priv))
+               *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+       else
+               *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+       return val & ADPA_DAC_ENABLE;
+}
+
 static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
-
-       tmp = I915_READ(crt->adpa_reg);
+       ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
 
-       if (!(tmp & ADPA_DAC_ENABLE))
-               goto out;
-
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
-
-       ret = true;
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
        if (HAS_PCH_LPT(dev_priv))
                ; /* Those bits don't exist here */
        else if (HAS_PCH_CPT(dev_priv))
-               adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
-       else if (crtc->pipe == 0)
-               adpa |= ADPA_PIPE_A_SELECT;
+               adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
        else
-               adpa |= ADPA_PIPE_B_SELECT;
+               adpa |= ADPA_PIPE_SEL(crtc->pipe);
 
        if (!HAS_PCH_SPLIT(dev_priv))
                I915_WRITE(BCLRPAT(crtc->pipe), 0);
@@ -232,6 +232,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
 
        lpt_disable_pch_transcoder(dev_priv);
@@ -268,6 +270,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
        dev_priv->display.fdi_link_train(crtc, crtc_state);
+
+       intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void hsw_enable_crt(struct intel_encoder *encoder,
@@ -333,6 +337,10 @@ intel_crt_mode_valid(struct drm_connector *connector,
            (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
                return MODE_CLOCK_HIGH;
 
+       /* HSW/BDW FDI limited to 4k */
+       if (mode->hdisplay > 4096)
+               return MODE_H_ILLEGAL;
+
        return MODE_OK;
 }
 
@@ -375,6 +383,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return false;
 
+       /* HSW/BDW FDI limited to 4k */
+       if (adjusted_mode->crtc_hdisplay > 4096 ||
+           adjusted_mode->crtc_hblank_start > 4096)
+               return false;
+
        pipe_config->has_pch_encoder = true;
 
        /* LPT FDI RX only supports 8bpc. */
@@ -513,7 +526,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
         * to get a reliable result.
         */
 
-       if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
+       if (IS_G45(dev_priv))
                tries = 2;
        else
                tries = 1;
index f4a8598..044fe1f 100644 (file)
@@ -915,7 +915,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
        level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
-       if (IS_CANNONLAKE(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               if (port == PORT_A || port == PORT_B)
+                       icl_get_combo_buf_trans(dev_priv, port,
+                                               INTEL_OUTPUT_HDMI, &n_entries);
+               else
+                       n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+               default_entry = n_entries - 1;
+       } else if (IS_CANNONLAKE(dev_priv)) {
                cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
                default_entry = n_entries - 1;
        } else if (IS_GEN9_LP(dev_priv)) {
@@ -1055,6 +1062,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
                                       const struct intel_shared_dpll *pll)
 {
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       int clock = crtc->config->port_clock;
        const enum intel_dpll_id id = pll->info->id;
 
        switch (id) {
@@ -1063,6 +1072,20 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
                return DDI_CLK_SEL_NONE;
+       case DPLL_ID_ICL_TBTPLL:
+               switch (clock) {
+               case 162000:
+                       return DDI_CLK_SEL_TBT_162;
+               case 270000:
+                       return DDI_CLK_SEL_TBT_270;
+               case 540000:
+                       return DDI_CLK_SEL_TBT_540;
+               case 810000:
+                       return DDI_CLK_SEL_TBT_810;
+               default:
+                       MISSING_CASE(clock);
+                       break;
+               }
        case DPLL_ID_ICL_MGPLL1:
        case DPLL_ID_ICL_MGPLL2:
        case DPLL_ID_ICL_MGPLL3:
@@ -1243,35 +1266,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
        return ret;
 }
 
-/* Finds the only possible encoder associated with the given CRTC. */
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_encoder *ret = NULL;
-       struct drm_atomic_state *state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int num_encoders = 0;
-       int i;
-
-       state = crtc_state->base.state;
-
-       for_each_new_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               ret = to_intel_encoder(connector_state->best_encoder);
-               num_encoders++;
-       }
-
-       WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
-            pipe_name(crtc->pipe));
-
-       BUG_ON(ret == NULL);
-       return ret;
-}
-
 #define LC_FREQ 2700
 
 static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
@@ -1374,8 +1368,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
        uint32_t cfgcr0, cfgcr1;
        uint32_t p0, p1, p2, dco_freq, ref_clock;
 
-       cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
-       cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+       if (INTEL_GEN(dev_priv) >= 11) {
+               cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+               cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+       } else {
+               cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+               cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+       }
 
        p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
        p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
@@ -1451,6 +1450,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
        pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
 
+static void icl_ddi_clock_get(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
+       int link_clock = 0;
+       uint32_t pll_id;
+
+       pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+       if (port == PORT_A || port == PORT_B) {
+               if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+                       link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+               else
+                       link_clock = icl_calc_dp_combo_pll_link(dev_priv,
+                                                               pll_id);
+       } else {
+               /* FIXME - Add for MG PLL */
+               WARN(1, "MG PLL clock_get code not implemented yet\n");
+       }
+
+       pipe_config->port_clock = link_clock;
+       ddi_dotclock_get(pipe_config);
+}
+
 static void cnl_ddi_clock_get(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config)
 {
@@ -1644,6 +1667,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
                bxt_ddi_clock_get(encoder, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_ddi_clock_get(encoder, pipe_config);
+       else if (IS_ICELAKE(dev_priv))
+               icl_ddi_clock_get(encoder, pipe_config);
 }
 
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -2115,6 +2140,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
                DP_TRAIN_VOLTAGE_SWING_MASK;
 }
 
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change, we need to
+ * rethink this code.
+ */
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+{
+       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+               return DP_TRAIN_PRE_EMPH_LEVEL_3;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+               return DP_TRAIN_PRE_EMPH_LEVEL_2;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+               return DP_TRAIN_PRE_EMPH_LEVEL_1;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+       default:
+               return DP_TRAIN_PRE_EMPH_LEVEL_0;
+       }
+}
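
The switch above encodes the usual DP trade-off: each step of voltage swing costs one step of pre-emphasis headroom, so the level numbers sum to at most 3. An equivalent arithmetic sketch (this returns the bare level number, whereas the driver returns the DP_TRAIN_PRE_EMPH_LEVEL_* register encoding):

/* Sketch: pre-emphasis max level = 3 - voltage swing level, floored at 0. */
static u8 toy_preemph_max_level(u8 train_set)
{
	u8 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;

	return vswing >= 3 ? 0 : 3 - vswing;
}
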
+
 static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
                                   int level, enum intel_output_type type)
 {
@@ -2610,6 +2655,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        intel_dp_start_link_train(intel_dp);
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
+
+       intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2640,6 +2687,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
        if (IS_GEN9_BC(dev_priv))
                skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
+       intel_ddi_enable_pipe_clock(crtc_state);
+
        intel_dig_port->set_infoframes(&encoder->base,
                                       crtc_state->has_infoframe,
                                       crtc_state, conn_state);
@@ -2709,6 +2758,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /*
         * Power down sink before disabling the port, otherwise we end
         * up getting interrupts from the sink on detecting link loss.
@@ -2734,11 +2785,13 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
-       intel_disable_ddi_buf(encoder);
-
        dig_port->set_infoframes(&encoder->base, false,
                                 old_crtc_state, old_conn_state);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
+       intel_disable_ddi_buf(encoder);
+
        intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
        intel_ddi_clk_disable(encoder);
@@ -3025,6 +3078,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 {
        if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 2;
+       else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+               crtc_state->min_voltage_level = 1;
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
index 2cc6faa..2c8fef3 100644 (file)
@@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
-        * We can ditch the crtc->primary->fb check as soon as we can
+        * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
@@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
        i915_reg_t pp_reg;
        u32 val;
-       enum pipe panel_pipe = PIPE_A;
+       enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;
 
        if (WARN_ON(HAS_DDI(dev_priv)))
@@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
 
-               if (port_sel == PANEL_PORT_SELECT_LVDS &&
-                   I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
-                       panel_pipe = PIPE_B;
-               /* XXX: else fix for eDP */
+               switch (port_sel) {
+               case PANEL_PORT_SELECT_LVDS:
+                       intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPA:
+                       intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPC:
+                       intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPD:
+                       intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+                       break;
+               default:
+                       MISSING_CASE(port_sel);
+                       break;
+               }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
+               u32 port_sel;
+
                pp_reg = PP_CONTROL(0);
-               if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
-                       panel_pipe = PIPE_B;
+               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+               WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+               intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }
 
        val = I915_READ(pp_reg);
@@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 
 static void assert_plane(struct intel_plane *plane, bool state)
 {
-       bool cur_state = plane->get_hw_state(plane);
+       enum pipe pipe;
+       bool cur_state;
+
+       cur_state = plane->get_hw_state(plane, &pipe);
 
        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
@@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
             pipe_name(pipe));
 }
 
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
-                           enum pipe pipe, u32 port_sel, u32 val)
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+                                  enum pipe pipe, enum port port,
+                                  i915_reg_t dp_reg)
 {
-       if ((val & DP_PORT_EN) == 0)
-               return false;
+       enum pipe port_pipe;
+       bool state;
 
-       if (HAS_PCH_CPT(dev_priv)) {
-               u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
-               if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
-                       return false;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
-                       return false;
-       } else {
-               if ((val & DP_PIPE_MASK) != (pipe << 30))
-                       return false;
-       }
-       return true;
-}
+       state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
 
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, u32 val)
-{
-       if ((val & SDVO_ENABLE) == 0)
-               return false;
+       I915_STATE_WARN(state && port_pipe == pipe,
+                       "PCH DP %c enabled on transcoder %c, should be disabled\n",
+                       port_name(port), pipe_name(pipe));
 
-       if (HAS_PCH_CPT(dev_priv)) {
-               if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
-                       return false;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
-                       return false;
-       } else {
-               if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
-                       return false;
-       }
-       return true;
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+                       "IBX PCH DP %c still using transcoder B\n",
+                       port_name(port));
 }
 
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, u32 val)
-{
-       if ((val & LVDS_PORT_EN) == 0)
-               return false;
-
-       if (HAS_PCH_CPT(dev_priv)) {
-               if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
-                       return false;
-       } else {
-               if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
-                       return false;
-       }
-       return true;
-}
-
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, u32 val)
-{
-       if ((val & ADPA_DAC_ENABLE) == 0)
-               return false;
-       if (HAS_PCH_CPT(dev_priv)) {
-               if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
-                       return false;
-       } else {
-               if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
-                       return false;
-       }
-       return true;
-}
-
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
-                                  enum pipe pipe, i915_reg_t reg,
-                                  u32 port_sel)
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe, enum port port,
+                                    i915_reg_t hdmi_reg)
 {
-       u32 val = I915_READ(reg);
-       I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
-            "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
-            i915_mmio_reg_offset(reg), pipe_name(pipe));
+       enum pipe port_pipe;
+       bool state;
 
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
-            && (val & DP_PIPEB_SELECT),
-            "IBX PCH dp port still using transcoder B\n");
-}
+       state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
 
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
-                                    enum pipe pipe, i915_reg_t reg)
-{
-       u32 val = I915_READ(reg);
-       I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
-            "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
-            i915_mmio_reg_offset(reg), pipe_name(pipe));
+       I915_STATE_WARN(state && port_pipe == pipe,
+                       "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+                       port_name(port), pipe_name(pipe));
 
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
-            && (val & SDVO_PIPE_B_SELECT),
-            "IBX PCH hdmi port still using transcoder B\n");
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+                       "IBX PCH HDMI %c still using transcoder B\n",
+                       port_name(port));
 }
 
 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
 {
-       u32 val;
+       enum pipe port_pipe;
 
-       assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
-       assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
-       assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
 
-       val = I915_READ(PCH_ADPA);
-       I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
-            "PCH VGA enabled on transcoder %c, should be disabled\n",
-            pipe_name(pipe));
+       I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+                       port_pipe == pipe,
+                       "PCH VGA enabled on transcoder %c, should be disabled\n",
+                       pipe_name(pipe));
 
-       val = I915_READ(PCH_LVDS);
-       I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
-            "PCH LVDS enabled on transcoder %c, should be disabled\n",
-            pipe_name(pipe));
+       I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+                       port_pipe == pipe,
+                       "PCH LVDS enabled on transcoder %c, should be disabled\n",
+                       pipe_name(pipe));
 
-       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
-       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
-       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
 }
 
 static void _vlv_enable_pll(struct intel_crtc *crtc,
@@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
@@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
-               if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
+               if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
@@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                max_size = max(max_size, offset + size);
        }
 
-       if (max_size * tile_size > intel_fb->obj->base.size) {
+       if (max_size * tile_size > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
-                             max_size * tile_size, intel_fb->obj->base.size);
+                             max_size * tile_size, obj->base.size);
                return -EINVAL;
        }
 
@@ -2922,9 +2882,8 @@ valid_fb:
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;
 
-       drm_framebuffer_get(fb);
-       primary->fb = primary->state->fb = fb;
-       primary->crtc = primary->state->crtc = &intel_crtc->base;
+       plane_state->fb = fb;
+       plane_state->crtc = &intel_crtc->base;
 
        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
@@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane,
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+                                   enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       enum pipe pipe = plane->pipe;
        bool ret;
+       u32 val;
 
        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-4 which don't have any
         * display power wells.
         */
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
+       val = I915_READ(DSPCNTR(i9xx_plane));
+
+       ret = val & DISPLAY_PLANE_ENABLE;
+
+       if (INTEL_GEN(dev_priv) >= 5)
+               *pipe = plane->pipe;
+       else
+               *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+                       DISPPLANE_SEL_PIPE_SHIFT;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
        }
 }
 
-/* Return which DP Port should be selected for Transcoder DP control */
-static enum port
-intel_trans_dp_port_sel(struct intel_crtc *crtc)
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+                          const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_encoder *encoder;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_connector_state *connector_state;
+       const struct drm_connector *connector;
+       struct intel_encoder *encoder = NULL;
+       int num_encoders = 0;
+       int i;
+
+       for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+               if (connector_state->crtc != &crtc->base)
+                       continue;
 
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-               if (encoder->type == INTEL_OUTPUT_DP ||
-                   encoder->type == INTEL_OUTPUT_EDP)
-                       return encoder->port;
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               num_encoders++;
        }
 
-       return -1;
+       WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+            num_encoders, pipe_name(crtc->pipe));
+
+       return encoder;
 }
 
 /*
@@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
  *   - DP transcoding bits
  *   - transcoder
  */
-static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+                               const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
@@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
                        &crtc_state->base.adjusted_mode;
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
+               enum port port;
+
                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
@@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
-               switch (intel_trans_dp_port_sel(crtc)) {
-               case PORT_B:
-                       temp |= TRANS_DP_PORT_SEL_B;
-                       break;
-               case PORT_C:
-                       temp |= TRANS_DP_PORT_SEL_C;
-                       break;
-               case PORT_D:
-                       temp |= TRANS_DP_PORT_SEL_D;
-                       break;
-               default:
-                       BUG();
-               }
+               port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+               WARN_ON(port < PORT_B || port > PORT_D);
+               temp |= TRANS_DP_PORT_SEL(port);
 
                I915_WRITE(reg, temp);
        }
@@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
        ironlake_enable_pch_transcoder(dev_priv, pipe);
 }
 
-static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+                          const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
        }
 }
 
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ */
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+{
+       int phase = -0x8000;
+       u16 trip = 0;
+
+       if (chroma_cosited)
+               phase += (sub - 1) * 0x8000 / sub;
+
+       if (phase < 0)
+               phase = 0x10000 + phase;
+       else
+               trip = PS_PHASE_TRIP;
+
+       return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
+
 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
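
For reference, a standalone sketch of the skl_scaler_calc_phase() arithmetic introduced above; PS_PHASE_MASK and PS_PHASE_TRIP here are illustrative stand-in values, not the real i915_reg.h definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PS_PHASE_MASK  0x7fff      /* stand-in value */
    #define PS_PHASE_TRIP  (1u << 15)  /* stand-in value */

    /* mirrors skl_scaler_calc_phase(): phase is s.16 fixed point,
     * -0x8000 == -0.5 pixel (top/left edge rather than pixel center) */
    static uint16_t calc_phase(int sub, int chroma_cosited)
    {
            int phase = -0x8000;
            uint16_t trip = 0;

            if (chroma_cosited)
                    phase += (sub - 1) * 0x8000 / sub;

            if (phase < 0)
                    phase = 0x10000 + phase; /* two's-complement wrap */
            else
                    trip = PS_PHASE_TRIP;

            return ((phase >> 2) & PS_PHASE_MASK) | trip;
    }

    int main(void)
    {
            printf("%#x\n", calc_phase(1, 0)); /* luma/RGB: -0.5 -> 0x2000 */
            printf("%#x\n", calc_phase(2, 1)); /* 4:2:0 cosited: -0.25 -> 0x3000 */
            return 0;
    }
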
@@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
                &crtc->config->scaler_state;
 
        if (crtc->config->pch_pfit.enabled) {
+               u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;
 
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;
 
+               uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+
                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+               I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+               I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
@@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
-       if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-       if (intel_crtc->config->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);
@@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_enable_pipe(pipe_config);
 
        if (intel_crtc->config->has_pch_encoder)
-               ironlake_pch_enable(pipe_config);
+               ironlake_pch_enable(old_intel_state, pipe_config);
 
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
@@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);
 
-       /* Must wait for vblank to avoid spurious PCH FIFO underruns */
-       if (intel_crtc->config->has_pch_encoder)
+       /*
+        * Must wait for vblank to avoid spurious PCH FIFO underruns.
+        * And a second vblank wait is needed at least on ILK with
+        * some interlaced HDMI modes. Let's do the double wait always
+        * in case there are more corner cases we don't know about.
+        */
+       if (intel_crtc->config->has_pch_encoder) {
+               intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
+       }
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
@@ -5623,6 +5644,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        if (INTEL_GEN(dev_priv) >= 11)
                icl_map_plls_to_ports(crtc, pipe_config, old_state);
 
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
@@ -5651,11 +5674,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_crtc->active = true;
 
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_enable_pipe_clock(pipe_config);
-
        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         intel_crtc->config->pch_pfit.enabled;
@@ -5688,7 +5706,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                intel_enable_pipe(pipe_config);
 
        if (intel_crtc->config->has_pch_encoder)
-               lpt_pch_enable(pipe_config);
+               lpt_pch_enable(old_intel_state, pipe_config);
 
        if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -5741,10 +5759,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
-       if (intel_crtc->config->has_pch_encoder) {
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
-       }
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
        intel_encoders_disable(crtc, old_crtc_state, old_state);
 
@@ -5794,7 +5810,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 
        intel_encoders_disable(crtc, old_crtc_state, old_state);
 
@@ -5805,8 +5821,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);
 
-       if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
-               intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
+       if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+               intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
 
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
@@ -5816,9 +5832,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        else
                ironlake_pfit_disable(intel_crtc, false);
 
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_pipe_clock(intel_crtc->config);
-
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (INTEL_GEN(dev_priv) >= 11)
@@ -5849,6 +5862,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (IS_ICELAKE(dev_priv))
+               return port >= PORT_C && port <= PORT_F;
+
+       return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (!intel_port_is_tc(dev_priv, port))
+               return PORT_TC_NONE;
+
+       return port - PORT_C;
+}
+
 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
 {
        switch (port) {
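
A minimal sketch of the new ICL port-to-TC mapping (intel_port_is_tc()/intel_port_to_tc()), assuming only that PORT_A == 0 and PORT_TC1 == 0 as in the enum hunks elsewhere in this patch:

    #include <assert.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };
    enum tc_port { PORT_TC_NONE = -1, PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };

    static enum tc_port port_to_tc(enum port port)
    {
            /* on ICL, ports C..F are the Type-C capable ports */
            if (port >= PORT_C && port <= PORT_F)
                    return (enum tc_port)(port - PORT_C);
            return PORT_TC_NONE;
    }

    int main(void)
    {
            assert(port_to_tc(PORT_B) == PORT_TC_NONE); /* combo PHY port */
            assert(port_to_tc(PORT_C) == PORT_TC1);
            assert(port_to_tc(PORT_F) == PORT_TC4);
            return 0;
    }
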
@@ -7675,16 +7704,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       enum pipe pipe = crtc->pipe;
+       enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
 
-       if (!plane->get_hw_state(plane))
+       if (!plane->get_hw_state(plane, &pipe))
                return;
 
+       WARN_ON(pipe != crtc->pipe);
+
        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8705,16 +8736,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum plane_id plane_id = plane->id;
-       enum pipe pipe = crtc->pipe;
+       enum pipe pipe;
        u32 val, base, offset, stride_mult, tiling, alpha;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
 
-       if (!plane->get_hw_state(plane))
+       if (!plane->get_hw_state(plane, &pipe))
                return;
 
+       WARN_ON(pipe != crtc->pipe);
+
        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -9142,9 +9175,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
 {
+       struct intel_atomic_state *state =
+               to_intel_atomic_state(crtc_state->base.state);
+
        if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
                struct intel_encoder *encoder =
-                       intel_ddi_get_crtc_new_encoder(crtc_state);
+                       intel_get_crtc_new_encoder(state, crtc_state);
 
                if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
@@ -9172,6 +9208,44 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
+static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+       u32 temp;
+
+       /* TODO: TBT pll not implemented. */
+       switch (port) {
+       case PORT_A:
+       case PORT_B:
+               temp = I915_READ(DPCLKA_CFGCR0_ICL) &
+                      DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+               id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+
+               if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+                       return;
+               break;
+       case PORT_C:
+               id = DPLL_ID_ICL_MGPLL1;
+               break;
+       case PORT_D:
+               id = DPLL_ID_ICL_MGPLL2;
+               break;
+       case PORT_E:
+               id = DPLL_ID_ICL_MGPLL3;
+               break;
+       case PORT_F:
+               id = DPLL_ID_ICL_MGPLL4;
+               break;
+       default:
+               MISSING_CASE(port);
+               return;
+       }
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
@@ -9359,7 +9433,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
        port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
-       if (IS_CANNONLAKE(dev_priv))
+       if (IS_ICELAKE(dev_priv))
+               icelake_get_ddi_pll(dev_priv, port, pipe_config);
+       else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9692,7 +9768,8 @@ static void i845_disable_cursor(struct intel_plane *plane,
        i845_update_cursor(plane, NULL, NULL);
 }
 
-static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+                                    enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
@@ -9704,6 +9781,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane)
 
        ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 
+       *pipe = PIPE_A;
+
        intel_display_power_put(dev_priv, power_domain);
 
        return ret;
@@ -9715,25 +9794,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       u32 cntl;
+       u32 cntl = 0;
 
-       cntl = MCURSOR_GAMMA_ENABLE;
+       if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+               cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
 
-       if (HAS_DDI(dev_priv))
-               cntl |= CURSOR_PIPE_CSC_ENABLE;
+       if (INTEL_GEN(dev_priv) <= 10) {
+               cntl |= MCURSOR_GAMMA_ENABLE;
+
+               if (HAS_DDI(dev_priv))
+                       cntl |= MCURSOR_PIPE_CSC_ENABLE;
+       }
 
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
 
        switch (plane_state->base.crtc_w) {
        case 64:
-               cntl |= CURSOR_MODE_64_ARGB_AX;
+               cntl |= MCURSOR_MODE_64_ARGB_AX;
                break;
        case 128:
-               cntl |= CURSOR_MODE_128_ARGB_AX;
+               cntl |= MCURSOR_MODE_128_ARGB_AX;
                break;
        case 256:
-               cntl |= CURSOR_MODE_256_ARGB_AX;
+               cntl |= MCURSOR_MODE_256_ARGB_AX;
                break;
        default:
                MISSING_CASE(plane_state->base.crtc_w);
@@ -9741,7 +9825,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
        }
 
        if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
-               cntl |= CURSOR_ROTATE_180;
+               cntl |= MCURSOR_ROTATE_180;
 
        return cntl;
 }
@@ -9903,23 +9987,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
        i9xx_update_cursor(plane, NULL, NULL);
 }
 
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+                                    enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
-       enum pipe pipe = plane->pipe;
        bool ret;
+       u32 val;
 
        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+       val = I915_READ(CURCNTR(plane->pipe));
+
+       ret = val & MCURSOR_MODE;
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               *pipe = plane->pipe;
+       else
+               *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+                       MCURSOR_PIPE_SELECT_SHIFT;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -13181,8 +13274,17 @@ void intel_plane_destroy(struct drm_plane *plane)
        kfree(to_intel_plane(plane));
 }
 
-static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
@@ -13195,8 +13297,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
@@ -13211,8 +13322,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+                                          u32 format, u64 modifier)
 {
+       struct intel_plane *plane = to_intel_plane(_plane);
+
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               break;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               if (!plane->has_ccs)
+                       return false;
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
@@ -13244,38 +13373,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
-                                                    uint32_t format,
-                                                    uint64_t modifier)
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+                                             u32 format, u64 modifier)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
-       if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
-               return false;
-
-       if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
-           modifier != DRM_FORMAT_MOD_LINEAR)
-               return false;
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_mod_supported(format, modifier);
-       else if (INTEL_GEN(dev_priv) >= 4)
-               return i965_mod_supported(format, modifier);
-       else
-               return i8xx_mod_supported(format, modifier);
+       return modifier == DRM_FORMAT_MOD_LINEAR &&
+               format == DRM_FORMAT_ARGB8888;
 }
 
-static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
-                                                   uint32_t format,
-                                                   uint64_t modifier)
-{
-       if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
-               return false;
+static struct drm_plane_funcs skl_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = skl_plane_format_mod_supported,
+};
 
-       return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
-}
+static struct drm_plane_funcs i965_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = i965_plane_format_mod_supported,
+};
 
-static struct drm_plane_funcs intel_plane_funcs = {
+static struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
@@ -13283,7 +13410,7 @@ static struct drm_plane_funcs intel_plane_funcs = {
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = intel_primary_plane_format_mod_supported,
+       .format_mod_supported = i8xx_plane_format_mod_supported,
 };
 
 static int
@@ -13408,7 +13535,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = intel_cursor_plane_format_mod_supported,
+       .format_mod_supported = intel_cursor_format_mod_supported,
 };
 
 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@ -13466,6 +13593,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
        struct intel_plane *primary = NULL;
        struct intel_plane_state *state = NULL;
+       const struct drm_plane_funcs *plane_funcs;
        const uint32_t *intel_primary_formats;
        unsigned int supported_rotations;
        unsigned int num_formats;
@@ -13521,6 +13649,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
        primary->check_plane = intel_check_primary_plane;
 
        if (INTEL_GEN(dev_priv) >= 9) {
+               primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+                                                    PLANE_PRIMARY);
+
                if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
                        intel_primary_formats = skl_pri_planar_formats;
                        num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@@ -13529,7 +13660,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                        num_formats = ARRAY_SIZE(skl_primary_formats);
                }
 
-               if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
+               if (primary->has_ccs)
                        modifiers = skl_format_modifiers_ccs;
                else
                        modifiers = skl_format_modifiers_noccs;
@@ -13537,6 +13668,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
                primary->get_hw_state = skl_plane_get_hw_state;
+
+               plane_funcs = &skl_plane_funcs;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13545,6 +13678,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
+
+               plane_funcs = &i965_plane_funcs;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13553,25 +13688,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
+
+               plane_funcs = &i8xx_plane_funcs;
        }
 
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, &intel_plane_funcs,
+                                              0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane 1%c", pipe_name(pipe));
        else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, &intel_plane_funcs,
+                                              0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, &intel_plane_funcs,
+                                              0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
@@ -13951,7 +14088,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
        if (intel_crt_present(dev_priv))
                intel_crt_init(dev_priv);
 
-       if (IS_GEN9_LP(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D);
+               intel_ddi_init(dev_priv, PORT_E);
+               intel_ddi_init(dev_priv, PORT_F);
+       } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -14124,14 +14268,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
        drm_framebuffer_cleanup(fb);
 
-       i915_gem_object_lock(intel_fb->obj);
-       WARN_ON(!intel_fb->obj->framebuffer_references--);
-       i915_gem_object_unlock(intel_fb->obj);
+       i915_gem_object_lock(obj);
+       WARN_ON(!obj->framebuffer_references--);
+       i915_gem_object_unlock(obj);
 
-       i915_gem_object_put(intel_fb->obj);
+       i915_gem_object_put(obj);
 
        kfree(intel_fb);
 }
@@ -14140,8 +14285,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
                                                struct drm_file *file,
                                                unsigned int *handle)
 {
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
        if (obj->userptr.mm) {
                DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
@@ -14411,9 +14555,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }
-       }
 
-       intel_fb->obj = obj;
+               fb->obj[i] = &obj->base;
+       }
 
        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
@@ -14469,6 +14613,10 @@ static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int hdisplay_max, htotal_max;
+       int vdisplay_max, vtotal_max;
+
        /*
         * Can't reject DBLSCAN here because Xorg ddxen can add piles
         * of DBLSCAN modes to the output's mode list when they detect
@@ -14498,6 +14646,36 @@ intel_mode_valid(struct drm_device *dev,
                           DRM_MODE_FLAG_CLKDIV2))
                return MODE_BAD;
 
+       if (INTEL_GEN(dev_priv) >= 9 ||
+           IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+               hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+               vdisplay_max = 4096;
+               htotal_max = 8192;
+               vtotal_max = 8192;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               hdisplay_max = 4096;
+               vdisplay_max = 4096;
+               htotal_max = 8192;
+               vtotal_max = 8192;
+       } else {
+               hdisplay_max = 2048;
+               vdisplay_max = 2048;
+               htotal_max = 4096;
+               vtotal_max = 4096;
+       }
+
+       if (mode->hdisplay > hdisplay_max ||
+           mode->hsync_start > htotal_max ||
+           mode->hsync_end > htotal_max ||
+           mode->htotal > htotal_max)
+               return MODE_H_ILLEGAL;
+
+       if (mode->vdisplay > vdisplay_max ||
+           mode->vsync_start > vtotal_max ||
+           mode->vsync_end > vtotal_max ||
+           mode->vtotal > vtotal_max)
+               return MODE_V_ILLEGAL;
+
        return MODE_OK;
 }
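
A compact restatement of the per-generation timing limits added above; the values are copied from the hunk, but the helper itself is only a sketch, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    struct timing_limits { int hdisplay, vdisplay, htotal, vtotal; };

    /* gen9+ and HSW/BDW: 8k-wide scanout (FDI's 4k cap is checked
     * elsewhere); gen3..8: 4k; gen2: 2k */
    static struct timing_limits mode_limits(int gen, bool hsw_or_bdw)
    {
            if (gen >= 9 || hsw_or_bdw)
                    return (struct timing_limits){ 8192, 4096, 8192, 8192 };
            if (gen >= 3)
                    return (struct timing_limits){ 4096, 4096, 8192, 8192 };
            return (struct timing_limits){ 2048, 2048, 4096, 4096 };
    }

    int main(void)
    {
            /* a 4096-wide mode fits on gen3+, but not on gen2 */
            printf("%d\n", 4096 <= mode_limits(3, false).hdisplay); /* 1 */
            printf("%d\n", 4096 <= mode_limits(2, false).hdisplay); /* 0 */
            return 0;
    }
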
 
@@ -14936,6 +15114,7 @@ int intel_modeset_init(struct drm_device *dev)
                }
        }
 
+       /* maximum framebuffer dimensions */
        if (IS_GEN2(dev_priv)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
@@ -14951,11 +15130,11 @@ int intel_modeset_init(struct drm_device *dev)
                dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN2(dev_priv)) {
-               dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
-               dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+               dev->mode_config.cursor_width = 64;
+               dev->mode_config.cursor_height = 64;
        } else {
-               dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
-               dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+               dev->mode_config.cursor_width = 256;
+               dev->mode_config.cursor_height = 256;
        }
 
        dev->mode_config.fb_base = ggtt->gmadr.start;
@@ -15105,8 +15284,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
-       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
-       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
 
        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));
@@ -15120,12 +15299,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
                                   struct intel_plane *plane)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       u32 val = I915_READ(DSPCNTR(i9xx_plane));
+       enum pipe pipe;
+
+       if (!plane->get_hw_state(plane, &pipe))
+               return true;
 
-       return (val & DISPLAY_PLANE_ENABLE) == 0 ||
-               (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+       return pipe == crtc->pipe;
 }
 
 static void
@@ -15284,6 +15463,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }
+
+       /* notify opregion of the sanitized encoder state */
+       intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
 }
 
 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15324,7 +15506,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
-               bool visible = plane->get_hw_state(plane);
+               enum pipe pipe;
+               bool visible;
+
+               visible = plane->get_hw_state(plane, &pipe);
 
                intel_set_plane_visible(crtc_state, plane_state, visible);
        }
index 2ef3161..dfb02da 100644 (file)
@@ -126,6 +126,17 @@ enum port {
 
 #define port_name(p) ((p) + 'A')
 
+enum tc_port {
+       PORT_TC_NONE = -1,
+
+       PORT_TC1 = 0,
+       PORT_TC2,
+       PORT_TC3,
+       PORT_TC4,
+
+       I915_MAX_TC_PORTS
+};
+
 enum dpio_channel {
        DPIO_CH0,
        DPIO_CH1
@@ -144,7 +155,7 @@ enum aux_ch {
        AUX_CH_B,
        AUX_CH_C,
        AUX_CH_D,
-       _AUX_CH_E, /* does not exist */
+       AUX_CH_E, /* ICL+ */
        AUX_CH_F,
 };
 
@@ -185,6 +196,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_AUX_B,
        POWER_DOMAIN_AUX_C,
        POWER_DOMAIN_AUX_D,
+       POWER_DOMAIN_AUX_E,
        POWER_DOMAIN_AUX_F,
        POWER_DOMAIN_AUX_IO_A,
        POWER_DOMAIN_GMBUS,
index 16faea3..6ac6c87 100644 (file)
@@ -56,7 +56,7 @@ struct dp_link_dpll {
        struct dpll dpll;
 };
 
-static const struct dp_link_dpll gen4_dpll[] = {
+static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
@@ -256,6 +256,17 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
        return 810000;
 }
 
+static int icl_max_source_rate(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum port port = dig_port->base.port;
+
+       if (port == PORT_B)
+               return 540000;
+
+       return 810000;
+}
+
 static void
 intel_dp_set_source_rates(struct intel_dp *intel_dp)
 {
@@ -285,10 +296,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
 
-       if (IS_CANNONLAKE(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
-               max_rate = cnl_max_source_rate(intel_dp);
+               if (INTEL_GEN(dev_priv) == 10)
+                       max_rate = cnl_max_source_rate(intel_dp);
+               else
+                       max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
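
To show how the new max_rate clamp interacts with the source-rate table, a sketch of the trimming step; the rate values and the helper name are illustrative only:

    #include <stdio.h>

    static int rate_limit_len(const int *rates, int len, int max_rate)
    {
            int i;

            /* rates[] is sorted ascending; count entries up to max_rate */
            for (i = 0; i < len; i++)
                    if (rates[i] > max_rate)
                            break;
            return i;
    }

    int main(void)
    {
            static const int rates[] = { 162000, 270000, 540000, 810000 };

            printf("%d\n", rate_limit_len(rates, 4, 540000)); /* 3: ICL port B */
            printf("%d\n", rate_limit_len(rates, 4, 810000)); /* 4: other ports */
            return 0;
    }
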
@@ -516,7 +530,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        uint32_t DP;
 
        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
-                "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+                "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->base.port)))
                return;
 
@@ -532,9 +546,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        DP |= DP_LINK_TRAIN_PAT_1;
 
        if (IS_CHERRYVIEW(dev_priv))
-               DP |= DP_PIPE_SELECT_CHV(pipe);
-       else if (pipe == PIPE_B)
-               DP |= DP_PIPEB_SELECT;
+               DP |= DP_PIPE_SEL_CHV(pipe);
+       else
+               DP |= DP_PIPE_SEL(pipe);
 
        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
 
@@ -557,7 +571,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
-        * to make this power seqeuencer lock onto the port.
+        * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
@@ -1350,6 +1364,9 @@ static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
        case DP_AUX_D:
                aux_ch = AUX_CH_D;
                break;
+       case DP_AUX_E:
+               aux_ch = AUX_CH_E;
+               break;
        case DP_AUX_F:
                aux_ch = AUX_CH_F;
                break;
@@ -1377,6 +1394,8 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
+       case AUX_CH_E:
+               return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        default:
@@ -1463,6 +1482,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
+       case AUX_CH_E:
        case AUX_CH_F:
                return DP_AUX_CH_CTL(aux_ch);
        default:
@@ -1481,6 +1501,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
+       case AUX_CH_E:
        case AUX_CH_F:
                return DP_AUX_CH_DATA(aux_ch, index);
        default:
@@ -1544,6 +1565,13 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
        return max_rate >= 540000;
 }
 
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+{
+       int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
+
+       return max_rate >= 810000;
+}
+
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_state *pipe_config)
@@ -1553,8 +1581,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
        int i, count = 0;
 
        if (IS_G4X(dev_priv)) {
-               divisor = gen4_dpll;
-               count = ARRAY_SIZE(gen4_dpll);
+               divisor = g4x_dpll;
+               count = ARRAY_SIZE(g4x_dpll);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
@@ -1970,7 +1998,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 
        /* Split out the IBX/CPU vs CPT settings */
 
-       if (IS_GEN7(dev_priv) && port == PORT_A) {
+       if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1980,7 +2008,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-               intel_dp->DP |= crtc->pipe << 29;
+               intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp;
 
@@ -2006,9 +2034,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
                        intel_dp->DP |= DP_ENHANCED_FRAMING;
 
                if (IS_CHERRYVIEW(dev_priv))
-                       intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
-               else if (crtc->pipe == PIPE_B)
-                       intel_dp->DP |= DP_PIPEB_SELECT;
+                       intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+               else
+                       intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
        }
 }
 
@@ -2630,52 +2658,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
 }
 
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+                                enum port port, enum pipe *pipe)
+{
+       enum pipe p;
+
+       for_each_pipe(dev_priv, p) {
+               u32 val = I915_READ(TRANS_DP_CTL(p));
+
+               if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+                       *pipe = p;
+                       return true;
+               }
+       }
+
+       DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+
+       /* must initialize pipe to something for the asserts */
+       *pipe = PIPE_A;
+
+       return false;
+}
+
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+                          i915_reg_t dp_reg, enum port port,
+                          enum pipe *pipe)
+{
+       bool ret;
+       u32 val;
+
+       val = I915_READ(dp_reg);
+
+       ret = val & DP_PORT_EN;
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+               *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+       else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+               ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+       else if (IS_CHERRYVIEW(dev_priv))
+               *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+       else
+               *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+       return ret;
+}
+
 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       enum port port = encoder->port;
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
-
-       tmp = I915_READ(intel_dp->output_reg);
-
-       if (!(tmp & DP_PORT_EN))
-               goto out;
-
-       if (IS_GEN7(dev_priv) && port == PORT_A) {
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
-               enum pipe p;
-
-               for_each_pipe(dev_priv, p) {
-                       u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
-                       if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
-                               *pipe = p;
-                               ret = true;
-
-                               goto out;
-                       }
-               }
-
-               DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
-                             i915_mmio_reg_offset(intel_dp->output_reg));
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               *pipe = DP_PORT_TO_PIPE_CHV(tmp);
-       } else {
-               *pipe = PORT_TO_PIPE(tmp);
-       }
-
-       ret = true;
+       ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+                                   encoder->port, pipe);
 
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
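
The readout helpers above share one shape: decode the enable bit and the pipe-select field from a single register value, reporting the pipe even when the port is off. A generic sketch with placeholder mask values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PORT_EN        (1u << 31)
    #define PIPE_SEL_MASK  (3u << 29)  /* placeholder field layout */
    #define PIPE_SEL_SHIFT 29

    static bool port_enabled(uint32_t val, int *pipe)
    {
            /* report the selected pipe even when the port is disabled,
             * so a disable assert can still name the pipe it checked */
            *pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
            return val & PORT_EN;
    }

    int main(void)
    {
            int pipe;
            bool on = port_enabled(PORT_EN | (2u << PIPE_SEL_SHIFT), &pipe);

            printf("enabled=%d pipe=%c\n", on, 'A' + pipe); /* enabled=1 pipe=C */
            return 0;
    }
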
@@ -2854,10 +2896,11 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
+       uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
 
-       if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+       if (dp_train_pat & train_pat_mask)
                DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
-                             dp_train_pat & DP_TRAINING_PATTERN_MASK);
+                             dp_train_pat & train_pat_mask);
 
        if (HAS_DDI(dev_priv)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2868,7 +2911,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
 
                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+               switch (dp_train_pat & train_pat_mask) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
@@ -2882,10 +2925,13 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
+               case DP_TRAINING_PATTERN_4:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+                       break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);
 
-       } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+       } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
@@ -3043,11 +3089,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
        edp_panel_vdd_off_sync(intel_dp);
 
        /*
-        * VLV seems to get confused when multiple power seqeuencers
+        * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing
         * CHV on the other hand doesn't seem to mind having the same port
-        * selected in multiple power seqeuencers, but let's clear the
+        * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
@@ -3197,14 +3243,14 @@ uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum port port = dp_to_dig_port(intel_dp)->base.port;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       enum port port = encoder->port;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       if (HAS_DDI(dev_priv))
                return intel_ddi_dp_voltage_max(encoder);
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-       else if (IS_GEN7(dev_priv) && port == PORT_A)
+       else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
        else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
@@ -3216,33 +3262,11 @@ uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum port port = dp_to_dig_port(intel_dp)->base.port;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       enum port port = encoder->port;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
-               default:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
-               }
-       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
-               default:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
-               }
+       if (HAS_DDI(dev_priv)) {
+               return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3255,7 +3279,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
-       } else if (IS_GEN7(dev_priv) && port == PORT_A) {
+       } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
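
Both removed switches encoded the same rule: each extra step of voltage swing costs one step of maximum pre-emphasis. An illustrative sketch of that mapping follows; the real intel_ddi_dp_pre_emphasis_max() presumably derives it from the DDI buffer translation tables rather than hardcoding it:

/* Illustrative only: swing level N caps pre-emphasis at level 3 - N */
static u8 dp_pre_emphasis_max_for_swing(u8 voltage_swing)
{
	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		return DP_TRAIN_PRE_EMPH_LEVEL_1;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
	default:
		return DP_TRAIN_PRE_EMPH_LEVEL_0;
	}
}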
@@ -3450,7 +3474,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-gen4_signal_levels(uint8_t train_set)
+g4x_signal_levels(uint8_t train_set)
 {
        uint32_t        signal_levels = 0;
 
@@ -3487,9 +3511,9 @@ gen4_signal_levels(uint8_t train_set)
        return signal_levels;
 }
 
-/* Gen6's DP voltage swing and pre-emphasis control */
+/* SNB CPU eDP voltage swing and pre-emphasis control */
 static uint32_t
-gen6_edp_signal_levels(uint8_t train_set)
+snb_cpu_edp_signal_levels(uint8_t train_set)
 {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3515,9 +3539,9 @@ gen6_edp_signal_levels(uint8_t train_set)
        }
 }
 
-/* Gen7's DP voltage swing and pre-emphasis control */
+/* IVB CPU eDP voltage swing and pre-emphasis control */
 static uint32_t
-gen7_edp_signal_levels(uint8_t train_set)
+ivb_cpu_edp_signal_levels(uint8_t train_set)
 {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3564,14 +3588,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
                signal_levels = chv_signal_levels(intel_dp);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                signal_levels = vlv_signal_levels(intel_dp);
-       } else if (IS_GEN7(dev_priv) && port == PORT_A) {
-               signal_levels = gen7_edp_signal_levels(train_set);
+       } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+               signal_levels = ivb_cpu_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
        } else if (IS_GEN6(dev_priv) && port == PORT_A) {
-               signal_levels = gen6_edp_signal_levels(train_set);
+               signal_levels = snb_cpu_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
-               signal_levels = gen4_signal_levels(train_set);
+               signal_levels = g4x_signal_levels(train_set);
                mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
        }
 
@@ -3654,7 +3678,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("\n");
 
-       if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+       if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
            (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -3683,8 +3707,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
                /* always enable with pattern 1 (as per spec) */
-               DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
-               DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+               DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+               DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+                       DP_LINK_TRAIN_PAT_1;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
 
@@ -3739,8 +3764,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                        DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
 
-       intel_psr_init_dpcd(intel_dp);
-
        /*
         * Read the eDP display control registers.
         *
@@ -3756,6 +3779,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
                DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
                              intel_dp->edp_dpcd);
 
+       /*
+        * This has to be called after intel_dp->edp_dpcd is filled, since PSR
+        * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
+        */
+       intel_psr_init_dpcd(intel_dp);
+
        /* Read the eDP 1.4+ supported link rates. */
        if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
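
The reordering above matters because PSR setup consumes intel_dp->edp_dpcd. An illustrative sketch (assumed, based on the new comment; not taken from this diff) of the kind of check intel_psr_init_dpcd() performs once the eDP capability bytes are valid:

	/* edp_dpcd[1] is DP_EDP_GENERAL_CAP_1; without sink power-state
	 * control, PSR cannot put the panel into its low-power state */
	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAPABLE))
		DRM_DEBUG_KMS("PSR: panel lacks SET_POWER capability\n");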
@@ -5319,14 +5348,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       enum pipe pipe;
 
-       if ((intel_dp->DP & DP_PORT_EN) == 0)
-               return INVALID_PIPE;
+       if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+                                 encoder->port, &pipe))
+               return pipe;
 
-       if (IS_CHERRYVIEW(dev_priv))
-               return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
-       else
-               return PORT_TO_PIPE(intel_dp->DP);
+       return INVALID_PIPE;
 }
 
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -5675,7 +5704,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 
        /*
         * On some VLV machines the BIOS can leave the VDD
-        * enabled even on power seqeuencers which aren't
+        * enabled even on power sequencers which aren't
         * hooked up to any port. This would mess up the
         * power domain tracking the first time we pick
         * one of these power sequencers for use since
@@ -5683,7 +5712,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
         * already on and therefore wouldn't grab the power
         * domain reference. Disable VDD first to avoid this.
         * This also avoids spuriously turning the VDD on as
-        * soon as the new power seqeuencer gets initialized.
+        * soon as the new power sequencer gets initialized.
         */
        if (force_disable_vdd) {
                u32 pp = ironlake_get_pp_control(intel_dp);
@@ -5721,10 +5750,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
-               if (port == PORT_A)
+               switch (port) {
+               case PORT_A:
                        port_sel = PANEL_PORT_SELECT_DPA;
-               else
+                       break;
+               case PORT_C:
+                       port_sel = PANEL_PORT_SELECT_DPC;
+                       break;
+               case PORT_D:
                        port_sel = PANEL_PORT_SELECT_DPD;
+                       break;
+               default:
+                       MISSING_CASE(port);
+                       break;
+               }
        }
 
        pp_on |= port_sel;
@@ -6382,7 +6421,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
-       if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+       if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
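
The simplified condition relies on how the G4x platform macros nest; a one-line sketch of the assumed relationship from the accompanying platform cleanup:

/* IS_G4X() covers both variants, so IS_G4X() && !IS_GM45() is exactly IS_G45() */
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))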
index 2bb2ceb..357136f 100644 (file)
@@ -26,7 +26,7 @@
 
 static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {
-       uint8_t reg_val = 0;
+       u8 reg_val = 0;
 
        /* Early return when the display uses another mechanism to enable backlight. */
        if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
@@ -54,11 +54,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
  * Read the current backlight value from DPCD register(s) based
  * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
  */
-static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
-       uint8_t read_val[2] = { 0x0 };
-       uint16_t level = 0;
+       u8 read_val[2] = { 0x0 };
+       u16 level = 0;
 
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
                             &read_val, sizeof(read_val)) < 0) {
@@ -82,7 +82,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
-       uint8_t vals[2] = { 0x0 };
+       u8 vals[2] = { 0x0 };
 
        vals[0] = level;
 
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
-       uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+       u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                        DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
index 3fcaa98..4da6e33 100644 (file)
@@ -219,32 +219,47 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 }
 
 /*
- * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * Pick training pattern for channel equalization. Training Pattern 4 for HBR3
+ * or 1.4 devices that support it, Training Pattern 3 for HBR2
  * or 1.2 devices that support it, Training Pattern 2 otherwise.
  */
 static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 {
-       u32 training_pattern = DP_TRAINING_PATTERN_2;
-       bool source_tps3, sink_tps3;
+       bool source_tps3, sink_tps3, source_tps4, sink_tps4;
 
        /*
+        * Intel platforms that support HBR3 also support TPS4. It is mandatory
+        * for all downstream devices that support HBR3. There are no known eDP
+        * panels that support TPS4 as of Feb 2018, per the VESA
+        * eDP_v1.4b_E1 specification.
+        */
+       source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+       sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
+       if (source_tps4 && sink_tps4) {
+               return DP_TRAINING_PATTERN_4;
+       } else if (intel_dp->link_rate == 810000) {
+               if (!source_tps4)
+                       DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+               if (!sink_tps4)
+                       DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+       }
+       /*
         * Intel platforms that support HBR2 also support TPS3. TPS3 support is
         * also mandatory for downstream devices that support HBR2. However, not
         * all sinks follow the spec.
         */
        source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
        sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
-
        if (source_tps3 && sink_tps3) {
-               training_pattern = DP_TRAINING_PATTERN_3;
-       } else if (intel_dp->link_rate == 540000) {
+               return DP_TRAINING_PATTERN_3;
+       } else if (intel_dp->link_rate >= 540000) {
                if (!source_tps3)
-                       DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+                       DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
                if (!sink_tps3)
-                       DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+                       DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
        }
 
-       return training_pattern;
+       return DP_TRAINING_PATTERN_2;
 }
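
The sink-side TPS4 probe used above is a small DPCD helper; a sketch of what drm_dp_tps4_supported() plausibly checks (the capability bit name is an assumption taken from the DP 1.4 spec):

static inline bool
drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	/* TPS4 is advertised via MAX_DOWNSPREAD on DP 1.4+ sinks */
	return dpcd[DP_DPCD_REV] >= 0x14 &&
		dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
}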
 
 static bool
@@ -256,11 +271,13 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
        bool channel_eq = false;
 
        training_pattern = intel_dp_training_pattern(intel_dp);
+       /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
+       if (training_pattern != DP_TRAINING_PATTERN_4)
+               training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
 
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp,
-                                    training_pattern |
-                                    DP_LINK_SCRAMBLING_DISABLE)) {
+                                    training_pattern)) {
                DRM_ERROR("failed to start channel equalization\n");
                return false;
        }
index 383fbc1..132fe63 100644 (file)
@@ -2525,6 +2525,76 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
        return true;
 }
 
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+                              uint32_t pll_id)
+{
+       uint32_t cfgcr0, cfgcr1;
+       uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+       const struct skl_wrpll_params *params;
+       int index, n_entries, link_clock;
+
+       /* Read back values from DPLL CFGCR registers */
+       cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+       cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+
+       dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
+       dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+               DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+       pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
+       kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
+       qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
+               DPLL_CFGCR1_QDIV_MODE_SHIFT;
+       qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+               DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+
+       params = dev_priv->cdclk.hw.ref == 24000 ?
+               icl_dp_combo_pll_24MHz_values :
+               icl_dp_combo_pll_19_2MHz_values;
+       n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
+
+       for (index = 0; index < n_entries; index++) {
+               if (dco_integer == params[index].dco_integer &&
+                   dco_fraction == params[index].dco_fraction &&
+                   pdiv == params[index].pdiv &&
+                   kdiv == params[index].kdiv &&
+                   qdiv_mode == params[index].qdiv_mode &&
+                   qdiv_ratio == params[index].qdiv_ratio)
+                       break;
+       }
+
+       /* Map PLL Index to Link Clock */
+       switch (index) {
+       default:
+               MISSING_CASE(index);
+       case 0:
+               link_clock = 540000;
+               break;
+       case 1:
+               link_clock = 270000;
+               break;
+       case 2:
+               link_clock = 162000;
+               break;
+       case 3:
+               link_clock = 324000;
+               break;
+       case 4:
+               link_clock = 216000;
+               break;
+       case 5:
+               link_clock = 432000;
+               break;
+       case 6:
+               link_clock = 648000;
+               break;
+       case 7:
+               link_clock = 810000;
+               break;
+       }
+
+       return link_clock;
+}
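
A worked check of the mapping above, using the fact that the returned values are in kHz of the 8b/10b symbol clock:

	link rate = link_clock [kHz] * 10 bits/symbol (8b/10b)
	index 7: 810000 kHz * 10 = 8.1 Gbps (HBR3)
	likewise 540000 -> 5.4 Gbps (HBR2), 270000 -> 2.7 Gbps (HBR),
	162000 -> 1.62 Gbps (RBR)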
+
 static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
 {
        return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
@@ -2787,10 +2857,17 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        case PORT_D:
        case PORT_E:
        case PORT_F:
-               min = icl_port_to_mg_pll_id(port);
-               max = min;
-               ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
-                                           &pll_state);
+               if (0 /* TODO: TBT PLLs */) {
+                       min = DPLL_ID_ICL_TBTPLL;
+                       max = min;
+                       ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+                                                 &pll_state);
+               } else {
+                       min = icl_port_to_mg_pll_id(port);
+                       max = min;
+                       ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+                                                   &pll_state);
+               }
                break;
        default:
                MISSING_CASE(port);
@@ -2823,6 +2900,8 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
                return CNL_DPLL_ENABLE(id);
+       case DPLL_ID_ICL_TBTPLL:
+               return TBT_PLL_ENABLE;
        case DPLL_ID_ICL_MGPLL1:
        case DPLL_ID_ICL_MGPLL2:
        case DPLL_ID_ICL_MGPLL3:
@@ -2850,6 +2929,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
        switch (id) {
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
+       case DPLL_ID_ICL_TBTPLL:
                hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
                hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
                break;
@@ -2936,6 +3016,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
        switch (id) {
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
+       case DPLL_ID_ICL_TBTPLL:
                icl_dpll_write(dev_priv, pll);
                break;
        case DPLL_ID_ICL_MGPLL1:
@@ -3034,6 +3115,7 @@ static const struct intel_shared_dpll_funcs icl_pll_funcs = {
 static const struct dpll_info icl_plls[] = {
        { "DPLL 0",   &icl_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
        { "DPLL 1",   &icl_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
+       { "TBT PLL",  &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
        { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
        { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
        { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
index 7a0cd56..ba925c7 100644 (file)
@@ -114,23 +114,27 @@ enum intel_dpll_id {
         */
        DPLL_ID_ICL_DPLL1 = 1,
        /**
+        * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+        */
+       DPLL_ID_ICL_TBTPLL = 2,
+       /**
         * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
         */
-       DPLL_ID_ICL_MGPLL1 = 2,
+       DPLL_ID_ICL_MGPLL1 = 3,
        /**
         * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
         */
-       DPLL_ID_ICL_MGPLL2 = 3,
+       DPLL_ID_ICL_MGPLL2 = 4,
        /**
         * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
         */
-       DPLL_ID_ICL_MGPLL3 = 4,
+       DPLL_ID_ICL_MGPLL3 = 5,
        /**
         * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
         */
-       DPLL_ID_ICL_MGPLL4 = 5,
+       DPLL_ID_ICL_MGPLL4 = 6,
 };
-#define I915_NUM_PLLS 6
+#define I915_NUM_PLLS 7
 
 struct intel_dpll_hw_state {
        /* i9xx, pch plls */
@@ -336,5 +340,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
 
 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
                              struct intel_dpll_hw_state *hw_state);
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+                              uint32_t pll_id);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
index 0361130..0c3ac0e 100644 (file)
 #define MAX_OUTPUTS 6
 /* maximum connectors per crtc in the mode set */
 
-/* Maximum cursor sizes */
-#define GEN2_CURSOR_WIDTH 64
-#define GEN2_CURSOR_HEIGHT 64
-#define MAX_CURSOR_WIDTH 256
-#define MAX_CURSOR_HEIGHT 256
-
 #define INTEL_I2C_BUS_DVO 1
 #define INTEL_I2C_BUS_SDVO 2
 
@@ -194,7 +188,6 @@ enum intel_output_type {
 
 struct intel_framebuffer {
        struct drm_framebuffer base;
-       struct drm_i915_gem_object *obj;
        struct intel_rotation_info rot_info;
 
        /* for each plane in the normal GTT view */
@@ -953,6 +946,7 @@ struct intel_plane {
        enum pipe pipe;
        bool can_scale;
        bool has_fbc;
+       bool has_ccs;
        int max_downscale;
        uint32_t frontbuffer_bit;
 
@@ -971,7 +965,7 @@ struct intel_plane {
                             const struct intel_plane_state *plane_state);
        void (*disable_plane)(struct intel_plane *plane,
                              struct intel_crtc *crtc);
-       bool (*get_hw_state)(struct intel_plane *plane);
+       bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
        int (*check_plane)(struct intel_plane *plane,
                           struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
@@ -1004,7 +998,7 @@ struct cxsr_latency {
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
 #define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
-#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
 
 struct intel_hdmi {
        i915_reg_t hdmi_reg;
@@ -1376,6 +1370,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
 
 /* intel_crt.c */
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+                           i915_reg_t adpa_reg, enum pipe *pipe);
 void intel_crt_init(struct drm_i915_private *dev_priv);
 void intel_crt_reset(struct drm_encoder *encoder);
 
@@ -1392,8 +1388,6 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
                                       enum transcoder cpu_transcoder);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const  struct intel_crtc_state *crtc_state);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1407,6 +1401,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 u32 bxt_signal_levels(struct intel_dp *intel_dp);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+                                u8 voltage_swing);
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
                                     bool enable);
 void icl_map_plls_to_ports(struct drm_crtc *crtc,
@@ -1488,6 +1484,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder);
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+                             enum port port);
 
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
@@ -1615,6 +1614,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
 
+u16 skl_scaler_calc_phase(int sub, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                  uint32_t pixel_format);
@@ -1644,6 +1644,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+                          i915_reg_t dp_reg, enum port port,
+                          enum pipe *pipe);
 bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
                   enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1707,6 +1710,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           uint8_t *link_bw, uint8_t *rate_select);
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
 
@@ -1821,6 +1825,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
 
 
 /* intel_lvds.c */
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t lvds_reg, enum pipe *pipe);
 void intel_lvds_init(struct drm_i915_private *dev_priv);
 struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1911,8 +1917,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
                     unsigned frontbuffer_bits,
                     enum fb_op_origin origin);
 void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
-                                  unsigned frontbuffer_bits);
 void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state);
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
@@ -2058,6 +2062,8 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
 
 /* intel_sdvo.c */
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t sdvo_reg, enum pipe *pipe);
 bool intel_sdvo_init(struct drm_i915_private *dev_priv,
                     i915_reg_t reg, enum port port);
 
@@ -2076,7 +2082,7 @@ void skl_update_plane(struct intel_plane *plane,
                      const struct intel_crtc_state *crtc_state,
                      const struct intel_plane_state *plane_state);
 void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
 bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
                       enum pipe pipe, enum plane_id plane_id);
 bool intel_format_is_yuv(uint32_t format);
index f349b39..3b7acb5 100644 (file)
@@ -1671,16 +1671,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-       enum i9xx_plane_id plane;
+       enum i9xx_plane_id i9xx_plane;
        u32 val;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                if (connector->encoder->crtc_mask == BIT(PIPE_B))
-                       plane = PLANE_B;
+                       i9xx_plane = PLANE_B;
                else
-                       plane = PLANE_A;
+                       i9xx_plane = PLANE_A;
 
-               val = I915_READ(DSPCNTR(plane));
+               val = I915_READ(DSPCNTR(i9xx_plane));
                if (val & DISPPLANE_ROTATE_180)
                        orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
        }
index 61d908e..4e142ff 100644 (file)
@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp;
 
        tmp = I915_READ(intel_dvo->dev.dvo_reg);
 
-       if (!(tmp & DVO_ENABLE))
-               return false;
-
-       *pipe = PORT_TO_PIPE(tmp);
+       *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
 
-       return true;
+       return tmp & DVO_ENABLE;
 }
 
 static void intel_dvo_get_config(struct intel_encoder *encoder,
@@ -282,8 +278,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
        dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
                   DVO_BLANK_ACTIVE_HIGH;
 
-       if (pipe == 1)
-               dvo_val |= DVO_PIPE_B_SELECT;
+       dvo_val |= DVO_PIPE_SEL(pipe);
        dvo_val |= DVO_PIPE_STALL;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
@@ -443,7 +438,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
                int gpio;
                bool dvoinit;
                enum pipe pipe;
-               uint32_t dpll[I915_MAX_PIPES];
+               u32 dpll[I915_MAX_PIPES];
                enum port port;
 
                /*
index 1590375..32bf3a4 100644 (file)
@@ -499,7 +499,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init_cmd_parser(engine);
 }
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+                               unsigned int size)
 {
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
@@ -515,7 +516,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
                return PTR_ERR(obj);
        }
 
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
@@ -533,7 +534,7 @@ err_unref:
        return ret;
 }
 
-static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 {
        i915_vma_unpin_and_release(&engine->scratch);
 }
@@ -585,7 +586,7 @@ static int init_status_page(struct intel_engine_cs *engine)
        if (ret)
                goto err;
 
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
@@ -645,6 +646,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
        return 0;
 }
 
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine)
+{
+       intel_context_unpin(to_intel_context(ctx, engine));
+}
+
 /**
  * intel_engine_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
@@ -658,7 +665,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
  */
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
-       struct intel_ring *ring;
+       struct drm_i915_private *i915 = engine->i915;
+       struct intel_context *ce;
        int ret;
 
        engine->set_default_submission(engine);
@@ -670,18 +678,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
         * be available. To avoid this we always pin the default
         * context.
         */
-       ring = intel_context_pin(engine->i915->kernel_context, engine);
-       if (IS_ERR(ring))
-               return PTR_ERR(ring);
+       ce = intel_context_pin(i915->kernel_context, engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
 
        /*
         * Similarly the preempt context must always be available so that
         * we can interrupt the engine at any time.
         */
-       if (engine->i915->preempt_context) {
-               ring = intel_context_pin(engine->i915->preempt_context, engine);
-               if (IS_ERR(ring)) {
-                       ret = PTR_ERR(ring);
+       if (i915->preempt_context) {
+               ce = intel_context_pin(i915->preempt_context, engine);
+               if (IS_ERR(ce)) {
+                       ret = PTR_ERR(ce);
                        goto err_unpin_kernel;
                }
        }
@@ -690,7 +698,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        if (ret)
                goto err_unpin_preempt;
 
-       if (HWS_NEEDS_PHYSICAL(engine->i915))
+       if (HWS_NEEDS_PHYSICAL(i915))
                ret = init_phys_status_page(engine);
        else
                ret = init_status_page(engine);
@@ -702,10 +710,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-       if (engine->i915->preempt_context)
-               intel_context_unpin(engine->i915->preempt_context, engine);
+       if (i915->preempt_context)
+               __intel_context_unpin(i915->preempt_context, engine);
+
 err_unpin_kernel:
-       intel_context_unpin(engine->i915->kernel_context, engine);
+       __intel_context_unpin(i915->kernel_context, engine);
        return ret;
 }
 
@@ -718,6 +727,8 @@ err_unpin_kernel:
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *i915 = engine->i915;
+
        intel_engine_cleanup_scratch(engine);
 
        if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -732,9 +743,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        if (engine->default_state)
                i915_gem_object_put(engine->default_state);
 
-       if (engine->i915->preempt_context)
-               intel_context_unpin(engine->i915->preempt_context, engine);
-       intel_context_unpin(engine->i915->kernel_context, engine);
+       if (i915->preempt_context)
+               __intel_context_unpin(i915->preempt_context, engine);
+       __intel_context_unpin(i915->kernel_context, engine);
 
        i915_timeline_fini(&engine->timeline);
 }
@@ -769,6 +780,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
        return bbaddr;
 }
 
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       const u32 base = engine->mmio_base;
+       const i915_reg_t mode = RING_MI_MODE(base);
+       int err;
+
+       if (INTEL_GEN(dev_priv) < 3)
+               return -ENODEV;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+       err = 0;
+       if (__intel_wait_for_register_fw(dev_priv,
+                                        mode, MODE_IDLE, MODE_IDLE,
+                                        1000, 0,
+                                        NULL)) {
+               GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+               err = -ETIMEDOUT;
+       }
+
+       /* A final mmio read to hopefully flush pending GPU writes to memory */
+       POSTING_READ_FW(mode);
+
+       return err;
+}
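
STOP_RING lives in a masked register, so the write above carries its own write-enable bits. For reference, the masked-bit helpers (simplified here from i915_reg.h, which wraps them in type-preserving statement expressions):

#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))	/* enable + set */
#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)	/* enable + clear */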
+
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 {
        switch (type) {
@@ -780,12 +820,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
        }
 }
 
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+{
+       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       u32 mcr_s_ss_select;
+       u32 slice = fls(sseu->slice_mask);
+       u32 subslice = fls(sseu->subslice_mask[slice]);
+
+       if (INTEL_GEN(dev_priv) == 10)
+               mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
+                                 GEN8_MCR_SUBSLICE(subslice);
+       else if (INTEL_GEN(dev_priv) >= 11)
+               mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
+                                 GEN11_MCR_SUBSLICE(subslice);
+       else
+               mcr_s_ss_select = 0;
+
+       return mcr_s_ss_select;
+}
+
 static inline uint32_t
 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                  int subslice, i915_reg_t reg)
 {
        uint32_t mcr_slice_subslice_mask;
        uint32_t mcr_slice_subslice_select;
+       uint32_t default_mcr_s_ss_select;
        uint32_t mcr;
        uint32_t ret;
        enum forcewake_domains fw_domains;
@@ -802,6 +862,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                                            GEN8_MCR_SUBSLICE(subslice);
        }
 
+       default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+
        fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                                    FW_REG_READ);
        fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -812,11 +874,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
        intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
 
        mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
-       /*
-        * The HW expects the slice and sublice selectors to be reset to 0
-        * after reading out the registers.
-        */
-       WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+
+       WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
+                    default_mcr_s_ss_select);
+
        mcr &= ~mcr_slice_subslice_mask;
        mcr |= mcr_slice_subslice_select;
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
@@ -824,6 +885,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
        ret = I915_READ_FW(reg);
 
        mcr &= ~mcr_slice_subslice_mask;
+       mcr |= default_mcr_s_ss_select;
+
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
 
        intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
@@ -934,10 +997,19 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return true;
 
        /* Waiting to drain ELSP? */
-       if (READ_ONCE(engine->execlists.active))
-               return false;
+       if (READ_ONCE(engine->execlists.active)) {
+               struct intel_engine_execlists *execlists = &engine->execlists;
+
+               if (tasklet_trylock(&execlists->tasklet)) {
+                       execlists->tasklet.func(execlists->tasklet.data);
+                       tasklet_unlock(&execlists->tasklet);
+               }
+
+               if (READ_ONCE(execlists->active))
+                       return false;
+       }
 
-       /* ELSP is empty, but there are ready requests? */
+       /* ELSP is empty, but there are ready requests? E.g. after reset */
        if (READ_ONCE(engine->execlists.first))
                return false;
 
@@ -978,8 +1050,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
  */
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 {
-       const struct i915_gem_context * const kernel_context =
-               engine->i915->kernel_context;
+       const struct intel_context *kernel_context =
+               to_intel_context(engine->i915->kernel_context, engine);
        struct i915_request *rq;
 
        lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -991,7 +1063,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
         */
        rq = __i915_gem_active_peek(&engine->timeline.last_request);
        if (rq)
-               return rq->ctx == kernel_context;
+               return rq->hw_context == kernel_context;
        else
                return engine->last_retired_context == kernel_context;
 }
@@ -1006,6 +1078,28 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
 }
 
 /**
+ * intel_engines_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_engines_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_engines_sanitize(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       GEM_TRACE("\n");
+
+       for_each_engine(engine, i915, id) {
+               if (engine->reset.reset)
+                       engine->reset.reset(engine, NULL);
+       }
+}
+
+/**
  * intel_engines_park: called when the GT is transitioning from busy->idle
  * @i915: the i915 device
  *
@@ -1043,6 +1137,11 @@ void intel_engines_park(struct drm_i915_private *i915)
                if (engine->park)
                        engine->park(engine);
 
+               if (engine->pinned_default_state) {
+                       i915_gem_object_unpin_map(engine->default_state);
+                       engine->pinned_default_state = NULL;
+               }
+
                i915_gem_batch_pool_fini(&engine->batch_pool);
                engine->execlists.no_priolist = false;
        }
@@ -1060,6 +1159,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
        enum intel_engine_id id;
 
        for_each_engine(engine, i915, id) {
+               void *map;
+
+               /* Pin the default state for fast resets from atomic context. */
+               map = NULL;
+               if (engine->default_state)
+                       map = i915_gem_object_pin_map(engine->default_state,
+                                                     I915_MAP_WB);
+               if (!IS_ERR_OR_NULL(map))
+                       engine->pinned_default_state = map;
+
                if (engine->unpark)
                        engine->unpark(engine);
 
@@ -1067,6 +1176,26 @@ void intel_engines_unpark(struct drm_i915_private *i915)
        }
 }
 
+/**
+ * intel_engine_lost_context: called when the GPU is reset into an unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU or are otherwise about to lose state tracking of
+ * the current GPU logical state (e.g. suspend). On next use, it is therefore
+ * imperative that we make no presumptions about the current state and load
+ * from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+       struct intel_context *ce;
+
+       lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+       ce = fetch_and_zero(&engine->last_retired_context);
+       if (ce)
+               intel_context_unpin(ce);
+}
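
fetch_and_zero() above is the small i915 utility that reads a pointer and clears it in one statement expression; for reference:

#define fetch_and_zero(ptr) ({					\
	typeof(*ptr) __T = *(ptr);				\
	*(ptr) = (typeof(*ptr))0;				\
	__T;							\
})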
+
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
        switch (INTEL_GEN(engine->i915)) {
@@ -1151,7 +1280,7 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
                                                rowsize, sizeof(u32),
                                                line, sizeof(line),
                                                false) >= sizeof(line));
-               drm_printf(m, "%08zx %s\n", pos, line);
+               drm_printf(m, "[%04zx] %s\n", pos, line);
 
                prev = buf + pos;
                skip = false;
@@ -1166,6 +1295,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                &engine->execlists;
        u64 addr;
 
+       if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+               drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
        drm_printf(m, "\tRING_START: 0x%08x\n",
                   I915_READ(RING_START(engine->mmio_base)));
        drm_printf(m, "\tRING_HEAD:  0x%08x\n",
@@ -1287,6 +1418,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
        }
 }
 
+static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+{
+       void *ring;
+       int size;
+
+       drm_printf(m,
+                  "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
+                  rq->head, rq->postfix, rq->tail,
+                  rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+                  rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
+       size = rq->tail - rq->head;
+       if (rq->tail < rq->head)
+               size += rq->ring->size;
+
+       ring = kmalloc(size, GFP_ATOMIC);
+       if (ring) {
+               const void *vaddr = rq->ring->vaddr;
+               unsigned int head = rq->head;
+               unsigned int len = 0;
+
+               if (rq->tail < head) {
+                       len = rq->ring->size - head;
+                       memcpy(ring, vaddr + head, len);
+                       head = 0;
+               }
+               memcpy(ring + len, vaddr + head, size - len);
+
+               hexdump(m, ring, size);
+               kfree(ring);
+       }
+}
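
A worked example of the wrap handling above: with rq->ring->size = 4096, head = 4000 and tail = 100, size is 100 - 4000 + 4096 = 196 bytes; the first memcpy() copies the 96 bytes from offset 4000 to the end of the ring, head resets to 0, and the second memcpy() copies the remaining 100 bytes from the start.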
+
 void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...)
@@ -1296,6 +1460,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        const struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_gpu_error * const error = &engine->i915->gpu_error;
        struct i915_request *rq, *last;
+       unsigned long flags;
        struct rb_node *rb;
        int count;
 
@@ -1336,11 +1501,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        rq = i915_gem_find_active_request(engine);
        if (rq) {
                print_request(m, rq, "\t\tactive ");
-               drm_printf(m,
-                          "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-                          rq->head, rq->postfix, rq->tail,
-                          rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-                          rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
                drm_printf(m, "\t\tring->start:  0x%08x\n",
                           i915_ggtt_offset(rq->ring->vma));
                drm_printf(m, "\t\tring->head:   0x%08x\n",
@@ -1351,6 +1512,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                           rq->ring->emit);
                drm_printf(m, "\t\tring->space:  0x%08x\n",
                           rq->ring->space);
+
+               print_request_ring(m, rq);
        }
 
        rcu_read_unlock();
@@ -1362,7 +1525,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                drm_printf(m, "\tDevice is asleep; skipping register dump\n");
        }
 
-       spin_lock_irq(&engine->timeline.lock);
+       local_irq_save(flags);
+       spin_lock(&engine->timeline.lock);
 
        last = NULL;
        count = 0;
@@ -1404,16 +1568,17 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                print_request(m, last, "\t\tQ ");
        }
 
-       spin_unlock_irq(&engine->timeline.lock);
+       spin_unlock(&engine->timeline.lock);
 
-       spin_lock_irq(&b->rb_lock);
+       spin_lock(&b->rb_lock);
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
                drm_printf(m, "\t%s [%d] waiting for %x\n",
                           w->tsk->comm, w->tsk->pid, w->seqno);
        }
-       spin_unlock_irq(&b->rb_lock);
+       spin_unlock(&b->rb_lock);
+       local_irq_restore(flags);
 
        drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
                   engine->irq_posted,
index e9e02b5..fb2f9fc 100644 (file)
@@ -47,7 +47,7 @@
 
 static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
 {
-       struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
        unsigned int origin =
                ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
 
@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
                drm_framebuffer_put(&intel_fb->base);
                intel_fb = ifbdev->fb = NULL;
        }
-       if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+       if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
                if (ret)
@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
-       if (intel_fb->obj->stolen && !prealloc)
+       if (intel_fb_obj(fb)->stolen && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
         * been restored from swap. If the object is stolen however, it will be
         * full of whatever garbage was left in there.
         */
-       if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+       if (state == FBINFO_STATE_RUNNING &&
+           intel_fb_obj(&ifbdev->fb->base)->stolen)
                memset_io(info->screen_base, 0, info->screen_size);
 
        drm_fb_helper_set_suspend(&ifbdev->helper, state);
index 7fff0a0..c3379bd 100644 (file)
@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
        /* Remove stale busy bits due to the old buffer. */
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
        spin_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
 }
 
 /**
index 116f4cc..1aff30b 100644 (file)
@@ -203,13 +203,11 @@ void intel_guc_fini(struct intel_guc *guc)
        guc_shared_data_destroy(guc);
 }
 
-static u32 get_log_control_flags(void)
+static u32 guc_ctl_debug_flags(struct intel_guc *guc)
 {
-       u32 level = i915_modparams.guc_log_level;
+       u32 level = intel_guc_log_get_level(&guc->log);
        u32 flags = 0;
 
-       GEM_BUG_ON(level < 0);
-
        if (!GUC_LOG_LEVEL_IS_ENABLED(level))
                flags |= GUC_LOG_DEFAULT_DISABLED;
 
@@ -219,6 +217,85 @@ static u32 get_log_control_flags(void)
                flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
                         GUC_LOG_VERBOSITY_SHIFT;
 
+       if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+               u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma)
+                       >> PAGE_SHIFT;
+
+               flags |= ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
+       }
+
+       return flags;
+}
+
+static u32 guc_ctl_feature_flags(struct intel_guc *guc)
+{
+       u32 flags = 0;
+
+       flags |=  GUC_CTL_VCS2_ENABLED;
+
+       if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
+               flags |= GUC_CTL_KERNEL_SUBMISSIONS;
+       else
+               flags |= GUC_CTL_DISABLE_SCHEDULER;
+
+       return flags;
+}
+
+static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
+{
+       u32 flags = 0;
+
+       if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+               u32 ctxnum, base;
+
+               base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
+               ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+               base >>= PAGE_SHIFT;
+               flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
+                       (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
+       }
+       return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
+{
+       u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
+       u32 flags;
+
+       #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
+       #define UNIT SZ_1M
+       #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+       #else
+       #define UNIT SZ_4K
+       #define FLAG 0
+       #endif
+
+       BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+       BUILD_BUG_ON(!DPC_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
+       BUILD_BUG_ON(!ISR_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
+
+       BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
+       BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
+       BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
+
+       flags = GUC_LOG_VALID |
+               GUC_LOG_NOTIFY_ON_HALF_FULL |
+               FLAG |
+               ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+               ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
+               ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
+               (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+       #undef UNIT
+       #undef FLAG
+
        return flags;
 }
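
A worked example of the size encoding above, assuming (not shown in this hunk) that DPC_BUFFER_SIZE keeps the old eight-page allocation at SZ_32K: 32K is not a multiple of 1M, so UNIT is SZ_4K and FLAG is 0; the encoded field is then 32K / 4K - 1 = 7, matching the value that the removed GUC_LOG_DPC_PAGES define used to hardcode.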
 
@@ -245,32 +322,10 @@ void intel_guc_init_params(struct intel_guc *guc)
 
        params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
 
-       params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
-                       GUC_CTL_VCS2_ENABLED;
-
-       params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
-       params[GUC_CTL_DEBUG] = get_log_control_flags();
-
-       /* If GuC submission is enabled, set up additional parameters here */
-       if (USES_GUC_SUBMISSION(dev_priv)) {
-               u32 ads = intel_guc_ggtt_offset(guc,
-                                               guc->ads_vma) >> PAGE_SHIFT;
-               u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
-               u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
-               params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
-               params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
-               pgs >>= PAGE_SHIFT;
-               params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
-                       (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
-
-               params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
-
-               /* Unmask this bit to enable the GuC's internal scheduler */
-               params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
-       }
+       params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+       params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+       params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+       params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
 
        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
@@ -346,10 +401,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                ret = -EIO;
 
        if (ret) {
-               DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
-                                " ret=%d status=0x%08X response=0x%08X\n",
-                                action[0], ret, status,
-                                I915_READ(SOFT_SCRATCH(15)));
+               DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
+                         action[0], ret, status);
                goto out;
        }
 
@@ -572,7 +625,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err;
 
index 0867ba7..1a0f2a3 100644
 #define   GUC_LOG_VALID                        (1 << 0)
 #define   GUC_LOG_NOTIFY_ON_HALF_FULL  (1 << 1)
 #define   GUC_LOG_ALLOC_IN_MEGABYTE    (1 << 3)
-#define   GUC_LOG_CRASH_PAGES          1
 #define   GUC_LOG_CRASH_SHIFT          4
-#define   GUC_LOG_DPC_PAGES            7
+#define   GUC_LOG_CRASH_MASK           (0x1 << GUC_LOG_CRASH_SHIFT)
 #define   GUC_LOG_DPC_SHIFT            6
-#define   GUC_LOG_ISR_PAGES            7
+#define   GUC_LOG_DPC_MASK             (0x7 << GUC_LOG_DPC_SHIFT)
 #define   GUC_LOG_ISR_SHIFT            9
+#define   GUC_LOG_ISR_MASK             (0x7 << GUC_LOG_ISR_SHIFT)
 #define   GUC_LOG_BUF_ADDR_SHIFT       12
 
 #define GUC_CTL_PAGE_FAULT_CONTROL     5
@@ -532,20 +532,6 @@ enum guc_log_buffer_type {
 };
 
 /**
- * DOC: GuC Log buffer Layout
- *
- * Page0  +-------------------------------+
- *        |   ISR state header (32 bytes) |
- *        |      DPC state header         |
- *        |   Crash dump state header     |
- * Page1  +-------------------------------+
- *        |           ISR logs            |
- * Page9  +-------------------------------+
- *        |           DPC logs            |
- * Page17 +-------------------------------+
- *        |         Crash Dump logs       |
- *        +-------------------------------+
- *
  * The state structure below is used to coordinate retrieval of GuC firmware
  * logs. Separate state is maintained for each log buffer type.
  * read_ptr points to the location where i915 last read in the log buffer and
index 401e170..6da61a7 100644
@@ -215,11 +215,11 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
 {
        switch (type) {
        case GUC_ISR_LOG_BUFFER:
-               return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+               return ISR_BUFFER_SIZE;
        case GUC_DPC_LOG_BUFFER:
-               return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+               return DPC_BUFFER_SIZE;
        case GUC_CRASH_DUMP_LOG_BUFFER:
-               return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+               return CRASH_BUFFER_SIZE;
        default:
                MISSING_CASE(type);
        }
@@ -397,7 +397,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
        lockdep_assert_held(&log->relay.lock);
 
         /* Keep the size of sub buffers the same as the shared log buffer */
-       subbuf_size = GUC_LOG_SIZE;
+       subbuf_size = log->vma->size;
 
        /*
         * Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -452,13 +452,34 @@ int intel_guc_log_create(struct intel_guc_log *log)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct i915_vma *vma;
-       unsigned long offset;
-       u32 flags;
+       u32 guc_log_size;
        int ret;
 
        GEM_BUG_ON(log->vma);
 
-       vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
+       /*
+        *  GuC Log buffer Layout
+        *
+        *  +===============================+ 00B
+        *  |    Crash dump state header    |
+        *  +-------------------------------+ 32B
+        *  |       DPC state header        |
+        *  +-------------------------------+ 64B
+        *  |       ISR state header        |
+        *  +-------------------------------+ 96B
+        *  |                               |
+        *  +===============================+ PAGE_SIZE (4KB)
+        *  |        Crash Dump logs        |
+        *  +===============================+ + CRASH_SIZE
+        *  |           DPC logs            |
+        *  +===============================+ + DPC_SIZE
+        *  |           ISR logs            |
+        *  +===============================+ + ISR_SIZE
+        */
+       guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
+                       ISR_BUFFER_SIZE;
+
+       vma = intel_guc_allocate_vma(guc, guc_log_size);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
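
With the non-debug sizes from intel_guc_log.h further down, the layout above
works out to one 4K state page followed by 8K of crash dump, 32K of DPC and
32K of ISR logs, 76K in total. A small stand-alone sketch of the same offset
arithmetic, assuming a 4K PAGE_SIZE:

#include <stdio.h>

#define PAGE_SIZE         0x1000u /* assumed 4K */
#define CRASH_BUFFER_SIZE 0x2000u /* SZ_8K, non-debug default below */
#define DPC_BUFFER_SIZE   0x8000u /* SZ_32K */
#define ISR_BUFFER_SIZE   0x8000u /* SZ_32K */

int main(void)
{
        unsigned int crash = PAGE_SIZE; /* logs start after the state page */
        unsigned int dpc   = crash + CRASH_BUFFER_SIZE;
        unsigned int isr   = dpc + DPC_BUFFER_SIZE;
        unsigned int total = isr + ISR_BUFFER_SIZE;

        printf("crash @ %#x, dpc @ %#x, isr @ %#x, total %uK\n",
               crash, dpc, isr, total / 1024);
        return 0;
}
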
@@ -466,20 +487,12 @@ int intel_guc_log_create(struct intel_guc_log *log)
 
        log->vma = vma;
 
-       /* each allocated unit is a page */
-       flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
-               (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
-               (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
-               (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
-       offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
-       log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+       log->level = i915_modparams.guc_log_level;
 
        return 0;
 
 err:
-       /* logging will be off */
-       i915_modparams.guc_log_level = 0;
+       DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
        return ret;
 }
 
@@ -488,15 +501,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
        i915_vma_unpin_and_release(&log->vma);
 }
 
-int intel_guc_log_level_get(struct intel_guc_log *log)
-{
-       GEM_BUG_ON(!log->vma);
-       GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-
-       return i915_modparams.guc_log_level;
-}
-
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -504,33 +509,32 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 
        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
        GEM_BUG_ON(!log->vma);
-       GEM_BUG_ON(i915_modparams.guc_log_level < 0);
 
        /*
         * GuC recognizes log levels from 0 up to max; we use 0 to indicate
         * that logging should be disabled.
         */
-       if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
+       if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
                return -EINVAL;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       if (i915_modparams.guc_log_level == val) {
+       if (log->level == level) {
                ret = 0;
                goto out_unlock;
        }
 
        intel_runtime_pm_get(dev_priv);
-       ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
-                                    GUC_LOG_LEVEL_IS_ENABLED(val),
-                                    GUC_LOG_LEVEL_TO_VERBOSITY(val));
+       ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
+                                    GUC_LOG_LEVEL_IS_ENABLED(level),
+                                    GUC_LOG_LEVEL_TO_VERBOSITY(level));
        intel_runtime_pm_put(dev_priv);
        if (ret) {
                DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
                goto out_unlock;
        }
 
-       i915_modparams.guc_log_level = val;
+       log->level = level;
 
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
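
GuC itself only understands an enabled/disabled flag plus a verbosity, while
i915 exposes a single numeric level; the GUC_LOG_LEVEL_* helpers used above
fold the two together. A stand-alone restatement of that mapping, assuming
the usual convention that 0 means disabled, 1 enabled but non-verbose, and 2
and up verbose (the real macro bodies live in intel_guc_log.h):

#include <stdio.h>
#include <stdbool.h>

static bool log_level_is_enabled(unsigned int level) { return level > 0; }
static bool log_level_is_verbose(unsigned int level) { return level > 1; }
static unsigned int log_level_to_verbosity(unsigned int level)
{
        return log_level_is_verbose(level) ? level - 2 : 0;
}

int main(void)
{
        for (unsigned int level = 0; level <= 5; level++)
                printf("level %u: enabled=%d verbose=%d verbosity=%u\n",
                       level, log_level_is_enabled(level),
                       log_level_is_verbose(level),
                       log_level_to_verbosity(level));
        return 0;
}
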
index fa80535..7bc763f 100644
 #include <linux/workqueue.h>
 
 #include "intel_guc_fwif.h"
+#include "i915_gem.h"
 
 struct intel_guc;
 
-/*
- * The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap
- */
-#define GUC_LOG_SIZE   ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
-                         1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CRASH_BUFFER_SIZE      SZ_2M
+#define DPC_BUFFER_SIZE                SZ_8M
+#define ISR_BUFFER_SIZE                SZ_8M
+#else
+#define CRASH_BUFFER_SIZE      SZ_8K
+#define DPC_BUFFER_SIZE                SZ_32K
+#define ISR_BUFFER_SIZE                SZ_32K
+#endif
 
 /*
  * While we're using plain log level in i915, GuC controls are much more...
@@ -58,7 +62,7 @@ struct intel_guc;
 #define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
 
 struct intel_guc_log {
-       u32 flags;
+       u32 level;
        struct i915_vma *vma;
        struct {
                void *buf_addr;
@@ -80,8 +84,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log);
 int intel_guc_log_create(struct intel_guc_log *log);
 void intel_guc_log_destroy(struct intel_guc_log *log);
 
-int intel_guc_log_level_get(struct intel_guc_log *log);
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
 bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
 int intel_guc_log_relay_open(struct intel_guc_log *log);
 void intel_guc_log_relay_flush(struct intel_guc_log *log);
@@ -89,4 +92,9 @@ void intel_guc_log_relay_close(struct intel_guc_log *log);
 
 void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
 
+static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
+{
+       return log->level;
+}
+
 #endif
index 2feb650..f394525 100644
@@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
        struct intel_guc_client *client = guc->execbuf_client;
        struct intel_engine_cs *engine = rq->engine;
-       u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
-                                                                engine));
+       u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
        u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
        spin_lock(&client->wq_lock);
@@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
  */
 static void flush_ggtt_writes(struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+       struct drm_i915_private *dev_priv = vma->vm->i915;
 
        if (i915_vma_is_map_and_fenceable(vma))
                POSTING_READ_FW(GUC_STATUS);
@@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work)
                                             preempt_work[engine->id]);
        struct intel_guc_client *client = guc->preempt_client;
        struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-       u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
-                                                                engine));
+       u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+                                                     engine)->lrc_desc);
        u32 data[7];
 
        /*
@@ -623,6 +622,21 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
        report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
 }
 
+static void complete_preempt_context(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists *execlists = &engine->execlists;
+
+       GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+       execlists_cancel_port_requests(execlists);
+       execlists_unwind_incomplete_requests(execlists);
+
+       wait_for_guc_preempt_report(engine);
+       intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+
+       execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
 /**
  * guc_submit() - Submit commands through GuC
  * @engine: engine associated with the commands
@@ -710,7 +724,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                struct i915_request *rq, *rn;
 
                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-                       if (last && rq->ctx != last->ctx) {
+                       if (last && rq->hw_context != last->hw_context) {
                                if (port == last_port) {
                                        __list_del_many(&p->requests,
                                                        &rq->sched.link);
@@ -793,20 +807,44 @@ static void guc_submission_tasklet(unsigned long data)
 
        if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
            intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
-           GUC_PREEMPT_FINISHED) {
-               execlists_cancel_port_requests(&engine->execlists);
-               execlists_unwind_incomplete_requests(execlists);
-
-               wait_for_guc_preempt_report(engine);
-
-               execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
-               intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
-       }
+           GUC_PREEMPT_FINISHED)
+               complete_preempt_context(engine);
 
        if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
                guc_dequeue(engine);
 }
 
+static struct i915_request *
+guc_reset_prepare(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       /*
+        * Prevent request submission to the hardware until we have
+        * completed the reset in i915_gem_reset_finish(). If a request
+        * is completed by one engine, it may then queue a request
+        * to a second via its execlists->tasklet *just* as we are
+        * calling engine->init_hw() and also writing the ELSP.
+        * Turning off the execlists->tasklet until the reset is over
+        * prevents the race.
+        */
+       __tasklet_disable_sync_once(&execlists->tasklet);
+
+       /*
+        * In GuC submission mode we use a worker to queue preemption
+        * requests from the tasklet. Even though the tasklet was disabled,
+        * a worker may still be queued. Make sure that all workers scheduled
+        * before the tasklet was disabled have completed before continuing
+        * with the reset.
+        */
+       if (engine->i915->guc.preempt_wq)
+               flush_workqueue(engine->i915->guc.preempt_wq);
+
+       return i915_gem_find_active_request(engine);
+}
+
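
The ordering in guc_reset_prepare() matters: first stop the tasklet from
submitting, then drain whatever the tasklet already queued. A minimal
stand-alone sketch of that shape, with C11 atomics standing in for the
tasklet and workqueue machinery (which is, of course, far richer in the
kernel):

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool submission_enabled = true;
static atomic_int  queued_work;

static void reset_prepare(void)
{
        /* Step 1: stop new submissions (__tasklet_disable_sync_once above). */
        atomic_store(&submission_enabled, false);

        /* Step 2: drain work queued before the tasklet went quiet
         * (flush_workqueue above). Reversing the steps would let the still
         * running tasklet queue more work behind our back. */
        while (atomic_load(&queued_work) > 0)
                atomic_fetch_sub(&queued_work, 1); /* "run" one pending item */
}

int main(void)
{
        atomic_store(&queued_work, 3); /* pretend the tasklet queued work */
        reset_prepare();
        printf("pending after prepare: %d\n", atomic_load(&queued_work));
        return 0;
}
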
 /*
  * Everything below here is concerned with setup & teardown, and is
  * therefore not part of the somewhat time-critical batch-submission
@@ -1119,7 +1157,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
        WARN_ON(!guc_verify_doorbells(guc));
        ret = guc_clients_create(guc);
        if (ret)
-               return ret;
+               goto err_pool;
 
        for_each_engine(engine, dev_priv, id) {
                guc->preempt_work[id].engine = engine;
@@ -1128,6 +1166,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
        return 0;
 
+err_pool:
+       guc_stage_desc_pool_destroy(guc);
+       return ret;
 }
 
 void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1267,6 +1308,9 @@ int intel_guc_submission_enable(struct intel_guc *guc)
                        &engine->execlists;
 
                execlists->tasklet.func = guc_submission_tasklet;
+
+               engine->reset.prepare = guc_reset_prepare;
+
                engine->park = guc_submission_park;
                engine->unpark = guc_submission_unpark;
 
index a2fe7c8..a6291f6 100644
@@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
                return true;
        if (IS_KABYLAKE(dev_priv))
                return true;
+       if (IS_BROXTON(dev_priv))
+               return true;
        return false;
 }
 
index d47e346..2fc7a0d 100644
@@ -294,6 +294,7 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine,
        engine->hangcheck.seqno = hc->seqno;
        engine->hangcheck.action = hc->action;
        engine->hangcheck.stalled = hc->stalled;
+       engine->hangcheck.wedged = hc->wedged;
 }
 
 static enum intel_engine_hangcheck_action
@@ -368,6 +369,9 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
 
        hc->stalled = time_after(jiffies,
                                 engine->hangcheck.action_timestamp + timeout);
+       hc->wedged = time_after(jiffies,
+                                engine->hangcheck.action_timestamp +
+                                I915_ENGINE_WEDGED_TIMEOUT);
 }
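
time_after() makes these comparisons wrap-safe, so the wedged timeout keeps
working across jiffies wraparound. A stand-alone restatement of the idiom:

#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

/* Wrap-safe "has a passed b?", mirroring the kernel's time_after(a, b). */
static bool time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        unsigned long stamp   = ULONG_MAX - 5UL; /* just before wraparound */
        unsigned long timeout = stamp + 100UL;   /* wraps to a small value */

        printf("at stamp+200: expired=%d\n", time_after(stamp + 200UL, timeout));
        printf("at stamp+50:  expired=%d\n", time_after(stamp + 50UL, timeout));
        return 0;
}

A naive "now > timeout" comparison would misfire the moment the counter
wraps; the signed subtraction keeps the comparison correct as long as the two
timestamps are less than half the counter range apart.
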
 
 static void hangcheck_declare_hang(struct drm_i915_private *i915,
@@ -409,7 +413,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                             gpu_error.hangcheck_work.work);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       unsigned int hung = 0, stuck = 0;
+       unsigned int hung = 0, stuck = 0, wedged = 0;
 
        if (!i915_modparams.enable_hangcheck)
                return;
@@ -440,6 +444,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        if (hc.action != ENGINE_DEAD)
                                stuck |= intel_engine_flag(engine);
                }
+
+               if (engine->hangcheck.wedged)
+                       wedged |= intel_engine_flag(engine);
+       }
+
+       if (wedged) {
+               dev_err(dev_priv->drm.dev,
+                       "GPU recovery timed out,"
+                       " cancelling all in-flight rendering.\n");
+               GEM_TRACE_DUMP();
+               i915_gem_set_wedged(dev_priv);
        }
 
        if (hung)
index d8cb53e..8363fbd 100644
@@ -51,7 +51,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
        struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t enabled_bits;
+       u32 enabled_bits;
 
        enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
@@ -59,6 +59,15 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
             "HDMI port enabled, expecting disabled\n");
 }
 
+static void
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+                                    enum transcoder cpu_transcoder)
+{
+       WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+            TRANS_DDI_FUNC_ENABLE,
+            "HDMI transcoder function enabled, expecting disabled\n");
+}
+
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
        struct intel_digital_port *intel_dig_port =
@@ -144,7 +153,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -199,7 +208,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -259,7 +268,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -317,7 +326,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -376,19 +385,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
-       i915_reg_t data_reg;
        int data_size = type == DP_SDP_VSC ?
                VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
        int i;
        u32 val = I915_READ(ctl_reg);
 
-       data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
-
        val &= ~hsw_infoframe_enable(type);
        I915_WRITE(ctl_reg, val);
 
@@ -442,7 +448,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
                                  union hdmi_infoframe *frame)
 {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-       uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+       u8 buffer[VIDEO_DIP_DATA_SIZE];
        ssize_t len;
 
        /* see comment above for the reason for this offset */
@@ -461,7 +467,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
-                                        const struct intel_crtc_state *crtc_state)
+                                        const struct intel_crtc_state *crtc_state,
+                                        const struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        const struct drm_display_mode *adjusted_mode =
@@ -491,6 +498,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                           intel_hdmi->rgb_quant_range_selectable,
                                           is_hdmi2_sink);
 
+       drm_hdmi_avi_infoframe_content_type(&frame.avi,
+                                           conn_state);
+
        /* TODO: handle pixel repetition for YCBCR420 outputs */
        intel_write_infoframe(encoder, crtc_state, &frame);
 }
@@ -586,7 +596,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -727,7 +737,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -770,7 +780,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -823,7 +833,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -834,11 +844,11 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
                               const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
        u32 val = I915_READ(reg);
 
-       assert_hdmi_port_disabled(intel_hdmi);
+       assert_hdmi_transcoder_func_disabled(dev_priv,
+                                            crtc_state->cpu_transcoder);
 
        val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
                 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -856,7 +866,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -1161,33 +1171,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
+       ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
 
-       tmp = I915_READ(intel_hdmi->hdmi_reg);
-
-       if (!(tmp & SDVO_ENABLE))
-               goto out;
-
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else if (IS_CHERRYVIEW(dev_priv))
-               *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
-
-       ret = true;
-
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
@@ -1421,8 +1414,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
-               temp &= ~SDVO_PIPE_B_SELECT;
-               temp |= SDVO_ENABLE;
+               temp &= ~SDVO_PIPE_SEL_MASK;
+               temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
                /*
                 * HW workaround, need to write this twice for issue
                 * that may result in first write getting masked.
@@ -1577,14 +1570,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        /* check if we can do 8bpc */
        status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
 
-       /* if we can't do 8bpc we may still be able to do 12bpc */
-       if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
-               status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
+       if (hdmi->has_hdmi_sink && !force_dvi) {
+               /* if we can't do 8bpc we may still be able to do 12bpc */
+               if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+                       status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+                                                      true, force_dvi);
+
+               /* if we can't do 8,12bpc we may still be able to do 10bpc */
+               if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
+                       status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+                                                      true, force_dvi);
+       }
 
        return status;
 }
 
-static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+                                    int bpc)
 {
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->base.crtc->dev);
@@ -1596,6 +1598,9 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
        if (HAS_GMCH_DISPLAY(dev_priv))
                return false;
 
+       if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+               return false;
+
        if (crtc_state->pipe_bpp <= 8*3)
                return false;
 
@@ -1603,7 +1608,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
                return false;
 
        /*
-        * HDMI 12bpc affects the clocks, so it's only possible
+        * HDMI deep color affects the clocks, so it's only possible
         * when not cloning with other encoder types.
         */
        if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
@@ -1618,16 +1623,24 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
                if (crtc_state->ycbcr420) {
                        const struct drm_hdmi_info *hdmi = &info->hdmi;
 
-                       if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+                       if (bpc == 12 && !(hdmi->y420_dc_modes &
+                                          DRM_EDID_YCBCR420_DC_36))
+                               return false;
+                       else if (bpc == 10 && !(hdmi->y420_dc_modes &
+                                               DRM_EDID_YCBCR420_DC_30))
                                return false;
                } else {
-                       if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
+                       if (bpc == 12 && !(info->edid_hdmi_dc_modes &
+                                          DRM_EDID_HDMI_DC_36))
+                               return false;
+                       else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
+                                               DRM_EDID_HDMI_DC_30))
                                return false;
                }
        }
 
        /* Display WA #1139: glk */
-       if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+       if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
            crtc_state->base.adjusted_mode.htotal > 5460)
                return false;
 
@@ -1637,7 +1650,8 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
 static bool
 intel_hdmi_ycbcr420_config(struct drm_connector *connector,
                           struct intel_crtc_state *config,
-                          int *clock_12bpc, int *clock_8bpc)
+                          int *clock_12bpc, int *clock_10bpc,
+                          int *clock_8bpc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
 
@@ -1649,6 +1663,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
        /* YCBCR420 TMDS rate requirement is half the pixel clock */
        config->port_clock /= 2;
        *clock_12bpc /= 2;
+       *clock_10bpc /= 2;
        *clock_8bpc /= 2;
        config->ycbcr420 = true;
 
@@ -1676,6 +1691,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
        int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+       int clock_10bpc = clock_8bpc * 5 / 4;
        int clock_12bpc = clock_8bpc * 3 / 2;
        int desired_bpp;
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
@@ -1702,12 +1718,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
                pipe_config->pixel_multiplier = 2;
                clock_8bpc *= 2;
+               clock_10bpc *= 2;
                clock_12bpc *= 2;
        }
 
        if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
                if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
-                                               &clock_12bpc, &clock_8bpc)) {
+                                               &clock_12bpc, &clock_10bpc,
+                                               &clock_8bpc)) {
                        DRM_ERROR("Can't support YCBCR420 output\n");
                        return false;
                }
@@ -1725,18 +1743,25 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        }
 
        /*
-        * HDMI is either 12 or 8, so if the display lets 10bpc sneak
-        * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
-        * outputs. We also need to check that the higher clock still fits
-        * within limits.
+        * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
+        * to check that the higher clock still fits within limits.
         */
-       if (hdmi_12bpc_possible(pipe_config) &&
-           hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
+       if (hdmi_deep_color_possible(pipe_config, 12) &&
+           hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
+                                 true, force_dvi) == MODE_OK) {
                DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                desired_bpp = 12*3;
 
                /* Need to adjust the port link by 1.5x for 12bpc. */
                pipe_config->port_clock = clock_12bpc;
+       } else if (hdmi_deep_color_possible(pipe_config, 10) &&
+                  hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
+                                        true, force_dvi) == MODE_OK) {
+               DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
+               desired_bpp = 10 * 3;
+
+               /* Need to adjust the port link by 1.25x for 10bpc. */
+               pipe_config->port_clock = clock_10bpc;
        } else {
                DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
                desired_bpp = 8*3;
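
The candidate port clocks scale linearly with depth: 12bpc needs 1.5x and
10bpc 1.25x the 8bpc pixel clock, and the cascade above simply tries the
deepest mode first. A quick stand-alone sketch of that selection against a
hypothetical TMDS limit:

#include <stdio.h>

#define MAX_TMDS_CLOCK 340000 /* kHz; hypothetical, real limits vary */

/* Try the deepest color first, as the cascade above does. */
static int pick_hdmi_bpc(int clock_8bpc)
{
        if (clock_8bpc * 3 / 2 <= MAX_TMDS_CLOCK)
                return 12;
        if (clock_8bpc * 5 / 4 <= MAX_TMDS_CLOCK)
                return 10;
        return 8;
}

int main(void)
{
        int clocks[] = { 148500, 241500, 297000 }; /* common mode clocks, kHz */

        for (int i = 0; i < 3; i++) {
                int bpc = pick_hdmi_bpc(clocks[i]);

                printf("8bpc clock %d kHz -> %d bpc, port clock %d kHz\n",
                       clocks[i], bpc, clocks[i] * bpc / 8);
        }
        return 0;
}

Against this limit, 148500 kHz still fits at 12bpc, 241500 kHz only at 10bpc,
and 297000 kHz falls back to 8bpc.
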
@@ -2071,6 +2096,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
        intel_attach_aspect_ratio_property(connector);
+       drm_connector_attach_content_type_property(connector);
        connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }
 
@@ -2257,7 +2283,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
                ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
        else if (HAS_PCH_CNP(dev_priv))
                ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
-       else if (IS_ICELAKE(dev_priv))
+       else if (HAS_PCH_ICP(dev_priv))
                ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
        else
                ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2352,7 +2378,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
-       if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+       if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
index e687550..97606c1 100644
@@ -77,12 +77,12 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
 };
 
 static const struct gmbus_pin gmbus_pins_icp[] = {
-       [GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
-       [GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
-       [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
-       [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
-       [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
-       [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+       [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+       [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+       [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+       [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+       [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+       [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
 };
 
 /* pin is expected to be valid */
@@ -771,7 +771,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
        unsigned int pin;
        int ret;
 
-       if (HAS_PCH_NOP(dev_priv))
+       if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
index 7c4c8fb..33bc914 100644
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-                                           struct intel_engine_cs *engine);
+                                           struct intel_engine_cs *engine,
+                                           struct intel_context *ce);
 static void execlists_init_reg_state(u32 *reg_state,
                                     struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine,
@@ -189,12 +190,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
                !i915_request_completed(last));
 }
 
-/**
- * intel_lr_context_descriptor_update() - calculate & cache the descriptor
- *                                       descriptor for a pinned context
- * @ctx: Context to work on
- * @engine: Engine the descriptor will be used with
- *
+/*
  * The context descriptor encodes various attributes of a context,
  * including its GTT address and some flags. Because it's fairly
  * expensive to calculate, we'll just do it once and cache the result,
@@ -204,7 +200,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
  *
  *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
  *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
- *      bits 32-52:    ctx ID, a globally unique tag
+ *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
  *      bits 53-54:    mbz, reserved for use by hardware
  *      bits 55-63:    group ID, currently unused and set to 0
  *
@@ -222,9 +218,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
  */
 static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-                                  struct intel_engine_cs *engine)
+                                  struct intel_engine_cs *engine,
+                                  struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
        u64 desc;
 
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -237,6 +233,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
                                                                /* bits 12-31 */
        GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
 
+       /*
+        * The following 32bits are copied into the OA reports (dword 2).
+        * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
+        * anything below.
+        */
        if (INTEL_GEN(ctx->i915) >= 11) {
                GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
                desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
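
Going by the bit layout documented above (bits 0-11 flags, bits 12-31 LRCA,
bits 32-52 ctx ID), the packing looks roughly like the following stand-alone
sketch; the values and mask names are made up, and the real shift/width
macros live in the i915 headers:

#include <stdio.h>
#include <stdint.h>

#define CTX_FLAGS_MASK 0x0000000000000fffull /* bits  0-11 */
#define LRCA_MASK      0x00000000fffff000ull /* bits 12-31 */
#define CTX_ID_SHIFT   32                    /* bits 32-52 */

static uint64_t pack_desc(uint64_t flags, uint64_t lrca, uint64_t ctx_id)
{
        return (flags & CTX_FLAGS_MASK) |
               (lrca & LRCA_MASK) |
               (ctx_id << CTX_ID_SHIFT);
}

int main(void)
{
        /* hypothetical values: flag bits, a page-aligned LRCA, ctx ID 42 */
        printf("desc = %#018llx\n",
               (unsigned long long)pack_desc(0x023, 0x12345000, 42));
        return 0;
}
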
@@ -418,9 +419,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-       struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+       struct intel_context *ce = rq->hw_context;
        struct i915_hw_ppgtt *ppgtt =
-               rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+               rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
        u32 *reg_state = ce->lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +431,7 @@ static u64 execlists_update_context(struct i915_request *rq)
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
         */
-       if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+       if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
                execlists_update_context_pdps(ppgtt, reg_state);
 
        return ce->lrc_desc;
@@ -495,14 +496,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
        execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
 }
 
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
 {
        return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-               i915_gem_context_force_single_submission(ctx));
+               i915_gem_context_force_single_submission(ce->gem_context));
 }
 
-static bool can_merge_ctx(const struct i915_gem_context *prev,
-                         const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+                         const struct intel_context *next)
 {
        if (prev != next)
                return false;
@@ -552,8 +553,18 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
        if (execlists->ctrl_reg)
                writel(EL_CTRL_LOAD, execlists->ctrl_reg);
 
-       execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
-       execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
+       execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
+       execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
+static void complete_preempt_context(struct intel_engine_execlists *execlists)
+{
+       GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+       execlists_cancel_port_requests(execlists);
+       execlists_unwind_incomplete_requests(execlists);
+
+       execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
 }
 
 static bool __execlists_dequeue(struct intel_engine_cs *engine)
@@ -602,8 +613,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                GEM_BUG_ON(!execlists_is_active(execlists,
                                                EXECLISTS_ACTIVE_USER));
                GEM_BUG_ON(!port_count(&port[0]));
-               if (port_count(&port[0]) > 1)
-                       return false;
 
                /*
                 * If we write to ELSP a second time before the HW has had
@@ -671,7 +680,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                         * second request, and so we never need to tell the
                         * hardware about the first.
                         */
-                       if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+                       if (last &&
+                           !can_merge_ctx(rq->hw_context, last->hw_context)) {
                                /*
                                 * If we are on the second port and cannot
                                 * combine this request with the last, then we
@@ -690,14 +700,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                                 * the same context (even though a different
                                 * request) to the second port.
                                 */
-                               if (ctx_single_port_submission(last->ctx) ||
-                                   ctx_single_port_submission(rq->ctx)) {
+                               if (ctx_single_port_submission(last->hw_context) ||
+                                   ctx_single_port_submission(rq->hw_context)) {
                                        __list_del_many(&p->requests,
                                                        &rq->sched.link);
                                        goto done;
                                }
 
-                               GEM_BUG_ON(last->ctx == rq->ctx);
+                               GEM_BUG_ON(last->hw_context == rq->hw_context);
 
                                if (submit)
                                        port_assign(port, last);
@@ -947,63 +957,39 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        local_irq_restore(flags);
 }
 
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+static void process_csb(struct intel_engine_cs *engine)
 {
-       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
-       struct drm_i915_private *dev_priv = engine->i915;
+       struct drm_i915_private *i915 = engine->i915;
        bool fw = false;
 
-       /*
-        * We can skip acquiring intel_runtime_pm_get() here as it was taken
-        * on our behalf by the request (see i915_gem_mark_busy()) and it will
-        * not be relinquished until the device is idle (see
-        * i915_gem_idle_work_handler()). As a precaution, we make sure
-        * that all ELSP are drained i.e. we have processed the CSB,
-        * before allowing ourselves to idle and calling intel_runtime_pm_put().
-        */
-       GEM_BUG_ON(!dev_priv->gt.awake);
-
-       /*
-        * Prefer doing test_and_clear_bit() as a two stage operation to avoid
-        * imposing the cost of a locked atomic transaction when submitting a
-        * new request (outside of the context-switch interrupt).
-        */
-       while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+       do {
                /* The HWSP contains a (cacheable) mirror of the CSB */
                const u32 *buf =
                        &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
                unsigned int head, tail;
 
-               if (unlikely(execlists->csb_use_mmio)) {
-                       buf = (u32 * __force)
-                               (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
-                       execlists->csb_head = -1; /* force mmio read of CSB ptrs */
-               }
-
                /* Clear before reading to catch new interrupts */
                clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
                smp_mb__after_atomic();
 
-               if (unlikely(execlists->csb_head == -1)) { /* following a reset */
+               if (unlikely(execlists->csb_use_mmio)) {
                        if (!fw) {
-                               intel_uncore_forcewake_get(dev_priv,
-                                                          execlists->fw_domains);
+                               intel_uncore_forcewake_get(i915, execlists->fw_domains);
                                fw = true;
                        }
 
-                       head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+                       buf = (u32 * __force)
+                               (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+
+                       head = readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
                        tail = GEN8_CSB_WRITE_PTR(head);
                        head = GEN8_CSB_READ_PTR(head);
                        execlists->csb_head = head;
                } else {
                        const int write_idx =
-                               intel_hws_csb_write_index(dev_priv) -
+                               intel_hws_csb_write_index(i915) -
                                I915_HWS_CSB_BUF0_INDEX;
 
                        head = execlists->csb_head;
@@ -1012,8 +998,8 @@ static void execlists_submission_tasklet(unsigned long data)
                }
                GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
                          engine->name,
-                         head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
-                         tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
+                         head, GEN8_CSB_READ_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
+                         tail, GEN8_CSB_WRITE_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
 
                while (head != tail) {
                        struct i915_request *rq;
@@ -1023,7 +1009,8 @@ static void execlists_submission_tasklet(unsigned long data)
                        if (++head == GEN8_CSB_ENTRIES)
                                head = 0;
 
-                       /* We are flying near dragons again.
+                       /*
+                        * We are flying near dragons again.
                         *
                         * We hold a reference to the request in execlist_port[]
                         * but no more than that. We are operating in softirq
@@ -1063,14 +1050,7 @@ static void execlists_submission_tasklet(unsigned long data)
                        if (status & GEN8_CTX_STATUS_COMPLETE &&
                            buf[2*head + 1] == execlists->preempt_complete_status) {
                                GEM_TRACE("%s preempt-idle\n", engine->name);
-
-                               execlists_cancel_port_requests(execlists);
-                               execlists_unwind_incomplete_requests(execlists);
-
-                               GEM_BUG_ON(!execlists_is_active(execlists,
-                                                               EXECLISTS_ACTIVE_PREEMPT));
-                               execlists_clear_active(execlists,
-                                                      EXECLISTS_ACTIVE_PREEMPT);
+                               complete_preempt_context(execlists);
                                continue;
                        }
 
@@ -1139,15 +1119,48 @@ static void execlists_submission_tasklet(unsigned long data)
                if (head != execlists->csb_head) {
                        execlists->csb_head = head;
                        writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
-                              dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+                              i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
                }
-       }
+       } while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted));
 
-       if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
-               execlists_dequeue(engine);
+       if (unlikely(fw))
+               intel_uncore_forcewake_put(i915, execlists->fw_domains);
+}
+
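
process_csb() is at heart a ring-buffer consumer: advance head toward tail
modulo the CSB entry count, decode each event, then write the read pointer
back. The skeleton in stand-alone form, with six entries as on GEN8 and the
event decoding stubbed out:

#include <stdio.h>

#define CSB_ENTRIES 6 /* GEN8_CSB_ENTRIES */

static void consume_csb(const unsigned int *buf,
                        unsigned int *head, unsigned int tail)
{
        while (*head != tail) {
                if (++*head == CSB_ENTRIES) /* pre-increment, as above */
                        *head = 0;
                printf("csb[%u]: status %#010x\n", *head, buf[*head]);
                /* ...decode completion/preemption events here... */
        }
        /* the driver then writes *head back to RING_CONTEXT_STATUS_PTR */
}

int main(void)
{
        unsigned int buf[CSB_ENTRIES] = { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20 };
        unsigned int head = 4, tail = 1; /* wraps past the end of the ring */

        consume_csb(buf, &head, tail);
        return 0;
}
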
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+       GEM_TRACE("%s awake?=%d, active=%x, irq-posted?=%d\n",
+                 engine->name,
+                 engine->i915->gt.awake,
+                 engine->execlists.active,
+                 test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted));
+
+       /*
+        * We can skip acquiring intel_runtime_pm_get() here as it was taken
+        * on our behalf by the request (see i915_gem_mark_busy()) and it will
+        * not be relinquished until the device is idle (see
+        * i915_gem_idle_work_handler()). As a precaution, we make sure
+        * that all ELSP are drained i.e. we have processed the CSB,
+        * before allowing ourselves to idle and calling intel_runtime_pm_put().
+        */
+       GEM_BUG_ON(!engine->i915->gt.awake);
 
-       if (fw)
-               intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+       /*
+        * Prefer doing test_and_clear_bit() as a two stage operation to avoid
+        * imposing the cost of a locked atomic transaction when submitting a
+        * new request (outside of the context-switch interrupt).
+        */
+       if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+               process_csb(engine);
+
+       if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
+               execlists_dequeue(engine);
 
        /* If the engine is now idle, so should be the flag; and vice versa. */
        GEM_BUG_ON(execlists_is_active(&engine->execlists,
@@ -1322,6 +1335,26 @@ static void execlists_schedule(struct i915_request *request,
        spin_unlock_irq(&engine->timeline.lock);
 }
 
+static void execlists_context_destroy(struct intel_context *ce)
+{
+       GEM_BUG_ON(!ce->state);
+       GEM_BUG_ON(ce->pin_count);
+
+       intel_ring_free(ce->ring);
+       __i915_gem_object_release_unless_active(ce->state->obj);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+       intel_ring_unpin(ce->ring);
+
+       ce->state->obj->pin_global--;
+       i915_gem_object_unpin_map(ce->state->obj);
+       i915_vma_unpin(ce->state);
+
+       i915_gem_context_put(ce->gem_context);
+}
+
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 {
        unsigned int flags;
@@ -1345,21 +1378,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
        return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
 }
 
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
-                     struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+                       struct i915_gem_context *ctx,
+                       struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
        void *vaddr;
        int ret;
 
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-       if (likely(ce->pin_count++))
-               goto out;
-       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-       ret = execlists_context_deferred_alloc(ctx, engine);
+       ret = execlists_context_deferred_alloc(ctx, engine, ce);
        if (ret)
                goto err;
        GEM_BUG_ON(!ce->state);
@@ -1378,17 +1405,17 @@ execlists_context_pin(struct intel_engine_cs *engine,
        if (ret)
                goto unpin_map;
 
-       intel_lr_context_descriptor_update(ctx, engine);
+       intel_lr_context_descriptor_update(ctx, engine, ce);
 
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
        ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
                i915_ggtt_offset(ce->ring->vma);
+       GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
        ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
 
        ce->state->obj->pin_global++;
        i915_gem_context_get(ctx);
-out:
-       return ce->ring;
+       return ce;
 
 unpin_map:
        i915_gem_object_unpin_map(ce->state->obj);
@@ -1399,33 +1426,33 @@ err:
        return ERR_PTR(ret);
 }
 
-static void execlists_context_unpin(struct intel_engine_cs *engine,
-                                   struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+       .unpin = execlists_context_unpin,
+       .destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+                     struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-       GEM_BUG_ON(ce->pin_count == 0);
-
-       if (--ce->pin_count)
-               return;
 
-       intel_ring_unpin(ce->ring);
+       if (likely(ce->pin_count++))
+               return ce;
+       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-       ce->state->obj->pin_global--;
-       i915_gem_object_unpin_map(ce->state->obj);
-       i915_vma_unpin(ce->state);
+       ce->ops = &execlists_context_ops;
 
-       i915_gem_context_put(ctx);
+       return __execlists_context_pin(engine, ctx, ce);
 }
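A minimal sketch of the resulting lifecycle, assuming (it is not shown in this hunk) an intel_context_unpin() wrapper that decrements pin_count and only invokes ce->ops->unpin() on the final release:

        struct intel_context *ce;

        ce = engine->context_pin(engine, ctx);  /* 0 -> 1: alloc, map, pin */
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        /* ... build and submit requests against ce ... */

        intel_context_unpin(ce);                /* 1 -> 0: ce->ops->unpin(ce) */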
 
 static int execlists_request_alloc(struct i915_request *request)
 {
-       struct intel_context *ce =
-               to_intel_context(request->ctx, request->engine);
        int ret;
 
-       GEM_BUG_ON(!ce->pin_count);
+       GEM_BUG_ON(!request->hw_context->pin_count);
 
        /* Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
@@ -1538,29 +1565,56 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        return batch;
 }
 
-static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+struct lri {
+       i915_reg_t reg;
+       u32 value;
+};
+
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
 {
-       *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+       GEM_BUG_ON(!count || count > 63);
 
-       /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
-       batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+       *batch++ = MI_LOAD_REGISTER_IMM(count);
+       do {
+               *batch++ = i915_mmio_reg_offset(lri->reg);
+               *batch++ = lri->value;
+       } while (lri++, --count);
+       *batch++ = MI_NOOP;
 
-       *batch++ = MI_LOAD_REGISTER_IMM(3);
+       return batch;
+}
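For the three-entry table below, emit_lri() therefore produces eight dwords. The values are masked-register writes: __MASKED_FIELD(mask, value) is assumed to pack the write-enable mask into the upper 16 bits, which makes the old _MASKED_BIT_ENABLE(x) equivalent to __MASKED_FIELD(x, x) and _MASKED_BIT_DISABLE(x) to __MASKED_FIELD(x, 0):

        MI_LOAD_REGISTER_IMM(3),
        i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2), <masked value>,
        i915_mmio_reg_offset(FF_SLICE_CHICKEN),      <masked value>,
        i915_mmio_reg_offset(_3D_CHICKEN3),          <masked value>,
        MI_NOOP         /* trailing pad, presumably for an even dword count */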
 
-       /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-       *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
-       *batch++ = _MASKED_BIT_DISABLE(
-                       GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+       static const struct lri lri[] = {
+               /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+               {
+                       COMMON_SLICE_CHICKEN2,
+                       __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+                                      0),
+               },
+
+               /* BSpec: 11391 */
+               {
+                       FF_SLICE_CHICKEN,
+                       __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+                                      FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+               },
+
+               /* BSpec: 11299 */
+               {
+                       _3D_CHICKEN3,
+                       __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+                                      _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+               }
+       };
 
-       /* BSpec: 11391 */
-       *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
-       *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+       *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 
-       /* BSpec: 11299 */
-       *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
-       *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+       /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+       batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
-       *batch++ = MI_NOOP;
+       batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
        /* WaClearSlmSpaceAtContextSwitch:kbl */
        /* Actual scratch location is at 128 bytes offset */
@@ -1652,7 +1706,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
@@ -1767,17 +1821,29 @@ static void enable_execlists(struct intel_engine_cs *engine)
                I915_WRITE(RING_MODE_GEN7(engine),
                           _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
 
+       I915_WRITE(RING_MI_MODE(engine->mmio_base),
+                  _MASKED_BIT_DISABLE(STOP_RING));
+
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
                   engine->status_page.ggtt_offset);
        POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
 
-       /* Following the reset, we need to reload the CSB read/write pointers */
-       engine->execlists.csb_head = -1;
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       bool unexpected = false;
+
+       if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+               DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+               unexpected = true;
+       }
+
+       return unexpected;
 }
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
        int ret;
 
        ret = intel_mocs_init_engine(engine);
@@ -1787,11 +1853,13 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
        intel_engine_reset_breadcrumbs(engine);
        intel_engine_init_hangcheck(engine);
 
-       enable_execlists(engine);
+       if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+               struct drm_printer p = drm_debug_printer(__func__);
 
-       /* After a GPU reset, we may have requests to replay */
-       if (execlists->first)
-               tasklet_schedule(&execlists->tasklet);
+               intel_engine_dump(engine, &p, NULL);
+       }
+
+       enable_execlists(engine);
 
        return 0;
 }
@@ -1833,8 +1901,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
        return 0;
 }
 
-static void reset_common_ring(struct intel_engine_cs *engine,
-                             struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_request *request, *active;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       /*
+        * Prevent request submission to the hardware until we have
+        * completed the reset in i915_gem_reset_finish(). If a request
+        * is completed by one engine, it may then queue a request
+        * to a second via its execlists->tasklet *just* as we are
+        * calling engine->init_hw() and also writing the ELSP.
+        * Turning off the execlists->tasklet until the reset is over
+        * prevents the race.
+        */
+       __tasklet_disable_sync_once(&execlists->tasklet);
+
+       /*
+        * We want to flush the pending context switches; having disabled
+        * the tasklet above, we can assume exclusive access to the execlists.
+        * This allows us to catch up with an in-flight preemption event,
+        * and avoid blaming an innocent request if the stall was due to the
+        * preemption itself.
+        */
+       if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+               process_csb(engine);
+
+       /*
+        * The last active request can then be no later than the last request
+        * now in ELSP[0]. So search backwards from there, so that if the GPU
+        * has advanced beyond the last CSB update, it will be pardoned.
+        */
+       active = NULL;
+       request = port_request(execlists->port);
+       if (request) {
+               unsigned long flags;
+
+               /*
+                * Prevent the breadcrumb from advancing before we decide
+                * which request is currently active.
+                */
+               intel_engine_stop_cs(engine);
+
+               spin_lock_irqsave(&engine->timeline.lock, flags);
+               list_for_each_entry_from_reverse(request,
+                                                &engine->timeline.requests,
+                                                link) {
+                       if (__i915_request_completed(request,
+                                                    request->global_seqno))
+                               break;
+
+                       active = request;
+               }
+               spin_unlock_irqrestore(&engine->timeline.lock, flags);
+       }
+
+       return active;
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+                           struct i915_request *request)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned long flags;
@@ -1864,6 +1993,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        __unwind_incomplete_requests(engine);
        spin_unlock(&engine->timeline.lock);
 
+       /* Following the reset, we need to reload the CSB read/write pointers */
+       engine->execlists.csb_head = GEN8_CSB_ENTRIES - 1;
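Priming csb_head to the last slot works because the consumer advances the index before reading each entry; a sketch of the wrap arithmetic, assuming process_csb() pre-increments and that GEN8_CSB_ENTRIES is 6:

        unsigned int head = GEN8_CSB_ENTRIES - 1;       /* 5 */

        if (++head == GEN8_CSB_ENTRIES)                 /* 6 == 6, wrap... */
                head = 0;                               /* first read is entry 0 */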
+
        local_irq_restore(flags);
 
        /*
@@ -1888,35 +2020,52 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * future request will be after userspace has had the opportunity
         * to recreate its own state.
         */
-       regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
-       if (engine->default_state) {
-               void *defaults;
-
-               defaults = i915_gem_object_pin_map(engine->default_state,
-                                                  I915_MAP_WB);
-               if (!IS_ERR(defaults)) {
-                       memcpy(regs, /* skip restoring the vanilla PPHWSP */
-                              defaults + LRC_STATE_PN * PAGE_SIZE,
-                              engine->context_size - PAGE_SIZE);
-                       i915_gem_object_unpin_map(engine->default_state);
-               }
+       regs = request->hw_context->lrc_reg_state;
+       if (engine->pinned_default_state) {
+               memcpy(regs, /* skip restoring the vanilla PPHWSP */
+                      engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+                      engine->context_size - PAGE_SIZE);
        }
-       execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+       execlists_init_reg_state(regs,
+                                request->gem_context, engine, request->ring);
 
        /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
        regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
-       regs[CTX_RING_HEAD + 1] = request->postfix;
 
-       request->ring->head = request->postfix;
+       request->ring->head = intel_ring_wrap(request->ring, request->postfix);
+       regs[CTX_RING_HEAD + 1] = request->ring->head;
+
        intel_ring_update_space(request->ring);
 
        /* Reset WaIdleLiteRestore:bdw,skl as well */
        unwind_wa_tail(request);
 }
 
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+
+       /* After a GPU reset, we may have requests to replay */
+       if (execlists->first)
+               tasklet_schedule(&execlists->tasklet);
+
+       /*
+        * Flush the tasklet while we still have the forcewake to be sure
+        * that it is not allowed to sleep before we restart and reload a
+        * context.
+        *
+        * As before (with execlists_reset_prepare) we rely on the caller
+        * serialising multiple attempts to reset so that we know we are
+        * the only one manipulating tasklet state.
+        */
+       __tasklet_enable_sync_once(&execlists->tasklet);
+
+       GEM_TRACE("%s\n", engine->name);
+}
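Together with execlists_reset_prepare() and execlists_reset() above, this completes a three-phase protocol; a minimal sketch of the caller's shape, assuming the GPU-reset path serialises the sequence as the comments require:

        struct i915_request *rq;

        rq = engine->reset.prepare(engine);     /* quiesce, pick the victim */
        /* ... the hardware reset itself happens here ... */
        engine->reset.reset(engine, rq);        /* rewrite context/ring state */
        engine->reset.finish(engine);           /* re-enable tasklet, replay */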
+
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-       struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
        struct intel_engine_cs *engine = rq->engine;
        const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
        u32 *cs;
@@ -1955,15 +2104,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
         * it is unsafe in case of lite-restore (because the ctx is
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
-       if (rq->ctx->ppgtt &&
-           (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
-           !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+       if (rq->gem_context->ppgtt &&
+           (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+           !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
            !intel_vgpu_active(rq->i915)) {
                ret = intel_logical_ring_emit_pdps(rq);
                if (ret)
                        return ret;
 
-               rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+               rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
        }
 
        cs = intel_ring_begin(rq, 6);
@@ -2224,6 +2373,8 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
        engine->schedule = execlists_schedule;
        engine->execlists.tasklet.func = execlists_submission_tasklet;
 
+       engine->reset.prepare = execlists_reset_prepare;
+
        engine->park = NULL;
        engine->unpark = NULL;
 
@@ -2243,11 +2394,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overriden by each engine. */
        engine->init_hw = gen8_init_common_ring;
-       engine->reset_hw = reset_common_ring;
 
-       engine->context_pin = execlists_context_pin;
-       engine->context_unpin = execlists_context_unpin;
+       engine->reset.prepare = execlists_reset_prepare;
+       engine->reset.reset = execlists_reset;
+       engine->reset.finish = execlists_reset_finish;
 
+       engine->context_pin = execlists_context_pin;
        engine->request_alloc = execlists_request_alloc;
 
        engine->emit_flush = gen8_emit_flush;
@@ -2350,6 +2502,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
                        upper_32_bits(ce->lrc_desc);
        }
 
+       engine->execlists.csb_head = GEN8_CSB_ENTRIES - 1;
+
        return 0;
 
 error:
@@ -2482,7 +2636,7 @@ static void execlists_init_reg_state(u32 *regs,
        struct drm_i915_private *dev_priv = engine->i915;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
        u32 base = engine->mmio_base;
-       bool rcs = engine->id == RCS;
+       bool rcs = engine->class == RENDER_CLASS;
 
        /* A context is actually a big batch buffer with several
         * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2550,7 +2704,7 @@ static void execlists_init_reg_state(u32 *regs,
        CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
        CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-       if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+       if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
@@ -2629,10 +2783,10 @@ err_unpin_ctx:
 }
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-                                           struct intel_engine_cs *engine)
+                                           struct intel_engine_cs *engine,
+                                           struct intel_context *ce)
 {
        struct drm_i915_gem_object *ctx_obj;
-       struct intel_context *ce = to_intel_context(ctx, engine);
        struct i915_vma *vma;
        uint32_t context_size;
        struct intel_ring *ring;
@@ -2654,7 +2808,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        if (IS_ERR(ctx_obj))
                return PTR_ERR(ctx_obj);
 
-       vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto error_deref_obj;
index 4ec7d8d..1593194 100644 (file)
@@ -104,11 +104,4 @@ struct i915_gem_context;
 
 void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
-                           struct intel_engine_cs *engine)
-{
-       return to_intel_context(ctx, engine)->lrc_desc;
-}
-
 #endif /* _INTEL_LRC_H_ */
index 8ae8f42..5dae16c 100644 (file)
@@ -116,7 +116,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
 
 static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
 {
-       uint8_t rev;
+       u8 rev;
 
        if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
                              &rev) != 1) {
index 48f618d..bb06744 100644 (file)
@@ -85,34 +85,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn
        return container_of(connector, struct intel_lvds_connector, base.base);
 }
 
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t lvds_reg, enum pipe *pipe)
+{
+       u32 val;
+
+       val = I915_READ(lvds_reg);
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (HAS_PCH_CPT(dev_priv))
+               *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+       else
+               *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+
+       return val & LVDS_PORT_EN;
+}
+
 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
-
-       tmp = I915_READ(lvds_encoder->reg);
-
-       if (!(tmp & LVDS_PORT_EN))
-               goto out;
+       ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
 
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
-
-       ret = true;
-
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
@@ -255,14 +256,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
        if (HAS_PCH_CPT(dev_priv)) {
-               temp &= ~PORT_TRANS_SEL_MASK;
-               temp |= PORT_TRANS_SEL_CPT(pipe);
+               temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+               temp |= LVDS_PIPE_SEL_CPT(pipe);
        } else {
-               if (pipe == 1) {
-                       temp |= LVDS_PIPEB_SELECT;
-               } else {
-                       temp &= ~LVDS_PIPEB_SELECT;
-               }
+               temp &= ~LVDS_PIPE_SEL_MASK;
+               temp |= LVDS_PIPE_SEL(pipe);
        }
 
        /* set the corresponsding LVDS_BORDER bit */
@@ -948,7 +946,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
         * register is uninitialized.
         */
        val = I915_READ(lvds_encoder->reg);
-       if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+       if (HAS_PCH_CPT(dev_priv))
+               val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+       else
+               val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+       if (val == 0)
                val = dev_priv->vbt.bios_lvds_val;
 
        return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
@@ -1003,8 +1005,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
                return;
 
        /* Skip init on machines we know falsely report LVDS */
-       if (dmi_check_system(intel_no_lvds))
+       if (dmi_check_system(intel_no_lvds)) {
+               WARN(!dev_priv->vbt.int_lvds_support,
+                    "Useless DMI match. Internal LVDS support disabled by VBT\n");
                return;
+       }
+
+       if (!dev_priv->vbt.int_lvds_support) {
+               DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+               return;
+       }
 
        if (HAS_PCH_SPLIT(dev_priv))
                lvds_reg = PCH_LVDS;
@@ -1016,10 +1026,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        if (HAS_PCH_SPLIT(dev_priv)) {
                if ((lvds & LVDS_DETECTED) == 0)
                        return;
-               if (dev_priv->vbt.edp.support) {
-                       DRM_DEBUG_KMS("disable LVDS for eDP support\n");
-                       return;
-               }
        }
 
        pin = GMBUS_PIN_PANEL;
index c58e5f5..e034b41 100644 (file)
@@ -608,16 +608,16 @@ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 #define ACPI_EV_LID            (1<<1)
 #define ACPI_EV_DOCK           (1<<2)
 
-static struct intel_opregion *system_opregion;
-
+/*
+ * The only video events relevant to opregion are 0x80. These indicate either a
+ * docking event, lid switch or display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
 static int intel_opregion_video_event(struct notifier_block *nb,
                                      unsigned long val, void *data)
 {
-       /* The only video events relevant to opregion are 0x80. These indicate
-          either a docking event, lid switch or display switch request. In
-          Linux, these are handled by the dock, button and video drivers.
-       */
-
+       struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+                                                      acpi_notifier);
        struct acpi_bus_event *event = data;
        struct opregion_acpi *acpi;
        int ret = NOTIFY_OK;
@@ -625,10 +625,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
                return NOTIFY_DONE;
 
-       if (!system_opregion)
-               return NOTIFY_DONE;
-
-       acpi = system_opregion->acpi;
+       acpi = opregion->acpi;
 
        if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
                ret = NOTIFY_BAD;
@@ -638,10 +635,6 @@ static int intel_opregion_video_event(struct notifier_block *nb,
        return ret;
 }
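Embedding the notifier_block in struct intel_opregion lets container_of() recover the owning opregion, which is what removes the need for the old system_opregion global; the general pattern, using a hypothetical struct for illustration:

        struct foo {
                int state;
                struct notifier_block nb;
        };

        static int foo_event(struct notifier_block *nb, unsigned long val,
                             void *data)
        {
                struct foo *foo = container_of(nb, struct foo, nb);

                return foo->state ? NOTIFY_OK : NOTIFY_DONE;
        }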
 
-static struct notifier_block intel_opregion_notifier = {
-       .notifier_call = intel_opregion_video_event,
-};
-
 /*
  * Initialise the DIDL field in opregion. This passes a list of devices to
  * the firmware. Values are defined by section B.4.2 of the ACPI specification
@@ -797,8 +790,8 @@ void intel_opregion_register(struct drm_i915_private *dev_priv)
                opregion->acpi->csts = 0;
                opregion->acpi->drdy = 1;
 
-               system_opregion = opregion;
-               register_acpi_notifier(&intel_opregion_notifier);
+               opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
+               register_acpi_notifier(&opregion->acpi_notifier);
        }
 
        if (opregion->asle) {
@@ -822,8 +815,8 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
        if (opregion->acpi) {
                opregion->acpi->drdy = 0;
 
-               system_opregion = NULL;
-               unregister_acpi_notifier(&intel_opregion_notifier);
+               unregister_acpi_notifier(&opregion->acpi_notifier);
+               opregion->acpi_notifier.notifier_call = NULL;
        }
 
        /* just clear all opregion memory pointers now */
index e0e437b..e8498a8 100644 (file)
@@ -49,6 +49,7 @@ struct intel_opregion {
        u32 vbt_size;
        u32 *lid_state;
        struct work_struct asle_work;
+       struct notifier_block acpi_notifier;
 };
 
 #define OPREGION_SIZE            (8 * 1024)
index b443278..14b827e 100644 (file)
@@ -406,11 +406,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
  * Return @source_val in range [@source_min..@source_max] scaled to range
  * [@target_min..@target_max].
  */
-static uint32_t scale(uint32_t source_val,
-                     uint32_t source_min, uint32_t source_max,
-                     uint32_t target_min, uint32_t target_max)
+static u32 scale(u32 source_val,
+                u32 source_min, u32 source_max,
+                u32 target_min, u32 target_max)
 {
-       uint64_t target_val;
+       u64 target_val;
 
        WARN_ON(source_min > source_max);
        WARN_ON(target_min > target_max);
index db27f2f..d4cd19f 100644 (file)
@@ -97,10 +97,6 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
 {
        u32 debug_mask, mask;
 
-       /* No PSR interrupts on VLV/CHV */
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return;
-
        mask = EDP_PSR_ERROR(TRANSCODER_EDP);
        debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
                     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +197,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
        }
 }
 
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
-       uint8_t psr_caps = 0;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
-               return false;
-       return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 {
        uint8_t dprx = 0;
@@ -232,13 +219,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 
 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 {
-       u8 val = 0;
+       u8 val = 8; /* assume the worst if we can't read the value */
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
                val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
        else
-               DRM_ERROR("Unable to get sink synchronization latency\n");
+               DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
        return val;
 }
 
@@ -250,13 +237,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
                         sizeof(intel_dp->psr_dpcd));
 
-       if (intel_dp->psr_dpcd[0]) {
-               dev_priv->psr.sink_support = true;
-               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+       if (!intel_dp->psr_dpcd[0])
+               return;
+       DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+                     intel_dp->psr_dpcd[0]);
+
+       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+               DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+               return;
        }
+       dev_priv->psr.sink_support = true;
+       dev_priv->psr.sink_sync_latency =
+               intel_dp_get_sink_sync_latency(intel_dp);
 
        if (INTEL_GEN(dev_priv) >= 9 &&
            (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+               bool y_req = intel_dp->psr_dpcd[1] &
+                            DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+               bool alpm = intel_dp_get_alpm_status(intel_dp);
+
                /*
                 * All panels that support PSR version 03h (PSR2 +
                 * Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,47 +267,17 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
                 * Y-coordinate requirement panels we would need to enable
                 * GTC first.
                 */
-               dev_priv->psr.sink_psr2_support =
-                               intel_dp_get_y_coord_required(intel_dp);
-               DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
-                             ? "supported" : "not supported");
+               dev_priv->psr.sink_psr2_support = y_req && alpm;
+               DRM_DEBUG_KMS("PSR2 %ssupported\n",
+                             dev_priv->psr.sink_psr2_support ? "" : "not ");
 
                if (dev_priv->psr.sink_psr2_support) {
                        dev_priv->psr.colorimetry_support =
                                intel_dp_get_colorimetry_status(intel_dp);
-                       dev_priv->psr.alpm =
-                               intel_dp_get_alpm_status(intel_dp);
-                       dev_priv->psr.sink_sync_latency =
-                               intel_dp_get_sink_sync_latency(intel_dp);
                }
        }
 }
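For reference, psr_dpcd[0] (DPCD register DP_PSR_SUPPORT, offset 0x070) holds the highest PSR version the sink supports; assuming the usual drm_dp_helper.h values:

        1 (DP_PSR_IS_SUPPORTED)                 -> PSR1
        2 (DP_PSR2_IS_SUPPORTED)                -> PSR2
        3 (DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)   -> PSR2 + Y-coordinate (tested above)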
 
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t val;
-
-       val = I915_READ(VLV_PSRSTAT(pipe)) &
-             VLV_EDP_PSR_CURR_STATE_MASK;
-       return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-              (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       uint32_t val;
-
-       /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
-       val  = I915_READ(VLV_VSCSDP(crtc->pipe));
-       val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
-       val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
-       I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
 static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
                              const struct intel_crtc_state *crtc_state)
 {
@@ -341,12 +310,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
                                        DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-                          DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -389,13 +352,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
        u8 dpcd_val = DP_PSR_ENABLE;
 
        /* Enable ALPM at sink for psr2 */
-       if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
-               drm_dp_dpcd_writeb(&intel_dp->aux,
-                               DP_RECEIVER_ALPM_CONFIG,
-                               DP_ALPM_ENABLE);
-
-       if (dev_priv->psr.psr2_enabled)
+       if (dev_priv->psr.psr2_enabled) {
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+                                  DP_ALPM_ENABLE);
                dpcd_val |= DP_PSR_ENABLE_PSR2;
+       }
+
        if (dev_priv->psr.link_standby)
                dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
@@ -403,81 +365,49 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
-       I915_WRITE(VLV_PSRCTL(crtc->pipe),
-                  VLV_EDP_PSR_MODE_SW_TIMER |
-                  VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
-                  VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-       /*
-        * Let's do the transition from PSR_state 1 (inactive) to
-        * PSR_state 2 (transition to active - static frame transmission).
-        * Then Hardware is responsible for the transition to
-        * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
-        */
-       I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
-                  VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
 static void hsw_activate_psr1(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 max_sleep_time = 0x1f;
+       u32 val = EDP_PSR_ENABLE;
 
-       uint32_t max_sleep_time = 0x1f;
-       /*
-        * Let's respect VBT in case VBT asks a higher idle_frame value.
-        * Let's use 6 as the minimum to cover all known cases including
-        * the off-by-one issue that HW has in some cases. Also there are
-        * cases where sink should be able to train
-        * with the 5 or 6 idle patterns.
+       /* Let's use 6 as the minimum to cover all known cases including the
+        * off-by-one issue that HW has in some cases.
         */
-       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-       uint32_t val = EDP_PSR_ENABLE;
+       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 
-       val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+       /* A sink_sync_latency of 8 means the source has to wait for more
+        * than 8 frames; we'll go with 9 frames for now.
+        */
+       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
        val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
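A worked example of the clamping above, assuming the VBT asks for 5 idle frames and the sink reports a sync latency of 8 frames:

        idle_frames = max(6, 5);        /* = 6, VBT below the known-safe floor */
        idle_frames = max(6, 8 + 1);    /* = 9, sink latency dominates */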
 
+       val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
        if (IS_HASWELL(dev_priv))
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
        if (dev_priv->psr.link_standby)
                val |= EDP_PSR_LINK_STANDBY;
 
-       if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
-               val |= EDP_PSR_TP1_TIME_2500us;
-       else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
-               val |= EDP_PSR_TP1_TIME_500us;
-       else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+       if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+               val |=  EDP_PSR_TP1_TIME_0us;
+       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
                val |= EDP_PSR_TP1_TIME_100us;
+       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+               val |= EDP_PSR_TP1_TIME_500us;
        else
-               val |= EDP_PSR_TP1_TIME_0us;
+               val |= EDP_PSR_TP1_TIME_2500us;
 
-       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
-               val |= EDP_PSR_TP2_TP3_TIME_2500us;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
-               val |= EDP_PSR_TP2_TP3_TIME_500us;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+               val |=  EDP_PSR_TP2_TP3_TIME_0us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
                val |= EDP_PSR_TP2_TP3_TIME_100us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+               val |= EDP_PSR_TP2_TP3_TIME_500us;
        else
-               val |= EDP_PSR_TP2_TP3_TIME_0us;
+               val |= EDP_PSR_TP2_TP3_TIME_2500us;
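The same bucketing, written out as a hypothetical helper to make the rounding direction explicit; the hardware offers only 0/100/500/2500 microsecond steps, so the VBT wakeup time is rounded up to the next step the panel can rely on:

        static u32 psr_tp1_time(int wakeup_time_us)
        {
                if (wakeup_time_us == 0)
                        return EDP_PSR_TP1_TIME_0us;
                else if (wakeup_time_us <= 100)
                        return EDP_PSR_TP1_TIME_100us;
                else if (wakeup_time_us <= 500)
                        return EDP_PSR_TP1_TIME_500us;
                else
                        return EDP_PSR_TP1_TIME_2500us;
        }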
 
        if (intel_dp_source_supports_hbr2(intel_dp) &&
            drm_dp_tps3_supported(intel_dp->dpcd))
@@ -494,15 +424,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       /*
-        * Let's respect VBT in case VBT asks a higher idle_frame value.
-        * Let's use 6 as the minimum to cover all known cases including
-        * the off-by-one issue that HW has in some cases. Also there are
-        * cases where sink should be able to train
-        * with the 5 or 6 idle patterns.
+       u32 val;
+
+       /* Let's use 6 as the minimum to cover all known cases including the
+        * off-by-one issue that HW has in some cases.
         */
-       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-       u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+       val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
 
        /* FIXME: selective update is probably totally broken because it doesn't
         * mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,14 +443,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 
        val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
 
-       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
-               val |= EDP_PSR2_TP2_TIME_2500;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
-               val |= EDP_PSR2_TP2_TIME_500;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
-               val |= EDP_PSR2_TP2_TIME_100;
+       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+           dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+               val |= EDP_PSR2_TP2_TIME_50us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+               val |= EDP_PSR2_TP2_TIME_100us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+               val |= EDP_PSR2_TP2_TIME_500us;
        else
-               val |= EDP_PSR2_TP2_TIME_50;
+               val |= EDP_PSR2_TP2_TIME_2500us;
 
        I915_WRITE(EDP_PSR2_CTL, val);
 }
@@ -602,17 +533,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
         * ones. Since, by display design, transcoder EDP is tied to port A,
         * we can safely escape based on port A.
         */
-       if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+       if (dig_port->base.port != PORT_A) {
                DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
                return;
        }
 
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-           !dev_priv->psr.link_standby) {
-               DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
-               return;
-       }
-
        if (IS_HASWELL(dev_priv) &&
            I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
                      S3D_ENABLE) {
@@ -640,11 +565,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
                return;
        }
 
-       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
-               DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
-               return;
-       }
-
        crtc_state->has_psr = true;
        crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
        DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -751,57 +671,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
        dev_priv->psr.enable_source(intel_dp, crtc_state);
        dev_priv->psr.enabled = intel_dp;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               intel_psr_activate(intel_dp);
-       } else {
-               /*
-                * FIXME: Activation should happen immediately since this
-                * function is just called after pipe is fully trained and
-                * enabled.
-                * However on some platforms we face issues when first
-                * activation follows a modeset so quickly.
-                *     - On VLV/CHV we get bank screen on first activation
-                *     - On HSW/BDW we get a recoverable frozen screen until
-                *       next exit-activate sequence.
-                */
-               schedule_delayed_work(&dev_priv->psr.work,
-                                     msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-       }
+       intel_psr_activate(intel_dp);
 
 unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void vlv_psr_disable(struct intel_dp *intel_dp,
-                           const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       uint32_t val;
-
-       if (dev_priv->psr.active) {
-               /* Put VLV PSR back to PSR_state 0 (disabled). */
-               if (intel_wait_for_register(dev_priv,
-                                           VLV_PSRSTAT(crtc->pipe),
-                                           VLV_EDP_PSR_IN_TRANS,
-                                           0,
-                                           1))
-                       WARN(1, "PSR transition took longer than expected\n");
-
-               val = I915_READ(VLV_PSRCTL(crtc->pipe));
-               val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
-               val &= ~VLV_EDP_PSR_ENABLE;
-               val &= ~VLV_EDP_PSR_MODE_MASK;
-               I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
-               dev_priv->psr.active = false;
-       } else {
-               WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
-       }
-}
-
 static void hsw_psr_disable(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
 {
@@ -879,8 +754,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 
        dev_priv->psr.enabled = NULL;
        mutex_unlock(&dev_priv->psr.lock);
-
-       cancel_delayed_work_sync(&dev_priv->psr.work);
+       cancel_work_sync(&dev_priv->psr.work);
 }
 
 static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
@@ -894,21 +768,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
        if (!intel_dp)
                return false;
 
-       if (HAS_DDI(dev_priv)) {
-               if (dev_priv->psr.psr2_enabled) {
-                       reg = EDP_PSR2_STATUS;
-                       mask = EDP_PSR2_STATUS_STATE_MASK;
-               } else {
-                       reg = EDP_PSR_STATUS;
-                       mask = EDP_PSR_STATUS_STATE_MASK;
-               }
+       if (dev_priv->psr.psr2_enabled) {
+               reg = EDP_PSR2_STATUS;
+               mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
-               struct drm_crtc *crtc =
-                       dp_to_dig_port(intel_dp)->base.base.crtc;
-               enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-               reg = VLV_PSRSTAT(pipe);
-               mask = VLV_EDP_PSR_IN_TRANS;
+               reg = EDP_PSR_STATUS;
+               mask = EDP_PSR_STATUS_STATE_MASK;
        }
 
        mutex_unlock(&dev_priv->psr.lock);
@@ -925,10 +790,13 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
 static void intel_psr_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), psr.work.work);
+               container_of(work, typeof(*dev_priv), psr.work);
 
        mutex_lock(&dev_priv->psr.lock);
 
+       if (!dev_priv->psr.enabled)
+               goto unlock;
+
        /*
         * We have to make sure PSR is ready for re-enable
         * otherwise it keeps disabled until next full enable/disable cycle.
@@ -953,103 +821,24 @@ unlock:
 
 static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-       struct intel_dp *intel_dp = dev_priv->psr.enabled;
-       struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
        u32 val;
 
        if (!dev_priv->psr.active)
                return;
 
-       if (HAS_DDI(dev_priv)) {
-               if (dev_priv->psr.psr2_enabled) {
-                       val = I915_READ(EDP_PSR2_CTL);
-                       WARN_ON(!(val & EDP_PSR2_ENABLE));
-                       I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-               } else {
-                       val = I915_READ(EDP_PSR_CTL);
-                       WARN_ON(!(val & EDP_PSR_ENABLE));
-                       I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-               }
+       if (dev_priv->psr.psr2_enabled) {
+               val = I915_READ(EDP_PSR2_CTL);
+               WARN_ON(!(val & EDP_PSR2_ENABLE));
+               I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
        } else {
-               val = I915_READ(VLV_PSRCTL(pipe));
-
-               /*
-                * Here we do the transition drirectly from
-                * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
-                * PSR_state 5 (exit).
-                * PSR State 4 (active with single frame update) can be skipped.
-                * On PSR_state 5 (exit) Hardware is responsible to transition
-                * back to PSR_state 1 (inactive).
-                * Now we are at Same state after vlv_psr_enable_source.
-                */
-               val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
-               I915_WRITE(VLV_PSRCTL(pipe), val);
-
-               /*
-                * Send AUX wake up - Spec says after transitioning to PSR
-                * active we have to send AUX wake up by writing 01h in DPCD
-                * 600h of sink device.
-                * XXX: This might slow down the transition, but without this
-                * HW doesn't complete the transition to PSR_state 1 and we
-                * never get the screen updated.
-                */
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
-                                  DP_SET_POWER_D0);
+               val = I915_READ(EDP_PSR_CTL);
+               WARN_ON(!(val & EDP_PSR_ENABLE));
+               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
        }
-
        dev_priv->psr.active = false;
 }
 
 /**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
-                                  unsigned frontbuffer_bits)
-{
-       struct drm_crtc *crtc;
-       enum pipe pipe;
-       u32 val;
-
-       if (!CAN_PSR(dev_priv))
-               return;
-
-       /*
-        * Single frame update is already supported on BDW+ but it requires
-        * many W/A and it isn't really needed.
-        */
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
-               return;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-       pipe = to_intel_crtc(crtc)->pipe;
-
-       if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
-               val = I915_READ(VLV_PSRCTL(pipe));
-
-               /*
-                * We need to set this bit before writing registers for a flip.
-                * This bit will be self-clear when it gets to the PSR active state.
-                */
-               I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
-       }
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
@@ -1071,7 +860,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
        if (!CAN_PSR(dev_priv))
                return;
 
-       if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+       if (origin == ORIGIN_FLIP)
                return;
 
        mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +903,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
        if (!CAN_PSR(dev_priv))
                return;
 
-       if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+       if (origin == ORIGIN_FLIP)
                return;
 
        mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +920,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
        /* By definition flush = invalidate + flush */
        if (frontbuffer_bits) {
-               if (dev_priv->psr.psr2_enabled ||
-                   IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               if (dev_priv->psr.psr2_enabled) {
                        intel_psr_exit(dev_priv);
                } else {
                        /*
@@ -1149,9 +937,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
        }
 
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-               if (!work_busy(&dev_priv->psr.work.work))
-                       schedule_delayed_work(&dev_priv->psr.work,
-                                             msecs_to_jiffies(100));
+               schedule_work(&dev_priv->psr.work);
        mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -1184,9 +970,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                /* HSW and BDW require workarounds that we don't implement. */
                dev_priv->psr.link_standby = false;
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               /* On VLV and CHV only standby mode is supported. */
-               dev_priv->psr.link_standby = true;
        else
                /* For new platforms let's respect VBT back again */
                dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
@@ -1201,21 +984,13 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
                dev_priv->psr.link_standby = false;
        }
 
-       INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+       INIT_WORK(&dev_priv->psr.work, intel_psr_work);
        mutex_init(&dev_priv->psr.lock);
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->psr.enable_source = vlv_psr_enable_source;
-               dev_priv->psr.disable_source = vlv_psr_disable;
-               dev_priv->psr.enable_sink = vlv_psr_enable_sink;
-               dev_priv->psr.activate = vlv_psr_activate;
-               dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
-       } else {
-               dev_priv->psr.has_hw_tracking = true;
-               dev_priv->psr.enable_source = hsw_psr_enable_source;
-               dev_priv->psr.disable_source = hsw_psr_disable;
-               dev_priv->psr.enable_sink = hsw_psr_enable_sink;
-               dev_priv->psr.activate = hsw_psr_activate;
-               dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
-       }
+       dev_priv->psr.enable_source = hsw_psr_enable_source;
+       dev_priv->psr.disable_source = hsw_psr_disable;
+       dev_priv->psr.enable_sink = hsw_psr_enable_sink;
+       dev_priv->psr.activate = hsw_psr_activate;
+       dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
 }
index 8f19349..e0448ef 100644 (file)
@@ -496,6 +496,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
                DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
                                 engine->name, I915_READ_HEAD(engine));
 
+       /* Check that the ring offsets point within the ring! */
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
        intel_ring_update_space(ring);
        I915_WRITE_HEAD(engine, ring->head);
        I915_WRITE_TAIL(engine, ring->tail);
@@ -531,16 +535,33 @@ out:
        return ret;
 }
 
-static void reset_ring_common(struct intel_engine_cs *engine,
-                             struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
 {
-       /*
-        * RC6 must be prevented until the reset is complete and the engine
-        * reinitialised. If it occurs in the middle of this sequence, the
-        * state written to/loaded from the power context is ill-defined (e.g.
-        * the PP_BASE_DIR may be lost).
-        */
-       assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+       intel_engine_stop_cs(engine);
+
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+
+       return i915_gem_find_active_request(engine);
+}
+
+static void skip_request(struct i915_request *rq)
+{
+       void *vaddr = rq->ring->vaddr;
+       u32 head;
+
+       head = rq->infix;
+       if (rq->postfix < head) {
+               memset32(vaddr + head, MI_NOOP,
+                        (rq->ring->size - head) / sizeof(u32));
+               head = 0;
+       }
+       memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
+}
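A worked example of the wrap handling above, assuming a hypothetical 4096-byte ring where the hung request crosses the wrap with rq->infix == 3968 and rq->postfix == 128:

        /* postfix (128) < head (3968): the first memset32() pads 3968..4096
         * with MI_NOOP and head resets to 0; the second memset32() then pads
         * 0..128, leaving only the request's breadcrumb to execute. */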
+
+static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+       GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
 
        /*
         * Try to restore the logical GPU state to match the continuation
@@ -556,47 +577,18 @@ static void reset_ring_common(struct intel_engine_cs *engine,
         * If the request was innocent, we try to replay the request with
         * the restored context.
         */
-       if (request) {
-               struct drm_i915_private *dev_priv = request->i915;
-               struct intel_context *ce = to_intel_context(request->ctx,
-                                                           engine);
-               struct i915_hw_ppgtt *ppgtt;
-
-               if (ce->state) {
-                       I915_WRITE(CCID,
-                                  i915_ggtt_offset(ce->state) |
-                                  BIT(8) /* must be set! */ |
-                                  CCID_EXTENDED_STATE_SAVE |
-                                  CCID_EXTENDED_STATE_RESTORE |
-                                  CCID_EN);
-               }
-
-               ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
-               if (ppgtt) {
-                       u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
-
-                       I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
-                       I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
-
-                       /* Wait for the PD reload to complete */
-                       if (intel_wait_for_register(dev_priv,
-                                                   RING_PP_DIR_BASE(engine),
-                                                   BIT(0), 0,
-                                                   10))
-                               DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
-
-                       ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-               }
-
+       if (rq) {
                /* If the rq hung, jump to its breadcrumb and skip the batch */
-               if (request->fence.error == -EIO)
-                       request->ring->head = request->postfix;
-       } else {
-               engine->legacy_active_context = NULL;
-               engine->legacy_active_ppgtt = NULL;
+               rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
+               if (rq->fence.error == -EIO)
+                       skip_request(rq);
        }
 }
 
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
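
The old single reset_hw hook is being split into prepare/reset/finish stages (see the reset vtable later in this diff). A standalone sketch of how the three phases are expected to compose, with hypothetical stand-in types, assuming the core reset path calls them in order around the actual hardware reset:

#include <stdio.h>

struct engine;
struct request { int guilty; };

/* Hypothetical mirror of the new three-phase reset vtable. */
struct reset_ops {
    struct request *(*prepare)(struct engine *e);
    void (*reset)(struct engine *e, struct request *rq);
    void (*finish)(struct engine *e);
};

struct engine {
    struct reset_ops reset;
    struct request active;
};

static struct request *prep(struct engine *e)
{
    puts("stop command streamer, find active request");
    return &e->active;
}

static void do_reset(struct engine *e, struct request *rq)
{
    (void)e;
    printf("rewind ring; guilty=%d\n", rq ? rq->guilty : 0);
}

static void fini(struct engine *e)
{
    (void)e;  /* nothing to undo for the legacy ringbuffer backend */
}

int main(void)
{
    struct engine e = { { prep, do_reset, fini }, { 1 } };
    struct request *rq = e.reset.prepare(&e);
    /* ... platform-specific hardware reset happens here ... */
    e.reset.reset(&e, rq);
    e.reset.finish(&e);
    return 0;
}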
+
 static int intel_rcs_ctx_init(struct i915_request *rq)
 {
        int ret;
@@ -1033,6 +1025,8 @@ int intel_ring_pin(struct intel_ring *ring,
                flags |= PIN_OFFSET_BIAS | offset_bias;
        if (vma->obj->stolen)
                flags |= PIN_MAPPABLE;
+       else
+               flags |= PIN_HIGH;
 
        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
                if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@ -1066,6 +1060,8 @@ err:
 
 void intel_ring_reset(struct intel_ring *ring, u32 tail)
 {
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
        ring->tail = tail;
        ring->head = tail;
        ring->emit = tail;
@@ -1105,7 +1101,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
 
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err;
 
@@ -1169,10 +1165,43 @@ intel_ring_free(struct intel_ring *ring)
        kfree(ring);
 }
 
-static int context_pin(struct intel_context *ce)
+static void intel_ring_context_destroy(struct intel_context *ce)
 {
-       struct i915_vma *vma = ce->state;
-       int ret;
+       GEM_BUG_ON(ce->pin_count);
+
+       if (ce->state)
+               __i915_gem_object_release_unless_active(ce->state->obj);
+}
+
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt;
+       int err = 0;
+
+       ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+       if (ppgtt)
+               err = gen6_ppgtt_pin(ppgtt);
+
+       return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt;
+
+       ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+       if (ppgtt)
+               gen6_ppgtt_unpin(ppgtt);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+       struct i915_vma *vma;
+       int err;
+
+       vma = ce->state;
+       if (!vma)
+               return 0;
 
        /*
         * Clear this page out of any CPU caches for coherent swap-in/out.
@@ -1180,13 +1209,43 @@ static int context_pin(struct intel_context *ce)
         * on an active context (which by nature is already on the GPU).
         */
        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-               ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-               if (ret)
-                       return ret;
+               err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+               if (err)
+                       return err;
        }
 
-       return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
-                           PIN_GLOBAL | PIN_HIGH);
+       err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+                          PIN_GLOBAL | PIN_HIGH);
+       if (err)
+               return err;
+
+       /*
+        * And mark it as a globally pinned object to let the shrinker know
+        * it cannot reclaim the object until we release it.
+        */
+       vma->obj->pin_global++;
+
+       return 0;
+}
+
+static void __context_unpin(struct intel_context *ce)
+{
+       struct i915_vma *vma;
+
+       vma = ce->state;
+       if (!vma)
+               return;
+
+       vma->obj->pin_global--;
+       i915_vma_unpin(vma);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+       __context_unpin_ppgtt(ce->gem_context);
+       __context_unpin(ce);
+
+       i915_gem_context_put(ce->gem_context);
 }
 
 static struct i915_vma *
@@ -1243,7 +1302,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
                i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
        }
 
-       vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
@@ -1258,81 +1317,79 @@ err_obj:
        return ERR_PTR(err);
 }
 
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+                  struct i915_gem_context *ctx,
+                  struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
-       int ret;
-
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-       if (likely(ce->pin_count++))
-               goto out;
-       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+       int err;
 
        if (!ce->state && engine->context_size) {
                struct i915_vma *vma;
 
                vma = alloc_context_vma(engine);
                if (IS_ERR(vma)) {
-                       ret = PTR_ERR(vma);
+                       err = PTR_ERR(vma);
                        goto err;
                }
 
                ce->state = vma;
        }
 
-       if (ce->state) {
-               ret = context_pin(ce);
-               if (ret)
-                       goto err;
+       err = __context_pin(ce);
+       if (err)
+               goto err;
 
-               ce->state->obj->pin_global++;
-       }
+       err = __context_pin_ppgtt(ce->gem_context);
+       if (err)
+               goto err_unpin;
 
        i915_gem_context_get(ctx);
 
-out:
        /* One ringbuffer to rule them all */
-       return engine->buffer;
+       GEM_BUG_ON(!engine->buffer);
+       ce->ring = engine->buffer;
+
+       return ce;
 
+err_unpin:
+       __context_unpin(ce);
 err:
        ce->pin_count = 0;
-       return ERR_PTR(ret);
+       return ERR_PTR(err);
 }
 
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
-                                    struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+       .unpin = intel_ring_context_unpin,
+       .destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+                      struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-       GEM_BUG_ON(ce->pin_count == 0);
 
-       if (--ce->pin_count)
-               return;
+       if (likely(ce->pin_count++))
+               return ce;
+       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-       if (ce->state) {
-               ce->state->obj->pin_global--;
-               i915_vma_unpin(ce->state);
-       }
+       ce->ops = &ring_context_ops;
 
-       i915_gem_context_put(ctx);
+       return __ring_context_pin(engine, ctx, ce);
 }
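
intel_ring_context_pin() only takes the slow path on the first pin; nested pins merely bump ce->pin_count, and the matching unpin releases the state only when the count drops back to zero. A standalone restatement of that refcount shape (hypothetical types, not the driver's):

#include <assert.h>

struct ctx { unsigned int pin_count; int pinned_hw; };

/* First caller performs the expensive pin; nested callers only
 * increment the count. */
static struct ctx *ctx_pin(struct ctx *ce)
{
    if (ce->pin_count++)      /* already pinned: fast path */
        return ce;
    ce->pinned_hw = 1;        /* slow path: pin backing state */
    return ce;
}

static void ctx_unpin(struct ctx *ce)
{
    assert(ce->pin_count);
    if (--ce->pin_count)
        return;
    ce->pinned_hw = 0;        /* last unpin releases the state */
}

int main(void)
{
    struct ctx c = { 0, 0 };
    ctx_pin(&c);
    ctx_pin(&c);
    ctx_unpin(&c);
    assert(c.pinned_hw == 1); /* still pinned by the outer user */
    ctx_unpin(&c);
    assert(c.pinned_hw == 0);
    return 0;
}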
 
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct intel_ring *ring;
        struct i915_timeline *timeline;
+       struct intel_ring *ring;
+       unsigned int size;
        int err;
 
        intel_engine_setup_common(engine);
 
-       err = intel_engine_init_common(engine);
-       if (err)
-               goto err;
-
        timeline = i915_timeline_create(engine->i915, engine->name);
        if (IS_ERR(timeline)) {
                err = PTR_ERR(timeline);
@@ -1354,8 +1411,23 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        GEM_BUG_ON(engine->buffer);
        engine->buffer = ring;
 
+       size = PAGE_SIZE;
+       if (HAS_BROKEN_CS_TLB(engine->i915))
+               size = I830_WA_SIZE;
+       err = intel_engine_create_scratch(engine, size);
+       if (err)
+               goto err_unpin;
+
+       err = intel_engine_init_common(engine);
+       if (err)
+               goto err_scratch;
+
        return 0;
 
+err_scratch:
+       intel_engine_cleanup_scratch(engine);
+err_unpin:
+       intel_ring_unpin(ring);
 err_ring:
        intel_ring_free(ring);
 err:
@@ -1392,6 +1464,48 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
                intel_ring_reset(engine->buffer, 0);
 }
 
+static int load_pd_dir(struct i915_request *rq,
+                      const struct i915_hw_ppgtt *ppgtt)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+       *cs++ = PP_DIR_DCLV_2G;
+
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+       *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
+static int flush_pd_dir(struct i915_request *rq)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Stall until the page table load is complete */
+       *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+       *cs++ = i915_ggtt_offset(engine->scratch);
+       *cs++ = MI_NOOP;
+
+       intel_ring_advance(rq, cs);
+       return 0;
+}
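
load_pd_dir() and flush_pd_dir() emit small command packets: two MI_LOAD_REGISTER_IMM writes to point the engine at the new page directory, then an MI_STORE_REGISTER_MEM read-back whose completion stalls the command streamer until the load has landed. A standalone sketch of laying out such dword packets (the MI_INSTR-style encodings below follow the opcode << 23 | flags shape i915 uses, but treat the register offsets and values as illustrative):

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags)   (((uint32_t)(opcode) << 23) | (flags))
#define MI_LOAD_REGISTER_IMM(n)   MI_INSTR(0x22, 2 * (n) - 1)
#define MI_STORE_REGISTER_MEM     MI_INSTR(0x24, 1)
#define MI_NOOP                   0u

/* Append one immediate register write: (reg, value). */
static uint32_t *emit_lri(uint32_t *cs, uint32_t reg, uint32_t value)
{
    *cs++ = MI_LOAD_REGISTER_IMM(1);
    *cs++ = reg;
    *cs++ = value;
    return cs;
}

/* Append a register read-back to memory; the CS serializes on it. */
static uint32_t *emit_srm(uint32_t *cs, uint32_t reg, uint32_t gtt_addr)
{
    *cs++ = MI_STORE_REGISTER_MEM;
    *cs++ = reg;
    *cs++ = gtt_addr;
    *cs++ = MI_NOOP;     /* keep the packet qword-aligned */
    return cs;
}

int main(void)
{
    uint32_t ring[10], *cs = ring;

    cs = emit_lri(cs, 0x2220 /* hypothetical PP_DIR_DCLV */, ~0u);
    cs = emit_lri(cs, 0x2228 /* hypothetical PP_DIR_BASE */, 42u << 10);
    cs = emit_srm(cs, 0x2228, 0x1000 /* hypothetical scratch offset */);

    for (uint32_t *p = ring; p < cs; p++)
        printf("0x%08x\n", *p);
    return 0;
}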
+
 static inline int mi_set_context(struct i915_request *rq, u32 flags)
 {
        struct drm_i915_private *i915 = rq->i915;
@@ -1402,6 +1516,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
                INTEL_INFO(i915)->num_rings - 1 :
                0;
+       bool force_restore = false;
        int len;
        u32 *cs;
 
@@ -1415,6 +1530,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        len = 4;
        if (IS_GEN7(i915))
                len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+       if (flags & MI_FORCE_RESTORE) {
+               GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
+               flags &= ~MI_FORCE_RESTORE;
+               force_restore = true;
+               len += 2;
+       }
 
        cs = intel_ring_begin(rq, len);
        if (IS_ERR(cs))
@@ -1439,9 +1560,29 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                }
        }
 
+       if (force_restore) {
+               /*
+                * The HW doesn't handle being told to restore the current
+                * context very well. Quite often it likes to go off and
+                * sulk, especially when it is meant to be reloading PP_DIR.
+                * A very simple fix to force the reload is to simply switch
+                * away from the current context and back again.
+                *
+                * Note that the kernel_context will contain random state
+                * following the INHIBIT_RESTORE. We accept this since we
+                * never use the kernel_context state; it is merely a
+                * placeholder we use to flush other contexts.
+                */
+               *cs++ = MI_SET_CONTEXT;
+               *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
+                                                         engine)->state) |
+                       MI_MM_SPACE_GTT |
+                       MI_RESTORE_INHIBIT;
+       }
+
        *cs++ = MI_NOOP;
        *cs++ = MI_SET_CONTEXT;
-       *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+       *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1509,31 +1650,28 @@ static int remap_l3(struct i915_request *rq, int slice)
 static int switch_context(struct i915_request *rq)
 {
        struct intel_engine_cs *engine = rq->engine;
-       struct i915_gem_context *to_ctx = rq->ctx;
-       struct i915_hw_ppgtt *to_mm =
-               to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
-       struct i915_gem_context *from_ctx = engine->legacy_active_context;
-       struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+       struct i915_gem_context *ctx = rq->gem_context;
+       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+       unsigned int unwind_mm = 0;
        u32 hw_flags = 0;
        int ret, i;
 
        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
-       if (to_mm != from_mm ||
-           (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
-               trace_switch_mm(engine, to_ctx);
-               ret = to_mm->switch_mm(to_mm, rq);
+       if (ppgtt) {
+               ret = load_pd_dir(rq, ppgtt);
                if (ret)
                        goto err;
 
-               to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
-               engine->legacy_active_ppgtt = to_mm;
-               hw_flags = MI_FORCE_RESTORE;
+               if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
+                       unwind_mm = intel_engine_flag(engine);
+                       ppgtt->pd_dirty_rings &= ~unwind_mm;
+                       hw_flags = MI_FORCE_RESTORE;
+               }
        }
 
-       if (to_intel_context(to_ctx, engine)->state &&
-           (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+       if (rq->hw_context->state) {
                GEM_BUG_ON(engine->id != RCS);
 
                /*
@@ -1543,35 +1681,38 @@ static int switch_context(struct i915_request *rq)
                 * as nothing actually executes using the kernel context; it
                 * is purely used for flushing user contexts.
                 */
-               if (i915_gem_context_is_kernel(to_ctx))
+               if (i915_gem_context_is_kernel(ctx))
                        hw_flags = MI_RESTORE_INHIBIT;
 
                ret = mi_set_context(rq, hw_flags);
                if (ret)
                        goto err_mm;
+       }
 
-               engine->legacy_active_context = to_ctx;
+       if (ppgtt) {
+               ret = flush_pd_dir(rq);
+               if (ret)
+                       goto err_mm;
        }
 
-       if (to_ctx->remap_slice) {
+       if (ctx->remap_slice) {
                for (i = 0; i < MAX_L3_SLICES; i++) {
-                       if (!(to_ctx->remap_slice & BIT(i)))
+                       if (!(ctx->remap_slice & BIT(i)))
                                continue;
 
                        ret = remap_l3(rq, i);
                        if (ret)
-                               goto err_ctx;
+                               goto err_mm;
                }
 
-               to_ctx->remap_slice = 0;
+               ctx->remap_slice = 0;
        }
 
        return 0;
 
-err_ctx:
-       engine->legacy_active_context = from_ctx;
 err_mm:
-       engine->legacy_active_ppgtt = from_mm;
+       if (unwind_mm)
+               ppgtt->pd_dirty_rings |= unwind_mm;
 err:
        return ret;
 }
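
switch_context() now clears the engine's pd_dirty_rings bit optimistically and records what it cleared in unwind_mm, restoring the bit if a later emission step fails. A standalone sketch of that unwind pattern (hypothetical names):

#include <stdint.h>
#include <stdio.h>

static int emit_step(int fail) { return fail ? -5 /* -EIO */ : 0; }

/* Clear a per-engine "dirty" bit optimistically, remember what was
 * cleared, and put it back if a later step fails. */
static int do_switch(uint32_t *pd_dirty_rings, uint32_t engine_bit, int fail)
{
    uint32_t unwind = 0;
    int ret;

    if (*pd_dirty_rings & engine_bit) {
        unwind = engine_bit;
        *pd_dirty_rings &= ~unwind;   /* assume the reload will succeed */
    }

    ret = emit_step(fail);
    if (ret && unwind)
        *pd_dirty_rings |= unwind;    /* request failed: mark dirty again */
    return ret;
}

int main(void)
{
    uint32_t dirty = 0x1;
    printf("ok path:  ret=%d dirty=%#x\n", do_switch(&dirty, 0x1, 0), dirty);
    dirty = 0x1;
    printf("err path: ret=%d dirty=%#x\n", do_switch(&dirty, 0x1, 1), dirty);
    return 0;
}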
@@ -1580,7 +1721,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
        int ret;
 
-       GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+       GEM_BUG_ON(!request->hw_context->pin_count);
 
        /* Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
@@ -2006,11 +2147,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        intel_ring_init_semaphores(dev_priv, engine);
 
        engine->init_hw = init_ring_common;
-       engine->reset_hw = reset_ring_common;
+       engine->reset.prepare = reset_prepare;
+       engine->reset.reset = reset_ring;
+       engine->reset.finish = reset_finish;
 
        engine->context_pin = intel_ring_context_pin;
-       engine->context_unpin = intel_ring_context_unpin;
-
        engine->request_alloc = ring_request_alloc;
 
        engine->emit_breadcrumb = i9xx_emit_breadcrumb;
@@ -2074,16 +2215,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
-       if (INTEL_GEN(dev_priv) >= 6) {
-               ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-               if (ret)
-                       return ret;
-       } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
-               ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
index 010750e..a0bc7a8 100644 (file)
@@ -122,7 +122,8 @@ struct intel_engine_hangcheck {
        int deadlock;
        struct intel_instdone instdone;
        struct i915_request *active_request;
-       bool stalled;
+       bool stalled:1;
+       bool wedged:1;
 };
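
Turning the hangcheck flags into single-bit fields lets the new wedged flag share storage with stalled instead of growing the struct. A tiny demonstration of the packing (exact sizes are implementation-defined; typical results noted in the comments):

#include <stdio.h>

struct flags_plain  { _Bool stalled;   _Bool wedged;   };  /* usually 2 bytes */
struct flags_packed { _Bool stalled:1; _Bool wedged:1; };  /* usually 1 byte  */

int main(void)
{
    printf("plain=%zu packed=%zu\n",
           sizeof(struct flags_plain), sizeof(struct flags_packed));
    return 0;
}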
 
 struct intel_ring {
@@ -342,6 +343,7 @@ struct intel_engine_cs {
        struct i915_timeline timeline;
 
        struct drm_i915_gem_object *default_state;
+       void *pinned_default_state;
 
        atomic_t irq_count;
        unsigned long irq_posted;
@@ -423,18 +425,22 @@ struct intel_engine_cs {
        void            (*irq_disable)(struct intel_engine_cs *engine);
 
        int             (*init_hw)(struct intel_engine_cs *engine);
-       void            (*reset_hw)(struct intel_engine_cs *engine,
-                                   struct i915_request *rq);
+
+       struct {
+               struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+               void (*reset)(struct intel_engine_cs *engine,
+                             struct i915_request *rq);
+               void (*finish)(struct intel_engine_cs *engine);
+       } reset;
 
        void            (*park)(struct intel_engine_cs *engine);
        void            (*unpark)(struct intel_engine_cs *engine);
 
        void            (*set_default_submission)(struct intel_engine_cs *engine);
 
-       struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
-                                         struct i915_gem_context *ctx);
-       void            (*context_unpin)(struct intel_engine_cs *engine,
-                                        struct i915_gem_context *ctx);
+       struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+                                            struct i915_gem_context *ctx);
+
        int             (*request_alloc)(struct i915_request *rq);
        int             (*init_context)(struct i915_request *rq);
 
@@ -550,16 +556,7 @@ struct intel_engine_cs {
         * to the kernel context and trash it as the save may not happen
         * before the hardware is powered down.
         */
-       struct i915_gem_context *last_retired_context;
-
-       /* We track the current MI_SET_CONTEXT in order to eliminate
-        * redundant context switches. This presumes that requests are not
-        * reordered! Or when they are the tracking is updated along with
-        * the emission of individual requests into the legacy command
-        * stream (ring).
-        */
-       struct i915_gem_context *legacy_active_context;
-       struct i915_hw_ppgtt *legacy_active_ppgtt;
+       struct intel_context *last_retired_context;
 
        /* status_notifier: list of callbacks for context-switch changes */
        struct atomic_notifier_head context_status_notifier;
@@ -809,6 +806,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
        return pos & (ring->size - 1);
 }
 
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+                       unsigned int pos)
+{
+       if (pos & -ring->size) /* must be strictly within the ring */
+               return false;
+
+       if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+               return false;
+
+       return true;
+}
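
intel_ring_offset_valid() leans on ring sizes being powers of two: in two's complement, -size has every bit at or above log2(size) set, so pos & -size is non-zero exactly when pos falls outside the ring. A standalone restatement with a couple of sanity checks:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Standalone restatement of the validity test, assuming 'size' is a
 * power of two, as ring sizes are in this driver. */
static bool offset_valid(uint32_t size, uint32_t pos)
{
    if (pos & -size)        /* any bit >= log2(size) set: out of range */
        return false;
    if (pos & 7)            /* must be qword (8-byte) aligned */
        return false;
    return true;
}

int main(void)
{
    assert(offset_valid(4096, 0));
    assert(offset_valid(4096, 4088));
    assert(!offset_valid(4096, 4096));  /* == size: out of range */
    assert(!offset_valid(4096, 12));    /* dword- but not qword-aligned */
    return 0;
}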
+
 static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
@@ -820,12 +830,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 static inline void
 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 {
-       /* We could combine these into a single tail operation, but keeping
-        * them as separate tests will help identify the cause should one
-        * ever fire.
-        */
-       GEM_BUG_ON(!IS_ALIGNED(tail, 8));
-       GEM_BUG_ON(tail >= ring->size);
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
 
        /*
         * "Ring Buffer Use"
@@ -865,14 +870,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+                               unsigned int size);
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
+
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
@@ -1042,10 +1052,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
        return cs;
 }
 
+void intel_engines_sanitize(struct drm_i915_private *i915);
+
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
 
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
index 53a6eaa..de3a810 100644 (file)
@@ -128,6 +128,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
+       case POWER_DOMAIN_AUX_E:
+               return "AUX_E";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
        case POWER_DOMAIN_AUX_IO_A:
index 26975df..e6a64b3 100644 (file)
@@ -1406,27 +1406,37 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
                return false;
 }
 
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+       u32 val;
+
+       val = I915_READ(sdvo_reg);
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (HAS_PCH_CPT(dev_priv))
+               *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+       else if (IS_CHERRYVIEW(dev_priv))
+               *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
+       else
+               *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+       return val & SDVO_ENABLE;
+}
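
intel_sdvo_port_enabled() decodes the pipe-select field even when the port is disabled, since the state asserts want the pipe regardless, and returns only the enable bit. A standalone sketch of that decode (the mask, shift and bit positions here are placeholders, not the real SDVO register layout):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for illustration only. */
#define PORT_ENABLE     (1u << 31)
#define PIPE_SEL_SHIFT  29
#define PIPE_SEL_MASK   (3u << PIPE_SEL_SHIFT)

/* Report the selected pipe even when the port is disabled, and
 * return whether the port is enabled. */
static int port_enabled(uint32_t val, unsigned int *pipe)
{
    *pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
    return (val & PORT_ENABLE) != 0;
}

int main(void)
{
    unsigned int pipe;
    uint32_t val = 2u << PIPE_SEL_SHIFT;  /* pipe C selected, port off */
    int on = port_enabled(val, &pipe);

    printf("enabled=%d pipe=%u\n", on, pipe);
    return 0;
}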
+
 static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        u16 active_outputs = 0;
-       u32 tmp;
+       bool ret;
 
-       tmp = I915_READ(intel_sdvo->sdvo_reg);
        intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
 
-       if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
-               return false;
-
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
+       ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
 
-       return true;
+       return ret || active_outputs;
 }
 
 static void intel_sdvo_get_config(struct intel_encoder *encoder,
@@ -1553,8 +1563,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
-               temp &= ~SDVO_PIPE_B_SELECT;
-               temp |= SDVO_ENABLE;
+               temp &= ~SDVO_PIPE_SEL_MASK;
+               temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
                intel_sdvo_write_sdvox(intel_sdvo, temp);
 
                temp &= ~SDVO_ENABLE;
index ee23613..344c0e7 100644 (file)
@@ -284,13 +284,35 @@ skl_update_plane(struct intel_plane *plane,
        /* program plane scaler */
        if (plane_state->scaler_id >= 0) {
                int scaler_id = plane_state->scaler_id;
-               const struct intel_scaler *scaler;
+               const struct intel_scaler *scaler =
+                       &crtc_state->scaler_state.scalers[scaler_id];
+               u16 y_hphase, uv_rgb_hphase;
+               u16 y_vphase, uv_rgb_vphase;
+
+               /* TODO: handle sub-pixel coordinates */
+               if (fb->format->format == DRM_FORMAT_NV12) {
+                       y_hphase = skl_scaler_calc_phase(1, false);
+                       y_vphase = skl_scaler_calc_phase(1, false);
+
+                       /* MPEG2 chroma siting convention */
+                       uv_rgb_hphase = skl_scaler_calc_phase(2, true);
+                       uv_rgb_vphase = skl_scaler_calc_phase(2, false);
+               } else {
+                       /* not used */
+                       y_hphase = 0;
+                       y_vphase = 0;
 
-               scaler = &crtc_state->scaler_state.scalers[scaler_id];
+                       uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+                       uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+               }
 
                I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
                              PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
                I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+               I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+                             PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+               I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+                             PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
                I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
                              ((crtc_w + 1) << 16)|(crtc_h + 1));
@@ -327,19 +349,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 bool
-skl_plane_get_hw_state(struct intel_plane *plane)
+skl_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum plane_id plane_id = plane->id;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+       ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -588,19 +612,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 static bool
-vlv_plane_get_hw_state(struct intel_plane *plane)
+vlv_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum plane_id plane_id = plane->id;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+       ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -754,18 +780,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 static bool
-ivb_plane_get_hw_state(struct intel_plane *plane)
+ivb_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
+       ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+       ret =  I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -910,18 +938,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 static bool
-g4x_plane_get_hw_state(struct intel_plane *plane)
+g4x_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+       ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -1071,6 +1101,37 @@ intel_check_sprite_plane(struct intel_plane *plane,
        return 0;
 }
 
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+       return INTEL_GEN(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+                                const struct drm_intel_sprite_colorkey *set)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+       *key = *set;
+
+       /*
+        * We want src key enabled on the
+        * sprite and not on the primary.
+        */
+       if (plane->id == PLANE_PRIMARY &&
+           set->flags & I915_SET_COLORKEY_SOURCE)
+               key->flags = 0;
+
+       /*
+        * On SKL+ we want dst key enabled on
+        * the primary and not on the sprite.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               key->flags = 0;
+}
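
intel_plane_set_ckey() routes the key by plane: a source key is kept only on the sprite, and on SKL+ a destination key is kept only on the primary. A standalone restatement of that routing rule (hypothetical flag values, and a bare gen number standing in for INTEL_GEN()):

#include <stdint.h>
#include <stdio.h>

#define KEY_SOURCE      (1u << 0)
#define KEY_DESTINATION (1u << 1)

/* Source key applies only to the sprite; on gen9+ a destination key
 * applies only to the primary. Returns the flags this plane keeps. */
static uint32_t route_key_flags(int gen, int is_primary, uint32_t flags)
{
    if (is_primary && (flags & KEY_SOURCE))
        return 0;                 /* src key: sprite only */
    if (gen >= 9 && !is_primary && (flags & KEY_DESTINATION))
        return 0;                 /* gen9+ dst key: primary only */
    return flags;
}

int main(void)
{
    printf("%u\n", route_key_flags(9, 1, KEY_SOURCE));       /* dropped */
    printf("%u\n", route_key_flags(9, 0, KEY_DESTINATION));  /* dropped */
    printf("%u\n", route_key_flags(9, 0, KEY_SOURCE));       /* kept */
    return 0;
}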
+
 int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
 {
@@ -1100,6 +1161,16 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
        if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
                return -ENOENT;
 
+       /*
+        * On SKL+, only plane 2 can do destination keying against plane 1.
+        * Also multiple planes can't do destination keying on the same
+        * pipe simultaneously.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 &&
+           to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               return -EINVAL;
+
        drm_modeset_acquire_init(&ctx, 0);
 
        state = drm_atomic_state_alloc(plane->dev);
@@ -1112,11 +1183,28 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
        while (1) {
                plane_state = drm_atomic_get_plane_state(state, plane);
                ret = PTR_ERR_OR_ZERO(plane_state);
-               if (!ret) {
-                       to_intel_plane_state(plane_state)->ckey = *set;
-                       ret = drm_atomic_commit(state);
+               if (!ret)
+                       intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+               /*
+                * On some platforms we have to configure
+                * the dst colorkey on the primary plane.
+                */
+               if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+                       struct intel_crtc *crtc =
+                               intel_get_crtc_for_pipe(dev_priv,
+                                                       to_intel_plane(plane)->pipe);
+
+                       plane_state = drm_atomic_get_plane_state(state,
+                                                                crtc->base.primary);
+                       ret = PTR_ERR_OR_ZERO(plane_state);
+                       if (!ret)
+                               intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
                }
 
+               if (!ret)
+                       ret = drm_atomic_commit(state);
+
                if (ret != -EDEADLK)
                        break;
 
@@ -1211,8 +1299,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
        DRM_FORMAT_MOD_INVALID
 };
 
-static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_YUYV:
@@ -1228,8 +1325,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool snb_mod_supported(uint32_t format, uint64_t modifier)
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
@@ -1246,8 +1352,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_ABGR8888:
@@ -1269,8 +1384,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+                                          u32 format, u64 modifier)
 {
+       struct intel_plane *plane = to_intel_plane(_plane);
+
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               break;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               if (!plane->has_ccs)
+                       return false;
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
@@ -1302,38 +1435,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
-                                                    uint32_t format,
-                                                    uint64_t modifier)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
-       if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
-               return false;
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = g4x_sprite_format_mod_supported,
+};
 
-       if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
-           modifier != DRM_FORMAT_MOD_LINEAR)
-               return false;
+static const struct drm_plane_funcs snb_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = snb_sprite_format_mod_supported,
+};
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_mod_supported(format, modifier);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return vlv_mod_supported(format, modifier);
-       else if (INTEL_GEN(dev_priv) >= 6)
-               return snb_mod_supported(format, modifier);
-       else
-               return g4x_mod_supported(format, modifier);
-}
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = vlv_sprite_format_mod_supported,
+};
 
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
-        .update_plane = drm_atomic_helper_update_plane,
-        .disable_plane = drm_atomic_helper_disable_plane,
-        .destroy = intel_plane_destroy,
-        .atomic_get_property = intel_plane_atomic_get_property,
-        .atomic_set_property = intel_plane_atomic_set_property,
-        .atomic_duplicate_state = intel_plane_duplicate_state,
-        .atomic_destroy_state = intel_plane_destroy_state,
-        .format_mod_supported = intel_sprite_plane_format_mod_supported,
+static const struct drm_plane_funcs skl_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = skl_plane_format_mod_supported,
 };
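
The refactor replaces a single format_mod_supported() callback that branched on platform at every call with per-platform const drm_plane_funcs tables selected once at plane creation. A standalone sketch of that dispatch style (hypothetical names and modifier values):

#include <stdio.h>

struct plane_funcs {
    int (*format_mod_supported)(unsigned long modifier);
};

static int g4x_mod_ok(unsigned long m) { return m == 0; }   /* linear only */
static int skl_mod_ok(unsigned long m) { return m <= 3; }   /* linear + tiled */

static const struct plane_funcs g4x_funcs = { g4x_mod_ok };
static const struct plane_funcs skl_funcs = { skl_mod_ok };

int main(void)
{
    int gen = 9;
    /* Pick the vtable once at creation; callers never branch again. */
    const struct plane_funcs *funcs = gen >= 9 ? &skl_funcs : &g4x_funcs;

    printf("modifier 2 supported: %d\n", funcs->format_mod_supported(2));
    return 0;
}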
 
 bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@ -1359,6 +1502,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 {
        struct intel_plane *intel_plane = NULL;
        struct intel_plane_state *state = NULL;
+       const struct drm_plane_funcs *plane_funcs;
        unsigned long possible_crtcs;
        const uint32_t *plane_formats;
        const uint64_t *modifiers;
@@ -1383,6 +1527,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                intel_plane->can_scale = true;
                state->scaler_id = -1;
 
+               intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+                                                        PLANE_SPRITE0 + plane);
+
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
                intel_plane->get_hw_state = skl_plane_get_hw_state;
@@ -1396,10 +1543,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                        num_plane_formats = ARRAY_SIZE(skl_plane_formats);
                }
 
-               if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+               if (intel_plane->has_ccs)
                        modifiers = skl_plane_format_modifiers_ccs;
                else
                        modifiers = skl_plane_format_modifiers_noccs;
+
+               plane_funcs = &skl_plane_funcs;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_plane->can_scale = false;
                intel_plane->max_downscale = 1;
@@ -1411,6 +1560,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                plane_formats = vlv_plane_formats;
                num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
+
+               plane_funcs = &vlv_sprite_funcs;
        } else if (INTEL_GEN(dev_priv) >= 7) {
                if (IS_IVYBRIDGE(dev_priv)) {
                        intel_plane->can_scale = true;
@@ -1427,6 +1578,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                plane_formats = snb_plane_formats;
                num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
+
+               plane_funcs = &snb_sprite_funcs;
        } else {
                intel_plane->can_scale = true;
                intel_plane->max_downscale = 16;
@@ -1439,9 +1592,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                if (IS_GEN6(dev_priv)) {
                        plane_formats = snb_plane_formats;
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+                       plane_funcs = &snb_sprite_funcs;
                } else {
                        plane_formats = g4x_plane_formats;
                        num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+
+                       plane_funcs = &g4x_sprite_funcs;
                }
        }
 
@@ -1468,14 +1625,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-                                              possible_crtcs, &intel_sprite_plane_funcs,
+                                              possible_crtcs, plane_funcs,
                                               plane_formats, num_plane_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_OVERLAY,
                                               "plane %d%c", plane + 2, pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-                                              possible_crtcs, &intel_sprite_plane_funcs,
+                                              possible_crtcs, plane_funcs,
                                               plane_formats, num_plane_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_OVERLAY,
index b55b5c1..24dc368 100644 (file)
@@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
 static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 tmp = I915_READ(TV_CTL);
 
-       if (!(tmp & TV_ENC_ENABLE))
-               return false;
+       *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
 
-       *pipe = PORT_TO_PIPE(tmp);
-
-       return true;
+       return tmp & TV_ENC_ENABLE;
 }
 
 static void
@@ -1032,8 +1028,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
                break;
        }
 
-       if (intel_crtc->pipe == 1)
-               tv_ctl |= TV_ENC_PIPEB_SELECT;
+       tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
        tv_ctl |= tv_mode->oversample;
 
        if (tv_mode->progressive)
@@ -1157,12 +1152,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
 
        /* Poll for TV detection */
-       tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+       tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
        tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
-       if (intel_crtc->pipe == 1)
-               tv_ctl |= TV_ENC_PIPEB_SELECT;
-       else
-               tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+       tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
 
        tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
        tv_dac |= (TVDAC_STATE_CHG_EN |
index 1cffaf7..94e8863 100644 (file)
@@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *i915)
 {
-       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       struct intel_uc_fw *guc_fw = &i915->guc.fw;
+       struct intel_uc_fw *huc_fw = &i915->huc.fw;
        int enable_guc = 0;
 
        /* Default is to enable GuC/HuC if we know their firmwares */
@@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
        return enable_guc;
 }
 
-static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+static int __get_default_guc_log_level(struct drm_i915_private *i915)
 {
        int guc_log_level;
 
-       if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+       if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
                guc_log_level = GUC_LOG_LEVEL_DISABLED;
        else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
                 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
 
 /**
  * sanitize_options_early - sanitize uC related modparam options
- * @dev_priv: device private
+ * @i915: device private
  *
  * In case of "enable_guc" option this function will attempt to modify
  * it only if it was initially set to "auto(-1)". Default value for this
@@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
  * unless GuC is enabled on given platform and the driver is compiled with
  * debug config when this modparam will default to "enable(1..4)".
  */
-static void sanitize_options_early(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *i915)
 {
-       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       struct intel_uc_fw *guc_fw = &i915->guc.fw;
+       struct intel_uc_fw *huc_fw = &i915->huc.fw;
 
        /* A negative value means "use platform default" */
        if (i915_modparams.enable_guc < 0)
-               i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+               i915_modparams.enable_guc = __get_platform_enable_guc(i915);
 
        DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
                         i915_modparams.enable_guc,
@@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
        if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "enable_guc", i915_modparams.enable_guc,
-                        !HAS_GUC(dev_priv) ? "no GuC hardware" :
-                                             "no GuC firmware");
+                        !HAS_GUC(i915) ? "no GuC hardware" :
+                                         "no GuC firmware");
        }
 
        /* Verify HuC firmware availability */
        if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "enable_guc", i915_modparams.enable_guc,
-                        !HAS_HUC(dev_priv) ? "no HuC hardware" :
-                                             "no HuC firmware");
+                        !HAS_HUC(i915) ? "no HuC hardware" :
+                                         "no HuC firmware");
        }
 
        /* A negative value means "use platform/config default" */
        if (i915_modparams.guc_log_level < 0)
                i915_modparams.guc_log_level =
-                       __get_default_guc_log_level(dev_priv);
+                       __get_default_guc_log_level(i915);
 
        if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "guc_log_level", i915_modparams.guc_log_level,
-                        !HAS_GUC(dev_priv) ? "no GuC hardware" :
-                                             "GuC not enabled");
+                        !HAS_GUC(i915) ? "no GuC hardware" :
+                                         "GuC not enabled");
                i915_modparams.guc_log_level = 0;
        }
 
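
sanitize_options_early() follows the usual modparam convention: a negative value means "use the platform default", and incompatible combinations are warned about and forced back to a safe value. A standalone sketch of that shape for the log-level option (hypothetical helper, not the driver's logic verbatim):

#include <stdio.h>

/* Resolve -1 to a platform default; force invalid combinations back
 * to a safe value with a warning. */
static int sanitize_log_level(int requested, int has_guc, int platform_default)
{
    int level = requested < 0 ? platform_default : requested;

    if (level > 0 && !has_guc) {
        fprintf(stderr, "guc_log_level=%d ignored: no GuC\n", level);
        level = 0;
    }
    return level;
}

int main(void)
{
    printf("%d\n", sanitize_log_level(-1, 1, 2)); /* auto -> default 2 */
    printf("%d\n", sanitize_log_level(3, 0, 2));  /* no GuC -> 0 */
    return 0;
}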
@@ -195,20 +195,19 @@ void intel_uc_cleanup_early(struct drm_i915_private *i915)
 
 /**
  * intel_uc_init_mmio - setup uC MMIO access
- *
- * @dev_priv: device private
+ * @i915: device private
  *
  * Setup minimal state necessary for MMIO accesses later in the
  * initialization sequence.
  */
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
+void intel_uc_init_mmio(struct drm_i915_private *i915)
 {
-       intel_guc_init_send_regs(&dev_priv->guc);
+       intel_guc_init_send_regs(&i915->guc);
 }
 
 static void guc_capture_load_err_log(struct intel_guc *guc)
 {
-       if (!guc->log.vma || !i915_modparams.guc_log_level)
+       if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
                return;
 
        if (!guc->load_err_log)
@@ -225,11 +224,11 @@ static void guc_free_load_err_log(struct intel_guc *guc)
 
 static int guc_enable_communication(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_private *i915 = guc_to_i915(guc);
 
-       gen9_enable_guc_interrupts(dev_priv);
+       gen9_enable_guc_interrupts(i915);
 
-       if (HAS_GUC_CT(dev_priv))
+       if (HAS_GUC_CT(i915))
                return intel_guc_ct_enable(&guc->ct);
 
        guc->send = intel_guc_send_mmio;
@@ -239,23 +238,23 @@ static int guc_enable_communication(struct intel_guc *guc)
 
 static void guc_disable_communication(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_private *i915 = guc_to_i915(guc);
 
-       if (HAS_GUC_CT(dev_priv))
+       if (HAS_GUC_CT(i915))
                intel_guc_ct_disable(&guc->ct);
 
-       gen9_disable_guc_interrupts(dev_priv);
+       gen9_disable_guc_interrupts(i915);
 
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
 }
 
-int intel_uc_init_misc(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
        int ret;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
        intel_guc_init_ggtt_pin_bias(guc);
@@ -267,32 +266,32 @@ int intel_uc_init_misc(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+void intel_uc_fini_misc(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return;
 
        intel_guc_fini_wq(guc);
 }
 
-int intel_uc_init(struct drm_i915_private *dev_priv)
+int intel_uc_init(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
        int ret;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
-       if (!HAS_GUC(dev_priv))
+       if (!HAS_GUC(i915))
                return -ENODEV;
 
        ret = intel_guc_init(guc);
        if (ret)
                return ret;
 
-       if (USES_GUC_SUBMISSION(dev_priv)) {
+       if (USES_GUC_SUBMISSION(i915)) {
                /*
                 * This is stuff we need to have available at fw load time
                 * if we are planning to enable submission later
@@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-void intel_uc_fini(struct drm_i915_private *dev_priv)
+void intel_uc_fini(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return;
 
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
+       GEM_BUG_ON(!HAS_GUC(i915));
 
-       if (USES_GUC_SUBMISSION(dev_priv))
+       if (USES_GUC_SUBMISSION(i915))
                intel_guc_submission_fini(guc);
 
        intel_guc_fini(guc);
@@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
        __intel_uc_reset_hw(i915);
 }
 
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_hw(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
-       struct intel_huc *huc = &dev_priv->huc;
+       struct intel_guc *guc = &i915->guc;
+       struct intel_huc *huc = &i915->huc;
        int ret, attempts;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
+       GEM_BUG_ON(!HAS_GUC(i915));
 
-       gen9_reset_guc_interrupts(dev_priv);
+       gen9_reset_guc_interrupts(i915);
 
        /* WaEnableuKernelHeaderValidFix:skl */
        /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
-       if (IS_GEN9(dev_priv))
+       if (IS_GEN9(i915))
                attempts = 3;
        else
                attempts = 1;
@@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
                 * Always reset the GuC just before (re)loading, so
                 * that the state and timing are fairly predictable
                 */
-               ret = __intel_uc_reset_hw(dev_priv);
+               ret = __intel_uc_reset_hw(i915);
                if (ret)
                        goto err_out;
 
-               if (USES_HUC(dev_priv)) {
+               if (USES_HUC(i915)) {
                        ret = intel_huc_fw_upload(huc);
                        if (ret)
                                goto err_out;
@@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
        if (ret)
                goto err_log_capture;
 
-       if (USES_HUC(dev_priv)) {
+       if (USES_HUC(i915)) {
                ret = intel_huc_auth(huc);
                if (ret)
                        goto err_communication;
        }
 
-       if (USES_GUC_SUBMISSION(dev_priv)) {
+       if (USES_GUC_SUBMISSION(i915)) {
                ret = intel_guc_submission_enable(guc);
                if (ret)
                        goto err_communication;
        }
 
-       dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
+       dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
                 guc->fw.major_ver_found, guc->fw.minor_ver_found);
-       dev_info(dev_priv->drm.dev, "GuC submission %s\n",
-                enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
-       dev_info(dev_priv->drm.dev, "HuC %s\n",
-                enableddisabled(USES_HUC(dev_priv)));
+       dev_info(i915->drm.dev, "GuC submission %s\n",
+                enableddisabled(USES_GUC_SUBMISSION(i915)));
+       dev_info(i915->drm.dev, "HuC %s\n",
+                enableddisabled(USES_HUC(i915)));
 
        return 0;
 
@@ -428,20 +427,20 @@ err_out:
        if (GEM_WARN_ON(ret == -EIO))
                ret = -EINVAL;
 
-       dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
+       dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
        return ret;
 }
 
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+void intel_uc_fini_hw(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return;
 
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
+       GEM_BUG_ON(!HAS_GUC(i915));
 
-       if (USES_GUC_SUBMISSION(dev_priv))
+       if (USES_GUC_SUBMISSION(i915))
                intel_guc_submission_disable(guc);
 
        guc_disable_communication(guc);
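[Editor's note] The intel_uc_init_hw() hunk above keeps the established bring-up order — reset, optional HuC upload, GuC load, HuC auth, submission enable — and retries the whole sequence up to three times on Gen9. A minimal userspace sketch of that retry-with-reset pattern; reset_hw() and load_fw() are hypothetical stand-ins for the driver calls:

#include <errno.h>
#include <stdio.h>

static int loads;	/* pretend the firmware load fails twice, then works */

static int reset_hw(void) { return 0; }
static int load_fw(void)  { return ++loads < 3 ? -EIO : 0; }

static int uc_init_hw(int attempts)
{
	int ret = -EIO;

	while (attempts--) {
		/* Always reset just before (re)loading, so the state
		 * and timing stay predictable. */
		ret = reset_hw();
		if (ret)
			return ret;

		ret = load_fw();
		if (!ret)
			break;
	}

	return ret;
}

int main(void)
{
	printf("uc_init_hw: %d\n", uc_init_hw(3)); /* 0: third try succeeds */
	return 0;
}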
index 448293e..b892ca8 100644 (file)
@@ -1702,15 +1702,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        const u32 base = engine->mmio_base;
-       const i915_reg_t mode = RING_MI_MODE(base);
-
-       I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
-       if (__intel_wait_for_register_fw(dev_priv,
-                                        mode, MODE_IDLE, MODE_IDLE,
-                                        500, 0,
-                                        NULL))
-               DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
-                                engine->name);
+
+       if (intel_engine_stop_cs(engine))
+               DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
 
        I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
        POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
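[Editor's note] The gen3_stop_engine() hunk folds the open-coded STOP_RING sequence — write _MASKED_BIT_ENABLE(STOP_RING) into RING_MI_MODE, then poll for MODE_IDLE — into the shared intel_engine_stop_cs() helper. The masked-write idiom both rely on is worth spelling out: the top 16 bits of the written value select which of the low 16 bits the write actually touches, so unrelated bits survive. A standalone model (the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b) ((b) << 16)

/* Apply a masked write the way i915's masked registers do in hardware. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	const uint32_t STOP_RING = 1u << 8;	/* illustrative bit */
	uint32_t mi_mode = 0;

	mi_mode = masked_write(mi_mode, MASKED_BIT_ENABLE(STOP_RING));
	printf("after enable:  %#x\n", mi_mode);	/* bit set */
	mi_mode = masked_write(mi_mode, MASKED_BIT_DISABLE(STOP_RING));
	printf("after disable: %#x\n", mi_mode);	/* bit clear */
	return 0;
}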
@@ -2099,21 +2093,25 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
 {
        struct intel_engine_cs *engine;
        unsigned int tmp;
+       int ret;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-               if (gen8_reset_engine_start(engine))
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+               if (gen8_reset_engine_start(engine)) {
+                       ret = -EIO;
                        goto not_ready;
+               }
+       }
 
        if (INTEL_GEN(dev_priv) >= 11)
-               return gen11_reset_engines(dev_priv, engine_mask);
+               ret = gen11_reset_engines(dev_priv, engine_mask);
        else
-               return gen6_reset_engines(dev_priv, engine_mask);
+               ret = gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                gen8_reset_engine_cancel(engine);
 
-       return -EIO;
+       return ret;
 }
 
 typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
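[Editor's note] The gen8_reset_engines() change is an error-propagation fix: previously a failing gen8_reset_engine_start() returned a hard-coded -EIO, and a failure inside the gen6/gen11 helper returned without ever running gen8_reset_engine_cancel() on the engines already prepared. The new version funnels every exit through the cancel loop while preserving the first real error. The pattern, reduced to a userspace sketch:

#include <errno.h>
#include <stdio.h>

static int prepared[4];

static int  start(int i)   { prepared[i] = 1; return 0; }
static void cancel(int i)  { prepared[i] = 0; }
static int  do_reset(void) { return -ETIMEDOUT; }	/* pretend the reset fails */

static int reset_engines(int n)
{
	int ret = 0, i;

	for (i = 0; i < n; i++) {
		if (start(i)) {
			ret = -EIO;
			goto not_ready;
		}
	}

	ret = do_reset();

not_ready:
	/* The cancel loop runs on every path; ret keeps the real error. */
	for (i = 0; i < n; i++)
		cancel(i);

	return ret;
}

int main(void)
{
	printf("reset_engines: %d, prepared[0]=%d\n",
	       reset_engines(4), prepared[0]);	/* -110, 0 */
	return 0;
}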
@@ -2176,6 +2174,8 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
                 * Thus assume it is best to stop engines on all gens
                 * where we have a gpu reset.
                 *
+                * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+                *
                 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
                 *
                 * FIXME: Wa for more modern gens needs to be validated
index 47478d6..2fbe931 100644 (file)
@@ -67,21 +67,21 @@ struct intel_uncore_funcs {
        void (*force_wake_put)(struct drm_i915_private *dev_priv,
                               enum forcewake_domains domains);
 
-       uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
-       uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
-       uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
-       uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
+       u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+                        i915_reg_t r, bool trace);
+       u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+                         i915_reg_t r, bool trace);
+       u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+                         i915_reg_t r, bool trace);
+       u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+                         i915_reg_t r, bool trace);
 
        void (*mmio_writeb)(struct drm_i915_private *dev_priv,
-                           i915_reg_t r, uint8_t val, bool trace);
+                           i915_reg_t r, u8 val, bool trace);
        void (*mmio_writew)(struct drm_i915_private *dev_priv,
-                           i915_reg_t r, uint16_t val, bool trace);
+                           i915_reg_t r, u16 val, bool trace);
        void (*mmio_writel)(struct drm_i915_private *dev_priv,
-                           i915_reg_t r, uint32_t val, bool trace);
+                           i915_reg_t r, u32 val, bool trace);
 };
 
 struct intel_forcewake_range {
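[Editor's note] The struct intel_uncore_funcs hunk is a pure type cleanup (uint*_t to the kernel's u8/u16/u32/u64), but the struct it touches illustrates a common driver pattern: a per-platform vtable of MMIO accessors, installed once at init so every hot path dispatches through function pointers instead of branching on the generation. A self-contained model with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct mmio_funcs {
	uint32_t (*readl)(uint32_t *bar, uint32_t off);
	void     (*writel)(uint32_t *bar, uint32_t off, uint32_t val);
};

static uint32_t plain_readl(uint32_t *bar, uint32_t off)
{
	return bar[off / 4];
}

static void plain_writel(uint32_t *bar, uint32_t off, uint32_t val)
{
	bar[off / 4] = val;
}

/* One table per "generation"; picked once, used everywhere. */
static const struct mmio_funcs plain_funcs = { plain_readl, plain_writel };

int main(void)
{
	uint32_t bar[16] = { 0 };	/* fake register BAR */
	const struct mmio_funcs *f = &plain_funcs;

	f->writel(bar, 0x8, 0xdeadbeef);
	printf("%#x\n", f->readl(bar, 0x8));
	return 0;
}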
index 4584682..bba98cf 100644 (file)
@@ -318,6 +318,12 @@ enum vbt_gmbus_ddi {
        DDC_BUS_DDI_C,
        DDC_BUS_DDI_D,
        DDC_BUS_DDI_F,
+       ICL_DDC_BUS_DDI_A = 0x1,
+       ICL_DDC_BUS_DDI_B,
+       ICL_DDC_BUS_PORT_1 = 0x4,
+       ICL_DDC_BUS_PORT_2,
+       ICL_DDC_BUS_PORT_3,
+       ICL_DDC_BUS_PORT_4,
 };
 
 #define VBT_DP_MAX_LINK_RATE_HBR3      0
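[Editor's note] The vbt_gmbus_ddi hunk appends ICL names with explicit values into the existing enum, so the same raw VBT field can be decoded under per-platform names; ICL_DDC_BUS_DDI_A = 0x1 presumably aliases one of the earlier enumerators. C permits duplicate enumerator values, which is what makes this work. A tiny sketch with illustrative names:

#include <stdio.h>

enum ddc_pin {
	DDC_PIN_B = 0x1,
	DDC_PIN_C,
	ICL_DDC_PIN_A = 0x1,	/* same raw value, platform-appropriate name */
	ICL_DDC_PIN_B,		/* 0x2, aliases DDC_PIN_C */
};

int main(void)
{
	printf("DDC_PIN_B == ICL_DDC_PIN_A: %d\n", DDC_PIN_B == ICL_DDC_PIN_A);
	return 0;
}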
@@ -414,7 +420,9 @@ struct child_device_config {
        u16 extended_type;
        u8 dvo_function;
        u8 dp_usb_type_c:1;                                     /* 195 */
-       u8 flags2_reserved:7;                                   /* 195 */
+       u8 tbt:1;                                               /* 209 */
+       u8 flags2_reserved:2;                                   /* 195 */
+       u8 dp_port_trace_length:4;                              /* 209 */
        u8 dp_gpio_index;                                       /* 195 */
        u16 dp_gpio_pin_num;                                    /* 195 */
        u8 dp_iboost_level:4;                                   /* 196 */
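[Editor's note] The child_device_config hunk carves the old 7-bit reserved field into a Thunderbolt flag, two reserved bits, and a 4-bit DP port trace length; together with the preceding dp_usb_type_c:1 the bitfields still sum to exactly one byte, so every later field in the VBT block keeps its offset. A compile-checked model of that invariant:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct flags2 {
	uint8_t dp_usb_type_c:1;	/* 1 + 1 + 2 + 4 = 8 bits */
	uint8_t tbt:1;
	uint8_t reserved:2;
	uint8_t dp_port_trace_length:4;
};

int main(void)
{
	static_assert(sizeof(struct flags2) == 1, "flags2 must stay one byte");

	struct flags2 f = { .tbt = 1, .dp_port_trace_length = 9 };
	printf("tbt=%u trace=%u\n", f.tbt, f.dp_port_trace_length);
	return 0;
}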
@@ -448,7 +456,7 @@ struct bdb_general_definitions {
         * number = (block_size - sizeof(bdb_general_definitions))/
         *           defs->child_dev_size;
         */
-       uint8_t devices[0];
+       u8 devices[0];
 } __packed;
 
 /* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
@@ -635,7 +643,7 @@ struct bdb_sdvo_lvds_options {
 #define BDB_DRIVER_FEATURE_NO_LVDS             0
 #define BDB_DRIVER_FEATURE_INT_LVDS            1
 #define BDB_DRIVER_FEATURE_SDVO_LVDS           2
-#define BDB_DRIVER_FEATURE_EDP                 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS       3
 
 struct bdb_driver_features {
        u8 boot_dev_algorithm:1;
index 2df3538..f8bb32e 100644 (file)
  * - Public functions to init or apply the given workaround type.
  */
 
-static int wa_add(struct drm_i915_private *dev_priv,
-                 i915_reg_t addr,
-                 const u32 mask, const u32 val)
+static void wa_add(struct drm_i915_private *i915,
+                  i915_reg_t reg, const u32 mask, const u32 val)
 {
-       const unsigned int idx = dev_priv->workarounds.count;
+       struct i915_workarounds *wa = &i915->workarounds;
+       unsigned int start = 0, end = wa->count;
+       unsigned int addr = i915_mmio_reg_offset(reg);
+       struct i915_wa_reg *r;
+
+       while (start < end) {
+               unsigned int mid = start + (end - start) / 2;
+
+               if (wa->reg[mid].addr < addr) {
+                       start = mid + 1;
+               } else if (wa->reg[mid].addr > addr) {
+                       end = mid;
+               } else {
+                       r = &wa->reg[mid];
+
+                       if ((mask & ~r->mask) == 0) {
+                               DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+                                         addr, r->mask, r->value);
+
+                               r->value &= ~mask;
+                       }
+
+                       r->value |= val;
+                       r->mask  |= mask;
+                       return;
+               }
+       }
 
-       if (WARN_ON(idx >= I915_MAX_WA_REGS))
-               return -ENOSPC;
+       if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
+               DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
+                         addr, mask, val);
+               return;
+       }
 
-       dev_priv->workarounds.reg[idx].addr = addr;
-       dev_priv->workarounds.reg[idx].value = val;
-       dev_priv->workarounds.reg[idx].mask = mask;
+       r = &wa->reg[wa->count++];
+       r->addr  = addr;
+       r->value = val;
+       r->mask  = mask;
 
-       dev_priv->workarounds.count++;
+       while (r-- > wa->reg) {
+               GEM_BUG_ON(r[0].addr == r[1].addr);
+               if (r[1].addr > r[0].addr)
+                       break;
 
-       return 0;
+               swap(r[1], r[0]);
+       }
 }
 
-#define WA_REG(addr, mask, val) do { \
-               const int r = wa_add(dev_priv, (addr), (mask), (val)); \
-               if (r) \
-                       return r; \
-       } while (0)
+#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
 
 #define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
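[Editor's note] The rewritten wa_add() keeps the workaround list sorted by register offset: a binary search either finds an existing entry for the register and merges mask/value into it, or falls out, in which case the new entry is appended and swapped backwards into position (one insertion-sort step). A reduced userspace model — the driver's overwrite warning and GEM_BUG_ON checks are omitted for brevity:

#include <stdio.h>

#define MAX_WA 8

struct wa { unsigned int addr, mask, val; };

static struct wa regs[MAX_WA];
static unsigned int count;

static void wa_add(unsigned int addr, unsigned int mask, unsigned int val)
{
	unsigned int start = 0, end = count;

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (regs[mid].addr < addr) {
			start = mid + 1;
		} else if (regs[mid].addr > addr) {
			end = mid;
		} else {
			/* Same register: merge instead of duplicating. */
			regs[mid].val |= val;
			regs[mid].mask |= mask;
			return;
		}
	}

	if (count == MAX_WA)
		return;	/* dropped, as the driver warns */

	regs[count++] = (struct wa){ addr, mask, val };

	/* Bubble the new entry back until the array is sorted again. */
	for (unsigned int i = count - 1;
	     i > 0 && regs[i - 1].addr > regs[i].addr; i--) {
		struct wa tmp = regs[i - 1];

		regs[i - 1] = regs[i];
		regs[i] = tmp;
	}
}

int main(void)
{
	wa_add(0xe100, 0x1, 0x1);
	wa_add(0x7004, 0x2, 0x2);
	wa_add(0xe100, 0x4, 0x4);	/* merged into the first entry */

	for (unsigned int i = 0; i < count; i++)
		printf("%04x mask=%x val=%x\n",
		       regs[i].addr, regs[i].mask, regs[i].val);
	return 0;	/* prints 7004 first: the list stays sorted */
}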
@@ -463,6 +492,25 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
         */
        WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
 
+       /* Wa_2006611047:icl (pre-prod)
+        * Formerly known as WaDisableImprovedTdlClkGating
+        */
+       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+               WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+                                 GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+
+       /* WaEnableStateCacheRedirectToCS:icl */
+       WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
+                         GEN11_STATE_CACHE_REDIRECT_TO_CS);
+
+       /* Wa_2006665173:icl (pre-prod) */
+       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+               WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+                                 GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
+       /* WaEnableFloatBlendOptimization:icl */
+       WA_SET_BIT_MASKED(GEN10_CACHE_MODE_SS, FLOAT_BLEND_OPTIMIZATION_ENABLE);
+
        return 0;
 }
 
@@ -521,7 +569,7 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 
        *cs++ = MI_LOAD_REGISTER_IMM(w->count);
        for (i = 0; i < w->count; i++) {
-               *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+               *cs++ = w->reg[i].addr;
                *cs++ = w->reg[i].value;
        }
        *cs++ = MI_NOOP;
@@ -647,6 +695,19 @@ static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
                   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       /* WaKBLVECSSemaphoreWaitPoll:kbl */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
+               struct intel_engine_cs *engine;
+               unsigned int tmp;
+
+               for_each_engine(engine, dev_priv, tmp) {
+                       if (engine->id == RCS)
+                               continue;
+
+                       I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
+               }
+       }
 }
 
 static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
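[Editor's note] WaKBLVECSSemaphoreWaitPoll above has the usual shape of a per-engine workaround: every engine's register file sits at its own mmio_base, a macro adds the register's fixed offset, and one loop covers all engines while skipping the one that must not be touched (RCS here). A sketch with illustrative bases and a hypothetical register offset:

#include <stdio.h>

enum { RCS, BCS, VCS, VECS, NUM_ENGINES };

static const unsigned int mmio_base[NUM_ENGINES] = {
	[RCS] = 0x2000, [BCS] = 0x22000, [VCS] = 0x12000, [VECS] = 0x1a000,
};

#define SEMA_WAIT_POLL(base) ((base) + 0x48)	/* hypothetical offset */

int main(void)
{
	for (int id = 0; id < NUM_ENGINES; id++) {
		if (id == RCS)	/* the workaround skips the render engine */
			continue;

		printf("write 1 -> %#x\n", SEMA_WAIT_POLL(mmio_base[id]));
	}
	return 0;
}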
@@ -672,8 +733,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
+static void wa_init_mcr(struct drm_i915_private *dev_priv)
+{
+       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       u32 mcr;
+       u32 mcr_slice_subslice_mask;
+
+       /*
+        * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
+        * L3 banks could be fused off in a single-slice scenario. If that is
+        * the case, we might need to program MCR select to a valid L3Bank
+        * by default, to make sure we correctly read certain registers
+        * later on (in the range 0xB100 - 0xB3FF).
+        * This might be incompatible with
+        * WaProgramMgsrForCorrectSliceSpecificMmioReads.
+        * Fortunately, this should not happen in production hardware, so
+        * we only assert that this is the case (instead of implementing
+        * something more complex that requires checking the range of every
+        * MMIO read).
+        */
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           is_power_of_2(sseu->slice_mask)) {
+               /*
+                * read FUSE3 for the enabled L3 bank IDs; if a bank matches
+                * an enabled subslice, there is no need to redirect the MCR
+                * packet
+                */
+               u32 slice = fls(sseu->slice_mask);
+               u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+               u8 ss_mask = sseu->subslice_mask[slice];
+
+               u8 enabled_mask = (ss_mask | ss_mask >>
+                                  GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+               u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+               /*
+                * Production silicon should have matching L3 banks and
+                * subslices enabled
+                */
+               WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+       }
+
+       mcr = I915_READ(GEN8_MCR_SELECTOR);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+                                         GEN11_MCR_SUBSLICE_MASK;
+       else
+               mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+                                         GEN8_MCR_SUBSLICE_MASK;
+       /*
+        * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+        * Before any MMIO read into slice/subslice-specific registers, the
+        * MCR packet control register needs to be programmed to point to
+        * any enabled s/ss pair. Otherwise, incorrect values will be
+        * returned. This means each subsequent MMIO read will be forwarded
+        * to a specific s/ss combination, but this is OK since these
+        * registers are consistent across s/ss in almost all cases. On the
+        * rare occasions, such as INSTDONE, where the value depends on the
+        * s/ss combo, the read should be done with read_subslice_reg.
+        */
+       mcr &= ~mcr_slice_subslice_mask;
+       mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
+       I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+}
+
 static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 {
+       wa_init_mcr(dev_priv);
+
        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
        if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
                I915_WRITE(GAMT_CHKN_BIT_REG,
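[Editor's note] The fuse check inside wa_init_mcr() folds the subslice mask over the L3-bank pairs before comparing it with the FUSE3 field: (ss_mask | ss_mask >> GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK. A worked example under assumed constant values (the real GEN10 constants may differ):

#include <stdint.h>
#include <stdio.h>

#define L3BANK_PAIR_COUNT 4	/* assumed stand-in for GEN10_L3BANK_PAIR_COUNT */
#define L3BANK_MASK	  0x0f	/* assumed stand-in for GEN10_L3BANK_MASK */

int main(void)
{
	uint8_t ss_mask = 0x30;	/* subslices 4 and 5 enabled */
	uint8_t fuse    = 0x03;	/* pretend FUSE3 reports banks 0 and 1 */

	/* Fold the high pair onto the low pair, keep the bank bits:
	 * (0x30 | 0x03) & 0x0f == 0x03. */
	uint8_t enabled = (ss_mask | ss_mask >> L3BANK_PAIR_COUNT) & L3BANK_MASK;

	/* wa_init_mcr() warns unless (enabled & fuse field) == enabled. */
	printf("enabled=%#x fuse=%#x ok=%d\n",
	       enabled, fuse, (enabled & fuse) == enabled);
	return 0;
}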
@@ -692,6 +819,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 
 static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 {
+       wa_init_mcr(dev_priv);
+
+       /* This is not a Wa. Enable it for better image quality */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
@@ -772,6 +901,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
                   PMFLUSHDONE_LNICRSDROP |
                   PMFLUSH_GAPL3UNBLOCK |
                   PMFLUSHDONE_LNEBLK);
+
+       /* Wa_1406463099:icl
+        * Formerly known as WaGamTlbPendError
+        */
+       I915_WRITE(GAMT_CHKN_BIT_REG,
+                  I915_READ(GAMT_CHKN_BIT_REG) |
+                  GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
index 91c7291..fbe4324 100644 (file)
@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 
 static int igt_check_page_sizes(struct i915_vma *vma)
 {
-       struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+       struct drm_i915_private *i915 = vma->vm->i915;
        unsigned int supported = INTEL_INFO(i915)->page_sizes;
        struct drm_i915_gem_object *obj = vma->obj;
        int err = 0;
@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
 static int igt_mock_exhaust_device_supported_pages(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
        unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
                                goto out_put;
                        }
 
-                       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+                       vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                        if (IS_ERR(vma)) {
                                err = PTR_ERR(vma);
                                goto out_put;
@@ -458,7 +458,7 @@ out_device:
 static int igt_mock_ppgtt_misaligned_dma(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        struct drm_i915_gem_object *obj;
        int bit;
@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                /* Force the page size for this object */
                obj->mm.page_sizes.sg = page_size;
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out_unpin;
@@ -591,7 +591,7 @@ static void close_object_list(struct list_head *objects,
        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (!IS_ERR(vma))
                        i915_vma_close(vma);
 
@@ -604,8 +604,8 @@ static void close_object_list(struct list_head *objects,
 static int igt_mock_ppgtt_huge_fill(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
-       unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
+       unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
        unsigned long page_num;
        bool single = false;
        LIST_HEAD(objects);
@@ -641,7 +641,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
@@ -725,7 +725,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 static int igt_mock_ppgtt_64K(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
        struct drm_i915_gem_object *obj;
        const struct object_info {
                unsigned int size;
@@ -819,7 +819,7 @@ static int igt_mock_ppgtt_64K(void *arg)
                         */
                        obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
 
-                       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+                       vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                        if (IS_ERR(vma)) {
                                err = PTR_ERR(vma);
                                goto out_object_unpin;
@@ -887,8 +887,8 @@ out_object_put:
 static struct i915_vma *
 gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 {
-       struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
-       const int gen = INTEL_GEN(vma->vm->i915);
+       struct drm_i915_private *i915 = vma->vm->i915;
+       const int gen = INTEL_GEN(i915);
        unsigned int count = vma->size >> PAGE_SHIFT;
        struct drm_i915_gem_object *obj;
        struct i915_vma *batch;
@@ -1003,7 +1003,7 @@ static int gpu_write(struct i915_vma *vma,
        reservation_object_unlock(vma->resv);
 
 err_request:
-       __i915_request_add(rq, err == 0);
+       i915_request_add(rq);
 
        return err;
 }
@@ -1047,7 +1047,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
                            u32 dword, u32 val)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
        struct i915_vma *vma;
        int err;
@@ -1100,7 +1101,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
                          struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        static struct intel_engine_cs *engines[I915_NUM_ENGINES];
        struct intel_engine_cs *engine;
        I915_RND_STATE(prng);
@@ -1439,7 +1441,7 @@ static int igt_ppgtt_pin_update(void *arg)
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out_put;
@@ -1493,7 +1495,7 @@ static int igt_ppgtt_pin_update(void *arg)
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+       vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto out_put;
@@ -1531,7 +1533,8 @@ static int igt_tmpfs_fallback(void *arg)
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *i915 = ctx->i915;
        struct vfsmount *gemfs = i915->mm.gemfs;
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *vaddr;
@@ -1587,7 +1590,8 @@ static int igt_shrink_thp(void *arg)
 {
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *i915 = ctx->i915;
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned int flags = PIN_USER;
@@ -1696,14 +1700,14 @@ int i915_gem_huge_page_mock_selftests(void)
                goto out_unlock;
        }
 
-       if (!i915_vm_is_48bit(&ppgtt->base)) {
+       if (!i915_vm_is_48bit(&ppgtt->vm)) {
                pr_err("failed to create 48b PPGTT\n");
                err = -EINVAL;
                goto out_close;
        }
 
        /* If we ever hit this then it's time to mock the 64K scratch */
-       if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+       if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
                pr_err("PPGTT missing 64K scratch page\n");
                err = -EINVAL;
                goto out_close;
@@ -1712,7 +1716,7 @@ int i915_gem_huge_page_mock_selftests(void)
        err = i915_subtests(tests, ppgtt);
 
 out_close:
-       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_close(&ppgtt->vm);
        i915_ppgtt_put(ppgtt);
 
 out_unlock:
@@ -1758,7 +1762,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
        }
 
        if (ctx->ppgtt)
-               ctx->ppgtt->base.scrub_64K = true;
+               ctx->ppgtt->vm.scrub_64K = true;
 
        err = i915_subtests(tests, ctx);
 
index 340a98c..a490009 100644 (file)
@@ -199,7 +199,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 
        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
-               __i915_request_add(rq, false);
+               i915_request_add(rq);
                i915_vma_unpin(vma);
                return PTR_ERR(cs);
        }
@@ -229,7 +229,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
        reservation_object_add_excl_fence(obj->resv, &rq->fence);
        reservation_object_unlock(obj->resv);
 
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        return 0;
 }
index ddb03f0..90c3c36 100644 (file)
@@ -26,6 +26,7 @@
 #include "igt_flush_test.h"
 
 #include "mock_drm.h"
+#include "mock_gem_device.h"
 #include "huge_gem_object.h"
 
 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -114,7 +115,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_address_space *vm =
-               ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct i915_request *rq;
        struct i915_vma *vma;
        struct i915_vma *batch;
@@ -181,12 +182,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        reservation_object_add_excl_fence(obj->resv, &rq->fence);
        reservation_object_unlock(obj->resv);
 
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        return 0;
 
 err_request:
-       __i915_request_add(rq, false);
+       i915_request_add(rq);
 err_batch:
        i915_vma_unpin(batch);
 err_vma:
@@ -289,7 +290,7 @@ create_test_object(struct i915_gem_context *ctx,
 {
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm =
-               ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+               ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
        u64 size;
        int err;
 
@@ -420,6 +421,130 @@ out_unlock:
        return err;
 }
 
+static __maybe_unused const char *
+__engine_name(struct drm_i915_private *i915, unsigned int engines)
+{
+       struct intel_engine_cs *engine;
+       unsigned int tmp;
+
+       if (engines == ALL_ENGINES)
+               return "all";
+
+       for_each_engine_masked(engine, i915, engines, tmp)
+               return engine->name;
+
+       return "none";
+}
+
+static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
+                                         struct i915_gem_context *ctx,
+                                         unsigned int engines)
+{
+       struct intel_engine_cs *engine;
+       unsigned int tmp;
+       int err;
+
+       GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               struct i915_request *rq;
+
+               rq = i915_request_alloc(engine, ctx);
+               if (IS_ERR(rq))
+                       return PTR_ERR(rq);
+
+               i915_request_add(rq);
+       }
+
+       err = i915_gem_switch_to_kernel_context(i915);
+       if (err)
+               return err;
+
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               if (!engine_has_kernel_context_barrier(engine)) {
+                       pr_err("kernel context not last on engine %s!\n",
+                              engine->name);
+                       return -EINVAL;
+               }
+       }
+
+       err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+       if (err)
+               return err;
+
+       GEM_BUG_ON(i915->gt.active_requests);
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               if (engine->last_retired_context->gem_context != i915->kernel_context) {
+                       pr_err("engine %s not idling in kernel context!\n",
+                              engine->name);
+                       return -EINVAL;
+               }
+       }
+
+       err = i915_gem_switch_to_kernel_context(i915);
+       if (err)
+               return err;
+
+       if (i915->gt.active_requests) {
+               pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
+                      i915->gt.active_requests);
+               return -EINVAL;
+       }
+
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               if (!intel_engine_has_kernel_context(engine)) {
+                       pr_err("kernel context not last on engine %s!\n",
+                              engine->name);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int igt_switch_to_kernel_context(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_engine_cs *engine;
+       struct i915_gem_context *ctx;
+       enum intel_engine_id id;
+       int err;
+
+       /*
+        * A core premise of switching to the kernel context is that
+        * if an engine is already idling in the kernel context, we
+        * do not emit another request and wake it up. The other premise
+        * is that we do indeed end up idling in the kernel context.
+        */
+
+       mutex_lock(&i915->drm.struct_mutex);
+       ctx = kernel_context(i915);
+       if (IS_ERR(ctx)) {
+               mutex_unlock(&i915->drm.struct_mutex);
+               return PTR_ERR(ctx);
+       }
+
+       /* First check idling each individual engine */
+       for_each_engine(engine, i915, id) {
+               err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
+               if (err)
+                       goto out_unlock;
+       }
+
+       /* Now en masse */
+       err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
+       if (err)
+               goto out_unlock;
+
+out_unlock:
+       GEM_TRACE_DUMP_ON(err);
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       kernel_context_close(ctx);
+       return err;
+}
+
 static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
 {
        struct drm_i915_gem_object *obj;
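[Editor's note] The igt_switch_to_kernel_context selftest added above verifies two properties: after a switch plus an idle wait, every engine last retired the kernel context, and a second switch emits no further requests. The idempotence half, reduced to a toy model with invented names:

#include <stdio.h>

static int active_requests;
static int last_ctx;	/* 0 == kernel context */

static void switch_to_kernel_context(void)
{
	if (last_ctx != 0) {	/* only emit a request if one is needed */
		active_requests++;
		last_ctx = 0;
	}
}

static void wait_for_idle(void) { active_requests = 0; }

int main(void)
{
	last_ctx = 1;			/* a user context was running */
	switch_to_kernel_context();	/* emits one request */
	wait_for_idle();
	switch_to_kernel_context();	/* must be a no-op */
	printf("requests after second switch: %d\n", active_requests); /* 0 */
	return 0;
}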
@@ -432,7 +557,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
        list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma))
                        continue;
 
@@ -447,9 +572,28 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
        i915_gem_fini_aliasing_ppgtt(i915);
 }
 
+int i915_gem_context_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_switch_to_kernel_context),
+       };
+       struct drm_i915_private *i915;
+       int err;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       err = i915_subtests(tests, i915);
+
+       drm_dev_unref(&i915->drm);
+       return err;
+}
+
 int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 {
        static const struct i915_subtest tests[] = {
+               SUBTEST(igt_switch_to_kernel_context),
                SUBTEST(igt_ctx_exec),
        };
        bool fake_alias = false;
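[Editor's note] The new i915_gem_context_mock_selftests() follows the framework's standard shape: create a mock device, run a table of subtests against it, drop the device, and register the entry point via the selftest() list (see the i915_mock_selftests.h hunk later in this diff). A generic sketch of that shape — the names are illustrative, not the kernel's selftest API:

#include <stdio.h>

struct subtest {
	const char *name;
	int (*func)(void *arg);
};

static int test_example(void *arg)
{
	(void)arg;
	return 0;	/* 0 == pass, negative errno == fail */
}

static int run_subtests(const struct subtest *t, int n, void *arg)
{
	for (int i = 0; i < n; i++) {
		int err = t[i].func(arg);

		if (err) {
			printf("%s failed: %d\n", t[i].name, err);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	static const struct subtest tests[] = {
		{ "test_example", test_example },
	};
	int mock_device = 0;	/* stands in for mock_gem_device() */

	return run_subtests(tests, 1, &mock_device);
}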
index ab9d7be..2dc72a9 100644 (file)
@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
        u64 size;
 
        for (size = 0;
-            size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             size += I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
                return -EINVAL;
        }
 
-       if (list_empty(&i915->ggtt.base.inactive_list)) {
+       if (list_empty(&i915->ggtt.vm.inactive_list)) {
                pr_err("No objects on the GGTT inactive list!\n");
                return -EINVAL;
        }
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+       list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
                i915_vma_unpin(vma);
 }
 
@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
                goto cleanup;
 
        /* Everything is pinned, nothing should happen */
-       err = i915_gem_evict_something(&ggtt->base,
+       err = i915_gem_evict_something(&ggtt->vm,
                                       I915_GTT_PAGE_SIZE, 0, 0,
                                       0, U64_MAX,
                                       0);
@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
        unpin_ggtt(i915);
 
        /* Everything is unpinned, we should be able to evict something */
-       err = i915_gem_evict_something(&ggtt->base,
+       err = i915_gem_evict_something(&ggtt->vm,
                                       I915_GTT_PAGE_SIZE, 0, 0,
                                       0, U64_MAX,
                                       0);
@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
                goto cleanup;
 
        /* Everything is pinned, nothing should happen */
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (err != -ENOSPC) {
                pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
                       err);
@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
        unpin_ggtt(i915);
 
        /* Everything is unpinned, we should be able to evict the node */
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (err) {
                pr_err("i915_gem_evict_for_node returned err=%d\n",
                       err);
@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
         * i915_gtt_color_adjust throughout our driver, so using a mock color
         * adjust will work just fine for our purposes.
         */
-       ggtt->base.mm.color_adjust = mock_color_adjust;
+       ggtt->vm.mm.color_adjust = mock_color_adjust;
 
        obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
        i915_vma_unpin(vma);
 
        /* Remove just the second vma */
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (err) {
                pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
                goto cleanup;
@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
         */
        target.color = I915_CACHE_L3_LLC;
 
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (!err) {
                pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
                err = -EINVAL;
@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
 cleanup:
        unpin_ggtt(i915);
        cleanup_objects(i915);
-       ggtt->base.mm.color_adjust = NULL;
+       ggtt->vm.mm.color_adjust = NULL;
        return err;
 }
 
@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
                goto cleanup;
 
        /* Everything is pinned, nothing should happen */
-       err = i915_gem_evict_vm(&ggtt->base);
+       err = i915_gem_evict_vm(&ggtt->vm);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
                       err);
@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
 
        unpin_ggtt(i915);
 
-       err = i915_gem_evict_vm(&ggtt->base);
+       err = i915_gem_evict_vm(&ggtt->vm);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
                       err);
@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
 
        /* Reserve a block so that we know we have enough to fit a few rq */
        memset(&hole, 0, sizeof(hole));
-       err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+       err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
                                  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
-                                 0, i915->ggtt.base.total,
+                                 0, i915->ggtt.vm.total,
                                  PIN_NOEVICT);
        if (err)
                goto out_locked;
@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
                        goto out_locked;
                }
 
-               if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+               if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
                                        1ul << 20, 0, I915_COLOR_UNEVICTABLE,
-                                       0, i915->ggtt.base.total,
+                                       0, i915->ggtt.vm.total,
                                        PIN_NOEVICT)) {
                        kfree(r);
                        break;
index f7dc926..a406023 100644 (file)
@@ -135,30 +135,28 @@ static int igt_ppgtt_alloc(void *arg)
        struct drm_i915_private *dev_priv = arg;
        struct i915_hw_ppgtt *ppgtt;
        u64 size, last;
-       int err;
+       int err = 0;
 
        /* Allocate a ppgtt and try to fill the entire range */
 
        if (!USES_PPGTT(dev_priv))
                return 0;
 
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return -ENOMEM;
-
        mutex_lock(&dev_priv->drm.struct_mutex);
-       err = __hw_ppgtt_init(ppgtt, dev_priv);
-       if (err)
-               goto err_ppgtt;
+       ppgtt = __hw_ppgtt_create(dev_priv);
+       if (IS_ERR(ppgtt)) {
+               err = PTR_ERR(ppgtt);
+               goto err_unlock;
+       }
 
-       if (!ppgtt->base.allocate_va_range)
+       if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;
 
        /* Check we can allocate the entire range */
        for (size = 4096;
-            size <= ppgtt->base.total;
+            size <= ppgtt->vm.total;
             size <<= 2) {
-               err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+               err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +166,15 @@ static int igt_ppgtt_alloc(void *arg)
                        goto err_ppgtt_cleanup;
                }
 
-               ppgtt->base.clear_range(&ppgtt->base, 0, size);
+               ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
        }
 
        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096;
-            size <= ppgtt->base.total;
+            size <= ppgtt->vm.total;
             last = size, size <<= 2) {
-               err = ppgtt->base.allocate_va_range(&ppgtt->base,
-                                                   last, size - last);
+               err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+                                                 last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -188,10 +186,10 @@ static int igt_ppgtt_alloc(void *arg)
        }
 
 err_ppgtt_cleanup:
-       ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       ppgtt->vm.cleanup(&ppgtt->vm);
        kfree(ppgtt);
+err_unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
 }
 
@@ -987,12 +985,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                err = PTR_ERR(ppgtt);
                goto out_unlock;
        }
-       GEM_BUG_ON(offset_in_page(ppgtt->base.total));
-       GEM_BUG_ON(ppgtt->base.closed);
+       GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+       GEM_BUG_ON(ppgtt->vm.closed);
 
-       err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+       err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_close(&ppgtt->vm);
        i915_ppgtt_put(ppgtt);
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1059,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
 
        mutex_lock(&i915->drm.struct_mutex);
 restart:
-       list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
-       drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+       list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+       drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;
 
-               if (ggtt->base.mm.color_adjust)
-                       ggtt->base.mm.color_adjust(node, 0,
-                                                  &hole_start, &hole_end);
+               if (ggtt->vm.mm.color_adjust)
+                       ggtt->vm.mm.color_adjust(node, 0,
+                                                &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;
 
-               err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+               err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;
 
@@ -1134,7 +1132,7 @@ static int igt_ggtt_page(void *arg)
                goto out_free;
 
        memset(&tmp, 0, sizeof(tmp));
-       err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+       err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
@@ -1147,9 +1145,9 @@ static int igt_ggtt_page(void *arg)
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;
 
-               ggtt->base.insert_page(&ggtt->base,
-                                      i915_gem_object_get_dma_address(obj, 0),
-                                      offset, I915_CACHE_NONE, 0);
+               ggtt->vm.insert_page(&ggtt->vm,
+                                    i915_gem_object_get_dma_address(obj, 0),
+                                    offset, I915_CACHE_NONE, 0);
        }
 
        order = i915_random_order(count, &prng);
@@ -1188,7 +1186,7 @@ static int igt_ggtt_page(void *arg)
 
        kfree(order);
 out_remove:
-       ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+       ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(i915);
        drm_mm_remove_node(&tmp);
 out_unpin:
@@ -1229,7 +1227,7 @@ static int exercise_mock(struct drm_i915_private *i915,
        ppgtt = ctx->ppgtt;
        GEM_BUG_ON(!ppgtt);
 
-       err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+       err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
        mock_context_close(ctx);
        return err;
@@ -1270,7 +1268,7 @@ static int igt_gtt_reserve(void *arg)
 
        /* Start by filling the GGTT */
        for (total = 0;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1288,20 +1286,20 @@ static int igt_gtt_reserve(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1319,7 +1317,7 @@ static int igt_gtt_reserve(void *arg)
 
        /* Now we start forcing evictions */
        for (total = I915_GTT_PAGE_SIZE;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1337,20 +1335,20 @@ static int igt_gtt_reserve(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1371,7 +1369,7 @@ static int igt_gtt_reserve(void *arg)
                struct i915_vma *vma;
                u64 offset;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1383,18 +1381,18 @@ static int igt_gtt_reserve(void *arg)
                        goto out;
                }
 
-               offset = random_offset(0, i915->ggtt.base.total,
+               offset = random_offset(0, i915->ggtt.vm.total,
                                       2*I915_GTT_PAGE_SIZE,
                                       I915_GTT_MIN_ALIGNMENT);
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
                                           obj->base.size,
                                           offset,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1429,8 +1427,8 @@ static int igt_gtt_insert(void *arg)
                u64 start, end;
        } invalid_insert[] = {
                {
-                       i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
-                       0, i915->ggtt.base.total,
+                       i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+                       0, i915->ggtt.vm.total,
                },
                {
                        2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1458,7 @@ static int igt_gtt_insert(void *arg)
 
        /* Check a couple of obviously invalid requests */
        for (ii = invalid_insert; ii->size; ii++) {
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
                                          ii->size, ii->alignment,
                                          I915_COLOR_UNEVICTABLE,
                                          ii->start, ii->end,
@@ -1475,7 +1473,7 @@ static int igt_gtt_insert(void *arg)
 
        /* Start by filling the GGTT */
        for (total = 0;
-            total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1493,15 +1491,15 @@ static int igt_gtt_insert(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.base.total,
+                                         0, i915->ggtt.vm.total,
                                          0);
                if (err == -ENOSPC) {
                        /* maxed out the GGTT space */
@@ -1510,7 +1508,7 @@ static int igt_gtt_insert(void *arg)
                }
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1522,7 +1520,7 @@ static int igt_gtt_insert(void *arg)
        list_for_each_entry(obj, &objects, st_link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1542,7 +1540,7 @@ static int igt_gtt_insert(void *arg)
                struct i915_vma *vma;
                u64 offset;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1557,13 +1555,13 @@ static int igt_gtt_insert(void *arg)
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.base.total,
+                                         0, i915->ggtt.vm.total,
                                          0);
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1579,7 +1577,7 @@ static int igt_gtt_insert(void *arg)
 
        /* And then force evictions */
        for (total = 0;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1597,19 +1595,19 @@ static int igt_gtt_insert(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.base.total,
+                                         0, i915->ggtt.vm.total,
                                          0);
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1669,7 +1667,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_ggtt_page),
        };
 
-       GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+       GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
 
        return i915_subtests(tests, i915);
 }
index fbdb241..2b2dde9 100644 (file)
@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
 
        obj = huge_gem_object(i915,
                              nreal * PAGE_SIZE,
-                             i915->ggtt.base.total + PAGE_SIZE);
+                             i915->ggtt.vm.total + PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
@@ -311,7 +311,7 @@ static int igt_partial_tiling(void *arg)
 
        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
-                             (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+                             (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
@@ -440,7 +440,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
        struct i915_vma *vma;
        int err;
 
-       vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
index d16d741..1b70208 100644 (file)
@@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
 selftest(gtt, i915_gem_gtt_mock_selftests)
 selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(contexts, i915_gem_context_mock_selftests)
index 94bc2e1..63cd948 100644 (file)
@@ -342,9 +342,9 @@ static int live_nop_request(void *arg)
        mutex_lock(&i915->drm.struct_mutex);
 
        for_each_engine(engine, i915, id) {
-               IGT_TIMEOUT(end_time);
-               struct i915_request *request;
+               struct i915_request *request = NULL;
                unsigned long n, prime;
+               IGT_TIMEOUT(end_time);
                ktime_t times[2] = {};
 
                err = begin_live_test(&t, i915, __func__, engine->name);
@@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
        if (err)
                goto err;
 
-       vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
@@ -466,7 +466,7 @@ empty_request(struct intel_engine_cs *engine,
                goto out_request;
 
 out_request:
-       __i915_request_add(request, err == 0);
+       i915_request_add(request);
        return err ? ERR_PTR(err) : request;
 }
 
@@ -555,7 +555,8 @@ out_unlock:
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
        struct i915_gem_context *ctx = i915->kernel_context;
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
        struct i915_vma *vma;
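
recursive_batch() now spells out the address-space choice over two lines: use the context's private ppGTT when it exists, otherwise fall back to the shared global GTT. A tiny sketch of that fallback selection, again with hypothetical types:

#include <stdio.h>

struct address_space { const char *name; };

struct ppgtt { struct address_space vm; };

struct context {
	struct ppgtt *ppgtt;	/* NULL when the context has no private VM */
};

static struct address_space ggtt_vm = { "ggtt" };

/* Prefer the context's private address space, else the shared global one. */
static struct address_space *context_vm(struct context *ctx)
{
	return ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt_vm;
}

int main(void)
{
	struct ppgtt priv = { { "ppgtt" } };
	struct context with = { &priv }, without = { 0 };

	printf("%s %s\n", context_vm(&with)->name, context_vm(&without)->name);
	return 0;
}
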
index e90f972..8400a8c 100644 (file)
@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
 {
        bool ok = true;
 
-       if (vma->vm != &ctx->ppgtt->base) {
+       if (vma->vm != &ctx->ppgtt->vm) {
                pr_err("VMA created with wrong VM\n");
                ok = false;
        }
@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
        list_for_each_entry(obj, objects, st_link) {
                for (pinned = 0; pinned <= 1; pinned++) {
                        list_for_each_entry(ctx, contexts, link) {
-                               struct i915_address_space *vm =
-                                       &ctx->ppgtt->base;
+                               struct i915_address_space *vm = &ctx->ppgtt->vm;
                                struct i915_vma *vma;
                                int err;
 
@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
                VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
                VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
                VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
 
                VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
                INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
-               INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+               INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
                INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
 
                VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
                VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
                VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
                NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
-               VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
-               VALID(i915->ggtt.base.total, PIN_GLOBAL),
-               NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+               VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+               VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+               NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
                NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
                INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
-               INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+               INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
                INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
 
                VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
                 * variable start, end and size.
                 */
                NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
-               NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+               NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
                NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+               NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
 #endif
                { },
 #undef NOSPACE
@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
         * focusing on error handling of boundary conditions.
         */
 
-       GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+       GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
 
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto out;
 
@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
 static int igt_vma_rotate(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct i915_address_space *vm = &i915->ggtt.base;
+       struct i915_address_space *vm = &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        const struct intel_rotation_plane_info planes[] = {
                { .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
 static int igt_vma_partial(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct i915_address_space *vm = &i915->ggtt.base;
+       struct i915_address_space *vm = &i915->ggtt.vm;
        const unsigned int npages = 1021; /* prime! */
        struct drm_i915_gem_object *obj;
        const struct phase {
index 438e0b0..fe7d319 100644 (file)
@@ -105,7 +105,10 @@ static int emit_recurse_batch(struct hang *h,
                              struct i915_request *rq)
 {
        struct drm_i915_private *i915 = h->i915;
-       struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               rq->gem_context->ppgtt ?
+               &rq->gem_context->ppgtt->vm :
+               &i915->ggtt.vm;
        struct i915_vma *hws, *vma;
        unsigned int flags;
        u32 *batch;
@@ -242,7 +245,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
        err = emit_recurse_batch(h, rq);
        if (err) {
-               __i915_request_add(rq, false);
+               i915_request_add(rq);
                return ERR_PTR(err);
        }
 
@@ -315,7 +318,7 @@ static int igt_hang_sanitycheck(void *arg)
                *h.batch = MI_BATCH_BUFFER_END;
                i915_gem_chipset_flush(i915);
 
-               __i915_request_add(rq, true);
+               i915_request_add(rq);
 
                timeout = i915_request_wait(rq,
                                            I915_WAIT_LOCKED,
@@ -461,7 +464,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                                }
 
                                i915_request_get(rq);
-                               __i915_request_add(rq, true);
+                               i915_request_add(rq);
                                mutex_unlock(&i915->drm.struct_mutex);
 
                                if (!wait_until_running(&h, rq)) {
@@ -560,6 +563,30 @@ struct active_engine {
 #define TEST_SELF      BIT(2)
 #define TEST_PRIORITY  BIT(3)
 
+static int active_request_put(struct i915_request *rq)
+{
+       int err = 0;
+
+       if (!rq)
+               return 0;
+
+       if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+               GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+                         rq->engine->name,
+                         rq->fence.context,
+                         rq->fence.seqno,
+                         i915_request_global_seqno(rq));
+               GEM_TRACE_DUMP();
+
+               i915_gem_set_wedged(rq->i915);
+               err = -EIO;
+       }
+
+       i915_request_put(rq);
+
+       return err;
+}
+
 static int active_engine(void *data)
 {
        I915_RND_STATE(prng);
@@ -608,24 +635,20 @@ static int active_engine(void *data)
                i915_request_add(new);
                mutex_unlock(&engine->i915->drm.struct_mutex);
 
-               if (old) {
-                       if (i915_request_wait(old, 0, HZ) < 0) {
-                               GEM_TRACE("%s timed out.\n", engine->name);
-                               GEM_TRACE_DUMP();
-
-                               i915_gem_set_wedged(engine->i915);
-                               i915_request_put(old);
-                               err = -EIO;
-                               break;
-                       }
-                       i915_request_put(old);
-               }
+               err = active_request_put(old);
+               if (err)
+                       break;
 
                cond_resched();
        }
 
-       for (count = 0; count < ARRAY_SIZE(rq); count++)
-               i915_request_put(rq[count]);
+       for (count = 0; count < ARRAY_SIZE(rq); count++) {
+               int err__ = active_request_put(rq[count]);
+
+               /* Keep the first error */
+               if (!err)
+                       err = err__;
+       }
 
 err_file:
        mock_file_free(engine->i915, file);
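
active_request_put() centralises the wait/timeout/wedge/put sequence, and the drain loop at the end releases every outstanding request while reporting only the first error it saw. A compact sketch of the keep-the-first-error drain, with a stub standing in for the real wait-and-put helper:

#include <stdio.h>

/* Pretend these are outstanding requests; <0 means the wait failed. */
static int wait_and_put(int rq) { return rq < 0 ? rq : 0; }

int main(void)
{
	int rq[] = { 0, -5, 3, -7 };
	int err = 0;

	for (unsigned i = 0; i < sizeof(rq) / sizeof(rq[0]); i++) {
		int err__ = wait_and_put(rq[i]);	/* always release */

		if (!err)	/* keep the first error, drop the rest */
			err = err__;
	}
	printf("err=%d\n", err);	/* -5: first failure wins */
	return 0;
}
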
@@ -719,7 +742,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
                                }
 
                                i915_request_get(rq);
-                               __i915_request_add(rq, true);
+                               i915_request_add(rq);
                                mutex_unlock(&i915->drm.struct_mutex);
 
                                if (!wait_until_running(&h, rq)) {
@@ -919,7 +942,7 @@ static int igt_wait_reset(void *arg)
        }
 
        i915_request_get(rq);
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        if (!wait_until_running(&h, rq)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1014,7 +1037,7 @@ static int igt_reset_queue(void *arg)
                }
 
                i915_request_get(prev);
-               __i915_request_add(prev, true);
+               i915_request_add(prev);
 
                count = 0;
                do {
@@ -1028,7 +1051,7 @@ static int igt_reset_queue(void *arg)
                        }
 
                        i915_request_get(rq);
-                       __i915_request_add(rq, true);
+                       i915_request_add(rq);
 
                        /*
                         * XXX We don't handle resetting the kernel context
@@ -1161,7 +1184,7 @@ static int igt_handle_error(void *arg)
        }
 
        i915_request_get(rq);
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        if (!wait_until_running(&h, rq)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
index 1b8a071..ea27c7c 100644 (file)
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
                              struct i915_request *rq,
                              u32 arbitration_command)
 {
-       struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+       struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;
@@ -155,7 +155,7 @@ spinner_create_request(struct spinner *spin,
 
        err = emit_recurse_batch(spin, rq, arbitration_command);
        if (err) {
-               __i915_request_add(rq, false);
+               i915_request_add(rq);
                return ERR_PTR(err);
        }
 
index 17444a3..e1ea2d2 100644 (file)
@@ -33,7 +33,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_unpin_map(result);
 
-       vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
@@ -75,7 +75,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        i915_gem_object_get(result);
        i915_gem_object_set_active_reference(result);
 
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
        i915_vma_unpin(vma);
 
        return result;
index 501becc..8904f1c 100644 (file)
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
             const char *name)
 {
        struct i915_gem_context *ctx;
+       unsigned int n;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
 
+       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+               struct intel_context *ce = &ctx->__engine[n];
+
+               ce->gem_context = ctx;
+       }
+
        ret = ida_simple_get(&i915->contexts.hw_ida,
                             0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
        if (ret < 0)
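
mock_context() now seeds a gem_context back-pointer into each per-engine intel_context at creation time, so later code can reach the owner from the embedded child without any lookup. A sketch of that back-pointer wiring, with invented names and a fixed engine count:

#include <stdio.h>

#define NUM_ENGINES 3

struct gem_context;

struct engine_context {
	struct gem_context *gem_context;	/* back-pointer to the owner */
};

struct gem_context {
	struct engine_context engine[NUM_ENGINES];
};

int main(void)
{
	struct gem_context ctx = { 0 };

	/* Wire every embedded child back to its parent at creation time. */
	for (unsigned n = 0; n < NUM_ENGINES; n++)
		ctx.engine[n].gem_context = &ctx;

	printf("%d\n", ctx.engine[1].gem_context == &ctx);	/* prints 1 */
	return 0;
}
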
index 302f7d1..ca682ca 100644 (file)
@@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
        vm_unmap_ram(vaddr, mock->npages);
 }
 
-static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-       struct mock_dmabuf *mock = to_mock(dma_buf);
-
-       return kmap_atomic(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-       kunmap_atomic(addr);
-}
-
 static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
        struct mock_dmabuf *mock = to_mock(dma_buf);
@@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops =  {
        .unmap_dma_buf = mock_unmap_dma_buf,
        .release = mock_dmabuf_release,
        .map = mock_dmabuf_kmap,
-       .map_atomic = mock_dmabuf_kmap_atomic,
        .unmap = mock_dmabuf_kunmap,
-       .unmap_atomic = mock_dmabuf_kunmap_atomic,
        .mmap = mock_dmabuf_mmap,
        .vmap = mock_dmabuf_vmap,
        .vunmap = mock_dmabuf_vunmap,
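
With the dma-buf map_atomic/unmap_atomic hooks removed treewide, the mock ops table simply drops those designated initializers; members of a static const ops struct that are not named are zero-initialised to NULL. A sketch of pruning an ops table this way, using a hypothetical ops struct rather than the real dma_buf_ops:

#include <stdio.h>

struct buf_ops {
	void *(*map)(unsigned long page);
	void  (*unmap)(unsigned long page, void *addr);
	void *(*map_atomic)(unsigned long page);	/* deprecated hook */
};

static void *my_map(unsigned long page) { (void)page; return "mapped"; }
static void my_unmap(unsigned long page, void *addr) { (void)page; (void)addr; }

/* Designated initializers: anything not named here is zero/NULL. */
static const struct buf_ops ops = {
	.map = my_map,
	.unmap = my_unmap,
	/* .map_atomic intentionally left out */
};

int main(void)
{
	printf("map=%s atomic=%s\n", (char *)ops.map(0),
	       ops.map_atomic ? "set" : "NULL");
	return 0;
}
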
index 26bf29d..c2a0451 100644 (file)
@@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t)
        spin_unlock(&engine->hw_lock);
 }
 
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
-                struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
-
-       if (!ce->pin_count++)
-               i915_gem_context_get(ctx);
+       i915_gem_context_put(ce->gem_context);
+}
 
-       return engine->buffer;
+static void mock_context_destroy(struct intel_context *ce)
+{
+       GEM_BUG_ON(ce->pin_count);
 }
 
-static void mock_context_unpin(struct intel_engine_cs *engine,
-                              struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+       .unpin = mock_context_unpin,
+       .destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+                struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
 
-       if (!--ce->pin_count)
-               i915_gem_context_put(ctx);
+       if (!ce->pin_count++) {
+               i915_gem_context_get(ctx);
+               ce->ring = engine->buffer;
+               ce->ops = &mock_context_ops;
+       }
+
+       return ce;
 }
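
mock_context_pin() now returns the intel_context itself and installs its ring and ops vtable only on the 0-to-1 pin transition, mirroring the new intel_context_ops model. A sketch of first-pin initialisation guarded by a pin count, with simplified stand-in types:

#include <stdio.h>

struct ctx;

struct ctx_ops { void (*unpin)(struct ctx *); };

struct ctx {
	int pin_count;
	const struct ctx_ops *ops;
};

static void my_unpin(struct ctx *c) { c->pin_count--; }
static const struct ctx_ops my_ops = { .unpin = my_unpin };

/* Expensive setup happens once, on the first pin only. */
static struct ctx *ctx_pin(struct ctx *c)
{
	if (!c->pin_count++)
		c->ops = &my_ops;
	return c;
}

int main(void)
{
	struct ctx c = { 0 };

	ctx_pin(&c);
	ctx_pin(&c);
	c.ops->unpin(&c);
	printf("pin_count=%d\n", c.pin_count);	/* 1 */
	return 0;
}
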
 
 static int mock_request_alloc(struct i915_request *request)
@@ -185,7 +194,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        engine->base.status_page.page_addr = (void *)(engine + 1);
 
        engine->base.context_pin = mock_context_pin;
-       engine->base.context_unpin = mock_context_unpin;
        engine->base.request_alloc = mock_request_alloc;
        engine->base.emit_flush = mock_emit_flush;
        engine->base.emit_breadcrumb = mock_emit_breadcrumb;
@@ -204,8 +212,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        if (!engine->base.buffer)
                goto err_breadcrumbs;
 
+       if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+               goto err_ring;
+
        return &engine->base;
 
+err_ring:
+       mock_ring_free(engine->base.buffer);
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(&engine->base);
        i915_timeline_fini(&engine->base.timeline);
@@ -238,11 +251,15 @@ void mock_engine_free(struct intel_engine_cs *engine)
 {
        struct mock_engine *mock =
                container_of(engine, typeof(*mock), base);
+       struct intel_context *ce;
 
        GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-       if (engine->last_retired_context)
-               intel_context_unpin(engine->last_retired_context, engine);
+       ce = fetch_and_zero(&engine->last_retired_context);
+       if (ce)
+               intel_context_unpin(ce);
+
+       __intel_context_unpin(engine->i915->kernel_context, engine);
 
        mock_ring_free(engine->buffer);
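
mock_engine_free() consumes last_retired_context with fetch_and_zero(), which reads the pointer and clears the slot so the reference can only be dropped once. A sketch of the exchange-with-NULL idiom; note it relies on GNU C statement expressions, as the kernel macro does:

#include <stdio.h>
#include <stdlib.h>

/* Read *slot and clear it in one go, like the kernel's fetch_and_zero(). */
#define fetch_and_zero(slot) \
	({ __typeof__(*(slot)) __v = *(slot); *(slot) = NULL; __v; })

int main(void)
{
	char *last = malloc(4);
	char *ce = fetch_and_zero(&last);

	if (ce)
		free(ce);	/* release exactly once */
	printf("slot=%p\n", (void *)last);	/* now NULL */
	return 0;
}
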
 
index 94baedf..c97075c 100644 (file)
@@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = {
 struct drm_i915_private *mock_gem_device(void)
 {
        struct drm_i915_private *i915;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
        struct pci_dev *pdev;
        int err;
 
@@ -233,13 +231,13 @@ struct drm_i915_private *mock_gem_device(void)
        mock_init_ggtt(i915);
 
        mkwrite_device_info(i915)->ring_mask = BIT(0);
-       i915->engine[RCS] = mock_engine(i915, "mock", RCS);
-       if (!i915->engine[RCS])
-               goto err_unlock;
-
        i915->kernel_context = mock_context(i915, NULL);
        if (!i915->kernel_context)
-               goto err_engine;
+               goto err_unlock;
+
+       i915->engine[RCS] = mock_engine(i915, "mock", RCS);
+       if (!i915->engine[RCS])
+               goto err_context;
 
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -247,9 +245,8 @@ struct drm_i915_private *mock_gem_device(void)
 
        return i915;
 
-err_engine:
-       for_each_engine(engine, i915, id)
-               mock_engine_free(engine);
+err_context:
+       i915_gem_contexts_fini(i915);
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        kmem_cache_destroy(i915->priorities);
index 36c1120..6a7f4da 100644 (file)
@@ -66,25 +66,26 @@ mock_ppgtt(struct drm_i915_private *i915,
                return NULL;
 
        kref_init(&ppgtt->ref);
-       ppgtt->base.i915 = i915;
-       ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
-       ppgtt->base.file = ERR_PTR(-ENODEV);
-
-       INIT_LIST_HEAD(&ppgtt->base.active_list);
-       INIT_LIST_HEAD(&ppgtt->base.inactive_list);
-       INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
-       INIT_LIST_HEAD(&ppgtt->base.global_link);
-       drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
-       ppgtt->base.clear_range = nop_clear_range;
-       ppgtt->base.insert_page = mock_insert_page;
-       ppgtt->base.insert_entries = mock_insert_entries;
-       ppgtt->base.bind_vma = mock_bind_ppgtt;
-       ppgtt->base.unbind_vma = mock_unbind_ppgtt;
-       ppgtt->base.set_pages = ppgtt_set_pages;
-       ppgtt->base.clear_pages = clear_pages;
-       ppgtt->base.cleanup = mock_cleanup;
+       ppgtt->vm.i915 = i915;
+       ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+       ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+       INIT_LIST_HEAD(&ppgtt->vm.active_list);
+       INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
+       INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
+
+       INIT_LIST_HEAD(&ppgtt->vm.global_link);
+       drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
+
+       ppgtt->vm.clear_range = nop_clear_range;
+       ppgtt->vm.insert_page = mock_insert_page;
+       ppgtt->vm.insert_entries = mock_insert_entries;
+       ppgtt->vm.cleanup = mock_cleanup;
+
+       ppgtt->vm.vma_ops.bind_vma    = mock_bind_ppgtt;
+       ppgtt->vm.vma_ops.unbind_vma  = mock_unbind_ppgtt;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
 
        return ppgtt;
 }
@@ -107,27 +108,28 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 
        INIT_LIST_HEAD(&i915->vm_list);
 
-       ggtt->base.i915 = i915;
+       ggtt->vm.i915 = i915;
 
        ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
        ggtt->mappable_end = resource_size(&ggtt->gmadr);
-       ggtt->base.total = 4096 * PAGE_SIZE;
-
-       ggtt->base.clear_range = nop_clear_range;
-       ggtt->base.insert_page = mock_insert_page;
-       ggtt->base.insert_entries = mock_insert_entries;
-       ggtt->base.bind_vma = mock_bind_ggtt;
-       ggtt->base.unbind_vma = mock_unbind_ggtt;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.cleanup = mock_cleanup;
-
-       i915_address_space_init(&ggtt->base, i915, "global");
+       ggtt->vm.total = 4096 * PAGE_SIZE;
+
+       ggtt->vm.clear_range = nop_clear_range;
+       ggtt->vm.insert_page = mock_insert_page;
+       ggtt->vm.insert_entries = mock_insert_entries;
+       ggtt->vm.cleanup = mock_cleanup;
+
+       ggtt->vm.vma_ops.bind_vma    = mock_bind_ggtt;
+       ggtt->vm.vma_ops.unbind_vma  = mock_unbind_ggtt;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+       i915_address_space_init(&ggtt->vm, i915, "global");
 }
 
 void mock_fini_ggtt(struct drm_i915_private *i915)
 {
        struct i915_ggtt *ggtt = &i915->ggtt;
 
-       i915_address_space_fini(&ggtt->base);
+       i915_address_space_fini(&ggtt->vm);
 }
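
The mock GTT/ppGTT setup follows the same refactor as the rename above: the per-VMA hooks (bind_vma, unbind_vma, set_pages, clear_pages) move out of i915_address_space into a nested vma_ops structure. A sketch of grouping related function pointers into a sub-struct, with hypothetical names:

#include <stdio.h>

struct vma_ops {
	int  (*bind)(void);
	void (*unbind)(void);
};

struct address_space {
	unsigned long long total;
	struct vma_ops vma_ops;	/* per-VMA hooks grouped together */
};

static int mock_bind(void) { return 0; }
static void mock_unbind(void) { }

int main(void)
{
	struct address_space vm = { .total = 4096 };

	vm.vma_ops.bind = mock_bind;	/* was vm.bind_vma before the split */
	vm.vma_ops.unbind = mock_unbind;

	printf("bind=%d total=%llu\n", vm.vma_ops.bind(), vm.total);
	return 0;
}
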
index 0d8d506..be5f6f1 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <linux/dma-buf.h>
 #include <linux/reservation.h>
 
 #include "mtk_drm_fb.h"
 #include "mtk_drm_gem.h"
 
-/*
- * mtk specific framebuffer structure.
- *
- * @fb: drm framebuffer object.
- * @gem_obj: array of gem objects.
- */
-struct mtk_drm_fb {
-       struct drm_framebuffer  base;
-       /* For now we only support a single plane */
-       struct drm_gem_object   *gem_obj;
-};
-
-#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
-
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
-{
-       struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
-       return mtk_fb->gem_obj;
-}
-
-static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
-                                   struct drm_file *file_priv,
-                                   unsigned int *handle)
-{
-       struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
-       return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
-}
-
-static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
-{
-       struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       drm_gem_object_put_unlocked(mtk_fb->gem_obj);
-
-       kfree(mtk_fb);
-}
-
 static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
-       .create_handle = mtk_drm_fb_create_handle,
-       .destroy = mtk_drm_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
-static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
                                        const struct drm_mode_fb_cmd2 *mode,
                                        struct drm_gem_object *obj)
 {
-       struct mtk_drm_fb *mtk_fb;
+       struct drm_framebuffer *fb;
        int ret;
 
        if (drm_format_num_planes(mode->pixel_format) != 1)
                return ERR_PTR(-EINVAL);
 
-       mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
-       if (!mtk_fb)
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
                return ERR_PTR(-ENOMEM);
 
-       drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
+       drm_helper_mode_fill_fb_struct(dev, fb, mode);
 
-       mtk_fb->gem_obj = obj;
+       fb->obj[0] = obj;
 
-       ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+       ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
        if (ret) {
                DRM_ERROR("failed to initialize framebuffer\n");
-               kfree(mtk_fb);
+               kfree(fb);
                return ERR_PTR(ret);
        }
 
-       return mtk_fb;
+       return fb;
 }
 
 /*
@@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
        if (!fb)
                return 0;
 
-       gem = mtk_fb_get_gem_obj(fb);
+       gem = fb->obj[0];
        if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
                return 0;
 
@@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
                                               struct drm_file *file,
                                               const struct drm_mode_fb_cmd2 *cmd)
 {
-       struct mtk_drm_fb *mtk_fb;
+       struct drm_framebuffer *fb;
        struct drm_gem_object *gem;
        unsigned int width = cmd->width;
        unsigned int height = cmd->height;
@@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
                goto unreference;
        }
 
-       mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
-       if (IS_ERR(mtk_fb)) {
-               ret = PTR_ERR(mtk_fb);
+       fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
                goto unreference;
        }
 
-       return &mtk_fb->base;
+       return fb;
 
 unreference:
        drm_gem_object_put_unlocked(gem);
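
The mediatek conversion deletes the driver-private mtk_drm_fb wrapper: the backing GEM object now lives in the core framebuffer's obj[] array, which is what lets the generic drm_gem_fb_create_handle() and drm_gem_fb_destroy() helpers take over. A sketch of swapping a wrapper-struct field for a core array slot, with stand-in types:

#include <stdio.h>

#define MAX_PLANES 4

struct gem_object { int id; };

/* The core framebuffer now carries the per-plane objects itself. */
struct framebuffer {
	struct gem_object *obj[MAX_PLANES];
};

/* Generic helper: no driver wrapper needed to find a plane's object. */
static struct gem_object *fb_get_obj(struct framebuffer *fb, int plane)
{
	return plane < MAX_PLANES ? fb->obj[plane] : NULL;
}

int main(void)
{
	struct gem_object bo = { 42 };
	struct framebuffer fb = { .obj = { &bo } };

	printf("plane0 id=%d\n", fb_get_obj(&fb, 0)->id);
	return 0;
}
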
index 9b2ae34..7f976b1 100644 (file)
@@ -14,7 +14,6 @@
 #ifndef MTK_DRM_FB_H
 #define MTK_DRM_FB_H
 
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
 int mtk_fb_wait(struct drm_framebuffer *fb);
 struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
                                               struct drm_file *file,
index 2f4b0ff..f7e6aa1 100644 (file)
@@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
        if (!fb)
                return 0;
 
-       if (!mtk_fb_get_gem_obj(fb)) {
-               DRM_DEBUG_KMS("buffer is null\n");
-               return -EFAULT;
-       }
-
        if (!state->crtc)
                return 0;
 
@@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        if (!crtc || WARN_ON(!fb))
                return;
 
-       gem = mtk_fb_get_gem_obj(fb);
+       gem = fb->obj[0];
        mtk_gem = to_mtk_gem_obj(gem);
        addr = mtk_gem->dma_addr;
        pitch = fb->pitches[0];
index b001699..457c29d 100644 (file)
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
                int idx = idxs[pipe_id];
                if (idx > 0) {
                        const struct mdp_format *format =
-                                       to_mdp_format(msm_framebuffer_format(plane->fb));
+                                       to_mdp_format(msm_framebuffer_format(plane->state->fb));
                        alpha[idx-1] = format->alpha_enable;
                }
        }
@@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
                                  NULL);
        drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
-       plane->crtc = crtc;
 
        return crtc;
 }
index 20e956e..7b641fa 100644 (file)
@@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
                        msm_framebuffer_iova(fb, kms->aspace, 2));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
                        msm_framebuffer_iova(fb, kms->aspace, 3));
-
-       plane->fb = fb;
 }
 
 static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
index 1027135..24e0027 100644 (file)
@@ -1207,7 +1207,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                        "unref cursor", unref_cursor_worker);
 
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
-       plane->crtc = crtc;
 
        return crtc;
 }
index e09bc53..c4f115f 100644 (file)
@@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
        if (plane_enabled(new_state)) {
                struct mdp5_ctl *ctl;
                struct mdp5_pipeline *pipeline =
-                                       mdp5_crtc_get_pipeline(plane->crtc);
+                                       mdp5_crtc_get_pipeline(new_state->crtc);
                int ret;
 
                ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
                                     src_img_w, src_img_h,
                                     src_x + src_w, src_y, src_w, src_h);
 
-       plane->fb = fb;
-
        return ret;
 }
 
index 7a16242..2a7348a 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
 struct msm_framebuffer {
        struct drm_framebuffer base;
        const struct msm_format *format;
-       struct drm_gem_object *planes[MAX_PLANE];
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
 static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
                const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
-               struct drm_file *file_priv,
-               unsigned int *handle)
-{
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       return drm_gem_handle_create(file_priv,
-                       msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       int i, n = fb->format->num_planes;
-
-       DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       for (i = 0; i < n; i++) {
-               struct drm_gem_object *bo = msm_fb->planes[i];
-
-               drm_gem_object_put_unlocked(bo);
-       }
-
-       kfree(msm_fb);
-}
-
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
-       .create_handle = msm_framebuffer_create_handle,
-       .destroy = msm_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
        for (i = 0; i < n; i++) {
                seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
                                i, fb->offsets[i], fb->pitches[i]);
-               msm_gem_describe(msm_fb->planes[i], m);
+               msm_gem_describe(fb->obj[i], m);
        }
 }
 #endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int ret, i, n = fb->format->num_planes;
        uint64_t iova;
 
        for (i = 0; i < n; i++) {
-               ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+               ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
                DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
                if (ret)
                        return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        for (i = 0; i < n; i++)
-               msm_gem_put_iova(msm_fb->planes[i], aspace);
+               msm_gem_put_iova(fb->obj[i], aspace);
 }
 
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace, int plane)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       if (!msm_fb->planes[plane])
+       if (!fb->obj[plane])
                return 0;
-       return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+       return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       return msm_fb->planes[plane];
+       return drm_gem_fb_get_obj(fb, plane);
 }
 
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
        msm_fb->format = format;
 
-       if (n > ARRAY_SIZE(msm_fb->planes)) {
+       if (n > ARRAY_SIZE(fb->obj)) {
                ret = -EINVAL;
                goto fail;
        }
@@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
                        goto fail;
                }
 
-               msm_fb->planes[i] = bos[i];
+               msm_fb->base.obj[i] = bos[i];
        }
 
        drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
index 5fd22ca..9f1e3d8 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_drv.h"
@@ -51,9 +52,6 @@ static const u32 formats[] = {
 
 /* per-plane info for the fb: */
 struct plane {
-       struct drm_gem_object *bo;
-       u32 pitch;
-       u32 offset;
        dma_addr_t dma_addr;
 };
 
@@ -68,56 +66,28 @@ struct omap_framebuffer {
        struct mutex lock;
 };
 
-static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
-               struct drm_file *file_priv,
-               unsigned int *handle)
-{
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       return drm_gem_handle_create(file_priv,
-                       omap_fb->planes[0].bo, handle);
-}
-
-static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       int i, n = fb->format->num_planes;
-
-       DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       for (i = 0; i < n; i++) {
-               struct plane *plane = &omap_fb->planes[i];
-
-               drm_gem_object_unreference_unlocked(plane->bo);
-       }
-
-       kfree(omap_fb);
-}
-
 static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
-       .create_handle = omap_framebuffer_create_handle,
-       .destroy = omap_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
-static u32 get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct drm_framebuffer *fb,
                const struct drm_format_info *format, int n, int x, int y)
 {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       struct plane *plane = &omap_fb->planes[n];
        u32 offset;
 
-       offset = plane->offset
+       offset = fb->offsets[n]
               + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
-              + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+              + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
 
        return plane->dma_addr + offset;
 }
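
get_linear_addr() now derives a plane's scanout address purely from core framebuffer fields; for chroma planes (n > 0) the x and y coordinates are scaled down by the format's horizontal and vertical subsampling factors. A worked sketch of the same arithmetic for an NV12-like 2x2 subsampled layout, with assumed constants:

#include <stdio.h>

/* addr = base + offsets[n] + x*cpp[n]/hsub + y*pitches[n]/vsub (n > 0 only) */
static unsigned long plane_addr(unsigned long base, unsigned int offset,
				unsigned int pitch, unsigned int cpp,
				unsigned int hsub, unsigned int vsub,
				unsigned int x, unsigned int y, int n)
{
	return base + offset
	       + (unsigned long)x * cpp / (n == 0 ? 1 : hsub)
	       + (unsigned long)y * pitch / (n == 0 ? 1 : vsub);
}

int main(void)
{
	/* NV12-ish: luma cpp=1; chroma plane cpp=2 (CbCr), 2x2 subsampled */
	unsigned long y_addr  = plane_addr(0x1000, 0, 1024, 1, 2, 2,
					   64, 32, 0);
	unsigned long uv_addr = plane_addr(0x1000, 1024 * 128, 1024, 2, 2, 2,
					   64, 32, 1);

	printf("y=0x%lx uv=0x%lx\n", y_addr, uv_addr);
	return 0;
}
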
 
 bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
 {
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       struct plane *plane = &omap_fb->planes[0];
-
-       return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+       return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
 }
 
 /* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
        x = state->src_x >> 16;
        y = state->src_y >> 16;
 
-       if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+       if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
                u32 w = state->src_w >> 16;
                u32 h = state->src_h >> 16;
 
@@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                        x += w - 1;
 
                /* Note: x and y are in TILER units, not pixels */
-               omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+               omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
                                          &info->paddr);
                info->rotation_type = OMAP_DSS_ROT_TILER;
                info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
                /* Note: stride in TILER units, not pixels */
-               info->screen_width  = omap_gem_tiled_stride(plane->bo, orient);
+               info->screen_width  = omap_gem_tiled_stride(fb->obj[0], orient);
        } else {
                switch (state->rotation & DRM_MODE_ROTATE_MASK) {
                case 0:
@@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                        break;
                }
 
-               info->paddr         = get_linear_addr(plane, format, 0, x, y);
+               info->paddr         = get_linear_addr(fb, format, 0, x, y);
                info->rotation_type = OMAP_DSS_ROT_NONE;
                info->rotation      = DRM_MODE_ROTATE_0;
-               info->screen_width  = plane->pitch;
+               info->screen_width  = fb->pitches[0];
        }
 
        /* convert to pixels: */
@@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                plane = &omap_fb->planes[1];
 
                if (info->rotation_type == OMAP_DSS_ROT_TILER) {
-                       WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
-                       omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+                       WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+                       omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
                                                  &info->p_uv_addr);
                } else {
-                       info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+                       info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
                }
        } else {
                info->p_uv_addr = 0;
@@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
 
        for (i = 0; i < n; i++) {
                struct plane *plane = &omap_fb->planes[i];
-               ret = omap_gem_pin(plane->bo, &plane->dma_addr);
+               ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
                if (ret)
                        goto fail;
-               omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
+               omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
        }
 
        omap_fb->pin_count++;
@@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
 fail:
        for (i--; i >= 0; i--) {
                struct plane *plane = &omap_fb->planes[i];
-               omap_gem_unpin(plane->bo);
+               omap_gem_unpin(fb->obj[i]);
                plane->dma_addr = 0;
        }
 
@@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
 
        for (i = 0; i < n; i++) {
                struct plane *plane = &omap_fb->planes[i];
-               omap_gem_unpin(plane->bo);
+               omap_gem_unpin(fb->obj[i]);
                plane->dma_addr = 0;
        }
 
        mutex_unlock(&omap_fb->lock);
 }
 
-/* iterate thru all the connectors, returning ones that are attached
- * to the same fb..
- */
-struct drm_connector *omap_framebuffer_get_next_connector(
-               struct drm_framebuffer *fb, struct drm_connector *from)
-{
-       struct drm_device *dev = fb->dev;
-       struct list_head *connector_list = &dev->mode_config.connector_list;
-       struct drm_connector *connector = from;
-
-       if (!from)
-               return list_first_entry_or_null(connector_list, typeof(*from),
-                                               head);
-
-       list_for_each_entry_from(connector, connector_list, head) {
-               if (connector != from) {
-                       struct drm_encoder *encoder = connector->encoder;
-                       struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
-                       if (crtc && crtc->primary->fb == fb)
-                               return connector;
-
-               }
-       }
-
-       return NULL;
-}
-
 #ifdef CONFIG_DEBUG_FS
 void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
                        (char *)&fb->format->format);
 
        for (i = 0; i < n; i++) {
-               struct plane *plane = &omap_fb->planes[i];
                seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
-                               i, plane->offset, plane->pitch);
-               omap_gem_describe(plane->bo, m);
+                               i, fb->offsets[i], fb->pitches[i]);
+               omap_gem_describe(fb->obj[i], m);
        }
 }
 #endif
@@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
                        goto fail;
                }
 
-               plane->bo     = bos[i];
-               plane->offset = mode_cmd->offsets[i];
-               plane->pitch  = pitch;
+               fb->obj[i]    = bos[i];
                plane->dma_addr  = 0;
        }
 
index 94ad5f9..c20cb4b 100644 (file)
@@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
 void omap_framebuffer_unpin(struct drm_framebuffer *fb);
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
-               struct drm_framebuffer *fb, struct drm_connector *from);
 bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
 void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 
index 8e41d64..ec04a69 100644 (file)
@@ -93,23 +93,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
        return 0;
 }
 
-
-static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
-               unsigned long page_num)
-{
-       struct drm_gem_object *obj = buffer->priv;
-       struct page **pages;
-       omap_gem_get_pages(obj, &pages, false);
-       omap_gem_cpu_sync_page(obj, page_num);
-       return kmap_atomic(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
-               unsigned long page_num, void *addr)
-{
-       kunmap_atomic(addr);
-}
-
 static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
                unsigned long page_num)
 {
@@ -148,8 +131,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
        .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
-       .map_atomic = omap_gem_dmabuf_kmap_atomic,
-       .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
        .map = omap_gem_dmabuf_kmap,
        .unmap = omap_gem_dmabuf_kunmap,
        .mmap = omap_gem_dmabuf_mmap,
index 25682ff..6020c30 100644 (file)
@@ -46,6 +46,15 @@ config DRM_PANEL_ILITEK_IL9322
          Say Y here if you want to enable support for Ilitek IL9322
          QVGA (320x240) RGB, YUV and ITU-T BT.656 panels.
 
+config DRM_PANEL_ILITEK_ILI9881C
+       tristate "Ilitek ILI9881C-based panels"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y here if you want to enable support for panels based on the
+         Ilitek ILI9881C controller.
+
 config DRM_PANEL_INNOLUX_P079ZCA
        tristate "Innolux P079ZCA panel"
        depends on OF
index f26efc1..5ccaaa9 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
new file mode 100644 (file)
index 0000000..e848af2
--- /dev/null
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Bootlin
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct ili9881c {
+       struct drm_panel        panel;
+       struct mipi_dsi_device  *dsi;
+
+       struct backlight_device *backlight;
+       struct regulator        *power;
+       struct gpio_desc        *reset;
+};
+
+enum ili9881c_op {
+       ILI9881C_SWITCH_PAGE,
+       ILI9881C_COMMAND,
+};
+
+struct ili9881c_instr {
+       enum ili9881c_op        op;
+
+       union arg {
+               struct cmd {
+                       u8      cmd;
+                       u8      data;
+               } cmd;
+               u8      page;
+       } arg;
+};
+
+#define ILI9881C_SWITCH_PAGE_INSTR(_page)      \
+       {                                       \
+               .op = ILI9881C_SWITCH_PAGE,     \
+               .arg = {                        \
+                       .page = (_page),        \
+               },                              \
+       }
+
+#define ILI9881C_COMMAND_INSTR(_cmd, _data)            \
+       {                                               \
+               .op = ILI9881C_COMMAND,         \
+               .arg = {                                \
+                       .cmd = {                        \
+                               .cmd = (_cmd),          \
+                               .data = (_data),        \
+                       },                              \
+               },                                      \
+       }
+
+static const struct ili9881c_instr ili9881c_init[] = {
+       ILI9881C_SWITCH_PAGE_INSTR(3),
+       ILI9881C_COMMAND_INSTR(0x01, 0x00),
+       ILI9881C_COMMAND_INSTR(0x02, 0x00),
+       ILI9881C_COMMAND_INSTR(0x03, 0x73),
+       ILI9881C_COMMAND_INSTR(0x04, 0x03),
+       ILI9881C_COMMAND_INSTR(0x05, 0x00),
+       ILI9881C_COMMAND_INSTR(0x06, 0x06),
+       ILI9881C_COMMAND_INSTR(0x07, 0x06),
+       ILI9881C_COMMAND_INSTR(0x08, 0x00),
+       ILI9881C_COMMAND_INSTR(0x09, 0x18),
+       ILI9881C_COMMAND_INSTR(0x0a, 0x04),
+       ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0c, 0x02),
+       ILI9881C_COMMAND_INSTR(0x0d, 0x03),
+       ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0f, 0x25),
+       ILI9881C_COMMAND_INSTR(0x10, 0x25),
+       ILI9881C_COMMAND_INSTR(0x11, 0x00),
+       ILI9881C_COMMAND_INSTR(0x12, 0x00),
+       ILI9881C_COMMAND_INSTR(0x13, 0x00),
+       ILI9881C_COMMAND_INSTR(0x14, 0x00),
+       ILI9881C_COMMAND_INSTR(0x15, 0x00),
+       ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+       ILI9881C_COMMAND_INSTR(0x17, 0x00),
+       ILI9881C_COMMAND_INSTR(0x18, 0x00),
+       ILI9881C_COMMAND_INSTR(0x19, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+       ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+       ILI9881C_COMMAND_INSTR(0x20, 0x04),
+       ILI9881C_COMMAND_INSTR(0x21, 0x01),
+       ILI9881C_COMMAND_INSTR(0x22, 0x00),
+       ILI9881C_COMMAND_INSTR(0x23, 0x00),
+       ILI9881C_COMMAND_INSTR(0x24, 0x00),
+       ILI9881C_COMMAND_INSTR(0x25, 0x00),
+       ILI9881C_COMMAND_INSTR(0x26, 0x00),
+       ILI9881C_COMMAND_INSTR(0x27, 0x00),
+       ILI9881C_COMMAND_INSTR(0x28, 0x33),
+       ILI9881C_COMMAND_INSTR(0x29, 0x03),
+       ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+       ILI9881C_COMMAND_INSTR(0x30, 0x00),
+       ILI9881C_COMMAND_INSTR(0x31, 0x00),
+       ILI9881C_COMMAND_INSTR(0x32, 0x00),
+       ILI9881C_COMMAND_INSTR(0x33, 0x00),
+       ILI9881C_COMMAND_INSTR(0x34, 0x04),
+       ILI9881C_COMMAND_INSTR(0x35, 0x00),
+       ILI9881C_COMMAND_INSTR(0x36, 0x00),
+       ILI9881C_COMMAND_INSTR(0x37, 0x00),
+       ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+       ILI9881C_COMMAND_INSTR(0x39, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+       ILI9881C_COMMAND_INSTR(0x40, 0x00),
+       ILI9881C_COMMAND_INSTR(0x41, 0x00),
+       ILI9881C_COMMAND_INSTR(0x42, 0x00),
+       ILI9881C_COMMAND_INSTR(0x43, 0x00),
+       ILI9881C_COMMAND_INSTR(0x44, 0x00),
+       ILI9881C_COMMAND_INSTR(0x50, 0x01),
+       ILI9881C_COMMAND_INSTR(0x51, 0x23),
+       ILI9881C_COMMAND_INSTR(0x52, 0x45),
+       ILI9881C_COMMAND_INSTR(0x53, 0x67),
+       ILI9881C_COMMAND_INSTR(0x54, 0x89),
+       ILI9881C_COMMAND_INSTR(0x55, 0xab),
+       ILI9881C_COMMAND_INSTR(0x56, 0x01),
+       ILI9881C_COMMAND_INSTR(0x57, 0x23),
+       ILI9881C_COMMAND_INSTR(0x58, 0x45),
+       ILI9881C_COMMAND_INSTR(0x59, 0x67),
+       ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+       ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+       ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+       ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+       ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+       ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+       ILI9881C_COMMAND_INSTR(0x60, 0x02),
+       ILI9881C_COMMAND_INSTR(0x61, 0x02),
+       ILI9881C_COMMAND_INSTR(0x62, 0x02),
+       ILI9881C_COMMAND_INSTR(0x63, 0x02),
+       ILI9881C_COMMAND_INSTR(0x64, 0x02),
+       ILI9881C_COMMAND_INSTR(0x65, 0x02),
+       ILI9881C_COMMAND_INSTR(0x66, 0x02),
+       ILI9881C_COMMAND_INSTR(0x67, 0x02),
+       ILI9881C_COMMAND_INSTR(0x68, 0x02),
+       ILI9881C_COMMAND_INSTR(0x69, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+       ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
+       ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
+       ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+       ILI9881C_COMMAND_INSTR(0x6f, 0x06),
+       ILI9881C_COMMAND_INSTR(0x70, 0x07),
+       ILI9881C_COMMAND_INSTR(0x71, 0x02),
+       ILI9881C_COMMAND_INSTR(0x72, 0x02),
+       ILI9881C_COMMAND_INSTR(0x73, 0x02),
+       ILI9881C_COMMAND_INSTR(0x74, 0x02),
+       ILI9881C_COMMAND_INSTR(0x75, 0x02),
+       ILI9881C_COMMAND_INSTR(0x76, 0x02),
+       ILI9881C_COMMAND_INSTR(0x77, 0x02),
+       ILI9881C_COMMAND_INSTR(0x78, 0x02),
+       ILI9881C_COMMAND_INSTR(0x79, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7a, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+       ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+       ILI9881C_COMMAND_INSTR(0x81, 0x02),
+       ILI9881C_COMMAND_INSTR(0x82, 0x0F),
+       ILI9881C_COMMAND_INSTR(0x83, 0x0E),
+       ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+       ILI9881C_COMMAND_INSTR(0x85, 0x06),
+       ILI9881C_COMMAND_INSTR(0x86, 0x07),
+       ILI9881C_COMMAND_INSTR(0x87, 0x02),
+       ILI9881C_COMMAND_INSTR(0x88, 0x02),
+       ILI9881C_COMMAND_INSTR(0x89, 0x02),
+       ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+       ILI9881C_SWITCH_PAGE_INSTR(4),
+       ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+       ILI9881C_COMMAND_INSTR(0x6E, 0x22),
+       ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+       ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
+       ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
+       ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+       ILI9881C_COMMAND_INSTR(0x26, 0x76),
+       ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+       ILI9881C_SWITCH_PAGE_INSTR(1),
+       ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+       ILI9881C_COMMAND_INSTR(0x53, 0xDC),
+       ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+       ILI9881C_COMMAND_INSTR(0x50, 0x78),
+       ILI9881C_COMMAND_INSTR(0x51, 0x78),
+       ILI9881C_COMMAND_INSTR(0x31, 0x02),
+       ILI9881C_COMMAND_INSTR(0x60, 0x14),
+       ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
+       ILI9881C_COMMAND_INSTR(0xA1, 0x39),
+       ILI9881C_COMMAND_INSTR(0xA2, 0x46),
+       ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
+       ILI9881C_COMMAND_INSTR(0xA4, 0x12),
+       ILI9881C_COMMAND_INSTR(0xA5, 0x25),
+       ILI9881C_COMMAND_INSTR(0xA6, 0x19),
+       ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+       ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
+       ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+       ILI9881C_COMMAND_INSTR(0xAB, 0x85),
+       ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
+       ILI9881C_COMMAND_INSTR(0xAE, 0x51),
+       ILI9881C_COMMAND_INSTR(0xAF, 0x22),
+       ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
+       ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
+       ILI9881C_COMMAND_INSTR(0xB2, 0x59),
+       ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
+       ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
+       ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
+       ILI9881C_COMMAND_INSTR(0xC2, 0x45),
+       ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
+       ILI9881C_COMMAND_INSTR(0xC4, 0x11),
+       ILI9881C_COMMAND_INSTR(0xC5, 0x24),
+       ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
+       ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
+       ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
+       ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+       ILI9881C_COMMAND_INSTR(0xCB, 0x96),
+       ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
+       ILI9881C_COMMAND_INSTR(0xCE, 0x51),
+       ILI9881C_COMMAND_INSTR(0xCF, 0x22),
+       ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
+       ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
+       ILI9881C_COMMAND_INSTR(0xD2, 0x59),
+       ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+};
+
+static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
+{
+       return container_of(panel, struct ili9881c, panel);
+}
+
+/*
+ * The panel seems to accept some private DCS commands that map
+ * directly to registers.
+ *
+ * It is organised by page, with each page having its own set of
+ * registers, and the first page appears to hold the standard
+ * DCS commands.
+ *
+ * So before any attempt at sending a command or data, we have to
+ * make sure we are on the right page.
+ */
+static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+{
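+       /*
+        * Pages are selected by sending the 0xFF command followed by the
+        * 0x98/0x81 magic bytes and the target page number, as built below.
+        */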
+       u8 buf[4] = { 0xff, 0x98, 0x81, page };
+       int ret;
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+{
+       u8 buf[2] = { cmd, data };
+       int ret;
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int ili9881c_prepare(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+       unsigned int i;
+       int ret;
+
+       /* Power the panel */
+       ret = regulator_enable(ctx->power);
+       if (ret)
+               return ret;
+       msleep(5);
+
+       /* And reset it */
+       gpiod_set_value(ctx->reset, 1);
+       msleep(20);
+
+       gpiod_set_value(ctx->reset, 0);
+       msleep(20);
+
+       for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) {
+               const struct ili9881c_instr *instr = &ili9881c_init[i];
+
+               if (instr->op == ILI9881C_SWITCH_PAGE)
+                       ret = ili9881c_switch_page(ctx, instr->arg.page);
+               else if (instr->op == ILI9881C_COMMAND)
+                       ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
+                                                     instr->arg.cmd.data);
+
+               if (ret)
+                       return ret;
+       }
+
+       ret = ili9881c_switch_page(ctx, 0);
+       if (ret)
+               return ret;
+
+       ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+       if (ret)
+               return ret;
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int ili9881c_enable(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+       msleep(120);
+
+       mipi_dsi_dcs_set_display_on(ctx->dsi);
+       backlight_enable(ctx->backlight);
+
+       return 0;
+}
+
+static int ili9881c_disable(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+       backlight_disable(ctx->backlight);
+       return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int ili9881c_unprepare(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+       mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+       regulator_disable(ctx->power);
+       gpiod_set_value(ctx->reset, 1);
+
+       return 0;
+}
+
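+/* The 62 MHz pixel clock over the 780 x 1320 total below gives ~60 Hz. */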
+static const struct drm_display_mode bananapi_default_mode = {
+       .clock          = 62000,
+       .vrefresh       = 60,
+
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 10,
+       .hsync_end      = 720 + 10 + 20,
+       .htotal         = 720 + 10 + 20 + 30,
+
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 10,
+       .vsync_end      = 1280 + 10 + 10,
+       .vtotal         = 1280 + 10 + 10 + 20,
+};
+
+static int ili9881c_get_modes(struct drm_panel *panel)
+{
+       struct drm_connector *connector = panel->connector;
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+       if (!mode) {
+               dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
+                       bananapi_default_mode.hdisplay,
+                       bananapi_default_mode.vdisplay,
+                       bananapi_default_mode.vrefresh);
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
+
+       panel->connector->display_info.width_mm = 62;
+       panel->connector->display_info.height_mm = 110;
+
+       return 1;
+}
+
+static const struct drm_panel_funcs ili9881c_funcs = {
+       .prepare        = ili9881c_prepare,
+       .unprepare      = ili9881c_unprepare,
+       .enable         = ili9881c_enable,
+       .disable        = ili9881c_disable,
+       .get_modes      = ili9881c_get_modes,
+};
+
+static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
+{
+       struct device_node *np;
+       struct ili9881c *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       mipi_dsi_set_drvdata(dsi, ctx);
+       ctx->dsi = dsi;
+
+       drm_panel_init(&ctx->panel);
+       ctx->panel.dev = &dsi->dev;
+       ctx->panel.funcs = &ili9881c_funcs;
+
+       ctx->power = devm_regulator_get(&dsi->dev, "power");
+       if (IS_ERR(ctx->power)) {
+               dev_err(&dsi->dev, "Couldn't get our power regulator\n");
+               return PTR_ERR(ctx->power);
+       }
+
+       ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset)) {
+               dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+               return PTR_ERR(ctx->reset);
+       }
+
+       np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
+       if (np) {
+               ctx->backlight = of_find_backlight_by_node(np);
+               of_node_put(np);
+
+               if (!ctx->backlight)
+                       return -EPROBE_DEFER;
+       }
+
+       ret = drm_panel_add(&ctx->panel);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->lanes = 4;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+{
+       struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
+
+       mipi_dsi_detach(dsi);
+       drm_panel_remove(&ctx->panel);
+
+       if (ctx->backlight)
+               put_device(&ctx->backlight->dev);
+
+       return 0;
+}
+
+static const struct of_device_id ili9881c_of_match[] = {
+       { .compatible = "bananapi,lhr050h41" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ili9881c_of_match);
+
+static struct mipi_dsi_driver ili9881c_dsi_driver = {
+       .probe          = ili9881c_dsi_probe,
+       .remove         = ili9881c_dsi_remove,
+       .driver = {
+               .name           = "ili9881c-dsi",
+               .of_match_table = ili9881c_of_match,
+       },
+};
+module_mipi_dsi_driver(ili9881c_dsi_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Ilitek ILI9881C Controller Driver");
+MODULE_LICENSE("GPL v2");
index 57df39b..bb53e08 100644 (file)
@@ -292,7 +292,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
                DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
                              err);
 
-       drm_panel_detach(&innolux->base);
        innolux_panel_del(innolux);
 
        return 0;
index 0a94ab7..99caa78 100644 (file)
@@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
                        ret);
 
-       drm_panel_detach(&jdi->base);
        jdi_panel_del(jdi);
 
        return 0;
index 5185819..8a16878 100644 (file)
@@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
 {
        struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
 
-       drm_panel_detach(&lvds->panel);
        drm_panel_remove(&lvds->panel);
 
        panel_lvds_disable(&lvds->panel);
index 90f1ae4..87fa316 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/regulator/consumer.h>
 #include <video/mipi_display.h>
 
-#define DRV_NAME "orisetech_otm8009a"
-
 #define OTM8009A_BACKLIGHT_DEFAULT     240
 #define OTM8009A_BACKLIGHT_MAX         255
 
@@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
                DRM_WARN("mipi dsi dcs write buffer failed\n");
 }
 
+static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
+                                     size_t len)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+       /* data will be sent in DSI HS mode (i.e. no LPM) */
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       otm8009a_dcs_write_buf(ctx, data, len);
+
+       /* restore the DSI LPM mode */
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+}
+
 #define dcs_write_seq(ctx, seq...)                     \
 ({                                                     \
        static const u8 d[] = { seq };                  \
@@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
        if (!ctx->enabled)
                return 0; /* This is not an issue so we return 0 here */
 
-       /* Power off the backlight. Note: end-user still controls brightness */
-       ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
-       ret = backlight_update_status(ctx->bl_dev);
-       if (ret)
-               return ret;
+       backlight_disable(ctx->bl_dev);
 
        ret = mipi_dsi_dcs_set_display_off(dsi);
        if (ret)
@@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
 
        ctx->prepared = true;
 
-       /*
-        * Power on the backlight. Note: end-user still controls brightness
-        * Note: ctx->prepared must be true before updating the backlight.
-        */
-       ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
-       backlight_update_status(ctx->bl_dev);
-
        return 0;
 }
 
@@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
 {
        struct otm8009a *ctx = panel_to_otm8009a(panel);
 
+       if (ctx->enabled)
+               return 0;
+
+       backlight_enable(ctx->bl_dev);
+
        ctx->enabled = true;
 
        return 0;
@@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
                 */
                data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
                data[1] = bd->props.brightness;
-               otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+               otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
 
                /* set Brightness Control & Backlight on */
                data[1] = 0x24;
@@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
 
        /* Update Brightness Control & Backlight */
        data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
-       otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+       otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
 
        return 0;
 }
@@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
        ctx->panel.dev = dev;
        ctx->panel.funcs = &otm8009a_drm_funcs;
 
-       ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
-                                               &otm8009a_backlight_ops, NULL);
+       ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+                                                    dsi->host->dev, ctx,
+                                                    &otm8009a_backlight_ops,
+                                                    NULL);
        if (IS_ERR(ctx->bl_dev)) {
-               dev_err(dev, "failed to register backlight device\n");
-               return PTR_ERR(ctx->bl_dev);
+               ret = PTR_ERR(ctx->bl_dev);
+               dev_err(dev, "failed to register backlight: %d\n", ret);
+               return ret;
        }
 
        ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
                return ret;
        }
 
-       DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
-                default_mode.hdisplay, default_mode.vdisplay,
-                default_mode.vrefresh,
-                mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
-
        return 0;
 }
 
@@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
        mipi_dsi_detach(dsi);
        drm_panel_remove(&ctx->panel);
 
-       backlight_device_unregister(ctx->bl_dev);
-
        return 0;
 }
 
@@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
        .probe  = otm8009a_probe,
        .remove = otm8009a_remove,
        .driver = {
-               .name = DRV_NAME "_panel",
+               .name = "panel-orisetech-otm8009a",
                .of_match_table = orisetech_otm8009a_of_match,
        },
 };
index 74a8061..cb4dfb9 100644 (file)
@@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
        if (ret < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
 
-       drm_panel_detach(&wuxga_nt->base);
        wuxga_nt_panel_del(wuxga_nt);
 
        return 0;
index 71c09ed..75f9253 100644 (file)
@@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
 {
        struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
 
-       drm_panel_detach(&panel->base);
        drm_panel_remove(&panel->base);
 
        seiko_panel_disable(&panel->base);
index 6bf8730..02fc0f5 100644 (file)
@@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
        if (err < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
 
-       drm_panel_detach(&sharp->base);
        sharp_panel_del(sharp);
 
        return 0;
index 494aa9b..e5cae00 100644 (file)
@@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
        if (ret < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
 
-       drm_panel_detach(&sharp_nt->base);
        sharp_nt_panel_del(sharp_nt);
 
        return 0;
index cbf1ab4..ac6aaa1 100644 (file)
@@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
 {
        struct panel_simple *panel = dev_get_drvdata(dev);
 
-       drm_panel_detach(&panel->base);
        drm_panel_remove(&panel->base);
 
        panel_simple_disable(&panel->base);
@@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
        },
 };
 
+static const struct display_timing auo_g070vvn01_timings = {
+       .pixelclock = { 33300000, 34209000, 45000000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 20, 40, 200 },
+       .hback_porch = { 87, 40, 1 },
+       .hsync_len = { 1, 48, 87 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 5, 13, 200 },
+       .vback_porch = { 31, 31, 29 },
+       .vsync_len = { 1, 1, 3 },
+};
+
+static const struct panel_desc auo_g070vvn01 = {
+       .timings = &auo_g070vvn01_timings,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .delay = {
+               .prepare = 200,
+               .enable = 50,
+               .disable = 50,
+               .unprepare = 1000,
+       },
+};
+
 static const struct drm_display_mode auo_g104sn02_mode = {
        .clock = 40000,
        .hdisplay = 800,
@@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
                .enable = 450,
                .unprepare = 500,
        },
-       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
 static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -1217,6 +1244,30 @@ static const struct panel_desc innolux_n156bge_l21 = {
        },
 };
 
+static const struct drm_display_mode innolux_tv123wam_mode = {
+       .clock = 206016,
+       .hdisplay = 2160,
+       .hsync_start = 2160 + 48,
+       .hsync_end = 2160 + 48 + 32,
+       .htotal = 2160 + 48 + 32 + 80,
+       .vdisplay = 1440,
+       .vsync_start = 1440 + 3,
+       .vsync_end = 1440 + 3 + 10,
+       .vtotal = 1440 + 3 + 10 + 27,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_tv123wam = {
+       .modes = &innolux_tv123wam_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 259,
+               .height = 173,
+       },
+};
+
 static const struct drm_display_mode innolux_zj070na_01p_mode = {
        .clock = 51501,
        .hdisplay = 1024,
@@ -1247,8 +1298,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
        .hback_porch = { 16, 36, 56 },
        .hsync_len = { 8, 8, 8 },
        .vactive = { 480, 480, 480 },
-       .vfront_porch = { 6, 21, 33.5 },
-       .vback_porch = { 6, 21, 33.5 },
+       .vfront_porch = { 6, 21, 33 },
+       .vback_porch = { 6, 21, 33 },
        .vsync_len = { 8, 8, 8 },
        .flags = DISPLAY_FLAGS_DE_HIGH,
 };
@@ -2095,6 +2146,9 @@ static const struct of_device_id platform_of_match[] = {
                .compatible = "auo,b133xtn01",
                .data = &auo_b133xtn01,
        }, {
+               .compatible = "auo,g070vvn01",
+               .data = &auo_g070vvn01,
+       }, {
                .compatible = "auo,g104sn02",
                .data = &auo_g104sn02,
        }, {
@@ -2170,6 +2224,9 @@ static const struct of_device_id platform_of_match[] = {
                .compatible = "innolux,n156bge-l21",
                .data = &innolux_n156bge_l21,
        }, {
+               .compatible = "innolux,tv123wam",
+               .data = &innolux_tv123wam,
+       }, {
                .compatible = "innolux,zj070na-01p",
                .data = &innolux_zj070na_01p,
        }, {
index 358c64e..74284e5 100644 (file)
@@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
 {
        struct st7789v *ctx = spi_get_drvdata(spi);
 
-       drm_panel_detach(&ctx->panel);
        drm_panel_remove(&ctx->panel);
 
        if (ctx->backlight)
index 8689fcc..cbb67e9 100644 (file)
@@ -947,11 +947,11 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
-static int radeon_ttm_fault(struct vm_fault *vmf)
+static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo;
        struct radeon_device *rdev;
-       int r;
+       vm_fault_t ret;
 
        bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
        if (bo == NULL) {
@@ -959,9 +959,9 @@ static int radeon_ttm_fault(struct vm_fault *vmf)
        }
        rdev = radeon_get_rdev(bo->bdev);
        down_read(&rdev->pm.mclk_lock);
-       r = ttm_vm_ops->fault(vmf);
+       ret = ttm_vm_ops->fault(vmf);
        up_read(&rdev->pm.mclk_lock);
-       return r;
+       return ret;
 }
 
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
index eb3042c..3105965 100644 (file)
@@ -792,7 +792,6 @@ err_config_video:
 
 int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
 {
-       u32 val;
        int ret;
 
        ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
                return ret;
        }
 
-       val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       val |= SPDIF_FIFO_MID_RANGE(0xe0);
-       val |= SPDIF_JITTER_THRSH(0xe0);
-       val |= SPDIF_JITTER_AVG_WIN(7);
-       writel(val, dp->regs + SPDIF_CTRL_ADDR);
+       writel(0, dp->regs + SPDIF_CTRL_ADDR);
 
        /* clear the audio config and reset */
        writel(0, dp->regs + AUDIO_SRC_CNTL);
@@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
 {
        u32 val;
 
-       val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       val |= SPDIF_FIFO_MID_RANGE(0xe0);
-       val |= SPDIF_JITTER_THRSH(0xe0);
-       val |= SPDIF_JITTER_AVG_WIN(7);
-       writel(val, dp->regs + SPDIF_CTRL_ADDR);
-
        writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
 
        val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
        writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
 
        val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       val |= SPDIF_FIFO_MID_RANGE(0xe0);
-       val |= SPDIF_JITTER_THRSH(0xe0);
-       val |= SPDIF_JITTER_AVG_WIN(7);
        writel(val, dp->regs + SPDIF_CTRL_ADDR);
 
        clk_prepare_enable(dp->spdif_clk);
index d4f4118..ea18cb2 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
 #include "rockchip_drm_psr.h"
 
-#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
-
-struct rockchip_drm_fb {
-       struct drm_framebuffer fb;
-       struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
-};
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
-                                              unsigned int plane)
-{
-       struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
-
-       if (plane >= ROCKCHIP_MAX_FB_BUFFER)
-               return NULL;
-
-       return rk_fb->obj[plane];
-}
-
-static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
-{
-       struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-       int i;
-
-       for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
-               drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
-
-       drm_framebuffer_cleanup(fb);
-       kfree(rockchip_fb);
-}
-
-static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
-                                        struct drm_file *file_priv,
-                                        unsigned int *handle)
-{
-       struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-
-       return drm_gem_handle_create(file_priv,
-                                    rockchip_fb->obj[0], handle);
-}
-
 static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
                                 struct drm_file *file,
                                 unsigned int flags, unsigned int color,
@@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
 }
 
 static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
-       .destroy        = rockchip_drm_fb_destroy,
-       .create_handle  = rockchip_drm_fb_create_handle,
-       .dirty          = rockchip_drm_fb_dirty,
+       .destroy       = drm_gem_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .dirty         = rockchip_drm_fb_dirty,
 };
 
-static struct rockchip_drm_fb *
+static struct drm_framebuffer *
 rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
                  struct drm_gem_object **obj, unsigned int num_planes)
 {
-       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_framebuffer *fb;
        int ret;
        int i;
 
-       rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
-       if (!rockchip_fb)
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
                return ERR_PTR(-ENOMEM);
 
-       drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
+       drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
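+       /*
+        * Stash the GEM objects in the core framebuffer so that the generic
+        * drm_gem_fb_destroy() and drm_gem_fb_create_handle() helpers used
+        * above can operate on it.
+        */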
        for (i = 0; i < num_planes; i++)
-               rockchip_fb->obj[i] = obj[i];
+               fb->obj[i] = obj[i];
 
-       ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
-                                  &rockchip_drm_fb_funcs);
+       ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
        if (ret) {
                DRM_DEV_ERROR(dev->dev,
                              "Failed to initialize framebuffer: %d\n",
                              ret);
-               kfree(rockchip_fb);
+               kfree(fb);
                return ERR_PTR(ret);
        }
 
-       return rockchip_fb;
+       return fb;
 }
 
 static struct drm_framebuffer *
 rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                        const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_framebuffer *fb;
        struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
        struct drm_gem_object *obj;
        unsigned int hsub;
@@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                objs[i] = obj;
        }
 
-       rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
-       if (IS_ERR(rockchip_fb)) {
-               ret = PTR_ERR(rockchip_fb);
+       fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
                goto err_gem_object_unreference;
        }
 
-       return &rockchip_fb->fb;
+       return fb;
 
 err_gem_object_unreference:
        for (i--; i >= 0; i--)
@@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
                              const struct drm_mode_fb_cmd2 *mode_cmd,
                              struct drm_gem_object *obj)
 {
-       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_framebuffer *fb;
 
-       rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
-       if (IS_ERR(rockchip_fb))
-               return ERR_CAST(rockchip_fb);
+       fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+       if (IS_ERR(fb))
+               return ERR_CAST(fb);
 
-       return &rockchip_fb->fb;
+       return fb;
 }
 
 void rockchip_drm_mode_config_init(struct drm_device *dev)
index 2fe47f1..f1265cb 100644 (file)
@@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
 void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
 
 void rockchip_drm_mode_config_init(struct drm_device *dev);
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
-                                              unsigned int plane);
 #endif /* _ROCKCHIP_DRM_FB_H */
index 2121345..effecbe 100644 (file)
@@ -486,6 +486,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
        spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
+static int vop_core_clks_enable(struct vop *vop)
+{
+       int ret;
+
+       ret = clk_enable(vop->hclk);
+       if (ret < 0)
+               return ret;
+
+       ret = clk_enable(vop->aclk);
+       if (ret < 0)
+               goto err_disable_hclk;
+
+       return 0;
+
+err_disable_hclk:
+       clk_disable(vop->hclk);
+       return ret;
+}
+
+static void vop_core_clks_disable(struct vop *vop)
+{
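+       /* Disable in the reverse order of vop_core_clks_enable(). */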
+       clk_disable(vop->aclk);
+       clk_disable(vop->hclk);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -497,17 +522,13 @@ static int vop_enable(struct drm_crtc *crtc)
                return ret;
        }
 
-       ret = clk_enable(vop->hclk);
+       ret = vop_core_clks_enable(vop);
        if (WARN_ON(ret < 0))
                goto err_put_pm_runtime;
 
        ret = clk_enable(vop->dclk);
        if (WARN_ON(ret < 0))
-               goto err_disable_hclk;
-
-       ret = clk_enable(vop->aclk);
-       if (WARN_ON(ret < 0))
-               goto err_disable_dclk;
+               goto err_disable_core;
 
        /*
         * Slave iommu shares power, irq and clock with vop.  It was associated
@@ -519,7 +540,7 @@ static int vop_enable(struct drm_crtc *crtc)
        if (ret) {
                DRM_DEV_ERROR(vop->dev,
                              "failed to attach dma mapping, %d\n", ret);
-               goto err_disable_aclk;
+               goto err_disable_dclk;
        }
 
        spin_lock(&vop->reg_lock);
@@ -552,18 +573,14 @@ static int vop_enable(struct drm_crtc *crtc)
 
        spin_unlock(&vop->reg_lock);
 
-       enable_irq(vop->irq);
-
        drm_crtc_vblank_on(crtc);
 
        return 0;
 
-err_disable_aclk:
-       clk_disable(vop->aclk);
 err_disable_dclk:
        clk_disable(vop->dclk);
-err_disable_hclk:
-       clk_disable(vop->hclk);
+err_disable_core:
+       vop_core_clks_disable(vop);
 err_put_pm_runtime:
        pm_runtime_put_sync(vop->dev);
        return ret;
@@ -599,8 +616,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
 
        vop_dsp_hold_valid_irq_disable(vop);
 
-       disable_irq(vop->irq);
-
        vop->is_enabled = false;
 
        /*
@@ -609,8 +624,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
        rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
 
        clk_disable(vop->dclk);
-       clk_disable(vop->aclk);
-       clk_disable(vop->hclk);
+       vop_core_clks_disable(vop);
        pm_runtime_put(vop->dev);
        mutex_unlock(&vop->vop_lock);
 
@@ -728,7 +742,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                return;
        }
 
-       obj = rockchip_fb_get_gem_obj(fb, 0);
+       obj = fb->obj[0];
        rk_obj = to_rockchip_obj(obj);
 
        actual_w = drm_rect_width(src) >> 16;
@@ -758,7 +772,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
                int bpp = fb->format->cpp[1];
 
-               uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+               uv_obj = fb->obj[1];
                rk_uv_obj = to_rockchip_obj(uv_obj);
 
                offset = (src->x1 >> 16) * bpp / hsub;
@@ -1178,6 +1192,18 @@ static irqreturn_t vop_isr(int irq, void *data)
        int ret = IRQ_NONE;
 
        /*
+        * The irq is shared with the iommu. If the runtime-pm state of the
+        * vop device is disabled, the irq has to be meant for the iommu.
+        */
+       if (!pm_runtime_get_if_in_use(vop->dev))
+               return IRQ_NONE;
+
+       if (vop_core_clks_enable(vop)) {
+               DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
+               goto out;
+       }
+
+       /*
         * interrupt register has interrupt status, enable and clear bits, we
         * must hold irq_lock to avoid a race with enable/disable_vblank().
        */
@@ -1192,7 +1218,7 @@ static irqreturn_t vop_isr(int irq, void *data)
 
        /* This is expected for vop iommu irqs, since the irq is shared */
        if (!active_irqs)
-               return IRQ_NONE;
+               goto out_disable;
 
        if (active_irqs & DSP_HOLD_VALID_INTR) {
                complete(&vop->dsp_hold_completion);
@@ -1218,6 +1244,10 @@ static irqreturn_t vop_isr(int irq, void *data)
                DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
                              active_irqs);
 
+out_disable:
+       vop_core_clks_disable(vop);
+out:
+       pm_runtime_put(vop->dev);
        return ret;
 }
 
@@ -1278,7 +1308,7 @@ static int vop_create_crtc(struct vop *vop)
        for (i = 0; i < vop_data->win_size; i++) {
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win_data = vop_win->data;
-               unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+               unsigned long possible_crtcs = drm_crtc_mask(crtc);
 
                if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
                        continue;
@@ -1596,9 +1626,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
        if (ret)
                goto err_disable_pm_runtime;
 
-       /* IRQ is initially disabled; it gets enabled in power_on */
-       disable_irq(vop->irq);
-
        return 0;
 
 err_disable_pm_runtime:
index 084acdd..fcb9104 100644 (file)
@@ -331,16 +331,19 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
 {
        int lb_mode;
 
-       if (width > 2560)
-               lb_mode = LB_RGB_3840X2;
-       else if (width > 1920)
-               lb_mode = LB_RGB_2560X4;
-       else if (!is_yuv)
-               lb_mode = LB_RGB_1920X5;
-       else if (width > 1280)
-               lb_mode = LB_YUV_3840X5;
-       else
-               lb_mode = LB_YUV_2560X8;
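+       /*
+        * YUV formats choose from the YUV line-buffer modes first; width
+        * only selects among the RGB modes for non-YUV formats.
+        */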
+       if (is_yuv) {
+               if (width > 1280)
+                       lb_mode = LB_YUV_3840X5;
+               else
+                       lb_mode = LB_YUV_2560X8;
+       } else {
+               if (width > 2560)
+                       lb_mode = LB_RGB_3840X2;
+               else if (width > 1920)
+                       lb_mode = LB_RGB_2560X4;
+               else
+                       lb_mode = LB_RGB_1920X5;
+       }
 
        return lb_mode;
 }
index e67f4ea..b3f6f52 100644 (file)
@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
                of_property_read_u32(endpoint, "reg", &endpoint_id);
                ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
                                                  &lvds->panel, &lvds->bridge);
-               if (!ret)
+               if (!ret) {
+                       of_node_put(endpoint);
                        break;
+               }
        }
        if (!child_count) {
                DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
@@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
                        goto err_free_connector;
                }
        } else {
-               lvds->bridge->encoder = encoder;
                ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
                if (ret) {
                        DRM_DEV_ERROR(drm_dev->dev,
                                      "failed to attach bridge: %d\n", ret);
                        goto err_free_encoder;
                }
-               encoder->bridge = lvds->bridge;
        }
 
        pm_runtime_enable(dev);
index 44d4807..6a31670 100644 (file)
  *
  */
 
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues have a priority among them, and the scheduler selects
+ * entities from a run queue in FIFO order. The scheduler also provides
+ * dependency handling between jobs. The driver is expected to supply
+ * callback functions for backend operations, such as submitting a job to
+ * the hardware run queue and returning the dependencies of a job.
+ *
+ * The organisation of the scheduler is as follows:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ *    the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
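+ *
+ * A minimal driver-side sketch of the flow (illustrative only: the ops,
+ * ring name and job/entity variables are placeholders; the calls are the
+ * API implemented below)::
+ *
+ *    drm_sched_init(&sched, &my_ops, hw_submission, hang_limit, timeout, "ring");
+ *    drm_sched_entity_init(&sched, &entity,
+ *                          &sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], NULL);
+ *    drm_sched_job_init(&job, &sched, &entity, owner);
+ *    drm_sched_entity_push_job(&job, &entity);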
+ */
+
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
@@ -39,7 +62,13 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
-/* Initialize a given run queue struct */
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
 static void drm_sched_rq_init(struct drm_sched_rq *rq)
 {
        spin_lock_init(&rq->lock);
@@ -47,6 +76,14 @@ static void drm_sched_rq_init(struct drm_sched_rq *rq)
        rq->current_entity = NULL;
 }
 
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
 static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                                    struct drm_sched_entity *entity)
 {
@@ -57,6 +94,14 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
        spin_unlock(&rq->lock);
 }
 
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
 static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                       struct drm_sched_entity *entity)
 {
@@ -70,9 +115,9 @@ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 }
 
 /**
- * Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
  *
- * @rq         The run queue to check.
+ * @rq: scheduler run queue to check.
  *
  * Try to find a ready entity, returns NULL if none found.
  */
@@ -112,15 +157,16 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 }
 
 /**
- * Init a context entity used by scheduler when submit to HW ring.
+ * drm_sched_entity_init - Init a context entity used by scheduler when
+ * submit to HW ring.
  *
- * @sched      The pointer to the scheduler
- * @entity     The pointer to a valid drm_sched_entity
- * @rq         The run queue this entity belongs
- * @guilty      atomic_t set to 1 when a job on this queue
- *              is found to be guilty causing a timeout
+ * @sched: scheduler instance
+ * @entity: scheduler entity to init
+ * @rq: the run queue this entity belongs
+ * @guilty: atomic_t set to 1 when a job on this queue
+ *          is found to be guilty causing a timeout
  *
- * return 0 if succeed. negative error code on failure
+ * Returns 0 on success or a negative error code on failure.
 */
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                          struct drm_sched_entity *entity,
@@ -135,7 +181,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
        entity->rq = rq;
        entity->sched = sched;
        entity->guilty = guilty;
-       entity->fini_status = 0;
        entity->last_scheduled = NULL;
 
        spin_lock_init(&entity->rq_lock);
@@ -149,10 +194,10 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 EXPORT_SYMBOL(drm_sched_entity_init);
 
 /**
- * Query if entity is initialized
+ * drm_sched_entity_is_initialized - Query if entity is initialized
  *
- * @sched       Pointer to scheduler instance
- * @entity     The pointer to a valid scheduler entity
+ * @sched: Pointer to scheduler instance
+ * @entity: The pointer to a valid scheduler entity
  *
  * return true if entity is initialized, false otherwise
 */
@@ -164,25 +209,26 @@ static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
 }
 
 /**
- * Check if entity is idle
+ * drm_sched_entity_is_idle - Check if entity is idle
  *
- * @entity     The pointer to a valid scheduler entity
+ * @entity: scheduler entity
  *
- * Return true if entity don't has any unscheduled jobs.
+ * Returns true if the entity does not have any unscheduled jobs.
  */
 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
        rmb();
-       if (spsc_queue_peek(&entity->job_queue) == NULL)
+
+       if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
                return true;
 
        return false;
 }
 
 /**
- * Check if entity is ready
+ * drm_sched_entity_is_ready - Check if entity is ready
  *
- * @entity     The pointer to a valid scheduler entity
+ * @entity: scheduler entity
  *
  * Return true if entity could provide a job.
  */
@@ -210,44 +256,66 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
 
 /**
- * Destroy a context entity
+ * drm_sched_entity_do_release - Destroy a context entity
  *
- * @sched       Pointer to scheduler instance
- * @entity     The pointer to a valid scheduler entity
+ * @sched: scheduler instance
+ * @entity: scheduler entity
+ * @timeout: time to wait, in jiffies, for the queue to become empty.
  *
- * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
+ * Splitting drm_sched_entity_fini() into two functions. The first one does the waiting,
  * removes the entity from the runqueue and returns an error when the process was killed.
+ *
+ * Returns the time in jiffies remaining from the input timeout.
  */
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity)
+long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+                          struct drm_sched_entity *entity, long timeout)
 {
+       long ret = timeout;
+
        if (!drm_sched_entity_is_initialized(sched, entity))
-               return;
+               return ret;
        /**
         * The client will not queue more IBs during this fini, consume existing
         * queued IBs or discard them on SIGKILL
        */
-       if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-               entity->fini_status = -ERESTARTSYS;
-       else
-               entity->fini_status = wait_event_killable(sched->job_scheduled,
-                                       drm_sched_entity_is_idle(entity));
-       drm_sched_entity_set_rq(entity, NULL);
+       if (current->flags & PF_EXITING) {
+               if (timeout)
+                       ret = wait_event_timeout(
+                                       sched->job_scheduled,
+                                       drm_sched_entity_is_idle(entity),
+                                       timeout);
+       } else {
+               wait_event_killable(sched->job_scheduled,
+                                   drm_sched_entity_is_idle(entity));
+       }
+
+       /* For a killed process, disable any further IB enqueueing right away */
+       if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+               drm_sched_entity_set_rq(entity, NULL);
+
+       return ret;
 }
 EXPORT_SYMBOL(drm_sched_entity_do_release);
 
 /**
- * Destroy a context entity
+ * drm_sched_entity_cleanup - Destroy a context entity
+ *
+ * @sched: scheduler instance
+ * @entity: scheduler entity
  *
- * @sched       Pointer to scheduler instance
- * @entity     The pointer to a valid scheduler entity
+ * This should be called after drm_sched_entity_do_release(). It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
  *
- * The second one then goes over the entity and signals all jobs with an error code.
  */
 void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity)
 {
-       if (entity->fini_status) {
+
+       drm_sched_entity_set_rq(entity, NULL);
+
+       /* Consumption of existing IBs wasn't completed. Forcefully
+        * remove them here.
+        */
+       if (spsc_queue_peek(&entity->job_queue)) {
                struct drm_sched_job *job;
                int r;
 
@@ -267,12 +335,22 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
                        struct drm_sched_fence *s_fence = job->s_fence;
                        drm_sched_fence_scheduled(s_fence);
                        dma_fence_set_error(&s_fence->finished, -ESRCH);
-                       r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
-                                                       drm_sched_entity_kill_jobs_cb);
-                       if (r == -ENOENT)
+
+                       /*
+                        * When the pipe is hung by an older entity, the new
+                        * entity might not even have had a chance to submit its
+                        * first job to the HW, and so entity->last_scheduled
+                        * will remain NULL.
+                        */
+                       if (!entity->last_scheduled) {
                                drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
-                       else if (r)
-                               DRM_ERROR("fence add callback failed (%d)\n", r);
+                       } else {
+                               r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+                                                               drm_sched_entity_kill_jobs_cb);
+                               if (r == -ENOENT)
+                                       drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+                               else if (r)
+                                       DRM_ERROR("fence add callback failed (%d)\n", r);
+                       }
                }
        }
 
@@ -281,10 +359,18 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
 }
 EXPORT_SYMBOL(drm_sched_entity_cleanup);
 
+/**
+ * drm_sched_entity_fini - Destroy a context entity
+ *
+ * @sched: scheduler instance
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
+ */
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
                                struct drm_sched_entity *entity)
 {
-       drm_sched_entity_do_release(sched, entity);
+       drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        drm_sched_entity_cleanup(sched, entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
@@ -306,6 +392,15 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
        dma_fence_put(f);
 }
 
+/**
+ * drm_sched_entity_set_rq - Sets the run queue for an entity
+ *
+ * @entity: scheduler entity
+ * @rq: scheduler run queue
+ *
+ * Sets the run queue for an entity and removes the entity from the previous
+ * run queue in which it was present.
+ */
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
                             struct drm_sched_rq *rq)
 {
@@ -325,6 +420,14 @@ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
 }
 EXPORT_SYMBOL(drm_sched_entity_set_rq);
 
+/**
+ * drm_sched_dependency_optimized
+ *
+ * @fence: the dependency fence
+ * @entity: the entity which depends on the above fence
+ *
+ * Returns true if the dependency can be optimized and false otherwise
+ */
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity)
 {
@@ -413,9 +516,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 }
 
 /**
- * Submit a job to the job queue
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
- * @sched_job          The pointer to job required to submit
+ * @sched_job: job to submit
+ * @entity: scheduler entity
  *
  * Note: To guarantee that the order of insertion to queue matches
  * the job's fence sequence number this function should be
@@ -506,6 +610,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
        job->sched->ops->timedout_job(job);
 }
 
+/**
+ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ *
+ * @sched: scheduler instance
+ * @bad: bad scheduler job
+ *
+ */
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
        struct drm_sched_job *s_job;
@@ -550,6 +661,12 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 }
 EXPORT_SYMBOL(drm_sched_hw_job_reset);
 
+/**
+ * drm_sched_job_recovery - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ *
+ */
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
        struct drm_sched_job *s_job, *tmp;
@@ -599,10 +716,17 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
 /**
- * Init a sched_job with basic field
+ * drm_sched_job_init - init a scheduler job
+ *
+ * @job: scheduler job to init
+ * @sched: scheduler instance
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
  *
- * Note: Refer to drm_sched_entity_push_job documentation
+ * Refer to drm_sched_entity_push_job() documentation
  * for locking considerations.
+ *
+ * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_gpu_scheduler *sched,
@@ -626,7 +750,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
- * Return ture if we can push more jobs to the hw.
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
  */
 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 {
@@ -635,7 +763,10 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * Wake up the scheduler when it is ready
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
  */
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
@@ -644,8 +775,12 @@ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * Select next entity to process
-*/
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
 static struct drm_sched_entity *
 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 {
@@ -665,6 +800,14 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
        return entity;
 }
 
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ * Called after a job has finished execution.
+ */
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
        struct drm_sched_fence *s_fence =
@@ -680,6 +823,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 {
        if (kthread_should_park()) {
@@ -690,6 +840,13 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
        return false;
 }
 
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
 static int drm_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
@@ -744,15 +901,17 @@ static int drm_sched_main(void *param)
 }
 
 /**
- * Init a gpu scheduler instance
+ * drm_sched_init - Init a gpu scheduler instance
  *
- * @sched              The pointer to the scheduler
- * @ops                        The backend operations for this scheduler.
- * @hw_submissions     Number of hw submissions to do.
- * @name               Name used for debugging
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
  *
  * Return 0 on success, otherwise error code.
-*/
+ */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   unsigned hw_submission,
@@ -788,9 +947,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 EXPORT_SYMBOL(drm_sched_init);
 
 /**
- * Destroy a gpu scheduler
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
  *
- * @sched      The pointer to the scheduler
+ * Tears down and cleans up the scheduler.
  */
 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
index 54acc11..6b943ea 100644 (file)
@@ -19,7 +19,9 @@ selftest(align64, igt_align64)
 selftest(evict, igt_evict)
 selftest(evict_range, igt_evict_range)
 selftest(bottomup, igt_bottomup)
+selftest(lowest, igt_lowest)
 selftest(topdown, igt_topdown)
+selftest(highest, igt_highest)
 selftest(color, igt_color)
 selftest(color_evict, igt_color_evict)
 selftest(color_evict_range, igt_color_evict_range)
index 933af1c..fbed2c9 100644 (file)
@@ -1825,6 +1825,77 @@ err:
        return ret;
 }
 
+static int __igt_once(unsigned int mode)
+{
+       struct drm_mm mm;
+       struct drm_mm_node rsvd_lo, rsvd_hi, node;
+       int err;
+
+       drm_mm_init(&mm, 0, 7);
+
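+       /*
+        * Layout after the two reservations below: hole [0, 1), rsvd_lo
+        * [1, 2), hole [2, 5), rsvd_hi [5, 6), hole [6, 7).  Only the
+        * middle hole fits a size-2 node, so a single-pass ONCE search
+        * starting from either end must fail.
+        */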
+       memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+       rsvd_lo.start = 1;
+       rsvd_lo.size = 1;
+       err = drm_mm_reserve_node(&mm, &rsvd_lo);
+       if (err) {
+               pr_err("Could not reserve low node\n");
+               goto err;
+       }
+
+       memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+       rsvd_hi.start = 5;
+       rsvd_hi.size = 1;
+       err = drm_mm_reserve_node(&mm, &rsvd_hi);
+       if (err) {
+               pr_err("Could not reserve low node\n");
+               goto err_lo;
+       }
+
+       if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+               pr_err("Expected a hole after lo and high nodes!\n");
+               err = -EINVAL;
+               goto err_hi;
+       }
+
+       memset(&node, 0, sizeof(node));
+       err = drm_mm_insert_node_generic(&mm, &node,
+                                        2, 0, 0,
+                                        mode | DRM_MM_INSERT_ONCE);
+       if (!err) {
+               pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
+                      node.start);
+               err = -EINVAL;
+               goto err_node;
+       }
+
+       err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
+       if (err) {
+               pr_err("Could not insert the node into the available hole!\n");
+               err = -EINVAL;
+               goto err_hi;
+       }
+
+err_node:
+       drm_mm_remove_node(&node);
+err_hi:
+       drm_mm_remove_node(&rsvd_hi);
+err_lo:
+       drm_mm_remove_node(&rsvd_lo);
+err:
+       drm_mm_takedown(&mm);
+       return err;
+}
+
+static int igt_lowest(void *ignored)
+{
+       return __igt_once(DRM_MM_INSERT_LOW);
+}
+
+static int igt_highest(void *ignored)
+{
+       return __igt_once(DRM_MM_INSERT_HIGH);
+}
+
 static void separate_adjacent_colors(const struct drm_mm_node *node,
                                     unsigned long color,
                                     u64 *start,
index 9b2c470..49813d3 100644 (file)
@@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
        struct drm_info_node *node = s->private;
        struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
        struct drm_plane *drm_plane = &gdp->plane.drm_plane;
-       struct drm_crtc *crtc = drm_plane->crtc;
+       struct drm_crtc *crtc;
+
+       drm_modeset_lock(&drm_plane->mutex, NULL);
+       crtc = drm_plane->state->crtc;
+       drm_modeset_unlock(&drm_plane->mutex);
 
        seq_printf(s, "%s: (vaddr = 0x%p)",
                   sti_plane_to_str(&gdp->plane), gdp->regs);
index 2589f4a..14f420f 100644 (file)
@@ -36,4 +36,4 @@ obj-$(CONFIG_DRM_SUN4I_BACKEND)       += sun4i-backend.o sun4i-frontend.o
 obj-$(CONFIG_DRM_SUN4I_HDMI)   += sun4i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN6I_DSI)    += sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)        += sun8i-drm-hdmi.o
-obj-$(CONFIG_DRM_SUN8I_MIXER)  += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_MIXER)  += sun8i-mixer.o sun8i_tcon_top.o
index 50d1960..6ddf4ea 100644 (file)
@@ -26,6 +26,7 @@
 #include "sun4i_frontend.h"
 #include "sun4i_framebuffer.h"
 #include "sun4i_tcon.h"
+#include "sun8i_tcon_top.h"
 
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
 
@@ -197,6 +198,27 @@ static bool sun4i_drv_node_is_tcon(struct device_node *node)
        return !!of_match_node(sun4i_tcon_of_table, node);
 }
 
+static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
+{
+       const struct of_device_id *match;
+
+       match = of_match_node(sun4i_tcon_of_table, node);
+       if (match) {
+               const struct sun4i_tcon_quirks *quirks = match->data;
+
+               return quirks->has_channel_0;
+       }
+
+       return false;
+}
+
+static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
+{
+       return !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
 static int compare_of(struct device *dev, void *data)
 {
        DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -231,12 +253,69 @@ struct endpoint_list {
        DECLARE_KFIFO(fifo, struct device_node *, 16);
 };
 
+static void sun4i_drv_traverse_endpoints(struct endpoint_list *list,
+                                        struct device_node *node,
+                                        int port_id)
+{
+       struct device_node *ep, *remote, *port;
+
+       port = of_graph_get_port_by_id(node, port_id);
+       if (!port) {
+               DRM_DEBUG_DRIVER("No output to bind on port %d\n", port_id);
+               return;
+       }
+
+       for_each_available_child_of_node(port, ep) {
+               remote = of_graph_get_remote_port_parent(ep);
+               if (!remote) {
+                       DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+                       continue;
+               }
+
+               if (sun4i_drv_node_is_tcon(node)) {
+                       /*
+                        * TCON TOP is always probed before TCON. However, TCON
+                        * points back to TCON TOP when it is the source for HDMI.
+                        * We have to skip it here to prevent infinite looping
+                        * between TCON TOP and TCON.
+                        */
+                       if (sun4i_drv_node_is_tcon_top(remote)) {
+                               DRM_DEBUG_DRIVER("TCON output endpoint is TCON TOP... skipping\n");
+                               of_node_put(remote);
+                               continue;
+                       }
+
+                       /*
+                        * If the node is our TCON with channel 0, the first
+                        * port is used for panel or bridges, and will not be
+                        * part of the component framework.
+                        */
+                       if (sun4i_drv_node_is_tcon_with_ch0(node)) {
+                               struct of_endpoint endpoint;
+
+                               if (of_graph_parse_endpoint(ep, &endpoint)) {
+                                       DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+                                       of_node_put(remote);
+                                       continue;
+                               }
+
+                               if (!endpoint.id) {
+                                       DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+                                       of_node_put(remote);
+                                       continue;
+                               }
+                       }
+               }
+
+               kfifo_put(&list->fifo, remote);
+       }
+}
+
 static int sun4i_drv_add_endpoints(struct device *dev,
                                   struct endpoint_list *list,
                                   struct component_match **match,
                                   struct device_node *node)
 {
-       struct device_node *port, *ep, *remote;
        int count = 0;
 
        /*
@@ -272,41 +351,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
                count++;
        }
 
-       /* Inputs are listed first, then outputs */
-       port = of_graph_get_port_by_id(node, 1);
-       if (!port) {
-               DRM_DEBUG_DRIVER("No output to bind\n");
-               return count;
-       }
+       /* each node has at least one output */
+       sun4i_drv_traverse_endpoints(list, node, 1);
 
-       for_each_available_child_of_node(port, ep) {
-               remote = of_graph_get_remote_port_parent(ep);
-               if (!remote) {
-                       DRM_DEBUG_DRIVER("Error retrieving the output node\n");
-                       of_node_put(remote);
-                       continue;
-               }
-
-               /*
-                * If the node is our TCON, the first port is used for
-                * panel or bridges, and will not be part of the
-                * component framework.
-                */
-               if (sun4i_drv_node_is_tcon(node)) {
-                       struct of_endpoint endpoint;
-
-                       if (of_graph_parse_endpoint(ep, &endpoint)) {
-                               DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
-                               continue;
-                       }
-
-                       if (!endpoint.id) {
-                               DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
-                               continue;
-                       }
-               }
-
-               kfifo_put(&list->fifo, remote);
+       /* TCON TOP has a second and a third output */
+       if (sun4i_drv_node_is_tcon_top(node)) {
+               sun4i_drv_traverse_endpoints(list, node, 3);
+               sun4i_drv_traverse_endpoints(list, node, 5);
        }
 
        return count;
index 8232b39..aacc841 100644 (file)
@@ -766,12 +766,14 @@ static int sun4i_tcon_init_regmap(struct device *dev,
  */
 static struct sunxi_engine *
 sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
-                               struct device_node *node)
+                               struct device_node *node,
+                               u32 port_id)
 {
        struct device_node *port, *ep, *remote;
        struct sunxi_engine *engine = ERR_PTR(-EINVAL);
+       u32 reg = 0;
 
-       port = of_graph_get_port_by_id(node, 0);
+       port = of_graph_get_port_by_id(node, port_id);
        if (!port)
                return ERR_PTR(-EINVAL);
 
@@ -801,8 +803,20 @@ sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
                if (remote == engine->node)
                        goto out_put_remote;
 
+       /*
+        * According to the device tree binding, input ports have even
+        * ids and output ports have odd ids. Since a component with
+        * more than one input and one output (TCON TOP) exists, the
+        * correct remote input id has to be calculated by subtracting
+        * 1 from the remote output id. If this for some reason can't
+        * be done, 0 is used as the input port id.
+        */
+       port = of_graph_get_remote_port(ep);
+       if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
+               reg -= 1;
+
        /* keep looking through upstream ports */
-       engine = sun4i_tcon_find_engine_traverse(drv, remote);
+       engine = sun4i_tcon_find_engine_traverse(drv, remote, reg);
 
 out_put_remote:
        of_node_put(remote);
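A worked example of the even/odd rule in the comment above (ids hypothetical): if the endpoint's remote output port carries reg = 3, the matching input port on the remote node is reg - 1 = 2, which is the port_id handed to the next level of the recursion:

	/* Sketch: outputs are odd (1, 3, 5, ...), inputs even (0, 2, 4, ...). */
	u32 reg = 0;

	port = of_graph_get_remote_port(ep);	/* say its reg is 3 */
	if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
		reg -= 1;			/* traverse via input port 2 */
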
@@ -925,7 +939,7 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
 
        /* Fallback to old method by traversing input endpoints */
        of_node_put(port);
-       return sun4i_tcon_find_engine_traverse(drv, node);
+       return sun4i_tcon_find_engine_traverse(drv, node, 0);
 }
 
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -1067,23 +1081,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                goto err_free_dotclock;
        }
 
-       /*
-        * If we have an LVDS panel connected to the TCON, we should
-        * just probe the LVDS connector. Otherwise, just probe RGB as
-        * we used to.
-        */
-       remote = of_graph_get_remote_node(dev->of_node, 1, 0);
-       if (of_device_is_compatible(remote, "panel-lvds"))
-               if (can_lvds)
-                       ret = sun4i_lvds_init(drm, tcon);
+       if (tcon->quirks->has_channel_0) {
+               /*
+                * If we have an LVDS panel connected to the TCON, we should
+                * just probe the LVDS connector. Otherwise, just probe RGB as
+                * we used to.
+                */
+               remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+               if (of_device_is_compatible(remote, "panel-lvds"))
+                       if (can_lvds)
+                               ret = sun4i_lvds_init(drm, tcon);
+                       else
+                               ret = -EINVAL;
                else
-                       ret = -EINVAL;
-       else
-               ret = sun4i_rgb_init(drm, tcon);
-       of_node_put(remote);
+                       ret = sun4i_rgb_init(drm, tcon);
+               of_node_put(remote);
 
-       if (ret < 0)
-               goto err_free_dotclock;
+               if (ret < 0)
+                       goto err_free_dotclock;
+       }
 
        if (tcon->quirks->needs_de_be_mux) {
                /*
@@ -1137,13 +1153,19 @@ static const struct component_ops sun4i_tcon_ops = {
 static int sun4i_tcon_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
+       const struct sun4i_tcon_quirks *quirks;
        struct drm_bridge *bridge;
        struct drm_panel *panel;
        int ret;
 
-       ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
-       if (ret == -EPROBE_DEFER)
-               return ret;
+       quirks = of_device_get_match_data(&pdev->dev);
+
+       /* panels and bridges are present only on TCONs with channel 0 */
+       if (quirks->has_channel_0) {
+               ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        return component_add(&pdev->dev, &sun4i_tcon_ops);
 }
index bfbf761..d4e7d16 100644 (file)
@@ -1040,7 +1040,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int sun6i_dsi_runtime_resume(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
 {
        struct sun6i_dsi *dsi = dev_get_drvdata(dev);
 
@@ -1069,7 +1069,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
        return 0;
 }
 
-static int sun6i_dsi_runtime_suspend(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
 {
        struct sun6i_dsi *dsi = dev_get_drvdata(dev);
 
index 9f40a44..3459b9e 100644 (file)
@@ -12,6 +12,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "sun8i_dw_hdmi.h"
+#include "sun8i_tcon_top.h"
 
 static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
                                           struct drm_display_mode *mode,
@@ -41,6 +42,48 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
+static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
+{
+       return !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
+                                            struct device_node *node)
+{
+       struct device_node *port, *ep, *remote, *remote_port;
+       u32 crtcs = 0;
+
+       port = of_graph_get_port_by_id(node, 0);
+       if (!port)
+               return 0;
+
+       ep = of_get_next_available_child(port, NULL);
+       if (!ep)
+               return 0;
+
+       remote = of_graph_get_remote_port_parent(ep);
+       if (!remote)
+               return 0;
+
+       if (sun8i_dw_hdmi_node_is_tcon_top(remote)) {
+               port = of_graph_get_port_by_id(remote, 4);
+               if (!port)
+                       return 0;
+
+               for_each_child_of_node(port, ep) {
+                       remote_port = of_graph_get_remote_port(ep);
+                       if (remote_port) {
+                               crtcs |= drm_of_crtc_port_mask(drm, remote_port);
+                               of_node_put(remote_port);
+                       }
+               }
+       } else {
+               crtcs = drm_of_find_possible_crtcs(drm, node);
+       }
+
+       return crtcs;
+}
+
 static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                              void *data)
 {
@@ -63,7 +106,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
        hdmi->dev = &pdev->dev;
        encoder = &hdmi->encoder;
 
-       encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+       encoder->possible_crtcs =
+               sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
        /*
         * If we failed to find the CRTC(s) which this encoder is
         * supposed to be connected to, it's because the CRTC has
index 79154f0..aadbe0a 100644 (file)
@@ -98,7 +98,8 @@
 #define SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN                BIT(29)
 #define SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN                BIT(28)
 #define SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33       BIT(27)
-#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL       BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK   BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT 26
 #define SUN8I_HDMI_PHY_PLL_CFG1_PLLEN          BIT(25)
 #define SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(x)    ((x) << 22)
 #define SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(x)     ((x) << 20)
@@ -147,6 +148,7 @@ struct sun8i_hdmi_phy;
 
 struct sun8i_hdmi_phy_variant {
        bool has_phy_clk;
+       bool has_second_pll;
        void (*phy_init)(struct sun8i_hdmi_phy *phy);
        void (*phy_disable)(struct dw_hdmi *hdmi,
                            struct sun8i_hdmi_phy *phy);
@@ -160,6 +162,7 @@ struct sun8i_hdmi_phy {
        struct clk                      *clk_mod;
        struct clk                      *clk_phy;
        struct clk                      *clk_pll0;
+       struct clk                      *clk_pll1;
        unsigned int                    rcal;
        struct regmap                   *regs;
        struct reset_control            *rst_phy;
@@ -188,6 +191,7 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
 const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
 
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev);
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+                        bool second_parent);
 
 #endif /* _SUN8I_DW_HDMI_H_ */
index 5a52fc4..82502b3 100644 (file)
@@ -183,7 +183,13 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
        regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
                           SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
 
-       regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, pll_cfg1_init);
+       /*
+        * NOTE: We have to be careful not to overwrite the PHY parent
+        * clock selection bit and the clock divider.
+        */
+       regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+                          (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+                          pll_cfg1_init);
        regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
                           (u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
                           pll_cfg2_init);
@@ -352,6 +358,10 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
                           SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
                           SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
 
+       /* reset PHY PLL clock parent */
+       regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+                          SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK, 0);
+
        /* set HW control of CEC pins */
        regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
 
@@ -386,6 +396,14 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
        .name           = "phy"
 };
 
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+       .has_phy_clk = true,
+       .has_second_pll = true,
+       .phy_init = &sun8i_hdmi_phy_init_h3,
+       .phy_disable = &sun8i_hdmi_phy_disable_h3,
+       .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
 static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
        .phy_init = &sun8i_hdmi_phy_init_a83t,
        .phy_disable = &sun8i_hdmi_phy_disable_a83t,
@@ -401,6 +419,10 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
 
 static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
        {
+               .compatible = "allwinner,sun50i-a64-hdmi-phy",
+               .data = &sun50i_a64_hdmi_phy,
+       },
+       {
                .compatible = "allwinner,sun8i-a83t-hdmi-phy",
                .data = &sun8i_a83t_hdmi_phy,
        },
@@ -472,18 +494,30 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
                        goto err_put_clk_mod;
                }
 
-               ret = sun8i_phy_clk_create(phy, dev);
+               if (phy->variant->has_second_pll) {
+                       phy->clk_pll1 = of_clk_get_by_name(node, "pll-1");
+                       if (IS_ERR(phy->clk_pll1)) {
+                               dev_err(dev, "Could not get pll-1 clock\n");
+                               ret = PTR_ERR(phy->clk_pll1);
+                               goto err_put_clk_pll0;
+                       }
+               }
+
+               ret = sun8i_phy_clk_create(phy, dev,
+                                          phy->variant->has_second_pll);
                if (ret) {
                        dev_err(dev, "Couldn't create the PHY clock\n");
-                       goto err_put_clk_pll0;
+                       goto err_put_clk_pll1;
                }
+
+               clk_prepare_enable(phy->clk_phy);
        }
 
        phy->rst_phy = of_reset_control_get_shared(node, "phy");
        if (IS_ERR(phy->rst_phy)) {
                dev_err(dev, "Could not get phy reset control\n");
                ret = PTR_ERR(phy->rst_phy);
-               goto err_put_clk_pll0;
+               goto err_disable_clk_phy;
        }
 
        ret = reset_control_deassert(phy->rst_phy);
@@ -514,9 +548,12 @@ err_deassert_rst_phy:
        reset_control_assert(phy->rst_phy);
 err_put_rst_phy:
        reset_control_put(phy->rst_phy);
+err_disable_clk_phy:
+       clk_disable_unprepare(phy->clk_phy);
+err_put_clk_pll1:
+       clk_put(phy->clk_pll1);
 err_put_clk_pll0:
-       if (phy->variant->has_phy_clk)
-               clk_put(phy->clk_pll0);
+       clk_put(phy->clk_pll0);
 err_put_clk_mod:
        clk_put(phy->clk_mod);
 err_put_clk_bus:
@@ -531,13 +568,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
 
        clk_disable_unprepare(phy->clk_mod);
        clk_disable_unprepare(phy->clk_bus);
+       clk_disable_unprepare(phy->clk_phy);
 
        reset_control_assert(phy->rst_phy);
 
        reset_control_put(phy->rst_phy);
 
-       if (phy->variant->has_phy_clk)
-               clk_put(phy->clk_pll0);
+       clk_put(phy->clk_pll0);
+       clk_put(phy->clk_pll1);
        clk_put(phy->clk_mod);
        clk_put(phy->clk_bus);
 }
index faea449..a4d31fe 100644 (file)
@@ -22,35 +22,45 @@ static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
 {
        unsigned long rate = req->rate;
        unsigned long best_rate = 0;
+       struct clk_hw *best_parent = NULL;
        struct clk_hw *parent;
        int best_div = 1;
-       int i;
+       int i, p;
 
-       parent = clk_hw_get_parent(hw);
-
-       for (i = 1; i <= 16; i++) {
-               unsigned long ideal = rate * i;
-               unsigned long rounded;
-
-               rounded = clk_hw_round_rate(parent, ideal);
+       for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+               parent = clk_hw_get_parent_by_index(hw, p);
+               if (!parent)
+                       continue;
 
-               if (rounded == ideal) {
-                       best_rate = rounded;
-                       best_div = i;
-                       break;
+               for (i = 1; i <= 16; i++) {
+                       unsigned long ideal = rate * i;
+                       unsigned long rounded;
+
+                       rounded = clk_hw_round_rate(parent, ideal);
+
+                       if (rounded == ideal) {
+                               best_rate = rounded;
+                               best_div = i;
+                               best_parent = parent;
+                               break;
+                       }
+
+                       if (!best_rate ||
+                           abs(rate - rounded / i) <
+                           abs(rate - best_rate / best_div)) {
+                               best_rate = rounded;
+                               best_div = i;
+                               best_parent = parent;
+                       }
                }
 
-               if (!best_rate ||
-                   abs(rate - rounded / i) <
-                   abs(rate - best_rate / best_div)) {
-                       best_rate = rounded;
-                       best_div = i;
-               }
+               if (best_rate / best_div == rate)
+                       break;
        }
 
        req->rate = best_rate / best_div;
        req->best_parent_rate = best_rate;
-       req->best_parent_hw = parent;
+       req->best_parent_hw = best_parent;
 
        return 0;
 }
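To make the two-level search above concrete, a hedged worked example (rates hypothetical): a request for 74250000 Hz against a parent PLL that can produce exactly 297000000 Hz is satisfied on the first exact hit of the inner loop:

	/* rate = 74250000, divider i = 4:
	 *   ideal   = 74250000 * 4 = 297000000
	 *   rounded = clk_hw_round_rate(parent, ideal) = 297000000
	 * rounded == ideal, so best_parent = this PLL, best_div = 4,
	 * and the caller is promised req->rate = 297000000 / 4
	 * = 74250000.
	 */
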
@@ -95,22 +105,58 @@ static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static u8 sun8i_phy_clk_get_parent(struct clk_hw *hw)
+{
+       struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+       u32 reg;
+
+       regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, &reg);
+       reg = (reg & SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK) >>
+             SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT;
+
+       return reg;
+}
+
+static int sun8i_phy_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+
+       if (index > 1)
+               return -EINVAL;
+
+       regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+                          SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+                          index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);
+
+       return 0;
+}
+
 static const struct clk_ops sun8i_phy_clk_ops = {
        .determine_rate = sun8i_phy_clk_determine_rate,
        .recalc_rate    = sun8i_phy_clk_recalc_rate,
        .set_rate       = sun8i_phy_clk_set_rate,
+
+       .get_parent     = sun8i_phy_clk_get_parent,
+       .set_parent     = sun8i_phy_clk_set_parent,
 };
 
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+                        bool second_parent)
 {
        struct clk_init_data init;
        struct sun8i_phy_clk *priv;
-       const char *parents[1];
+       const char *parents[2];
 
        parents[0] = __clk_get_name(phy->clk_pll0);
        if (!parents[0])
                return -ENODEV;
 
+       if (second_parent) {
+               parents[1] = __clk_get_name(phy->clk_pll1);
+               if (!parents[1])
+                       return -ENODEV;
+       }
+
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
@@ -118,7 +164,7 @@ int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
        init.name = "hdmi-phy-clk";
        init.ops = &sun8i_phy_clk_ops;
        init.parent_names = parents;
-       init.num_parents = 1;
+       init.num_parents = second_parent ? 2 : 1;
        init.flags = CLK_SET_RATE_PARENT;
 
        priv->phy = phy;
index 126899d..ee8febb 100644 (file)
@@ -500,6 +500,22 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
        .vi_num         = 1,
 };
 
+static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
+       .ccsc           = 0,
+       .mod_rate       = 297000000,
+       .scaler_mask    = 0xf,
+       .ui_num         = 3,
+       .vi_num         = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
+       .ccsc           = 1,
+       .mod_rate       = 297000000,
+       .scaler_mask    = 0x3,
+       .ui_num         = 1,
+       .vi_num         = 1,
+};
+
 static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
        .vi_num = 2,
        .ui_num = 1,
@@ -522,6 +538,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
                .data = &sun8i_h3_mixer0_cfg,
        },
        {
+               .compatible = "allwinner,sun8i-r40-de2-mixer-0",
+               .data = &sun8i_r40_mixer0_cfg,
+       },
+       {
+               .compatible = "allwinner,sun8i-r40-de2-mixer-1",
+               .data = &sun8i_r40_mixer1_cfg,
+       },
+       {
                .compatible = "allwinner,sun8i-v3s-de2-mixer",
                .data = &sun8i_v3s_mixer_cfg,
        },
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
new file mode 100644 (file)
index 0000000..8da0460
--- /dev/null
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#include <drm/drmP.h>
+
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "sun8i_tcon_top.h"
+
+static int sun8i_tcon_top_get_connected_ep_id(struct device_node *node,
+                                             int port_id)
+{
+       struct device_node *ep, *remote, *port;
+       struct of_endpoint endpoint;
+
+       port = of_graph_get_port_by_id(node, port_id);
+       if (!port)
+               return -ENOENT;
+
+       for_each_available_child_of_node(port, ep) {
+               remote = of_graph_get_remote_port_parent(ep);
+               if (!remote)
+                       continue;
+
+               if (of_device_is_available(remote)) {
+                       of_graph_parse_endpoint(ep, &endpoint);
+
+                       of_node_put(remote);
+
+                       return endpoint.id;
+               }
+
+               of_node_put(remote);
+       }
+
+       return -ENOENT;
+}
+
+static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev,
+                                                  struct clk *parent,
+                                                  void __iomem *regs,
+                                                  spinlock_t *lock,
+                                                  u8 bit, int name_index)
+{
+       const char *clk_name, *parent_name;
+       int ret;
+
+       parent_name = __clk_get_name(parent);
+       ret = of_property_read_string_index(dev->of_node,
+                                           "clock-output-names", name_index,
+                                           &clk_name);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return clk_hw_register_gate(dev, clk_name, parent_name,
+                                   CLK_SET_RATE_PARENT,
+                                   regs + TCON_TOP_GATE_SRC_REG,
+                                   bit, 0, lock);
+}
+
+static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
+                              void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct clk *dsi, *tcon_tv0, *tcon_tv1, *tve0, *tve1;
+       struct clk_hw_onecell_data *clk_data;
+       struct sun8i_tcon_top *tcon_top;
+       bool mixer0_unused = false;
+       struct resource *res;
+       void __iomem *regs;
+       int ret, i, id;
+       u32 val;
+
+       tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
+       if (!tcon_top)
+               return -ENOMEM;
+
+       clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
+                               sizeof(*clk_data->hws) * CLK_NUM,
+                               GFP_KERNEL);
+       if (!clk_data)
+               return -ENOMEM;
+       tcon_top->clk_data = clk_data;
+
+       spin_lock_init(&tcon_top->reg_lock);
+
+       tcon_top->rst = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(tcon_top->rst)) {
+               dev_err(dev, "Couldn't get our reset line\n");
+               return PTR_ERR(tcon_top->rst);
+       }
+
+       tcon_top->bus = devm_clk_get(dev, "bus");
+       if (IS_ERR(tcon_top->bus)) {
+               dev_err(dev, "Couldn't get the bus clock\n");
+               return PTR_ERR(tcon_top->bus);
+       }
+
+       dsi = devm_clk_get(dev, "dsi");
+       if (IS_ERR(dsi)) {
+               dev_err(dev, "Couldn't get the dsi clock\n");
+               return PTR_ERR(dsi);
+       }
+
+       tcon_tv0 = devm_clk_get(dev, "tcon-tv0");
+       if (IS_ERR(tcon_tv0)) {
+               dev_err(dev, "Couldn't get the tcon-tv0 clock\n");
+               return PTR_ERR(tcon_tv0);
+       }
+
+       tcon_tv1 = devm_clk_get(dev, "tcon-tv1");
+       if (IS_ERR(tcon_tv1)) {
+               dev_err(dev, "Couldn't get the tcon-tv1 clock\n");
+               return PTR_ERR(tcon_tv1);
+       }
+
+       tve0 = devm_clk_get(dev, "tve0");
+       if (IS_ERR(tve0)) {
+               dev_err(dev, "Couldn't get the tve0 clock\n");
+               return PTR_ERR(tve0);
+       }
+
+       tve1 = devm_clk_get(dev, "tve1");
+       if (IS_ERR(tve1)) {
+               dev_err(dev, "Couldn't get the tve1 clock\n");
+               return PTR_ERR(tve1);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+
+       ret = reset_control_deassert(tcon_top->rst);
+       if (ret) {
+               dev_err(dev, "Could not deassert ctrl reset control\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(tcon_top->bus);
+       if (ret) {
+               dev_err(dev, "Could not enable bus clock\n");
+               goto err_assert_reset;
+       }
+
+       val = 0;
+
+       /* check if HDMI mux output is connected */
+       if (sun8i_tcon_top_get_connected_ep_id(dev->of_node, 5) >= 0) {
+               /* find HDMI input endpoint id, if it is connected at all */
+               id = sun8i_tcon_top_get_connected_ep_id(dev->of_node, 4);
+               if (id >= 0)
+                       val = FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, id + 1);
+               else
+                       DRM_DEBUG_DRIVER("TCON TOP HDMI input is not connected\n");
+       } else {
+               DRM_DEBUG_DRIVER("TCON TOP HDMI output is not connected\n");
+       }
+
+       writel(val, regs + TCON_TOP_GATE_SRC_REG);
+
+       val = 0;
+
+       /* process mixer0 mux output */
+       id = sun8i_tcon_top_get_connected_ep_id(dev->of_node, 1);
+       if (id >= 0) {
+               val = FIELD_PREP(TCON_TOP_PORT_DE0_MSK, id);
+       } else {
+               DRM_DEBUG_DRIVER("TCON TOP mixer0 output is not connected\n");
+               mixer0_unused = true;
+       }
+
+       /* process mixer1 mux output */
+       id = sun8i_tcon_top_get_connected_ep_id(dev->of_node, 3);
+       if (id >= 0) {
+               val |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, id);
+
+               /*
+                * The mixer0 mux has priority over the mixer1 mux. We
+                * have to make sure mixer0 doesn't take the TCON away
+                * from mixer1.
+                */
+               if (mixer0_unused && id == 0)
+                       val |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, 1);
+       } else {
+               DRM_DEBUG_DRIVER("TCON TOP mixer1 output is not connected\n");
+       }
+
+       writel(val, regs + TCON_TOP_PORT_SEL_REG);
+
+       /*
+        * TCON TOP has two muxes, which select the parent clock for each
+        * TCON TV channel clock. The parent can be either the TCON TV or
+        * the TVE clock. For now we leave this fixed to TCON TV, since the
+        * TVE driver for R40 is not yet implemented. Once it is, the graph
+        * needs to be traversed to determine whether TVE is active on each
+        * TCON TV and, if so, the mux should be switched to the TVE clock
+        * parent.
+        */
+       clk_data->hws[CLK_TCON_TOP_TV0] =
+               sun8i_tcon_top_register_gate(dev, tcon_tv0, regs,
+                                            &tcon_top->reg_lock,
+                                            TCON_TOP_TCON_TV0_GATE, 0);
+
+       clk_data->hws[CLK_TCON_TOP_TV1] =
+               sun8i_tcon_top_register_gate(dev, tcon_tv1, regs,
+                                            &tcon_top->reg_lock,
+                                            TCON_TOP_TCON_TV1_GATE, 1);
+
+       clk_data->hws[CLK_TCON_TOP_DSI] =
+               sun8i_tcon_top_register_gate(dev, dsi, regs,
+                                            &tcon_top->reg_lock,
+                                            TCON_TOP_TCON_DSI_GATE, 2);
+
+       for (i = 0; i < CLK_NUM; i++)
+               if (IS_ERR(clk_data->hws[i])) {
+                       ret = PTR_ERR(clk_data->hws[i]);
+                       goto err_unregister_gates;
+               }
+
+       clk_data->num = CLK_NUM;
+
+       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+                                    clk_data);
+       if (ret)
+               goto err_unregister_gates;
+
+       dev_set_drvdata(dev, tcon_top);
+
+       return 0;
+
+err_unregister_gates:
+       for (i = 0; i < CLK_NUM; i++)
+               if (clk_data->hws[i])
+                       clk_hw_unregister_gate(clk_data->hws[i]);
+       clk_disable_unprepare(tcon_top->bus);
+err_assert_reset:
+       reset_control_assert(tcon_top->rst);
+
+       return ret;
+}
+
+static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
+                                 void *data)
+{
+       struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+       struct clk_hw_onecell_data *clk_data = tcon_top->clk_data;
+       int i;
+
+       of_clk_del_provider(dev->of_node);
+       for (i = 0; i < CLK_NUM; i++)
+               clk_hw_unregister_gate(clk_data->hws[i]);
+
+       clk_disable_unprepare(tcon_top->bus);
+       reset_control_assert(tcon_top->rst);
+}
+
+static const struct component_ops sun8i_tcon_top_ops = {
+       .bind   = sun8i_tcon_top_bind,
+       .unbind = sun8i_tcon_top_unbind,
+};
+
+static int sun8i_tcon_top_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &sun8i_tcon_top_ops);
+}
+
+static int sun8i_tcon_top_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &sun8i_tcon_top_ops);
+
+       return 0;
+}
+
+/* sun4i_drv uses this list to check if a device node is a TCON TOP */
+const struct of_device_id sun8i_tcon_top_of_table[] = {
+       { .compatible = "allwinner,sun8i-r40-tcon-top" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
+EXPORT_SYMBOL(sun8i_tcon_top_of_table);
+
+static struct platform_driver sun8i_tcon_top_platform_driver = {
+       .probe          = sun8i_tcon_top_probe,
+       .remove         = sun8i_tcon_top_remove,
+       .driver         = {
+               .name           = "sun8i-tcon-top",
+               .of_match_table = sun8i_tcon_top_of_table,
+       },
+};
+module_platform_driver(sun8i_tcon_top_platform_driver);
+
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner R40 TCON TOP driver");
+MODULE_LICENSE("GPL");
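For reference, the FIELD_PREP() calls in sun8i_tcon_top_bind() above shift a mux selector into place under the masks defined in sun8i_tcon_top.h (shown in the next file). A hedged, self-contained illustration:

	#include <linux/bitfield.h>
	#include <linux/bitops.h>

	#define TCON_TOP_HDMI_SRC_MSK	GENMASK(29, 28)

	/* Selecting HDMI input endpoint id 1 stores (1 + 1) = 2 in
	 * bits 29:28, i.e. val == 0x20000000.
	 */
	u32 val = FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, 1 + 1);
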
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
new file mode 100644 (file)
index 0000000..39838bb
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _SUN8I_TCON_TOP_H_
+#define _SUN8I_TCON_TOP_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define TCON_TOP_TCON_TV_SETUP_REG     0x00
+
+#define TCON_TOP_PORT_SEL_REG          0x1C
+#define TCON_TOP_PORT_DE0_MSK                  GENMASK(1, 0)
+#define TCON_TOP_PORT_DE1_MSK                  GENMASK(5, 4)
+
+#define TCON_TOP_GATE_SRC_REG          0x20
+#define TCON_TOP_HDMI_SRC_MSK                  GENMASK(29, 28)
+#define TCON_TOP_TCON_TV1_GATE                 24
+#define TCON_TOP_TCON_TV0_GATE                 20
+#define TCON_TOP_TCON_DSI_GATE                 16
+
+#define CLK_NUM                                        3
+
+struct sun8i_tcon_top {
+       struct clk                      *bus;
+       struct clk_hw_onecell_data      *clk_data;
+       struct reset_control            *rst;
+
+       /*
+        * The spinlock synchronizes access to the shared register
+        * in which multiple clock gates can be set.
+        */
+       spinlock_t                      reg_lock;
+};
+
+extern const struct of_device_id sun8i_tcon_top_of_table[];
+
+#endif /* _SUN8I_TCON_TOP_H_ */
index 00a5c9f..4f80100 100644 (file)
@@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
        return 0;
 }
 
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
-                                        unsigned long page)
-{
-       return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
-                                         unsigned long page,
-                                         void *addr)
-{
-}
-
 static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
 {
        return NULL;
@@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .release = tegra_gem_prime_release,
        .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
        .end_cpu_access = tegra_gem_prime_end_cpu_access,
-       .map_atomic = tegra_gem_prime_kmap_atomic,
-       .unmap_atomic = tegra_gem_prime_kunmap_atomic,
        .map = tegra_gem_prime_kmap,
        .unmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
index 4592a5e..7a8008b 100644 (file)
@@ -20,6 +20,16 @@ config TINYDRM_ILI9225
 
          If M is selected the module will be called ili9225.
 
+config TINYDRM_ILI9341
+       tristate "DRM support for ILI9341 display panels"
+       depends on DRM_TINYDRM && SPI
+       select TINYDRM_MIPI_DBI
+       help
+         DRM driver for the following Ilitek ILI9341 panels:
+         * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+
+         If M is selected the module will be called ili9341.
+
 config TINYDRM_MI0283QT
        tristate "DRM support for MI0283QT"
        depends on DRM_TINYDRM && SPI
index 49a1119..14d9908 100644 (file)
@@ -5,6 +5,7 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI)          += mipi-dbi.o
 
 # Displays
 obj-$(CONFIG_TINYDRM_ILI9225)          += ili9225.o
+obj-$(CONFIG_TINYDRM_ILI9341)          += ili9341.o
 obj-$(CONFIG_TINYDRM_MI0283QT)         += mi0283qt.o
 obj-$(CONFIG_TINYDRM_REPAPER)          += repaper.o
 obj-$(CONFIG_TINYDRM_ST7586)           += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
new file mode 100644 (file)
index 0000000..8864dcd
--- /dev/null
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9341 panels
+ *
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on mi0283qt.c:
+ * Copyright 2016 Noralf Trønnes
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define ILI9341_FRMCTR1                0xb1
+#define ILI9341_DISCTRL                0xb6
+#define ILI9341_ETMOD          0xb7
+
+#define ILI9341_PWCTRL1                0xc0
+#define ILI9341_PWCTRL2                0xc1
+#define ILI9341_VMCTRL1                0xc5
+#define ILI9341_VMCTRL2                0xc7
+#define ILI9341_PWCTRLA                0xcb
+#define ILI9341_PWCTRLB                0xcf
+
+#define ILI9341_PGAMCTRL       0xe0
+#define ILI9341_NGAMCTRL       0xe1
+#define ILI9341_DTCTRLA                0xe8
+#define ILI9341_DTCTRLB                0xea
+#define ILI9341_PWRSEQ         0xed
+
+#define ILI9341_EN3GAM         0xf2
+#define ILI9341_PUMPCTRL       0xf7
+
+#define ILI9341_MADCTL_BGR     BIT(3)
+#define ILI9341_MADCTL_MV      BIT(5)
+#define ILI9341_MADCTL_MX      BIT(6)
+#define ILI9341_MADCTL_MY      BIT(7)
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+                            struct drm_crtc_state *crtc_state,
+                            struct drm_plane_state *plane_state)
+{
+       struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+       struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+       u8 addr_mode;
+       int ret;
+
+       DRM_DEBUG_KMS("\n");
+
+       ret = mipi_dbi_poweron_conditional_reset(mipi);
+       if (ret < 0)
+               return;
+       if (ret == 1)
+               goto out_enable;
+
+       mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+       mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+       mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+       mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+       mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+       mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
+       mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+
+       /* Power Control */
+       mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
+       mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+       /* VCOM */
+       mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
+       mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+
+       /* Memory Access Control */
+       mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+       /* Frame Rate */
+       mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+
+       /* Gamma */
+       mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
+       mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+       mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+                        0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
+                        0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
+       mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+                        0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
+                        0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
+
+       /* DDRAM */
+       mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+
+       /* Display */
+       mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+       mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+       msleep(100);
+
+       mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+       msleep(100);
+
+out_enable:
+       switch (mipi->rotation) {
+       default:
+               addr_mode = ILI9341_MADCTL_MX;
+               break;
+       case 90:
+               addr_mode = ILI9341_MADCTL_MV;
+               break;
+       case 180:
+               addr_mode = ILI9341_MADCTL_MY;
+               break;
+       case 270:
+               addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+                           ILI9341_MADCTL_MX;
+               break;
+       }
+       addr_mode |= ILI9341_MADCTL_BGR;
+       mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+       .enable = yx240qv29_enable,
+       .disable = mipi_dbi_pipe_disable,
+       .update = tinydrm_display_pipe_update,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx240qv29_mode = {
+       TINYDRM_MODE(240, 320, 37, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+
+static struct drm_driver ili9341_driver = {
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+       .fops                   = &ili9341_fops,
+       TINYDRM_GEM_DRIVER_OPS,
+       .lastclose              = drm_fb_helper_lastclose,
+       .debugfs_init           = mipi_dbi_debugfs_init,
+       .name                   = "ili9341",
+       .desc                   = "Ilitek ILI9341",
+       .date                   = "20180514",
+       .major                  = 1,
+       .minor                  = 0,
+};
+
+static const struct of_device_id ili9341_of_match[] = {
+       { .compatible = "adafruit,yx240qv29" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ili9341_of_match);
+
+static const struct spi_device_id ili9341_id[] = {
+       { "yx240qv29", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, ili9341_id);
+
+static int ili9341_probe(struct spi_device *spi)
+{
+       struct device *dev = &spi->dev;
+       struct mipi_dbi *mipi;
+       struct gpio_desc *dc;
+       u32 rotation = 0;
+       int ret;
+
+       mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+       if (!mipi)
+               return -ENOMEM;
+
+       mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(mipi->reset)) {
+               DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+               return PTR_ERR(mipi->reset);
+       }
+
+       dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+       if (IS_ERR(dc)) {
+               DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+               return PTR_ERR(dc);
+       }
+
+       mipi->backlight = devm_of_find_backlight(dev);
+       if (IS_ERR(mipi->backlight))
+               return PTR_ERR(mipi->backlight);
+
+       device_property_read_u32(dev, "rotation", &rotation);
+
+       ret = mipi_dbi_spi_init(spi, mipi, dc);
+       if (ret)
+               return ret;
+
+       ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
+                           &ili9341_driver, &yx240qv29_mode, rotation);
+       if (ret)
+               return ret;
+
+       spi_set_drvdata(spi, mipi);
+
+       return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+       struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+       tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver ili9341_spi_driver = {
+       .driver = {
+               .name = "ili9341",
+               .of_match_table = ili9341_of_match,
+       },
+       .id_table = ili9341_id,
+       .probe = ili9341_probe,
+       .shutdown = ili9341_shutdown,
+};
+module_spi_driver(ili9341_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9341 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
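The rotation switch in yx240qv29_enable() above composes the MADCTL byte sent via MIPI_DCS_SET_ADDRESS_MODE. A hedged sketch of the resulting values, using the BIT() definitions from the top of the file:

	/* 270 degrees: mirror both axes and swap row/column order. */
	u8 addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
		       ILI9341_MADCTL_MX;
	addr_mode |= ILI9341_MADCTL_BGR;	/* panel wiring is BGR */

	/* Resulting bytes per rotation: 0 -> 0x48, 90 -> 0x28,
	 * 180 -> 0x88, 270 -> 0xe8.
	 */
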
index c7ece76..0ca0ec4 100644 (file)
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
 {
-       int ret = 0;
+       vm_fault_t ret = 0;
+       int err = 0;
 
        if (likely(!bo->moving))
                goto out_unlock;
@@ -78,9 +79,9 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
        /*
         * Ordinary wait.
         */
-       ret = dma_fence_wait(bo->moving, true);
-       if (unlikely(ret != 0)) {
-               ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+       err = dma_fence_wait(bo->moving, true);
+       if (unlikely(err != 0)) {
+               ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                + page_offset;
 }
 
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
-       int ret;
+       int err;
        int i;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
@@ -129,9 +131,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
-       ret = ttm_bo_reserve(bo, true, true, NULL);
-       if (unlikely(ret != 0)) {
-               if (ret != -EBUSY)
+       err = ttm_bo_reserve(bo, true, true, NULL);
+       if (unlikely(err != 0)) {
+               if (err != -EBUSY)
                        return VM_FAULT_NOPAGE;
 
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
        }
 
        if (bdev->driver->fault_reserve_notify) {
-               ret = bdev->driver->fault_reserve_notify(bo);
-               switch (ret) {
+               err = bdev->driver->fault_reserve_notify(bo);
+               switch (err) {
                case 0:
                        break;
                case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                goto out_unlock;
        }
 
-       ret = ttm_mem_io_lock(man, true);
-       if (unlikely(ret != 0)) {
+       err = ttm_mem_io_lock(man, true);
+       if (unlikely(err != 0)) {
                ret = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
-       ret = ttm_mem_io_reserve_vm(bo);
-       if (unlikely(ret != 0)) {
+       err = ttm_mem_io_reserve_vm(bo);
+       if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                }
 
                if (vma->vm_flags & VM_MIXEDMAP)
-                       ret = vm_insert_mixed(&cvma, address,
+                       ret = vmf_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
-                       ret = vm_insert_pfn(&cvma, address, pfn);
+                       ret = vmf_insert_pfn(&cvma, address, pfn);
 
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
                 */
 
-               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+               if (unlikely(ret == VM_FAULT_NOPAGE && i > 0))
                        break;
-               else if (unlikely(ret != 0)) {
-                       ret =
-                           (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+               else if (unlikely(ret & VM_FAULT_ERROR))
                        goto out_io_unlock;
-               }
 
                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
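The ttm hunks above follow the tree-wide vm_fault_t conversion: errno-domain values (int err) are kept strictly apart from fault-domain values (vm_fault_t ret), so the type system can catch a mix-up. A minimal sketch of the convention, with hypothetical helper names:

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		unsigned long pfn = 0;	/* hypothetical: from backing store */
		int err;

		err = my_wait_for_idle(vmf);	/* hypothetical, returns errno */
		if (err)
			return (err == -ERESTARTSYS) ? VM_FAULT_NOPAGE
						     : VM_FAULT_SIGBUS;

		/* vmf_insert_pfn() already returns vm_fault_t codes */
		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
	}
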
index 0a20695..556f626 100644 (file)
@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
 };
 
 static int udl_attach_dma_buf(struct dma_buf *dmabuf,
-                             struct device *dev,
                              struct dma_buf_attachment *attach)
 {
        struct udl_drm_dmabuf_attachment *udl_attach;
@@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
        return NULL;
 }
 
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-                                   unsigned long page_num)
-{
-       /* TODO */
-
-       return NULL;
-}
-
 static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
 {
        /* TODO */
 }
 
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-                                    unsigned long page_num,
-                                    void *addr)
-{
-       /* TODO */
-}
-
 static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
 {
@@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .map                    = udl_dmabuf_kmap,
-       .map_atomic             = udl_dmabuf_kmap_atomic,
        .unmap                  = udl_dmabuf_kunmap,
-       .unmap_atomic           = udl_dmabuf_kunmap_atomic,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
 };
index 55c0cc3..0725825 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/usb.h>
 #include <drm/drm_gem.h>
+#include <linux/mm_types.h>
 
 #define DRIVER_NAME            "udl"
 #define DRIVER_DESC            "DisplayLink"
@@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_fault *vmf);
+vm_fault_t udl_gem_fault(struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                      int width, int height);
index 9a15cce..d5a2329 100644 (file)
@@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return ret;
 }
 
-int udl_gem_fault(struct vm_fault *vmf)
+vm_fault_t udl_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
        struct page *page;
        unsigned int page_offset;
-       int ret = 0;
 
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
@@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        page = obj->pages[page_offset];
-       ret = vm_insert_page(vma, vmf->address, page);
-       switch (ret) {
-       case -EAGAIN:
-       case 0:
-       case -ERESTARTSYS:
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
+       return vmf_insert_page(vma, vmf->address, page);
 }
 
 int udl_gem_get_pages(struct udl_gem_object *obj)
index a043ac3..f32ac8c 100644 (file)
@@ -25,7 +25,6 @@ struct v3d_queue_state {
 
        u64 fence_context;
        u64 emit_seqno;
-       u64 finished_seqno;
 };
 
 struct v3d_dev {
@@ -85,6 +84,11 @@ struct v3d_dev {
         */
        struct mutex reset_lock;
 
+       /* Lock taken when creating and pushing the GPU scheduler
+        * jobs, to keep the sched-fence seqnos in order.
+        */
+       struct mutex sched_lock;
+
        struct {
                u32 num_allocated;
                u32 pages_allocated;
index 087d49c..bfe31a8 100644 (file)
@@ -40,19 +40,14 @@ static bool v3d_fence_enable_signaling(struct dma_fence *fence)
        return true;
 }
 
-static bool v3d_fence_signaled(struct dma_fence *fence)
-{
-       struct v3d_fence *f = to_v3d_fence(fence);
-       struct v3d_dev *v3d = to_v3d_dev(f->dev);
-
-       return v3d->queue[f->queue].finished_seqno >= f->seqno;
-}
-
 const struct dma_fence_ops v3d_fence_ops = {
        .get_driver_name = v3d_fence_get_driver_name,
        .get_timeline_name = v3d_fence_get_timeline_name,
        .enable_signaling = v3d_fence_enable_signaling,
-       .signaled = v3d_fence_signaled,
+       /* Each of our fences gets signaled as complete by the IRQ
+        * handler, so we rely on the core's tracking of signaling.
+        */
+       .signaled = NULL,
        .wait = dma_fence_default_wait,
        .release = dma_fence_free,
 };
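Dropping the .signaled callback above works because dma_fence_signal(), called from the IRQ handler (see the v3d_irq.c hunk below), sets the fence's signaled bit, and dma_fence_is_signaled() answers from that bit before it ever needs a driver hook. A hedged sketch of the split:

	/* Producer (IRQ) side: mark completion and wake waiters. */
	dma_fence_signal(job->done_fence);

	/* Consumer side: no driver poll needed any more. */
	bool done = dma_fence_is_signaled(fence);
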
index b513f91..e1fcbb4 100644 (file)
@@ -550,6 +550,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto fail;
 
+       mutex_lock(&v3d->sched_lock);
        if (exec->bin.start != exec->bin.end) {
                ret = drm_sched_job_init(&exec->bin.base,
                                         &v3d->queue[V3D_BIN].sched,
@@ -576,6 +577,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        kref_get(&exec->refcount); /* put by scheduler job completion */
        drm_sched_entity_push_job(&exec->render.base,
                                  &v3d_priv->sched_entity[V3D_RENDER]);
+       mutex_unlock(&v3d->sched_lock);
 
        v3d_attach_object_fences(exec);
 
@@ -594,6 +596,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        return 0;
 
 fail_unreserve:
+       mutex_unlock(&v3d->sched_lock);
        v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
 fail:
        v3d_exec_put(exec);
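The lock added above must span both drm_sched_job_init(), which allocates the scheduler fence and its seqno, and drm_sched_entity_push_job(); otherwise two submitters could push jobs with out-of-order seqnos. A condensed sketch of the pattern (job and entity names hypothetical):

	mutex_lock(&v3d->sched_lock);

	ret = drm_sched_job_init(&job->base, sched, entity, file_priv);
	if (ret)
		goto out_unlock;	/* never push a half-built job */

	drm_sched_entity_push_job(&job->base, entity);

out_unlock:
	mutex_unlock(&v3d->sched_lock);
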
@@ -615,6 +618,7 @@ v3d_gem_init(struct drm_device *dev)
        spin_lock_init(&v3d->job_lock);
        mutex_init(&v3d->bo_lock);
        mutex_init(&v3d->reset_lock);
+       mutex_init(&v3d->sched_lock);
 
        /* Note: We don't allocate address 0.  Various bits of HW
         * treat 0 as special, such as the occlusion query counters
@@ -650,17 +654,14 @@ void
 v3d_gem_destroy(struct drm_device *dev)
 {
        struct v3d_dev *v3d = to_v3d_dev(dev);
-       enum v3d_queue q;
 
        v3d_sched_fini(v3d);
 
        /* Waiting for exec to finish would need to be done before
         * unregistering V3D.
         */
-       for (q = 0; q < V3D_MAX_QUEUES; q++) {
-               WARN_ON(v3d->queue[q].emit_seqno !=
-                       v3d->queue[q].finished_seqno);
-       }
+       WARN_ON(v3d->bin_job);
+       WARN_ON(v3d->render_job);
 
        drm_mm_takedown(&v3d->mm);
 
index 77e1fa0..e07514e 100644 (file)
@@ -87,15 +87,12 @@ v3d_irq(int irq, void *arg)
        }
 
        if (intsts & V3D_INT_FLDONE) {
-               v3d->queue[V3D_BIN].finished_seqno++;
                dma_fence_signal(v3d->bin_job->bin.done_fence);
                status = IRQ_HANDLED;
        }
 
        if (intsts & V3D_INT_FRDONE) {
-               v3d->queue[V3D_RENDER].finished_seqno++;
                dma_fence_signal(v3d->render_job->render.done_fence);
-
                status = IRQ_HANDLED;
        }
 
index b07bece..808bc90 100644 (file)
@@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
        v3d_invalidate_caches(v3d);
 
        fence = v3d_fence_create(v3d, q);
-       if (!fence)
-               return fence;
+       if (IS_ERR(fence))
+               return NULL;
 
        if (job->done_fence)
                dma_fence_put(job->done_fence);
index add9cc9..8dcce71 100644 (file)
@@ -721,7 +721,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
        return dmabuf;
 }
 
-int vc4_fault(struct vm_fault *vmf)
+vm_fault_t vc4_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
index c8650bb..dcadf79 100644 (file)
@@ -862,7 +862,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
         * is released.
         */
        drm_atomic_set_fb_for_plane(plane->state, fb);
-       plane->fb = fb;
 
        vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
                           vc4_async_page_flip_complete);
@@ -1057,7 +1056,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
        drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
                                  &vc4_crtc_funcs, NULL);
        drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
-       primary_plane->crtc = crtc;
        vc4_crtc->channel = vc4_crtc->data->hvs_channel;
        drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
        drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@@ -1093,7 +1091,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
        cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
        if (!IS_ERR(cursor_plane)) {
                cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
-               cursor_plane->crtc = crtc;
                crtc->cursor = cursor_plane;
        }
 
index 554a4e8..eace76c 100644 (file)
@@ -6,6 +6,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/mm_types.h>
 #include <linux/reservation.h>
 #include <drm/drmP.h>
 #include <drm/drm_encoder.h>
@@ -674,7 +675,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-int vc4_fault(struct vm_fault *vmf);
+vm_fault_t vc4_fault(struct vm_fault *vmf);
 int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
 struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
index 1d34619..8604fd2 100644 (file)
@@ -467,12 +467,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        struct drm_framebuffer *fb = state->fb;
        u32 ctl0_offset = vc4_state->dlist_count;
        const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+       u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
        int num_planes = drm_format_num_planes(format->drm);
        bool mix_plane_alpha;
        bool covers_screen;
        u32 scl0, scl1, pitch0;
        u32 lbm_size, tiling;
        unsigned long irqflags;
+       u32 hvs_format = format->hvs;
        int ret, i;
 
        ret = vc4_plane_setup_clipping_and_scaling(state);
@@ -512,7 +514,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                scl1 = vc4_get_scl_field(state, 0);
        }
 
-       switch (fb->modifier) {
+       switch (base_format_mod) {
        case DRM_FORMAT_MOD_LINEAR:
                tiling = SCALER_CTL0_TILING_LINEAR;
                pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@@ -535,6 +537,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                break;
        }
 
+       case DRM_FORMAT_MOD_BROADCOM_SAND64:
+       case DRM_FORMAT_MOD_BROADCOM_SAND128:
+       case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+               uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+
+               /* Column-based NV12 or RGBA.
+                */
+               if (fb->format->num_planes > 1) {
+                       if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+                               DRM_DEBUG_KMS("SAND format only valid for NV12/21\n");
+                               return -EINVAL;
+                       }
+                       hvs_format = HVS_PIXEL_FORMAT_H264;
+               } else {
+                       if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+                               DRM_DEBUG_KMS("SAND256 format only valid for H.264\n");
+                               return -EINVAL;
+                       }
+               }
+
+               switch (base_format_mod) {
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+                       tiling = SCALER_CTL0_TILING_64B;
+                       break;
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+                       tiling = SCALER_CTL0_TILING_128B;
+                       break;
+               case DRM_FORMAT_MOD_BROADCOM_SAND256:
+                       tiling = SCALER_CTL0_TILING_256B_OR_T;
+                       break;
+               default:
+                       break;
+               }
+
+               if (param > SCALER_TILE_HEIGHT_MASK) {
+                       DRM_DEBUG_KMS("SAND height too large (%u)\n", param);
+                       return -EINVAL;
+               }
+
+               pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+               break;
+       }
+
        default:
                DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
                              (long long)fb->modifier);
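
For context, the SAND modifiers are parameterized: drm_fourcc.h packs the column height into the bits that fourcc_mod_broadcom_param() extracts above. Roughly (a sketch of the uapi encoding, quoted from memory):

    #define fourcc_mod_broadcom_code(val, params) \
            fourcc_mod_code(BROADCOM, ((((__u64)params) << 8) | val))

    #define fourcc_mod_broadcom_param(m) ((int)(((m) >> 8) & 0xffffffffffffULL))
    #define fourcc_mod_broadcom_mod(m)   ((m) & ~(0xffffffffffffULL << 8))

    /* Userspace picks the column height at allocation time, e.g.: */
    __u64 modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);
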
@@ -544,8 +589,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        /* Control word */
        vc4_dlist_write(vc4_state,
                        SCALER_CTL0_VALID |
+                       VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
                        (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
-                       (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+                       (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
                        VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
                        (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
                        VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@@ -607,8 +653,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 
        /* Pitch word 1/2 */
        for (i = 1; i < num_planes; i++) {
-               vc4_dlist_write(vc4_state,
-                               VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+               if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+                       vc4_dlist_write(vc4_state,
+                                       VC4_SET_FIELD(fb->pitches[i],
+                                                     SCALER_SRC_PITCH));
+               } else {
+                       vc4_dlist_write(vc4_state, pitch0);
+               }
        }
 
        /* Colorspace conversion words */
@@ -810,18 +861,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
        struct dma_fence *fence;
        int ret;
 
-       if ((plane->state->fb == state->fb) || !state->fb)
+       if (!state->fb)
                return 0;
 
        bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
 
+       fence = reservation_object_get_excl_rcu(bo->resv);
+       drm_atomic_set_fence_for_plane(state, fence);
+
+       if (plane->state->fb == state->fb)
+               return 0;
+
        ret = vc4_bo_inc_usecnt(bo);
        if (ret)
                return ret;
 
-       fence = reservation_object_get_excl_rcu(bo->resv);
-       drm_atomic_set_fence_for_plane(state, fence);
-
        return 0;
 }
 
@@ -866,13 +920,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
        case DRM_FORMAT_BGR565:
        case DRM_FORMAT_ARGB1555:
        case DRM_FORMAT_XRGB1555:
-               return true;
+               switch (fourcc_mod_broadcom_mod(modifier)) {
+               case DRM_FORMAT_MOD_LINEAR:
+               case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+                       return true;
+               default:
+                       return false;
+               }
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+               switch (fourcc_mod_broadcom_mod(modifier)) {
+               case DRM_FORMAT_MOD_LINEAR:
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+               case DRM_FORMAT_MOD_BROADCOM_SAND256:
+                       return true;
+               default:
+                       return false;
+               }
        case DRM_FORMAT_YUV422:
        case DRM_FORMAT_YVU422:
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
        default:
                return (modifier == DRM_FORMAT_MOD_LINEAR);
        }
@@ -900,6 +973,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
        unsigned i;
        static const uint64_t modifiers[] = {
                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+               DRM_FORMAT_MOD_BROADCOM_SAND128,
+               DRM_FORMAT_MOD_BROADCOM_SAND64,
+               DRM_FORMAT_MOD_BROADCOM_SAND256,
                DRM_FORMAT_MOD_LINEAR,
                DRM_FORMAT_MOD_INVALID
        };
index d1fb6fe..d6864fa 100644 (file)
@@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
 #define SCALER_SRC_PITCH_MASK                  VC4_MASK(15, 0)
 #define SCALER_SRC_PITCH_SHIFT                 0
 
+/* PITCH0/1/2 fields for tiled (SAND). */
+#define SCALER_TILE_SKIP_0_MASK                        VC4_MASK(18, 16)
+#define SCALER_TILE_SKIP_0_SHIFT               16
+#define SCALER_TILE_HEIGHT_MASK                        VC4_MASK(15, 0)
+#define SCALER_TILE_HEIGHT_SHIFT               0
+
 /* PITCH0 fields for T-tiled. */
 #define SCALER_PITCH0_TILE_WIDTH_L_MASK                VC4_MASK(22, 16)
 #define SCALER_PITCH0_TILE_WIDTH_L_SHIFT       16
index 2524ff1..c64a859 100644 (file)
@@ -61,13 +61,13 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
        kfree(vgem_obj);
 }
 
-static int vgem_gem_fault(struct vm_fault *vmf)
+static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
        unsigned long vaddr = vmf->address;
-       int ret;
+       vm_fault_t ret = VM_FAULT_SIGBUS;
        loff_t num_pages;
        pgoff_t page_offset;
        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
@@ -77,7 +77,6 @@ static int vgem_gem_fault(struct vm_fault *vmf)
        if (page_offset > num_pages)
                return VM_FAULT_SIGBUS;
 
-       ret = -ENOENT;
        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                get_page(obj->pages[page_offset]);
index a5edd86..ff9933e 100644 (file)
@@ -28,6 +28,7 @@
 #include "virtgpu_drv.h"
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #define XRES_MIN    32
 #define YRES_MIN    32
@@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
 };
 
-static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct virtio_gpu_framebuffer *virtio_gpu_fb
-               = to_virtio_gpu_framebuffer(fb);
-
-       drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
-       drm_framebuffer_cleanup(fb);
-       kfree(virtio_gpu_fb);
-}
-
 static int
 virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
                                     struct drm_file *file_priv,
@@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
 }
 
-static int
-virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                    struct drm_file *file_priv,
-                                    unsigned int *handle)
-{
-       struct virtio_gpu_framebuffer *virtio_gpu_fb =
-               to_virtio_gpu_framebuffer(fb);
-
-       return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
-       .create_handle = virtio_gpu_framebuffer_create_handle,
-       .destroy = virtio_gpu_user_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
        .dirty = virtio_gpu_framebuffer_surface_dirty,
 };
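
The generic helpers replace the removed boilerplate one-for-one; drm_gem_fb_destroy() is roughly the following (a sketch of the helper, shown for comparison):

    void drm_gem_fb_destroy(struct drm_framebuffer *fb)
    {
            int i;

            /* Drop every GEM reference the fb wrapped (obj[0] here);
             * the put helper tolerates NULL, so obj[1..3] may be unset.
             */
            for (i = 0; i < 4; i++)
                    drm_gem_object_put_unlocked(fb->obj[i]);

            drm_framebuffer_cleanup(fb);
            kfree(fb);
    }
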
 
@@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
        int ret;
        struct virtio_gpu_object *bo;
 
-       vgfb->obj = obj;
+       vgfb->base.obj[0] = obj;
 
        bo = gem_to_virtio_gpu_obj(obj);
 
@@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
 
        ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
        if (ret) {
-               vgfb->obj = NULL;
+               vgfb->base.obj[0] = NULL;
                return ret;
        }
 
@@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
        drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                  &virtio_gpu_crtc_funcs, NULL);
        drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
-       primary->crtc = crtc;
-       cursor->crtc = crtc;
 
        drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
                           DRM_MODE_CONNECTOR_VIRTUAL);
index d25c8ca..65605e2 100644 (file)
@@ -124,7 +124,6 @@ struct virtio_gpu_output {
 
 struct virtio_gpu_framebuffer {
        struct drm_framebuffer base;
-       struct drm_gem_object *obj;
        int x1, y1, x2, y2; /* dirty rect */
        spinlock_t dirty_lock;
        uint32_t hw_res_handle;
index 8af69ab..a121b1c 100644 (file)
@@ -46,7 +46,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
        int bpp = fb->base.format->cpp[0];
        int x2, y2;
        unsigned long flags;
-       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
 
        if ((width <= 0) ||
            (x + width > fb->base.width) ||
@@ -121,7 +121,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
                             unsigned int num_clips)
 {
        struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
-       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        struct drm_clip_rect norect;
        struct drm_clip_rect *clips_ptr;
        int left, right, top, bottom;
@@ -305,8 +305,8 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
 
        drm_fb_helper_unregister_fbi(&vgfbdev->helper);
 
-       if (vgfb->obj)
-               vgfb->obj = NULL;
+       if (vgfb->base.obj[0])
+               vgfb->base.obj[0] = NULL;
        drm_fb_helper_fini(&vgfbdev->helper);
        drm_framebuffer_cleanup(&vgfb->base);
 
index 71ba455..dc5b5b2 100644 (file)
@@ -154,7 +154,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
-               bo = gem_to_virtio_gpu_obj(vgfb->obj);
+               bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
                if (bo->dumb) {
                        virtio_gpu_cmd_transfer_to_host_2d
@@ -208,7 +208,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
-               bo = gem_to_virtio_gpu_obj(vgfb->obj);
+               bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
        } else {
                handle = 0;
index 54e3003..9b7e0ac 100644 (file)
@@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
 static int vmwgfx_set_config_internal(struct drm_mode_set *set)
 {
        struct drm_crtc *crtc = set->crtc;
-       struct drm_framebuffer *fb;
-       struct drm_crtc *tmp;
-       struct drm_device *dev = set->crtc->dev;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
 
        drm_modeset_acquire_init(&ctx, 0);
 
 restart:
-       /*
-        * NOTE: ->set_config can also disable other crtcs (if we steal all
-        * connectors from it), hence we need to refcount the fbs across all
-        * crtcs. Atomic modeset will have saner semantics ...
-        */
-       drm_for_each_crtc(tmp, dev)
-               tmp->primary->old_fb = tmp->primary->fb;
-
-       fb = set->fb;
-
        ret = crtc->funcs->set_config(set, &ctx);
-       if (ret == 0) {
-               crtc->primary->crtc = crtc;
-               crtc->primary->fb = fb;
-       }
-
-       drm_for_each_crtc(tmp, dev) {
-               if (tmp->primary->fb)
-                       drm_framebuffer_get(tmp->primary->fb);
-               if (tmp->primary->old_fb)
-                       drm_framebuffer_put(tmp->primary->old_fb);
-               tmp->primary->old_fb = NULL;
-       }
 
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
index 01f2dc9..ef96ba7 100644 (file)
@@ -1536,9 +1536,13 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
                unsigned long requested_bb_mem = 0;
 
                if (dev_priv->active_display_unit == vmw_du_screen_target) {
-                       if (crtc->primary->fb) {
-                               int cpp = crtc->primary->fb->pitches[0] /
-                                         crtc->primary->fb->width;
+                       struct drm_plane *plane = crtc->primary;
+                       struct drm_plane_state *plane_state;
+
+                       plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+                       if (plane_state && plane_state->fb) {
+                               int cpp = plane_state->fb->format->cpp[0];
 
                                requested_bb_mem += crtc->mode.hdisplay * cpp *
                                                    crtc->mode.vdisplay;
@@ -2322,9 +2326,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
        } else {
                list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
                                    head) {
-                       if (crtc->primary->fb != &framebuffer->base)
-                               continue;
-                       units[num_units++] = vmw_crtc_to_du(crtc);
+                       struct drm_plane *plane = crtc->primary;
+
+                       if (plane->state->fb == &framebuffer->base)
+                               units[num_units++] = vmw_crtc_to_du(crtc);
                }
        }
 
@@ -2806,6 +2811,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
                                struct drm_crtc *crtc)
 {
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+       struct drm_plane *plane = crtc->primary;
        struct vmw_framebuffer *vfb;
 
        mutex_lock(&dev_priv->global_kms_state_mutex);
@@ -2813,7 +2819,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
        if (!du->is_implicit)
                goto out_unlock;
 
-       vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+       vfb = vmw_framebuffer_to_vfb(plane->state->fb);
        WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
                     dev_priv->implicit_fb != vfb);
 
index 0d42a46..373bc6d 100644 (file)
@@ -40,7 +40,6 @@
  */
 
 static int vmw_prime_map_attach(struct dma_buf *dma_buf,
-                               struct device *target_dev,
                                struct dma_buf_attachment *attach)
 {
        return -ENOSYS;
@@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 }
 
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-               unsigned long page_num)
-{
-       return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-               unsigned long page_num, void *addr)
-{
-
-}
 static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
                unsigned long page_num)
 {
@@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops =  {
        .unmap_dma_buf = vmw_prime_unmap_dma_buf,
        .release = NULL,
        .map = vmw_prime_dmabuf_kmap,
-       .map_atomic = vmw_prime_dmabuf_kmap_atomic,
        .unmap = vmw_prime_dmabuf_kunmap,
-       .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
        .mmap = vmw_prime_dmabuf_mmap,
        .vmap = vmw_prime_dmabuf_vmap,
        .vunmap = vmw_prime_dmabuf_vunmap,
index 3d667e9..9798640 100644 (file)
@@ -527,8 +527,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                 */
                if (ret != 0)
                        DRM_ERROR("Failed to update screen.\n");
-
-               crtc->primary->fb = plane->state->fb;
        } else {
                /*
                 * When disabling a plane, CRTC and FB should always be NULL
index 67331f0..152e96c 100644 (file)
@@ -414,6 +414,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
 static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
 {
+       struct drm_plane_state *plane_state = crtc->primary->state;
        struct vmw_private *dev_priv;
        struct vmw_screen_target_display_unit *stdu;
        struct vmw_framebuffer *vfb;
@@ -422,7 +423,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
 
        stdu     = vmw_crtc_to_stdu(crtc);
        dev_priv = vmw_priv(crtc->dev);
-       fb       = crtc->primary->fb;
+       fb       = plane_state->fb;
 
        vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
 
@@ -1285,8 +1286,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                                                         1, 1, NULL, crtc);
                if (ret)
                        DRM_ERROR("Failed to update STDU.\n");
-
-               crtc->primary->fb = plane->state->fb;
        } else {
                crtc = old_state->crtc;
                stdu = vmw_crtc_to_stdu(crtc);
index b3786c1..6b6d5ab 100644 (file)
@@ -623,7 +623,7 @@ static int displback_initwait(struct xen_drm_front_info *front_info)
        if (ret < 0)
                return ret;
 
-       DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
+       DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
        /* Create event channels for all connectors and publish */
        ret = xen_drm_front_evtchnl_create_all(front_info);
        if (ret < 0)
index 2c2479b..5693b4a 100644 (file)
@@ -126,12 +126,12 @@ struct xen_drm_front_drm_info {
 
 static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
 {
-       return (u64)fb;
+       return (uintptr_t)fb;
 }
 
 static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
 {
-       return (u64)gem_obj;
+       return (uintptr_t)gem_obj;
 }
 
 int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
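
The uintptr_t casts fix pointer-width warnings on 32-bit builds; a minimal illustration (not from the patch):

    static u64 ptr_to_cookie(void *ptr)
    {
            /* (u64)ptr triggers "cast from pointer to integer of
             * different size" on 32-bit; uintptr_t always matches the
             * pointer width and then widens losslessly to u64.
             */
            return (uintptr_t)ptr;
    }
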
index 8099cb3..d333b67 100644 (file)
@@ -122,7 +122,7 @@ static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
 }
 
 #define xen_page_to_vaddr(page) \
-               ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+               ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
 
 static int backend_unmap(struct xen_drm_front_shbuf *buf)
 {
index f1178f6..aff0ab7 100644 (file)
@@ -222,7 +222,7 @@ struct vb2_dc_attachment {
        enum dma_data_direction dma_dir;
 };
 
-static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct vb2_dc_attachment *attach;
@@ -358,7 +358,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .map = vb2_dc_dmabuf_ops_kmap,
-       .map_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
index 753ed31..015e737 100644 (file)
@@ -371,7 +371,7 @@ struct vb2_dma_sg_attachment {
        enum dma_data_direction dma_dir;
 };
 
-static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct vb2_dma_sg_attachment *attach;
@@ -507,7 +507,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
        .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
        .map = vb2_dma_sg_dmabuf_ops_kmap,
-       .map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
        .vmap = vb2_dma_sg_dmabuf_ops_vmap,
        .mmap = vb2_dma_sg_dmabuf_ops_mmap,
        .release = vb2_dma_sg_dmabuf_ops_release,
index 359fb98..6dfbd5b 100644 (file)
@@ -209,7 +209,7 @@ struct vb2_vmalloc_attachment {
        enum dma_data_direction dma_dir;
 };
 
-static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct vb2_vmalloc_attachment *attach;
@@ -346,7 +346,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .map = vb2_vmalloc_dmabuf_ops_kmap,
-       .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
index 9d1109e..9907332 100644 (file)
@@ -201,7 +201,7 @@ struct ion_dma_buf_attachment {
        struct list_head list;
 };
 
-static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+static int ion_dma_buf_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
 {
        struct ion_dma_buf_attachment *a;
@@ -219,7 +219,7 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
        }
 
        a->table = table;
-       a->dev = dev;
+       a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
 
        attachment->priv = a;
@@ -375,8 +375,6 @@ static const struct dma_buf_ops dma_buf_ops = {
        .detach = ion_dma_buf_detatch,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
-       .map_atomic = ion_dma_buf_kmap,
-       .unmap_atomic = ion_dma_buf_kunmap,
        .map = ion_dma_buf_kmap,
        .unmap = ion_dma_buf_kunmap,
 };
index 07d3be6..0b9ab1d 100644 (file)
@@ -80,11 +80,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
        tee_shm_release(shm);
 }
 
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-       return NULL;
-}
-
 static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
 {
        return NULL;
@@ -107,7 +102,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
        .map_dma_buf = tee_shm_op_map_dma_buf,
        .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
        .release = tee_shm_op_release,
-       .map_atomic = tee_shm_op_map_atomic,
        .map = tee_shm_op_map,
        .mmap = tee_shm_op_mmap,
 };
index f5099c1..c5dfbdb 100644 (file)
@@ -97,6 +97,16 @@ struct pci_controller;
 
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
 
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
+
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+{
+       return dev->driver->driver_features & feature;
+}
+
 /**
  * drm_drv_uses_atomic_modeset - check if the driver implements
  * atomic_commit()
@@ -107,17 +117,8 @@ struct pci_controller;
  */
 static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 {
-       return dev->mode_config.funcs->atomic_commit != NULL;
-}
-
-#define DRM_SWITCH_POWER_ON 0
-#define DRM_SWITCH_POWER_OFF 1
-#define DRM_SWITCH_POWER_CHANGING 2
-#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
-{
-       return dev->driver->driver_features & feature;
+       return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
+               dev->mode_config.funcs->atomic_commit != NULL;
 }
 
 /* returns true if currently okay to sleep */
index a57a8aa..da9d95a 100644 (file)
@@ -160,6 +160,14 @@ struct __drm_crtcs_state {
 struct __drm_connnectors_state {
        struct drm_connector *ptr;
        struct drm_connector_state *state, *old_state, *new_state;
+       /**
+        * @out_fence_ptr:
+        *
+        * User-provided pointer which the kernel uses to return a sync_file
+        * file descriptor. Used by writeback connectors to signal completion of
+        * the writeback.
+        */
+       s32 __user *out_fence_ptr;
 };
 
 struct drm_private_obj;
@@ -594,6 +602,9 @@ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
 int __must_check
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                                  struct drm_crtc *crtc);
+int drm_atomic_set_writeback_fb_for_connector(
+               struct drm_connector_state *conn_state,
+               struct drm_framebuffer *fb);
 int __must_check
 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
                                   struct drm_crtc *crtc);
@@ -601,9 +612,6 @@ int __must_check
 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
                               struct drm_crtc *crtc);
 
-void
-drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
-
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
index 3270fec..bd85074 100644 (file)
@@ -97,7 +97,7 @@ struct drm_bridge_funcs {
        /**
         * @mode_fixup:
         *
-        * This callback is used to validate and adjust a mode. The paramater
+        * This callback is used to validate and adjust a mode. The parameter
         * mode is the display mode that should be fed to the next element in
         * the display chain, either the final &drm_connector or the next
         * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
@@ -178,6 +178,22 @@ struct drm_bridge_funcs {
         * then this would be &drm_encoder_helper_funcs.mode_set. The display
         * pipe (i.e.  clocks and timing signals) is off when this function is
         * called.
+        *
+        * The adjusted_mode parameter is the mode output by the CRTC for the
+        * first bridge in the chain. It can be different from the mode
+        * parameter that contains the desired mode for the connector at the end
+        * of the bridges chain, for instance when the first bridge in the chain
+        * performs scaling. The adjusted mode is mostly useful for the first
+        * bridge in the chain and is likely irrelevant for the other bridges.
+        *
+        * For atomic drivers the adjusted_mode is the mode stored in
+        * &drm_crtc_state.adjusted_mode.
+        *
+        * NOTE:
+        *
+        * If a need arises to store and access modes adjusted for other
+        * locations than the connection between the CRTC and the first bridge,
+        * the DRM framework will have to be extended with DRM bridge states.
         */
        void (*mode_set)(struct drm_bridge *bridge,
                         struct drm_display_mode *mode,
@@ -254,27 +270,29 @@ struct drm_bridge_timings {
 
 /**
  * struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @timings: the timing specification for the bridge, if any (may
- * be NULL)
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
  */
 struct drm_bridge {
+       /** @dev: DRM device this bridge belongs to */
        struct drm_device *dev;
+       /** @encoder: encoder to which this bridge is connected */
        struct drm_encoder *encoder;
+       /** @next: the next bridge in the encoder chain */
        struct drm_bridge *next;
 #ifdef CONFIG_OF
+       /** @of_node: device node pointer to the bridge */
        struct device_node *of_node;
 #endif
+       /** @list: to keep track of all added bridges */
        struct list_head list;
+       /**
+        * @timings:
+        *
+        * the timing specification for the bridge, if any (may be NULL)
+        */
        const struct drm_bridge_timings *timings;
-
+       /** @funcs: control functions */
        const struct drm_bridge_funcs *funcs;
+       /** @driver_private: pointer to the bridge driver's internal context */
        void *driver_private;
 };
 
@@ -285,15 +303,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
                      struct drm_bridge *previous);
 
 bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-                       const struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
+                          const struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
 enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
                                           const struct drm_display_mode *mode);
 void drm_bridge_disable(struct drm_bridge *bridge);
 void drm_bridge_post_disable(struct drm_bridge *bridge);
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
 void drm_bridge_pre_enable(struct drm_bridge *bridge);
 void drm_bridge_enable(struct drm_bridge *bridge);
 
index 675cc3f..14ab58a 100644 (file)
@@ -419,6 +419,14 @@ struct drm_connector_state {
        enum hdmi_picture_aspect picture_aspect_ratio;
 
        /**
+        * @content_type: Connector property to control the
+        * HDMI infoframe content type setting.
+        * The %DRM_MODE_CONTENT_TYPE_\* values must
+        * match the HDMI AVI infoframe content type values.
+        */
+       unsigned int content_type;
+
+       /**
         * @scaling_mode: Connector property to control the
         * upscaling, mostly used for built-in panels.
         */
@@ -429,6 +437,19 @@ struct drm_connector_state {
         * protection. This is most commonly used for HDCP.
         */
        unsigned int content_protection;
+
+       /**
+        * @writeback_job: Writeback job for writeback connectors
+        *
+        * Holds the framebuffer and out-fence for a writeback connector. As
+        * the writeback completion may be asynchronous to the normal commit
+        * cycle, the writeback job lifetime is managed separately from the
+        * normal atomic state by this object.
+        *
+        * See also: drm_writeback_queue_job() and
+        * drm_writeback_signal_completion()
+        */
+       struct drm_writeback_job *writeback_job;
 };
 
 /**
@@ -608,6 +629,8 @@ struct drm_connector_funcs {
         * cleaned up by calling the @atomic_destroy_state hook in this
         * structure.
         *
+        * This callback is mandatory for atomic drivers.
+        *
         * Atomic drivers which don't subclass &struct drm_connector_state should use
         * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
         * state structure to extend it with driver-private state should use
@@ -634,6 +657,8 @@ struct drm_connector_funcs {
         *
         * Destroy a state duplicated with @atomic_duplicate_state and release
         * or unreference all resources it references
+        *
+        * This callback is mandatory for atomic drivers.
         */
        void (*atomic_destroy_state)(struct drm_connector *connector,
                                     struct drm_connector_state *state);
@@ -1089,11 +1114,16 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
                                  unsigned int num_modes,
                                  const char * const modes[]);
 int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_connector_attach_content_type_property(struct drm_connector *connector);
 int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
                                               u32 scaling_mode_mask);
 int drm_connector_attach_content_protection_property(
                struct drm_connector *connector);
 int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_content_type_property(struct drm_device *dev);
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+                                        const struct drm_connector_state *conn_state);
+
 int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
 
 int drm_mode_connector_set_path_property(struct drm_connector *connector,
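
A hedged sketch of how an HDMI encoder might wire the new property up (the connector and frame variables are illustrative):

    struct hdmi_avi_infoframe frame;

    /* At connector init time: */
    drm_connector_attach_content_type_property(connector);

    /* While building the AVI infoframe for a commit: */
    hdmi_avi_infoframe_init(&frame);
    drm_hdmi_avi_infoframe_content_type(&frame, connector->state);
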
index a2d81d2..23eddbc 100644 (file)
@@ -134,10 +134,13 @@ struct drm_crtc_state {
         *
         * Internal display timings which can be used by the driver to handle
         * differences between the mode requested by userspace in @mode and what
-        * is actually programmed into the hardware. It is purely driver
-        * implementation defined what exactly this adjusted mode means. Usually
-        * it is used to store the hardware display timings used between the
-        * CRTC and encoder blocks.
+        * is actually programmed into the hardware.
+        *
+        * For drivers using drm_bridge, this stores hardware display timings
+        * used between the CRTC and the first bridge. For other drivers, the
+        * meaning of the adjusted_mode field is purely driver implementation
+        * defined information, and will usually be used to store the hardware
+        * display timings used between the CRTC and encoder blocks.
         */
        struct drm_display_mode adjusted_mode;
 
@@ -503,6 +506,8 @@ struct drm_crtc_funcs {
         * cleaned up by calling the @atomic_destroy_state hook in this
         * structure.
         *
+        * This callback is mandatory for atomic drivers.
+        *
         * Atomic drivers which don't subclass &struct drm_crtc_state should use
         * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
         * state structure to extend it with driver-private state should use
@@ -529,6 +534,8 @@ struct drm_crtc_funcs {
         *
         * Destroy a state duplicated with @atomic_duplicate_state and release
         * or unreference all resources it references
+        *
+        * This callback is mandatory for atomic drivers.
         */
        void (*atomic_destroy_state)(struct drm_crtc *crtc,
                                     struct drm_crtc_state *state);
index 027ac16..26485ac 100644 (file)
@@ -193,6 +193,13 @@ struct drm_file {
        unsigned aspect_ratio_allowed:1;
 
        /**
+        * @writeback_connectors:
+        *
+        * True if client understands writeback connectors
+        */
+       unsigned writeback_connectors:1;
+
+       /**
         * @is_master:
         *
         * This client is the creator of @master. Protected by struct
index 101f566..2c3bbb4 100644 (file)
@@ -109,6 +109,38 @@ enum drm_mm_insert_mode {
         * Allocates the node from the bottom of the found hole.
         */
        DRM_MM_INSERT_EVICT,
+
+       /**
+        * @DRM_MM_INSERT_ONCE:
+        *
+        * Only check the first hole for suitability and report -ENOSPC
+        * immediately otherwise, rather than check every hole until a
+        * suitable one is found. Can only be used in conjunction with another
+        * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
+        */
+       DRM_MM_INSERT_ONCE = BIT(31),
+
+       /**
+        * @DRM_MM_INSERT_HIGHEST:
+        *
+        * Only check the highest hole (the hole with the largest address) and
+        * insert the node at the top of the hole or report -ENOSPC if
+        * unsuitable.
+        *
+        * Does not search all holes.
+        */
+       DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
+
+       /**
+        * @DRM_MM_INSERT_LOWEST:
+        *
+        * Only check the lowest hole (the hole with the smallest address) and
+        * insert the node at the bottom of the hole or report -ENOSPC if
+        * unsuitable.
+        *
+        * Does not search all holes.
+        */
+       DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
 };
 
 /**
@@ -173,7 +205,7 @@ struct drm_mm {
        struct drm_mm_node head_node;
        /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
        struct rb_root_cached interval_tree;
-       struct rb_root holes_size;
+       struct rb_root_cached holes_size;
        struct rb_root holes_addr;
 
        unsigned long scan_active;
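
A hypothetical caller of the new insertion modes (mm, node, size and vram_size are made-up names):

    /* Take the highest suitable hole or fail fast with -ENOSPC,
     * instead of walking every hole in the range.
     */
    err = drm_mm_insert_node_in_range(&mm, &node, size,
                                      0 /* alignment */, 0 /* color */,
                                      0, vram_size,
                                      DRM_MM_INSERT_HIGHEST);
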
index 33b3a96..a0b202e 100644 (file)
@@ -329,10 +329,10 @@ struct drm_mode_config_funcs {
 
 /**
  * struct drm_mode_config - Mode configuration control structure
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
+ * @min_width: minimum fb pixel width on this device
+ * @min_height: minimum fb pixel height on this device
+ * @max_width: maximum fb pixel width on this device
+ * @max_height: maximum fb pixel height on this device
  * @funcs: core driver provided mode setting functions
  * @fb_base: base address of the framebuffer
  * @poll_enabled: track polling support for this device
@@ -727,6 +727,11 @@ struct drm_mode_config {
         */
        struct drm_property *aspect_ratio_property;
        /**
+        * @content_type_property: Optional connector property to control the
+        * HDMI infoframe content type setting.
+        */
+       struct drm_property *content_type_property;
+       /**
         * @degamma_lut_property: Optional CRTC property to set the LUT used to
         * convert the framebuffer's colors to linear gamma.
         */
@@ -779,6 +784,29 @@ struct drm_mode_config {
         */
        struct drm_property *panel_orientation_property;
 
+       /**
+        * @writeback_fb_id_property: Property for writeback connectors, storing
+        * the ID of the output framebuffer.
+        * See also: drm_writeback_connector_init()
+        */
+       struct drm_property *writeback_fb_id_property;
+
+       /**
+        * @writeback_pixel_formats_property: Property for writeback connectors,
+        * storing an array of the supported pixel formats for the writeback
+        * engine (read-only).
+        * See also: drm_writeback_connector_init()
+        */
+       struct drm_property *writeback_pixel_formats_property;
+       /**
+        * @writeback_out_fence_ptr_property: Property for writeback connectors,
+        * holding a user-space pointer through which the out-fence fd for the
+        * writeback is returned. Userspace should provide a pointer to a value
+        * of type s32, and then cast that pointer to u64.
+        * See also: drm_writeback_connector_init()
+        */
+       struct drm_property *writeback_out_fence_ptr_property;
+
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
 
index 35e2a3a..3b28977 100644 (file)
@@ -974,6 +974,17 @@ struct drm_connector_helper_funcs {
         */
        int (*atomic_check)(struct drm_connector *connector,
                            struct drm_connector_state *state);
+
+       /**
+        * @atomic_commit:
+        *
+        * This hook is used by drivers implementing writeback connectors that
+        * need a commit point at which to push the writeback job to the
+        * hardware.
+        *
+        * This callback is used by the atomic modeset helpers.
+        */
+       void (*atomic_commit)(struct drm_connector *connector,
+                             struct drm_writeback_job *writeback_job);
 };
 
 /**
index b93c239..ead34ab 100644 (file)
@@ -17,6 +17,8 @@ struct drm_bridge;
 struct device_node;
 
 #ifdef CONFIG_OF
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+                           struct device_node *port);
 uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
                                    struct device_node *port);
 void drm_of_component_match_add(struct device *master,
@@ -34,6 +36,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
                                struct drm_panel **panel,
                                struct drm_bridge **bridge);
 #else
+static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+                                         struct device_node *port)
+{
+       return 0;
+}
+
 static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
                                                  struct device_node *port)
 {
index 14ac240..26a1b5f 100644 (file)
@@ -89,6 +89,7 @@ struct drm_panel {
        struct drm_device *drm;
        struct drm_connector *connector;
        struct device *dev;
+       struct device_link *link;
 
        const struct drm_panel_funcs *funcs;
 
index 26fa50c..7d4d6c7 100644 (file)
@@ -288,6 +288,8 @@ struct drm_plane_funcs {
         * cleaned up by calling the @atomic_destroy_state hook in this
         * structure.
         *
+        * This callback is mandatory for atomic drivers.
+        *
         * Atomic drivers which don't subclass &struct drm_plane_state should use
         * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
         * state structure to extend it with driver-private state should use
@@ -314,6 +316,8 @@ struct drm_plane_funcs {
         *
         * Destroy a state duplicated with @atomic_duplicate_state and release
         * or unreference all resources it references
+        *
+        * This callback is mandatory for atomic drivers.
         */
        void (*atomic_destroy_state)(struct drm_plane *plane,
                                     struct drm_plane_state *state);
@@ -431,7 +435,10 @@ struct drm_plane_funcs {
         * This optional hook is used for the DRM to determine if the given
         * format/modifier combination is valid for the plane. This allows the
         * DRM to generate the correct format bitmask (which formats apply to
-        * which modifier).
+        * which modifier), and to validate modifiers at atomic_check time.
+        *
+        * If not present, then any modifier in the plane's modifier
+        * list is allowed with any of the plane's formats.
         *
         * Returns:
         *
index 4d5f5d6..d716d65 100644 (file)
@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info);
 void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach);
 void drm_gem_map_detach(struct dma_buf *dma_buf,
                        struct dma_buf_attachment *attach);
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir);
 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-                                unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-                                 unsigned long page_num, void *addr);
 void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
 void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
                           void *addr);
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
new file mode 100644 (file)
index 0000000..a10fe55
--- /dev/null
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#ifndef __DRM_WRITEBACK_H__
+#define __DRM_WRITEBACK_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <linux/workqueue.h>
+
+struct drm_writeback_connector {
+       struct drm_connector base;
+
+       /**
+        * @encoder: Internal encoder used by the connector to fulfill
+        * the DRM framework requirements. The users of the
+        * @drm_writeback_connector control the behaviour of the @encoder
+        * by passing the @enc_helper_funcs parameter to the
+        * drm_writeback_connector_init() function.
+        */
+       struct drm_encoder encoder;
+
+       /**
+        * @pixel_formats_blob_ptr:
+        *
+        * DRM blob property data for the pixel formats list on writeback
+        * connectors
+        * See also drm_writeback_connector_init()
+        */
+       struct drm_property_blob *pixel_formats_blob_ptr;
+
+       /** @job_lock: Protects job_queue */
+       spinlock_t job_lock;
+
+       /**
+        * @job_queue:
+        *
+        * Holds a list of a connector's writeback jobs; the last item is the
+        * most recent. The first item may be either waiting for the hardware
+        * to begin writing, or currently being written.
+        *
+        * See also: drm_writeback_queue_job() and
+        * drm_writeback_signal_completion()
+        */
+       struct list_head job_queue;
+
+       /**
+        * @fence_context:
+        *
+        * timeline context used for fence operations.
+        */
+       unsigned int fence_context;
+       /**
+        * @fence_lock:
+        *
+        * spinlock to protect the fences in the fence_context.
+        */
+       spinlock_t fence_lock;
+       /**
+        * @fence_seqno:
+        *
+        * Seqno variable used as monotonic counter for the fences
+        * created on the connector's timeline.
+        */
+       unsigned long fence_seqno;
+       /**
+        * @timeline_name:
+        *
+        * The name of the connector's fence timeline.
+        */
+       char timeline_name[32];
+};
+
+struct drm_writeback_job {
+       /**
+        * @cleanup_work:
+        *
+        * Used to allow drm_writeback_signal_completion to defer dropping the
+        * framebuffer reference to a workqueue
+        */
+       struct work_struct cleanup_work;
+
+       /**
+        * @list_entry:
+        *
+        * List item for the writeback connector's @job_queue
+        */
+       struct list_head list_entry;
+
+       /**
+        * @fb:
+        *
+        * Framebuffer to be written to by the writeback connector. Do not set
+        * directly, use drm_atomic_set_writeback_fb_for_connector()
+        */
+       struct drm_framebuffer *fb;
+
+       /**
+        * @out_fence:
+        *
+        * Fence which will signal once the writeback has completed
+        */
+       struct dma_fence *out_fence;
+};
+
+int drm_writeback_connector_init(struct drm_device *dev,
+                                struct drm_writeback_connector *wb_connector,
+                                const struct drm_connector_funcs *con_funcs,
+                                const struct drm_encoder_helper_funcs *enc_helper_funcs,
+                                const u32 *formats, int n_formats);
+
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+                            struct drm_writeback_job *job);
+
+void drm_writeback_cleanup_job(struct drm_writeback_job *job);
+
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+                               int status);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector);
+#endif
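
For orientation, a hedged sketch of how a driver might drive these entry points (all foo_* names are hypothetical):

    /* Commit point, wired to drm_connector_helper_funcs.atomic_commit: */
    static void foo_wb_atomic_commit(struct drm_connector *conn,
                                     struct drm_writeback_job *job)
    {
            struct drm_writeback_connector *wb_conn =
                    container_of(conn, struct drm_writeback_connector, base);

            drm_writeback_queue_job(wb_conn, job);
            foo_hw_start_writeback(job->fb);        /* hypothetical hw hook */
    }

    /* Write-back-done interrupt: */
    static irqreturn_t foo_wb_irq(int irq, void *data)
    {
            struct drm_writeback_connector *wb_conn = data;

            drm_writeback_signal_completion(wb_conn, 0);
            return IRQ_HANDLED;
    }
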
index dec6558..7c2dfd6 100644 (file)
@@ -27,6 +27,8 @@
 #include <drm/spsc_queue.h>
 #include <linux/dma-fence.h>
 
+#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
@@ -43,13 +45,33 @@ enum drm_sched_priority {
 };
 
 /**
- * drm_sched_entity - A wrapper around a job queue (typically attached
- * to the DRM file_priv).
+ * struct drm_sched_entity - A wrapper around a job queue (typically
+ * attached to the DRM file_priv).
+ *
+ * @list: used to append this struct to the list of entities in the
+ *        runqueue.
+ * @rq: runqueue to which this entity belongs.
+ * @rq_lock: lock to modify the runqueue to which this entity belongs.
+ * @sched: the scheduler instance to which this entity is enqueued.
+ * @job_queue: the list of jobs of this entity.
+ * @fence_seq: a linearly increasing seqno incremented with each
+ *             new &drm_sched_fence which is part of the entity.
+ * @fence_context: a unique context for all the fences which belong
+ *                 to this entity.
+ *                 The &drm_sched_fence.scheduled uses the
+ *                 fence_context but &drm_sched_fence.finished uses
+ *                 fence_context + 1.
+ * @dependency: the dependency fence of the job which is on the top
+ *              of the job queue.
+ * @cb: callback for the dependency fence above.
+ * @guilty: points to ctx's guilty.
+ * @last_scheduled: points to the finished fence of the last scheduled job.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
  * scheduling policy.
-*/
+ */
 struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
@@ -63,47 +85,95 @@ struct drm_sched_entity {
 
        struct dma_fence                *dependency;
        struct dma_fence_cb             cb;
-       atomic_t                        *guilty; /* points to ctx's guilty */
-       int            fini_status;
-       struct dma_fence    *last_scheduled;
+       atomic_t                        *guilty;
+       struct dma_fence                *last_scheduled;
 };
 
 /**
+ * struct drm_sched_rq - queue of entities to be scheduled.
+ *
+ * @lock: to modify the entities list.
+ * @entities: list of the entities to be scheduled.
+ * @current_entity: the entity which is to be scheduled.
+ *
  * Run queue is a set of entities scheduling command submissions for
  * one specific ring. It implements the scheduling policy that selects
  * the next entity to emit commands from.
-*/
+ */
 struct drm_sched_rq {
        spinlock_t                      lock;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
 };
 
+/**
+ * struct drm_sched_fence - fences corresponding to the scheduling of a job.
+ */
 struct drm_sched_fence {
+        /**
+         * @scheduled: this fence is what will be signaled by the scheduler
+         * when the job is scheduled.
+         */
        struct dma_fence                scheduled;
 
-       /* This fence is what will be signaled by the scheduler when
-        * the job is completed.
-        *
-        * When setting up an out fence for the job, you should use
-        * this, since it's available immediately upon
-        * drm_sched_job_init(), and the fence returned by the driver
-        * from run_job() won't be created until the dependencies have
-        * resolved.
-        */
+        /**
+         * @finished: this fence is what will be signaled by the scheduler
+         * when the job is completed.
+         *
+         * When setting up an out fence for the job, you should use
+         * this, since it's available immediately upon
+         * drm_sched_job_init(), and the fence returned by the driver
+         * from run_job() won't be created until the dependencies have
+         * resolved.
+         */
        struct dma_fence                finished;
 
+        /**
+         * @cb: the callback for the parent fence below.
+         */
        struct dma_fence_cb             cb;
+        /**
+         * @parent: the fence returned by &drm_sched_backend_ops.run_job
+         * when scheduling the job on hardware. We signal the
+         * &drm_sched_fence.finished fence once parent is signalled.
+         */
        struct dma_fence                *parent;
+        /**
+         * @sched: the scheduler instance to which the job having this struct
+         * belongs.
+         */
        struct drm_gpu_scheduler        *sched;
+        /**
+         * @lock: the lock used by the scheduled and the finished fences.
+         */
        spinlock_t                      lock;
+        /**
+         * @owner: job owner for debugging
+         */
        void                            *owner;
 };
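
As the @finished documentation above says, that fence is the one to expose as the job's out-fence, since it exists as soon as drm_sched_job_init() succeeds. A minimal submit-path sketch under that reading; struct my_job and my_submit() are hypothetical, and the init/push signatures are the ones this series uses:

struct my_job { struct drm_sched_job base; /* plus driver payload */ };

static struct dma_fence *my_submit(struct my_job *job,
				   struct drm_gpu_scheduler *sched,
				   struct drm_sched_entity *entity)
{
	struct dma_fence *out;
	int r;

	r = drm_sched_job_init(&job->base, sched, entity, job);
	if (r)
		return ERR_PTR(r);

	/* available now; the fence from run_job() does not exist yet */
	out = dma_fence_get(&job->base.s_fence->finished);
	drm_sched_entity_push_job(&job->base, entity);
	return out;
}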
 
 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 
 /**
- * drm_sched_job - A job to be run by an entity.
+ * struct drm_sched_job - A job to be run by an entity.
+ *
+ * @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @sched: the scheduler instance on which this job is scheduled.
+ * @s_fence: contains the fences for the scheduling of job.
+ * @finish_cb: the callback for the finished fence.
+ * @finish_work: schedules the function @drm_sched_job_finish once the job has
+ *               finished to remove the job from the
+ *               @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
+ *            interval is over.
+ * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @karma: increment on every hang caused by this job. If this exceeds the hang
+ *         limit of the scheduler then the job is marked guilty and will not
+ *         be scheduled further.
+ * @s_priority: the priority of the job.
+ * @entity: the entity to which this job belongs.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -130,38 +200,64 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 }
 
 /**
+ * struct drm_sched_backend_ops
+ *
  * Define the backend operations called by the scheduler;
 - * these functions should be implemented in driver side
 -*/
 + * these functions should be implemented on the driver side.
 + */
 struct drm_sched_backend_ops {
-       /* Called when the scheduler is considering scheduling this
-        * job next, to get another struct dma_fence for this job to
+       /**
+         * @dependency: Called when the scheduler is considering scheduling
+         * this job next, to get another struct dma_fence for this job to
         * block on.  Once it returns NULL, run_job() may be called.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);
 
-       /* Called to execute the job once all of the dependencies have
-        * been resolved.  This may be called multiple times, if
+       /**
+         * @run_job: Called to execute the job once all of the dependencies
+         * have been resolved.  This may be called multiple times, if
         * timedout_job() has happened and drm_sched_job_recovery()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
 
-       /* Called when a job has taken too long to execute, to trigger
-        * GPU recovery.
+       /**
+         * @timedout_job: Called when a job has taken too long to execute,
+         * to trigger GPU recovery.
         */
        void (*timedout_job)(struct drm_sched_job *sched_job);
 
-       /* Called once the job's finished fence has been signaled and
-        * it's time to clean it up.
+       /**
+         * @free_job: Called once the job's finished fence has been signaled
+         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
 };
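
Put together, a driver fills this vtable roughly as below; every my_* symbol is a hypothetical stand-in for real submission and recovery code (struct my_job is the wrapper from the sketch above), not part of the scheduler API:

static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
				       struct drm_sched_entity *s_entity)
{
	return NULL;	/* nothing to wait for; run_job() may now be called */
}

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* push to the hardware ring, return the hardware fence */
	return my_hw_submit(sched_job);
}

static void my_timedout_job(struct drm_sched_job *sched_job)
{
	my_gpu_recover(sched_job->sched);	/* trigger GPU recovery */
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};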
 
 /**
- * One scheduler is implemented for each hardware ring
-*/
+ * struct drm_gpu_scheduler
+ *
+ * @ops: backend operations provided by the driver.
+ * @hw_submission_limit: the max size of the hardware queue.
+ * @timeout: the time after which a job is removed from the scheduler.
+ * @name: name of the ring for which this scheduler is being used.
+ * @sched_rq: priority-wise array of run queues.
+ * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
+ *                  is ready to be scheduled.
+ * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ *                 waits on this wait queue until all the scheduled jobs are
+ *                 finished.
+ * @hw_rq_count: the number of jobs currently in the hardware queue.
+ * @job_id_count: used to assign a unique id to each job.
+ * @thread: the kthread on which the scheduler runs.
+ * @ring_mirror_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the ring_mirror_list.
+ * @hang_limit: once the hangs caused by a job cross this limit, the job is
+ *              marked guilty and will not be scheduled further.
+ *
+ * One scheduler is implemented for each hardware ring.
+ */
 struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;
        uint32_t                        hw_submission_limit;
@@ -188,8 +284,8 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                          struct drm_sched_entity *entity,
                          struct drm_sched_rq *rq,
                          atomic_t *guilty);
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity);
+long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+                          struct drm_sched_entity *entity, long timeout);
 void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity);
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
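
The do_release() change above means teardown can now bound its wait, with MAX_WAIT_SCHED_ENTITY_Q_EMPTY from the top of this header as a natural budget. A sketch, assuming the return value is the time remaining (the exact convention should be checked against the C file); the wrapper and error message are illustrative:

static void my_entity_teardown(struct drm_gpu_scheduler *sched,
			       struct drm_sched_entity *entity)
{
	long left;

	/* wait, up to the timeout, for the entity's queued jobs to drain */
	left = drm_sched_entity_do_release(sched, entity,
					   MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	if (left <= 0)
		DRM_ERROR("sched entity queue did not drain in time\n");

	/* then free the scheduler-side state */
	drm_sched_entity_cleanup(sched, entity);
}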
index bab70ff..fbf5cfc 100644 (file)
 #define INTEL_KBL_GT2_IDS(info)        \
        INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
        INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
-       INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
        INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
        INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
        INTEL_VGA_DEVICE(0x5912, info), /* DT  GT2 */ \
 #define INTEL_KBL_GT4_IDS(info) \
        INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
+/* AML/KBL Y GT2 */
+#define INTEL_AML_GT2_IDS(info) \
+       INTEL_VGA_DEVICE(0x591C, info),  /* ULX GT2 */ \
+       INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+
 #define INTEL_KBL_IDS(info) \
        INTEL_KBL_GT1_IDS(info), \
        INTEL_KBL_GT2_IDS(info), \
        INTEL_KBL_GT3_IDS(info), \
-       INTEL_KBL_GT4_IDS(info)
+       INTEL_KBL_GT4_IDS(info), \
+       INTEL_AML_GT2_IDS(info)
 
 /* CFL S */
 #define INTEL_CFL_S_GT1_IDS(info) \
        INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
        INTEL_VGA_DEVICE(0x3E94, info)  /* Halo GT2 */
 
-/* CFL U GT1 */
-#define INTEL_CFL_U_GT1_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA1, info), \
-       INTEL_VGA_DEVICE(0x3EA4, info)
-
 /* CFL U GT2 */
 #define INTEL_CFL_U_GT2_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA0, info), \
-       INTEL_VGA_DEVICE(0x3EA3, info), \
        INTEL_VGA_DEVICE(0x3EA9, info)
 
 /* CFL U GT3 */
 #define INTEL_CFL_U_GT3_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA8, info)  /* ULT GT3 */
 
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(info) \
+       INTEL_VGA_DEVICE(0x3EA1, info)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(info) \
+       INTEL_VGA_DEVICE(0x3EA0, info)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(info) \
+       INTEL_VGA_DEVICE(0x3EA2, info), \
+       INTEL_VGA_DEVICE(0x3EA3, info), \
+       INTEL_VGA_DEVICE(0x3EA4, info)
+
 #define INTEL_CFL_IDS(info)       \
        INTEL_CFL_S_GT1_IDS(info), \
        INTEL_CFL_S_GT2_IDS(info), \
        INTEL_CFL_H_GT2_IDS(info), \
-       INTEL_CFL_U_GT1_IDS(info), \
        INTEL_CFL_U_GT2_IDS(info), \
-       INTEL_CFL_U_GT3_IDS(info)
+       INTEL_CFL_U_GT3_IDS(info), \
+       INTEL_WHL_U_GT1_IDS(info), \
+       INTEL_WHL_U_GT2_IDS(info), \
+       INTEL_WHL_U_GT3_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_IDS(info) \
diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h
new file mode 100644 (file)
index 0000000..25164d7
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/* Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+
+#define CLK_TCON_TOP_TV0       0
+#define CLK_TCON_TOP_TV1       1
+#define CLK_TCON_TOP_DSI       2
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */
index 085db2f..58725f8 100644 (file)
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
 
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
  *             space, users may not block until the subsequent unmap call.
  *             This callback must not sleep.
  * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
  *               This callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
  * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  *       address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
         * @attach:
         *
         * This is called from dma_buf_attach() to make sure that a given
-        * &device can access the provided &dma_buf. Exporters which support
-        * buffer objects in special locations like VRAM or device-specific
-        * carveout areas should check whether the buffer could be move to
-        * system memory (or directly accessed by the provided device), and
-        * otherwise need to fail the attach operation.
+        * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+        * which support buffer objects in special locations like VRAM or
+        * device-specific carveout areas should check whether the buffer could
+        * be moved to system memory (or directly accessed by the provided
+        * device), and otherwise need to fail the attach operation.
         *
         * The exporter should also in general check whether the current
         * allocation fulfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
         * to signal that backing storage is already allocated and incompatible
         * with the requirements of requesting device.
         */
-       int (*attach)(struct dma_buf *, struct device *,
-                     struct dma_buf_attachment *);
+       int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
 
        /**
         * @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
         * to be restarted.
         */
        int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-       void *(*map_atomic)(struct dma_buf *, unsigned long);
-       void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
        void *(*map)(struct dma_buf *, unsigned long);
        void (*unmap)(struct dma_buf *, unsigned long, void *);
 
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
                           enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
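
With the @attach signature above losing its struct device * parameter, exporters reach the importing device through &dma_buf_attachment.dev instead. A sketch of an exporter-side check; the my_* placement logic is hypothetical VRAM-exporter pseudologic:

static int my_attach(struct dma_buf *dmabuf,
		     struct dma_buf_attachment *attach)
{
	struct my_bo *bo = dmabuf->priv;

	/* the importing device now comes from the attachment itself */
	if (!my_device_can_reach(attach->dev, bo))
		return -EBUSY;	/* backing storage incompatible with importer */

	return 0;
}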
 
index 78b4dd8..784b0fe 100644 (file)
@@ -72,6 +72,29 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
 #define DRM_IOCTL_AMDGPU_SCHED         DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
 
+/**
+ * DOC: memory domains
+ *
+ * %AMDGPU_GEM_DOMAIN_CPU      System memory that is not GPU accessible.
+ * Memory in this pool could be swapped out to disk if there is pressure.
+ *
+ * %AMDGPU_GEM_DOMAIN_GTT      GPU accessible system memory, mapped into the
+ * GPU's virtual address space via GART. The GART linearizes non-contiguous
+ * pages of system memory, allowing the GPU to access system memory in a
+ * linearized fashion.
+ *
+ * %AMDGPU_GEM_DOMAIN_VRAM     Local video memory. For APUs, it is memory
+ * carved out by the BIOS.
+ *
+ * %AMDGPU_GEM_DOMAIN_GDS      Global on-chip data storage used to share data
+ * across shader threads.
+ *
+ * %AMDGPU_GEM_DOMAIN_GWS      Global wave sync, used to synchronize the
+ * execution of all the waves on a device.
+ *
+ * %AMDGPU_GEM_DOMAIN_OA       Ordered append, used by 3D or Compute engines
+ * for appending data.
+ */
 #define AMDGPU_GEM_DOMAIN_CPU          0x1
 #define AMDGPU_GEM_DOMAIN_GTT          0x2
 #define AMDGPU_GEM_DOMAIN_VRAM         0x4
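
For context, userspace picks one of these domains when allocating a buffer. A minimal sketch against the GEM_CREATE ioctl from this header; the open render-node fd and the 4 KiB alignment are assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int alloc_gtt_bo(int fd, __u64 size, __u32 *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size   = size;
	args.in.alignment = 4096;
	args.in.domains   = AMDGPU_GEM_DOMAIN_GTT;	/* GPU-visible system RAM */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;
	return 0;
}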
@@ -483,7 +506,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_HW_IP_UVD_ENC      5
 #define AMDGPU_HW_IP_VCN_DEC      6
 #define AMDGPU_HW_IP_VCN_ENC      7
-#define AMDGPU_HW_IP_NUM          8
+#define AMDGPU_HW_IP_VCN_JPEG     8
+#define AMDGPU_HW_IP_NUM          9
 
 #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
 
index 9c660e1..300f336 100644 (file)
@@ -687,6 +687,15 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
 
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on client
+ * also supporting DRM_CLIENT_CAP_ATOMIC
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS    5
+
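
From userspace this is an ordinary SET_CLIENT_CAP call; a sketch with libdrm's drmSetClientCap(), enabling the atomic cap first as required above (assumes headers new enough to define the new cap):

#include <xf86drm.h>

static int enable_writeback(int fd)
{
	/* writeback connectors depend on the atomic cap */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
		return -1;
	return drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
}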
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index e04613d..64bf67a 100644 (file)
@@ -385,6 +385,23 @@ extern "C" {
        fourcc_mod_code(NVIDIA, 0x15)
 
 /*
+ * Some Broadcom modifiers take parameters, for example the number of
+ * vertical lines in the image. Reserve the lower 8 bits for the modifier
+ * type and the next 48 bits for parameters, as the macros below encode.
+ * The top 8 bits are the vendor code.
+ */
+#define __fourcc_mod_broadcom_param_shift 8
+#define __fourcc_mod_broadcom_param_bits 48
+#define fourcc_mod_broadcom_code(val, params) \
+       fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
+#define fourcc_mod_broadcom_param(m) \
+       ((int)(((m) >> __fourcc_mod_broadcom_param_shift) &     \
+              ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
+#define fourcc_mod_broadcom_mod(m) \
+       ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) <<    \
+                __fourcc_mod_broadcom_param_shift))
+
+/*
  * Broadcom VC4 "T" format
  *
  * This is the primary layout that the V3D GPU can texture from (it
@@ -405,6 +422,48 @@ extern "C" {
  */
 #define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
 
+/*
+ * Broadcom SAND format
+ *
+ * This is the native format that the H.264 codec block uses.  For VC4
+ * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
+ *
+ * The image can be considered to be split into columns, and the
+ * columns are placed consecutively into memory.  The width of those
+ * columns can be either 32, 64, 128, or 256 pixels, but in practice
+ * only 128 pixel columns are used.
+ *
+ * The pitch between the start of each column is set to optimally
+ * switch between SDRAM banks. This is passed as the number of lines
+ * of column width in the modifier (we can't use the stride value due
+ * to various core checks that look at it, so you should set the
+ * stride to width*cpp).
+ *
+ * Note that the column height for this format modifier is the same
+ * for all of the planes, assuming that each column contains both Y
+ * and UV.  Some SAND-using hardware stores UV in a separate tiled
+ * image from Y to reduce the column height, which is not supported
+ * with these modifiers.
+ */
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(2, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(3, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(4, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(5, v)
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
+       DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
+       DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
+       DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
+       DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
+
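
A short sketch of packing and decoding these parameterized modifiers, using a hypothetical 96-line column height for SAND128; only the macros defined above are exercised:

#include <stdint.h>
#include <drm/drm_fourcc.h>

static void sand128_example(void)
{
	/* pack: SAND128 with 96 lines per column */
	uint64_t mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

	/* decode on the consumer side */
	int col_height = fourcc_mod_broadcom_param(mod);	/* -> 96 */
	uint64_t base = fourcc_mod_broadcom_mod(mod);	/* -> ..._SAND128 */

	/* per the comment above, set the plane stride to width * cpp,
	 * not to the column pitch encoded here */
	(void)col_height; (void)base;
}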
 #if defined(__cplusplus)
 }
 #endif
index 4b3a1bb..8d67243 100644 (file)
@@ -96,6 +96,13 @@ extern "C" {
 #define DRM_MODE_PICTURE_ASPECT_64_27          3
 #define DRM_MODE_PICTURE_ASPECT_256_135                4
 
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA          0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS         1
+#define DRM_MODE_CONTENT_TYPE_PHOTO            2
+#define DRM_MODE_CONTENT_TYPE_CINEMA           3
+#define DRM_MODE_CONTENT_TYPE_GAME             4
+
 /* Aspect ratio flag bitmask (4 bits 22:19) */
 #define DRM_MODE_FLAG_PIC_AR_MASK              (0x0F<<19)
 #define  DRM_MODE_FLAG_PIC_AR_NONE \
@@ -344,6 +351,7 @@ enum drm_mode_subconnector {
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
 #define DRM_MODE_CONNECTOR_DSI         16
 #define DRM_MODE_CONNECTOR_DPI         17
+#define DRM_MODE_CONNECTOR_WRITEBACK   18
 
 struct drm_mode_get_connector {