1 /**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19 * develop this driver.
21 **************************************************************************/
31 #include "psb_msvdx.h"
32 #include "drm_pciids.h"
33 #include "psb_scene.h"
34 #include <linux/cpu.h>
35 #include <linux/notifier.h>
/*
 * Module parameters: runtime-tunable knobs exposed under
 * /sys/module/<name>/parameters with mode 0600.  The non-static
 * drm_psb_* ints are read by other compilation units of the driver;
 * drm_psb_debug is additionally exported for external modules.
 */
38 int drm_psb_debug = 0;
39 EXPORT_SYMBOL(drm_psb_debug);
40 static int drm_psb_trap_pagefaults = 0;
41 static int drm_psb_clock_gating = 0;
42 static int drm_psb_ta_mem_size = 32 * 1024; /* TA memory size in kiB (converted to pages in psb_do_init) */
43 int drm_psb_disable_vsync = 0;
44 int drm_psb_detear = 0;
45 int drm_psb_no_fb = 0;
46 int drm_psb_force_pipeb = 0;
/* Human-readable descriptions shown by modinfo. */
58 MODULE_PARM_DESC(debug, "Enable debug output");
59 MODULE_PARM_DESC(clock_gating, "clock gating");
60 MODULE_PARM_DESC(no_fb, "Disable FBdev");
61 MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
62 MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
63 MODULE_PARM_DESC(detear, "eliminate video playback tearing");
64 MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
65 MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
66 MODULE_PARM_DESC(mode, "initial mode name");
67 MODULE_PARM_DESC(xres, "initial mode width");
68 MODULE_PARM_DESC(yres, "initial mode height");
/* Bind each parameter name to its backing variable (0600 = root rw only). */
70 module_param_named(debug, drm_psb_debug, int, 0600);
71 module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
72 module_param_named(no_fb, drm_psb_no_fb, int, 0600);
73 module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
74 module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
75 module_param_named(detear, drm_psb_detear, int, 0600);
76 module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
77 module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
/* psb_init_mode/xres/yres are declared elsewhere (likely in a header). */
78 module_param_named(mode, psb_init_mode, charp, 0600);
79 module_param_named(xres, psb_init_xres, int, 0600);
80 module_param_named(yres, psb_init_yres, int, 0600);
/* PCI IDs this driver binds to (table contents not visible here). */
82 static struct pci_device_id pciidlist[] = {
/*
 * Driver-private ioctl numbers, built from the DRM_IO* encoding macros.
 * These must stay in sync with the userspace libdrm-psb definitions.
 */
86 #define DRM_PSB_CMDBUF_IOCTL DRM_IOW(DRM_PSB_CMDBUF, \
87 struct drm_psb_cmdbuf_arg)
88 #define DRM_PSB_XHW_INIT_IOCTL DRM_IOR(DRM_PSB_XHW_INIT, \
89 struct drm_psb_xhw_init_arg)
90 #define DRM_PSB_XHW_IOCTL DRM_IO(DRM_PSB_XHW)
92 #define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
94 #define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
95 struct drm_psb_hw_info)
97 #define DRM_PSB_KMS_OFF_IOCTL DRM_IO(DRM_PSB_KMS_OFF)
98 #define DRM_PSB_KMS_ON_IOCTL DRM_IO(DRM_PSB_KMS_ON)
/*
 * Dispatch table mapping each ioctl to its handler.  DRM_AUTH entries
 * require an authenticated client; DRM_ROOT_ONLY restricts to root.
 */
100 static struct drm_ioctl_desc psb_ioctls[] = {
101 DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
102 DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
104 DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_AUTH),
105 DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
107 DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
109 DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
110 DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
/* Number of entries above; assigned to driver.num_ioctls in psb_init(). */
112 static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
/* Forward declaration; definition is at the bottom of the file. */
114 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
/*
 * CPU hotplug callback: reacts to CPU_ONLINE events (body not fully
 * visible here; presumably re-applies per-CPU state such as PAT setup —
 * see the commented-out num_present_cpus() hints near the
 * register/unregister call sites — TODO confirm).
 */
118 static int __cpuinit psb_cpu_callback(struct notifier_block *nfb,
119 unsigned long action,
122 if (action == CPU_ONLINE)
/* Notifier block registered in psb_driver_load(), removed in unload. */
128 static struct notifier_block __cpuinitdata psb_nb = {
129 .notifier_call = psb_cpu_callback,
/*
 * .dri_library_name hook: reports "psb" so userspace can locate the
 * matching DRI client library.  Returns the number of bytes written.
 */
134 static int dri_library_name(struct drm_device *dev, char *buf)
136 return snprintf(buf, PAGE_SIZE, "psb\n");
/* Snapshot the module-parameter clock_gating setting into the
 * per-device user-option struct (read later by psb_clockgating()). */
139 static void psb_set_uopt(struct drm_psb_uopt *uopt)
141 uopt->clock_gating = drm_psb_clock_gating;
/*
 * .lastclose hook: runs when the final DRM client closes the device.
 * Drops the TA memory reference under struct_mutex and frees the
 * command-buffer staging area under cmdbuf_mutex.
 */
144 static void psb_lastclose(struct drm_device *dev)
146 struct drm_psb_private *dev_priv =
147 (struct drm_psb_private *)dev->dev_private;
/* Nothing to do if the device never finished loading. */
149 if (!dev->dev_private)
152 mutex_lock(&dev->struct_mutex);
153 if (dev_priv->ta_mem)
154 psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
155 mutex_unlock(&dev->struct_mutex);
156 mutex_lock(&dev_priv->cmdbuf_mutex);
157 if (dev_priv->buffers) {
158 vfree(dev_priv->buffers);
159 dev_priv->buffers = NULL;
161 mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
 * Tear down everything psb_do_init() created, in reverse order:
 * buffer-object memory ranges (under struct_mutex), the MSVDX video
 * decoder, and the cache-coherent communications page.  Each have_*
 * flag is cleared as its range is released, so this is safe to call
 * on a partially-initialized device (it is also the error path of
 * psb_do_init()).
 */
164 static void psb_do_takedown(struct drm_device *dev)
166 struct drm_psb_private *dev_priv =
167 (struct drm_psb_private *)dev->dev_private;
169 mutex_lock(&dev->struct_mutex);
170 if (dev->bm.initialized) {
171 if (dev_priv->have_mem_rastgeom) {
172 drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
173 dev_priv->have_mem_rastgeom = 0;
175 if (dev_priv->have_mem_mmu) {
176 drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
177 dev_priv->have_mem_mmu = 0;
179 if (dev_priv->have_mem_aper) {
180 drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
181 dev_priv->have_mem_aper = 0;
183 if (dev_priv->have_tt) {
184 drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
185 dev_priv->have_tt = 0;
187 if (dev_priv->have_vram) {
188 drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
189 dev_priv->have_vram = 0;
192 mutex_unlock(&dev->struct_mutex);
193 if (dev_priv->has_msvdx)
195 psb_msvdx_uninit(dev);
/* Unmap the comm page before freeing the backing page. */
197 if (dev_priv->comm) {
198 kunmap(dev_priv->comm_page);
199 dev_priv->comm = NULL;
201 if (dev_priv->comm_page) {
202 __free_page(dev_priv->comm_page);
203 dev_priv->comm_page = NULL;
/*
 * Program the SGX clock-gating control register according to the
 * clock_gating module parameter (via dev_priv->uopt.clock_gating):
 *   1 = force all units' clocks on (gating disabled),
 *   2 = automatic gating for all units,
 *   anything else = keep the current hardware setting.
 */
207 void psb_clockgating(struct drm_psb_private *dev_priv)
209 uint32_t clock_gating;
211 if (dev_priv->uopt.clock_gating == 1) {
212 PSB_DEBUG_INIT("Disabling clock gating.\n");
/* Build the register value: DISABLED state for every clock domain. */
214 clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
215 _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
216 (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
217 _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
218 (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
219 _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
220 (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
221 _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
222 (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
223 _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
224 (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
225 _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
227 } else if (dev_priv->uopt.clock_gating == 2) {
228 PSB_DEBUG_INIT("Enabling clock gating.\n");
/* AUTO state for every clock domain: hardware gates idle units. */
230 clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
231 _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
232 (_PSB_C_CLKGATECTL_CLKG_AUTO <<
233 _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
234 (_PSB_C_CLKGATECTL_CLKG_AUTO <<
235 _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
236 (_PSB_C_CLKGATECTL_CLKG_AUTO <<
237 _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
238 (_PSB_C_CLKGATECTL_CLKG_AUTO <<
239 _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
240 (_PSB_C_CLKGATECTL_CLKG_AUTO <<
241 _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
/* Default: read back and preserve the current register contents. */
243 clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
/* Workaround: force the 2D clock on regardless of mode. */
245 #ifdef FIX_TG_2D_CLOCKGATE
246 clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
247 clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
248 _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
250 PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
/* Posting read to flush the write to the hardware. */
251 (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
/*
 * Second-stage device init: allocates the cache-coherent comm page,
 * initializes MSVDX, sets up command-submission sequence counters,
 * lays out the GATT address space, and registers the buffer-object
 * memory ranges (VRAM, TT, MMU, RASTGEOM, APER).  On failure it
 * unwinds via psb_do_takedown().
 */
254 static int psb_do_init(struct drm_device *dev)
256 struct drm_psb_private *dev_priv =
257 (struct drm_psb_private *)dev->dev_private;
258 struct psb_gtt *pg = dev_priv->pg;
266 DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
/* Convert the ta_mem_size parameter (kiB) into a page count. */
268 dev_priv->ta_mem_pages =
269 PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
270 dev_priv->comm_page = alloc_page(GFP_KERNEL);
271 if (!dev_priv->comm_page)
/* Make the comm page uncached so CPU and SGX agree on its contents. */
274 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
275 change_page_attr(dev_priv->comm_page, 1, PAGE_KERNEL_NOCACHE);
277 map_page_into_agp(dev_priv->comm_page);
280 dev_priv->comm = kmap(dev_priv->comm_page);
281 memset((void *)dev_priv->comm, 0, PAGE_SIZE);
/* MSVDX (video decode) is optional: disable the flag if init fails. */
283 dev_priv->has_msvdx = 1;
284 if (psb_msvdx_init(dev))
285 dev_priv->has_msvdx = 0;
288 * Initialize sequence numbers for the different command
289 * submission mechanisms.
292 dev_priv->sequence[PSB_ENGINE_2D] = 0;
293 dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
294 dev_priv->sequence[PSB_ENGINE_TA] = 0;
295 dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
/* Hardware requirement: the GATT must start on a 256M boundary. */
297 if (pg->gatt_start & 0x0FFFFFFF) {
298 DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
/* Pages of GTT entries (4 bytes each) covering the stolen area. */
303 stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
304 stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
305 stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
307 dev_priv->gatt_free_offset = pg->gatt_start +
308 (stolen_gtt << PAGE_SHIFT) * 1024;
311 * Insert a cache-coherent communications page in mmu space
312 * just after the stolen area. Will be used for fencing etc.
315 dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
316 dev_priv->gatt_free_offset += PAGE_SIZE;
318 ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
319 &dev_priv->comm_page,
320 dev_priv->comm_mmu_offset, 1, 0, 0, 0);
/* "1 ||" forces the SGX core id/revision dump unconditionally. */
325 if (1 || drm_debug) {
326 uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
327 uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
328 DRM_INFO("SGX core id = 0x%08x\n", core_id);
329 DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
330 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
331 _PSB_CC_REVISION_MAJOR_SHIFT,
332 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
333 _PSB_CC_REVISION_MINOR_SHIFT);
335 ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
336 (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
337 _PSB_CC_REVISION_MAINTENANCE_SHIFT,
338 (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
339 _PSB_CC_REVISION_DESIGNER_SHIFT);
/* NOTE(review): SPIN_LOCK_UNLOCKED is deprecated; spin_lock_init()
 * is the preferred initializer on newer kernels. */
342 dev_priv->irqmask_lock = SPIN_LOCK_UNLOCKED;
343 dev_priv->fence0_irq_on = 0;
/* Clamp the private TT range, then subtract space already handed out. */
345 tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
346 pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
347 tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
348 tt_pages -= tt_start >> PAGE_SHIFT;
350 mutex_lock(&dev->struct_mutex);
/* Register each BO memory range; record success in a have_* flag. */
352 if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
353 pg->stolen_size >> PAGE_SHIFT)) {
354 dev_priv->have_vram = 1;
357 if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
359 dev_priv->have_tt = 1;
362 if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
364 PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
365 dev_priv->have_mem_mmu = 1;
368 if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
370 PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
371 dev_priv->have_mem_rastgeom = 1;
/* Aperture range exists only if the GATT exceeds the private limit. */
374 if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
375 if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
376 pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
377 dev_priv->have_mem_aper = 1;
382 mutex_unlock(&dev->struct_mutex);
/* Error path: release whatever was set up above. */
386 psb_do_takedown(dev);
/*
 * .unload hook (also the error path of psb_driver_load): tears down
 * in reverse load order — CPU notifier, modesetting, watchdog,
 * psb_do_init state, xhw, scheduler, BO ranges, MMU page directories,
 * GTT, scratch page, USSE bases, MMIO mappings — then frees dev_priv.
 * Tolerates partially-initialized state via NULL/flag checks.
 */
390 static int psb_driver_unload(struct drm_device *dev)
392 struct drm_psb_private *dev_priv =
393 (struct drm_psb_private *)dev->dev_private;
397 // if (num_present_cpus() > 1)
398 unregister_cpu_notifier(&psb_nb);
401 intel_modeset_cleanup(dev);
404 psb_watchdog_takedown(dev_priv);
405 psb_do_takedown(dev);
406 psb_xhw_takedown(dev_priv);
407 psb_scheduler_takedown(&dev_priv->scheduler);
409 mutex_lock(&dev->struct_mutex);
410 if (dev_priv->have_mem_pds) {
411 drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
412 dev_priv->have_mem_pds = 0;
414 if (dev_priv->have_mem_kernel) {
415 drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
416 dev_priv->have_mem_kernel = 0;
418 mutex_unlock(&dev->struct_mutex);
/* Finish the BO driver; return value intentionally ignored. */
420 (void)drm_bo_driver_finish(dev);
422 if (dev_priv->pf_pd) {
423 psb_mmu_free_pagedir(dev_priv->pf_pd);
424 dev_priv->pf_pd = NULL;
427 struct psb_gtt *pg = dev_priv->pg;
/* Remove the stolen-area (VRAM) mapping inserted at load time. */
430 psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
434 stolen_size >> PAGE_SHIFT);
436 psb_mmu_driver_takedown(dev_priv->mmu);
437 dev_priv->mmu = NULL;
439 psb_gtt_takedown(dev_priv->pg, 1);
440 if (dev_priv->scratch_page) {
441 __free_page(dev_priv->scratch_page);
442 dev_priv->scratch_page = NULL;
444 psb_takedown_use_base(dev_priv);
/* Unmap the three MMIO register apertures. */
445 if (dev_priv->vdc_reg) {
446 iounmap(dev_priv->vdc_reg);
447 dev_priv->vdc_reg = NULL;
449 if (dev_priv->sgx_reg) {
450 iounmap(dev_priv->sgx_reg);
451 dev_priv->sgx_reg = NULL;
453 if (dev_priv->msvdx_reg) {
454 iounmap(dev_priv->msvdx_reg);
455 dev_priv->msvdx_reg = NULL;
458 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
459 dev->dev_private = NULL;
464 extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
465 extern int drm_pick_crtcs(struct drm_device *dev);
466 extern char drm_init_mode[32];
467 extern int drm_init_xres;
468 extern int drm_init_yres;
/*
 * Probe outputs, pick initial CRTC modes, attach framebuffers and set
 * modes for the initial configuration.  Pipe selection honors the
 * force_pipeb parameter.  Runs under mode_config.mutex.
 */
470 static int psb_initial_config(struct drm_device *dev, bool can_grow)
472 struct drm_psb_private *dev_priv = dev->dev_private;
473 struct drm_output *output;
474 struct drm_crtc *crtc;
477 mutex_lock(&dev->mode_config.mutex);
/* Probe modes up to 2048x2048 on all outputs. */
479 drm_crtc_probe_output_modes(dev, 2048, 2048);
481 /* strncpy(drm_init_mode, psb_init_mode, strlen(psb_init_mode)); */
482 drm_init_xres = psb_init_xres;
483 drm_init_yres = psb_init_yres;
/* NOTE(review): informational message logged at KERN_ERR level —
 * KERN_INFO would be more appropriate; confirm before changing. */
484 printk(KERN_ERR "detear is %sabled\n", drm_psb_detear ? "en" : "dis" );
/* Prefer pipe A ordering when it is active, unless force_pipeb set. */
488 if ((I915_READ(PIPEACONF) & PIPEACONF_ENABLE) && !drm_psb_force_pipeb)
489 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
490 if (!crtc->desired_mode)
493 dev->driver->fb_probe(dev, crtc);
495 list_for_each_entry_reverse(crtc, &dev->mode_config.crtc_list,
497 if (!crtc->desired_mode)
500 dev->driver->fb_probe(dev, crtc);
/* Set the desired mode on every output whose CRTC got a framebuffer. */
503 list_for_each_entry(output, &dev->mode_config.output_list, head) {
505 if (!output->crtc || !output->crtc->desired_mode)
508 if (output->crtc->fb)
509 drm_crtc_set_mode(output->crtc,
510 output->crtc->desired_mode, 0, 0);
/* SII_1392 appears to be an external HDMI-transmitter presence flag —
 * TODO confirm its exact semantics. */
514 if((SII_1392 != 1) || (drm_psb_no_fb==0))
515 drm_disable_unused_functions(dev);
517 drm_disable_unused_functions(dev);
520 mutex_unlock(&dev->mode_config.mutex);
/*
 * .load hook — main device bring-up:
 *   1. allocate and zero dev_priv; init its mutexes, atomics, waitqueues;
 *   2. ioremap the MSVDX, VDC and SGX MMIO apertures;
 *   3. program clock gating, USSE bases, scratch page, GTT and SGX MMU;
 *   4. start the buffer-object driver and the KERNEL/PDS ranges;
 *   5. run psb_do_init()/psb_xhw_init(), then modesetting and the
 *      CPU-hotplug notifier.
 * On failure it falls through to psb_driver_unload() for cleanup
 * (which tolerates partial initialization).
 */
526 static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
528 struct drm_psb_private *dev_priv;
529 unsigned long resource_start;
533 DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
534 dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
535 if (dev_priv == NULL)
538 mutex_init(&dev_priv->temp_mem);
539 mutex_init(&dev_priv->cmdbuf_mutex);
540 mutex_init(&dev_priv->reset_mutex);
541 psb_init_disallowed();
543 atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
/* 2D engine bookkeeping: lock, TA-wait flags, waiter count, queue. */
546 atomic_set(&dev_priv->lock_2d, 0);
547 atomic_set(&dev_priv->ta_wait_2d, 0);
548 atomic_set(&dev_priv->ta_wait_2d_irq, 0);
549 atomic_set(&dev_priv->waiters_2d, 0); /* was ";;": stray empty statement removed */
550 DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
552 mutex_init(&dev_priv->mutex_2d);
555 spin_lock_init(&dev_priv->reloc_lock);
557 DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
558 DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
560 dev->dev_private = (void *)dev_priv;
561 dev_priv->chipset = chipset;
562 psb_set_uopt(&dev_priv->uopt);
564 psb_watchdog_init(dev_priv);
565 psb_scheduler_init(dev, &dev_priv->scheduler);
/* Map the three register apertures off the single MMIO BAR. */
567 resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
569 dev_priv->msvdx_reg =
570 ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
571 if (!dev_priv->msvdx_reg)
575 ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
576 if (!dev_priv->vdc_reg)
580 ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
581 if (!dev_priv->sgx_reg)
584 psb_clockgating(dev_priv);
585 if (psb_init_use_base(dev_priv, 3, 13))
/* Zeroed DMA32 scratch page, made uncached for device coherency. */
588 dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
589 if (!dev_priv->scratch_page)
592 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
593 change_page_attr(dev_priv->scratch_page, 1, PAGE_KERNEL_NOCACHE);
595 map_page_into_agp(dev_priv->scratch_page);
598 dev_priv->pg = psb_gtt_alloc(dev);
602 ret = psb_gtt_init(dev_priv->pg, 0);
606 dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
607 drm_psb_trap_pagefaults, 0,
608 &dev_priv->msvdx_mmu_invaldc);
615 * Make sgx MMU aware of the stolen memory area we call VRAM.
620 psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
621 pg->stolen_base >> PAGE_SHIFT,
623 pg->stolen_size >> PAGE_SHIFT, 0);
/* Page-fault page directory: context with no valid mappings. */
628 dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
629 if (!dev_priv->pf_pd)
633 * Make all presumably unused requestors page-fault by making them
634 * use context 1 which does not have any valid mappings.
637 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
638 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
639 PSB_RSGX32(PSB_CR_BIF_BANK1);
641 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
642 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
643 psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
645 psb_init_2d(dev_priv);
647 ret = drm_bo_driver_init(dev);
/* Fixed BO ranges carved from the MMU virtual layout. */
651 ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
652 (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
656 dev_priv->have_mem_kernel = 1;
658 ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
659 (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
663 dev_priv->have_mem_pds = 1;
665 ret = psb_do_init(dev);
669 ret = psb_xhw_init(dev);
673 PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
674 PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
676 intel_modeset_init(dev);
677 psb_initial_config(dev, false);
681 // if (num_present_cpus() > 1)
682 register_cpu_notifier(&psb_nb);
/* Error path: unload tolerates partially-initialized state. */
687 psb_driver_unload(dev);
691 int psb_driver_device_is_agp(struct drm_device *dev)
/*
 * Quiesce the MSVDX video decoder before suspend: flag it for reset,
 * wait up to 3 s for each outstanding video-engine fence to signal
 * (suspending anyway on timeout), then issue a software reset and
 * wait for the reset bit to clear.
 */
696 static int psb_prepare_msvdx_suspend(struct drm_device *dev)
698 struct drm_psb_private *dev_priv =
699 (struct drm_psb_private *)dev->dev_private;
700 struct drm_fence_manager *fm = &dev->fm;
701 struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
702 struct drm_fence_object *fence;
/* Absolute deadline: 3 seconds from now. */
706 unsigned long _end = jiffies + 3 * DRM_HZ;
708 PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
710 /*set the msvdx-reset flag here.. */
711 dev_priv->msvdx_needs_reset = 1;
713 /*Ensure that all pending IRQs are serviced, */
714 list_for_each_entry(fence, &fc->ring, ring) {
/* Retry the wait while it is interrupted by a signal (-EINTR). */
717 DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
719 drm_fence_object_signaled(fence,
720 DRM_FENCE_TYPE_EXE)));
723 if (time_after_eq(jiffies, _end))
/* NOTE(review): casting a pointer to unsigned int truncates on
 * 64-bit; %p would be the safe format — confirm before changing. */
725 ("MSVDXACPI: fence 0x%x didn't get signaled for 3 secs; we will suspend anyways\n",
726 (unsigned int)fence);
727 } while (ret == -EINTR);
731 /* Issue software reset */
732 PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
/* Wait until the soft-reset bit reads back as zero. */
734 ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0,
735 MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
737 PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
/*
 * PCI .suspend hook: optionally save CRTC state (no-fb workaround),
 * save the clock-gating register, idle the 2D/3D engines, flush
 * pending work, release USSE bases, quiesce MSVDX, then save PCI
 * state and drop the device to D3hot.
 */
742 static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
744 struct drm_device *dev = pci_get_drvdata(pdev);
745 struct drm_psb_private *dev_priv =
746 (struct drm_psb_private *)dev->dev_private;
747 struct drm_output *output;
749 //if (drm_psb_no_fb == 0)
750 // psbfb_suspend(dev);
/* Workaround: with no_fb set, save per-CRTC mode state manually. */
751 #ifdef WA_NO_FB_GARBAGE_DISPLAY
753 if (drm_psb_no_fb != 0) {
754 if(num_registered_fb)
756 list_for_each_entry(output, &dev->mode_config.output_list, head) {
757 if(output->crtc != NULL)
758 intel_crtc_mode_save(output->crtc);
759 //if(output->funcs->save)
760 // output->funcs->save(output);
/* Preserve clock gating so psb_resume() can restore it. */
766 dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
767 (void)psb_idle_3d(dev);
768 (void)psb_idle_2d(dev);
769 flush_scheduled_work();
771 psb_takedown_use_base(dev_priv);
773 if (dev_priv->has_msvdx)
774 psb_prepare_msvdx_suspend(dev);
776 pci_save_state(pdev);
777 pci_disable_device(pdev);
778 pci_set_power_state(pdev, PCI_D3hot);
/*
 * PCI .resume hook: bring the device back to D0, restore PCI config,
 * re-enable the GTT and GMCH, reprogram the SGX BIF/MMU registers
 * (register contents are lost across suspend; MMU page tables survive
 * in normal RAM), re-init the 2D engine and USSE bases, resume the
 * 3D scheduler/TA memory, and restore display modes.
 */
783 static int psb_resume(struct pci_dev *pdev)
785 struct drm_device *dev = pci_get_drvdata(pdev);
786 struct drm_psb_private *dev_priv =
787 (struct drm_psb_private *)dev->dev_private;
788 struct psb_gtt *pg = dev_priv->pg;
789 struct drm_output *output;
792 pci_set_power_state(pdev, PCI_D0);
793 pci_restore_state(pdev);
794 ret = pci_enable_device(pdev);
800 /* for single CPU's we do it here, then for more than one CPU we
801 * use the CPU notifier to reinit PAT on those CPU's.
803 // if (num_present_cpus() == 1)
807 INIT_LIST_HEAD(&dev_priv->resume_buf.head);
/* MSVDX must be reset after a power transition. */
808 dev_priv->msvdx_needs_reset = 1;
/* Re-enable the GTT page table and the GMCH. */
810 PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
811 pci_write_config_word(pdev, PSB_GMCH_CTRL,
812 pg->gmch_ctrl | _PSB_GMCH_ENABLED);
815 * The GTT page tables are probably not saved.
816 * However, TT and VRAM is empty at this point.
819 psb_gtt_init(dev_priv->pg, 1);
822 * The SGX loses it's register contents.
823 * Restore BIF registers. The MMU page tables are
824 * "normal" pages, so their contents should be kept.
827 PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
828 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
829 PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
830 PSB_RSGX32(PSB_CR_BIF_BANK1);
832 psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
833 psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
834 psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
837 * 2D Base registers..
839 psb_init_2d(dev_priv);
/* Restore display modes unless running without a framebuffer. */
841 if (drm_psb_no_fb == 0) {
842 list_for_each_entry(output, &dev->mode_config.output_list, head) {
843 if(output->crtc != NULL)
844 drm_crtc_set_mode(output->crtc, &output->crtc->mode,
845 output->crtc->x, output->crtc->y);
850 * Persistant 3D base registers and USSE base registers..
853 PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
854 PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
855 psb_init_use_base(dev_priv, 3, 13);
858 * Now, re-initialize the 3D engine.
861 psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
863 psb_scheduler_ta_mem_check(dev_priv);
/* Reload TA memory state unless a full reload was already forced. */
864 if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
865 psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
867 PSB_TA_MEM_FLAG_RASTER |
868 PSB_TA_MEM_FLAG_HOSTA |
869 PSB_TA_MEM_FLAG_HOSTD |
870 PSB_TA_MEM_FLAG_INIT,
871 dev_priv->ta_mem->ta_memory->offset,
872 dev_priv->ta_mem->hw_data->offset,
873 dev_priv->ta_mem->hw_cookie);
876 //if (drm_psb_no_fb == 0)
877 // psbfb_resume(dev);
/* Workaround counterpart to psb_suspend(): restore saved CRTC state. */
878 #ifdef WA_NO_FB_GARBAGE_DISPLAY
880 if (drm_psb_no_fb != 0) {
881 if(num_registered_fb)
883 struct fb_info *fb_info=registered_fb[0];
884 list_for_each_entry(output, &dev->mode_config.output_list, head) {
885 if(output->crtc != NULL)
886 intel_crtc_mode_restore(output->crtc);
890 fb_set_suspend(fb_info, 0);
891 printk("set the fb_set_suspend resume end\n");
900 /* always available as we are SIGIO'd */
/* .poll fop: device is always readable (events delivered via SIGIO). */
901 static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
903 return (POLLIN | POLLRDNORM);
/*
 * .release fop: if this file owns the xhw channel, tear that down
 * first, then delegate to the generic drm_release().
 */
906 static int psb_release(struct inode *inode, struct file *filp)
908 struct drm_file *file_priv = (struct drm_file *)filp->private_data;
909 struct drm_device *dev = file_priv->head->dev;
910 struct drm_psb_private *dev_priv =
911 (struct drm_psb_private *)dev->dev_private;
913 if (dev_priv && dev_priv->xhw_file) {
914 psb_xhw_init_takedown(dev_priv, file_priv, 1);
916 return drm_release(inode, filp);
919 extern struct drm_fence_driver psb_fence_driver;
922 * Use this memory type priority if no eviction is needed.
/* Memory-type placement order when no eviction is needed (VRAM first). */
924 static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
928 DRM_PSB_MEM_RASTGEOM,
935 * Use this memory type priority if need to evict.
/* Placement order when eviction is required (TT first). */
937 static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
941 DRM_PSB_MEM_RASTGEOM,
/* Buffer-object driver callbacks wired into the DRM BO core. */
947 static struct drm_bo_driver psb_bo_driver = {
948 .mem_type_prio = psb_mem_prios,
949 .mem_busy_prio = psb_busy_prios,
950 .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
951 .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
952 .create_ttm_backend_entry = drm_psb_tbe_init,
953 .fence_type = psb_fence_types,
954 .invalidate_caches = psb_invalidate_caches,
955 .init_mem_type = psb_init_mem_type,
956 .evict_mask = psb_evict_mask,
958 .backend_size = psb_tbe_size,
959 .command_stream_barrier = NULL,
/* Top-level DRM driver descriptor: features, hooks, fops, PCI glue. */
962 static struct drm_driver driver = {
963 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
964 DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
965 .load = psb_driver_load,
966 .unload = psb_driver_unload,
967 .dri_library_name = dri_library_name,
968 .get_reg_ofs = drm_core_get_reg_ofs,
969 .ioctls = psb_ioctls,
970 .device_is_agp = psb_driver_device_is_agp,
/* NOTE(review): both vblank hooks point at psb_vblank_wait2 — confirm
 * .vblank_wait was not meant to be psb_vblank_wait. */
971 .vblank_wait = psb_vblank_wait2,
972 .vblank_wait2 = psb_vblank_wait2,
973 .irq_preinstall = psb_irq_preinstall,
974 .irq_postinstall = psb_irq_postinstall,
975 .irq_uninstall = psb_irq_uninstall,
976 .irq_handler = psb_irq_handler,
977 .fb_probe = psbfb_probe,
978 .fb_remove = psbfb_remove,
980 .lastclose = psb_lastclose,
982 .owner = THIS_MODULE,
984 .release = psb_release,
988 .fasync = drm_fasync,
992 .id_table = pciidlist,
994 .remove = __devexit_p(drm_cleanup_pci),
995 .resume = psb_resume,
996 .suspend = psb_suspend,
998 .fence_driver = &psb_fence_driver,
999 .bo_driver = &psb_bo_driver,
1000 .name = DRIVER_NAME,
1001 .desc = DRIVER_DESC,
1002 .date = PSB_DRM_DRIVER_DATE,
1003 .major = PSB_DRM_DRIVER_MAJOR,
1004 .minor = PSB_DRM_DRIVER_MINOR,
1005 .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
/* PCI .probe: hand off to the DRM core with our driver descriptor. */
1008 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1010 return drm_get_dev(pdev, ent, &driver);
/* Module init: fill in the ioctl count, then register with DRM core. */
1013 static int __init psb_init(void)
1015 driver.num_ioctls = psb_max_ioctl;
1017 return drm_init(&driver, pciidlist);
1020 static void __exit psb_exit(void)
/* Module entry/exit points and metadata for modinfo. */
1025 module_init(psb_init);
1026 module_exit(psb_exit);
1028 MODULE_AUTHOR(DRIVER_AUTHOR);
1029 MODULE_DESCRIPTION(DRIVER_DESC);
1030 MODULE_LICENSE("GPL");