1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34 #include "intel_drv.h"
/*
 * Bit positions within the i915 interrupt identity/enable registers
 * (IIR/IER): bit 1 = user (breadcrumb) interrupt, bits 5/7 = pipe B/A
 * vblank, bit 17 = hotplug.  MAX_NOPID is an all-ones u32 sentinel.
 */
36 #define USER_INT_FLAG (1<<1)
37 #define VSYNC_PIPEB_FLAG (1<<5)
38 #define VSYNC_PIPEA_FLAG (1<<7)
39 #define HOTPLUG_FLAG (1 << 17)
41 #define MAX_NOPID ((u32)~0)
44 * i915_get_pipe - return the pipe associated with a given plane
46 * @plane: plane to look for
48 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
49 * rather than a pipe number, since they may not always be equal. This routine
50 * maps the given @plane back to a pipe number.
53 i915_get_pipe(struct drm_device *dev, int plane)
55 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
/* The pipe-select field of DSPACNTR/DSPBCNTR says which pipe drives
 * the given plane; a set bit means pipe 1 (B), clear means pipe 0 (A). */
58 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
60 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
64 * i915_get_plane - return the plane associated with a given pipe
66 * @pipe: pipe to look for
68 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
69 * rather than a pipe number, since they may not always be equal. This routine
70 * maps the given @pipe back to a plane number.
/* Inverse of i915_get_pipe(): find which plane is driven by @pipe.
 * If plane 0 maps to @pipe the answer is 0; otherwise (remainder of the
 * body elided from this view) presumably plane 1 — confirm upstream. */
73 i915_get_plane(struct drm_device *dev, int pipe)
75 if (i915_get_pipe(dev, 0) == pipe)
81 * i915_pipe_enabled - check if a pipe is enabled
83 * @pipe: pipe to check
85 * Reading certain registers when the pipe is disabled can hang the chip.
86 * Use this routine to make sure the PLL is running and the pipe is active
87 * before reading such registers if unsure.
90 i915_pipe_enabled(struct drm_device *dev, int pipe)
92 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
93 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
/* NOTE(review): PIPEACONF_ENABLE is tested for both pipes; this assumes
 * the enable bit occupies the same position in PIPEACONF and PIPEBCONF —
 * confirm against the hardware documentation. */
95 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
102 * Emit a synchronous flip.
104 * This function must be called with the drawable spinlock held.
/*
 * Emit a synchronous page flip for @drw on @plane.  Caller must hold
 * dev->drw_lock (asserted below).
 */
107 i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
110 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
111 struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
/* Bitmask of planes to flip; starts as just the requested plane and may
 * (in code elided from this view) gain the other plane's bit below. */
113 int pf_planes = 1 << plane;
115 DRM_SPINLOCK_ASSERT(&dev->drw_lock);
117 /* If the window is visible on the other plane, we have to flip on that
/* Compute the other plane's visible rectangle (x1,y1)-(x2,y2) from the
 * sarea; which branch runs depends on @plane (selector elided here). */
121 x1 = sarea_priv->planeA_x;
122 y1 = sarea_priv->planeA_y;
123 x2 = x1 + sarea_priv->planeA_w;
124 y2 = y1 + sarea_priv->planeA_h;
126 x1 = sarea_priv->planeB_x;
127 y1 = sarea_priv->planeB_y;
128 x2 = x1 + sarea_priv->planeB_w;
129 y2 = y1 + sarea_priv->planeB_h;
/* Only scan the clip rects when the other plane's rect is non-empty. */
132 if (x2 > 0 && y2 > 0) {
133 int i, num_rects = drw->num_rects;
134 struct drm_clip_rect *rect = drw->rects;
/* Standard rectangle-overlap test: the negation of "fully outside". */
136 for (i = 0; i < num_rects; i++)
137 if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
138 rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
145 i915_dispatch_flip(dev, pf_planes, 1);
149 * Emit blits for scheduled buffer swaps.
151 * This function will be called with the HW lock held.
/*
 * Emit blits (or sync flips) for all buffer swaps scheduled at or before
 * the current vblank count.  Runs with the HW lock held via
 * drm_locked_tasklet() from the IRQ handler.
 */
153 static void i915_vblank_tasklet(struct drm_device *dev)
155 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
156 struct list_head *list, *tmp, hits, *hit;
157 int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
159 struct drm_drawable_info *drw;
160 struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
161 u32 cpp = dev_priv->cpp, offsets[3];
/* 32bpp blits also write alpha; other depths copy RGB only. */
162 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
163 XY_SRC_COPY_BLT_WRITE_ALPHA |
164 XY_SRC_COPY_BLT_WRITE_RGB)
165 : XY_SRC_COPY_BLT_CMD;
/* Packed BLT dword: pitch | ROP 0xcc (copy) | color depth | tiling bit. */
166 u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
167 (cpp << 23) | (1 << 24);
170 counter[0] = drm_vblank_count(dev, 0);
171 counter[1] = drm_vblank_count(dev, 1);
175 INIT_LIST_HEAD(&hits);
179 /* No irqsave/restore necessary. This tasklet may be run in an
180 * interrupt context or normal context, but we don't have to worry
181 * about getting interrupted by something acquiring the lock, because
182 * we are the interrupt context thing that acquires the lock.
184 DRM_SPINLOCK(&dev_priv->swaps_lock);
186 /* Find buffer swaps scheduled for this vertical blank */
187 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
188 struct drm_i915_vbl_swap *vbl_swap =
189 list_entry(list, struct drm_i915_vbl_swap, head);
190 int pipe = i915_get_pipe(dev, vbl_swap->plane);
/* Wrap-safe "sequence still in the future" test on the 24-bit counter. */
192 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
196 dev_priv->swaps_pending--;
197 drm_vblank_put(dev, pipe);
/* Lock ordering: drop swaps_lock before taking drw_lock. */
199 DRM_SPINUNLOCK(&dev_priv->swaps_lock);
200 DRM_SPINLOCK(&dev->drw_lock);
202 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
/* Drawable vanished: free the stale swap entry and continue the scan. */
205 DRM_SPINUNLOCK(&dev->drw_lock);
206 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
207 DRM_SPINLOCK(&dev_priv->swaps_lock);
/* Insertion-sort the hit into 'hits' by the drawable's top edge so
 * blits are emitted in scanout order. */
211 list_for_each(hit, &hits) {
212 struct drm_i915_vbl_swap *swap_cmp =
213 list_entry(hit, struct drm_i915_vbl_swap, head);
214 struct drm_drawable_info *drw_cmp =
215 drm_get_drawable_info(dev, swap_cmp->drw_id);
218 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
219 list_add_tail(list, hit);
224 DRM_SPINUNLOCK(&dev->drw_lock);
226 /* List of hits was empty, or we reached the end of it */
228 list_add_tail(list, hits.prev);
232 DRM_SPINLOCK(&dev_priv->swaps_lock);
235 DRM_SPINUNLOCK(&dev_priv->swaps_lock);
241 i915_kernel_lost_context(dev);
/* Partition each plane's visible height into nhits slices. */
243 upper[0] = upper[1] = 0;
244 slice[0] = max(sarea_priv->planeA_h / nhits, 1);
245 slice[1] = max(sarea_priv->planeB_h / nhits, 1);
246 lower[0] = sarea_priv->planeA_y + slice[0];
/* NOTE(review): slice[0] here looks like a copy-paste slip — plane B
 * should presumably start at planeB_y + slice[1]; confirm against the
 * upstream fix before relying on this. */
247 lower[1] = sarea_priv->planeB_y + slice[0];
249 offsets[0] = sarea_priv->front_offset;
250 offsets[1] = sarea_priv->back_offset;
251 offsets[2] = sarea_priv->third_offset;
/* Triple-buffered if a third buffer handle exists, else double. */
252 num_pages = sarea_priv->third_handle ? 3 : 2;
254 DRM_SPINLOCK(&dev->drw_lock);
256 /* Emit blits for buffer swaps, partitioning both outputs into as many
257 * slices as there are buffer swaps scheduled in order to avoid tearing
258 * (based on the assumption that a single buffer swap would always
259 * complete before scanout starts).
/* Note: 'i' is incremented in the loop *condition*, so the body runs
 * exactly nhits times; the slice windows advance in the third clause. */
261 for (i = 0; i++ < nhits;
262 upper[0] = lower[0], lower[0] += slice[0],
263 upper[1] = lower[1], lower[1] += slice[1]) {
264 int init_drawrect = 1;
/* Last slice: extend both windows to the bottom of the screen. */
267 lower[0] = lower[1] = sarea_priv->height;
269 list_for_each(hit, &hits) {
270 struct drm_i915_vbl_swap *swap_hit =
271 list_entry(hit, struct drm_i915_vbl_swap, head);
272 struct drm_clip_rect *rect;
273 int num_rects, plane, front, back;
274 unsigned short top, bottom;
276 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
281 plane = swap_hit->plane;
/* Flip-type swaps are dispatched as page flips, not blits. */
283 if (swap_hit->flip) {
284 i915_dispatch_vsync_flip(dev, drw, plane);
/* Emit the full-screen draw rectangle once per slice. */
291 OUT_RING(GFX_OP_DRAWRECT_INFO);
294 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
295 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
300 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
307 bottom = lower[plane];
/* Derive front/back buffer indices from the current page-flip state. */
309 front = (dev_priv->sarea_priv->pf_current_page >>
311 back = (front + 1) % num_pages;
/* Blit each clip rect, clamped vertically to this slice's window. */
313 for (num_rects = drw->num_rects; num_rects--; rect++) {
314 int y1 = max(rect->y1, top);
315 int y2 = min(rect->y2, bottom);
323 OUT_RING(pitchropcpp);
324 OUT_RING((y1 << 16) | rect->x1);
325 OUT_RING((y2 << 16) | rect->x2);
326 OUT_RING(offsets[front]);
327 OUT_RING((y1 << 16) | rect->x1);
328 OUT_RING(pitchropcpp & 0xffff);
329 OUT_RING(offsets[back]);
336 DRM_SPINUNLOCK(&dev->drw_lock);
/* Free all processed swap entries. */
338 list_for_each_safe(hit, tmp, &hits) {
339 struct drm_i915_vbl_swap *swap_hit =
340 list_entry(hit, struct drm_i915_vbl_swap, head);
344 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
/*
 * Return nonzero if @pipe's current scanline is inside its vertical
 * blank period, by comparing PIPExDSL against the VBLANK_x limits.
 */
348 static int i915_in_vblank(struct drm_device *dev, int pipe)
350 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
/* These hold register *offsets*, selected per pipe, then read below. */
351 unsigned long pipedsl, vblank, vtotal;
352 unsigned long vbl_start, vbl_end, cur_line;
354 pipedsl = pipe ? PIPEBDSL : PIPEADSL;
355 vblank = pipe ? VBLANK_B : VBLANK_A;
356 vtotal = pipe ? VTOTAL_B : VTOTAL_A;
/* Unpack the vblank start/end scanlines from the VBLANK register. */
358 vbl_start = I915_READ(vblank) & VBLANK_START_MASK;
359 vbl_end = (I915_READ(vblank) >> VBLANK_END_SHIFT) & VBLANK_END_MASK;
361 cur_line = I915_READ(pipedsl);
/* In vblank once the scanline has reached vbl_start (wrap handling,
 * using vtotal, is in code elided from this view). */
363 if (cur_line >= vbl_start)
/*
 * Return the 24-bit hardware frame counter for the pipe driving @plane.
 */
369 u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
371 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
372 unsigned long high_frame;
373 unsigned long low_frame;
374 u32 high1, high2, low, count;
377 pipe = i915_get_pipe(dev, plane);
378 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
379 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
/* Reading these registers with the pipe off can hang the chip. */
381 if (!i915_pipe_enabled(dev, pipe)) {
382 printk(KERN_ERR "trying to get vblank count for disabled "
388 * High & low register fields aren't synchronized, so make sure
389 * we get a low value that's stable across two reads of the high
/* Loop (do/while) until the high half reads the same before and after
 * sampling the low half, proving no rollover happened in between. */
393 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
394 PIPE_FRAME_HIGH_SHIFT);
395 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
396 PIPE_FRAME_LOW_SHIFT);
397 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
398 PIPE_FRAME_HIGH_SHIFT);
399 } while (high1 != high2);
401 count = (high1 << 8) | low;
404 * If we're in the middle of the vblank period, the
405 * above regs won't have been updated yet, so return
406 * an incremented count to stay accurate
409 if (i915_in_vblank(dev, pipe))
/* Pending-hotplug command bits, shared between the IRQ handler and the
 * workqueue handler below; protected by hotplug_lock. */
415 #define HOTPLUG_CMD_CRT 1
416 #define HOTPLUG_CMD_SDVOB 4
417 #define HOTPLUG_CMD_SDVOC 8
/* NOTE(review): file-scope globals mean only one device's hotplug state
 * can be tracked at a time — confirm this is acceptable here. */
419 static struct drm_device *hotplug_dev;
420 static int hotplug_cmd = 0;
/* NOTE(review): the SPIN_LOCK_UNLOCKED initializer is deprecated in
 * later kernels — DEFINE_SPINLOCK(hotplug_lock) is the modern form. */
421 static spinlock_t hotplug_lock = SPIN_LOCK_UNLOCKED;
/*
 * Handle a CRT hotplug event: locate the analog output and hand it to
 * drm_hotplug_stage_two().  Runs from process context (workqueue).
 */
423 static void i915_hotplug_crt(struct drm_device *dev)
425 struct drm_output *output;
426 struct intel_output *iout;
428 mutex_lock(&dev->mode_config.mutex);
430 /* find the crt output */
431 list_for_each_entry(output, &dev->mode_config.output_list, head) {
432 iout = output->driver_private;
433 if (iout->type == INTEL_OUTPUT_ANALOG)
442 drm_hotplug_stage_two(dev, output);
445 mutex_unlock(&dev->mode_config.mutex);
/*
 * Handle an SDVO hotplug event for SDVOB (@sdvoB != 0) or SDVOC.
 * Runs from process context (workqueue).
 */
448 static void i915_hotplug_sdvo(struct drm_device *dev, int sdvoB)
450 struct drm_output *output = 0;
451 enum drm_output_status status;
453 mutex_lock(&dev->mode_config.mutex);
455 output = intel_sdvo_find(dev, sdvoB);
458 DRM_ERROR("could not find sdvo%s output\n", sdvoB ? "B" : "C");
462 status = output->funcs->detect(output);
464 if (status != output_status_connected)
465 DRM_DEBUG("disconnect or unkown we don't do anything then\n");
467 drm_hotplug_stage_two(dev, output);
469 /* Weird hw bug workaround: SDVO stops sending interrupts unless
470 * hotplug detection is re-enabled after each event. */
470 intel_sdvo_set_hotplug(output, 1);
473 mutex_unlock(&dev->mode_config.mutex);
476 * This code is called in a safer environment to handle the hotplugs.
477 * Add code here for hotplug love to userspace.
479 static void i915_hotplug_work_func(struct work_struct *work)
481 struct drm_device *dev = hotplug_dev;
/* Snapshot (and, in code elided here, clear) the pending command bits
 * under the lock, then handle each event outside it. */
486 spin_lock(&hotplug_lock);
487 crt = hotplug_cmd & HOTPLUG_CMD_CRT;
488 sdvoB = hotplug_cmd & HOTPLUG_CMD_SDVOB;
489 sdvoC = hotplug_cmd & HOTPLUG_CMD_SDVOC;
491 spin_unlock(&hotplug_lock);
494 i915_hotplug_crt(dev);
497 i915_hotplug_sdvo(dev, 1);
500 i915_hotplug_sdvo(dev, 0);
/*
 * Called from the IRQ handler with the PORT_HOTPLUG_STAT value: record
 * which connectors changed and queue the process-context work handler.
 */
504 static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
/* Function-local static work item — one instance shared by all calls. */
506 static DECLARE_WORK(hotplug, i915_hotplug_work_func);
507 struct drm_i915_private *dev_priv = dev->dev_private;
511 if (stat & CRT_HOTPLUG_INT_STATUS) {
512 DRM_DEBUG("CRT event\n");
/* Only a *connect* (monitor present) is queued here. */
514 if (stat & CRT_HOTPLUG_MONITOR_MASK) {
515 spin_lock(&hotplug_lock);
516 hotplug_cmd |= HOTPLUG_CMD_CRT;
517 spin_unlock(&hotplug_lock);
519 /* handle crt disconnects */
523 if (stat & SDVOB_HOTPLUG_INT_STATUS) {
524 DRM_DEBUG("sDVOB event\n");
526 spin_lock(&hotplug_lock);
527 hotplug_cmd |= HOTPLUG_CMD_SDVOB;
528 spin_unlock(&hotplug_lock);
531 if (stat & SDVOC_HOTPLUG_INT_STATUS) {
532 DRM_DEBUG("sDVOC event\n");
534 spin_lock(&hotplug_lock);
535 hotplug_cmd |= HOTPLUG_CMD_SDVOC;
536 spin_unlock(&hotplug_lock);
539 queue_work(dev_priv->wq, &hotplug);
/*
 * Top-level interrupt handler: ack the IIR, handle vblank, user
 * (breadcrumb) and hotplug interrupts.
 */
544 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
546 struct drm_device *dev = (struct drm_device *) arg;
547 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
550 u32 pipea_stats, pipeb_stats;
552 pipea_stats = I915_READ(I915REG_PIPEASTAT);
553 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
555 /* On i8xx hw the IIR and IER are 16bit on i9xx its 32bit */
557 temp = I915_READ(I915REG_INT_IDENTITY_R);
559 temp = I915_READ16(I915REG_INT_IDENTITY_R);
/* Mask down to the interrupts we actually enabled (+ user int). */
562 temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
565 /* ugly despamification of pipeb event irq */
/* Only emit debug spew for bits other than the two vblank bits (5/7). */
566 if (temp & (0xFFFFFFF ^ ((1 << 5) | (1 << 7)))) {
/* NOTE(review): temp2 appears to be printed here before it is assigned
 * (it is set from PORT_HOTPLUG_STAT further down) — confirm intent. */
567 DRM_DEBUG("IIR %08x\n", temp2);
568 DRM_DEBUG("MSK %08x\n", dev_priv->irq_enable_reg | USER_INT_FLAG);
569 DRM_DEBUG("M&I %08x\n", temp);
570 DRM_DEBUG("HOT %08x\n", I915_READ(PORT_HOTPLUG_STAT));
574 DRM_DEBUG("flag=%08x\n", temp);
/* Ack the handled bits; the read-back flushes the posted write. */
582 I915_WRITE(I915REG_INT_IDENTITY_R, temp);
583 (void) I915_READ(I915REG_INT_IDENTITY_R);
585 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
586 (void) I915_READ16(I915REG_INT_IDENTITY_R);
590 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
591 * we may get extra interrupts.
593 if (temp & VSYNC_PIPEA_FLAG) {
594 drm_handle_vblank(dev, i915_get_plane(dev, 0));
595 I915_WRITE(I915REG_PIPEASTAT,
596 pipea_stats | I915_VBLANK_INTERRUPT_ENABLE |
600 if (temp & VSYNC_PIPEB_FLAG) {
601 drm_handle_vblank(dev, i915_get_plane(dev, 1));
602 I915_WRITE(I915REG_PIPEBSTAT,
603 pipeb_stats | I915_VBLANK_INTERRUPT_ENABLE |
607 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
608 (void) I915_READ16(I915REG_INT_IDENTITY_R); /* Flush posted write */
610 DRM_READMEMORYBARRIER();
612 temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG | VSYNC_PIPEA_FLAG |
/* Publish the latest completed breadcrumb to userspace. */
615 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
617 if (temp & USER_INT_FLAG) {
618 DRM_WAKEUP(&dev_priv->irq_queue);
619 #ifdef I915_HAVE_FENCE
620 i915_fence_handler(dev);
/* Only run the swap tasklet when there is actually work queued. */
624 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
625 if (dev_priv->swaps_pending > 0)
626 drm_locked_tasklet(dev, i915_vblank_tasklet);
629 /* for now let's just ack it (hotplug, bit 17) */
630 if (temp & (1 << 17)) {
631 DRM_DEBUG("Hotplug event received\n");
633 temp2 = I915_READ(PORT_HOTPLUG_STAT);
635 i915_run_hotplug_tasklet(dev, temp2);
/* Writing the status bits back acks the hotplug event. */
637 I915_WRITE(PORT_HOTPLUG_STAT,temp2);
/*
 * Emit a breadcrumb plus a user-interrupt command into the ring and
 * return the breadcrumb counter value to wait on.
 */
643 int i915_emit_irq(struct drm_device *dev)
645 struct drm_i915_private *dev_priv = dev->dev_private;
648 i915_kernel_lost_context(dev);
652 i915_emit_breadcrumb(dev);
656 OUT_RING(GFX_OP_USER_INTERRUPT);
659 return dev_priv->counter;
/*
 * Refcounted enable of the user interrupt: only the 0->1 transition
 * actually touches the IER.
 */
662 void i915_user_irq_on(struct drm_i915_private *dev_priv)
664 DRM_SPINLOCK(&dev_priv->user_irq_lock);
665 if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
666 dev_priv->irq_enable_reg |= USER_INT_FLAG;
667 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
669 DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
/*
 * Refcounted disable of the user interrupt.  The actual IER write is
 * commented out — the interrupt is deliberately left enabled in
 * hardware; only the refcount is maintained.  Confirm whether re-enabling
 * the masked write is desired before changing this.
 */
673 void i915_user_irq_off(struct drm_i915_private *dev_priv)
675 DRM_SPINLOCK(&dev_priv->user_irq_lock);
676 if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
677 // dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
678 // I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
680 DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
/*
 * Block (up to 3 seconds) until the breadcrumb reaches @irq_nr.
 * Returns 0 on success or the DRM_WAIT_ON error (e.g. -EBUSY on timeout).
 */
684 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
686 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
689 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
690 READ_BREADCRUMB(dev_priv));
/* Fast path: the target breadcrumb has already passed. */
692 if (READ_BREADCRUMB(dev_priv) >= irq_nr)
695 i915_user_irq_on(dev_priv);
696 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
697 READ_BREADCRUMB(dev_priv) >= irq_nr);
698 i915_user_irq_off(dev_priv);
701 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
702 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
705 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
709 /* Needs the lock as it touches the ring.
/*
 * IOCTL: emit a user interrupt and copy the resulting breadcrumb
 * sequence number back to userspace.  Requires the HW lock.
 */
711 int i915_irq_emit(struct drm_device *dev, void *data,
712 struct drm_file *file_priv)
714 struct drm_i915_private *dev_priv = dev->dev_private;
715 struct drm_i915_irq_emit *emit = data;
718 LOCK_TEST_WITH_RETURN(dev, file_priv);
721 DRM_ERROR("called with no initialization\n");
725 result = i915_emit_irq(dev);
727 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
728 DRM_ERROR("copy_to_user\n");
735 /* Doesn't need the hardware lock.
/*
 * IOCTL: wait until the given breadcrumb sequence has been reached.
 * Does not need the hardware lock.
 */
737 int i915_irq_wait(struct drm_device *dev, void *data,
738 struct drm_file *file_priv)
740 struct drm_i915_private *dev_priv = dev->dev_private;
741 struct drm_i915_irq_wait *irqwait = data;
744 DRM_ERROR("called with no initialization\n");
748 return i915_wait_irq(dev, irqwait->irq_seq);
/*
 * Enable vblank interrupts for the pipe driving @plane by setting the
 * matching IER bit.
 */
751 int i915_enable_vblank(struct drm_device *dev, int plane)
753 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
754 int pipe = i915_get_pipe(dev, plane);
758 dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
761 dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
764 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
769 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
/*
 * Disable vblank interrupts for the pipe driving @plane by clearing the
 * matching IER bit.  Mirror image of i915_enable_vblank().
 */
774 void i915_disable_vblank(struct drm_device *dev, int plane)
776 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
777 int pipe = i915_get_pipe(dev, plane);
781 dev_priv->irq_enable_reg &= ~VSYNC_PIPEA_FLAG;
784 dev_priv->irq_enable_reg &= ~VSYNC_PIPEB_FLAG;
787 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
792 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
/*
 * Enable the user interrupt and, on i9xx with modesetting outputs,
 * hotplug interrupts for CRT and any hotplug-capable SDVO outputs.
 */
795 void i915_enable_interrupt (struct drm_device *dev)
797 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
798 struct drm_output *o;
800 dev_priv->irq_enable_reg |= USER_INT_FLAG;
802 if (IS_I9XX(dev) && dev->mode_config.num_output) {
803 dev_priv->irq_enable_reg |= HOTPLUG_FLAG;
805 /* Activate the CRT */
806 I915_WRITE(PORT_HOTPLUG_EN, CRT_HOTPLUG_INT_EN);
/* NOTE(review): each PORT_HOTPLUG_EN write below *replaces* the
 * previously written enable bits instead of OR-ing them in — so a
 * later SDVO write would clobber the CRT enable.  Confirm whether
 * elided code accumulates these, or whether they should be OR'ed. */
809 o = intel_sdvo_find(dev, 1);
810 if (o && intel_sdvo_supports_hotplug(o)) {
811 intel_sdvo_set_hotplug(o, 1);
812 I915_WRITE(PORT_HOTPLUG_EN, SDVOB_HOTPLUG_INT_EN);
816 o = intel_sdvo_find(dev, 0);
817 if (o && intel_sdvo_supports_hotplug(o)) {
818 intel_sdvo_set_hotplug(o, 1);
819 I915_WRITE(PORT_HOTPLUG_EN, SDVOC_HOTPLUG_INT_EN);
825 I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
827 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
830 DRM_DEBUG("HEN %08x\n",I915_READ(PORT_HOTPLUG_EN));
831 DRM_DEBUG("HST %08x\n",I915_READ(PORT_HOTPLUG_STAT));
832 DRM_DEBUG("IER %08x\n",I915_READ(I915REG_INT_ENABLE_R));
833 DRM_DEBUG("SDB %08x\n",I915_READ(SDVOB));
/* Writing the status register back to itself clears stale events. */
835 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
837 dev_priv->irq_enabled = 1;
840 /* Set the vblank monitor pipe
/*
 * IOCTL: select which pipes (A/B bitmask) userspace wants vblank
 * events from; validated and stored in dev_priv->vblank_pipe.
 */
842 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
843 struct drm_file *file_priv)
845 struct drm_i915_private *dev_priv = dev->dev_private;
846 struct drm_i915_vblank_pipe *pipe = data;
849 DRM_ERROR("called with no initialization\n");
/* Reject any bits outside the two valid pipe flags. */
853 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
854 DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
858 dev_priv->vblank_pipe = pipe->pipe;
/*
 * IOCTL: report which pipes currently have vblank interrupts enabled,
 * derived from the live IER contents rather than cached state.
 */
863 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
864 struct drm_file *file_priv)
866 struct drm_i915_private *dev_priv = dev->dev_private;
867 struct drm_i915_vblank_pipe *pipe = data;
871 DRM_ERROR("called with no initialization\n");
875 flag = I915_READ(I915REG_INT_ENABLE_R);
877 if (flag & VSYNC_PIPEA_FLAG)
878 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
879 if (flag & VSYNC_PIPEB_FLAG)
880 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
886 * Schedule buffer swap at given vertical blank.
/*
 * IOCTL: schedule a buffer swap (blit or flip) at a given vblank
 * sequence, or dispatch it immediately if the target has passed.
 */
888 int i915_vblank_swap(struct drm_device *dev, void *data,
889 struct drm_file *file_priv)
891 struct drm_i915_private *dev_priv = dev->dev_private;
892 struct drm_i915_vblank_swap *swap = data;
893 struct drm_i915_vbl_swap *vbl_swap;
894 unsigned int pipe, seqtype, curseq, plane;
895 unsigned long irqflags;
896 struct list_head *list;
900 DRM_ERROR("%s called with no initialization\n", __func__);
904 if (dev_priv->sarea_priv->rotation) {
905 DRM_DEBUG("Rotation not supported\n");
/* Reject unknown seqtype bits. */
909 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
910 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
912 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
916 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
917 pipe = i915_get_pipe(dev, plane);
919 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
/* The pipe must be one userspace enabled via vblank_pipe_set. */
921 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
922 DRM_ERROR("Invalid pipe %d\n", pipe);
926 DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
928 /* It makes no sense to schedule a swap for a drawable that doesn't have
929 * valid information at this point. E.g. this could mean that the X
930 * server is too old to push drawable information to the DRM, in which
931 * case all such swaps would become ineffective.
933 if (!drm_get_drawable_info(dev, swap->drawable)) {
934 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
935 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
939 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
941 drm_update_vblank_count(dev, pipe);
942 curseq = drm_vblank_count(dev, pipe);
/* Relative requests are converted to an absolute target sequence. */
944 if (seqtype == _DRM_VBLANK_RELATIVE)
945 swap->sequence += curseq;
/* Wrap-safe "target already passed" test on the 24-bit counter. */
947 if ((curseq - swap->sequence) <= (1<<23)) {
948 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
949 swap->sequence = curseq + 1;
951 DRM_DEBUG("Missed target sequence\n");
956 if (swap->seqtype & _DRM_VBLANK_FLIP) {
/* Flip whose target has already passed: dispatch it right now. */
959 if ((curseq - swap->sequence) <= (1<<23)) {
960 struct drm_drawable_info *drw;
962 LOCK_TEST_WITH_RETURN(dev, file_priv);
964 DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
966 drw = drm_get_drawable_info(dev, swap->drawable);
969 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
971 DRM_DEBUG("Invalid drawable ID %d\n",
976 i915_dispatch_vsync_flip(dev, drw, plane);
978 DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
/* Coalesce with an already-queued identical swap if present. */
984 DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
986 list_for_each(list, &dev_priv->vbl_swaps.head) {
987 vbl_swap = list_entry(list, struct drm_i915_vbl_swap, head);
989 if (vbl_swap->drw_id == swap->drawable &&
990 vbl_swap->plane == plane &&
991 vbl_swap->sequence == swap->sequence) {
992 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
993 DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
994 DRM_DEBUG("Already scheduled\n");
999 DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
/* Hard cap on queued swaps to bound kernel memory usage. */
1001 if (dev_priv->swaps_pending >= 100) {
1002 DRM_DEBUG("Too many swaps queued\n");
1006 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
1009 DRM_ERROR("Failed to allocate memory to queue swap\n");
/* Hold a vblank reference until the tasklet processes this entry. */
1015 ret = drm_vblank_get(dev, pipe);
1017 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
1021 vbl_swap->drw_id = swap->drawable;
1022 vbl_swap->plane = plane;
1023 vbl_swap->sequence = swap->sequence;
1024 vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
1029 DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
1031 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
1032 dev_priv->swaps_pending++;
1034 DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
/*
 * Quiesce interrupt sources before the IRQ line is hooked up:
 * mask the HW status page writes and zero the mask/enable registers
 * (32-bit path for i9xx, 16-bit for i8xx).
 */
1041 void i915_driver_irq_preinstall(struct drm_device * dev)
1043 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
1045 I915_WRITE16(I915REG_HWSTAM, 0xeffe);
1047 I915_WRITE(I915REG_INT_MASK_R, 0x0);
1048 I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
1050 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
1051 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/*
 * Finish IRQ setup after request_irq: initialize swap/user-irq state,
 * register with the DRM vblank core and enable interrupts.
 */
1056 int i915_driver_irq_postinstall(struct drm_device * dev)
1058 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
1059 int ret, num_pipes = 2;
1061 DRM_SPININIT(&dev_priv->swaps_lock, "swap");
1062 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
1063 dev_priv->swaps_pending = 0;
1065 DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
1066 dev_priv->user_irq_refcount = 0;
1067 dev_priv->irq_enable_reg = 0;
1069 ret = drm_vblank_init(dev, num_pipes);
1073 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1075 i915_enable_interrupt(dev);
1076 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1079 * Initialize the hardware status page IRQ location.
1082 I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
/*
 * Tear down: mask everything, disable all interrupts and ack any that
 * are still pending (32-bit path for i9xx, 16-bit for i8xx).
 */
1086 void i915_driver_irq_uninstall(struct drm_device * dev)
1088 struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
1094 dev_priv->irq_enabled = 0;
1098 I915_WRITE(I915REG_HWSTAM, 0xffffffff);
1099 I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
1100 I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
/* Read-modify-write of IIR acks any still-pending interrupts. */
1102 temp = I915_READ(I915REG_INT_IDENTITY_R);
1103 I915_WRITE(I915REG_INT_IDENTITY_R, temp);
1105 I915_WRITE16(I915REG_HWSTAM, 0xffff);
1106 I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
1107 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
1109 temp = I915_READ16(I915REG_INT_IDENTITY_R);
1110 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);