1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Bits in the i915 interrupt identity/enable registers (16-bit IIR/IER). */
34 #define USER_INT_FLAG (1<<1)
35 #define VSYNC_PIPEB_FLAG (1<<5)
36 #define VSYNC_PIPEA_FLAG (1<<7)
/* All-ones 32-bit sentinel breadcrumb value. */
38 #define MAX_NOPID ((u32)~0)
41 * Emit blits for scheduled buffer swaps.
43 * This function will be called with the HW lock held.
/*
 * i915_vblank_tasklet - emit front<->back blits for scheduled buffer swaps
 * whose target vblank sequence has been reached.
 *
 * Runs as a locked tasklet with the HW lock held (see header comment above).
 * NOTE(review): this listing is elided (interior lines missing), so the
 * comments below describe only what is visible here.
 */
45 static void i915_vblank_tasklet(drm_device_t *dev)
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned int irqflags;
49 struct list_head *list, *tmp;
53 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* _safe variant: entries are freed (drm_free below) while iterating. */
55 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
56 drm_i915_vbl_swap_t *vbl_swap =
57 list_entry(list, drm_i915_vbl_swap_t, head);
/* Each swap is tied to one pipe; pick that pipe's vblank counter. */
58 atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 :
/* Wrap-safe "counter >= sequence" test: difference <= 2^23 with u32
 * arithmetic means the target sequence has passed. */
61 if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) {
62 drm_drawable_info_t *drw;
/* Drop swaps_lock before taking drw_lock (lock ordering); the saved
 * irq flags stay held until the final irqrestore below. */
64 spin_unlock(&dev_priv->swaps_lock);
66 spin_lock(&dev->drw_lock);
68 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
71 int i, num_rects = drw->num_rects;
72 drm_clip_rect_t *rect = drw->rects;
73 drm_i915_sarea_t *sarea_priv =
75 u32 cpp = dev_priv->cpp;
/* 32bpp blits also copy alpha; other depths use the plain SRC_COPY. */
76 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
77 XY_SRC_COPY_BLT_WRITE_ALPHA |
78 XY_SRC_COPY_BLT_WRITE_RGB)
79 : XY_SRC_COPY_BLT_CMD;
/* Pitch | ROP (0xcc = SRCCOPY) | color-depth field for the BLT. */
80 u32 pitchropcpp = (sarea_priv->pitch * cpp) |
81 (0xcc << 16) | (cpp << 23) |
85 i915_kernel_lost_context(dev);
/* Reset the drawing rectangle to full-screen before blitting. */
89 OUT_RING(GFX_OP_DRAWRECT_INFO);
92 OUT_RING(sarea_priv->width |
93 sarea_priv->height << 16);
94 OUT_RING(sarea_priv->width |
95 sarea_priv->height << 16);
/* The kernel clobbered the ring; mark the context as kernel-owned. */
100 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
/* One back->front blit per clip rect of the drawable. */
102 for (i = 0; i < num_rects; i++, rect++) {
106 OUT_RING(pitchropcpp);
107 OUT_RING((rect->y1 << 16) | rect->x1);
108 OUT_RING((rect->y2 << 16) | rect->x2);
109 OUT_RING(sarea_priv->front_offset);
110 OUT_RING((rect->y1 << 16) | rect->x1);
111 OUT_RING(pitchropcpp & 0xffff);
112 OUT_RING(sarea_priv->back_offset);
118 spin_unlock(&dev->drw_lock);
/* Re-take swaps_lock to continue the (safe) list walk. */
120 spin_lock(&dev_priv->swaps_lock);
/* Completed swap: unlink was done in elided code; release the entry. */
124 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
126 dev_priv->swaps_pending--;
130 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/*
 * i915_driver_irq_handler - top-half interrupt handler.
 *
 * Acks the interrupt identity register, updates the breadcrumb in the
 * shared sarea, wakes user-interrupt waiters and accounts vblanks.
 * NOTE(review): listing is elided; early-return paths are not visible here.
 */
133 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
135 drm_device_t *dev = (drm_device_t *) arg;
136 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
139 temp = I915_READ16(I915REG_INT_IDENTITY_R);
/* Only care about sources we enabled (plus the user interrupt). */
141 temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
144 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
/* Write-back acks exactly the bits we observed. */
149 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
151 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
153 if (temp & USER_INT_FLAG) {
154 DRM_WAKEUP(&dev_priv->irq_queue);
155 #ifdef I915_HAVE_FENCE
156 i915_fence_handler(dev);
160 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
161 int vblank_pipe = dev_priv->vblank_pipe;
/* Both pipes enabled: count each pipe on its own counter. */
164 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
165 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
166 if (temp & VSYNC_PIPEA_FLAG)
167 atomic_inc(&dev->vbl_received);
168 if (temp & VSYNC_PIPEB_FLAG)
169 atomic_inc(&dev->vbl_received2);
/* Single enabled pipe: fold its events into the primary counter. */
170 } else if (((temp & VSYNC_PIPEA_FLAG) &&
171 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
172 ((temp & VSYNC_PIPEB_FLAG) &&
173 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
174 atomic_inc(&dev->vbl_received);
176 DRM_WAKEUP(&dev->vbl_queue);
177 drm_vbl_send_signals(dev);
/* Defer queued page-flips to the locked tasklet (needs the HW lock). */
179 if (dev_priv->swaps_pending > 0)
180 drm_locked_tasklet(dev, i915_vblank_tasklet);
/*
 * i915_emit_irq - emit a breadcrumb store plus a user-interrupt command
 * into the ring.  Caller must hold the HW lock (touches the ring).
 *
 * Returns the breadcrumb sequence number that was emitted.
 */
186 int i915_emit_irq(drm_device_t * dev)
189 drm_i915_private_t *dev_priv = dev->dev_private;
192 i915_kernel_lost_context(dev);
194 DRM_DEBUG("%s\n", __FUNCTION__);
196 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
/* Keep the counter in positive signed-32-bit range; wrap back to 1
 * (0 is never used as a valid breadcrumb). */
198 if (dev_priv->counter > 0x7FFFFFFFUL)
199 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
/* Store the sequence into the hw status page, then raise MI_USER_INTERRUPT. */
202 OUT_RING(CMD_STORE_DWORD_IDX);
204 OUT_RING(dev_priv->counter);
208 OUT_RING(GFX_OP_USER_INTERRUPT);
211 return dev_priv->counter;
/*
 * i915_user_irq_on - take a reference on the user interrupt, enabling it
 * in the hardware on the 0 -> 1 refcount transition.
 * Serialized by user_irq_lock; no-op while IRQs are not installed.
 */
216 void i915_user_irq_on(drm_i915_private_t *dev_priv)
218 spin_lock(&dev_priv->user_irq_lock);
219 if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
220 dev_priv->irq_enable_reg |= USER_INT_FLAG;
221 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
223 spin_unlock(&dev_priv->user_irq_lock);
/*
 * i915_user_irq_off - drop a user-interrupt reference.
 * The actual hardware disable on the 1 -> 0 transition is deliberately
 * commented out below -- NOTE(review): presumably to avoid racing/losing
 * interrupts; confirm against project history before re-enabling.
 */
227 void i915_user_irq_off(drm_i915_private_t *dev_priv)
229 spin_lock(&dev_priv->user_irq_lock);
230 if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
231 // dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
232 // I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
234 spin_unlock(&dev_priv->user_irq_lock);
/*
 * i915_wait_irq - block (up to 3 seconds) until the hardware breadcrumb
 * reaches irq_nr.  Returns 0 on success or a DRM error (EBUSY on timeout).
 */
238 static int i915_wait_irq(drm_device_t * dev, int irq_nr)
240 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
243 DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
244 READ_BREADCRUMB(dev_priv));
/* Fast path: the sequence already retired, no need to sleep. */
246 if (READ_BREADCRUMB(dev_priv) >= irq_nr)
/* Tell the performance-box overlay we are stalled waiting on the GPU. */
249 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
/* Hold a user-irq reference only for the duration of the wait. */
251 i915_user_irq_on(dev_priv);
252 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
253 READ_BREADCRUMB(dev_priv) >= irq_nr);
254 i915_user_irq_off(dev_priv);
256 if (ret == DRM_ERR(EBUSY)) {
257 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
259 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
/* Keep the sarea's view of the last retired breadcrumb current. */
262 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/*
 * i915_driver_vblank_do_wait - wait (up to 3 seconds) until the given
 * vblank counter passes *sequence, then return the current count through
 * *sequence.  Uses the same wrap-safe 2^23 window comparison as the
 * swap scheduling code.
 */
266 static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
269 drm_i915_private_t *dev_priv = dev->dev_private;
270 unsigned int cur_vblank;
274 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
275 return DRM_ERR(EINVAL);
278 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
279 (((cur_vblank = atomic_read(counter))
280 - *sequence) <= (1<<23)));
/* Report the counter value actually observed at wakeup. */
282 *sequence = cur_vblank;
/* Wait for a vblank on the primary pipe's counter. */
287 int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
289 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
/* Wait for a vblank on the secondary pipe's counter. */
292 int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
294 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
297 /* Needs the lock as it touches the ring.
/*
 * i915_irq_emit - ioctl: emit a user interrupt and return its sequence
 * number to userspace through emit.irq_seq.  Requires the HW lock
 * (it writes to the ring via i915_emit_irq).
 */
299 int i915_irq_emit(DRM_IOCTL_ARGS)
302 drm_i915_private_t *dev_priv = dev->dev_private;
303 drm_i915_irq_emit_t emit;
306 LOCK_TEST_WITH_RETURN(dev, filp);
309 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
310 return DRM_ERR(EINVAL);
313 DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
316 result = i915_emit_irq(dev);
/* Copy the new sequence back to the user-supplied pointer. */
318 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
319 DRM_ERROR("copy_to_user\n");
320 return DRM_ERR(EFAULT);
326 /* Doesn't need the hardware lock.
/*
 * i915_irq_wait - ioctl: block until the breadcrumb reaches the
 * user-supplied sequence number.  Does not need the hardware lock.
 */
328 int i915_irq_wait(DRM_IOCTL_ARGS)
331 drm_i915_private_t *dev_priv = dev->dev_private;
332 drm_i915_irq_wait_t irqwait;
335 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
336 return DRM_ERR(EINVAL);
339 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
342 return i915_wait_irq(dev, irqwait.irq_seq);
/*
 * i915_enable_interrupt - program the interrupt-enable register from
 * dev_priv->vblank_pipe: always the user interrupt, plus a vblank bit
 * per selected pipe.  Marks interrupts enabled for the refcount helpers.
 */
345 static void i915_enable_interrupt (drm_device_t *dev)
347 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* Start from USER_INT_FLAG only (implicitly clears old vblank bits). */
349 dev_priv->irq_enable_reg = USER_INT_FLAG; //&= ~(VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
350 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
351 dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
352 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
353 dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
355 I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
356 dev_priv->irq_enabled = 1;
359 /* Set the vblank monitor pipe
/*
 * i915_vblank_pipe_set - ioctl: choose which pipe(s) generate vblank
 * interrupts, then reprogram the enable register accordingly.
 * Rejects any bits outside PIPE_A|PIPE_B.
 */
361 int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
364 drm_i915_private_t *dev_priv = dev->dev_private;
365 drm_i915_vblank_pipe_t pipe;
368 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
369 return DRM_ERR(EINVAL);
372 DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
/* Validate: only the two known pipe bits may be set. */
375 if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
376 DRM_ERROR("%s called with invalid pipe 0x%x\n",
377 __FUNCTION__, pipe.pipe);
378 return DRM_ERR(EINVAL);
381 dev_priv->vblank_pipe = pipe.pipe;
383 i915_enable_interrupt (dev);
/*
 * i915_vblank_pipe_get - ioctl: report which pipes currently have vblank
 * interrupts enabled, read back from the hardware enable register.
 * NOTE(review): the zero-initialization of pipe.pipe is not visible in
 * this elided listing -- verify it exists before the |= below.
 */
388 int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
391 drm_i915_private_t *dev_priv = dev->dev_private;
392 drm_i915_vblank_pipe_t pipe;
396 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
397 return DRM_ERR(EINVAL);
/* Translate hardware enable bits back to the DRM pipe flags. */
400 flag = I915_READ(I915REG_INT_ENABLE_R);
402 if (flag & VSYNC_PIPEA_FLAG)
403 pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
404 if (flag & VSYNC_PIPEB_FLAG)
405 pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
406 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
412 * Schedule buffer swap at given vertical blank.
414 int i915_vblank_swap(DRM_IOCTL_ARGS)
417 drm_i915_private_t *dev_priv = dev->dev_private;
418 drm_i915_vblank_swap_t swap;
419 drm_i915_vbl_swap_t *vbl_swap;
420 unsigned int pipe, seqtype, irqflags, curseq;
421 struct list_head *list;
424 DRM_ERROR("%s called with no initialization\n", __func__);
425 return DRM_ERR(EINVAL);
428 if (dev_priv->sarea_priv->rotation) {
429 DRM_DEBUG("Rotation not supported\n");
430 return DRM_ERR(EINVAL);
433 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
436 if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
437 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
438 DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
439 return DRM_ERR(EINVAL);
442 pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
444 seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
446 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
447 DRM_ERROR("Invalid pipe %d\n", pipe);
448 return DRM_ERR(EINVAL);
451 spin_lock_irqsave(&dev->drw_lock, irqflags);
453 if (!drm_get_drawable_info(dev, swap.drawable)) {
454 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
455 DRM_ERROR("Invalid drawable ID %d\n", swap.drawable);
456 return DRM_ERR(EINVAL);
459 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
461 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
463 if (seqtype == _DRM_VBLANK_RELATIVE)
464 swap.sequence += curseq;
466 if ((curseq - swap.sequence) <= (1<<23)) {
467 if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
468 swap.sequence = curseq + 1;
470 DRM_DEBUG("Missed target sequence\n");
471 return DRM_ERR(EINVAL);
475 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
477 list_for_each(list, &dev_priv->vbl_swaps.head) {
478 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
480 if (vbl_swap->drw_id == swap.drawable &&
481 vbl_swap->pipe == pipe &&
482 vbl_swap->sequence == swap.sequence) {
483 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
484 DRM_DEBUG("Already scheduled\n");
489 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
491 if (dev_priv->swaps_pending >= 100) {
492 DRM_DEBUG("Too many swaps queued\n");
493 return DRM_ERR(EBUSY);
496 vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER);
499 DRM_ERROR("Failed to allocate memory to queue swap\n");
500 return DRM_ERR(ENOMEM);
505 vbl_swap->drw_id = swap.drawable;
506 vbl_swap->pipe = pipe;
507 vbl_swap->sequence = swap.sequence;
509 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
511 list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
512 dev_priv->swaps_pending++;
514 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
516 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
/*
 * i915_driver_irq_preinstall - quiesce interrupt sources before the IRQ
 * line is hooked up: mask hardware status reporting and disable all
 * interrupt generation.
 */
524 void i915_driver_irq_preinstall(drm_device_t * dev)
526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
528 I915_WRITE16(I915REG_HWSTAM, 0xeffe);
529 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
530 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/*
 * i915_driver_irq_postinstall - initialize software IRQ state (swap
 * queue, locks, refcounts), enable interrupts, and point the hardware
 * status page at the breadcrumb location.
 */
533 void i915_driver_irq_postinstall(drm_device_t * dev)
535 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
537 dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
538 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
539 dev_priv->swaps_pending = 0;
541 dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
542 dev_priv->user_irq_refcount = 0;
/* Default to pipe A if userspace never selected a vblank pipe. */
544 if (!dev_priv->vblank_pipe)
545 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
546 i915_enable_interrupt(dev);
547 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/* INSTPM bits 5 and 21: see the comment above about the hardware
 * status page IRQ location. */
553 I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21));
/*
 * i915_driver_irq_uninstall - disable all interrupt generation and ack
 * any interrupt still pending in the identity register.
 */
556 void i915_driver_irq_uninstall(drm_device_t * dev)
558 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
563 dev_priv->irq_enabled = 0;
564 I915_WRITE16(I915REG_HWSTAM, 0xffff);
565 I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
566 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Read-then-write-back clears (acks) any latched identity bits. */
568 temp = I915_READ16(I915REG_INT_IDENTITY_R);
569 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);