5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
38 #include <linux/interrupt.h> /* For task queue support */
41 * Get interrupt from bus id.
43 * \param inode device inode.
44 * \param file_priv DRM file private.
46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure.
49 * Finds the PCI device with the specified bus id and gets its IRQ number.
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to.
53 int drm_irq_by_busid(struct drm_device *dev, void *data,
54 struct drm_file *file_priv)
/*
 * IOCTL backend for DRM_IOCTL_IRQ_BUSID: report the IRQ number of the
 * PCI device this DRM instance is attached to.  The busid supplied by
 * userspace must match that device exactly (see the header comment
 * above: the ioctl is deprecated for any other busid).
 */
56 struct drm_irq_busid *p = data;
/* Only drivers that advertise DRIVER_HAVE_IRQ can answer this.
 * (The early-return statement is elided in this extract.) */
58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
/* p->busnum packs the PCI domain in the bits above 8 and the bus number
 * in the low byte; domain, bus, slot and function must all match pdev. */
61 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
62 (p->busnum & 0xff) != dev->pdev->bus->number ||
63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
/* Matched: return the IRQ through the ioctl argument structure. */
66 p->irq = dev->pdev->irq;
68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
74 static void vblank_disable_fn(unsigned long arg)
/*
 * Timer callback (armed by drm_vblank_put(), run synchronously by
 * drm_vblank_cleanup()): turn off vblank interrupts on every CRTC whose
 * reference count has dropped to zero.
 *
 * @arg: the struct drm_device, cast through unsigned long because the
 *       kernel timer API passes an integer cookie.
 */
76 struct drm_device *dev = (struct drm_device *)arg;
77 unsigned long irqflags;
/* Disabling is deferred until userspace signals that modesetting is
 * done (dev->vblank_disable_allowed is set in drm_modeset_ctl()). */
80 if (!dev->vblank_disable_allowed)
83 for (i = 0; i < dev->num_crtcs; i++) {
/* vbl_lock serialises the refcount test against drm_vblank_get(),
 * which enables interrupts and updates counters under the same lock. */
84 spin_lock_irqsave(&dev->vbl_lock, irqflags);
85 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
86 dev->vblank_enabled[i]) {
87 DRM_DEBUG("disabling vblank on crtc %d\n", i);
/* Sample the hardware counter one last time before switching the
 * interrupt off, then record the CRTC as disabled. */
89 dev->driver->get_vblank_counter(dev, i);
90 dev->driver->disable_vblank(dev, i);
91 dev->vblank_enabled[i] = 0;
93 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
97 static void drm_vblank_cleanup(struct drm_device *dev)
/*
 * Undo drm_vblank_init(): cancel the deferred-disable timer, force any
 * still-enabled vblank interrupts off, and free every per-CRTC array.
 * Safe to call even if init never ran (num_crtcs == 0 guard below);
 * also used as the error-unwind path of drm_vblank_init() itself.
 */
99 /* Bail if the driver didn't call drm_vblank_init() */
100 if (dev->num_crtcs == 0)
103 del_timer(&dev->vblank_disable_timer);
/* Run the disable logic synchronously now that the timer is cancelled. */
105 vblank_disable_fn((unsigned long)dev);
/* Free all per-CRTC state allocated in drm_vblank_init(). */
107 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
109 drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
111 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
112 dev->num_crtcs, DRM_MEM_DRIVER);
113 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
114 dev->num_crtcs, DRM_MEM_DRIVER);
115 drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
116 dev->num_crtcs, DRM_MEM_DRIVER);
117 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
119 drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
120 dev->num_crtcs, DRM_MEM_DRIVER);
125 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
/*
 * Allocate and initialise all per-CRTC vblank state: wait queues,
 * pending-signal lists, "cooked" counters, reference counts, enable
 * flags, last-hardware-count snapshots and in-modeset flags, plus the
 * deferred timer that disables unused vblank interrupts.
 *
 * @num_crtcs: number of CRTCs the driver supports.
 * Returns 0 on success; on any allocation failure the error path
 * (elided gotos) calls drm_vblank_cleanup() and returns -ENOMEM.
 */
127 int i, ret = -ENOMEM;
/* Timer fires vblank_disable_fn(dev); deferrable so an idle CPU is not
 * woken just to disable an interrupt. */
129 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
131 init_timer_deferrable(&dev->vblank_disable_timer);
132 spin_lock_init(&dev->vbl_lock);
133 atomic_set(&dev->vbl_signal_pending, 0);
134 dev->num_crtcs = num_crtcs;
/* One wait queue per CRTC for drm_wait_vblank() sleepers. */
136 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
/* One list head per CRTC for queued vblank signals. */
141 dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
146 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
148 if (!dev->_vblank_count)
151 dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
153 if (!dev->vblank_refcount)
/* drm_calloc zero-fills, so these start as "disabled" / zero. */
156 dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
158 if (!dev->vblank_enabled)
161 dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
162 if (!dev->last_vblank)
165 dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
167 if (!dev->vblank_inmodeset)
170 /* Zero per-crtc vblank stuff */
171 for (i = 0; i < num_crtcs; i++) {
172 init_waitqueue_head(&dev->vbl_queue[i]);
173 INIT_LIST_HEAD(&dev->vbl_sigs[i]);
174 atomic_set(&dev->_vblank_count[i], 0);
175 atomic_set(&dev->vblank_refcount[i], 0);
/* Interrupt-disable stays forbidden until the first POST_MODESET
 * ioctl flips this (see drm_modeset_ctl()). */
178 dev->vblank_disable_allowed = 0;
/* Error label: unwind everything allocated so far. */
183 drm_vblank_cleanup(dev);
186 EXPORT_SYMBOL(drm_vblank_init);
189 * Install IRQ handler.
191 * \param dev DRM device.
193 * Initializes the IRQ related data. Installs the handler, calling the driver
194 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
195 * before and after the installation.
197 int drm_irq_install(struct drm_device * dev)
200 unsigned long sh_flags = 0;
/* Only meaningful for drivers that use an interrupt. */
202 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
/* IRQ 0 means "no interrupt assigned" for this device. */
205 if (dev->pdev->irq == 0)
/* struct_mutex protects the irq_enabled flag against a racing
 * install/uninstall. */
208 mutex_lock(&dev->struct_mutex);
210 /* Driver must have been initialized */
211 if (!dev->dev_private) {
212 mutex_unlock(&dev->struct_mutex);
/* Already installed: nothing to do (error return elided in extract). */
216 if (dev->irq_enabled) {
217 mutex_unlock(&dev->struct_mutex);
/* Claim the slot before dropping the mutex so a concurrent caller
 * sees the install as in progress. */
220 dev->irq_enabled = 1;
221 mutex_unlock(&dev->struct_mutex);
223 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
225 /* Before installing handler */
226 dev->driver->irq_preinstall(dev);
228 /* Install handler */
229 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
230 sh_flags = IRQF_SHARED;
232 ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
233 sh_flags, dev->devname, dev);
/* Rollback on request_irq() failure: clear the flag taken above.
 * (The `if (ret < 0)` guard is elided in this extract — confirm.) */
235 mutex_lock(&dev->struct_mutex);
236 dev->irq_enabled = 0;
237 mutex_unlock(&dev->struct_mutex);
240 /* Expose the device irq to device drivers that want to export it for
243 dev->irq = dev->pdev->irq;
245 /* After installing handler */
246 ret = dev->driver->irq_postinstall(dev);
/* Same rollback if the driver's postinstall hook fails. */
248 mutex_lock(&dev->struct_mutex);
249 dev->irq_enabled = 0;
250 mutex_unlock(&dev->struct_mutex);
255 EXPORT_SYMBOL(drm_irq_install);
258 * Uninstall the IRQ handler.
260 * \param dev DRM device.
262 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
264 int drm_irq_uninstall(struct drm_device * dev)
268 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
/* Atomically snapshot-and-clear irq_enabled under struct_mutex so only
 * one caller proceeds to tear the handler down.  (The early return for
 * the "was not enabled" case is elided in this extract.) */
271 mutex_lock(&dev->struct_mutex);
272 irq_enabled = dev->irq_enabled;
273 dev->irq_enabled = 0;
274 mutex_unlock(&dev->struct_mutex);
279 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Let the driver quiesce its interrupt sources before we free the IRQ. */
281 dev->driver->irq_uninstall(dev);
283 free_irq(dev->pdev->irq, dev);
/* Release all vblank state; no interrupts can arrive any more. */
285 drm_vblank_cleanup(dev);
/* Drop any driver callback still queued for the locked tasklet. */
287 dev->locked_tasklet_func = NULL;
291 EXPORT_SYMBOL(drm_irq_uninstall);
296 * \param inode device inode.
297 * \param file_priv DRM file private.
298 * \param cmd command.
299 * \param arg user argument, pointing to a drm_control structure.
300 * \return zero on success or a negative number on failure.
302 * Calls irq_install() or irq_uninstall() according to \p arg.
304 int drm_control(struct drm_device *dev, void *data,
305 struct drm_file *file_priv)
/*
 * IOCTL backend for DRM_IOCTL_CONTROL: dispatch to drm_irq_install()
 * or drm_irq_uninstall() depending on the requested operation.
 * (The switch statement on the request is elided in this extract;
 * only its case labels are visible below.)
 */
307 struct drm_control *ctl = data;
309 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
313 case DRM_INST_HANDLER:
314 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
/* Interface versions before 1.2 passed the IRQ number in the ioctl;
 * it must match the device's real IRQ. */
316 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
317 ctl->irq != dev->pdev->irq)
319 return drm_irq_install(dev);
320 case DRM_UNINST_HANDLER:
321 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
323 return drm_irq_uninstall(dev);
330 * drm_vblank_count - retrieve "cooked" vblank counter value
332 * @crtc: which counter to retrieve
334 * Fetches the "cooked" vblank count value that represents the number of
335 * vblank events since the system was booted, including lost events due to
336 * modesetting activity.
338 u32 drm_vblank_count(struct drm_device *dev, int crtc)
/* Atomic read of the software ("cooked") vblank counter for @crtc;
 * callable from any context, no locking required. */
340 return atomic_read(&dev->_vblank_count[crtc]);
342 EXPORT_SYMBOL(drm_vblank_count);
345 * drm_update_vblank_count - update the master vblank counter
347 * @crtc: counter to update
349 * Call back into the driver to update the appropriate vblank counter
350 * (specified by @crtc). Deal with wraparound, if it occurred, and
351 * update the last read value so we can deal with wraparound on the next
354 * Only necessary when going from off->on, to account for frames we
355 * didn't get an interrupt for.
357 * Note: caller must hold dev->vbl_lock since this reads & writes
358 * device vblank fields.
360 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
362 u32 cur_vblank, diff;
365 * Interrupts were disabled prior to this call, so deal with counter
367 * NOTE! It's possible we lost a full dev->max_vblank_count events
368 * here if the register is small or we had vblank interrupts off for
/* Read the current hardware counter and compute how many vblanks
 * elapsed since the last snapshot. */
371 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
372 diff = cur_vblank - dev->last_vblank[crtc];
/* Hardware counter wrapped: compensate by adding its modulus. */
373 if (cur_vblank < dev->last_vblank[crtc]) {
374 diff += dev->max_vblank_count;
376 DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
377 crtc, dev->last_vblank[crtc], cur_vblank, diff);
380 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
/* Fold the missed events into the cooked counter. */
383 atomic_add(diff, &dev->_vblank_count[crtc]);
387 * drm_vblank_get - get a reference count on vblank events
389 * @crtc: which CRTC to own
391 * Acquire a reference count on vblank events to avoid having them disabled
395 * Zero on success, nonzero on failure.
397 int drm_vblank_get(struct drm_device *dev, int crtc)
399 unsigned long irqflags;
/* vbl_lock serialises enable/disable against vblank_disable_fn(). */
402 spin_lock_irqsave(&dev->vbl_lock, irqflags);
403 /* Going from 0->1 means we have to enable interrupts again */
404 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
405 !dev->vblank_enabled[crtc]) {
406 ret = dev->driver->enable_vblank(dev, crtc);
/* Enable failed: undo the reference we just took. */
408 atomic_dec(&dev->vblank_refcount[crtc]);
/* Enabled: account for any vblanks missed while interrupts were off. */
410 dev->vblank_enabled[crtc] = 1;
411 drm_update_vblank_count(dev, crtc);
414 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
418 EXPORT_SYMBOL(drm_vblank_get);
421 * drm_vblank_put - give up ownership of vblank events
423 * @crtc: which counter to give up
425 * Release ownership of a given vblank counter, turning off interrupts
428 void drm_vblank_put(struct drm_device *dev, int crtc)
430 /* Last user schedules interrupt disable */
/* Do not disable immediately: arm a 5-second deferrable timer so a
 * quick re-get (common in page-flip heavy workloads) keeps the
 * interrupt enabled and the counter continuous. */
431 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
432 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
434 EXPORT_SYMBOL(drm_vblank_put);
437 * drm_modeset_ctl - handle vblank event counter changes across mode switch
438 * @DRM_IOCTL_ARGS: standard ioctl arguments
440 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
441 * ioctls around modesetting so that any lost vblank events are accounted for.
443 * Generally the counter will reset across mode sets. If interrupts are
444 * enabled around this call, we don't have to do anything since the counter
445 * will have already been incremented.
447 int drm_modeset_ctl(struct drm_device *dev, void *data,
448 struct drm_file *file_priv)
/*
 * IOCTL backend: bracket a modeset with PRE/POST notifications so the
 * kernel holds a vblank reference across the mode switch and no counter
 * events are lost while the CRTC is being reprogrammed.
 */
450 struct drm_modeset_ctl *modeset = data;
451 unsigned long irqflags;
454 /* If drm_vblank_init() hasn't been called yet, just no-op */
458 crtc = modeset->crtc;
/* Reject out-of-range CRTC indices from userspace. */
459 if (crtc >= dev->num_crtcs) {
465 * To avoid all the problems that might happen if interrupts
466 * were enabled/disabled around or between these calls, we just
467 * have the kernel take a reference on the CRTC (just once though
468 * to avoid corrupting the count if multiple, mismatch calls occur),
469 * so that interrupts remain enabled in the interim.
471 switch (modeset->cmd) {
472 case _DRM_PRE_MODESET:
/* vblank_inmodeset guards against double PRE (reference taken once). */
473 if (!dev->vblank_inmodeset[crtc]) {
474 dev->vblank_inmodeset[crtc] = 1;
475 drm_vblank_get(dev, crtc);
478 case _DRM_POST_MODESET:
/* Only honour POST if a matching PRE was seen. */
479 if (dev->vblank_inmodeset[crtc]) {
480 spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* First POST ever also unblocks vblank_disable_fn(). */
481 dev->vblank_disable_allowed = 1;
482 dev->vblank_inmodeset[crtc] = 0;
483 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
484 drm_vblank_put(dev, crtc);
499 * \param inode device inode.
500 * \param file_priv DRM file private.
501 * \param cmd command.
502 * \param data user argument, pointing to a drm_wait_vblank structure.
503 * \return zero on success or a negative number on failure.
505 * Verifies the IRQ is installed.
507 * If a signal is requested checks if this task has already scheduled the same signal
508 * for the same vblank sequence number - nothing to be done in
509 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
510 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
513 * If a signal is not requested, sleeps via DRM_WAIT_ON (3 second timeout)
 * until the requested vblank sequence is reached.
515 int drm_wait_vblank(struct drm_device *dev, void *data,
516 struct drm_file *file_priv)
518 union drm_wait_vblank *vblwait = data;
520 unsigned int flags, seq, crtc;
/* Requires an installed and enabled IRQ handler. */
522 if ((!dev->pdev->irq) || (!dev->irq_enabled))
/* Reject request types outside the known type/flag masks. */
525 if (vblwait->request.type &
526 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
527 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
528 vblwait->request.type,
529 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
533 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
/* Only two CRTCs addressable through this legacy interface:
 * primary (0) or, with _DRM_VBLANK_SECONDARY, CRTC 1. */
534 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
536 if (crtc >= dev->num_crtcs)
/* Hold a vblank reference for the whole wait so the interrupt stays
 * enabled; released at the `done` label (line 635 below). */
539 ret = drm_vblank_get(dev, crtc);
542 seq = drm_vblank_count(dev, crtc);
544 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
545 case _DRM_VBLANK_RELATIVE:
/* Convert a relative request into an absolute target sequence,
 * then fall through to the absolute case. */
546 vblwait->request.sequence += seq;
547 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
548 case _DRM_VBLANK_ABSOLUTE:
/* (seq - target) <= 1<<23 is the standard wrap-safe "target already
 * passed" test on the 32-bit counter; with NEXTONMISS, retarget to
 * the very next vblank instead of returning a stale one. */
555 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
556 (seq - vblwait->request.sequence) <= (1<<23)) {
557 vblwait->request.sequence = seq + 1;
/* Asynchronous path: queue a signal to be sent from the interrupt
 * handler instead of sleeping here. */
560 if (flags & _DRM_VBLANK_SIGNAL) {
561 unsigned long irqflags;
562 struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
563 struct drm_vbl_sig *vbl_sig;
565 spin_lock_irqsave(&dev->vbl_lock, irqflags);
567 /* Check if this task has already scheduled the same signal
568 * for the same vblank sequence number; nothing to be done in
571 list_for_each_entry(vbl_sig, vbl_sigs, head) {
572 if (vbl_sig->sequence == vblwait->request.sequence
573 && vbl_sig->info.si_signo ==
574 vblwait->request.signal
575 && vbl_sig->task == current) {
/* Duplicate request: report the current count and bail out. */
576 spin_unlock_irqrestore(&dev->vbl_lock,
578 vblwait->reply.sequence = seq;
/* Cap the number of outstanding signals system-wide at 100 so a
 * misbehaving client cannot exhaust kernel memory. */
583 if (atomic_read(&dev->vbl_signal_pending) >= 100) {
584 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
/* Drop the lock for the (possibly sleeping) allocation below. */
589 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
591 vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
/* Take a second reference that the signal itself owns; it is
 * dropped by drm_vbl_send_signals() when the signal fires. */
598 ret = drm_vblank_get(dev, crtc);
/* get failed: free the just-allocated signal record. */
600 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
605 atomic_inc(&dev->vbl_signal_pending);
607 vbl_sig->sequence = vblwait->request.sequence;
608 vbl_sig->info.si_signo = vblwait->request.signal;
609 vbl_sig->task = current;
/* Retake the lock only to link the record into the per-CRTC list. */
611 spin_lock_irqsave(&dev->vbl_lock, irqflags);
613 list_add_tail(&vbl_sig->head, vbl_sigs);
615 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
617 vblwait->reply.sequence = seq;
/* Synchronous path: sleep (3 second timeout) until the cooked counter
 * reaches the target, using the same wrap-safe comparison as above. */
619 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
620 ((drm_vblank_count(dev, crtc)
621 - vblwait->request.sequence) <= (1 << 23)));
/* Fill in the reply with a timestamp and the count at wakeup. */
626 do_gettimeofday(&now);
628 vblwait->reply.tval_sec = now.tv_sec;
629 vblwait->reply.tval_usec = now.tv_usec;
630 vblwait->reply.sequence = drm_vblank_count(dev, crtc);
/* Common exit: drop the reference taken at line 539. */
635 drm_vblank_put(dev, crtc);
640 * Send the VBLANK signals.
642 * \param dev DRM device.
643 * \param crtc CRTC where the vblank event occurred
645 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
647 * Called from drm_handle_vblank(); each signal delivered also drops the
 * vblank reference that was taken for it in drm_wait_vblank().
649 static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
651 struct drm_vbl_sig *vbl_sig, *tmp;
652 struct list_head *vbl_sigs;
653 unsigned int vbl_seq;
656 spin_lock_irqsave(&dev->vbl_lock, flags);
658 vbl_sigs = &dev->vbl_sigs[crtc];
659 vbl_seq = drm_vblank_count(dev, crtc);
/* _safe iteration: entries are unlinked and freed inside the loop. */
661 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
/* Wrap-safe "sequence reached" test on the 32-bit counter. */
662 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
/* si_code carries the vblank count the signal fired at. */
663 vbl_sig->info.si_code = vbl_seq;
664 send_sig_info(vbl_sig->info.si_signo,
665 &vbl_sig->info, vbl_sig->task);
667 list_del(&vbl_sig->head);
669 drm_free(vbl_sig, sizeof(*vbl_sig),
/* Drop the global pending count and the vblank reference this
 * signal took when it was queued in drm_wait_vblank(). */
671 atomic_dec(&dev->vbl_signal_pending);
672 drm_vblank_put(dev, crtc);
676 spin_unlock_irqrestore(&dev->vbl_lock, flags);
680 * drm_handle_vblank - handle a vblank event
682 * @crtc: where this event occurred
684 * Drivers should call this routine in their vblank interrupt handlers to
685 * update the vblank counter and send any signals that may be pending.
687 void drm_handle_vblank(struct drm_device *dev, int crtc)
/* Per-vblank interrupt bookkeeping: bump the cooked counter, wake any
 * drm_wait_vblank() sleepers on this CRTC, and deliver queued signals. */
689 atomic_inc(&dev->_vblank_count[crtc]);
690 DRM_WAKEUP(&dev->vbl_queue[crtc]);
691 drm_vbl_send_signals(dev, crtc);
693 EXPORT_SYMBOL(drm_handle_vblank);
696 * Tasklet wrapper function.
698 * \param data DRM device in disguise.
700 * Attempts to grab the HW lock and calls the driver callback on success. On
701 * failure, leave the lock marked as contended so the callback can be called
704 static void drm_locked_tasklet_func(unsigned long data)
706 struct drm_device *dev = (struct drm_device *)data;
707 unsigned long irqflags;
708 void (*tasklet_func)(struct drm_device *);
/* Snapshot the pending callback under tasklet_lock. */
710 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
711 tasklet_func = dev->locked_tasklet_func;
712 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
/* Try to take the HW lock as DRM_KERNEL_CONTEXT; on failure the lock
 * stays marked contended so the callback runs when it is released.
 * (The other half of this condition is elided in this extract.) */
715 !drm_lock_take(&dev->lock,
716 DRM_KERNEL_CONTEXT)) {
/* Lock acquired: record lock time and bump the lock statistics. */
720 dev->lock.lock_time = jiffies;
721 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
/* Re-read and clear the callback atomically — it may have been
 * consumed or replaced since the snapshot above. */
723 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
724 tasklet_func = dev->locked_tasklet_func;
725 dev->locked_tasklet_func = NULL;
726 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
728 if (tasklet_func != NULL)
/* Release the HW lock taken above. */
731 drm_lock_free(&dev->lock,
736 * Schedule a tasklet to call back a driver hook with the HW lock held.
738 * \param dev DRM device.
739 * \param func Driver callback.
741 * This is intended for triggering actions that require the HW lock from an
742 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
743 * completes. Note that the callback may be called from interrupt or process
744 * context, it must not make any assumptions about this. Also, the HW lock will
745 * be held with the kernel context or any client context.
747 void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
749 unsigned long irqflags;
/* One static tasklet shared by all DRM devices; its data field is
 * repointed at the scheduling device below. */
750 static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
/* No-op if the driver has no IRQ support or the tasklet is already
 * scheduled (only one callback can be in flight at a time). */
752 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
753 test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
756 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
/* A callback is still pending for this device: drop the request.
 * (The return statement is elided in this extract.) */
758 if (dev->locked_tasklet_func) {
759 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
763 dev->locked_tasklet_func = func;
765 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
767 drm_tasklet.data = (unsigned long)dev;
/* High-priority scheduling: run ASAP after the interrupt handler. */
769 tasklet_hi_schedule(&drm_tasklet);
771 EXPORT_SYMBOL(drm_locked_tasklet);