#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue;
atomic_t vbl_received;
- struct tq_struct vbl_tq;
- struct semaphore vbl_sem;
+ spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs;
#endif
cycles_t ctx_start;
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
-extern void DRM(vbl_immediate_bh)( void *arg );
+extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( void *dev );
int DRM(irq_install)( drm_device_t *dev, int irq )
{
int ret;
+#if __HAVE_VBL_IRQ
+ unsigned long flags;
+#endif
if ( !irq )
return -EINVAL;
#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
- sema_init( &dev->vbl_sem, 0 );
+ spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
-
- up( &dev->vbl_sem );
-
- INIT_LIST_HEAD( &dev->vbl_tq.list );
- dev->vbl_tq.sync = 0;
- dev->vbl_tq.routine = DRM(vbl_immediate_bh);
- dev->vbl_tq.data = dev;
#endif
/* Before installing handler */
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
+ unsigned long irqflags;
drm_vbl_sig_t *vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) );
if ( !vbl_sig )
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
/* Hook signal entry into list */
- down( &dev->vbl_sem );
+ spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
- up( &dev->vbl_sem );
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
} else {
ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
return ret;
}
-void DRM(vbl_immediate_bh)( void *arg )
+void DRM(vbl_send_signals)( drm_device_t *dev )
{
- drm_device_t *dev = (drm_device_t *) arg;
struct list_head *entry, *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
+ unsigned long flags;
- down( &dev->vbl_sem );
+ spin_lock_irqsave( &dev->vbl_lock, flags );
list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) {
}
}
- up( &dev->vbl_sem );
+ spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
#endif /* __HAVE_VBL_IRQ */
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue;
atomic_t vbl_received;
- struct tq_struct vbl_tq;
- struct semaphore vbl_sem;
+ spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs;
#endif
cycles_t ctx_start;
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
-extern void DRM(vbl_immediate_bh)( void *arg );
+extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( void *dev );
int DRM(irq_install)( drm_device_t *dev, int irq )
{
int ret;
+#if __HAVE_VBL_IRQ
+ unsigned long flags;
+#endif
if ( !irq )
return -EINVAL;
#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
- sema_init( &dev->vbl_sem, 0 );
+ spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
-
- up( &dev->vbl_sem );
-
- INIT_LIST_HEAD( &dev->vbl_tq.list );
- dev->vbl_tq.sync = 0;
- dev->vbl_tq.routine = DRM(vbl_immediate_bh);
- dev->vbl_tq.data = dev;
#endif
/* Before installing handler */
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
+ unsigned long irqflags;
drm_vbl_sig_t *vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) );
if ( !vbl_sig )
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
/* Hook signal entry into list */
- down( &dev->vbl_sem );
+ spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
- up( &dev->vbl_sem );
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
} else {
ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
return ret;
}
-void DRM(vbl_immediate_bh)( void *arg )
+void DRM(vbl_send_signals)( drm_device_t *dev )
{
- drm_device_t *dev = (drm_device_t *) arg;
struct list_head *entry, *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
+ unsigned long flags;
- down( &dev->vbl_sem );
+ spin_lock_irqsave( &dev->vbl_lock, flags );
list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) {
}
}
- up( &dev->vbl_sem );
+ spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
#endif /* __HAVE_VBL_IRQ */
MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
-
- /* kick off bottom half for signals */
- queue_task(&dev->vbl_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ DRM(vbl_send_signals)( dev );
}
}
R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
-
- /* kick off bottom half for signals */
- queue_task(&dev->vbl_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ DRM(vbl_send_signals)( dev );
}
}
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
-
- /* kick off bottom half for signals */
- queue_task(&dev->vbl_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ DRM(vbl_send_signals)( dev );
}
/* Acknowledge all the bits in GEN_INT_STATUS -- seem to get
MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
-
- /* kick off bottom half for signals */
- queue_task(&dev->vbl_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ DRM(vbl_send_signals)( dev );
}
}
R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
-
- /* kick off bottom half for signals */
- queue_task(&dev->vbl_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ DRM(vbl_send_signals)( dev );
}
}
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
-
- /* kick off bottom half for signals */
- queue_task(&dev->vbl_tq, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
+ DRM(vbl_send_signals)( dev );
}
/* Acknowledge all the bits in GEN_INT_STATUS -- seem to get