int radeon_invalidate_caches(drm_device_t * dev, uint32_t flags)
{
- /*
- * FIXME: Only emit once per batchbuffer submission.
- */
-#if 0
- uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
+ /*
+ * Emit a flush of both the pixel cache and the Z (depth) cache on the
+ * command ring.
+ *
+ * NOTE(review): the 'flags' argument is ignored — both caches are
+ * flushed unconditionally on every call, regardless of which
+ * DRM_BO_FLAG_* bits are set.  Confirm this is intentional rather
+ * than a selective-flush TODO (the deleted #if 0 block suggests a
+ * per-flag flush was once planned).
+ */
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
- if (flags & DRM_BO_FLAG_READ)
- flush_cmd |= MI_READ_FLUSH;
- if (flags & DRM_BO_FLAG_EXE)
- flush_cmd |= MI_EXE_FLUSH;
-
- return 0;
-// return radeon_emit_mi_flush(dev, flush_cmd);
-#endif
+ /* BEGIN_RING(4) reserves room for the two flush packets below —
+ * presumably 2 dwords each; verify against the RADEON_FLUSH_CACHE /
+ * RADEON_FLUSH_ZCACHE macro definitions. */
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_FLUSH_ZCACHE();
+ ADVANCE_RING();
+ /* Always reports success; ring emission here has no error path. */
return 0;
}
int radeon_move(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
+ /*
+ * Move a buffer object to the placement described by new_mem.
+ *
+ * Buffers currently resident in system (DRM_BO_MEM_LOCAL) memory are
+ * moved with the generic CPU memcpy fallback.
+ */
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ /* NOTE(review): for any non-LOCAL source this returns success
+ * without performing a move — confirm whether a hardware blit path
+ * is still to be added here, or whether callers never reach this
+ * case. */
return 0;
}
drm_fence_class_manager_t *fc = &dev->fm.class[0];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t pending_flush_types = 0;
- uint32_t flush_flags = 0;
- uint32_t flush_sequence = 0;
- uint32_t i_status;
- uint32_t diff;
uint32_t sequence;
if (!dev_priv)
((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
if (pending_flush_types) {
- drm_fence_handler(dev, 0, 0,0);
-
+ sequence = READ_BREADCRUMB(dev_priv);
+
+ drm_fence_handler(dev, 0, sequence, pending_flush_types);
}
return;
/* radeon_irq.c */
extern int radeon_irq_emit(DRM_IOCTL_ARGS);
extern int radeon_irq_wait(DRM_IOCTL_ARGS);
+extern int radeon_emit_irq(drm_device_t * dev);
extern void radeon_do_release(drm_device_t * dev);
extern int radeon_driver_vblank_wait(drm_device_t * dev,
return IRQ_HANDLED;
}
-static int radeon_emit_irq(drm_device_t * dev)
+int radeon_emit_irq(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;