From b81157d016a48b8025ccfcb286827679b35f16aa Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 13 Jun 2011 17:39:06 -0400
Subject: [PATCH] drm/radeon/kms: use helper functions for fence read/write

The existing code assumed scratch registers in a number of places,
while in most cases we are using writeback and events rather than
scratch registers.

Signed-off-by: Alex Deucher
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/radeon_fence.c | 51 ++++++++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1f8229436570..021d2b6b556f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,6 +40,35 @@
 #include "radeon.h"
 #include "radeon_trace.h"
 
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+{
+	if (rdev->wb.enabled) {
+		u32 scratch_index;
+		if (rdev->wb.use_event)
+			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		else
+			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+	} else
+		WREG32(rdev->fence_drv.scratch_reg, seq);
+}
+
+static u32 radeon_fence_read(struct radeon_device *rdev)
+{
+	u32 seq;
+
+	if (rdev->wb.enabled) {
+		u32 scratch_index;
+		if (rdev->wb.use_event)
+			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		else
+			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+	} else
+		seq = RREG32(rdev->fence_drv.scratch_reg);
+	return seq;
+}
+
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 		return 0;
 	}
 	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-	if (!rdev->cp.ready) {
+	if (!rdev->cp.ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
-		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-	} else
+		radeon_fence_write(rdev, fence->seq);
+	else
 		radeon_fence_ring_emit(rdev, fence);
 
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 	bool wake = false;
 	unsigned long cjiffies;
 
-	if (rdev->wb.enabled) {
-		u32 scratch_index;
-		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-	} else
-		seq = RREG32(rdev->fence_drv.scratch_reg);
+	seq = radeon_fence_read(rdev);
 	if (seq != rdev->fence_drv.last_seq) {
 		rdev->fence_drv.last_seq = seq;
 		rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
 			r = radeon_gpu_reset(rdev);
 			if (r)
 				return r;
-			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+			radeon_fence_write(rdev, fence->seq);
 			rdev->gpu_lockup = false;
 		}
 		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		return r;
 	}
-	WREG32(rdev->fence_drv.scratch_reg, 0);
+	radeon_fence_write(rdev, 0);
 	atomic_set(&rdev->fence_drv.seq, 0);
 	INIT_LIST_HEAD(&rdev->fence_drv.created);
 	INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	struct radeon_fence *fence;
 
 	seq_printf(m, "Last signaled fence 0x%08X\n",
-		   RREG32(rdev->fence_drv.scratch_reg));
+		   radeon_fence_read(rdev));
 	if (!list_empty(&rdev->fence_drv.emited)) {
 		fence = list_entry(rdev->fence_drv.emited.prev,
 				   struct radeon_fence, list);
-- 
2.11.0
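
Not part of the patch itself: below is a minimal, standalone userspace sketch of the
writeback-slot selection that the new radeon_fence_read()/radeon_fence_write() helpers
centralize. All names and numbers here (model_dev, WB_EVENT_OFFSET, WB_SCRATCH_OFFSET,
the register addresses) are illustrative stand-ins, not the driver's real structures or
offsets, and the le32_to_cpu() conversion is omitted.

/*
 * Sketch only: models how the fence read helper picks a writeback slot,
 * falling back to an MMIO scratch register when writeback is disabled.
 */
#include <stdint.h>
#include <stdio.h>

#define WB_EVENT_OFFSET    3072  /* placeholder, not the kernel's value */
#define WB_SCRATCH_OFFSET  0     /* placeholder, not the kernel's value */

struct model_dev {
	int enabled;           /* writeback active? */
	int use_event;         /* event-based writeback? */
	uint32_t scratch_reg;  /* byte address of the fence scratch register */
	uint32_t reg_base;     /* byte address of the first scratch register */
	uint32_t wb[1024];     /* writeback buffer, one 32-bit word per slot */
	uint32_t mmio_scratch; /* stands in for the real MMIO register */
};

static uint32_t model_fence_read(struct model_dev *dev)
{
	uint32_t index;

	if (!dev->enabled)
		return dev->mmio_scratch;   /* no writeback: read the register */

	/* byte offset into the writeback buffer, then convert to a u32 slot */
	index = (dev->use_event ? WB_EVENT_OFFSET : WB_SCRATCH_OFFSET)
		+ dev->scratch_reg - dev->reg_base;
	return dev->wb[index / 4];
}

int main(void)
{
	struct model_dev dev = { .enabled = 1, .use_event = 1,
				 .scratch_reg = 8, .reg_base = 0 };

	dev.wb[(WB_EVENT_OFFSET + 8) / 4] = 42;  /* pretend the GPU wrote seq 42 */
	printf("seq = %u\n", (unsigned)model_fence_read(&dev));
	return 0;
}

The point of the helpers in the patch is simply that this index arithmetic and the MMIO
fallback live in one place instead of being duplicated at every fence read/write site.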