#include "amdgpu.h"
#include "amdgpu_trace.h"
-static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return;
}
amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
adev->virt.tdr_debug = true;
- return DRM_GPU_SCHED_STAT_NOMINAL;
}
}
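Every conversion in this series has the same shape: the handler performs whatever recovery it can and simply returns, instead of reporting DRM_GPU_SCHED_STAT_NOMINAL back to the scheduler. A minimal sketch of the resulting callback, where foo_ring, to_foo_ring(), foo_try_soft_recovery() and foo_full_gpu_reset() are hypothetical stand-ins rather than real driver API:

static void foo_job_timedout(struct drm_sched_job *sched_job)
{
	struct foo_ring *ring = to_foo_ring(sched_job->sched);

	/* Cheap path: nudge the hung job along without a full reset. */
	if (foo_try_soft_recovery(ring, sched_job))
		return;

	/*
	 * Full reset. Nothing is returned to the scheduler, so the
	 * driver itself is responsible for restarting it afterwards.
	 */
	foo_full_gpu_reset(ring);
}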
return fence;
}
-static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
- *sched_job)
+static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
drm_sched_resubmit_jobs(&gpu->sched);
- drm_sched_start(&gpu->sched, true);
- return DRM_GPU_SCHED_STAT_NOMINAL;
-
out_no_timeout:
/* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true);
- return DRM_GPU_SCHED_STAT_NOMINAL;
}
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
mutex_unlock(&dev->error_task_list_lock);
}
-static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
+static void lima_sched_timedout_job(struct drm_sched_job *job)
{
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
}
static void lima_sched_free_job(struct drm_sched_job *job)
mutex_unlock(&queue->lock);
}
-static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
- *sched_job)
+static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
* spurious. Bail out.
*/
if (dma_fence_is_signaled(job->done_fence))
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return;
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
/* Scheduler is already stopped, nothing to do. */
if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return;
/* Schedule a reset if there's no reset in progress. */
if (!atomic_xchg(&pfdev->reset.pending, 1))
schedule_work(&pfdev->reset.work);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
}
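The atomic_xchg() above acts as a take-once latch: it stores 1 and returns the previous value, so only the first timeout to flip reset.pending from 0 to 1 queues the reset work, and concurrent timeouts on other job slots return without scheduling it twice. The same idiom in isolation, with a hypothetical flag and worker:

static atomic_t reset_pending = ATOMIC_INIT(0);

static void foo_schedule_reset_once(struct work_struct *reset_work)
{
	/* Only the caller that observes the old value 0 wins. */
	if (!atomic_xchg(&reset_pending, 1))
		schedule_work(reset_work);
}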
static const struct drm_sched_backend_ops panfrost_sched_ops = {
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
*
* @sched: scheduler instance
*
} else {
s_job->s_fence->parent = fence;
}
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
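For context, drm_sched_resubmit_jobs() is one step of the stop/reset/resubmit/start sequence that the etnaviv and lima handlers above follow; condensed into a single hypothetical driver function (foo_hw_reset() is a stand-in for the driver's actual reset code):

static void foo_timeout_recovery(struct drm_gpu_scheduler *sched,
				 struct drm_sched_job *bad)
{
	drm_sched_stop(sched, bad);	/* park the scheduler */
	foo_hw_reset();			/* driver-specific GPU reset */
	drm_sched_resubmit_jobs(sched);	/* re-run jobs on the pending list */
	drm_sched_start(sched, true);	/* restart, with full recovery */
}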
return NULL;
}
-static enum drm_gpu_sched_stat
+static void
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
enum v3d_queue q;
}
mutex_unlock(&v3d->reset_lock);
-
- return DRM_GPU_SCHED_STAT_NOMINAL;
}
/* If the current address or return address have changed, then the GPU
 * has probably made progress and we should delay the reset.  This
 * could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
-static enum drm_gpu_sched_stat
+static void
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
{
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ v3d_gpu_reset_for_timeout(v3d, sched_job);
}
-static enum drm_gpu_sched_stat
+static void
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
- return v3d_cl_job_timedout(sched_job, V3D_BIN,
- &job->timedout_ctca, &job->timedout_ctra);
+ v3d_cl_job_timedout(sched_job, V3D_BIN,
+ &job->timedout_ctca, &job->timedout_ctra);
}
-static enum drm_gpu_sched_stat
+static void
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
- return v3d_cl_job_timedout(sched_job, V3D_RENDER,
- &job->timedout_ctca, &job->timedout_ctra);
+ v3d_cl_job_timedout(sched_job, V3D_RENDER,
+ &job->timedout_ctca, &job->timedout_ctra);
}
-static enum drm_gpu_sched_stat
+static void
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+ v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}
-static enum drm_gpu_sched_stat
+static void
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ v3d_gpu_reset_for_timeout(v3d, sched_job);
}
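The v3d CL and CSD handlers above share one heuristic: snapshot a progress counter (the CL current/return address pair, or the completed-batch count) on each timeout, and if it moved since the last check, treat the job as alive and skip the reset. Roughly, with a hypothetical foo_read_progress() and job field standing in for the v3d registers:

static void foo_job_timedout(struct drm_sched_job *sched_job)
{
	struct foo_job *job = to_foo_job(sched_job);
	u32 progress = foo_read_progress(job);

	if (job->timedout_progress != progress) {
		/* The GPU moved since the last timeout: let it run. */
		job->timedout_progress = progress;
		return;
	}

	/* No forward progress; fall back to a full reset. */
	foo_gpu_reset_for_timeout(job);
}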
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
-enum drm_gpu_sched_stat {
- DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
- DRM_GPU_SCHED_STAT_NOMINAL,
- DRM_GPU_SCHED_STAT_ENODEV,
-};
-
/**
* struct drm_sched_backend_ops
*
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
- *
- * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
- * and the underlying driver has started or completed recovery.
- *
- * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
- * available, i.e. has been unplugged.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
*/
- enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
+ void (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
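With the void signature in place, a driver's ops table is unchanged apart from the callback's type; roughly, for a hypothetical foo driver (field names per this era's struct drm_sched_backend_ops):

static const struct drm_sched_backend_ops foo_sched_ops = {
	.dependency	= foo_job_dependency,
	.run_job	= foo_job_run,
	.timedout_job	= foo_job_timedout,	/* now returns void */
	.free_job	= foo_job_free,
};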