Add the capability to query information from a submit queue.
The first available parameter returns the number of GPU faults
(hangs) attributed to the queue. Userspace can periodically
query this value to see if the queue has caused a fault and
take action accordingly. On recovery, the oldest un-retired
submit on the active ring is assumed to be the guilty one and
the fault count of its queue is incremented.
Change-Id: Ic0dedbadc68d5782c0b8b71d89722742aa6aaf1a
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
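
For reference, a minimal userspace sketch of the new ioctl (not part
of this patch; it assumes libdrm's drmIoctl() and the updated
msm_drm.h header, with the fd and queue id obtained elsewhere via a
DRM open and MSM_SUBMITQUEUE_NEW):

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"

/* Return 0 on success and store the queue's fault count in *faults */
static int query_queue_faults(int fd, uint32_t queue_id, uint32_t *faults)
{
	struct drm_msm_submitqueue_query req = {
		.data = (uintptr_t)faults,	/* user pointer for the result */
		.id = queue_id,
		.param = MSM_SUBMITQUEUE_PARAM_FAULTS,
		.len = sizeof(*faults),	/* kernel copies min(len, sizeof(u32)) */
	};

	return drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &req);
}

drmIoctl() returns -1 with errno set on failure: ENOENT for an
unknown queue id, EINVAL for an unrecognized param.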
args->flags, &args->id);
}
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_submitqueue_query *args = data;
+ void __user *ptr = (void __user *)(uintptr_t) args->data;
+
+ return msm_submitqueue_query(file->driver_priv, args->id,
+ args->param, ptr, args->len);
+}
+
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
{
DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close,
DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query,
+ DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
u32 id);
int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio,
u32 flags, u32 *id);
+int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param,
+ void __user *data, u32 len);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
+
void msm_submitqueue_destroy(struct kref *kref);
struct hdmi;
round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
}
+/*
+ * Assume the oldest un-retired submit on the hung ring is the one
+ * that faulted: charge the fault to its queue before freeing it.
+ */
+static void retire_guilty_submit(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct msm_gem_submit *submit = list_first_entry_or_null(&ring->submits,
+ struct msm_gem_submit, node);
+
+ if (!submit)
+ return;
+
+ submit->queue->faults++;
+
+ msm_gem_submit_free(submit);
+}
+
/*
* Hangcheck detection for locked gpu:
*/
inactive_cancel(gpu);
- FOR_EACH_RING(gpu, ring, i) {
- uint32_t fence = gpu->funcs->last_fence(gpu, ring);
-
+ /* Retire all submits that have already completed */
+ FOR_EACH_RING(gpu, ring, i)
retire_submits(gpu, ring,
- gpu->funcs->active_ring(gpu) == ring ?
- fence + 1 : fence);
- }
+ gpu->funcs->last_fence(gpu, ring));
+
+ retire_guilty_submit(gpu, gpu->funcs->active_ring(gpu));
/* Recover the GPU */
gpu->funcs->recover(gpu);
{
struct msm_gpu_submitqueue *entry;
+ if (!ctx)
+ return NULL;
+
read_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
return msm_submitqueue_create(ctx, 3, 0, NULL);
}
+int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param,
+ void __user *data, u32 len)
+{
+ struct msm_gpu_submitqueue *queue = msm_submitqueue_get(ctx, id);
+ int ret = 0;
+
+ if (!queue)
+ return -ENOENT;
+
+ if (param == MSM_SUBMITQUEUE_PARAM_FAULTS) {
+ /* Copy no more than the size of the fault counter */
+ u32 size = min_t(u32, len, sizeof(queue->faults));
+
+ if (copy_to_user(data, &queue->faults, size))
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
__u32 id; /* out, identifier */
};
+#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
+
+struct drm_msm_submitqueue_query {
+ __u64 data; /* in, pointer to the return buffer */
+ __u32 id; /* in, submitqueue id */
+ __u32 param; /* in, MSM_SUBMITQUEUE_PARAM_x */
+ __u32 len; /* in, length of the return buffer */
+ __u32 pad; /* explicit padding, avoids implicit uapi struct padding */
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
#define DRM_MSM_GEM_SVM_NEW 0x09
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
+#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
#define DRM_SDE_WB_CONFIG 0x40
#define DRM_MSM_REGISTER_EVENT 0x41
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE \
DRM_IOW(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, \
struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, \
+ struct drm_msm_submitqueue_query)
#if defined(__cplusplus)
}