*/
static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- /* If the dispatcher is busy then schedule the work for later */
- if (!mutex_trylock(&dispatcher->mutex)) {
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
- return;
- }
-
- _adreno_dispatcher_issuecmds(adreno_dev);
- mutex_unlock(&dispatcher->mutex);
+ /*
+ * Submission is now funneled through the dispatcher thread, which
+ * takes dispatcher->mutex itself, so the trylock fast path is gone;
+ * always hand off to the thread via schedule().
+ */
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
}
/**
mutex_unlock(&device->mutex);
}
-static void adreno_dispatcher_work(struct kthread_work *work)
+/*
+ * Retire completed commands and update dispatcher timers/power state.
+ * Called only from adreno_dispatcher_thread(), which already holds
+ * dispatcher->mutex, so the locking that used to live here is gone.
+ */
+static void adreno_dispatcher_work(struct adreno_device *adreno_dev)
{
- struct adreno_dispatcher *dispatcher =
- container_of(work, struct adreno_dispatcher, work);
- struct adreno_device *adreno_dev =
- container_of(dispatcher, struct adreno_device, dispatcher);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int count = 0;
unsigned int i = 0;
- mutex_lock(&dispatcher->mutex);
-
/*
 * As long as there are inflight commands, process retired comamnds from
 * all drawqueues
_dispatcher_update_timers(adreno_dev);
else
_dispatcher_power_down(adreno_dev);
+}
- mutex_unlock(&dispatcher->mutex);
+/*
+ * adreno_dispatcher_thread() - main loop of the dispatcher kthread
+ * @data: struct adreno_device pointer handed in at kthread_run() time
+ *
+ * Sleeps until adreno_dispatcher_schedule() flips the tristate to
+ * THREAD_REQ (or kthread_stop() is called), then runs
+ * adreno_dispatcher_work() under dispatcher->mutex, repeating while new
+ * requests race in, before dropping back to THREAD_IDLE.
+ */
+static int adreno_dispatcher_thread(void *data)
+{
+ static const struct sched_param sched_rt_prio = {
+ .sched_priority = 16
+ };
+ struct adreno_device *adreno_dev = data;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /* Dispatch latency matters: promote ourselves to SCHED_FIFO */
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_rt_prio);
+
+ while (1) {
+ bool should_stop;
+
+ /*
+ * Sleep until a request is posted (REQ -> ACTIVE transition)
+ * or we are asked to stop. The cmpxchg in the condition is
+ * safe to re-evaluate if wait_event() rechecks it.
+ */
+ wait_event(dispatcher->cmd_waitq,
+ (should_stop = kthread_should_stop()) ||
+ atomic_cmpxchg(&dispatcher->state, THREAD_REQ,
+ THREAD_ACTIVE) == THREAD_REQ);
+
+ if (should_stop)
+ break;
+
+ mutex_lock(&dispatcher->mutex);
+ do {
+ adreno_dispatcher_work(adreno_dev);
+ } while (atomic_cmpxchg(&dispatcher->state, THREAD_REQ,
+ THREAD_ACTIVE) == THREAD_REQ);
+ mutex_unlock(&dispatcher->mutex);
+
+ /*
+ * Go idle only if no new request raced in after the loop:
+ * a concurrent THREAD_REQ makes this cmpxchg fail, leaving
+ * the state set so the wait condition fires immediately.
+ */
+ atomic_cmpxchg(&dispatcher->state, THREAD_ACTIVE, THREAD_IDLE);
+ }
+
+ return 0;
}
void adreno_dispatcher_schedule(struct kgsl_device *device)
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- queue_kthread_work(&kgsl_driver.worker, &dispatcher->work);
+ /*
+ * Post a request unconditionally but only issue a wakeup on an
+ * IDLE -> REQ transition. If the thread is already REQ/ACTIVE the
+ * THREAD_REQ state makes it run adreno_dispatcher_work() once more
+ * before it goes back to sleep, so no wakeup is needed.
+ */
+ if (atomic_xchg(&dispatcher->state, THREAD_REQ) == THREAD_IDLE)
+ wake_up(&dispatcher->cmd_waitq);
}
/**
int i;
struct adreno_ringbuffer *rb;
+ /*
+ * kthread_stop() wakes the thread and waits for it to exit; the
+ * wait_event() condition in adreno_dispatcher_thread() checks
+ * kthread_should_stop() first, so this cannot hang. Stop the thread
+ * before tearing the rest of the dispatcher down.
+ */
+ kthread_stop(dispatcher->thread);
+
mutex_lock(&dispatcher->mutex);
del_timer_sync(&dispatcher->timer);
del_timer_sync(&dispatcher->fault_timer);
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- init_kthread_work(&dispatcher->work, adreno_dispatcher_work);
-
init_completion(&dispatcher->idle_gate);
complete_all(&dispatcher->idle_gate);
plist_head_init(&dispatcher->pending);
spin_lock_init(&dispatcher->plist_lock);
+ init_waitqueue_head(&dispatcher->cmd_waitq);
+ /*
+ * Use the atomic API instead of struct-assigning an ATOMIC_INIT()
+ * compound literal: ATOMIC_INIT() is for static initialization only.
+ * The state must be initialized before the thread starts.
+ */
+ atomic_set(&dispatcher->state, THREAD_IDLE);
+ dispatcher->thread = kthread_run(adreno_dispatcher_thread, adreno_dev,
+ "adreno_dispatch");
+ if (IS_ERR(dispatcher->thread))
+ return PTR_ERR(dispatcher->thread);
+
ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
&device->dev->kobj, "dispatch");
* @fault: Non-zero if a fault was detected.
* @pending: Priority list of contexts waiting to submit drawobjs
* @plist_lock: Spin lock to protect the pending queue
- * @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
* @idle_gate: Gate to wait on for dispatcher to idle
* @disp_preempt_fair_sched: If set then dispatcher will try to be fair to
* starving RB's by scheduling them in and enforcing a minimum time slice
* for every RB that is scheduled to run on the device
+ * @thread: Kthread for the command dispatcher
+ * @cmd_waitq: Waitqueue for the command dispatcher
+ * @state: Atomic tristate to control the dispatcher thread
*/
struct adreno_dispatcher {
struct mutex mutex;
atomic_t fault;
struct plist_head pending;
spinlock_t plist_lock;
- struct kthread_work work;
struct kobject kobj;
struct completion idle_gate;
unsigned int disp_preempt_fair_sched;
+ struct task_struct *thread;
+ wait_queue_head_t cmd_waitq;
+ atomic_t state;
+};
+
+/*
+ * Tristate for the dispatcher thread handshake:
+ * THREAD_IDLE - thread asleep, no work posted (a wakeup is required)
+ * THREAD_REQ - work posted; wakeup pending or a re-run was requested
+ * THREAD_ACTIVE - thread currently processing work
+ */
+enum adreno_dispatcher_thread_state {
+ THREAD_IDLE,
+ THREAD_REQ,
+ THREAD_ACTIVE
};
enum adreno_dispatcher_flags {