From: Christian Gromm
Date: Fri, 19 Aug 2016 09:12:54 +0000 (+0200)
Subject: staging: most: hdm-usb: fix race between enqueue and most_stop_enqueue
X-Git-Tag: v4.9-rc1~119^2~1026
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=bf9503f11dedf40158a8a5847f2d482bc15cd82b;p=tomoyo%2Ftomoyo-test1.git

staging: most: hdm-usb: fix race between enqueue and most_stop_enqueue

The "broken pipe" handlers of the USB-HDM call most_stop_enqueue() to
stop the MBO traffic before returning all MBOs to the Mostcore. As the
enqueue() call from the Mostcore may run in parallel with
most_stop_enqueue(), the HDM may end up in an inconsistent state and
crash the kernel.

This patch synchronizes enqueue(), most_stop_enqueue() and
most_resume_enqueue() with a mutex, hence avoiding the race condition.

Signed-off-by: Andrey Shvetsov
Signed-off-by: Christian Gromm
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 7f00aafa9f21..8d8c72c0fbf2 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -415,7 +415,6 @@ static void hdm_write_completion(struct urb *urb)
 	switch (urb->status) {
 	case -EPIPE:
 		dev_warn(dev, "Broken OUT pipe detected\n");
-		most_stop_enqueue(&mdev->iface, channel);
 		mdev->is_channel_healthy[channel] = false;
 		mbo->status = MBO_E_INVAL;
 		mdev->clear_work[channel].pipe = urb->pipe;
@@ -578,7 +577,6 @@ static void hdm_read_completion(struct urb *urb)
 	switch (urb->status) {
 	case -EPIPE:
 		dev_warn(dev, "Broken IN pipe detected\n");
-		most_stop_enqueue(&mdev->iface, channel);
 		mdev->is_channel_healthy[channel] = false;
 		mbo->status = MBO_E_INVAL;
 		mdev->clear_work[channel].pipe = urb->pipe;
@@ -928,6 +926,7 @@ static void wq_clear_halt(struct work_struct *wq_obj)
 	int pipe = clear_work->pipe;
 
 	mutex_lock(&mdev->io_mutex);
+	most_stop_enqueue(&mdev->iface, channel);
 	free_anchored_buffers(mdev, channel, MBO_E_INVAL);
 	if (usb_clear_halt(mdev->usb_device, pipe))
 		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index b03cdc99b711..db0606ca9f79 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -51,6 +51,7 @@ struct most_c_obj {
 	u16 channel_id;
 	bool is_poisoned;
 	struct mutex start_mutex;
+	struct mutex nq_mutex; /* nq thread synchronization */
 	int is_starving;
 	struct most_interface *iface;
 	struct most_inst_obj *inst;
@@ -1131,18 +1132,18 @@ static inline void trash_mbo(struct mbo *mbo)
 	spin_unlock_irqrestore(&c->fifo_lock, flags);
 }
 
-static struct mbo *get_hdm_mbo(struct most_c_obj *c)
+static bool hdm_mbo_ready(struct most_c_obj *c)
 {
-	unsigned long flags;
-	struct mbo *mbo;
+	bool empty;
 
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	if (c->enqueue_halt || list_empty(&c->halt_fifo))
-		mbo = NULL;
-	else
-		mbo = list_pop_mbo(&c->halt_fifo);
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-	return mbo;
+	if (c->enqueue_halt)
+		return false;
+
+	spin_lock_irq(&c->fifo_lock);
+	empty = list_empty(&c->halt_fifo);
+	spin_unlock_irq(&c->fifo_lock);
+
+	return !empty;
 }
 
 static void nq_hdm_mbo(struct mbo *mbo)
@@ -1160,20 +1161,32 @@ static int hdm_enqueue_thread(void *data)
 {
 	struct most_c_obj *c = data;
 	struct mbo *mbo;
+	int ret;
 	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
 
 	while (likely(!kthread_should_stop())) {
 		wait_event_interruptible(c->hdm_fifo_wq,
-					 (mbo = get_hdm_mbo(c)) ||
+					 hdm_mbo_ready(c) ||
 					 kthread_should_stop());
 
-		if (unlikely(!mbo))
+		mutex_lock(&c->nq_mutex);
+		spin_lock_irq(&c->fifo_lock);
+		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
+			spin_unlock_irq(&c->fifo_lock);
+			mutex_unlock(&c->nq_mutex);
 			continue;
+		}
+
+		mbo = list_pop_mbo(&c->halt_fifo);
+		spin_unlock_irq(&c->fifo_lock);
 
 		if (c->cfg.direction == MOST_CH_RX)
 			mbo->buffer_length = c->cfg.buffer_size;
 
-		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
+		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
+		mutex_unlock(&c->nq_mutex);
+
+		if (unlikely(ret)) {
 			pr_err("hdm enqueue failed\n");
 			nq_hdm_mbo(mbo);
 			c->hdm_enqueue_task = NULL;
@@ -1759,6 +1772,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
 		init_completion(&c->cleanup);
 		atomic_set(&c->mbo_ref, 0);
 		mutex_init(&c->start_mutex);
+		mutex_init(&c->nq_mutex);
 		list_add_tail(&c->list, &inst->channel_list);
 	}
 	pr_info("registered new MOST device mdev%d (%s)\n",
@@ -1824,8 +1838,12 @@ void most_stop_enqueue(struct most_interface *iface, int id)
 {
 	struct most_c_obj *c = get_channel_by_iface(iface, id);
 
-	if (likely(c))
-		c->enqueue_halt = true;
+	if (!c)
+		return;
+
+	mutex_lock(&c->nq_mutex);
+	c->enqueue_halt = true;
+	mutex_unlock(&c->nq_mutex);
 }
 EXPORT_SYMBOL_GPL(most_stop_enqueue);
 
@@ -1841,9 +1859,12 @@ void most_resume_enqueue(struct most_interface *iface, int id)
 {
 	struct most_c_obj *c = get_channel_by_iface(iface, id);
 
-	if (unlikely(!c))
+	if (!c)
 		return;
+
+	mutex_lock(&c->nq_mutex);
 	c->enqueue_halt = false;
+	mutex_unlock(&c->nq_mutex);
 	wake_up_interruptible(&c->hdm_fifo_wq);
 }
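
Not part of the patch: the following is a minimal user-space sketch of the
locking rule the hunks above introduce. The enqueue path re-checks the halt
flag and performs the enqueue while holding the same mutex that
most_stop_enqueue()/most_resume_enqueue() take to flip the flag, so a halt
request can never interleave with an enqueue that is already in flight. The
sketch uses pthreads rather than kernel primitives, and all names in it
(struct channel, worker_enqueue, stop_enqueue, resume_enqueue) are
hypothetical and do not exist in the driver.

/*
 * Illustration only: user-space pthread analogue of the nq_mutex
 * pattern added by this patch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct channel {
	pthread_mutex_t nq_mutex;	/* serializes enqueue vs. stop/resume */
	bool enqueue_halt;		/* true while traffic must not be enqueued */
};

/* Enqueue path: re-check the halt flag under the mutex and keep the
 * mutex held across the (mock) hardware enqueue, so stop_enqueue()
 * cannot run in the middle of it. */
static int worker_enqueue(struct channel *c)
{
	int ret = -1;

	pthread_mutex_lock(&c->nq_mutex);
	if (!c->enqueue_halt) {
		printf("enqueue one buffer\n");	/* hand buffer to hardware here */
		ret = 0;
	}
	pthread_mutex_unlock(&c->nq_mutex);
	return ret;
}

/* Stop path: once this returns, no enqueue is in flight and none will
 * start until resume_enqueue() clears the flag again. */
static void stop_enqueue(struct channel *c)
{
	pthread_mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	pthread_mutex_unlock(&c->nq_mutex);
}

static void resume_enqueue(struct channel *c)
{
	pthread_mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	pthread_mutex_unlock(&c->nq_mutex);
}

int main(void)
{
	struct channel c = { PTHREAD_MUTEX_INITIALIZER, false };

	worker_enqueue(&c);	/* succeeds */
	stop_enqueue(&c);	/* from here on, worker_enqueue() is refused */
	worker_enqueue(&c);	/* returns -1 */
	resume_enqueue(&c);
	worker_enqueue(&c);	/* succeeds again */
	return 0;
}

Holding nq_mutex across the enqueue() call in hdm_enqueue_thread(), rather
than only around the flag read, is what guarantees that once
most_stop_enqueue() returns no MBO is still being handed to the hardware.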