
Merge branch 'for-3.19/core' into for-3.19/drivers
author    Jens Axboe <axboe@fb.com>  Mon, 24 Nov 2014 15:05:08 +0000 (08:05 -0700)
committer Jens Axboe <axboe@fb.com>  Mon, 24 Nov 2014 15:05:08 +0000 (08:05 -0700)
block/blk-mq.c

diff --combined block/blk-mq.c
@@@ -107,7 -107,11 +107,7 @@@ static void blk_mq_usage_counter_releas
        wake_up_all(&q->mq_freeze_wq);
  }
  
 -/*
 - * Guarantee no request is in use, so we can change any data structure of
 - * the queue afterward.
 - */
 -void blk_mq_freeze_queue(struct request_queue *q)
 +static void blk_mq_freeze_queue_start(struct request_queue *q)
  {
        bool freeze;
  
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_queues(q, false);
        }
 +}
 +
 +static void blk_mq_freeze_queue_wait(struct request_queue *q)
 +{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
  }
  
 +/*
 + * Guarantee no request is in use, so we can change any data structure of
 + * the queue afterward.
 + */
 +void blk_mq_freeze_queue(struct request_queue *q)
 +{
 +      blk_mq_freeze_queue_start(q);
 +      blk_mq_freeze_queue_wait(q);
 +}
 +
  static void blk_mq_unfreeze_queue(struct request_queue *q)
  {
        bool wake;
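
The hunk above splits the old blk_mq_freeze_queue() into two phases: blk_mq_freeze_queue_start() kills the percpu usage counter and kicks the queues, and blk_mq_freeze_queue_wait() blocks until the counter drains to zero. Callers that freeze many queues can now issue all the starts before waiting, so the expensive drains overlap. A toy userspace model of why that pays off (pthreads stand in for the asynchronous percpu-ref drain; the names and the 100ms figure are illustrative, not kernel code):

/* Toy model of the start/wait split -- not kernel code.  Each "queue"
 * takes ~100ms to drain; starting every drain first and only then
 * waiting keeps the total near one drain period instead of N of them. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NQUEUES 4

static pthread_t drains[NQUEUES];

static void *drain(void *arg)
{
        usleep(100 * 1000);     /* stand-in for the RCU-backed ref drain */
        return NULL;
}

static void freeze_start(int i) /* analogue of blk_mq_freeze_queue_start() */
{
        pthread_create(&drains[i], NULL, drain, NULL);
}

static void freeze_wait(int i)  /* analogue of blk_mq_freeze_queue_wait() */
{
        pthread_join(drains[i], NULL);
}

int main(void)
{
        int i;

        for (i = 0; i < NQUEUES; i++)   /* phase 1: start all freezes */
                freeze_start(i);
        for (i = 0; i < NQUEUES; i++)   /* phase 2: wait for each */
                freeze_wait(i);
        puts("all queues frozen after ~one drain period");
        return 0;
}
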
@@@ -798,10 -788,11 +798,11 @@@ static void __blk_mq_run_hw_queue(struc
   */
  static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
  {
-       int cpu = hctx->next_cpu;
+       if (hctx->queue->nr_hw_queues == 1)
+               return WORK_CPU_UNBOUND;
  
        if (--hctx->next_cpu_batch <= 0) {
-               int next_cpu;
+               int cpu = hctx->next_cpu, next_cpu;
  
                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
                if (next_cpu >= nr_cpu_ids)
  
                hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+               return cpu;
        }
  
-       return cpu;
+       return hctx->next_cpu;
  }
  
  void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                put_cpu();
        }
  
-       if (hctx->queue->nr_hw_queues == 1)
-               kblockd_schedule_delayed_work(&hctx->run_work, 0);
-       else {
-               unsigned int cpu;
-               cpu = blk_mq_hctx_next_cpu(hctx);
-               kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
-       }
+       kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+                       &hctx->run_work, 0);
  }
  
  void blk_mq_run_queues(struct request_queue *q, bool async)
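
The hunk above folds the single-hw-queue special case into blk_mq_hctx_next_cpu() itself: with one hardware queue it returns WORK_CPU_UNBOUND (let the workqueue core pick any CPU), otherwise it round-robins across hctx->cpumask in batches, handing out the same CPU BLK_MQ_CPU_WORK_BATCH times and returning the old CPU one last time at the rollover. A small userspace model of that batching (illustrative only; the modular advance stands in for the cpumask_next()/wrap-around walk):

/* Toy model of the batched round-robin in blk_mq_hctx_next_cpu()
 * -- userspace C, not kernel code.  The same CPU is handed out
 * BATCH times before advancing, keeping work sticky to one CPU
 * for a while without pinning it there forever. */
#include <stdio.h>

#define NR_CPUS 4
#define BATCH   8               /* stand-in for BLK_MQ_CPU_WORK_BATCH */

static int next_cpu;
static int next_cpu_batch = BATCH;

static int hctx_next_cpu(void)
{
        if (--next_cpu_batch <= 0) {
                int cpu = next_cpu;

                next_cpu = (next_cpu + 1) % NR_CPUS;  /* cpumask walk analogue */
                next_cpu_batch = BATCH;
                return cpu;     /* old CPU gets one final use */
        }
        return next_cpu;
}

int main(void)
{
        int i;

        for (i = 0; i < 20; i++)
                printf("%d ", hctx_next_cpu());
        printf("\n");   /* eight 0s, eight 1s, then 2s */
        return 0;
}
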
@@@ -929,16 -916,8 +926,8 @@@ static void blk_mq_delay_work_fn(struc
  
  void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
  {
-       unsigned long tmo = msecs_to_jiffies(msecs);
-       if (hctx->queue->nr_hw_queues == 1)
-               kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
-       else {
-               unsigned int cpu;
-               cpu = blk_mq_hctx_next_cpu(hctx);
-               kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
-       }
+       kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+                       &hctx->delay_work, msecs_to_jiffies(msecs));
  }
  EXPORT_SYMBOL(blk_mq_delay_queue);
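
The same consolidation lands in blk_mq_delay_queue(): both branches collapse into one unconditional kblockd_schedule_delayed_work_on() call, because the CPU choice, including the "any CPU" case, now lives entirely in blk_mq_hctx_next_cpu(). WORK_CPU_UNBOUND tells the workqueue core not to pin the work to a specific CPU, which is what the removed kblockd_schedule_delayed_work() branch did anyway. A toy model of the design move -- pushing the branch into the selector so call sites stay unconditional (illustrative names only, not the kernel API):

/* Toy model -- not kernel code.  The branch on nr_hw_queues moves
 * into the CPU selector, so every call site becomes one
 * unconditional schedule_on(select_cpu(...), ...) call. */
#include <stdio.h>

#define CPU_UNBOUND (-1)        /* stand-in for WORK_CPU_UNBOUND */

static int select_cpu(int nr_hw_queues)
{
        if (nr_hw_queues == 1)
                return CPU_UNBOUND;     /* let the core pick any CPU */
        return 2;                       /* arbitrary pinned CPU for the demo */
}

static void schedule_on(int cpu, const char *work)
{
        if (cpu == CPU_UNBOUND)
                printf("%s: queued unbound\n", work);
        else
                printf("%s: queued on CPU %d\n", work, cpu);
}

int main(void)
{
        schedule_on(select_cpu(1), "delay_work");  /* single hw queue */
        schedule_on(select_cpu(4), "delay_work");  /* multiple hw queues */
        return 0;
}
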
  
@@@ -1972,7 -1951,7 +1961,7 @@@ void blk_mq_free_queue(struct request_q
  /* Basically redo blk_mq_init_queue with queue frozen */
  static void blk_mq_queue_reinit(struct request_queue *q)
  {
 -      blk_mq_freeze_queue(q);
 +      WARN_ON_ONCE(!q->mq_freeze_depth);
  
        blk_mq_sysfs_unregister(q);
  
        blk_mq_map_swqueue(q);
  
        blk_mq_sysfs_register(q);
 -
 -      blk_mq_unfreeze_queue(q);
  }
  
  static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                return NOTIFY_OK;
  
        mutex_lock(&all_q_mutex);
 +
 +      /*
 +       * We need to freeze and reinit all existing queues.  Freezing
 +       * involves synchronous wait for an RCU grace period and doing it
 +       * one by one may take a long time.  Start freezing all queues in
 +       * one swoop and then wait for the completions so that freezing can
 +       * take place in parallel.
 +       */
 +      list_for_each_entry(q, &all_q_list, all_q_node)
 +              blk_mq_freeze_queue_start(q);
 +      list_for_each_entry(q, &all_q_list, all_q_node)
 +              blk_mq_freeze_queue_wait(q);
 +
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_queue_reinit(q);
 +
 +      list_for_each_entry(q, &all_q_list, all_q_node)
 +              blk_mq_unfreeze_queue(q);
 +
        mutex_unlock(&all_q_mutex);
        return NOTIFY_OK;
  }
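
The final hunk hoists freezing out of blk_mq_queue_reinit() into the CPU-hotplug notifier: the notifier starts every freeze, waits for all of them (overlapping the RCU grace periods, per the comment in the hunk), reinits each queue, then unfreezes them all. Reinit itself now only asserts the precondition with WARN_ON_ONCE(!q->mq_freeze_depth). A minimal userspace sketch of that caller-holds-the-freeze contract (illustrative names; plain assert() stands in for WARN_ON_ONCE):

/* Sketch of the new contract -- not kernel code.  reinit() no longer
 * freezes on its own; it only checks that the caller already did. */
#include <assert.h>
#include <stdio.h>

struct queue { int freeze_depth; };

static void freeze(struct queue *q)   { q->freeze_depth++; }
static void unfreeze(struct queue *q) { q->freeze_depth--; }

static void queue_reinit(struct queue *q)
{
        assert(q->freeze_depth > 0);    /* WARN_ON_ONCE() analogue */
        /* ... remap software queues and re-register sysfs here,
         * knowing no request can be in flight ... */
}

int main(void)
{
        struct queue q = { 0 };

        freeze(&q);         /* caller freezes (possibly many queues at once) */
        queue_reinit(&q);   /* reinit runs under the caller's freeze */
        unfreeze(&q);
        puts("reinit done under caller-held freeze");
        return 0;
}
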