
rcu: Remove "cpu" argument to rcu_pending()
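
This change (together with the related cleanups in the hunks below) removes explicit "cpu" and "rdtp" parameters from functions that only ever operate on the running CPU, letting them resolve their own per-CPU data via this_cpu_ptr()/smp_processor_id() instead of having callers pass it in. A minimal sketch of the before/after pattern, using made-up names (example_data, example_pending_*) rather than the actual RCU code:

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU structure, standing in for struct rcu_data. */
struct example_data {
	int qlen;
};
static DEFINE_PER_CPU(struct example_data, example_data);

/* Old shape: the caller must know and pass the CPU of interest. */
static int example_pending_old(int cpu)
{
	return per_cpu_ptr(&example_data, cpu)->qlen != 0;
}

/*
 * New shape: the callee looks up its own CPU's data.  This is only
 * valid because the function is always invoked on the CPU it checks
 * (e.g. from the scheduling-clock interrupt), so the lookup cannot
 * race with migration.
 */
static int example_pending_new(void)
{
	return this_cpu_ptr(&example_data)->qlen != 0;
}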
[android-x86/kernel.git] kernel/rcu/tree.c
index 133e472..0670ba3 100644
@@ -105,7 +105,7 @@ struct rcu_state sname##_state = { \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
 }; \
-DEFINE_PER_CPU(struct rcu_data, sname##_data)
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
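
The DEFINE_PER_CPU_SHARED_ALIGNED variant above places each CPU's rcu_data in the cacheline-aligned per-CPU section, so one CPU's frequent updates do not share a cache line with a neighbouring CPU's copy. It is used exactly like the plain macro; a small illustration with made-up variable names:

#include <linux/percpu.h>

struct example_stats {
	unsigned long ticks;
};

/* Plain per-CPU variable: no cache-line alignment guarantee. */
static DEFINE_PER_CPU(struct example_stats, example_stats_plain);

/*
 * Cacheline-aligned per-CPU variable: each CPU's instance starts on its
 * own cache line, avoiding false sharing when adjacent CPUs write their
 * copies at a high rate (as rcu_data is written on every tick).
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct example_stats, example_stats_aligned);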
@@ -325,7 +325,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
                                  unsigned long *maxj),
                         bool *isidle, unsigned long *maxj);
 static void force_quiescent_state(struct rcu_state *rsp);
-static int rcu_pending(int cpu);
+static int rcu_pending(void);
 
 /*
  * Return the number of RCU-sched batches processed thus far for debug & stats.
@@ -510,11 +510,11 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
-                               bool user)
+static void rcu_eqs_enter_common(long long oldval, bool user)
 {
        struct rcu_state *rsp;
        struct rcu_data *rdp;
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
        trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
        if (!user && !is_idle_task(current)) {
@@ -565,7 +565,7 @@ static void rcu_eqs_enter(bool user)
        WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
                rdtp->dynticks_nesting = 0;
-               rcu_eqs_enter_common(rdtp, oldval, user);
+               rcu_eqs_enter_common(oldval, user);
        } else {
                rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
        }
@@ -589,7 +589,7 @@ void rcu_idle_enter(void)
 
        local_irq_save(flags);
        rcu_eqs_enter(false);
-       rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
+       rcu_sysidle_enter(0);
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -639,8 +639,8 @@ void rcu_irq_exit(void)
        if (rdtp->dynticks_nesting)
                trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
        else
-               rcu_eqs_enter_common(rdtp, oldval, true);
-       rcu_sysidle_enter(rdtp, 1);
+               rcu_eqs_enter_common(oldval, true);
+       rcu_sysidle_enter(1);
        local_irq_restore(flags);
 }
 
@@ -651,9 +651,10 @@ void rcu_irq_exit(void)
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
-                              int user)
+static void rcu_eqs_exit_common(long long oldval, int user)
 {
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
        rcu_dynticks_task_exit();
        smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
        atomic_inc(&rdtp->dynticks);
@@ -691,7 +692,7 @@ static void rcu_eqs_exit(bool user)
                rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        } else {
                rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-               rcu_eqs_exit_common(rdtp, oldval, user);
+               rcu_eqs_exit_common(oldval, user);
        }
 }
 
@@ -712,7 +713,7 @@ void rcu_idle_exit(void)
 
        local_irq_save(flags);
        rcu_eqs_exit(false);
-       rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
+       rcu_sysidle_exit(0);
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -763,8 +764,8 @@ void rcu_irq_enter(void)
        if (oldval)
                trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
        else
-               rcu_eqs_exit_common(rdtp, oldval, true);
-       rcu_sysidle_exit(rdtp, 1);
+               rcu_eqs_exit_common(oldval, true);
+       rcu_sysidle_exit(1);
        local_irq_restore(flags);
 }
 
@@ -2387,7 +2388,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
  * false, there is no point in invoking rcu_check_callbacks().
  */
-void rcu_check_callbacks(int cpu, int user)
+void rcu_check_callbacks(int user)
 {
        trace_rcu_utilization(TPS("Start scheduler-tick"));
        increment_cpu_stall_ticks();
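
Call sites in the scheduling-clock path shrink to match the new one-argument signature; roughly, the tick handler now passes only the user/kernel flag. A simplified, illustrative sketch (not part of this diff; the real update_process_times() in kernel/time/timer.c does more than this):

#include <linux/rcupdate.h>

/* Simplified sketch of a scheduling-clock caller after this change. */
static void example_tick(int user_tick)
{
	rcu_check_callbacks(user_tick);	/* was: rcu_check_callbacks(cpu, user_tick) */
}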
@@ -2419,8 +2420,8 @@ void rcu_check_callbacks(int cpu, int user)
 
                rcu_bh_qs();
        }
-       rcu_preempt_check_callbacks(cpu);
-       if (rcu_pending(cpu))
+       rcu_preempt_check_callbacks(smp_processor_id());
+       if (rcu_pending())
                invoke_rcu_core();
        if (user)
                rcu_note_voluntary_context_switch(current);
@@ -3143,12 +3144,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  * by the current CPU, returning 1 if so.  This function is part of the
  * RCU implementation; it is -not- an exported member of the RCU API.
  */
-static int rcu_pending(int cpu)
+static int rcu_pending(void)
 {
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp)
-               if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
+               if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
                        return 1;
        return 0;
 }
@@ -3299,11 +3300,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
                        continue;
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (rcu_is_nocb_cpu(cpu)) {
-                       _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-                                          rsp->n_barrier_done);
-                       atomic_inc(&rsp->barrier_cpu_count);
-                       __call_rcu(&rdp->barrier_head, rcu_barrier_callback,
-                                  rsp, cpu, 0);
+                       if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+                               _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+                                                  rsp->n_barrier_done);
+                       } else {
+                               _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+                                                  rsp->n_barrier_done);
+                               atomic_inc(&rsp->barrier_cpu_count);
+                               __call_rcu(&rdp->barrier_head,
+                                          rcu_barrier_callback, rsp, cpu, 0);
+                       }
                } else if (ACCESS_ONCE(rdp->qlen)) {
                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
                                           rsp->n_barrier_done);