Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 906563a..980ca3c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -102,11 +102,6 @@ int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 /* Number of rcu_nodes at specified level. */
 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
-/* panic() on RCU Stall sysctl. */
-int sysctl_panic_on_rcu_stall __read_mostly;
-/* Commandeer a sysrq key to dump RCU's tree. */
-static bool sysrq_rcu;
-module_param(sysrq_rcu, bool, 0444);
 
 /*
  * The rcu_scheduler_active variable is initialized to the value
@@ -149,7 +144,7 @@ static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
-module_param(kthread_prio, int, 0644);
+module_param(kthread_prio, int, 0444);
 
 /* Delay in jiffies for grace-period initialization delays, debug only. */
 
@@ -406,7 +401,7 @@ static bool rcu_kick_kthreads;
  */
 static ulong jiffies_till_sched_qs = ULONG_MAX;
 module_param(jiffies_till_sched_qs, ulong, 0444);
-static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */
+static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
 
 /*
@@ -424,6 +419,7 @@ static void adjust_jiffies_till_sched_qs(void)
                WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
                return;
        }
+       /* Otherwise, set to the third fqs scan, but bound below on large systems. */
        j = READ_ONCE(jiffies_till_first_fqs) +
                      2 * READ_ONCE(jiffies_till_next_fqs);
        if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
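For concreteness, a minimal userspace sketch of the bound the new comment describes: jiffies_to_sched_qs becomes the time of the third fqs scan, floored at 100 ms (HZ / 10 jiffies) plus a per-CPU allowance on large systems. HZ, nr_cpu_ids, and the fqs intervals below are assumed example values, not kernel defaults; RCU_JIFFIES_FQS_DIV matches kernel/rcu/tree.h of this era.

#include <stdio.h>

#define HZ                  250UL   /* assumed config value */
#define RCU_JIFFIES_FQS_DIV 256UL   /* as in kernel/rcu/tree.h */

int main(void)
{
	unsigned long nr_cpu_ids = 64;               /* assumed */
	unsigned long jiffies_till_first_fqs = 1;    /* assumed */
	unsigned long jiffies_till_next_fqs = 1;     /* assumed */

	/* Time of the third fqs scan: one first-wait plus two next-waits. */
	unsigned long j = jiffies_till_first_fqs +
			  2 * jiffies_till_next_fqs;

	/* Lower bound: 100 ms plus a per-CPU allowance on large systems. */
	unsigned long floor = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;

	if (j < floor)
		j = floor;
	printf("jiffies_to_sched_qs = %lu\n", j);    /* prints 25 here */
	return 0;
}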
@@ -513,74 +509,6 @@ static const char *gp_state_getname(short gs)
 }
 
 /*
- * Show the state of the grace-period kthreads.
- */
-void show_rcu_gp_kthreads(void)
-{
-       int cpu;
-       unsigned long j;
-       unsigned long ja;
-       unsigned long jr;
-       unsigned long jw;
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-
-       j = jiffies;
-       ja = j - READ_ONCE(rcu_state.gp_activity);
-       jr = j - READ_ONCE(rcu_state.gp_req_activity);
-       jw = j - READ_ONCE(rcu_state.gp_wake_time);
-       pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
-               rcu_state.name, gp_state_getname(rcu_state.gp_state),
-               rcu_state.gp_state,
-               rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
-               ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
-               (long)READ_ONCE(rcu_state.gp_seq),
-               (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
-               READ_ONCE(rcu_state.gp_flags));
-       rcu_for_each_node_breadth_first(rnp) {
-               if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
-                       continue;
-               pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
-                       rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
-                       (long)rnp->gp_seq_needed);
-               if (!rcu_is_leaf_node(rnp))
-                       continue;
-               for_each_leaf_node_possible_cpu(rnp, cpu) {
-                       rdp = per_cpu_ptr(&rcu_data, cpu);
-                       if (rdp->gpwrap ||
-                           ULONG_CMP_GE(rcu_state.gp_seq,
-                                        rdp->gp_seq_needed))
-                               continue;
-                       pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-                               cpu, (long)rdp->gp_seq_needed);
-               }
-       }
-       /* sched_show_task(rcu_state.gp_kthread); */
-}
-EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
-
-/* Dump grace-period-request information due to commandeered sysrq. */
-static void sysrq_show_rcu(int key)
-{
-       show_rcu_gp_kthreads();
-}
-
-static struct sysrq_key_op sysrq_rcudump_op = {
-       .handler = sysrq_show_rcu,
-       .help_msg = "show-rcu(y)",
-       .action_msg = "Show RCU tree",
-       .enable_mask = SYSRQ_ENABLE_DUMP,
-};
-
-static int __init rcu_sysrq_init(void)
-{
-       if (sysrq_rcu)
-               return register_sysrq_key('y', &sysrq_rcudump_op);
-       return 0;
-}
-early_initcall(rcu_sysrq_init);
-
-/*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
@@ -1034,27 +962,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 }
 
 /*
- * Handler for the irq_work request posted when a grace period has
- * gone on for too long, but not yet long enough for an RCU CPU
- * stall warning.  Set state appropriately, but just complain if
- * there is unexpected state on entry.
- */
-static void rcu_iw_handler(struct irq_work *iwp)
-{
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-
-       rdp = container_of(iwp, struct rcu_data, rcu_iw);
-       rnp = rdp->mynode;
-       raw_spin_lock_rcu_node(rnp);
-       if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-               rdp->rcu_iw_gp_seq = rnp->gp_seq;
-               rdp->rcu_iw_pending = false;
-       }
-       raw_spin_unlock_rcu_node(rnp);
-}
-
-/*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through a dynticks
  * idle state since the last call to dyntick_save_progress_counter()
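The rcu_iw_handler() removed above is handed only the embedded struct irq_work pointer and uses container_of() to recover the enclosing rcu_data. A self-contained sketch of that recovery idiom, using illustrative stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for struct irq_work and struct rcu_data (illustrative). */
struct work { int pending; };
struct data {
	long rcu_iw_gp_seq;
	struct work rcu_iw;           /* embedded, as in struct rcu_data */
};

/* container_of(): map a member pointer back to the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void handler(struct work *iwp)
{
	struct data *dp = container_of(iwp, struct data, rcu_iw);

	dp->rcu_iw_gp_seq = 100;      /* update state in the outer struct */
	printf("gp_seq now %ld\n", dp->rcu_iw_gp_seq);
}

int main(void)
{
	struct data d = { .rcu_iw_gp_seq = 0 };

	handler(&d.rcu_iw);           /* prints "gp_seq now 100" */
	return 0;
}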
@@ -1167,295 +1074,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        return 0;
 }
 
-static void record_gp_stall_check_time(void)
-{
-       unsigned long j = jiffies;
-       unsigned long j1;
-
-       rcu_state.gp_start = j;
-       j1 = rcu_jiffies_till_stall_check();
-       /* Record ->gp_start before ->jiffies_stall. */
-       smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
-       rcu_state.jiffies_resched = j + j1 / 2;
-       rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
-}
-
-/*
- * Complain about starvation of grace-period kthread.
- */
-static void rcu_check_gp_kthread_starvation(void)
-{
-       struct task_struct *gpk = rcu_state.gp_kthread;
-       unsigned long j;
-
-       j = jiffies - READ_ONCE(rcu_state.gp_activity);
-       if (j > 2 * HZ) {
-               pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
-                      rcu_state.name, j,
-                      (long)rcu_seq_current(&rcu_state.gp_seq),
-                      READ_ONCE(rcu_state.gp_flags),
-                      gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-                      gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
-               if (gpk) {
-                       pr_err("RCU grace-period kthread stack dump:\n");
-                       sched_show_task(gpk);
-                       wake_up_process(gpk);
-               }
-       }
-}
-
-/*
- * Dump stacks of all tasks running on stalled CPUs.  First try using
- * NMIs, but fall back to manual remote stack tracing on architectures
- * that don't support NMI-based stack dumps.  The NMI-triggered stack
- * traces are more accurate because they are printed by the target CPU.
- */
-static void rcu_dump_cpu_stacks(void)
-{
-       int cpu;
-       unsigned long flags;
-       struct rcu_node *rnp;
-
-       rcu_for_each_leaf_node(rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               for_each_leaf_node_possible_cpu(rnp, cpu)
-                       if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
-                               if (!trigger_single_cpu_backtrace(cpu))
-                                       dump_cpu_task(cpu);
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       }
-}
-
-/*
- * If too much time has passed in the current grace period, and if
- * so configured, go kick the relevant kthreads.
- */
-static void rcu_stall_kick_kthreads(void)
-{
-       unsigned long j;
-
-       if (!rcu_kick_kthreads)
-               return;
-       j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
-       if (time_after(jiffies, j) && rcu_state.gp_kthread &&
-           (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
-               WARN_ONCE(1, "Kicking %s grace-period kthread\n",
-                         rcu_state.name);
-               rcu_ftrace_dump(DUMP_ALL);
-               wake_up_process(rcu_state.gp_kthread);
-               WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
-       }
-}
-
-static void panic_on_rcu_stall(void)
-{
-       if (sysctl_panic_on_rcu_stall)
-               panic("RCU Stall\n");
-}
-
-static void print_other_cpu_stall(unsigned long gp_seq)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned long gpa;
-       unsigned long j;
-       int ndetected = 0;
-       struct rcu_node *rnp = rcu_get_root();
-       long totqlen = 0;
-
-       /* Kick and suppress, if so configured. */
-       rcu_stall_kick_kthreads();
-       if (rcu_cpu_stall_suppress)
-               return;
-
-       /*
-        * OK, time to rat on our buddy...
-        * See Documentation/RCU/stallwarn.txt for info on how to debug
-        * RCU CPU stall warnings.
-        */
-       pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
-       print_cpu_stall_info_begin();
-       rcu_for_each_leaf_node(rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               ndetected += rcu_print_task_stall(rnp);
-               if (rnp->qsmask != 0) {
-                       for_each_leaf_node_possible_cpu(rnp, cpu)
-                               if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
-                                       print_cpu_stall_info(cpu);
-                                       ndetected++;
-                               }
-               }
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       }
-
-       print_cpu_stall_info_end();
-       for_each_possible_cpu(cpu)
-               totqlen += rcu_get_n_cbs_cpu(cpu);
-       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
-              smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
-              (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-       if (ndetected) {
-               rcu_dump_cpu_stacks();
-
-               /* Complain about tasks blocking the grace period. */
-               rcu_print_detail_task_stall();
-       } else {
-               if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
-                       pr_err("INFO: Stall ended before state dump start\n");
-               } else {
-                       j = jiffies;
-                       gpa = READ_ONCE(rcu_state.gp_activity);
-                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
-                              rcu_state.name, j - gpa, j, gpa,
-                              READ_ONCE(jiffies_till_next_fqs),
-                              rcu_get_root()->qsmask);
-                       /* In this case, the current CPU might be at fault. */
-                       sched_show_task(current);
-               }
-       }
-       /* Rewrite if needed in case of slow consoles. */
-       if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
-               WRITE_ONCE(rcu_state.jiffies_stall,
-                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-
-       rcu_check_gp_kthread_starvation();
-
-       panic_on_rcu_stall();
-
-       rcu_force_quiescent_state();  /* Kick them all. */
-}
-
-static void print_cpu_stall(void)
-{
-       int cpu;
-       unsigned long flags;
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-       struct rcu_node *rnp = rcu_get_root();
-       long totqlen = 0;
-
-       /* Kick and suppress, if so configured. */
-       rcu_stall_kick_kthreads();
-       if (rcu_cpu_stall_suppress)
-               return;
-
-       /*
-        * OK, time to rat on ourselves...
-        * See Documentation/RCU/stallwarn.txt for info on how to debug
-        * RCU CPU stall warnings.
-        */
-       pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
-       print_cpu_stall_info_begin();
-       raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-       print_cpu_stall_info(smp_processor_id());
-       raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
-       print_cpu_stall_info_end();
-       for_each_possible_cpu(cpu)
-               totqlen += rcu_get_n_cbs_cpu(cpu);
-       pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
-               jiffies - rcu_state.gp_start,
-               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-
-       rcu_check_gp_kthread_starvation();
-
-       rcu_dump_cpu_stacks();
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       /* Rewrite if needed in case of slow consoles. */
-       if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
-               WRITE_ONCE(rcu_state.jiffies_stall,
-                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
-       panic_on_rcu_stall();
-
-       /*
-        * Attempt to revive the RCU machinery by forcing a context switch.
-        *
-        * A context switch would normally allow the RCU state machine to make
-        * progress and it could be we're stuck in kernel space without context
-        * switches for an entirely unreasonable amount of time.
-        */
-       set_tsk_need_resched(current);
-       set_preempt_need_resched();
-}
-
-static void check_cpu_stall(struct rcu_data *rdp)
-{
-       unsigned long gs1;
-       unsigned long gs2;
-       unsigned long gps;
-       unsigned long j;
-       unsigned long jn;
-       unsigned long js;
-       struct rcu_node *rnp;
-
-       if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
-           !rcu_gp_in_progress())
-               return;
-       rcu_stall_kick_kthreads();
-       j = jiffies;
-
-       /*
-        * Lots of memory barriers to reject false positives.
-        *
-        * The idea is to pick up rcu_state.gp_seq, then
-        * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
-        * another copy of rcu_state.gp_seq.  These values are updated in
-        * the opposite order with memory barriers (or equivalent) during
-        * grace-period initialization and cleanup.  Now, a false positive
-        * can occur if we get a new value of rcu_state.gp_start and an old
-        * value of rcu_state.jiffies_stall.  But given the memory barriers,
-        * the only way that this can happen is if one grace period ends
-        * and another starts between these two fetches.  This is detected
-        * by comparing the second fetch of rcu_state.gp_seq with the
-        * previous fetch from rcu_state.gp_seq.
-        *
-        * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
-        * and rcu_state.gp_start suffice to forestall false positives.
-        */
-       gs1 = READ_ONCE(rcu_state.gp_seq);
-       smp_rmb(); /* Pick up ->gp_seq first... */
-       js = READ_ONCE(rcu_state.jiffies_stall);
-       smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-       gps = READ_ONCE(rcu_state.gp_start);
-       smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
-       gs2 = READ_ONCE(rcu_state.gp_seq);
-       if (gs1 != gs2 ||
-           ULONG_CMP_LT(j, js) ||
-           ULONG_CMP_GE(gps, js))
-               return; /* No stall or GP completed since entering function. */
-       rnp = rdp->mynode;
-       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
-       if (rcu_gp_in_progress() &&
-           (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
-           cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-               /* We haven't checked in, so go dump stack. */
-               print_cpu_stall();
-
-       } else if (rcu_gp_in_progress() &&
-                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
-                  cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-               /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(gs2);
-       }
-}
-
-/**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
- *
- * The caller must disable hard irqs.
- */
-void rcu_cpu_stall_reset(void)
-{
-       WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
                              unsigned long gp_seq_req, const char *s)
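The stall-detection code removed in the hunk above compares jiffies-derived timestamps with ULONG_CMP_LT()/ULONG_CMP_GE() rather than plain relational operators, so the tests stay correct when the counter wraps. A small sketch of the idiom; the macro bodies follow include/linux/rcupdate.h:

#include <limits.h>
#include <stdio.h>

/* Wrap-safe unsigned comparisons, as in include/linux/rcupdate.h. */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long js = ULONG_MAX - 5; /* stall deadline just before wrap */
	unsigned long j  = js + 10;       /* "now": 10 ticks later, wrapped to 4 */

	/* Plain j >= js is false after the wrap, but the modular distance is 10. */
	printf("deadline reached: %d\n", ULONG_CMP_GE(j, js) ? 1 : 0); /* 1 */
	printf("still before it:  %d\n", ULONG_CMP_LT(j, js) ? 1 : 0); /* 0 */
	return 0;
}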
@@ -1585,7 +1203,7 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 static void rcu_gp_kthread_wake(void)
 {
        if ((current == rcu_state.gp_kthread &&
-            !in_interrupt() && !in_serving_softirq()) ||
+            !in_irq() && !in_serving_softirq()) ||
            !READ_ONCE(rcu_state.gp_flags) ||
            !rcu_state.gp_kthread)
                return;
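The in_interrupt() to in_irq() change above narrows the context test: in_interrupt() also reads true when softirqs are merely disabled via local_bh_disable(), whereas in_irq() checks the hardirq count alone. A userspace model of the preempt_count bits involved; the masks follow include/linux/preempt.h of this era, while the scenario value is assumed for illustration:

#include <stdio.h>

/* preempt_count bit layout, as in include/linux/preempt.h. */
#define SOFTIRQ_MASK    0x0000ff00UL
#define HARDIRQ_MASK    0x000f0000UL
#define NMI_MASK        0x00100000UL
#define SOFTIRQ_OFFSET  0x00000100UL  /* low softirq bit: serving a softirq */

#define in_irq(pc)             ((pc) & HARDIRQ_MASK)
#define in_serving_softirq(pc) ((pc) & SOFTIRQ_OFFSET)
#define in_interrupt(pc)       ((pc) & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK))

int main(void)
{
	/* local_bh_disable() adds 2 * SOFTIRQ_OFFSET: BH off, no softirq running. */
	unsigned long pc = 2 * SOFTIRQ_OFFSET;

	printf("in_interrupt:       %s\n", in_interrupt(pc) ? "yes" : "no"); /* yes */
	printf("in_irq:             %s\n", in_irq(pc) ? "yes" : "no");       /* no  */
	printf("in_serving_softirq: %s\n", in_serving_softirq(pc) ? "yes" : "no"); /* no */
	return 0;
}

Read against the surrounding condition, the apparent effect is that the GP kthread calling this path with BH disabled previously counted as interrupt context and fell through to wake itself, whereas with in_irq() it now takes the early return instead.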
@@ -2295,11 +1913,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
                return;
        }
        mask = rdp->grpmask;
+       rdp->core_needs_qs = false;
        if ((rnp->qsmask & mask) == 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
-               rdp->core_needs_qs = false;
-
                /*
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
@@ -2548,11 +2165,11 @@ void rcu_sched_clock_irq(int user)
 }
 
 /*
- * Scan the leaf rcu_node structures, processing dyntick state for any that
- * have not yet encountered a quiescent state, using the function specified.
- * Also initiate boosting for any threads blocked on the root rcu_node.
- *
- * The caller must have suppressed start of new grace periods.
+ * Scan the leaf rcu_node structures.  For each structure on which all
+ * CPUs have reported a quiescent state and on which there are tasks
+ * blocking the current grace period, initiate RCU priority boosting.
+ * Otherwise, invoke the specified function to check dyntick state for
+ * each CPU that has not yet reported a quiescent state.
  */
 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 {
@@ -2635,101 +2252,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * This function checks for grace-period requests that fail to motivate
- * RCU to come out of its idle mode.
- */
-void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
-                        const unsigned long gpssdelay)
-{
-       unsigned long flags;
-       unsigned long j;
-       struct rcu_node *rnp_root = rcu_get_root();
-       static atomic_t warned = ATOMIC_INIT(0);
-
-       if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
-           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
-               return;
-       j = jiffies; /* Expensive access, and in the common case we don't get here. */
-       if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
-           time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
-           atomic_read(&warned))
-               return;
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       j = jiffies;
-       if (rcu_gp_in_progress() ||
-           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-           time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
-           time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
-           atomic_read(&warned)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       /* Hold onto the leaf lock to make others see warned==1. */
-
-       if (rnp_root != rnp)
-               raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
-       j = jiffies;
-       if (rcu_gp_in_progress() ||
-           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-           time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
-           time_before(j, rcu_state.gp_activity + gpssdelay) ||
-           atomic_xchg(&warned, 1)) {
-               raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       WARN_ON(1);
-       if (rnp_root != rnp)
-               raw_spin_unlock_rcu_node(rnp_root);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       show_rcu_gp_kthreads();
-}
-
-/*
- * Do a forward-progress check for rcutorture.  This is normally invoked
- * due to an OOM event.  The argument "j" gives the time period during
- * which rcutorture would like progress to have been made.
- */
-void rcu_fwd_progress_check(unsigned long j)
-{
-       unsigned long cbs;
-       int cpu;
-       unsigned long max_cbs = 0;
-       int max_cpu = -1;
-       struct rcu_data *rdp;
-
-       if (rcu_gp_in_progress()) {
-               pr_info("%s: GP age %lu jiffies\n",
-                       __func__, jiffies - rcu_state.gp_start);
-               show_rcu_gp_kthreads();
-       } else {
-               pr_info("%s: Last GP end %lu jiffies ago\n",
-                       __func__, jiffies - rcu_state.gp_end);
-               preempt_disable();
-               rdp = this_cpu_ptr(&rcu_data);
-               rcu_check_gp_start_stall(rdp->mynode, rdp, j);
-               preempt_enable();
-       }
-       for_each_possible_cpu(cpu) {
-               cbs = rcu_get_n_cbs_cpu(cpu);
-               if (!cbs)
-                       continue;
-               if (max_cpu < 0)
-                       pr_info("%s: callbacks", __func__);
-               pr_cont(" %d: %lu", cpu, cbs);
-               if (cbs <= max_cbs)
-                       continue;
-               max_cbs = cbs;
-               max_cpu = cpu;
-       }
-       if (max_cpu >= 0)
-               pr_cont("\n");
-}
-EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
-
 /* Perform RCU core processing work for the current CPU.  */
 static __latent_entropy void rcu_core(struct softirq_action *unused)
 {
@@ -2870,7 +2392,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
                 * Use the rcu:rcu_callback trace event to find the previous
                 * time the callback was passed to __call_rcu().
                 */
-               WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
+               WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
                          head, head->func);
                WRITE_ONCE(head->func, rcu_leak_callback);
                return;
@@ -3559,13 +3081,11 @@ static int rcu_pm_notify(struct notifier_block *self,
        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
-               if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
-                       rcu_expedite_gp();
+               rcu_expedite_gp();
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
-               if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
-                       rcu_unexpedite_gp();
+               rcu_unexpedite_gp();
                break;
        default:
                break;
@@ -3742,8 +3262,7 @@ static void __init rcu_init_geometry(void)
                jiffies_till_first_fqs = d;
        if (jiffies_till_next_fqs == ULONG_MAX)
                jiffies_till_next_fqs = d;
-       if (jiffies_till_sched_qs == ULONG_MAX)
-               adjust_jiffies_till_sched_qs();
+       adjust_jiffies_till_sched_qs();
 
        /* If the compile-time values are accurate, just leave. */
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
@@ -3858,5 +3377,6 @@ void __init rcu_init(void)
        srcu_init();
 }
 
+#include "tree_stall.h"
 #include "tree_exp.h"
 #include "tree_plugin.h"