OSDN Git Service

ftrace: Optimize testing what context current is in
author: Steven Rostedt (VMware) <rostedt@goodmis.org>
Fri, 6 Nov 2020 02:32:38 +0000 (21:32 -0500)
committer: Steven Rostedt (VMware) <rostedt@goodmis.org>
Fri, 6 Nov 2020 13:33:23 +0000 (08:33 -0500)
The preempt_count() is not a simple location in memory; it could be part of
per-CPU code or more. Each access to preempt_count(), or to one of its accessor
functions (like in_interrupt()), takes several cycles. Reading
preempt_count() once, and then testing the returned value to determine the
current context, is slightly faster than using in_nmi() and in_interrupt().

Link: https://lkml.kernel.org/r/20201028115612.780796355@goodmis.org
Link: https://lkml.kernel.org/r/20201106023546.558881845@goodmis.org
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
include/linux/trace_recursion.h

index f2a949d..ac3d734 100644 (file)
@@ -117,22 +117,29 @@ enum {
 
 #define TRACE_CONTEXT_MASK     TRACE_LIST_MAX
 
+/*
+ * Used for setting context
+ *  NMI     = 0
+ *  IRQ     = 1
+ *  SOFTIRQ = 2
+ *  NORMAL  = 3
+ */
+enum {
+       TRACE_CTX_NMI,
+       TRACE_CTX_IRQ,
+       TRACE_CTX_SOFTIRQ,
+       TRACE_CTX_NORMAL,
+};
+
 static __always_inline int trace_get_context_bit(void)
 {
-       int bit;
-
-       if (in_interrupt()) {
-               if (in_nmi())
-                       bit = 0;
-
-               else if (in_irq())
-                       bit = 1;
-               else
-                       bit = 2;
-       } else
-               bit = 3;
+       unsigned long pc = preempt_count();
 
-       return bit;
+       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+               return TRACE_CTX_NORMAL;
+       else
+               return pc & NMI_MASK ? TRACE_CTX_NMI :
+                       pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 }
 
 static __always_inline int trace_test_and_set_recursion(int start, int max)