#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-
- int __init ftrace_dyn_arch_init(void)
- {
- return 0;
- }
-
int ftrace_update_ftrace_func(ftrace_func_t func)
{
+ ftrace_func = func;
return 0;
}
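For context, the pointer saved here is what the arch trampoline dispatches through. A minimal sketch of that pattern, assuming a static ftrace_func and a hypothetical C-level entry point (ftrace_call_dispatch is not an in-tree name; real arches pass their actual ops/regs rather than NULL):

#include <linux/ftrace.h>

/* Latest callback, as stored by ftrace_update_ftrace_func() above.
 * ftrace_stub is the no-op default. */
static ftrace_func_t ftrace_func = ftrace_stub;

/* Hypothetical C-level hook invoked from the arch ftrace_caller
 * trampoline at each patched call site. */
void notrace ftrace_call_dispatch(unsigned long ip, unsigned long parent_ip)
{
	ftrace_func(ip, parent_ip, NULL, NULL);
}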
# define do_ftrace_record_recursion(ip, pip) do { } while (0)
#endif
+ /*
+ * Preemption is guaranteed to be disabled when the returned bit is >= 0.
+ */
static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
- int start, int max)
+ int start)
{
unsigned int val = READ_ONCE(current->trace_recursion);
int bit;
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
- * It could be that preempt_count has not been updated during
- * a switch between contexts. Allow for a single recursion.
+ * If an interrupt occurs during a trace, and another trace
+ * happens in that interrupt but before the preempt_count is
+ * updated to reflect the new interrupt context, then this check
+ * would see a false recursion and the event would be dropped.
+ * Let a single extra instance through via the TRACE_CTX_TRANSITION
+ * bit so those events are not lost.
*/
- bit = TRACE_TRANSITION_BIT;
+ bit = TRACE_CTX_TRANSITION + start;
if (val & (1 << bit)) {
do_ftrace_record_recursion(ip, pip);
return -1;
}
}

val |= 1 << bit;
current->trace_recursion = val;
barrier();
- return bit + 1;
+ preempt_disable_notrace();
+
+ return bit;
}
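The context bit keeps recursion accounting separate per tracing context, which is why an IRQ that interrupts a traced function can itself be traced without tripping the check. A simplified sketch of the idea behind trace_get_context_bit(); the in-tree version derives the level from preempt_count() (recent kernels via interrupt_context_level()), and this enum layout mirrors the in-tree one:

#include <linux/preempt.h>

/* One recursion bit per context, plus the transition bit that absorbs
 * the window where preempt_count does not yet reflect a new interrupt. */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,	/* plain task context */
	TRACE_CTX_TRANSITION,
};

static __always_inline int trace_get_context_bit(void)
{
	unsigned long pc = preempt_count();

	if (pc & NMI_MASK)
		return TRACE_CTX_NMI;
	if (pc & HARDIRQ_MASK)
		return TRACE_CTX_IRQ;
	if (pc & SOFTIRQ_OFFSET)
		return TRACE_CTX_SOFTIRQ;
	return TRACE_CTX_NORMAL;
}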
+ /*
+ * Preemption will be enabled (if it was previously enabled).
+ */
static __always_inline void trace_clear_recursion(int bit)
{
- if (!bit)
- return;
-
+ preempt_enable_notrace();
barrier();
- bit--;
trace_recursion_clear(bit);
}
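Callers treat the pair as a trylock/unlock. A caller-side sketch using the ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock() wrappers that funnel into these helpers (my_callback and do_trace_work are hypothetical names):

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Trylock: a negative bit means we are recursing; drop the event. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* Preemption stays disabled from here until the unlock below. */
	do_trace_work(ip, parent_ip);	/* hypothetical body */

	ftrace_test_recursion_unlock(bit);
}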
/*
- * This function is called from finish_task_switch() when task 'tk' becomes
- * dead, so that we can recycle any kretprobe instances associated
- * with this task. These left over instances represent probed functions
- * that have been called but will never return.
+ * This function is called from delayed_put_task_struct() when a task is
+ * dead and cleaned up to recycle any kretprobe instances associated with
+ * this task. These left over instances represent probed functions that
+ * have been called but will never return.
*/
void kprobe_flush_task(struct task_struct *tk)
{
struct ftrace_ops *op;
int bit;
- bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
+ /*
+ * trace_test_and_set_recursion() will disable preemption, which is
+ * required since some of the ops may be dynamically allocated;
+ * they must be freed only after a synchronize_rcu() grace period.
+ */
+ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
if (bit < 0)
return;
irq_work_queue(&tr->fsnotify_irqwork);
}
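That preempt-disabled window is what makes synchronize_rcu() a sufficient barrier on the teardown side. A sketch of the idea only (free_dynamic_ops is a hypothetical helper, not the in-tree free path):

static void free_dynamic_ops(struct ftrace_ops *ops)
{
	/* List-func callers dereference ops with preemption disabled,
	 * which synchronize_rcu() treats as a read-side critical
	 * section; once it returns, no caller can still see @ops. */
	synchronize_rcu();
	kfree(ops);
}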
-/*
- * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- * defined(CONFIG_FSNOTIFY)
- */
-#else
+#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+ || defined(CONFIG_OSNOISE_TRACER)
#define trace_create_maxlat_file(tr, d_tracer) \
- trace_create_file("tracing_max_latency", 0644, d_tracer, \
- &tr->max_latency, &tracing_max_lat_fops)
+ trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
+ d_tracer, &tr->max_latency, &tracing_max_lat_fops)
+#else
+#define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
#endif
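With the do-nothing fallback in place, call sites can invoke the macro unconditionally. A sketch of a typical call during tracefs setup (init_tracer_files is a hypothetical name; TRACE_MODE_WRITE is the restricted 0640 tracefs mode):

static void init_tracer_files(struct trace_array *tr, struct dentry *d_tracer)
{
	/* Expands to trace_create_file(...) when a latency-measuring
	 * tracer is configured, and to a no-op "do { } while (0)"
	 * otherwise, so the call site needs no #ifdef of its own. */
	trace_create_maxlat_file(tr, d_tracer);
}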
#ifdef CONFIG_TRACER_MAX_TRACE