4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
8 * Data type definitions, declarations, prototypes.
10 * Started by: Thomas Gleixner and Ingo Molnar
12 * For licensing details see kernel-base/COPYING
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
17 #include <uapi/linux/perf_event.h>
20 * Kernel-internal data types and definitions:
23 #ifdef CONFIG_PERF_EVENTS
24 # include <asm/perf_event.h>
25 # include <asm/local64.h>
28 struct perf_guest_info_callbacks {
29 int (*is_in_guest)(void);
30 int (*is_user_mode)(void);
31 unsigned long (*get_guest_ip)(void);
34 #ifdef CONFIG_HAVE_HW_BREAKPOINT
35 #include <asm/hw_breakpoint.h>
38 #include <linux/list.h>
39 #include <linux/mutex.h>
40 #include <linux/rculist.h>
41 #include <linux/rcupdate.h>
42 #include <linux/spinlock.h>
43 #include <linux/hrtimer.h>
45 #include <linux/pid_namespace.h>
46 #include <linux/workqueue.h>
47 #include <linux/ftrace.h>
48 #include <linux/cpu.h>
49 #include <linux/irq_work.h>
50 #include <linux/static_key.h>
51 #include <linux/jump_label_ratelimit.h>
52 #include <linux/atomic.h>
53 #include <linux/sysfs.h>
54 #include <linux/perf_regs.h>
56 #include <linux/cgroup.h>
57 #include <asm/local.h>
59 struct perf_callchain_entry {
61 __u64 ip[PERF_MAX_STACK_DEPTH];
64 struct perf_raw_record {
70 * branch stack layout:
71 * nr: number of taken branches stored in entries[]
73 * Note that nr can vary from sample to sample
74 * branches (to, from) are stored from most recent
75 * to least recent, i.e., entries[0] contains the most recent branch.
78 struct perf_branch_stack {
80 struct perf_branch_entry entries[0];
86 * extra PMU register associated with an event
88 struct hw_perf_event_extra {
89 u64 config; /* register value */
90 unsigned int reg; /* register address or index */
91 int alloc; /* extra register already allocated */
92 int idx; /* index in shared_regs->regs[] */
96 * struct hw_perf_event - performance event hardware details:
98 struct hw_perf_event {
99 #ifdef CONFIG_PERF_EVENTS
101 struct { /* hardware */
104 unsigned long config_base;
105 unsigned long event_base;
106 int event_base_rdpmc;
111 struct hw_perf_event_extra extra_reg;
112 struct hw_perf_event_extra branch_reg;
114 struct { /* software */
115 struct hrtimer hrtimer;
117 struct { /* tracepoint */
118 /* for tp_event->class */
119 struct list_head tp_list;
121 struct { /* intel_cqm */
125 struct list_head cqm_events_entry;
126 struct list_head cqm_groups_entry;
127 struct list_head cqm_group_entry;
129 struct { /* itrace */
132 #ifdef CONFIG_HAVE_HW_BREAKPOINT
133 struct { /* breakpoint */
135 * Crufty hack to avoid the chicken and egg
136 * problem hw_breakpoint has with context
137 * creation and event initialization.
139 struct arch_hw_breakpoint info;
140 struct list_head bp_list;
145 * If the event is a per task event, this will point to the task in
146 * question. See the comment in perf_event_alloc().
148 struct task_struct *target;
151 * hw_perf_event::state flags; used to track the PERF_EF_* state.
153 #define PERF_HES_STOPPED 0x01 /* the counter is stopped */
154 #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
155 #define PERF_HES_ARCH 0x04
160 * The last observed hardware counter value, updated with a
161 * local64_cmpxchg() such that pmu::read() can be called nested.
163 local64_t prev_count;
166 * The period to start the next sample with.
171 * The period we started this sample with.
176 * However much is left of the current period; note that this is
177 * a full 64bit value and allows for generation of periods longer
178 * than hardware might allow.
180 local64_t period_left;
183 * State for throttling the event, see __perf_event_overflow() and
184 * perf_adjust_freq_unthr_context().
190 * State for freq target events, see __perf_event_overflow() and
191 * perf_adjust_freq_unthr_context().
194 u64 freq_count_stamp;
201 * Common implementation detail of pmu::{start,commit,cancel}_txn
203 #define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
204 #define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
207 * pmu::capabilities flags
209 #define PERF_PMU_CAP_NO_INTERRUPT 0x01
210 #define PERF_PMU_CAP_NO_NMI 0x02
211 #define PERF_PMU_CAP_AUX_NO_SG 0x04
212 #define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
213 #define PERF_PMU_CAP_EXCLUSIVE 0x10
214 #define PERF_PMU_CAP_ITRACE 0x20
217 * struct pmu - generic performance monitoring unit
220 struct list_head entry;
222 struct module *module;
224 const struct attribute_group **attr_groups;
229 * various common per-pmu feature flags
233 int * __percpu pmu_disable_count;
234 struct perf_cpu_context __percpu *pmu_cpu_context;
235 atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
237 int hrtimer_interval_ms;
238 u32 events_across_hotplug:1,
242 * Fully disable/enable this PMU, can be used to protect from the PMI
243 * as well as for lazy/batch writing of the MSRs.
245 void (*pmu_enable) (struct pmu *pmu); /* optional */
246 void (*pmu_disable) (struct pmu *pmu); /* optional */
249 * Try and initialize the event for this PMU.
252 * -ENOENT -- @event is not for this PMU
254 * -ENODEV -- @event is for this PMU but PMU not present
255 * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
256 * -EINVAL -- @event is for this PMU but @event is not valid
257 * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
258 * -EACCES -- @event is for this PMU, @event is valid, but no privileges
260 * 0 -- @event is for this PMU and valid
262 * Other error return values are allowed.
264 int (*event_init) (struct perf_event *event);
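/*
 * Illustrative sketch (not part of the original header): an ->event_init()
 * that follows the return-value contract above. The my_pmu_* name and the
 * "no sampling" restriction are hypothetical.
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;		// not ours; the core tries other PMUs
 *
 *		if (is_sampling_event(event))
 *			return -EOPNOTSUPP;	// ours and valid, but not supported here
 *
 *		return 0;
 *	}
 */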
267 * Notification that the event was mapped or unmapped. Called
268 * in the context of the mapping task.
270 void (*event_mapped) (struct perf_event *event); /*optional*/
271 void (*event_unmapped) (struct perf_event *event); /*optional*/
274 * Flags for ->add()/->del()/->start()/->stop(). There are
275 * matching hw_perf_event::state flags.
277 #define PERF_EF_START 0x01 /* start the counter when adding */
278 #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
279 #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
282 * Adds/Removes a counter to/from the PMU, can be done inside a
283 * transaction, see the ->*_txn() methods.
285 * The add/del callbacks will reserve all hardware resources required
286 * to service the event; this includes any counter constraint scheduling, etc.
289 * Called with IRQs disabled and the PMU disabled on the CPU the event is on.
292 * ->add() called without PERF_EF_START should result in the same state
293 * as ->add() followed by ->stop().
295 * ->del() must always stop the event with PERF_EF_UPDATE semantics. If it calls
296 * ->stop(), that ->stop() must deal with already being stopped without PERF_EF_UPDATE.
299 int (*add) (struct perf_event *event, int flags);
300 void (*del) (struct perf_event *event, int flags);
303 * Starts/Stops a counter present on the PMU.
305 * The PMI handler should stop the counter when perf_event_overflow()
306 * returns !0. ->start() will be used to continue.
308 * Also used to change the sample period.
310 * Called with IRQs disabled and the PMU disabled on the CPU the event
311 * is on -- will be called from NMI context when the PMU generates an NMI.
314 * ->stop() with PERF_EF_UPDATE will read the counter and update
315 * period/count values like ->read() would.
317 * ->start() with PERF_EF_RELOAD will reprogram the counter
318 * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
320 void (*start) (struct perf_event *event, int flags);
321 void (*stop) (struct perf_event *event, int flags);
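/*
 * Illustrative sketch, assuming a simple free-running counter and
 * hypothetical my_pmu_* register helpers; it shows how the PERF_EF_*
 * flags are expected to interact with the PERF_HES_* state bits
 * documented next to struct hw_perf_event above.
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		struct hw_perf_event *hwc = &event->hw;
 *
 *		if (flags & PERF_EF_RELOAD)
 *			WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 *
 *		hwc->state = 0;
 *		local64_set(&hwc->prev_count, my_pmu_read_hw(event));
 *		my_pmu_enable_counter(event);
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		struct hw_perf_event *hwc = &event->hw;
 *
 *		if (!(hwc->state & PERF_HES_STOPPED)) {
 *			my_pmu_disable_counter(event);
 *			hwc->state |= PERF_HES_STOPPED;
 *		}
 *
 *		if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
 *			my_pmu_update(event);	// fold the delta since prev_count into event->count
 *			hwc->state |= PERF_HES_UPTODATE;
 *		}
 *	}
 */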
324 * Updates the counter value of the event.
326 * For sampling capable PMUs this will also update the software period
327 * hw_perf_event::period_left field.
329 void (*read) (struct perf_event *event);
332 * Group event scheduling is treated as a transaction: add the
333 * group's events as a whole and perform one schedulability test.
334 * If the test fails, roll back the whole group.
336 * Start the transaction; after this ->add() doesn't need to
337 * do schedulability tests.
341 void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
343 * If ->start_txn() disabled the ->add() schedulability test
344 * then ->commit_txn() is required to perform one. On success
345 * the transaction is closed. On error the transaction is kept
346 * open until ->cancel_txn() is called.
350 int (*commit_txn) (struct pmu *pmu);
352 * Will cancel the transaction; assumes ->del() is called
353 * for each successful ->add() during the transaction.
357 void (*cancel_txn) (struct pmu *pmu);
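/*
 * Illustrative PMU-side sketch of the transaction callbacks, loosely
 * modelled on how hardware PMU drivers defer the group schedulability
 * test; my_pmu_cpu_events and my_pmu_schedule_events() are hypothetical.
 *
 *	static void my_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 *	{
 *		struct my_pmu_cpu *cpuc = this_cpu_ptr(&my_pmu_cpu_events);
 *
 *		cpuc->txn_flags = txn_flags;
 *		if (txn_flags & ~PERF_PMU_TXN_ADD)
 *			return;			// nothing to batch for TXN_READ
 *
 *		perf_pmu_disable(pmu);		// ->add() now only queues events
 *	}
 *
 *	static int my_pmu_commit_txn(struct pmu *pmu)
 *	{
 *		struct my_pmu_cpu *cpuc = this_cpu_ptr(&my_pmu_cpu_events);
 *
 *		if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
 *			cpuc->txn_flags = 0;
 *			return 0;
 *		}
 *
 *		if (my_pmu_schedule_events(cpuc))	// the deferred schedulability test
 *			return -EAGAIN;			// txn stays open; core calls ->cancel_txn()
 *
 *		cpuc->txn_flags = 0;
 *		perf_pmu_enable(pmu);
 *		return 0;
 *	}
 */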
360 * Will return the value for perf_event_mmap_page::index for this event;
361 * if no implementation is provided it will default to: event->hw.idx + 1.
363 int (*event_idx) (struct perf_event *event); /* optional */
366 * context-switches callback
368 void (*sched_task) (struct perf_event_context *ctx,
371 * PMU specific data size
373 size_t task_ctx_size;
377 * Return the count value for a counter.
379 u64 (*count) (struct perf_event *event); /*optional*/
382 * Set up pmu-private data structures for an AUX area
384 void *(*setup_aux) (struct perf_event *event, void **pages,
385 int nr_pages, bool overwrite);
389 * Free pmu-private AUX data structures
391 void (*free_aux) (void *aux); /* optional */
394 * Filter events for PMU-specific reasons.
396 int (*filter_match) (struct perf_event *event); /* optional */
399 * Initial, PMU driver specific configuration.
401 int (*get_drv_configs) (struct perf_event *event,
402 void __user *arg); /* optional */
403 void (*free_drv_configs) (struct perf_event *event);
408 * enum perf_event_active_state - the states of an event
410 enum perf_event_active_state {
411 PERF_EVENT_STATE_EXIT = -3,
412 PERF_EVENT_STATE_ERROR = -2,
413 PERF_EVENT_STATE_OFF = -1,
414 PERF_EVENT_STATE_INACTIVE = 0,
415 PERF_EVENT_STATE_ACTIVE = 1,
419 struct perf_sample_data;
421 typedef void (*perf_overflow_handler_t)(struct perf_event *,
422 struct perf_sample_data *,
423 struct pt_regs *regs);
425 enum perf_group_flag {
426 PERF_GROUP_SOFTWARE = 0x1,
429 #define SWEVENT_HLIST_BITS 8
430 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
432 struct swevent_hlist {
433 struct hlist_head heads[SWEVENT_HLIST_SIZE];
434 struct rcu_head rcu_head;
437 #define PERF_ATTACH_CONTEXT 0x01
438 #define PERF_ATTACH_GROUP 0x02
439 #define PERF_ATTACH_TASK 0x04
440 #define PERF_ATTACH_TASK_DATA 0x08
446 * struct perf_event - performance event kernel representation:
449 #ifdef CONFIG_PERF_EVENTS
451 * entry onto perf_event_context::event_list;
452 * modifications require ctx->lock
453 * RCU safe iterations.
455 struct list_head event_entry;
458 * XXX: group_entry and sibling_list should be mutually exclusive;
459 * either you're a sibling in a group, or you're the group leader.
460 * Rework the code to always use the same list element.
462 * Locked for modification by both ctx->mutex and ctx->lock; holding
463 * either suffices for read.
465 struct list_head group_entry;
466 struct list_head sibling_list;
469 * We need storage to track the entries in perf_pmu_migrate_context; we
470 * cannot use the event_entry because of RCU and we want to keep the
471 * group intact, which avoids us using the other two entries.
473 struct list_head migrate_entry;
475 struct hlist_node hlist_entry;
476 struct list_head active_entry;
479 struct perf_event *group_leader;
482 * Protect the pmu, attributes and context of a group leader.
483 * Note: does not protect the pointer to the group_leader.
485 struct mutex group_leader_mutex;
488 enum perf_event_active_state state;
489 unsigned int attach_state;
491 atomic64_t child_count;
494 * These are the total time in nanoseconds that the event
495 * has been enabled (i.e. eligible to run, and the task has
496 * been scheduled in, if this is a per-task event)
497 * and running (scheduled onto the CPU), respectively.
499 * They are computed from tstamp_enabled, tstamp_running and
500 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
502 u64 total_time_enabled;
503 u64 total_time_running;
506 * These are timestamps used for computing total_time_enabled
507 * and total_time_running when the event is in INACTIVE or
508 * ACTIVE state, measured in nanoseconds from an arbitrary point
510 * tstamp_enabled: the notional time when the event was enabled
511 * tstamp_running: the notional time when the event was scheduled on
512 * tstamp_stopped: in INACTIVE state, the notional time when the
513 * event was scheduled off.
520 * timestamp shadows the actual context timing but it can
521 * be safely used in NMI interrupt context. It reflects the
522 * context time as it was when the event was last scheduled in.
524 * ctx_time already accounts for ctx->timestamp. Therefore to
525 * compute ctx_time for a sample, simply add perf_clock().
529 struct perf_event_attr attr;
533 struct hw_perf_event hw;
535 struct perf_event_context *ctx;
536 atomic_long_t refcount;
539 * These accumulate total time (in nanoseconds) that child
540 * events have been enabled and running, respectively.
542 atomic64_t child_total_time_enabled;
543 atomic64_t child_total_time_running;
546 * Protect attach/detach and child_list:
548 struct mutex child_mutex;
549 struct list_head child_list;
550 struct perf_event *parent;
555 struct list_head owner_entry;
556 struct task_struct *owner;
559 struct mutex mmap_mutex;
562 struct ring_buffer *rb;
563 struct list_head rb_entry;
564 unsigned long rcu_batches;
568 wait_queue_head_t waitq;
569 struct fasync_struct *fasync;
571 /* delayed work for NMIs and such */
575 struct irq_work pending;
577 atomic_t event_limit;
578 struct list_head drv_configs;
580 void (*destroy)(struct perf_event *);
581 struct rcu_head rcu_head;
583 struct pid_namespace *ns;
587 perf_overflow_handler_t overflow_handler;
588 void *overflow_handler_context;
590 #ifdef CONFIG_EVENT_TRACING
591 struct trace_event_call *tp_event;
592 struct event_filter *filter;
593 #ifdef CONFIG_FUNCTION_TRACER
594 struct ftrace_ops ftrace_ops;
598 #ifdef CONFIG_CGROUP_PERF
599 struct perf_cgroup *cgrp; /* cgroup the event is attached to */
600 int cgrp_defer_enabled;
603 #endif /* CONFIG_PERF_EVENTS */
607 * struct perf_event_context - event context structure
609 * Used as a container for task events and CPU events as well:
611 struct perf_event_context {
614 * Protect the states of the events in the list,
615 * nr_active, and the list:
619 * Protect the list of events. Locking either mutex or lock
620 * is sufficient to ensure the list doesn't change; to change
621 * the list you need to lock both the mutex and the spinlock.
625 struct list_head active_ctx_list;
626 struct list_head pinned_groups;
627 struct list_head flexible_groups;
628 struct list_head event_list;
636 struct task_struct *task;
639 * Context clock, runs when context enabled.
645 * These fields let us detect when two contexts have both
646 * been cloned (inherited) from a common ancestor.
648 struct perf_event_context *parent_ctx;
652 int nr_cgroups; /* cgroup evts */
653 void *task_ctx_data; /* pmu specific data */
654 struct rcu_head rcu_head;
656 struct delayed_work orphans_remove;
657 bool orphans_remove_sched;
661 * Number of contexts where an event can trigger:
662 * task, softirq, hardirq, nmi.
664 #define PERF_NR_CONTEXTS 4
667 * struct perf_event_cpu_context - per cpu event context structure
669 struct perf_cpu_context {
670 struct perf_event_context ctx;
671 struct perf_event_context *task_ctx;
675 raw_spinlock_t hrtimer_lock;
676 struct hrtimer hrtimer;
677 ktime_t hrtimer_interval;
678 unsigned int hrtimer_active;
680 struct pmu *unique_pmu;
681 struct perf_cgroup *cgrp;
684 struct perf_output_handle {
685 struct perf_event *event;
686 struct ring_buffer *rb;
687 unsigned long wakeup;
696 #ifdef CONFIG_CGROUP_PERF
699 * perf_cgroup_info keeps track of time_enabled for a cgroup.
700 * This is a per-cpu dynamically allocated data structure.
702 struct perf_cgroup_info {
708 struct cgroup_subsys_state css;
709 struct perf_cgroup_info __percpu *info;
713 * Must ensure cgroup is pinned (css_get) before calling
714 * this function. In other words, we cannot call this function
715 * if there is no cgroup event for the current CPU context.
717 static inline struct perf_cgroup *
718 perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
720 return container_of(task_css_check(task, perf_event_cgrp_id,
721 ctx ? lockdep_is_held(&ctx->lock)
723 struct perf_cgroup, css);
725 #endif /* CONFIG_CGROUP_PERF */
727 #ifdef CONFIG_PERF_EVENTS
729 extern void *perf_aux_output_begin(struct perf_output_handle *handle,
730 struct perf_event *event);
731 extern void perf_aux_output_end(struct perf_output_handle *handle,
732 unsigned long size, bool truncated);
733 extern int perf_aux_output_skip(struct perf_output_handle *handle,
735 extern void *perf_get_aux(struct perf_output_handle *handle);
737 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
738 extern void perf_pmu_unregister(struct pmu *pmu);
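/*
 * Illustrative sketch, assuming the hypothetical my_pmu_* callbacks
 * sketched in the struct pmu comments above (the attribute groups are
 * sketched near the PMU_*_ATTR helpers at the end of this file): a
 * module-style PMU registered under a dynamically allocated type (-1).
 *
 *	static struct pmu my_pmu = {
 *		.module		= THIS_MODULE,
 *		.attr_groups	= my_pmu_attr_groups,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	static int __init my_pmu_module_init(void)
 *	{
 *		return perf_pmu_register(&my_pmu, "my_pmu", -1);
 *	}
 *
 *	static void __exit my_pmu_module_exit(void)
 *	{
 *		perf_pmu_unregister(&my_pmu);
 *	}
 */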
740 extern int perf_num_counters(void);
741 extern const char *perf_pmu_name(void);
742 extern void __perf_event_task_sched_in(struct task_struct *prev,
743 struct task_struct *task);
744 extern void __perf_event_task_sched_out(struct task_struct *prev,
745 struct task_struct *next);
746 extern int perf_event_init_task(struct task_struct *child);
747 extern void perf_event_exit_task(struct task_struct *child);
748 extern void perf_event_free_task(struct task_struct *task);
749 extern void perf_event_delayed_put(struct task_struct *task);
750 extern struct perf_event *perf_event_get(unsigned int fd);
751 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
752 extern void perf_event_print_debug(void);
753 extern void perf_pmu_disable(struct pmu *pmu);
754 extern void perf_pmu_enable(struct pmu *pmu);
755 extern void perf_sched_cb_dec(struct pmu *pmu);
756 extern void perf_sched_cb_inc(struct pmu *pmu);
757 extern int perf_event_task_disable(void);
758 extern int perf_event_task_enable(void);
759 extern int perf_event_refresh(struct perf_event *event, int refresh);
760 extern void perf_event_update_userpage(struct perf_event *event);
761 extern int perf_event_release_kernel(struct perf_event *event);
762 extern struct perf_event *
763 perf_event_create_kernel_counter(struct perf_event_attr *attr,
765 struct task_struct *task,
766 perf_overflow_handler_t callback,
768 extern void perf_pmu_migrate_context(struct pmu *pmu,
769 int src_cpu, int dst_cpu);
770 extern u64 perf_event_read_local(struct perf_event *event);
771 extern u64 perf_event_read_value(struct perf_event *event,
772 u64 *enabled, u64 *running);
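/*
 * Illustrative sketch, assuming the create_kernel_counter() argument list
 * is (attr, cpu, task, overflow_handler, context): creating, reading and
 * releasing an in-kernel CPU-bound cycle counter. The my_* names are
 * hypothetical and error handling is minimal.
 *
 *	static struct perf_event *my_counter;
 *
 *	static int my_counter_create(int cpu)
 *	{
 *		struct perf_event_attr attr = {
 *			.type	= PERF_TYPE_HARDWARE,
 *			.config	= PERF_COUNT_HW_CPU_CYCLES,
 *			.size	= sizeof(attr),
 *			.pinned	= 1,
 *		};
 *
 *		my_counter = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *							      NULL, NULL);
 *		return IS_ERR(my_counter) ? PTR_ERR(my_counter) : 0;
 *	}
 *
 *	static u64 my_counter_read(void)
 *	{
 *		u64 enabled, running;
 *
 *		return perf_event_read_value(my_counter, &enabled, &running);
 *	}
 *
 *	static void my_counter_destroy(void)
 *	{
 *		if (!IS_ERR_OR_NULL(my_counter))
 *			perf_event_release_kernel(my_counter);
 *	}
 */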
774 extern struct dentry *perf_create_debug_dir(void);
776 struct perf_sample_data {
778 * Fields set by perf_sample_data_init(), grouped so as to
779 * minimize the cachelines touched.
782 struct perf_raw_record *raw;
783 struct perf_branch_stack *br_stack;
787 union perf_mem_data_src data_src;
790 * The other fields, optionally {set,used} by
791 * perf_{prepare,output}_sample().
806 struct perf_callchain_entry *callchain;
809 * regs_user may point to task_pt_regs or to regs_user_copy, depending
812 struct perf_regs regs_user;
813 struct pt_regs regs_user_copy;
815 struct perf_regs regs_intr;
817 } ____cacheline_aligned;
819 /* default value for data source */
820 #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
821 PERF_MEM_S(LVL, NA) |\
822 PERF_MEM_S(SNOOP, NA) |\
823 PERF_MEM_S(LOCK, NA) |\
826 static inline void perf_sample_data_init(struct perf_sample_data *data,
827 u64 addr, u64 period)
829 /* remaining struct members initialized in perf_prepare_sample() */
832 data->br_stack = NULL;
833 data->period = period;
835 data->data_src.val = PERF_MEM_NA;
839 extern void perf_output_sample(struct perf_output_handle *handle,
840 struct perf_event_header *header,
841 struct perf_sample_data *data,
842 struct perf_event *event);
843 extern void perf_prepare_sample(struct perf_event_header *header,
844 struct perf_sample_data *data,
845 struct perf_event *event,
846 struct pt_regs *regs);
848 extern int perf_event_overflow(struct perf_event *event,
849 struct perf_sample_data *data,
850 struct pt_regs *regs);
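/*
 * Illustrative sketch of the typical PMI-side flow in a sampling PMU
 * driver, with the hardware specifics (my_pmu_*) left hypothetical:
 * update the count, initialise the sample data with the period the
 * sample was started with (hw_perf_event::last_period, per the hw_perf_event
 * comments above), and stop the counter if the event gets throttled.
 *
 *	static void my_pmu_handle_overflow(struct perf_event *event,
 *					   struct pt_regs *regs)
 *	{
 *		struct perf_sample_data data;
 *		struct hw_perf_event *hwc = &event->hw;
 *
 *		my_pmu_update(event);		// fold the hardware delta into event->count
 *		perf_sample_data_init(&data, 0, hwc->last_period);
 *
 *		if (perf_event_overflow(event, &data, regs))
 *			event->pmu->stop(event, 0);
 *	}
 */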
852 extern void perf_event_output(struct perf_event *event,
853 struct perf_sample_data *data,
854 struct pt_regs *regs);
857 perf_event_header__init_id(struct perf_event_header *header,
858 struct perf_sample_data *data,
859 struct perf_event *event);
861 perf_event__output_id_sample(struct perf_event *event,
862 struct perf_output_handle *handle,
863 struct perf_sample_data *sample);
866 perf_log_lost_samples(struct perf_event *event, u64 lost);
868 static inline bool is_sampling_event(struct perf_event *event)
870 return event->attr.sample_period != 0;
874 * Return 1 for a software event, 0 for a hardware event
876 static inline int is_software_event(struct perf_event *event)
878 return event->pmu->task_ctx_nr == perf_sw_context;
881 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
883 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
884 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
886 #ifndef perf_arch_fetch_caller_regs
887 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
891 * Take a snapshot of the regs. Skip ip and frame pointer to
892 * the nth caller. We only need a few of the regs:
893 * - ip for PERF_SAMPLE_IP
894 * - cs for user_mode() tests
895 * - bp for callchains
896 * - eflags, for future purposes, just in case
898 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
900 memset(regs, 0, sizeof(*regs));
902 perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
905 static __always_inline void
906 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
908 if (static_key_false(&perf_swevent_enabled[event_id]))
909 __perf_sw_event(event_id, nr, regs, addr);
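/*
 * Illustrative usage sketch: the architecture page-fault handlers emit
 * their software events with exactly this pattern ("address" being the
 * faulting address available in that context):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */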
912 DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
915 * 'Special' version for the scheduler; it hard-assumes no recursion,
916 * which is guaranteed by us not actually scheduling inside other swevents
917 * because those disable preemption.
919 static __always_inline void
920 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
922 if (static_key_false(&perf_swevent_enabled[event_id])) {
923 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
925 perf_fetch_caller_regs(regs);
926 ___perf_sw_event(event_id, nr, regs, addr);
930 extern struct static_key_deferred perf_sched_events;
932 static __always_inline bool
933 perf_sw_migrate_enabled(void)
935 if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
940 static inline void perf_event_task_migrate(struct task_struct *task)
942 if (perf_sw_migrate_enabled())
943 task->sched_migrated = 1;
946 static inline void perf_event_task_sched_in(struct task_struct *prev,
947 struct task_struct *task)
949 if (static_key_false(&perf_sched_events.key))
950 __perf_event_task_sched_in(prev, task);
952 if (perf_sw_migrate_enabled() && task->sched_migrated) {
953 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
955 perf_fetch_caller_regs(regs);
956 ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
957 task->sched_migrated = 0;
961 static inline void perf_event_task_sched_out(struct task_struct *prev,
962 struct task_struct *next)
964 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
966 if (static_key_false(&perf_sched_events.key))
967 __perf_event_task_sched_out(prev, next);
970 static inline u64 __perf_event_count(struct perf_event *event)
972 return local64_read(&event->count) + atomic64_read(&event->child_count);
975 extern void perf_event_mmap(struct vm_area_struct *vma);
976 extern struct perf_guest_info_callbacks *perf_guest_cbs;
977 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
978 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
980 extern void perf_event_exec(void);
981 extern void perf_event_comm(struct task_struct *tsk, bool exec);
982 extern void perf_event_fork(struct task_struct *tsk);
985 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
987 extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
988 extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
990 static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
992 if (entry->nr < PERF_MAX_STACK_DEPTH)
993 entry->ip[entry->nr++] = ip;
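/*
 * Illustrative sketch of the architecture side: perf_callchain_kernel()
 * typically records the interrupted IP and then feeds each unwound return
 * address to perf_callchain_store(); my_arch_unwind_next() stands in for
 * the arch-specific stack walker.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr = instruction_pointer(regs);
 *
 *		do {
 *			perf_callchain_store(entry, addr);
 *		} while (my_arch_unwind_next(regs, &addr));
 *	}
 */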
996 extern int sysctl_perf_event_paranoid;
997 extern int sysctl_perf_event_mlock;
998 extern int sysctl_perf_event_sample_rate;
999 extern int sysctl_perf_cpu_time_max_percent;
1001 extern void perf_sample_event_took(u64 sample_len_ns);
1003 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1004 void __user *buffer, size_t *lenp,
1006 extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1007 void __user *buffer, size_t *lenp,
1011 static inline bool perf_paranoid_any(void)
1013 return sysctl_perf_event_paranoid > 2;
1016 static inline bool perf_paranoid_tracepoint_raw(void)
1018 return sysctl_perf_event_paranoid > -1;
1021 static inline bool perf_paranoid_cpu(void)
1023 return sysctl_perf_event_paranoid > 0;
1026 static inline bool perf_paranoid_kernel(void)
1028 return sysctl_perf_event_paranoid > 1;
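/*
 * Illustrative usage sketch: the syscall path gates privileged requests on
 * the paranoia level with these helpers, roughly as follows.
 *
 *	if (!attr.exclude_kernel && perf_paranoid_kernel() &&
 *	    !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */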
1031 extern void perf_event_init(void);
1032 extern void perf_tp_event(u64 addr, u64 count, void *record,
1033 int entry_size, struct pt_regs *regs,
1034 struct hlist_head *head, int rctx,
1035 struct task_struct *task);
1036 extern void perf_bp_event(struct perf_event *event, void *data);
1038 #ifndef perf_misc_flags
1039 # define perf_misc_flags(regs) \
1040 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1041 # define perf_instruction_pointer(regs) instruction_pointer(regs)
1044 static inline bool has_branch_stack(struct perf_event *event)
1046 return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1049 static inline bool needs_branch_stack(struct perf_event *event)
1051 return event->attr.branch_sample_type != 0;
1054 static inline bool has_aux(struct perf_event *event)
1056 return event->pmu->setup_aux;
1059 extern int perf_output_begin(struct perf_output_handle *handle,
1060 struct perf_event *event, unsigned int size);
1061 extern void perf_output_end(struct perf_output_handle *handle);
1062 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1063 const void *buf, unsigned int len);
1064 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1066 extern int perf_swevent_get_recursion_context(void);
1067 extern void perf_swevent_put_recursion_context(int rctx);
1068 extern u64 perf_swevent_set_period(struct perf_event *event);
1069 extern void perf_event_enable(struct perf_event *event);
1070 extern void perf_event_disable(struct perf_event *event);
1071 extern int __perf_event_disable(void *info);
1072 extern void perf_event_task_tick(void);
1073 #else /* !CONFIG_PERF_EVENTS: */
1074 static inline void *
1075 perf_aux_output_begin(struct perf_output_handle *handle,
1076 struct perf_event *event) { return NULL; }
1078 perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
1081 perf_aux_output_skip(struct perf_output_handle *handle,
1082 unsigned long size) { return -EINVAL; }
1083 static inline void *
1084 perf_get_aux(struct perf_output_handle *handle) { return NULL; }
1086 perf_event_task_migrate(struct task_struct *task) { }
1088 perf_event_task_sched_in(struct task_struct *prev,
1089 struct task_struct *task) { }
1091 perf_event_task_sched_out(struct task_struct *prev,
1092 struct task_struct *next) { }
1093 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1094 static inline void perf_event_exit_task(struct task_struct *child) { }
1095 static inline void perf_event_free_task(struct task_struct *task) { }
1096 static inline void perf_event_delayed_put(struct task_struct *task) { }
1097 static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
1098 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1100 return ERR_PTR(-EINVAL);
1102 static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
1103 static inline void perf_event_print_debug(void) { }
1104 static inline int perf_event_task_disable(void) { return -EINVAL; }
1105 static inline int perf_event_task_enable(void) { return -EINVAL; }
1106 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1112 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
1114 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
1116 perf_bp_event(struct perf_event *event, void *data) { }
1118 static inline int perf_register_guest_info_callbacks
1119 (struct perf_guest_info_callbacks *callbacks) { return 0; }
1120 static inline int perf_unregister_guest_info_callbacks
1121 (struct perf_guest_info_callbacks *callbacks) { return 0; }
1123 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
1124 static inline void perf_event_exec(void) { }
1125 static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
1126 static inline void perf_event_fork(struct task_struct *tsk) { }
1127 static inline void perf_event_init(void) { }
1128 static inline int perf_swevent_get_recursion_context(void) { return -1; }
1129 static inline void perf_swevent_put_recursion_context(int rctx) { }
1130 static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
1131 static inline void perf_event_enable(struct perf_event *event) { }
1132 static inline void perf_event_disable(struct perf_event *event) { }
1133 static inline int __perf_event_disable(void *info) { return -1; }
1134 static inline void perf_event_task_tick(void) { }
1135 static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
1138 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
1139 extern bool perf_event_can_stop_tick(void);
1141 static inline bool perf_event_can_stop_tick(void) { return true; }
1144 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1145 extern void perf_restore_debug_store(void);
1147 static inline void perf_restore_debug_store(void) { }
1150 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
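/*
 * Illustrative sketch of emitting a small side-band record with the output
 * helpers declared above, modelled on the core's own record emitters. The
 * record type and payload here are made-up placeholders, not a real
 * PERF_RECORD_* layout.
 *
 *	static void my_pmu_emit_record(struct perf_event *event)
 *	{
 *		struct perf_output_handle handle;
 *		struct perf_sample_data sample;
 *		struct {
 *			struct perf_event_header header;
 *			u64 payload;
 *		} rec = {
 *			.header = { .type = 0, .size = sizeof(rec) },
 *		};
 *
 *		perf_event_header__init_id(&rec.header, &sample, event);
 *		if (perf_output_begin(&handle, event, rec.header.size))
 *			return;
 *		perf_output_put(&handle, rec);
 *		perf_event__output_id_sample(event, &handle, &sample);
 *		perf_output_end(&handle);
 *	}
 */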
1153 * This has to have a higher priority than migration_notifier in sched/core.c.
1155 #define perf_cpu_notifier(fn) \
1157 static struct notifier_block fn##_nb = \
1158 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
1159 unsigned long cpu = smp_processor_id(); \
1160 unsigned long flags; \
1162 cpu_notifier_register_begin(); \
1163 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
1164 (void *)(unsigned long)cpu); \
1165 local_irq_save(flags); \
1166 fn(&fn##_nb, (unsigned long)CPU_STARTING, \
1167 (void *)(unsigned long)cpu); \
1168 local_irq_restore(flags); \
1169 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
1170 (void *)(unsigned long)cpu); \
1171 __register_cpu_notifier(&fn##_nb); \
1172 cpu_notifier_register_done(); \
1176 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
1177 * callback for already online CPUs.
1179 #define __perf_cpu_notifier(fn) \
1181 static struct notifier_block fn##_nb = \
1182 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
1184 __register_cpu_notifier(&fn##_nb); \
1187 struct perf_pmu_events_attr {
1188 struct device_attribute attr;
1190 const char *event_str;
1193 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1196 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
1197 static struct perf_pmu_events_attr _var = { \
1198 .attr = __ATTR(_name, 0444, _show, NULL), \
1202 #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
1203 static struct perf_pmu_events_attr _var = { \
1204 .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1206 .event_str = _str, \
1209 #define PMU_FORMAT_ATTR(_name, _format) \
1211 _name##_show(struct device *dev, \
1212 struct device_attribute *attr, \
1215 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
1216 return sprintf(page, _format "\n"); \
1219 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
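/*
 * Illustrative sketch of how a driver typically wires the helpers above
 * into pmu::attr_groups (see the registration sketch near
 * perf_pmu_register()); the my_pmu names, config bit layout and event
 * encoding are made up.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, my_pmu_attr_cycles, "event=0x11");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static struct attribute *my_pmu_events_attrs[] = {
 *		&my_pmu_attr_cycles.attr.attr,
 *		NULL,
 *	};
 *
 *	static const struct attribute_group my_pmu_format_group = {
 *		.name	= "format",
 *		.attrs	= my_pmu_format_attrs,
 *	};
 *
 *	static const struct attribute_group my_pmu_events_group = {
 *		.name	= "events",
 *		.attrs	= my_pmu_events_attrs,
 *	};
 *
 *	static const struct attribute_group *my_pmu_attr_groups[] = {
 *		&my_pmu_format_group,
 *		&my_pmu_events_group,
 *		NULL,
 *	};
 */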
1221 #endif /* _LINUX_PERF_EVENT_H */