diff options
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h | 50
1 files changed, 45 insertions, 5 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 61992cf2e977..1b82d44b0a02 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -92,8 +92,6 @@ struct hw_perf_event_extra {  	int		idx;	/* index in shared_regs->regs[] */  }; -struct event_constraint; -  /**   * struct hw_perf_event - performance event hardware details:   */ @@ -112,8 +110,6 @@ struct hw_perf_event {  			struct hw_perf_event_extra extra_reg;  			struct hw_perf_event_extra branch_reg; - -			struct event_constraint *constraint;  		};  		struct { /* software */  			struct hrtimer	hrtimer; @@ -124,7 +120,7 @@ struct hw_perf_event {  		};  		struct { /* intel_cqm */  			int			cqm_state; -			int			cqm_rmid; +			u32			cqm_rmid;  			struct list_head	cqm_events_entry;  			struct list_head	cqm_groups_entry;  			struct list_head	cqm_group_entry; @@ -566,8 +562,12 @@ struct perf_cpu_context {  	struct perf_event_context	*task_ctx;  	int				active_oncpu;  	int				exclusive; + +	raw_spinlock_t			hrtimer_lock;  	struct hrtimer			hrtimer;  	ktime_t				hrtimer_interval; +	unsigned int			hrtimer_active; +  	struct pmu			*unique_pmu;  	struct perf_cgroup		*cgrp;  }; @@ -734,6 +734,22 @@ extern int perf_event_overflow(struct perf_event *event,  				 struct perf_sample_data *data,  				 struct pt_regs *regs); +extern void perf_event_output(struct perf_event *event, +				struct perf_sample_data *data, +				struct pt_regs *regs); + +extern void +perf_event_header__init_id(struct perf_event_header *header, +			   struct perf_sample_data *data, +			   struct perf_event *event); +extern void +perf_event__output_id_sample(struct perf_event *event, +			     struct perf_output_handle *handle, +			     struct perf_sample_data *sample); + +extern void +perf_log_lost_samples(struct perf_event *event, u64 lost); +  static inline bool is_sampling_event(struct perf_event *event)  {  	return event->attr.sample_period != 0; @@ -798,11 +814,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, 
u64 addr)  extern struct static_key_deferred perf_sched_events; +static __always_inline bool +perf_sw_migrate_enabled(void) +{ +	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) +		return true; +	return false; +} + +static inline void perf_event_task_migrate(struct task_struct *task) +{ +	if (perf_sw_migrate_enabled()) +		task->sched_migrated = 1; +} +  static inline void perf_event_task_sched_in(struct task_struct *prev,  					    struct task_struct *task)  {  	if (static_key_false(&perf_sched_events.key))  		__perf_event_task_sched_in(prev, task); + +	if (perf_sw_migrate_enabled() && task->sched_migrated) { +		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + +		perf_fetch_caller_regs(regs); +		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); +		task->sched_migrated = 0; +	}  }  static inline void perf_event_task_sched_out(struct task_struct *prev, @@ -925,6 +963,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,  static inline void *  perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }  static inline void +perf_event_task_migrate(struct task_struct *task)			{ } +static inline void  perf_event_task_sched_in(struct task_struct *prev,  			 struct task_struct *task)			{ }  static inline void  |