diff options
Diffstat (limited to 'tools/perf/util/bpf_skel/lock_contention.bpf.c')
| -rw-r--r-- | tools/perf/util/bpf_skel/lock_contention.bpf.c | 138 | 
1 file changed, 129 insertions, 9 deletions
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c index e6007eaeda1a..1d48226ae75d 100644 --- a/tools/perf/util/bpf_skel/lock_contention.bpf.c +++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c @@ -4,11 +4,12 @@  #include <bpf/bpf_helpers.h>  #include <bpf/bpf_tracing.h>  #include <bpf/bpf_core_read.h> +#include <asm-generic/errno-base.h>  #include "lock_data.h" -/* default buffer size */ -#define MAX_ENTRIES  10240 +/* for collect_lock_syms().  4096 was rejected by the verifier */ +#define MAX_CPUS  1024  /* lock contention flags from include/trace/events/lock.h */  #define LCB_F_SPIN	(1U << 0) @@ -58,6 +59,13 @@ struct {  struct {  	__uint(type, BPF_MAP_TYPE_HASH); +	__uint(key_size, sizeof(__u64)); +	__uint(value_size, sizeof(__u32)); +	__uint(max_entries, MAX_ENTRIES); +} lock_syms SEC(".maps"); + +struct { +	__uint(type, BPF_MAP_TYPE_HASH);  	__uint(key_size, sizeof(__u32));  	__uint(value_size, sizeof(__u8));  	__uint(max_entries, 1); @@ -92,6 +100,14 @@ struct rw_semaphore___new {  	atomic_long_t owner;  } __attribute__((preserve_access_index)); +struct mm_struct___old { +	struct rw_semaphore mmap_sem; +} __attribute__((preserve_access_index)); + +struct mm_struct___new { +	struct rw_semaphore mmap_lock; +} __attribute__((preserve_access_index)); +  /* control flags */  int enabled;  int has_cpu; @@ -106,7 +122,13 @@ int lock_owner;  int aggr_mode;  /* error stat */ -int lost; +int task_fail; +int stack_fail; +int time_fail; +int data_fail; + +int task_map_full; +int data_map_full;  static inline int can_record(u64 *ctx)  { @@ -159,11 +181,12 @@ static inline int update_task_data(struct task_struct *task)  		return -1;  	p = bpf_map_lookup_elem(&task_data, &pid); -	if (p == NULL) { +	if (p == NULL && !task_map_full) {  		struct contention_task_data data = {};  		BPF_CORE_READ_STR_INTO(&data.comm, task, comm); -		bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST); +		if 
(bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST) == -E2BIG) +			task_map_full = 1;  	}  	return 0; @@ -182,7 +205,13 @@ static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)  		struct mutex *mutex = (void *)lock;  		owner = BPF_CORE_READ(mutex, owner.counter);  	} else if (flags == LCB_F_READ || flags == LCB_F_WRITE) { -#if __has_builtin(bpf_core_type_matches) +	/* +	 * Support for the BPF_TYPE_MATCHES argument to the +	 * __builtin_preserve_type_info builtin was added at some point during +	 * development of clang 15 and it's what is needed for +	 * bpf_core_type_matches. +	 */ +#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15  		if (bpf_core_type_matches(struct rw_semaphore___old)) {  			struct rw_semaphore___old *rwsem = (void *)lock;  			owner = (unsigned long)BPF_CORE_READ(rwsem, owner); @@ -204,6 +233,41 @@ static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)  	return task;  } +static inline __u32 check_lock_type(__u64 lock, __u32 flags) +{ +	struct task_struct *curr; +	struct mm_struct___old *mm_old; +	struct mm_struct___new *mm_new; + +	switch (flags) { +	case LCB_F_READ:  /* rwsem */ +	case LCB_F_WRITE: +		curr = bpf_get_current_task_btf(); +		if (curr->mm == NULL) +			break; +		mm_new = (void *)curr->mm; +		if (bpf_core_field_exists(mm_new->mmap_lock)) { +			if (&mm_new->mmap_lock == (void *)lock) +				return LCD_F_MMAP_LOCK; +			break; +		} +		mm_old = (void *)curr->mm; +		if (bpf_core_field_exists(mm_old->mmap_sem)) { +			if (&mm_old->mmap_sem == (void *)lock) +				return LCD_F_MMAP_LOCK; +		} +		break; +	case LCB_F_SPIN:  /* spinlock */ +		curr = bpf_get_current_task_btf(); +		if (&curr->sighand->siglock == (void *)lock) +			return LCD_F_SIGHAND_LOCK; +		break; +	default: +		break; +	} +	return 0; +} +  SEC("tp_btf/contention_begin")  int contention_begin(u64 *ctx)  { @@ -224,7 +288,7 @@ int contention_begin(u64 *ctx)  		bpf_map_update_elem(&tstamp, &pid, &zero, BPF_ANY);  		
pelem = bpf_map_lookup_elem(&tstamp, &pid);  		if (pelem == NULL) { -			lost++; +			__sync_fetch_and_add(&task_fail, 1);  			return 0;  		}  	} @@ -237,7 +301,7 @@ int contention_begin(u64 *ctx)  		pelem->stack_id = bpf_get_stackid(ctx, &stacks,  						  BPF_F_FAST_STACK_CMP | stack_skip);  		if (pelem->stack_id < 0) -			lost++; +			__sync_fetch_and_add(&stack_fail, 1);  	} else if (aggr_mode == LOCK_AGGR_TASK) {  		struct task_struct *task; @@ -281,6 +345,11 @@ int contention_end(u64 *ctx)  		return 0;  	duration = bpf_ktime_get_ns() - pelem->timestamp; +	if ((__s64)duration < 0) { +		bpf_map_delete_elem(&tstamp, &pid); +		__sync_fetch_and_add(&time_fail, 1); +		return 0; +	}  	switch (aggr_mode) {  	case LOCK_AGGR_CALLER: @@ -306,6 +375,12 @@ int contention_end(u64 *ctx)  	data = bpf_map_lookup_elem(&lock_stat, &key);  	if (!data) { +		if (data_map_full) { +			bpf_map_delete_elem(&tstamp, &pid); +			__sync_fetch_and_add(&data_fail, 1); +			return 0; +		} +  		struct contention_data first = {  			.total_time = duration,  			.max_time = duration, @@ -313,8 +388,17 @@ int contention_end(u64 *ctx)  			.count = 1,  			.flags = pelem->flags,  		}; +		int err; + +		if (aggr_mode == LOCK_AGGR_ADDR) +			first.flags |= check_lock_type(pelem->lock, pelem->flags); -		bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST); +		err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST); +		if (err < 0) { +			if (err == -E2BIG) +				data_map_full = 1; +			__sync_fetch_and_add(&data_fail, 1); +		}  		bpf_map_delete_elem(&tstamp, &pid);  		return 0;  	} @@ -332,4 +416,40 @@ int contention_end(u64 *ctx)  	return 0;  } +struct rq {}; + +extern struct rq runqueues __ksym; + +struct rq___old { +	raw_spinlock_t lock; +} __attribute__((preserve_access_index)); + +struct rq___new { +	raw_spinlock_t __lock; +} __attribute__((preserve_access_index)); + +SEC("raw_tp/bpf_test_finish") +int BPF_PROG(collect_lock_syms) +{ +	__u64 lock_addr, lock_off; +	__u32 lock_flag; + +	if 
(bpf_core_field_exists(struct rq___new, __lock)) +		lock_off = offsetof(struct rq___new, __lock); +	else +		lock_off = offsetof(struct rq___old, lock); + +	for (int i = 0; i < MAX_CPUS; i++) { +		struct rq *rq = bpf_per_cpu_ptr(&runqueues, i); + +		if (rq == NULL) +			break; + +		lock_addr = (__u64)(void *)rq + lock_off; +		lock_flag = LOCK_CLASS_RQLOCK; +		bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY); +	} +	return 0; +} +  char LICENSE[] SEC("license") = "Dual BSD/GPL";  |