diff options
Diffstat (limited to 'tools/perf/util/header.c')
| -rw-r--r-- | tools/perf/util/header.c | 52 | 
1 file changed, 25 insertions, 27 deletions
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 52fbf526fe74..d812e1e371a7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -456,6 +456,8 @@ static int write_cpudesc(struct feat_fd *ff,
 #define CPUINFO_PROC	{ "Processor", }
 #elif defined(__xtensa__)
 #define CPUINFO_PROC	{ "core ID", }
+#elif defined(__loongarch__)
+#define CPUINFO_PROC	{ "Model Name", }
 #else
 #define CPUINFO_PROC	{ "model name", }
 #endif
@@ -746,20 +748,14 @@ static int write_pmu_mappings(struct feat_fd *ff,
 	 * Do a first pass to count number of pmu to avoid lseek so this
 	 * works in pipe mode as well.
 	 */
-	while ((pmu = perf_pmus__scan(pmu))) {
-		if (!pmu->name)
-			continue;
+	while ((pmu = perf_pmus__scan(pmu)))
 		pmu_num++;
-	}
 
 	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
 	if (ret < 0)
 		return ret;
 
 	while ((pmu = perf_pmus__scan(pmu))) {
-		if (!pmu->name)
-			continue;
-
 		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
 		if (ret < 0)
 			return ret;
@@ -1605,8 +1601,15 @@ static int write_pmu_caps(struct feat_fd *ff,
 	int ret;
 
 	while ((pmu = perf_pmus__scan(pmu))) {
-		if (!pmu->name || !strcmp(pmu->name, "cpu") ||
-		    perf_pmu__caps_parse(pmu) <= 0)
+		if (!strcmp(pmu->name, "cpu")) {
+			/*
+			 * The "cpu" PMU is special and covered by
+			 * HEADER_CPU_PMU_CAPS. Note, core PMUs are
+			 * counted/written here for ARM, s390 and Intel hybrid.
+			 */
+			continue;
+		}
+		if (perf_pmu__caps_parse(pmu) <= 0)
 			continue;
 		nr_pmu++;
 	}
@@ -1619,23 +1622,17 @@
 		return 0;
 
 	/*
-	 * Write hybrid pmu caps first to maintain compatibility with
-	 * older perf tool.
+	 * Note older perf tools assume core PMUs come first, this is a property
+	 * of perf_pmus__scan.
 	 */
-	if (perf_pmus__num_core_pmus() > 1) {
-		pmu = NULL;
-		while ((pmu = perf_pmus__scan_core(pmu))) {
-			ret = __write_pmu_caps(ff, pmu, true);
-			if (ret < 0)
-				return ret;
-		}
-	}
-
 	pmu = NULL;
 	while ((pmu = perf_pmus__scan(pmu))) {
-		if (pmu->is_core || !pmu->nr_caps)
+		if (!strcmp(pmu->name, "cpu")) {
+			/* Skip as above. */
+			continue;
+		}
+		if (perf_pmu__caps_parse(pmu) <= 0)
 			continue;
-
 		ret = __write_pmu_caps(ff, pmu, true);
 		if (ret < 0)
 			return ret;
@@ -4381,7 +4378,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 			     union perf_event *event,
 			     struct evlist **pevlist)
 {
-	u32 i, ids, n_ids;
+	u32 i, n_ids;
+	u64 *ids;
 	struct evsel *evsel;
 	struct evlist *evlist = *pevlist;
@@ -4397,9 +4395,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 
 	evlist__add(evlist, evsel);
 
-	ids = event->header.size;
-	ids -= (void *)&event->attr.id - (void *)event;
-	n_ids = ids / sizeof(u64);
+	n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+	n_ids = n_ids / sizeof(u64);
 	/*
 	 * We don't have the cpu and thread maps on the header, so
 	 * for allocating the perf_sample_id table we fake 1 cpu and
@@ -4408,8 +4405,9 @@
 	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
 		return -ENOMEM;
 
+	ids = perf_record_header_attr_id(event);
 	for (i = 0; i < n_ids; i++) {
-		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
 	}
 
 	return 0;