// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <inttypes.h>
#include "cpumap.h"
#include "evlist.h"
#include "evsel.h"
#include "../perf.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
#include "debug.h"
#include <unistd.h>
#include <stdlib.h>
#include <linux/err.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>

int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
{
	struct evsel *evsel;
	struct perf_pmu *pmu;
	__u64 config;
	struct perf_cpu_map *cpus;

	/*
	 * Add one cycles event per hybrid PMU, encoding the PMU type in
	 * the high bits of the config so the kernel routes each event to
	 * the right PMU.
	 */
	perf_pmu__for_each_hybrid_pmu(pmu) {
		config = PERF_COUNT_HW_CPU_CYCLES |
			 ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
		evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
					  config);
		if (!evsel)
			return -ENOMEM;

		cpus = perf_cpu_map__get(pmu->cpus);
		evsel->core.cpus = cpus;
		evsel->core.own_cpus = perf_cpu_map__get(cpus);
		evsel->pmu_name = strdup(pmu->name);
		evlist__add(evlist, evsel);
	}

	return 0;
}

/* Return true if the group mixes events from different hybrid PMUs. */
static bool group_hybrid_conflict(struct evsel *leader)
{
	struct evsel *pos, *prev = NULL;

	for_each_group_evsel(pos, leader) {
		if (!evsel__is_hybrid(pos))
			continue;

		if (prev && strcmp(prev->pmu_name, pos->pmu_name))
			return true;

		prev = pos;
	}

	return false;
}

void evlist__warn_hybrid_group(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel) &&
		    evsel->core.nr_members > 1 &&
		    group_hybrid_conflict(evsel)) {
			pr_warning("WARNING: events in group from "
				   "different hybrid PMUs!\n");
			return;
		}
	}
}

bool evlist__has_hybrid(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->pmu_name &&
		    perf_pmu__is_hybrid(evsel->pmu_name)) {
			return true;
		}
	}

	return false;
}

int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
{
	struct perf_cpu_map *cpus;
	struct evsel *evsel, *tmp;
	struct perf_pmu *pmu;
	/* Initialize ret so an evlist with no hybrid evsels returns success. */
	int ret = 0, unmatched_count = 0, events_nr = 0;

	if (!perf_pmu__has_hybrid() || !cpu_list)
		return 0;

	cpus = perf_cpu_map__new(cpu_list);
	if (!cpus)
		return -1;

	/*
	 * The evsels are created with the hybrid PMU's cpus. Now check
	 * and adjust each evsel's cpus against cpu_list, because cpu_list
	 * may conflict with them. For example, if an evsel's cpus are
	 * cpu0-7 but cpu_list is cpu6-8, trim the evsel's cpus to cpu6-7.
	 * The maps are then propagated in evlist__create_maps().
	 */
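	/*
	 * Worked example (illustrative addition, not from the original
	 * source): on a machine where the cpu_core PMU covers cpu0-7 and
	 * the cpu_atom PMU covers cpu8-15, a cpu_list of "6-9" leaves the
	 * cpu_core evsel with cpu6-7 and the cpu_atom evsel with cpu8-9.
	 * An evsel whose intersection with cpu_list is empty is dropped
	 * from the evlist below, with a warning naming the valid range
	 * for its PMU.
	 */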
	evlist__for_each_entry_safe(evlist, tmp, evsel) {
		struct perf_cpu_map *matched_cpus, *unmatched_cpus;
		char buf1[128], buf2[128];

		pmu = perf_pmu__find_hybrid_pmu(evsel->pmu_name);
		if (!pmu)
			continue;

		ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,
					   &unmatched_cpus);
		if (ret)
			goto out;

		events_nr++;

		/*
		 * Shrink the evsel's cpu map to the matched subset when the
		 * user list only partially overlaps this PMU's cpus.
		 */
		if (perf_cpu_map__nr(matched_cpus) > 0 &&
		    (perf_cpu_map__nr(unmatched_cpus) > 0 ||
		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
			perf_cpu_map__put(evsel->core.cpus);
			perf_cpu_map__put(evsel->core.own_cpus);
			evsel->core.cpus = perf_cpu_map__get(matched_cpus);
			evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);

			if (perf_cpu_map__nr(unmatched_cpus) > 0) {
				cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
				pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
					   buf1, pmu->name, evsel->name);
			}
		}

		/*
		 * No overlap at all: drop the evsel and tell the user the
		 * valid CPU range for this PMU.
		 */
		if (perf_cpu_map__nr(matched_cpus) == 0) {
			evlist__remove(evlist, evsel);
			evsel__delete(evsel);

			cpu_map__snprint(cpus, buf1, sizeof(buf1));
			cpu_map__snprint(pmu->cpus, buf2, sizeof(buf2));
			pr_warning("WARNING: %s isn't a '%s', please use a CPU list in the '%s' range (%s)\n",
				   buf1, pmu->name, pmu->name, buf2);
			unmatched_count++;
		}

		perf_cpu_map__put(matched_cpus);
		perf_cpu_map__put(unmatched_cpus);
	}

	/* Fail only if every hybrid evsel was dropped. */
	if (events_nr)
		ret = (unmatched_count == events_nr) ? -1 : 0;
out:
	perf_cpu_map__put(cpus);
	return ret;
}
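
/*
 * Usage sketch (an illustrative addition, not part of the upstream file):
 * roughly how a perf tool could combine the helpers above after parsing
 * events and a user-supplied CPU list. The function name and call order
 * here are assumptions for illustration only.
 */
static int __maybe_unused hybrid_evlist_setup_sketch(struct evlist *evlist,
						     const char *cpu_list)
{
	/*
	 * Trim each hybrid evsel's cpus to the user list and drop evsels
	 * whose PMU has no CPU in the list; fails if nothing is left.
	 */
	if (evlist__fix_hybrid_cpus(evlist, cpu_list))
		return -1;

	/*
	 * Warn once if any group mixes events from different hybrid PMUs
	 * (e.g. cpu_core and cpu_atom).
	 */
	if (evlist__has_hybrid(evlist))
		evlist__warn_hybrid_group(evlist);

	return 0;
}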