Lines Matching +full:cpu +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
18 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus) in perf_cpu_map__set_nr() argument
20 RC_CHK_ACCESS(map)->nr = nr_cpus; in perf_cpu_map__set_nr()
33 cpus->nr = nr_cpus; in perf_cpu_map__alloc()
34 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__alloc()
44 RC_CHK_ACCESS(cpus)->map[0].cpu = -1; in perf_cpu_map__new_any_cpu()
49 static void cpu_map__delete(struct perf_cpu_map *map) in cpu_map__delete() argument
51 if (map) { in cpu_map__delete()
52 WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0, in cpu_map__delete()
54 RC_CHK_FREE(map); in cpu_map__delete()
58 struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map) in perf_cpu_map__get() argument
62 if (RC_CHK_GET(result, map)) in perf_cpu_map__get()
63 refcount_inc(perf_cpu_map__refcnt(map)); in perf_cpu_map__get()
68 void perf_cpu_map__put(struct perf_cpu_map *map) in perf_cpu_map__put() argument
70 if (map) { in perf_cpu_map__put()
71 if (refcount_dec_and_test(perf_cpu_map__refcnt(map))) in perf_cpu_map__put()
72 cpu_map__delete(map); in perf_cpu_map__put()
74 RC_CHK_PUT(map); in perf_cpu_map__put()
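
The get/put pair above implements plain reference counting: perf_cpu_map__get() takes an extra reference and perf_cpu_map__put() drops one, calling cpu_map__delete() once the count reaches zero. A minimal usage sketch, not part of this file; struct consumer and its helpers are made up for illustration, only the perf_cpu_map__* calls are the real libperf API:

#include <stddef.h>
#include <perf/cpumap.h>

/* Hypothetical object that keeps its own reference to a shared CPU map. */
struct consumer {
	struct perf_cpu_map *cpus;
};

static void consumer__init(struct consumer *c, struct perf_cpu_map *cpus)
{
	/* Take an extra reference; the caller still owns its own. */
	c->cpus = perf_cpu_map__get(cpus);
}

static void consumer__exit(struct consumer *c)
{
	/* Drop our reference; the map is freed once every owner has put it. */
	perf_cpu_map__put(c->cpus);
	c->cpus = NULL;
}
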
89 …pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will on… in cpu_map__new_sysconf()
98 RC_CHK_ACCESS(cpus)->map[i].cpu = i; in cpu_map__new_sysconf()
110 if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) { in cpu_map__new_sysfs_online()
132 return cpu_a->cpu - cpu_b->cpu; in cmp_cpu()
137 return RC_CHK_ACCESS(cpus)->map[idx]; in __perf_cpu_map__cpu()
147 memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size); in cpu_map__trim_new()
148 qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu); in cpu_map__trim_new()
153 __perf_cpu_map__cpu(cpus, i).cpu != in cpu_map__trim_new()
154 __perf_cpu_map__cpu(cpus, i - 1).cpu) { in cpu_map__trim_new()
155 RC_CHK_ACCESS(cpus)->map[j++].cpu = in cpu_map__trim_new()
156 __perf_cpu_map__cpu(cpus, i).cpu; in cpu_map__trim_new()
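
cpu_map__trim_new() above sorts the temporary array with cmp_cpu() and drops adjacent duplicates before sizing the final map, so every perf_cpu_map ends up sorted regardless of the order in which its CPUs were collected. Through the public parser only the sorting is observable, since perf_cpu_map__new() itself rejects a list naming the same CPU twice (see the duplicate check further down). A small sketch assuming the usual libperf header:

#include <assert.h>
#include <perf/cpumap.h>

static void sorted_example(void)
{
	/* The parsed entries are handed to cpu_map__trim_new(), which sorts them. */
	struct perf_cpu_map *cpus = perf_cpu_map__new("3,1,0");

	assert(perf_cpu_map__nr(cpus) == 3);
	assert(perf_cpu_map__cpu(cpus, 0).cpu == 0);
	assert(perf_cpu_map__cpu(cpus, 2).cpu == 3);

	perf_cpu_map__put(cpus);
}
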
179 * TOPOLOGY header for NUMA nodes with no CPU in perf_cpu_map__new()
180 * ( e.g., because of CPU hotplug) in perf_cpu_map__new()
189 || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n')) in perf_cpu_map__new()
192 if (*p == '-') { in perf_cpu_map__new()
212 if (tmp_cpus[i].cpu == (int)start_cpu) in perf_cpu_map__new()
216 max_entries += max(end_cpu - start_cpu + 1, 16UL); in perf_cpu_map__new()
222 tmp_cpus[nr_cpus++].cpu = (int)start_cpu; in perf_cpu_map__new()
233 pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.", in perf_cpu_map__new()
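
A sketch of how the parser above is usually driven: an "N-M,K" style list builds an explicit map, a NULL list falls back to the online CPUs read from sysfs (or sysconf), and, per the comment about empty TOPOLOGY headers, an empty string yields the single "any CPU" entry. The iterator macro is part of the public header; the function name below is illustrative:

#include <stdio.h>
#include <perf/cpumap.h>

static void list_example(void)
{
	/* Explicit list: a range plus a single CPU, i.e. CPUs 0, 1, 2 and 7. */
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-2,7");
	/* NULL asks for all currently online CPUs instead. */
	struct perf_cpu_map *online = perf_cpu_map__new(NULL);
	struct perf_cpu cpu;
	int idx;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(online);
	perf_cpu_map__put(cpus);
}
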
247 return RC_CHK_ACCESS(cpus)->nr; in __perf_cpu_map__nr()
253 .cpu = -1 in perf_cpu_map__cpu()
267 bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map) in perf_cpu_map__has_any_cpu_or_is_empty() argument
269 return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true; in perf_cpu_map__has_any_cpu_or_is_empty()
272 bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map) in perf_cpu_map__is_any_cpu_or_is_empty() argument
274 if (!map) in perf_cpu_map__is_any_cpu_or_is_empty()
277 return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1; in perf_cpu_map__is_any_cpu_or_is_empty()
280 bool perf_cpu_map__is_empty(const struct perf_cpu_map *map) in perf_cpu_map__is_empty() argument
282 return map == NULL; in perf_cpu_map__is_empty()
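
The three predicates above answer related but different questions: is_empty() is true only for a NULL map, has_any_cpu_or_is_empty() is true for a NULL map or one whose first entry is the "any CPU" value -1, and is_any_cpu_or_is_empty() additionally requires that -1 be the map's only entry. A sketch of how they behave on a NULL map, on the map from perf_cpu_map__new_any_cpu() and on a real one, under the same assumed header as the earlier examples:

#include <assert.h>
#include <stddef.h>
#include <perf/cpumap.h>

static void predicate_example(void)
{
	struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
	struct perf_cpu_map *cpu0 = perf_cpu_map__new("0");

	assert(perf_cpu_map__is_empty(NULL));			/* NULL only */
	assert(perf_cpu_map__has_any_cpu_or_is_empty(NULL));	/* NULL, or first entry is -1 */
	assert(perf_cpu_map__has_any_cpu_or_is_empty(any));
	assert(perf_cpu_map__is_any_cpu_or_is_empty(any));	/* exactly one entry, which is -1 */
	assert(!perf_cpu_map__has_any_cpu_or_is_empty(cpu0));

	perf_cpu_map__put(any);
	perf_cpu_map__put(cpu0);
}
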
285 int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu) in perf_cpu_map__idx() argument
290 return -1; in perf_cpu_map__idx()
298 if (cpu_at_idx.cpu == cpu.cpu) in perf_cpu_map__idx()
301 if (cpu_at_idx.cpu > cpu.cpu) in perf_cpu_map__idx()
307 return -1; in perf_cpu_map__idx()
310 bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu) in perf_cpu_map__has() argument
312 return perf_cpu_map__idx(cpus, cpu) != -1; in perf_cpu_map__has()
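
perf_cpu_map__idx() binary-searches the sorted map for a CPU and returns its index, or -1 when the CPU is absent; perf_cpu_map__has() is a thin wrapper over it. A usage sketch under the same assumptions as above:

#include <assert.h>
#include <perf/cpumap.h>

static void lookup_example(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new("0,2,4");
	struct perf_cpu two = { .cpu = 2 };
	struct perf_cpu three = { .cpu = 3 };

	assert(perf_cpu_map__idx(cpus, two) == 1);	/* CPU 2 is the second entry */
	assert(perf_cpu_map__idx(cpus, three) == -1);	/* CPU 3 is not in the map */
	assert(perf_cpu_map__has(cpus, two));
	assert(!perf_cpu_map__has(cpus, three));

	perf_cpu_map__put(cpus);
}
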
330 if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu) in perf_cpu_map__equal()
336 bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map) in perf_cpu_map__has_any_cpu() argument
338 return map && __perf_cpu_map__cpu(map, 0).cpu == -1; in perf_cpu_map__has_any_cpu()
341 struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map) in perf_cpu_map__min() argument
343 struct perf_cpu cpu, result = { in perf_cpu_map__min() local
344 .cpu = -1 in perf_cpu_map__min()
348 perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) { in perf_cpu_map__min()
349 result = cpu; in perf_cpu_map__min()
355 struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map) in perf_cpu_map__max() argument
358 .cpu = -1 in perf_cpu_map__max()
362 return __perf_cpu_map__nr(map) > 0 in perf_cpu_map__max()
363 ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1) in perf_cpu_map__max()
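
Because the map is kept sorted, perf_cpu_map__max() can simply return the last entry, while perf_cpu_map__min() iterates with the skip-any variant so a leading -1 ("any CPU") entry is ignored; both report .cpu == -1 when no real CPU is present. A short sketch:

#include <assert.h>
#include <perf/cpumap.h>

static void min_max_example(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new("2-5");

	assert(perf_cpu_map__min(cpus).cpu == 2);
	assert(perf_cpu_map__max(cpus).cpu == 5);

	perf_cpu_map__put(cpus);
}
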
376 if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu) in perf_cpu_map__is_subset()
378 if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) { in perf_cpu_map__is_subset()
396 * Otherwise, '*orig' gets freed and replaced with a new map.
416 return -ENOMEM; in perf_cpu_map__merge()
421 if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) { in perf_cpu_map__merge()
422 if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu) in perf_cpu_map__merge()
463 if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu) in perf_cpu_map__intersect()
465 else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu) in perf_cpu_map__intersect()
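
Finally, a sketch of combining maps with the merge and intersect routines above. In the version shown here perf_cpu_map__merge() takes a pointer to the original map, may free and replace it with the merged result, and returns 0 on success or a negative errno such as -ENOMEM (older libperf releases returned the merged map directly, so check the header you build against). perf_cpu_map__intersect() returns a new map holding only the CPUs present in both inputs:

#include <assert.h>
#include <perf/cpumap.h>

static void combine_example(void)
{
	struct perf_cpu_map *a = perf_cpu_map__new("0-3");
	struct perf_cpu_map *b = perf_cpu_map__new("2-5");
	struct perf_cpu_map *both;
	int ret;

	/* Intersection: CPUs in both maps, here 2 and 3. */
	both = perf_cpu_map__intersect(a, b);
	assert(perf_cpu_map__nr(both) == 2);

	/* Merge: 'a' now covers 0-5; the old 'a' was put and replaced. */
	ret = perf_cpu_map__merge(&a, b);
	assert(ret == 0 && perf_cpu_map__nr(a) == 6);

	perf_cpu_map__put(both);
	perf_cpu_map__put(b);
	perf_cpu_map__put(a);
}
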