/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;
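
/*
 * Physical topology of one CPU (socket, die and core IDs), as read from
 * the sysfs topology files and recorded in the perf.data header.
 */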
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	core_id;
};
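
/*
 * One cache level of a CPU: geometry (level, line size, sets, ways) plus
 * the sysfs "type", "size" and shared_cpu_map strings.
 */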
struct cpu_cache_level {
	u32	level;
	u32	line_size;
	u32	sets;
	u32	ways;
	char	*type;
	char	*size;
	char	*map;
};
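
/* A NUMA node: its id, memory totals and the CPUs that belong to it. */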
struct numa_node {
	u32			node;
	u64			mem_total;
	u64			mem_free;
	struct perf_cpu_map	*map;
};
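
/*
 * A memory node: 'set' is a bitmap of the memory blocks present on the
 * node and 'size' is the size of that bitmap; the block size itself is
 * kept in perf_env::memory_bsize.
 */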
struct memory_node {
	u64		node;
	u64		size;
	unsigned long	*set;
};
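
/* Name and CPU list of one PMU on a hybrid (e.g. core/atom) system. */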
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};
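
/* Capability strings ("name=value") advertised by one PMU in sysfs. */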
struct pmu_caps {
	int		nr_caps;
	unsigned int	max_branches;
	char		**caps;
	char		*pmu_name;
};
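
/*
 * Describes the environment (software and hardware configuration) of a
 * perf session. It is written to the perf.data file header on record and
 * filled back in from the header on report.
 */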
struct perf_env {
	char			*hostname;
	char			*os_release;
	char			*version;
	char			*arch;
	int			nr_cpus_online;
	int			nr_cpus_avail;
	char			*cpu_desc;
	char			*cpuid;
	unsigned long long	total_mem;
	unsigned int		msr_pmu_type;
	unsigned int		max_branches;
	int			kernel_is_64_bit;

	int			nr_cmdline;
	int			nr_sibling_cores;
	int			nr_sibling_dies;
	int			nr_sibling_threads;
	int			nr_numa_nodes;
	int			nr_memory_nodes;
	int			nr_pmu_mappings;
	int			nr_groups;
	int			nr_cpu_pmu_caps;
	int			nr_hybrid_nodes;
	int			nr_pmus_with_caps;
	char			*cmdline;
	const char		**cmdline_argv;
	char			*sibling_cores;
	char			*sibling_dies;
	char			*sibling_threads;
	char			*pmu_mappings;
	char			**cpu_pmu_caps;
	struct cpu_topology_map	*cpu;
	struct cpu_cache_level	*caches;
	int			caches_cnt;
	u32			comp_ratio;
	u32			comp_ver;
	u32			comp_type;
	u32			comp_level;
	u32			comp_mmap_len;
	struct numa_node	*numa_nodes;
	struct memory_node	*memory_nodes;
	unsigned long long	memory_bsize;
	struct hybrid_node	*hybrid_nodes;
	struct pmu_caps		*pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_info_lock protects the bpf rbtrees. This is needed because
	 * the trees are accessed by different threads in perf-top.
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;
		u32			infos_cnt;
		struct rb_root		btfs;
		u32			btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* Same reason as above (for perf-top). */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node(). */
	int			*numa_map;
	int			nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64	tod_ns;
		u64	clockid_ns;
		u64	clockid_res_ns;
		int	clockid;
		/*
		 * enabled is valid in report mode: it is true if the
		 * values above are set, which happens in
		 * process_clock_data().
		 */
		bool	enabled;
	} clock;
};
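
/* Compression used for the records in a perf.data file. */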
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;
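
/*
 * The environment of the currently running perf tool. Accessors such as
 * perf_env__arch() and perf_env__cpuid() generally fall back to probing
 * the live system when a field was not populated from a perf.data header.
 */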
extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

const char *perf_env__arch(struct perf_env *env);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
void __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);
#endif /* __PERF_ENV_H */