authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:27:49 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:27:49 +0000
commitace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
treeb2d64bc10158fdd5497876388cd68142ca374ed3 /tools/lib/perf
parentInitial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/lib/perf')
-rw-r--r--tools/lib/perf/Build15
-rw-r--r--tools/lib/perf/Documentation/Makefile156
-rw-r--r--tools/lib/perf/Documentation/asciidoc.conf120
-rw-r--r--tools/lib/perf/Documentation/examples/counting.c83
-rw-r--r--tools/lib/perf/Documentation/examples/sampling.c119
-rw-r--r--tools/lib/perf/Documentation/libperf-counting.txt213
-rw-r--r--tools/lib/perf/Documentation/libperf-sampling.txt244
-rw-r--r--tools/lib/perf/Documentation/libperf.txt251
-rw-r--r--tools/lib/perf/Documentation/manpage-1.72.xsl14
-rw-r--r--tools/lib/perf/Documentation/manpage-base.xsl35
-rw-r--r--tools/lib/perf/Documentation/manpage-bold-literal.xsl17
-rw-r--r--tools/lib/perf/Documentation/manpage-normal.xsl13
-rw-r--r--tools/lib/perf/Documentation/manpage-suppress-sp.xsl21
-rw-r--r--tools/lib/perf/Makefile220
-rw-r--r--tools/lib/perf/core.c38
-rw-r--r--tools/lib/perf/cpumap.c481
-rw-r--r--tools/lib/perf/evlist.c740
-rw-r--r--tools/lib/perf/evsel.c557
-rw-r--r--tools/lib/perf/include/internal/cpumap.h38
-rw-r--r--tools/lib/perf/include/internal/evlist.h138
-rw-r--r--tools/lib/perf/include/internal/evsel.h91
-rw-r--r--tools/lib/perf/include/internal/lib.h14
-rw-r--r--tools/lib/perf/include/internal/mmap.h59
-rw-r--r--tools/lib/perf/include/internal/rc_check.h106
-rw-r--r--tools/lib/perf/include/internal/tests.h67
-rw-r--r--tools/lib/perf/include/internal/threadmap.h23
-rw-r--r--tools/lib/perf/include/internal/xyarray.h43
-rw-r--r--tools/lib/perf/include/perf/bpf_perf.h31
-rw-r--r--tools/lib/perf/include/perf/core.h25
-rw-r--r--tools/lib/perf/include/perf/cpumap.h57
-rw-r--r--tools/lib/perf/include/perf/event.h517
-rw-r--r--tools/lib/perf/include/perf/evlist.h51
-rw-r--r--tools/lib/perf/include/perf/evsel.h50
-rw-r--r--tools/lib/perf/include/perf/mmap.h15
-rw-r--r--tools/lib/perf/include/perf/threadmap.h21
-rw-r--r--tools/lib/perf/internal.h23
-rw-r--r--tools/lib/perf/lib.c68
-rw-r--r--tools/lib/perf/libperf.map59
-rw-r--r--tools/lib/perf/libperf.pc.template11
-rw-r--r--tools/lib/perf/mmap.c525
-rw-r--r--tools/lib/perf/tests/Build5
-rw-r--r--tools/lib/perf/tests/main.c15
-rw-r--r--tools/lib/perf/tests/test-cpumap.c43
-rw-r--r--tools/lib/perf/tests/test-evlist.c589
-rw-r--r--tools/lib/perf/tests/test-evsel.c367
-rw-r--r--tools/lib/perf/tests/test-threadmap.c73
-rw-r--r--tools/lib/perf/tests/tests.h10
-rw-r--r--tools/lib/perf/threadmap.c101
-rw-r--r--tools/lib/perf/xyarray.c33
49 files changed, 6605 insertions, 0 deletions
diff --git a/tools/lib/perf/Build b/tools/lib/perf/Build
new file mode 100644
index 0000000000..e8f5b7fb99
--- /dev/null
+++ b/tools/lib/perf/Build
@@ -0,0 +1,15 @@
+libperf-y += core.o
+libperf-y += cpumap.o
+libperf-y += threadmap.o
+libperf-y += evsel.o
+libperf-y += evlist.o
+libperf-y += mmap.o
+libperf-y += zalloc.o
+libperf-y += xyarray.o
+libperf-y += lib.o
+
+$(OUTPUT)zalloc.o: ../../lib/zalloc.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+tests-y += tests/
diff --git a/tools/lib/perf/Documentation/Makefile b/tools/lib/perf/Documentation/Makefile
new file mode 100644
index 0000000000..972754082a
--- /dev/null
+++ b/tools/lib/perf/Documentation/Makefile
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Most of this file is copied from tools/perf/Documentation/Makefile
+
+include ../../../scripts/Makefile.include
+include ../../../scripts/utilities.mak
+
+MAN3_TXT = libperf.txt
+MAN7_TXT = libperf-counting.txt libperf-sampling.txt
+MAN_EX = examples/*.c
+
+MAN_TXT = $(MAN3_TXT) $(MAN7_TXT)
+
+_MAN_XML = $(patsubst %.txt,%.xml,$(MAN_TXT))
+_MAN_HTML = $(patsubst %.txt,%.html,$(MAN_TXT))
+_MAN_3 = $(patsubst %.txt,%.3,$(MAN3_TXT))
+_MAN_7 = $(patsubst %.txt,%.7,$(MAN7_TXT))
+
+MAN_XML = $(addprefix $(OUTPUT),$(_MAN_XML))
+MAN_HTML = $(addprefix $(OUTPUT),$(_MAN_HTML))
+MAN_3 = $(addprefix $(OUTPUT),$(_MAN_3))
+MAN_7 = $(addprefix $(OUTPUT),$(_MAN_7))
+MAN_X = $(MAN_3) $(MAN_7)
+
+# Make the path relative to DESTDIR, not prefix
+ifndef DESTDIR
+ prefix ?=$(HOME)
+endif
+
+mandir ?= $(prefix)/share/man
+man3dir = $(mandir)/man3
+man7dir = $(mandir)/man7
+
+docdir ?= $(prefix)/share/doc/libperf
+htmldir = $(docdir)/html
+exdir = $(docdir)/examples
+
+ASCIIDOC = asciidoc
+ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
+ASCIIDOC_HTML = xhtml11
+MANPAGE_XSL = manpage-normal.xsl
+XMLTO_EXTRA =
+XMLTO =xmlto
+
+INSTALL ?= install
+RM ?= rm -f
+
+# For asciidoc ...
+# -7.1.2, no extra settings are needed.
+# 8.0-, set ASCIIDOC8.
+#
+
+# For docbook-xsl ...
+# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0)
+# 1.69.0, no extra settings are needed?
+# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP?
+# 1.71.1, no extra settings are needed?
+# 1.72.0, set DOCBOOK_XSL_172.
+# 1.73.0-, set ASCIIDOC_NO_ROFF
+
+# If you had been using DOCBOOK_XSL_172 in an attempt to get rid
+# of 'the ".ft C" problem' in your generated manpages, and you
+# instead ended up with weird characters around callouts, try
+# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8).
+
+ifdef ASCIIDOC8
+ ASCIIDOC_EXTRA += -a asciidoc7compatible
+endif
+ifdef DOCBOOK_XSL_172
+ ASCIIDOC_EXTRA += -a libperf-asciidoc-no-roff
+ MANPAGE_XSL = manpage-1.72.xsl
+else
+ ifdef ASCIIDOC_NO_ROFF
+ # docbook-xsl after 1.72 needs the regular XSL, but will not
+ # pass-thru raw roff codes from asciidoc.conf, so turn them off.
+ ASCIIDOC_EXTRA += -a libperf-asciidoc-no-roff
+ endif
+endif
+ifdef MAN_BOLD_LITERAL
+ XMLTO_EXTRA += -m manpage-bold-literal.xsl
+endif
+ifdef DOCBOOK_SUPPRESS_SP
+ XMLTO_EXTRA += -m manpage-suppress-sp.xsl
+endif
+
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+export DESTDIR DESTDIR_SQ
+
+# Please note that there is a minor bug in asciidoc.
+# The version after 6.0.3 _will_ include the patch found here:
+# http://marc.theaimsgroup.com/?l=libtraceevent&m=111558757202243&w=2
+#
+# Until that version is released you may have to apply the patch
+# yourself - yes, all 6 characters of it!
+
+QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1 =
+
+ifneq ($(findstring $(MAKEFLAGS),w),w)
+ PRINT_DIR = --no-print-directory
+else # "make -w"
+ NO_SUBDIR = :
+endif
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ ifneq ($(V),1)
+ QUIET_ASCIIDOC = @echo ' ASCIIDOC '$@;
+ QUIET_XMLTO = @echo ' XMLTO '$@;
+ endif
+endif
+
+all: $(MAN_X) $(MAN_HTML)
+
+$(MAN_HTML) $(MAN_X): asciidoc.conf
+
+install-man: all
+ $(call QUIET_INSTALL, man) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(man3dir); \
+ $(INSTALL) -m 644 $(MAN_3) $(DESTDIR)$(man3dir); \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir); \
+ $(INSTALL) -m 644 $(MAN_7) $(DESTDIR)$(man7dir);
+
+install-html:
+ $(call QUIET_INSTALL, html) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(htmldir); \
+ $(INSTALL) -m 644 $(MAN_HTML) $(DESTDIR)$(htmldir); \
+
+install-examples:
+ $(call QUIET_INSTALL, examples) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(exdir); \
+ $(INSTALL) -m 644 $(MAN_EX) $(DESTDIR)$(exdir); \
+
+CLEAN_FILES = \
+ $(MAN_XML) $(addsuffix +,$(MAN_XML)) \
+ $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) \
+ $(MAN_X)
+
+clean:
+ $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
+
+$(MAN_3): $(OUTPUT)%.3: %.xml
+ $(QUIET_XMLTO)$(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+$(MAN_7): $(OUTPUT)%.7: %.xml
+ $(QUIET_XMLTO)$(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+$(MAN_XML): $(OUTPUT)%.xml: %.txt
+ $(QUIET_ASCIIDOC)$(ASCIIDOC) -b docbook -d manpage \
+ $(ASCIIDOC_EXTRA) -alibperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+ mv $@+ $@
+
+$(MAN_HTML): $(OUTPUT)%.html: %.txt
+ $(QUIET_ASCIIDOC)$(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
+ $(ASCIIDOC_EXTRA) -aperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+ mv $@+ $@
diff --git a/tools/lib/perf/Documentation/asciidoc.conf b/tools/lib/perf/Documentation/asciidoc.conf
new file mode 100644
index 0000000000..9d5a5a5ee0
--- /dev/null
+++ b/tools/lib/perf/Documentation/asciidoc.conf
@@ -0,0 +1,120 @@
+## linktep: macro
+#
+# Usage: linktep:command[manpage-section]
+#
+# Note, {0} is the manpage section, while {target} is the command.
+#
+# Show TEP link as: <command>(<section>) if section is defined, else just show
+# the command.
+
+[macros]
+(?su)[\\]?(?P<name>linktep):(?P<target>\S*?)\[(?P<attrlist>.*?)\]=
+
+[attributes]
+asterisk=&#42;
+plus=&#43;
+caret=&#94;
+startsb=&#91;
+endsb=&#93;
+tilde=&#126;
+
+ifdef::backend-docbook[]
+[linktep-inlinemacro]
+{0%{target}}
+{0#<citerefentry>}
+{0#<refentrytitle>{target}</refentrytitle><manvolnum>{0}</manvolnum>}
+{0#</citerefentry>}
+endif::backend-docbook[]
+
+ifdef::backend-docbook[]
+ifndef::tep-asciidoc-no-roff[]
+# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
+# v1.72 breaks with this because it replaces dots not in roff requests.
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+ifdef::doctype-manpage[]
+&#10;.ft C&#10;
+endif::doctype-manpage[]
+|
+ifdef::doctype-manpage[]
+&#10;.ft&#10;
+endif::doctype-manpage[]
+</literallayout>
+{title#}</example>
+endif::tep-asciidoc-no-roff[]
+
+ifdef::tep-asciidoc-no-roff[]
+ifdef::doctype-manpage[]
+# The following two small workarounds insert a simple paragraph after screen
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+|
+</literallayout><simpara></simpara>
+{title#}</example>
+
+[verseblock]
+<formalpara{id? id="{id}"}><title>{title}</title><para>
+{title%}<literallayout{id? id="{id}"}>
+{title#}<literallayout>
+|
+</literallayout>
+{title#}</para></formalpara>
+{title%}<simpara></simpara>
+endif::doctype-manpage[]
+endif::tep-asciidoc-no-roff[]
+endif::backend-docbook[]
+
+ifdef::doctype-manpage[]
+ifdef::backend-docbook[]
+[header]
+template::[header-declarations]
+<refentry>
+<refmeta>
+<refentrytitle>{mantitle}</refentrytitle>
+<manvolnum>{manvolnum}</manvolnum>
+<refmiscinfo class="source">libperf</refmiscinfo>
+<refmiscinfo class="version">{libperf_version}</refmiscinfo>
+<refmiscinfo class="manual">libperf Manual</refmiscinfo>
+</refmeta>
+<refnamediv>
+ <refname>{manname1}</refname>
+ <refname>{manname2}</refname>
+ <refname>{manname3}</refname>
+ <refname>{manname4}</refname>
+ <refname>{manname5}</refname>
+ <refname>{manname6}</refname>
+ <refname>{manname7}</refname>
+ <refname>{manname8}</refname>
+ <refname>{manname9}</refname>
+ <refname>{manname10}</refname>
+ <refname>{manname11}</refname>
+ <refname>{manname12}</refname>
+ <refname>{manname13}</refname>
+ <refname>{manname14}</refname>
+ <refname>{manname15}</refname>
+ <refname>{manname16}</refname>
+ <refname>{manname17}</refname>
+ <refname>{manname18}</refname>
+ <refname>{manname19}</refname>
+ <refname>{manname20}</refname>
+ <refname>{manname21}</refname>
+ <refname>{manname22}</refname>
+ <refname>{manname23}</refname>
+ <refname>{manname24}</refname>
+ <refname>{manname25}</refname>
+ <refname>{manname26}</refname>
+ <refname>{manname27}</refname>
+ <refname>{manname28}</refname>
+ <refname>{manname29}</refname>
+ <refname>{manname30}</refname>
+ <refpurpose>{manpurpose}</refpurpose>
+</refnamediv>
+endif::backend-docbook[]
+endif::doctype-manpage[]
+
+ifdef::backend-xhtml11[]
+[linktep-inlinemacro]
+<a href="{target}.html">{target}{0?({0})}</a>
+endif::backend-xhtml11[]
diff --git a/tools/lib/perf/Documentation/examples/counting.c b/tools/lib/perf/Documentation/examples/counting.c
new file mode 100644
index 0000000000..6085693571
--- /dev/null
+++ b/tools/lib/perf/Documentation/examples/counting.c
@@ -0,0 +1,83 @@
+#include <linux/perf_event.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <perf/mmap.h>
+#include <perf/core.h>
+#include <perf/event.h>
+#include <stdio.h>
+#include <unistd.h>
+
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
+int main(int argc, char **argv)
+{
+ int count = 100000, err = 0;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_thread_map *threads;
+ struct perf_counts_values counts;
+
+ struct perf_event_attr attr1 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
+ .disabled = 1,
+ };
+ struct perf_event_attr attr2 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
+ .disabled = 1,
+ };
+
+ libperf_init(libperf_print);
+ threads = perf_thread_map__new_dummy();
+ if (!threads) {
+ fprintf(stderr, "failed to create threads\n");
+ return -1;
+ }
+ perf_thread_map__set_pid(threads, 0, 0);
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ fprintf(stderr, "failed to create evlist\n");
+ goto out_threads;
+ }
+ evsel = perf_evsel__new(&attr1);
+ if (!evsel) {
+ fprintf(stderr, "failed to create evsel1\n");
+ goto out_evlist;
+ }
+ perf_evlist__add(evlist, evsel);
+ evsel = perf_evsel__new(&attr2);
+ if (!evsel) {
+ fprintf(stderr, "failed to create evsel2\n");
+ goto out_evlist;
+ }
+ perf_evlist__add(evlist, evsel);
+ perf_evlist__set_maps(evlist, NULL, threads);
+ err = perf_evlist__open(evlist);
+ if (err) {
+ fprintf(stderr, "failed to open evsel\n");
+ goto out_evlist;
+ }
+ perf_evlist__enable(evlist);
+ while (count--);
+ perf_evlist__disable(evlist);
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts);
+ fprintf(stdout, "count %llu, enabled %llu, run %llu\n",
+ counts.val, counts.ena, counts.run);
+ }
+ perf_evlist__close(evlist);
+out_evlist:
+ perf_evlist__delete(evlist);
+out_threads:
+ perf_thread_map__put(threads);
+ return err;
+}
diff --git a/tools/lib/perf/Documentation/examples/sampling.c b/tools/lib/perf/Documentation/examples/sampling.c
new file mode 100644
index 0000000000..8e1a926a9c
--- /dev/null
+++ b/tools/lib/perf/Documentation/examples/sampling.c
@@ -0,0 +1,119 @@
+#include <linux/perf_event.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <perf/mmap.h>
+#include <perf/core.h>
+#include <perf/event.h>
+#include <stdio.h>
+#include <unistd.h>
+
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
+union u64_swap {
+ __u64 val64;
+ __u32 val32[2];
+};
+
+int main(int argc, char **argv)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_mmap *map;
+ struct perf_cpu_map *cpus;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .disabled = 1,
+ .freq = 1,
+ .sample_freq = 10,
+ .sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_TID|PERF_SAMPLE_CPU|PERF_SAMPLE_PERIOD,
+ };
+ int err = -1;
+ union perf_event *event;
+
+ libperf_init(libperf_print);
+
+ cpus = perf_cpu_map__new(NULL);
+ if (!cpus) {
+ fprintf(stderr, "failed to create cpus\n");
+ return -1;
+ }
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ fprintf(stderr, "failed to create evlist\n");
+ goto out_cpus;
+ }
+
+ evsel = perf_evsel__new(&attr);
+ if (!evsel) {
+ fprintf(stderr, "failed to create cycles\n");
+ goto out_cpus;
+ }
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_maps(evlist, cpus, NULL);
+
+ err = perf_evlist__open(evlist);
+ if (err) {
+ fprintf(stderr, "failed to open evlist\n");
+ goto out_evlist;
+ }
+
+ err = perf_evlist__mmap(evlist, 4);
+ if (err) {
+ fprintf(stderr, "failed to mmap evlist\n");
+ goto out_evlist;
+ }
+
+ perf_evlist__enable(evlist);
+ sleep(3);
+ perf_evlist__disable(evlist);
+
+ perf_evlist__for_each_mmap(evlist, map, false) {
+ if (perf_mmap__read_init(map) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ int cpu, pid, tid;
+ __u64 ip, period, *array;
+ union u64_swap u;
+
+ array = event->sample.array;
+
+ ip = *array;
+ array++;
+
+ u.val64 = *array;
+ pid = u.val32[0];
+ tid = u.val32[1];
+ array++;
+
+ u.val64 = *array;
+ cpu = u.val32[0];
+ array++;
+
+ period = *array;
+
+ fprintf(stdout, "cpu %3d, pid %6d, tid %6d, ip %20llx, period %20llu\n",
+ cpu, pid, tid, ip, period);
+
+ perf_mmap__consume(map);
+ }
+
+ perf_mmap__read_done(map);
+ }
+
+out_evlist:
+ perf_evlist__delete(evlist);
+out_cpus:
+ perf_cpu_map__put(cpus);
+ return err;
+}
diff --git a/tools/lib/perf/Documentation/libperf-counting.txt b/tools/lib/perf/Documentation/libperf-counting.txt
new file mode 100644
index 0000000000..8b75efcd67
--- /dev/null
+++ b/tools/lib/perf/Documentation/libperf-counting.txt
@@ -0,0 +1,213 @@
+libperf-counting(7)
+===================
+
+NAME
+----
+libperf-counting - counting interface
+
+DESCRIPTION
+-----------
+The counting interface provides an API to measure and read counts for specific perf events.
+
+The following walkthrough explains counting using the `counting.c` example.
+
+It is by no means a complete guide to counting, but it shows the basic libperf API for counting.
+
+The `counting.c` example comes with the libperf package and can be compiled and run like:
+
+[source,bash]
+--
+$ gcc -o counting counting.c -lperf
+$ sudo ./counting
+count 176792, enabled 176944, run 176944
+count 176242, enabled 176242, run 176242
+--
+
+It may require root access (or a lowered `/proc/sys/kernel/perf_event_paranoid`
+setting), because access to the `PERF_COUNT_SW_CPU_CLOCK` event can be
+restricted to privileged users.
+
+The `counting.c` example monitors two events on the current process and
+displays their counts. In a nutshell, it:
+
+* creates events
+* adds them to the event list
+* opens and enables events through the event list
+* does some workload
+* disables events
+* reads and displays event counts
+* destroys the event list
+
+The first thing you need to do before using libperf is to call the init function:
+
+[source,c]
+--
+ 8 static int libperf_print(enum libperf_print_level level,
+ 9 const char *fmt, va_list ap)
+ 10 {
+ 11 return vfprintf(stderr, fmt, ap);
+ 12 }
+
+ 14 int main(int argc, char **argv)
+ 15 {
+ ...
+ 35 libperf_init(libperf_print);
+--
+
+This sets up the library and registers the function used for debug output from the library.
+
+The `libperf_print` callback will receive any message with its debug level,
+defined as:
+
+[source,c]
+--
+enum libperf_print_level {
+ LIBPERF_ERR,
+ LIBPERF_WARN,
+ LIBPERF_INFO,
+ LIBPERF_DEBUG,
+ LIBPERF_DEBUG2,
+ LIBPERF_DEBUG3,
+};
+--
+
+Once the setup is complete, we start by defining the specific events using `struct perf_event_attr`.
+
+We create software events for the CPU clock and the task clock:
+
+[source,c]
+--
+ 20 struct perf_event_attr attr1 = {
+ 21 .type = PERF_TYPE_SOFTWARE,
+ 22 .config = PERF_COUNT_SW_CPU_CLOCK,
+ 23 .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
+ 24 .disabled = 1,
+ 25 };
+ 26 struct perf_event_attr attr2 = {
+ 27 .type = PERF_TYPE_SOFTWARE,
+ 28 .config = PERF_COUNT_SW_TASK_CLOCK,
+ 29 .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
+ 30 .disabled = 1,
+ 31 };
+--
+
+The `read_format` setup tells perf to include timing details together with each count.
+
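+If the kernel multiplexes events, the enabled time (`ena`) and the running
+time (`run`) will differ, and the raw count can be scaled up to an estimate.
+As a minimal sketch (the `scaled_count` helper below is illustrative, not part
+of libperf), the scaling can be computed from `struct perf_counts_values`:
+
+[source,c]
+--
+static uint64_t scaled_count(const struct perf_counts_values *counts)
+{
+	/* The event never ran: nothing to scale. */
+	if (counts->run == 0)
+		return 0;
+
+	/* Extrapolate the raw value over the time the event was enabled. */
+	return (uint64_t)((double)counts->val * counts->ena / counts->run);
+}
+--
+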
+The next step is to prepare the threads map.
+
+In this case we will monitor the current process, so we create a threads map with a single pid (0):
+
+[source,c]
+--
+ 37 threads = perf_thread_map__new_dummy();
+ 38 if (!threads) {
+ 39 fprintf(stderr, "failed to create threads\n");
+ 40 return -1;
+ 41 }
+ 42
+ 43 perf_thread_map__set_pid(threads, 0, 0);
+--
+
+Now we create libperf's event list, which will serve as a holder for the events we want:
+
+[source,c]
+--
+ 45 evlist = perf_evlist__new();
+ 46 if (!evlist) {
+ 47 fprintf(stderr, "failed to create evlist\n");
+ 48 goto out_threads;
+ 49 }
+--
+
+We create libperf's events for the attributes we defined earlier and add them to the list:
+
+[source,c]
+--
+ 51 evsel = perf_evsel__new(&attr1);
+ 52 if (!evsel) {
+ 53 fprintf(stderr, "failed to create evsel1\n");
+ 54 goto out_evlist;
+ 55 }
+ 56
+ 57 perf_evlist__add(evlist, evsel);
+ 58
+ 59 evsel = perf_evsel__new(&attr2);
+ 60 if (!evsel) {
+ 61 fprintf(stderr, "failed to create evsel2\n");
+ 62 goto out_evlist;
+ 63 }
+ 64
+ 65 perf_evlist__add(evlist, evsel);
+--
+
+We configure the event list with the thread map and open the events:
+
+[source,c]
+--
+ 67 perf_evlist__set_maps(evlist, NULL, threads);
+ 68
+ 69 err = perf_evlist__open(evlist);
+ 70 if (err) {
+ 71 fprintf(stderr, "failed to open evsel\n");
+ 72 goto out_evlist;
+ 73 }
+--
+
+Both events are created as disabled (note the `disabled = 1` assignment above),
+so we need to enable the whole list explicitly (both events).
+
+From this moment on, the events are counting and we can run our workload.
+
+When we are done, we disable the event list.
+
+[source,c]
+--
+ 75 perf_evlist__enable(evlist);
+ 76
+ 77 while (count--);
+ 78
+ 79 perf_evlist__disable(evlist);
+--
+
+Now we need to get the counts from the events. The following code iterates
+through the event list and reads the counts:
+
+[source,c]
+--
+ 81 perf_evlist__for_each_evsel(evlist, evsel) {
+ 82 perf_evsel__read(evsel, 0, 0, &counts);
+ 83 fprintf(stdout, "count %llu, enabled %llu, run %llu\n",
+ 84 counts.val, counts.ena, counts.run);
+ 85 }
+--
+
+And finally, cleanup.
+
+We close the whole event list (both events) and delete it together with the threads map:
+
+[source,c]
+--
+ 87 perf_evlist__close(evlist);
+ 88
+ 89 out_evlist:
+ 90 perf_evlist__delete(evlist);
+ 91 out_threads:
+ 92 perf_thread_map__put(threads);
+ 93 return err;
+ 94 }
+--
+
+REPORTING BUGS
+--------------
+Report bugs to <linux-perf-users@vger.kernel.org>.
+
+LICENSE
+-------
+libperf is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+SEE ALSO
+--------
+libperf(3), libperf-sampling(7)
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
new file mode 100644
index 0000000000..d6ca24f6ef
--- /dev/null
+++ b/tools/lib/perf/Documentation/libperf-sampling.txt
@@ -0,0 +1,244 @@
+libperf-sampling(7)
+===================
+
+NAME
+----
+libperf-sampling - sampling interface
+
+
+DESCRIPTION
+-----------
+The sampling interface provides an API to record and read samples for specific perf events.
+
+The following walkthrough explains sampling using the `sampling.c` example.
+
+It is by no means a complete guide to sampling, but it shows the basic libperf API for sampling.
+
+The `sampling.c` example comes with the libperf package and can be compiled and run like:
+
+[source,bash]
+--
+$ gcc -o sampling sampling.c -lperf
+$ sudo ./sampling
+cpu 0, pid 0, tid 0, ip ffffffffad06c4e6, period 1
+cpu 0, pid 4465, tid 4469, ip ffffffffad118748, period 18322959
+cpu 0, pid 0, tid 0, ip ffffffffad115722, period 33544846
+cpu 0, pid 4465, tid 4470, ip 7f84fe0cdad6, period 23687474
+cpu 0, pid 0, tid 0, ip ffffffffad9e0349, period 34255790
+cpu 0, pid 4465, tid 4469, ip ffffffffad136581, period 38664069
+cpu 0, pid 0, tid 0, ip ffffffffad9e55e2, period 21922384
+cpu 0, pid 4465, tid 4470, ip 7f84fe0ebebf, period 17655175
+...
+--
+
+It requires root access (or a lowered `/proc/sys/kernel/perf_event_paranoid`
+setting), because it profiles all CPUs system-wide with the hardware cycles event.
+
+The `sampling.c` example profiles/samples all CPUs with the hardware cycles
+event. In a nutshell, it:
+
+- creates events
+- adds them to the event list
+- opens and enables events through the event list
+- sleeps for 3 seconds
+- disables events
+- reads and displays recorded samples
+- destroys the event list
+
+The first thing you need to do before using libperf is to call the init function:
+
+[source,c]
+--
+ 12 static int libperf_print(enum libperf_print_level level,
+ 13 const char *fmt, va_list ap)
+ 14 {
+ 15 return vfprintf(stderr, fmt, ap);
+ 16 }
+
+ 23 int main(int argc, char **argv)
+ 24 {
+ ...
+ 40 libperf_init(libperf_print);
+--
+
+This sets up the library and registers the function used for debug output from the library.
+
+The `libperf_print` callback will receive any message with its debug level,
+defined as:
+
+[source,c]
+--
+enum libperf_print_level {
+ LIBPERF_ERR,
+ LIBPERF_WARN,
+ LIBPERF_INFO,
+ LIBPERF_DEBUG,
+ LIBPERF_DEBUG2,
+ LIBPERF_DEBUG3,
+};
+--
+
+Once the setup is complete, we start by defining the cycles event using `struct perf_event_attr`:
+
+[source,c]
+--
+ 29 struct perf_event_attr attr = {
+ 30 .type = PERF_TYPE_HARDWARE,
+ 31 .config = PERF_COUNT_HW_CPU_CYCLES,
+ 32 .disabled = 1,
+ 33 .freq = 1,
+ 34 .sample_freq = 10,
+ 35 .sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_TID|PERF_SAMPLE_CPU|PERF_SAMPLE_PERIOD,
+ 36 };
+--
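+
+With `freq = 1`, `sample_freq` is interpreted as a rate: the kernel
+auto-adjusts the sampling period to deliver roughly 10 samples per second.
+With `freq = 0`, `sample_period` would be used instead, taking one sample
+every N occurrences of the event.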
+
+The next step is to prepare the CPUs map.
+
+In this case we will monitor all the available CPUs:
+
+[source,c]
+--
+ 42 cpus = perf_cpu_map__new(NULL);
+ 43 if (!cpus) {
+ 44 fprintf(stderr, "failed to create cpus\n");
+ 45 return -1;
+ 46 }
+--
+
+Now we create libperf's event list, which will serve as a holder for the cycles event:
+
+[source,c]
+--
+ 48 evlist = perf_evlist__new();
+ 49 if (!evlist) {
+ 50 fprintf(stderr, "failed to create evlist\n");
+ 51 goto out_cpus;
+ 52 }
+--
+
+We create libperf's event for the cycles attribute we defined earlier and add it to the list:
+
+[source,c]
+--
+ 54 evsel = perf_evsel__new(&attr);
+ 55 if (!evsel) {
+ 56 fprintf(stderr, "failed to create cycles\n");
+ 57 goto out_cpus;
+ 58 }
+ 59
+ 60 perf_evlist__add(evlist, evsel);
+--
+
+We configure the event list with the CPUs map and open the event:
+
+[source,c]
+--
+ 62 perf_evlist__set_maps(evlist, cpus, NULL);
+ 63
+ 64 err = perf_evlist__open(evlist);
+ 65 if (err) {
+ 66 fprintf(stderr, "failed to open evlist\n");
+ 67 goto out_evlist;
+ 68 }
+--
+
+Once the event list is open, we can create the memory maps, a.k.a. the perf ring buffers:
+
+[source,c]
+--
+ 70 err = perf_evlist__mmap(evlist, 4);
+ 71 if (err) {
+ 72 fprintf(stderr, "failed to mmap evlist\n");
+ 73 goto out_evlist;
+ 74 }
+--
+
+The event is created as disabled (note the `disabled = 1` assignment above),
+so we need to enable the event list explicitly.
+
+From this moment on, the cycles event is sampling.
+
+We sleep for 3 seconds while the ring buffers fill with data from all CPUs; then we disable the event list.
+
+[source,c]
+--
+ 76 perf_evlist__enable(evlist);
+ 77 sleep(3);
+ 78 perf_evlist__disable(evlist);
+--
+
+The following code walks through the ring buffers and reads the stored events/samples:
+
+[source,c]
+--
+ 80 perf_evlist__for_each_mmap(evlist, map, false) {
+ 81 if (perf_mmap__read_init(map) < 0)
+ 82 continue;
+ 83
+ 84 while ((event = perf_mmap__read_event(map)) != NULL) {
+
+ /* process event */
+
+108 perf_mmap__consume(map);
+109 }
+110 perf_mmap__read_done(map);
+111 }
+
+--
+
+Each sample then needs to be parsed. The fields appear in the order of the
+`sample_type` bits set in the event attribute, here IP, TID, CPU and PERIOD:
+
+[source,c]
+--
+ 85 int cpu, pid, tid;
+ 86 __u64 ip, period, *array;
+ 87 union u64_swap u;
+ 88
+ 89 array = event->sample.array;
+ 90
+ 91 ip = *array;
+ 92 array++;
+ 93
+ 94 u.val64 = *array;
+ 95 pid = u.val32[0];
+ 96 tid = u.val32[1];
+ 97 array++;
+ 98
+ 99 u.val64 = *array;
+100 cpu = u.val32[0];
+101 array++;
+102
+103 period = *array;
+104
+105 fprintf(stdout, "cpu %3d, pid %6d, tid %6d, ip %20llx, period %20llu\n",
+106 cpu, pid, tid, ip, period);
+--
+
+And finally, cleanup.
+
+We delete the whole event list (holding the cycles event) together with the CPUs map:
+
+[source,c]
+--
+113 out_evlist:
+114 perf_evlist__delete(evlist);
+115 out_cpus:
+116 perf_cpu_map__put(cpus);
+117 return err;
+118 }
+--
+
+REPORTING BUGS
+--------------
+Report bugs to <linux-perf-users@vger.kernel.org>.
+
+LICENSE
+-------
+libperf is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+SEE ALSO
+--------
+libperf(3), libperf-counting(7)
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
new file mode 100644
index 0000000000..a8f1a23793
--- /dev/null
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -0,0 +1,251 @@
+libperf(3)
+==========
+
+NAME
+----
+libperf - Linux kernel perf event library
+
+
+SYNOPSIS
+--------
+*Generic API:*
+
+[source,c]
+--
+ #include <perf/core.h>
+
+ enum libperf_print_level {
+ LIBPERF_ERR,
+ LIBPERF_WARN,
+ LIBPERF_INFO,
+ LIBPERF_DEBUG,
+ LIBPERF_DEBUG2,
+ LIBPERF_DEBUG3,
+ };
+
+ typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
+ const char *, va_list ap);
+
+ void libperf_init(libperf_print_fn_t fn);
+--
+
+*API to handle CPU maps:*
+
+[source,c]
+--
+ #include <perf/cpumap.h>
+
+ struct perf_cpu_map;
+
+ struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
+ struct perf_cpu_map *perf_cpu_map__read(FILE *file);
+ struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
+ struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other);
+ void perf_cpu_map__put(struct perf_cpu_map *map);
+ int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+ int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+ bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+ int perf_cpu_map__max(struct perf_cpu_map *map);
+ bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
+
+ #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+--
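+
+As a brief usage sketch (not part of the synopsis above; it assumes the
+`struct perf_cpu` element type used by the implementation in this version):
+
+[source,c]
+--
+	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3"); /* NULL: all online CPUs */
+	struct perf_cpu cpu;
+	int idx;
+
+	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+		printf("idx %d: cpu %d\n", idx, cpu.cpu);
+
+	perf_cpu_map__put(cpus);
+--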
+
+*API to handle thread maps:*
+
+[source,c]
+--
+ #include <perf/threadmap.h>
+
+ struct perf_thread_map;
+
+ struct perf_thread_map *perf_thread_map__new_dummy(void);
+ struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
+
+ void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
+ char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
+ int perf_thread_map__nr(struct perf_thread_map *threads);
+ pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
+
+ struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
+ void perf_thread_map__put(struct perf_thread_map *map);
+--
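+
+A short sketch (with hypothetical pids) building a map for existing processes:
+
+[source,c]
+--
+	pid_t pids[] = { 1234, 1235 };	/* hypothetical pids to monitor */
+	struct perf_thread_map *threads = perf_thread_map__new_array(2, pids);
+
+	if (threads) {
+		/* ... pass the map to perf_evsel__open() or an evlist ... */
+		perf_thread_map__put(threads);
+	}
+--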
+
+*API to handle event lists:*
+
+[source,c]
+--
+ #include <perf/evlist.h>
+
+ struct perf_evlist;
+
+ void perf_evlist__add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+ void perf_evlist__remove(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+ struct perf_evlist *perf_evlist__new(void);
+ void perf_evlist__delete(struct perf_evlist *evlist);
+ struct perf_evsel* perf_evlist__next(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+ int perf_evlist__open(struct perf_evlist *evlist);
+ void perf_evlist__close(struct perf_evlist *evlist);
+ void perf_evlist__enable(struct perf_evlist *evlist);
+ void perf_evlist__disable(struct perf_evlist *evlist);
+
+ #define perf_evlist__for_each_evsel(evlist, pos)
+
+ void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads);
+ int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
+ int perf_evlist__filter_pollfd(struct perf_evlist *evlist,
+ short revents_and_mask);
+
+ int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
+ void perf_evlist__munmap(struct perf_evlist *evlist);
+
+ struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
+ struct perf_mmap *map,
+ bool overwrite);
+
+ #define perf_evlist__for_each_mmap(evlist, pos, overwrite)
+--
+
+*API to handle events:*
+
+[source,c]
+--
+ #include <perf/evsel.h>
+
+ struct perf_evsel;
+
+ struct perf_counts_values {
+ union {
+ struct {
+ uint64_t val;
+ uint64_t ena;
+ uint64_t run;
+ };
+ uint64_t values[3];
+ };
+ };
+
+ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr);
+ void perf_evsel__delete(struct perf_evsel *evsel);
+ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads);
+ void perf_evsel__close(struct perf_evsel *evsel);
+ void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+ int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+ void perf_evsel__munmap(struct perf_evsel *evsel);
+ void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
+ int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
+ struct perf_counts_values *count);
+ int perf_evsel__enable(struct perf_evsel *evsel);
+ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+ int perf_evsel__disable(struct perf_evsel *evsel);
+ int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+ struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
+ struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
+ struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
+--
+
+*API to handle maps (perf ring buffers):*
+
+[source,c]
+--
+ #include <perf/mmap.h>
+
+ struct perf_mmap;
+
+ void perf_mmap__consume(struct perf_mmap *map);
+ int perf_mmap__read_init(struct perf_mmap *map);
+ void perf_mmap__read_done(struct perf_mmap *map);
+ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+--
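+
+The four functions are meant to be used together; a sketch of a drain loop
+over one map, mirroring the example in libperf-sampling(7):
+
+[source,c]
+--
+	union perf_event *event;
+
+	if (perf_mmap__read_init(map) < 0)
+		return;	/* no data to read */
+
+	while ((event = perf_mmap__read_event(map)) != NULL) {
+		/* ... process the event ... */
+		perf_mmap__consume(map);	/* mark the event as handled */
+	}
+	perf_mmap__read_done(map);
+--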
+
+*Structures to access perf API events:*
+
+[source,c]
+--
+ #include <perf/event.h>
+
+ struct perf_record_mmap;
+ struct perf_record_mmap2;
+ struct perf_record_comm;
+ struct perf_record_namespaces;
+ struct perf_record_fork;
+ struct perf_record_lost;
+ struct perf_record_lost_samples;
+ struct perf_record_read;
+ struct perf_record_throttle;
+ struct perf_record_ksymbol;
+ struct perf_record_bpf_event;
+ struct perf_record_sample;
+ struct perf_record_switch;
+ struct perf_record_header_attr;
+ struct perf_record_record_cpu_map;
+ struct perf_record_cpu_map_data;
+ struct perf_record_cpu_map;
+ struct perf_record_event_update_cpus;
+ struct perf_record_event_update_scale;
+ struct perf_record_event_update;
+ struct perf_trace_event_type;
+ struct perf_record_header_event_type;
+ struct perf_record_header_tracing_data;
+ struct perf_record_header_build_id;
+ struct perf_record_id_index;
+ struct perf_record_auxtrace_info;
+ struct perf_record_auxtrace;
+ struct perf_record_auxtrace_error;
+ struct perf_record_aux;
+ struct perf_record_itrace_start;
+ struct perf_record_thread_map_entry;
+ struct perf_record_thread_map;
+ struct perf_record_stat_config_entry;
+ struct perf_record_stat_config;
+ struct perf_record_stat;
+ struct perf_record_stat_round;
+ struct perf_record_time_conv;
+ struct perf_record_header_feature;
+ struct perf_record_compressed;
+--
+
+DESCRIPTION
+-----------
+The libperf library provides an API to access the Linux kernel perf
+events subsystem.
+
+The following objects are key to the libperf interface:
+
+[horizontal]
+
+struct perf_cpu_map:: Provides a CPU list abstraction.
+
+struct perf_thread_map:: Provides a thread list abstraction.
+
+struct perf_evsel:: Provides an abstraction for a single perf event.
+
+struct perf_evlist:: Gathers several struct perf_evsel objects and performs operations on all of them.
+
+struct perf_mmap:: Provides an abstraction for accessing a perf ring buffer.
+
+The exported API functions bind these objects together.
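+
+As a condensed sketch of how the objects bind together (based on the counting
+example in libperf-counting(7); error handling elided), a single event can be
+measured with just an evsel and a thread map:
+
+[source,c]
+--
+	struct perf_event_attr attr = {
+		.type	= PERF_TYPE_SOFTWARE,
+		.config	= PERF_COUNT_SW_TASK_CLOCK,
+	};
+	struct perf_thread_map *threads = perf_thread_map__new_dummy();
+	struct perf_evsel *evsel = perf_evsel__new(&attr);
+	struct perf_counts_values counts;
+
+	perf_thread_map__set_pid(threads, 0, 0);	/* monitor the current process */
+	if (perf_evsel__open(evsel, NULL, threads) == 0) {
+		perf_evsel__read(evsel, 0, 0, &counts);
+		perf_evsel__close(evsel);
+	}
+	perf_evsel__delete(evsel);
+	perf_thread_map__put(threads);
+--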
+
+REPORTING BUGS
+--------------
+Report bugs to <linux-perf-users@vger.kernel.org>.
+
+LICENSE
+-------
+libperf is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+SEE ALSO
+--------
+libperf-sampling(7), libperf-counting(7)
diff --git a/tools/lib/perf/Documentation/manpage-1.72.xsl b/tools/lib/perf/Documentation/manpage-1.72.xsl
new file mode 100644
index 0000000000..b4d315cb8c
--- /dev/null
+++ b/tools/lib/perf/Documentation/manpage-1.72.xsl
@@ -0,0 +1,14 @@
+<!-- manpage-1.72.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles peculiarities in docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the special values for the roff control characters
+ needed for docbook-xsl 1.72.0 -->
+<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
+<xsl:param name="git.docbook.dot" >&#x2302;</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-base.xsl b/tools/lib/perf/Documentation/manpage-base.xsl
new file mode 100644
index 0000000000..a264fa6160
--- /dev/null
+++ b/tools/lib/perf/Documentation/manpage-base.xsl
@@ -0,0 +1,35 @@
+<!-- manpage-base.xsl:
+ special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- these params silence some output from xmlto -->
+<xsl:param name="man.output.quietly" select="1"/>
+<xsl:param name="refentry.meta.get.quietly" select="1"/>
+
+<!-- convert asciidoc callouts to man page format;
+ git.docbook.backslash and git.docbook.dot params
+ must be supplied by another XSL file or other means -->
+<xsl:template match="co">
+ <xsl:value-of select="concat(
+ $git.docbook.backslash,'fB(',
+ substring-after(@id,'-'),')',
+ $git.docbook.backslash,'fR')"/>
+</xsl:template>
+<xsl:template match="calloutlist">
+ <xsl:value-of select="$git.docbook.dot"/>
+ <xsl:text>sp&#10;</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:text>&#10;</xsl:text>
+</xsl:template>
+<xsl:template match="callout">
+ <xsl:value-of select="concat(
+ $git.docbook.backslash,'fB',
+ substring-after(@arearefs,'-'),
+ '. ',$git.docbook.backslash,'fR')"/>
+ <xsl:apply-templates/>
+ <xsl:value-of select="$git.docbook.dot"/>
+ <xsl:text>br&#10;</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-bold-literal.xsl b/tools/lib/perf/Documentation/manpage-bold-literal.xsl
new file mode 100644
index 0000000000..608eb5df62
--- /dev/null
+++ b/tools/lib/perf/Documentation/manpage-bold-literal.xsl
@@ -0,0 +1,17 @@
+<!-- manpage-bold-literal.xsl:
+ special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- render literal text as bold (instead of plain or monospace);
+ this makes literal text easier to distinguish in manpages
+ viewed on a tty -->
+<xsl:template match="literal">
+ <xsl:value-of select="$git.docbook.backslash"/>
+ <xsl:text>fB</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:value-of select="$git.docbook.backslash"/>
+ <xsl:text>fR</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-normal.xsl b/tools/lib/perf/Documentation/manpage-normal.xsl
new file mode 100644
index 0000000000..a48f5b11f3
--- /dev/null
+++ b/tools/lib/perf/Documentation/manpage-normal.xsl
@@ -0,0 +1,13 @@
+<!-- manpage-normal.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles anything we want to keep away from docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the normal values for the roff control characters -->
+<xsl:param name="git.docbook.backslash">\</xsl:param>
+<xsl:param name="git.docbook.dot" >.</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-suppress-sp.xsl b/tools/lib/perf/Documentation/manpage-suppress-sp.xsl
new file mode 100644
index 0000000000..a63c7632a8
--- /dev/null
+++ b/tools/lib/perf/Documentation/manpage-suppress-sp.xsl
@@ -0,0 +1,21 @@
+<!-- manpage-suppress-sp.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles erroneous, inline .sp in manpage output of some
+ versions of docbook-xsl -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- attempt to work around spurious .sp at the tail of the line
+ that some versions of docbook stylesheets seem to add -->
+<xsl:template match="simpara">
+ <xsl:variable name="content">
+ <xsl:apply-templates/>
+ </xsl:variable>
+ <xsl:value-of select="normalize-space($content)"/>
+ <xsl:if test="not(ancestor::authorblurb) and
+ not(ancestor::personblurb)">
+ <xsl:text>&#10;&#10;</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
new file mode 100644
index 0000000000..3a9b2140aa
--- /dev/null
+++ b/tools/lib/perf/Makefile
@@ -0,0 +1,220 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Most of this file is copied from tools/lib/bpf/Makefile
+
+LIBPERF_VERSION = 0
+LIBPERF_PATCHLEVEL = 0
+LIBPERF_EXTRAVERSION = 1
+
+MAKEFLAGS += --no-print-directory
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+include $(srctree)/tools/scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.arch
+
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
+
+ifeq ("$(origin V)", "command line")
+ VERBOSE = $(V)
+endif
+ifndef VERBOSE
+ VERBOSE = 0
+endif
+
+ifeq ($(VERBOSE),1)
+ Q =
+else
+ Q = @
+endif
+
+TEST_ARGS := $(if $(V),-v)
+
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+ CFLAGS := $(EXTRA_CFLAGS)
+else
+ CFLAGS := -g -Wall
+endif
+
+INCLUDES = \
+-I$(srctree)/tools/lib/perf/include \
+-I$(srctree)/tools/lib/ \
+-I$(srctree)/tools/include \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/ \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \
+-I$(srctree)/tools/include/uapi
+
+# Append required CFLAGS
+override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += -Werror -Wall
+override CFLAGS += -fPIC
+override CFLAGS += $(INCLUDES)
+override CFLAGS += -fvisibility=hidden
+
+all:
+
+export srctree OUTPUT CC LD CFLAGS V
+export DESTDIR DESTDIR_SQ
+
+include $(srctree)/tools/build/Makefile.include
+
+VERSION_SCRIPT := libperf.map
+
+PATCHLEVEL = $(LIBPERF_PATCHLEVEL)
+EXTRAVERSION = $(LIBPERF_EXTRAVERSION)
+VERSION = $(LIBPERF_VERSION).$(LIBPERF_PATCHLEVEL).$(LIBPERF_EXTRAVERSION)
+
+LIBPERF_SO := $(OUTPUT)libperf.so.$(VERSION)
+LIBPERF_A := $(OUTPUT)libperf.a
+LIBPERF_IN := $(OUTPUT)libperf-in.o
+LIBPERF_PC := $(OUTPUT)libperf.pc
+
+LIBPERF_ALL := $(LIBPERF_A) $(OUTPUT)libperf.so*
+
+LIB_DIR := $(srctree)/tools/lib/api/
+
+ifneq ($(OUTPUT),)
+ifneq ($(subdir),)
+ API_PATH=$(OUTPUT)/../lib/api/
+else
+ API_PATH=$(OUTPUT)
+endif
+else
+ API_PATH=$(LIB_DIR)
+endif
+
+LIBAPI = $(API_PATH)libapi.a
+export LIBAPI
+
+$(LIBAPI): FORCE
+ $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
+
+$(LIBAPI)-clean:
+ $(call QUIET_CLEAN, libapi)
+ $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
+
+$(LIBPERF_IN): FORCE
+ $(Q)$(MAKE) $(build)=libperf
+
+$(LIBPERF_A): $(LIBPERF_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN)
+
+$(LIBPERF_SO): $(LIBPERF_IN) $(LIBAPI)
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libperf.so \
+ -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+ @ln -sf $(@F) $(OUTPUT)libperf.so
+ @ln -sf $(@F) $(OUTPUT)libperf.so.$(LIBPERF_VERSION)
+
+
+libs: $(LIBPERF_A) $(LIBPERF_SO) $(LIBPERF_PC)
+
+all: fixdep
+ $(Q)$(MAKE) libs
+
+clean: $(LIBAPI)-clean
+ $(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \
+ $(TESTS_STATIC) $(TESTS_SHARED)
+
+TESTS_IN = tests-in.o
+
+TESTS_STATIC = $(OUTPUT)tests-static
+TESTS_SHARED = $(OUTPUT)tests-shared
+
+$(TESTS_IN): FORCE
+ $(Q)$(MAKE) $(build)=tests
+
+$(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ $^
+
+$(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ -L$(or $(OUTPUT),.) $^ -lperf
+
+make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
+
+tests: make-tests
+ @echo "running static:"
+ @./$(TESTS_STATIC) $(TEST_ARGS)
+ @echo "running dynamic:"
+ @LD_LIBRARY_PATH=. ./$(TESTS_SHARED) $(TEST_ARGS)
+
+$(LIBPERF_PC):
+ $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
+ -e "s|@LIBDIR@|$(libdir_SQ)|" \
+ -e "s|@VERSION@|$(VERSION)|" \
+ < libperf.pc.template > $@
+
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$2' ]; then \
+ $(INSTALL) -d -m 755 '$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$2'
+endef
+
+install_lib: libs
+ $(call QUIET_INSTALL, $(LIBPERF_ALL)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
+
+HDRS := bpf_perf.h core.h cpumap.h threadmap.h evlist.h evsel.h event.h mmap.h
+INTERNAL_HDRS := cpumap.h evlist.h evsel.h lib.h mmap.h rc_check.h threadmap.h xyarray.h
+
+INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/perf
+INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
+INSTALL_INTERNAL_HDRS_PFX := $(DESTDIR)$(prefix)/include/internal
+INSTALL_INTERNAL_HDRS := $(addprefix $(INSTALL_INTERNAL_HDRS_PFX)/, $(INTERNAL_HDRS))
+
+$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: include/perf/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
+
+$(INSTALL_INTERNAL_HDRS): $(INSTALL_INTERNAL_HDRS_PFX)/%.h: include/internal/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_INTERNAL_HDRS_PFX)/,644)
+
+install_headers: $(INSTALL_HDRS) $(INSTALL_INTERNAL_HDRS)
+ $(call QUIET_INSTALL, libperf_headers)
+
+install_pkgconfig: $(LIBPERF_PC)
+ $(call QUIET_INSTALL, $(LIBPERF_PC)) \
+ $(call do_install,$(LIBPERF_PC),$(DESTDIR_SQ)$(libdir_SQ)/pkgconfig,644)
+
+install_doc:
+ $(Q)$(MAKE) -C Documentation install-man install-html install-examples
+
+install: install_lib install_headers install_pkgconfig install_doc
+
+FORCE:
+
+.PHONY: all install clean tests FORCE
diff --git a/tools/lib/perf/core.c b/tools/lib/perf/core.c
new file mode 100644
index 0000000000..58fc894b76
--- /dev/null
+++ b/tools/lib/perf/core.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define __printf(a, b) __attribute__((format(printf, a, b)))
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <linux/compiler.h>
+#include <perf/core.h>
+#include <internal/lib.h>
+#include "internal.h"
+
+static int __base_pr(enum libperf_print_level level __maybe_unused, const char *format,
+ va_list args)
+{
+ return vfprintf(stderr, format, args);
+}
+
+static libperf_print_fn_t __libperf_pr = __base_pr;
+
+__printf(2, 3)
+void libperf_print(enum libperf_print_level level, const char *format, ...)
+{
+ va_list args;
+
+ if (!__libperf_pr)
+ return;
+
+ va_start(args, format);
+ __libperf_pr(level, format, args);
+ va_end(args);
+}
+
+void libperf_init(libperf_print_fn_t fn)
+{
+ page_size = sysconf(_SC_PAGE_SIZE);
+ __libperf_pr = fn;
+}
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
new file mode 100644
index 0000000000..2a5a292173
--- /dev/null
+++ b/tools/lib/perf/cpumap.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <perf/cpumap.h>
+#include <stdlib.h>
+#include <linux/refcount.h>
+#include <internal/cpumap.h>
+#include <asm/bug.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <limits.h>
+
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
+{
+ RC_CHK_ACCESS(map)->nr = nr_cpus;
+}
+
+struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+{
+ RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
+ struct perf_cpu_map *result;
+
+ if (ADD_RC_CHK(result, cpus)) {
+ cpus->nr = nr_cpus;
+ refcount_set(&cpus->refcnt, 1);
+ }
+ return result;
+}
+
+struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+{
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
+
+ if (cpus)
+ RC_CHK_ACCESS(cpus)->map[0].cpu = -1;
+
+ return cpus;
+}
+
+static void cpu_map__delete(struct perf_cpu_map *map)
+{
+ if (map) {
+ WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
+ "cpu_map refcnt unbalanced\n");
+ RC_CHK_FREE(map);
+ }
+}
+
+struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
+{
+ struct perf_cpu_map *result;
+
+ if (RC_CHK_GET(result, map))
+ refcount_inc(perf_cpu_map__refcnt(map));
+
+ return result;
+}
+
+void perf_cpu_map__put(struct perf_cpu_map *map)
+{
+ if (map) {
+ if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
+ cpu_map__delete(map);
+ else
+ RC_CHK_PUT(map);
+ }
+}
+
+static struct perf_cpu_map *cpu_map__default_new(void)
+{
+ struct perf_cpu_map *cpus;
+ int nr_cpus;
+
+ nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr_cpus < 0)
+ return NULL;
+
+ cpus = perf_cpu_map__alloc(nr_cpus);
+ if (cpus != NULL) {
+ int i;
+
+ for (i = 0; i < nr_cpus; ++i)
+ RC_CHK_ACCESS(cpus)->map[i].cpu = i;
+ }
+
+ return cpus;
+}
+
+struct perf_cpu_map *perf_cpu_map__default_new(void)
+{
+ return cpu_map__default_new();
+}
+
+
+static int cmp_cpu(const void *a, const void *b)
+{
+ const struct perf_cpu *cpu_a = a, *cpu_b = b;
+
+ return cpu_a->cpu - cpu_b->cpu;
+}
+
+static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+{
+ return RC_CHK_ACCESS(cpus)->map[idx];
+}
+
+static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
+{
+ size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
+ int i, j;
+
+ if (cpus != NULL) {
+ memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
+ qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
+ /* Remove dups */
+ j = 0;
+ for (i = 0; i < nr_cpus; i++) {
+ if (i == 0 ||
+ __perf_cpu_map__cpu(cpus, i).cpu !=
+ __perf_cpu_map__cpu(cpus, i - 1).cpu) {
+ RC_CHK_ACCESS(cpus)->map[j++].cpu =
+ __perf_cpu_map__cpu(cpus, i).cpu;
+ }
+ }
+ perf_cpu_map__set_nr(cpus, j);
+ assert(j <= nr_cpus);
+ }
+ return cpus;
+}
+
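+/*
+ * Parse a CPU list in the sysfs "online" file format, e.g. "0-3,7\n", from
+ * @file. Ranges are expanded, then cpu_map__trim_new() sorts the result and
+ * drops duplicates; if nothing could be parsed, fall back to the default map
+ * of all online CPUs.
+ */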
+struct perf_cpu_map *perf_cpu_map__read(FILE *file)
+{
+ struct perf_cpu_map *cpus = NULL;
+ int nr_cpus = 0;
+ struct perf_cpu *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
+ int n, cpu, prev;
+ char sep;
+
+ sep = 0;
+ prev = -1;
+ for (;;) {
+ n = fscanf(file, "%u%c", &cpu, &sep);
+ if (n <= 0)
+ break;
+ if (prev >= 0) {
+ int new_max = nr_cpus + cpu - prev - 1;
+
+ WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
+ "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);
+
+ if (new_max >= max_entries) {
+ max_entries = new_max + MAX_NR_CPUS / 2;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ while (++prev < cpu)
+ tmp_cpus[nr_cpus++].cpu = prev;
+ }
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ tmp_cpus[nr_cpus++].cpu = cpu;
+ if (n == 2 && sep == '-')
+ prev = cpu;
+ else
+ prev = -1;
+ if (n == 1 || sep == '\n')
+ break;
+ }
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
+out_free_tmp:
+ free(tmp_cpus);
+ return cpus;
+}
+
+static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
+{
+ struct perf_cpu_map *cpus = NULL;
+ FILE *onlnf;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (!onlnf)
+ return cpu_map__default_new();
+
+ cpus = perf_cpu_map__read(onlnf);
+ fclose(onlnf);
+ return cpus;
+}
+
+struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
+{
+ struct perf_cpu_map *cpus = NULL;
+ unsigned long start_cpu, end_cpu = 0;
+ char *p = NULL;
+ int i, nr_cpus = 0;
+ struct perf_cpu *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
+
+ if (!cpu_list)
+ return cpu_map__read_all_cpu_map();
+
+ /*
+ * must handle the case of empty cpumap to cover
+ * TOPOLOGY header for NUMA nodes with no CPU
+ * ( e.g., because of CPU hotplug)
+ */
+ if (!isdigit(*cpu_list) && *cpu_list != '\0')
+ goto out;
+
+ while (isdigit(*cpu_list)) {
+ p = NULL;
+ start_cpu = strtoul(cpu_list, &p, 0);
+ if (start_cpu >= INT_MAX
+ || (*p != '\0' && *p != ',' && *p != '-'))
+ goto invalid;
+
+ if (*p == '-') {
+ cpu_list = ++p;
+ p = NULL;
+ end_cpu = strtoul(cpu_list, &p, 0);
+
+ if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+ goto invalid;
+
+ if (end_cpu < start_cpu)
+ goto invalid;
+ } else {
+ end_cpu = start_cpu;
+ }
+
+ WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
+ "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);
+
+ for (; start_cpu <= end_cpu; start_cpu++) {
+ /* check for duplicates */
+ for (i = 0; i < nr_cpus; i++)
+ if (tmp_cpus[i].cpu == (int)start_cpu)
+ goto invalid;
+
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
+ if (tmp == NULL)
+ goto invalid;
+ tmp_cpus = tmp;
+ }
+ tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
+ }
+ if (*p)
+ ++p;
+
+ cpu_list = p;
+ }
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else if (*cpu_list != '\0')
+ cpus = cpu_map__default_new();
+ else
+ cpus = perf_cpu_map__dummy_new();
+invalid:
+ free(tmp_cpus);
+out:
+ return cpus;
+}
+
+static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+{
+ return RC_CHK_ACCESS(cpus)->nr;
+}
+
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+{
+ struct perf_cpu result = {
+ .cpu = -1
+ };
+
+ if (cpus && idx < __perf_cpu_map__nr(cpus))
+ return __perf_cpu_map__cpu(cpus, idx);
+
+ return result;
+}
+
+int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
+{
+ return cpus ? __perf_cpu_map__nr(cpus) : 1;
+}
+
+bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+{
+ return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
+}
+
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
+{
+ int low, high;
+
+ if (!cpus)
+ return -1;
+
+ low = 0;
+ high = __perf_cpu_map__nr(cpus);
+ while (low < high) {
+ int idx = (low + high) / 2;
+ struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);
+
+ if (cpu_at_idx.cpu == cpu.cpu)
+ return idx;
+
+ if (cpu_at_idx.cpu > cpu.cpu)
+ high = idx;
+ else
+ low = idx + 1;
+ }
+
+ return -1;
+}
+
+bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
+{
+ return perf_cpu_map__idx(cpus, cpu) != -1;
+}
+
+bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
+{
+ int nr;
+
+ if (lhs == rhs)
+ return true;
+
+ if (!lhs || !rhs)
+ return false;
+
+ nr = __perf_cpu_map__nr(lhs);
+ if (nr != __perf_cpu_map__nr(rhs))
+ return false;
+
+ for (int idx = 0; idx < nr; idx++) {
+ if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
+ return false;
+ }
+ return true;
+}
+
+bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
+{
+ return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
+}
+
+struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
+{
+ struct perf_cpu result = {
+ .cpu = -1
+ };
+
+ // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
+ return __perf_cpu_map__nr(map) > 0
+ ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
+ : result;
+}
+
+/** Is 'b' a subset of 'a'. */
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
+{
+ if (a == b || !b)
+ return true;
+ if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
+ return false;
+
+ for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
+ if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
+ return false;
+ if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
+ j++;
+ if (j == __perf_cpu_map__nr(b))
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Merge two cpumaps.
+ *
+ * orig either gets freed and replaced with a new map, or reused
+ * with no reference count change (similar to "realloc").
+ * other has its reference count increased.
+ */
+
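+/*
+ * Usage sketch (illustrative): because @orig may be freed, callers must
+ * always continue through the returned pointer:
+ *
+ *	map = perf_cpu_map__merge(map, other);
+ */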
+struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other)
+{
+ struct perf_cpu *tmp_cpus;
+ int tmp_len;
+ int i, j, k;
+ struct perf_cpu_map *merged;
+
+ if (perf_cpu_map__is_subset(orig, other))
+ return orig;
+ if (perf_cpu_map__is_subset(other, orig)) {
+ perf_cpu_map__put(orig);
+ return perf_cpu_map__get(other);
+ }
+
+ tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
+ tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
+ if (!tmp_cpus)
+ return NULL;
+
+ /* Standard merge algorithm from Wikipedia */
+ i = j = k = 0;
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
+ if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
+ j++;
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ } else
+ tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
+ }
+
+ while (i < __perf_cpu_map__nr(orig))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+
+ while (j < __perf_cpu_map__nr(other))
+ tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
+ assert(k <= tmp_len);
+
+ merged = cpu_map__trim_new(k, tmp_cpus);
+ free(tmp_cpus);
+ perf_cpu_map__put(orig);
+ return merged;
+}
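+
+/*
+ * A minimal usage sketch of the merge semantics above (illustrative only;
+ * error handling elided):
+ *
+ *    struct perf_cpu_map *a = perf_cpu_map__new("0-3");
+ *    struct perf_cpu_map *b = perf_cpu_map__new("2-5");
+ *
+ *    a = perf_cpu_map__merge(a, b);    // a now holds 0-5, the old a was put
+ *    perf_cpu_map__put(b);             // drop the caller's own reference
+ *    perf_cpu_map__put(a);
+ */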
+
+struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other)
+{
+ struct perf_cpu *tmp_cpus;
+ int tmp_len;
+ int i, j, k;
+ struct perf_cpu_map *merged = NULL;
+
+ if (perf_cpu_map__is_subset(other, orig))
+ return perf_cpu_map__get(orig);
+ if (perf_cpu_map__is_subset(orig, other))
+ return perf_cpu_map__get(other);
+
+ tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
+ tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
+ if (!tmp_cpus)
+ return NULL;
+
+ i = j = k = 0;
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
+ i++;
+ else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
+ j++;
+ else {
+ j++;
+ tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ }
+ }
+ if (k)
+ merged = cpu_map__trim_new(k, tmp_cpus);
+ free(tmp_cpus);
+ return merged;
+}
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
new file mode 100644
index 0000000000..b8b066d0dc
--- /dev/null
+++ b/tools/lib/perf/evlist.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <sys/ioctl.h>
+#include <internal/evlist.h>
+#include <internal/evsel.h>
+#include <internal/xyarray.h>
+#include <internal/mmap.h>
+#include <internal/cpumap.h>
+#include <internal/threadmap.h>
+#include <internal/lib.h>
+#include <linux/zalloc.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <poll.h>
+#include <sys/mman.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <api/fd/array.h>
+#include "internal.h"
+
+void perf_evlist__init(struct perf_evlist *evlist)
+{
+ INIT_LIST_HEAD(&evlist->entries);
+ evlist->nr_entries = 0;
+ fdarray__init(&evlist->pollfd, 64);
+ perf_evlist__reset_id_hash(evlist);
+}
+
+static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ if (evsel->system_wide) {
+ /* System wide: set the cpu map of the evsel to all online CPUs. */
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__new(NULL);
+ } else if (evlist->has_user_cpus && evsel->is_pmu_core) {
+ /*
+ * User requested CPUs on a core PMU, ensure the requested CPUs
+ * are valid by intersecting with those of the PMU.
+ */
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
+ } else if (!evsel->own_cpus || evlist->has_user_cpus ||
+ (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
+ /*
+ * The PMU didn't specify a default cpu map, this isn't a core
+ * event and the user requested CPUs or the evlist user
+ * requested CPUs have the "any CPU" (aka dummy) CPU value, in
+ * which case use the user requested CPUs rather than the PMU
+ * ones.
+ */
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+ } else if (evsel->cpus != evsel->own_cpus) {
+ /*
+ * No user requested cpu map but the PMU cpu map doesn't match
+ * the evsel's. Reset it back to the PMU cpu map.
+ */
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
+ }
+
+ if (evsel->system_wide) {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__new_dummy();
+ } else {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__get(evlist->threads);
+ }
+
+ evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
+}
+
+static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ evlist->needs_map_propagation = true;
+
+ perf_evlist__for_each_evsel(evlist, evsel)
+ __perf_evlist__propagate_maps(evlist, evsel);
+}
+
+void perf_evlist__add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ evsel->idx = evlist->nr_entries;
+ list_add_tail(&evsel->node, &evlist->entries);
+ evlist->nr_entries += 1;
+
+ if (evlist->needs_map_propagation)
+ __perf_evlist__propagate_maps(evlist, evsel);
+}
+
+void perf_evlist__remove(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ list_del_init(&evsel->node);
+ evlist->nr_entries -= 1;
+}
+
+struct perf_evlist *perf_evlist__new(void)
+{
+ struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+ if (evlist != NULL)
+ perf_evlist__init(evlist);
+
+ return evlist;
+}
+
+struct perf_evsel *
+perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
+{
+ struct perf_evsel *next;
+
+ if (!prev) {
+ next = list_first_entry(&evlist->entries,
+ struct perf_evsel,
+ node);
+ } else {
+ next = list_next_entry(prev, node);
+ }
+
+ /* An empty list is noticed here, so there is no need to check on entry. */
+ if (&next->node == &evlist->entries)
+ return NULL;
+
+ return next;
+}
+
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *n;
+
+ perf_evlist__for_each_entry_safe(evlist, n, pos) {
+ list_del_init(&pos->node);
+ perf_evsel__delete(pos);
+ }
+
+ evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+ perf_cpu_map__put(evlist->user_requested_cpus);
+ perf_cpu_map__put(evlist->all_cpus);
+ perf_thread_map__put(evlist->threads);
+ evlist->user_requested_cpus = NULL;
+ evlist->all_cpus = NULL;
+ evlist->threads = NULL;
+ fdarray__exit(&evlist->pollfd);
+}
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+ if (evlist == NULL)
+ return;
+
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ perf_evlist__purge(evlist);
+ perf_evlist__exit(evlist);
+ free(evlist);
+}
+
+void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads)
+{
+ /*
+ * Allow for the possibility that one or another of the maps isn't being
+ * changed, i.e. don't put it. Note we are assuming the maps that are
+ * being applied are brand new and evlist is taking ownership of the
+ * original reference count of 1. If that is not the case it is up to
+ * the caller to increase the reference count.
+ */
+ if (cpus != evlist->user_requested_cpus) {
+ perf_cpu_map__put(evlist->user_requested_cpus);
+ evlist->user_requested_cpus = perf_cpu_map__get(cpus);
+ }
+
+ if (threads != evlist->threads) {
+ perf_thread_map__put(evlist->threads);
+ evlist->threads = perf_thread_map__get(threads);
+ }
+
+ perf_evlist__propagate_maps(evlist);
+}
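+
+/*
+ * A minimal ownership sketch for the function above (illustrative only;
+ * error handling elided, mirroring the pattern used by libperf's
+ * documentation examples):
+ *
+ *    struct perf_evlist *evlist = perf_evlist__new();
+ *    struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);    // all online CPUs
+ *
+ *    perf_evlist__set_maps(evlist, cpus, NULL);    // evlist grabs its own ref
+ *    // ... add evsels, open, enable, read ...
+ *    perf_evlist__delete(evlist);
+ *    perf_cpu_map__put(cpus);    // drop the caller's reference
+ */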
+
+int perf_evlist__open(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int err;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
+ if (err < 0)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ perf_evlist__close(evlist);
+ return err;
+}
+
+void perf_evlist__close(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ perf_evlist__for_each_entry_reverse(evlist, evsel)
+ perf_evsel__close(evsel);
+}
+
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ perf_evlist__for_each_entry(evlist, evsel)
+ perf_evsel__enable(evsel);
+}
+
+void perf_evlist__disable(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ perf_evlist__for_each_entry(evlist, evsel)
+ perf_evsel__disable(evsel);
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ return first->attr.read_format;
+}
+
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ int hash;
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->id = id;
+ sid->evsel = evsel;
+ hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&sid->node, &evlist->heads[hash]);
+}
+
+void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+}
+
+void perf_evlist__id_add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ evsel->id[evsel->ids++] = id;
+}
+
+int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
+{
+ u64 read_data[4] = { 0, };
+ int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+ /* Legacy way to get the event id... All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
+
+ if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+ read(fd, &read_data, sizeof(read_data)) == -1)
+ return -1;
+
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ ++id_idx;
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ ++id_idx;
+
+ id = read_data[id_idx];
+
+add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ return 0;
+}
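+
+/*
+ * For reference, the legacy read() layout decoded above (without
+ * PERF_FORMAT_GROUP) is, in order:
+ *
+ *    u64 value;
+ *    u64 time_enabled;    // if PERF_FORMAT_TOTAL_TIME_ENABLED
+ *    u64 time_running;    // if PERF_FORMAT_TOTAL_TIME_RUNNING
+ *    u64 id;              // if PERF_FORMAT_ID
+ *
+ * which is why id_idx starts at 1 and is bumped once per enabled time field.
+ */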
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nfds = 0;
+ struct perf_evsel *evsel;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ if (evsel->system_wide)
+ nfds += nr_cpus;
+ else
+ nfds += nr_cpus * nr_threads;
+ }
+
+ if (fdarray__available_entries(&evlist->pollfd) < nfds &&
+ fdarray__grow(&evlist->pollfd, nfds) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+ void *ptr, short revent, enum fdarray_flags flags)
+{
+ int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);
+
+ if (pos >= 0) {
+ evlist->pollfd.priv[pos].ptr = ptr;
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ }
+
+ return pos;
+}
+
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
+ void *arg __maybe_unused)
+{
+ struct perf_mmap *map = fda->priv[fd].ptr;
+
+ if (map)
+ perf_mmap__put(map);
+}
+
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
+{
+ return fdarray__filter(&evlist->pollfd, revents_and_mask,
+ perf_evlist__munmap_filtered, NULL);
+}
+
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
+{
+ return fdarray__poll(&evlist->pollfd, timeout);
+}
+
+static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
+{
+ int i;
+ struct perf_mmap *map;
+
+ map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ if (!map)
+ return NULL;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *prev = i ? &map[i - 1] : NULL;
+
+ /*
+ * When the perf_mmap() call is made we grab one refcount, plus
+ * one extra to let perf_mmap__consume() get the last
+ * events after all real references (perf_mmap__get()) are
+ * dropped.
+ *
+ * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
+ * thus does perf_mmap__get() on it.
+ */
+ perf_mmap__init(&map[i], prev, overwrite, NULL);
+ }
+
+ return map;
+}
+
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
+{
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->idx = idx;
+ sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+ sid->tid = perf_thread_map__pid(evsel->threads, thread);
+}
+
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
+{
+ struct perf_mmap *maps;
+
+ maps = overwrite ? evlist->mmap_ovw : evlist->mmap;
+
+ if (!maps) {
+ maps = perf_evlist__alloc_mmap(evlist, overwrite);
+ if (!maps)
+ return NULL;
+
+ if (overwrite)
+ evlist->mmap_ovw = maps;
+ else
+ evlist->mmap = maps;
+ }
+
+ return &maps[idx];
+}
+
+#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int output, struct perf_cpu cpu)
+{
+ return perf_mmap__mmap(map, mp, output, cpu);
+}
+
+static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
+ bool overwrite)
+{
+ if (overwrite)
+ evlist->mmap_ovw_first = map;
+ else
+ evlist->mmap_first = map;
+}
+
+static int
+mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ int idx, struct perf_mmap_param *mp, int cpu_idx,
+ int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
+{
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
+ struct perf_evsel *evsel;
+ int revent;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ bool overwrite = evsel->attr.write_backward;
+ enum fdarray_flags flgs;
+ struct perf_mmap *map;
+ int *output, fd, cpu;
+
+ if (evsel->system_wide && thread)
+ continue;
+
+ cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
+ if (cpu == -1)
+ continue;
+
+ map = ops->get(evlist, overwrite, idx);
+ if (map == NULL)
+ return -ENOMEM;
+
+ if (overwrite) {
+ mp->prot = PROT_READ;
+ output = _output_overwrite;
+ } else {
+ mp->prot = PROT_READ | PROT_WRITE;
+ output = _output;
+ }
+
+ fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+
+ /*
+ * The last one will be done at perf_mmap__consume(), so that we
+ * make sure we don't prevent tools from consuming every last event in
+ * the ring buffer.
+ *
+ * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+ * anymore, but the last events for it are still in the ring buffer,
+ * waiting to be consumed.
+ *
+ * Tools can choose to ignore this at their own discretion, but the
+ * evlist layer can't just drop it when filtering events in
+ * perf_evlist__filter_pollfd().
+ */
+ refcount_set(&map->refcnt, 2);
+
+ if (ops->idx)
+ ops->idx(evlist, evsel, mp, idx);
+
+ /* Debug message used by test scripts */
+ pr_debug("idx %d: mmapping fd %d\n", idx, *output);
+ if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
+ return -1;
+
+ *nr_mmaps += 1;
+
+ if (!idx)
+ perf_evlist__set_mmap_first(evlist, map, overwrite);
+ } else {
+ /* Debug message used by test scripts */
+ pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+
+ perf_mmap__get(map);
+ }
+
+ revent = !overwrite ? POLLIN : 0;
+
+ flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
+ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
+ perf_mmap__put(map);
+ return -1;
+ }
+
+ if (evsel->attr.read_format & PERF_FORMAT_ID) {
+ if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+ fd) < 0)
+ return -1;
+ perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
+ }
+ }
+
+ return 0;
+}
+
+static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int cpu, thread, idx = 0;
+ int nr_mmaps = 0;
+
+ pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
+ __func__, nr_cpus, nr_threads);
+
+ /* per-thread mmaps */
+ for (thread = 0; thread < nr_threads; thread++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ /* system-wide mmaps i.e. per-cpu */
+ for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
+ return 0;
+
+out_unmap:
+ perf_evlist__munmap(evlist);
+ return -1;
+}
+
+static int
+mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int nr_mmaps = 0;
+ int cpu, thread;
+
+ pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ for (thread = 0; thread < nr_threads; thread++) {
+ if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
+ thread, &output, &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+ }
+
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
+ return 0;
+
+out_unmap:
+ perf_evlist__munmap(evlist);
+ return -1;
+}
+
+static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
+{
+ int nr_mmaps;
+
+ /* One for each CPU */
+ nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
+ if (perf_cpu_map__empty(evlist->all_cpus)) {
+ /* Plus one for each thread */
+ nr_mmaps += perf_thread_map__nr(evlist->threads);
+ /* Minus the per-thread CPU (-1) */
+ nr_mmaps -= 1;
+ }
+
+ return nr_mmaps;
+}
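+
+/*
+ * Worked example of the count above: with all_cpus = {0,1,2,3} this yields 4,
+ * one mmap per CPU. With a dummy cpu map (single -1 entry) and 3 threads it
+ * yields 1 + 3 - 1 = 3, i.e. one mmap per thread.
+ */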
+
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+ struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ const struct perf_cpu_map *cpus = evlist->all_cpus;
+ struct perf_evsel *evsel;
+
+ if (!ops || !ops->get || !ops->mmap)
+ return -EINVAL;
+
+ mp->mask = evlist->mmap_len - page_size - 1;
+
+ evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->sample_id == NULL &&
+ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
+ return -ENOMEM;
+ }
+
+ if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ return -ENOMEM;
+
+ if (perf_cpu_map__empty(cpus))
+ return mmap_per_thread(evlist, ops, mp);
+
+ return mmap_per_cpu(evlist, ops, mp);
+}
+
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
+{
+ struct perf_mmap_param mp;
+ struct perf_evlist_mmap_ops ops = {
+ .get = perf_evlist__mmap_cb_get,
+ .mmap = perf_evlist__mmap_cb_mmap,
+ };
+
+ evlist->mmap_len = (pages + 1) * page_size;
+
+ return perf_evlist__mmap_ops(evlist, &ops, &mp);
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+ int i;
+
+ if (evlist->mmap) {
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ perf_mmap__munmap(&evlist->mmap[i]);
+ }
+
+ if (evlist->mmap_ovw) {
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ perf_mmap__munmap(&evlist->mmap_ovw[i]);
+ }
+
+ zfree(&evlist->mmap);
+ zfree(&evlist->mmap_ovw);
+}
+
+struct perf_mmap*
+perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
+ bool overwrite)
+{
+ if (map)
+ return map->next;
+
+ return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
+}
+
+void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
+{
+ struct perf_evsel *evsel;
+ int n = 0;
+
+ __perf_evlist__for_each_entry(list, evsel) {
+ evsel->leader = leader;
+ n++;
+ }
+ leader->nr_members = n;
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries) {
+ struct perf_evsel *first = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ __perf_evlist__set_leader(&evlist->entries, first);
+ }
+}
+
+int perf_evlist__nr_groups(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int nr_groups = 0;
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ /*
+ * By default evsels have a nr_members of 1 and are their own
+ * leader. If nr_members is >1 then this indicates a group.
+ */
+ if (evsel->leader == evsel && evsel->nr_members > 1)
+ nr_groups++;
+ }
+ return nr_groups;
+}
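+
+/*
+ * A minimal grouping sketch (illustrative only; 'cycles' and 'instructions'
+ * are assumed, already created evsels):
+ *
+ *    struct perf_evlist *evlist = perf_evlist__new();
+ *
+ *    perf_evlist__add(evlist, cycles);        // leader-to-be
+ *    perf_evlist__add(evlist, instructions);
+ *    perf_evlist__set_leader(evlist);
+ *
+ *    // now cycles->nr_members == 2 and perf_evlist__nr_groups() returns 1
+ */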
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
new file mode 100644
index 0000000000..8b51b008a8
--- /dev/null
+++ b/tools/lib/perf/evsel.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <linux/list.h>
+#include <internal/evsel.h>
+#include <linux/zalloc.h>
+#include <stdlib.h>
+#include <internal/xyarray.h>
+#include <internal/cpumap.h>
+#include <internal/mmap.h>
+#include <internal/threadmap.h>
+#include <internal/lib.h>
+#include <linux/string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <asm/bug.h>
+
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx)
+{
+ INIT_LIST_HEAD(&evsel->node);
+ evsel->attr = *attr;
+ evsel->idx = idx;
+ evsel->leader = evsel;
+}
+
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL)
+ perf_evsel__init(evsel, attr, 0);
+
+ return evsel;
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+ free(evsel);
+}
+
+#define FD(_evsel, _cpu_map_idx, _thread) \
+ ((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
+#define MMAP(_evsel, _cpu_map_idx, _thread) \
+ (_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
+ : NULL)
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+ if (evsel->fd) {
+ int idx, thread;
+
+ for (idx = 0; idx < ncpus; idx++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ int *fd = FD(evsel, idx, thread);
+
+ if (fd)
+ *fd = -1;
+ }
+ }
+ }
+
+ return evsel->fd != NULL ? 0 : -ENOMEM;
+}
+
+static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
+
+ return evsel->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int
+sys_perf_event_open(struct perf_event_attr *attr,
+ pid_t pid, struct perf_cpu cpu, int group_fd,
+ unsigned long flags)
+{
+ return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
+}
+
+static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int *fd;
+
+ if (evsel == leader) {
+ *group_fd = -1;
+ return 0;
+ }
+
+ /*
+ * The leader must already be processed/open;
+ * if not, it's a bug.
+ */
+ if (!leader->fd)
+ return -ENOTCONN;
+
+ fd = FD(leader, cpu_map_idx, thread);
+ if (fd == NULL || *fd == -1)
+ return -EBADF;
+
+ *group_fd = *fd;
+
+ return 0;
+}
+
+int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads)
+{
+ struct perf_cpu cpu;
+ int idx, thread, err = 0;
+
+ if (cpus == NULL) {
+ static struct perf_cpu_map *empty_cpu_map;
+
+ if (empty_cpu_map == NULL) {
+ empty_cpu_map = perf_cpu_map__dummy_new();
+ if (empty_cpu_map == NULL)
+ return -ENOMEM;
+ }
+
+ cpus = empty_cpu_map;
+ }
+
+ if (threads == NULL) {
+ static struct perf_thread_map *empty_thread_map;
+
+ if (empty_thread_map == NULL) {
+ empty_thread_map = perf_thread_map__new_dummy();
+ if (empty_thread_map == NULL)
+ return -ENOMEM;
+ }
+
+ threads = empty_thread_map;
+ }
+
+ if (evsel->fd == NULL &&
+ perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ return -ENOMEM;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+ for (thread = 0; thread < threads->nr; thread++) {
+ int fd, group_fd, *evsel_fd;
+
+ evsel_fd = FD(evsel, idx, thread);
+ if (evsel_fd == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = get_group_fd(evsel, idx, thread, &group_fd);
+ if (err < 0)
+ goto out;
+
+ fd = sys_perf_event_open(&evsel->attr,
+ threads->map[thread].pid,
+ cpu, group_fd, 0);
+
+ if (fd < 0) {
+ err = -errno;
+ goto out;
+ }
+
+ *evsel_fd = fd;
+ }
+ }
+out:
+ if (err)
+ perf_evsel__close(evsel);
+
+ return err;
+}
+
+static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
+{
+ int thread;
+
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd && *fd >= 0) {
+ close(*fd);
+ *fd = -1;
+ }
+ }
+}
+
+void perf_evsel__close_fd(struct perf_evsel *evsel)
+{
+ for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
+ perf_evsel__close_fd_cpu(evsel, idx);
+}
+
+void perf_evsel__free_fd(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->fd);
+ evsel->fd = NULL;
+}
+
+void perf_evsel__close(struct perf_evsel *evsel)
+{
+ if (evsel->fd == NULL)
+ return;
+
+ perf_evsel__close_fd(evsel);
+ perf_evsel__free_fd(evsel);
+}
+
+void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
+{
+ if (evsel->fd == NULL)
+ return;
+
+ perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
+}
+
+void perf_evsel__munmap(struct perf_evsel *evsel)
+{
+ int idx, thread;
+
+ if (evsel->fd == NULL || evsel->mmap == NULL)
+ return;
+
+ for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int *fd = FD(evsel, idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ continue;
+
+ perf_mmap__munmap(MMAP(evsel, idx, thread));
+ }
+ }
+
+ xyarray__delete(evsel->mmap);
+ evsel->mmap = NULL;
+}
+
+int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
+{
+ int ret, idx, thread;
+ struct perf_mmap_param mp = {
+ .prot = PROT_READ | PROT_WRITE,
+ .mask = (pages * page_size) - 1,
+ };
+
+ if (evsel->fd == NULL || evsel->mmap)
+ return -EINVAL;
+
+ if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
+ return -ENOMEM;
+
+ for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int *fd = FD(evsel, idx, thread);
+ struct perf_mmap *map;
+ struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);
+
+ if (fd == NULL || *fd < 0)
+ continue;
+
+ map = MMAP(evsel, idx, thread);
+ perf_mmap__init(map, NULL, false, NULL);
+
+ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
+ if (ret) {
+ perf_evsel__munmap(evsel);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
+ return NULL;
+
+ return MMAP(evsel, cpu_map_idx, thread)->base;
+}
+
+int perf_evsel__read_size(struct perf_evsel *evsel)
+{
+ u64 read_format = evsel->attr.read_format;
+ int entry = sizeof(u64); /* value */
+ int size = 0;
+ int nr = 1;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ size += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ size += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ nr = evsel->nr_members;
+ size += sizeof(u64);
+ }
+
+ size += entry * nr;
+ return size;
+}
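+
+/*
+ * Worked example of the size computation above: for PERF_FORMAT_GROUP |
+ * PERF_FORMAT_ID with 2 group members, entry = 16 bytes (value + id) and
+ * size = 8 (nr) + 2 * 16 = 40 bytes.
+ */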
+
+/* This only reads values for the leader */
+static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx,
+ int thread, struct perf_counts_values *count)
+{
+ size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ u64 *data;
+ int idx = 1;
+
+ if (fd == NULL || *fd < 0)
+ return -EINVAL;
+
+ data = calloc(1, size);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (readn(*fd, data, size) <= 0) {
+ free(data);
+ return -errno;
+ }
+
+ /*
+ * This reads only the leader event intentionally since we don't have
+ * perf counts values for sibling events.
+ */
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = data[idx++];
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = data[idx++];
+
+ /* value is always available */
+ count->val = data[idx++];
+ if (read_format & PERF_FORMAT_ID)
+ count->id = data[idx++];
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = data[idx++];
+
+ free(data);
+ return 0;
+}
+
+/*
+ * The perf read format is very flexible; populate the count values
+ * according to the enabled read_format bits.
+ */
+static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf,
+ struct perf_counts_values *count)
+{
+ u64 read_format = evsel->attr.read_format;
+ int n = 0;
+
+ count->val = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = buf[n++];
+
+ if (read_format & PERF_FORMAT_ID)
+ count->id = buf[n++];
+
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = buf[n++];
+}
+
+int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
+ struct perf_counts_values *count)
+{
+ size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ struct perf_counts_values buf;
+
+ memset(count, 0, sizeof(*count));
+
+ if (fd == NULL || *fd < 0)
+ return -EINVAL;
+
+ if (read_format & PERF_FORMAT_GROUP)
+ return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);
+
+ if (MMAP(evsel, cpu_map_idx, thread) &&
+ !(read_format & (PERF_FORMAT_ID | PERF_FORMAT_LOST)) &&
+ !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
+ return 0;
+
+ if (readn(*fd, buf.values, size) <= 0)
+ return -errno;
+
+ perf_evsel__adjust_values(evsel, buf.values, count);
+ return 0;
+}
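+
+/*
+ * A minimal counting sketch using the calls above (illustrative only;
+ * error handling elided and the attr values are an assumed choice):
+ *
+ *    struct perf_event_attr attr = {
+ *        .type = PERF_TYPE_SOFTWARE,
+ *        .config = PERF_COUNT_SW_CPU_CLOCK,
+ *    };
+ *    struct perf_thread_map *threads = perf_thread_map__new_dummy();
+ *    struct perf_evsel *evsel = perf_evsel__new(&attr);
+ *    struct perf_counts_values counts;
+ *
+ *    perf_thread_map__set_pid(threads, 0, 0);    // 0 == current process
+ *    perf_evsel__open(evsel, NULL, threads);
+ *    perf_evsel__enable(evsel);
+ *    // ... workload ...
+ *    perf_evsel__disable(evsel);
+ *    perf_evsel__read(evsel, 0, 0, &counts);    // counts.val holds the count
+ */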
+
+static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
+ int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ return ioctl(*fd, ioc, arg);
+}
+
+static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
+ int ioc, void *arg,
+ int cpu_map_idx)
+{
+ int thread;
+
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
+{
+ return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
+}
+
+int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
+{
+ struct perf_cpu cpu __maybe_unused;
+ int idx;
+ int err;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
+ err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int perf_evsel__enable(struct perf_evsel *evsel)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
+ err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
+ return err;
+}
+
+int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
+{
+ return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
+}
+
+int perf_evsel__disable(struct perf_evsel *evsel)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
+ err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
+ return err;
+}
+
+int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
+{
+ int err = 0, i;
+
+ for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
+ err = perf_evsel__run_ioctl(evsel,
+ PERF_EVENT_IOC_SET_FILTER,
+ (void *)filter, i);
+ return err;
+}
+
+struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+{
+ return evsel->cpus;
+}
+
+struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
+{
+ return evsel->threads;
+}
+
+struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
+{
+ return &evsel->attr;
+}
+
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ if (ncpus == 0 || nthreads == 0)
+ return 0;
+
+ evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+ if (evsel->sample_id == NULL)
+ return -ENOMEM;
+
+ evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
+ if (evsel->id == NULL) {
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ zfree(&evsel->id);
+ evsel->ids = 0;
+}
+
+void perf_counts_values__scale(struct perf_counts_values *count,
+ bool scale, __s8 *pscaled)
+{
+ s8 scaled = 0;
+
+ if (scale) {
+ if (count->run == 0) {
+ scaled = -1;
+ count->val = 0;
+ } else if (count->run < count->ena) {
+ scaled = 1;
+ count->val = (u64)((double)count->val * count->ena / count->run);
+ }
+ }
+
+ if (pscaled)
+ *pscaled = scaled;
+}
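+
+/*
+ * Worked example of the scaling above: with val = 1000, ena = 200 and
+ * run = 100 (the counter was multiplexed and ran half the time), the
+ * scaled value is 1000 * 200 / 100 = 2000 and *pscaled is set to 1.
+ */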
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
new file mode 100644
index 0000000000..49649eb51c
--- /dev/null
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_CPUMAP_H
+#define __LIBPERF_INTERNAL_CPUMAP_H
+
+#include <linux/refcount.h>
+#include <perf/cpumap.h>
+#include <internal/rc_check.h>
+
+/**
+ * A sized, reference counted, sorted array of integers representing CPU
+ * numbers. This is commonly used to capture which CPUs a PMU is associated
+ * with. Indices into the cpumap are frequently used, as they avoid the gaps
+ * that raw CPU numbers may have. For events associated with a pid, rather than
+ * a CPU, a single dummy map with an entry of -1 is used.
+ */
+DECLARE_RC_STRUCT(perf_cpu_map) {
+ refcount_t refcnt;
+ /** Length of the map array. */
+ int nr;
+ /** The CPU values. */
+ struct perf_cpu map[];
+};
+
+#ifndef MAX_NR_CPUS
+#define MAX_NR_CPUS 2048
+#endif
+
+struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
+
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
+
+static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
+{
+ return &RC_CHK_ACCESS(map)->refcnt;
+}
+#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
new file mode 100644
index 0000000000..3339bc2f17
--- /dev/null
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_EVLIST_H
+#define __LIBPERF_INTERNAL_EVLIST_H
+
+#include <linux/list.h>
+#include <api/fd/array.h>
+#include <internal/cpumap.h>
+#include <internal/evsel.h>
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+struct perf_cpu_map;
+struct perf_thread_map;
+struct perf_mmap_param;
+
+struct perf_evlist {
+ struct list_head entries;
+ int nr_entries;
+ bool has_user_cpus;
+ bool needs_map_propagation;
+ /**
+ * The cpus passed from the command line or all online CPUs by
+ * default.
+ */
+ struct perf_cpu_map *user_requested_cpus;
+ /** The union of all evsel cpu maps. */
+ struct perf_cpu_map *all_cpus;
+ struct perf_thread_map *threads;
+ int nr_mmaps;
+ size_t mmap_len;
+ struct fdarray pollfd;
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ struct perf_mmap *mmap;
+ struct perf_mmap *mmap_ovw;
+ struct perf_mmap *mmap_first;
+ struct perf_mmap *mmap_ovw_first;
+};
+
+typedef void
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_evsel*,
+ struct perf_mmap_param*, int);
+typedef struct perf_mmap*
+(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
+typedef int
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, struct perf_cpu);
+
+struct perf_evlist_mmap_ops {
+ perf_evlist_mmap__cb_idx_t idx;
+ perf_evlist_mmap__cb_get_t get;
+ perf_evlist_mmap__cb_mmap_t mmap;
+};
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+ void *ptr, short revent, enum fdarray_flags flags);
+
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+ struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp);
+
+void perf_evlist__init(struct perf_evlist *evlist);
+void perf_evlist__exit(struct perf_evlist *evlist);
+
+/**
+ * __perf_evlist__for_each_entry - iterate through all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct perf_evsel iterator
+ */
+#define __perf_evlist__for_each_entry(list, evsel) \
+ list_for_each_entry(evsel, list, node)
+
+/**
+ * perf_evlist__for_each_entry - iterate through all the evsels
+ * @evlist: perf_evlist instance to iterate
+ * @evsel: struct perf_evsel iterator
+ */
+#define perf_evlist__for_each_entry(evlist, evsel) \
+ __perf_evlist__for_each_entry(&(evlist)->entries, evsel)
+
+/**
+ * __perf_evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
+ * @list: list_head instance to iterate
+ * @evsel: struct perf_evsel iterator
+ */
+#define __perf_evlist__for_each_entry_reverse(list, evsel) \
+ list_for_each_entry_reverse(evsel, list, node)
+
+/**
+ * perf_evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
+ * @evlist: evlist instance to iterate
+ * @evsel: struct perf_evsel iterator
+ */
+#define perf_evlist__for_each_entry_reverse(evlist, evsel) \
+ __perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
+
+/**
+ * __perf_evlist__for_each_entry_safe - safely iterate through all the evsels
+ * @list: list_head instance to iterate
+ * @tmp: struct perf_evsel temp iterator
+ * @evsel: struct perf_evsel iterator
+ */
+#define __perf_evlist__for_each_entry_safe(list, tmp, evsel) \
+ list_for_each_entry_safe(evsel, tmp, list, node)
+
+/**
+ * perf_evlist__for_each_entry_safe - safely iterate through all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct perf_evsel iterator
+ * @tmp: struct perf_evsel temp iterator
+ */
+#define perf_evlist__for_each_entry_safe(evlist, tmp, evsel) \
+ __perf_evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
+
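+/*
+ * A minimal iteration sketch using the macros above (illustrative only;
+ * do_something() is hypothetical):
+ *
+ *    struct perf_evsel *evsel;
+ *
+ *    perf_evlist__for_each_entry(evlist, evsel)
+ *        do_something(evsel);
+ */
+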
+static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.prev, struct perf_evsel, node);
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+
+void perf_evlist__id_add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id);
+
+int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd);
+
+void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
+
+void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader);
+#endif /* __LIBPERF_INTERNAL_EVLIST_H */
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
new file mode 100644
index 0000000000..5cd220a619
--- /dev/null
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_EVSEL_H
+#define __LIBPERF_INTERNAL_EVSEL_H
+
+#include <linux/types.h>
+#include <linux/perf_event.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <internal/cpumap.h>
+
+struct perf_thread_map;
+struct xyarray;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+ struct hlist_node node;
+ u64 id;
+ struct perf_evsel *evsel;
+ /*
+ * 'idx' will be used for AUX area sampling. A sample will have AUX area
+ * data that will be queued for decoding, where there are separate
+ * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
+ * The sample ID can be used to lookup 'idx' which is effectively the
+ * queue number.
+ */
+ int idx;
+ struct perf_cpu cpu;
+ pid_t tid;
+
+ /* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
+ pid_t machine_pid;
+ struct perf_cpu vcpu;
+
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
+};
+
+struct perf_evsel {
+ struct list_head node;
+ struct perf_event_attr attr;
+ /** The commonly used cpu map of CPUs the event should be opened upon, etc. */
+ struct perf_cpu_map *cpus;
+ /**
+ * The cpu map read from the PMU. For core PMUs this is the list of all
+ * CPUs the event can be opened upon. For other PMUs this is the default
+ * cpu map for opening the event on, for example, the first CPU on a
+ * socket for an uncore event.
+ */
+ struct perf_cpu_map *own_cpus;
+ struct perf_thread_map *threads;
+ struct xyarray *fd;
+ struct xyarray *mmap;
+ struct xyarray *sample_id;
+ u64 *id;
+ u32 ids;
+ struct perf_evsel *leader;
+
+ /* parse modifier helper */
+ int nr_members;
+ /*
+ * system_wide is for events that need to be on every CPU, irrespective
+ * of user requested CPUs or threads. The main example of this is the
+ * dummy event. Map propagation will set cpus for this event to all CPUs,
+ * as software PMU events like the dummy one have an empty CPU map.
+ */
+ bool system_wide;
+ /*
+ * Some events, for example uncore events, require a CPU.
+ * i.e. it cannot be the 'any CPU' value of -1.
+ */
+ bool requires_cpu;
+ /** Is the PMU for the event a core one? Affects the handling of own_cpus. */
+ bool is_pmu_core;
+ int idx;
+};
+
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx);
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+void perf_evsel__close_fd(struct perf_evsel *evsel);
+void perf_evsel__free_fd(struct perf_evsel *evsel);
+int perf_evsel__read_size(struct perf_evsel *evsel);
+int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
+
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
+void perf_evsel__free_id(struct perf_evsel *evsel);
+
+#endif /* __LIBPERF_INTERNAL_EVSEL_H */
diff --git a/tools/lib/perf/include/internal/lib.h b/tools/lib/perf/include/internal/lib.h
new file mode 100644
index 0000000000..85471a4b90
--- /dev/null
+++ b/tools/lib/perf/include/internal/lib.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_LIB_H
+#define __LIBPERF_INTERNAL_LIB_H
+
+#include <sys/types.h>
+
+extern unsigned int page_size;
+
+ssize_t readn(int fd, void *buf, size_t n);
+ssize_t writen(int fd, const void *buf, size_t n);
+
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs);
+
+#endif /* __LIBPERF_INTERNAL_LIB_H */
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
new file mode 100644
index 0000000000..5a062af8e9
--- /dev/null
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_MMAP_H
+#define __LIBPERF_INTERNAL_MMAP_H
+
+#include <linux/compiler.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <stdbool.h>
+#include <internal/cpumap.h>
+
+/* perf samples have a 16-bit size limit */
+#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+
+struct perf_mmap;
+struct perf_counts_values;
+
+typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
+
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - reference count, taken e.g. by code using PERF_EVENT_IOC_SET_OUTPUT to share this mmap
+ */
+struct perf_mmap {
+ void *base;
+ int mask;
+ int fd;
+ struct perf_cpu cpu;
+ refcount_t refcnt;
+ u64 prev;
+ u64 start;
+ u64 end;
+ bool overwrite;
+ u64 flush;
+ libperf_unmap_cb_t unmap_cb;
+ char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ struct perf_mmap *next;
+};
+
+struct perf_mmap_param {
+ int prot;
+ int mask;
+};
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map);
+
+void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
+ bool overwrite, libperf_unmap_cb_t unmap_cb);
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int fd, struct perf_cpu cpu);
+void perf_mmap__munmap(struct perf_mmap *map);
+void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);
+
+u64 perf_mmap__read_head(struct perf_mmap *map);
+
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count);
+
+#endif /* __LIBPERF_INTERNAL_MMAP_H */
diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
new file mode 100644
index 0000000000..e88a6d8a0b
--- /dev/null
+++ b/tools/lib/perf/include/internal/rc_check.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_INTERNAL_RC_CHECK_H
+#define __LIBPERF_INTERNAL_RC_CHECK_H
+
+#include <stdlib.h>
+#include <linux/zalloc.h>
+
+/*
+ * Enable reference count checking implicitly with leak checking, which is
+ * integrated into address sanitizer.
+ */
+#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#define REFCNT_CHECKING 1
+#elif defined(__has_feature)
+#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
+#define REFCNT_CHECKING 1
+#endif
+#endif
+
+/*
+ * Shared reference count checking macros.
+ *
+ * Reference count checking is an approach to sanitizing the use of reference
+ * counted structs. It leverages address and leak sanitizers to make sure gets
+ * are paired with a put. Reference count checking adds a malloc-ed layer of
+ * indirection on a get, and frees it on a put. A missed put will be reported as
+ * a memory leak. A double put will be reported as a double free. Accessing
+ * after a put will cause a use-after-free and/or a segfault.
+ */
+
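+/*
+ * A minimal sketch of the intended get/put pattern (assuming a struct
+ * declared with DECLARE_RC_STRUCT(foo) that has a refcount_t refcnt member;
+ * 'foo' is hypothetical):
+ *
+ *    struct foo *foo__get(struct foo *f)
+ *    {
+ *        struct foo *result;
+ *
+ *        if (RC_CHK_GET(result, f))
+ *            refcount_inc(&RC_CHK_ACCESS(f)->refcnt);
+ *        return result;
+ *    }
+ *
+ *    void foo__put(struct foo *f)
+ *    {
+ *        if (f && refcount_dec_and_test(&RC_CHK_ACCESS(f)->refcnt))
+ *            RC_CHK_FREE(f);
+ *        else
+ *            RC_CHK_PUT(f);
+ *    }
+ */
+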
+#ifndef REFCNT_CHECKING
+/* Replaces "struct foo" so that the pointer may be interposed. */
+#define DECLARE_RC_STRUCT(struct_name) \
+ struct struct_name
+
+/* Declare a reference counted struct variable. */
+#define RC_STRUCT(struct_name) struct struct_name
+
+/*
+ * Interpose the indirection. Result will hold the indirection and object is the
+ * reference counted struct.
+ */
+#define ADD_RC_CHK(result, object) (result = object, object)
+
+/* Strip the indirection layer. */
+#define RC_CHK_ACCESS(object) object
+
+/* Frees the object and the indirection layer. */
+#define RC_CHK_FREE(object) free(object)
+
+/* A get operation adding the indirection layer. */
+#define RC_CHK_GET(result, object) ADD_RC_CHK(result, object)
+
+/* A put operation removing the indirection layer. */
+#define RC_CHK_PUT(object) {}
+
+#else
+
+/* Replaces "struct foo" so that the pointer may be interposed. */
+#define DECLARE_RC_STRUCT(struct_name) \
+ struct original_##struct_name; \
+ struct struct_name { \
+ struct original_##struct_name *orig; \
+ }; \
+ struct original_##struct_name
+
+/* Declare a reference counted struct variable. */
+#define RC_STRUCT(struct_name) struct original_##struct_name
+
+/*
+ * Interpose the indirection. Result will hold the indirection and object is the
+ * reference counted struct.
+ */
+#define ADD_RC_CHK(result, object) \
+ ( \
+ object ? (result = malloc(sizeof(*result)), \
+ result ? (result->orig = object, result) \
+ : (result = NULL, NULL)) \
+ : (result = NULL, NULL) \
+ )
+
+/* Strip the indirection layer. */
+#define RC_CHK_ACCESS(object) object->orig
+
+/* Frees the object and the indirection layer. */
+#define RC_CHK_FREE(object) \
+ do { \
+ zfree(&object->orig); \
+ free(object); \
+ } while(0)
+
+/* A get operation adding the indirection layer. */
+#define RC_CHK_GET(result, object) ADD_RC_CHK(result, (object ? object->orig : NULL))
+
+/* A put operation removing the indirection layer. */
+#define RC_CHK_PUT(object) \
+ do { \
+ if (object) { \
+ object->orig = NULL; \
+ free(object); \
+ } \
+ } while(0)
+
+#endif
+
+#endif /* __LIBPERF_INTERNAL_RC_CHECK_H */
diff --git a/tools/lib/perf/include/internal/tests.h b/tools/lib/perf/include/internal/tests.h
new file mode 100644
index 0000000000..b130a6663f
--- /dev/null
+++ b/tools/lib/perf/include/internal/tests.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_TESTS_H
+#define __LIBPERF_INTERNAL_TESTS_H
+
+#include <stdio.h>
+#include <unistd.h>
+
+extern int tests_failed;
+extern int tests_verbose;
+
+static inline int get_verbose(char **argv, int argc)
+{
+ int c;
+ int verbose = 0;
+
+ while ((c = getopt(argc, argv, "v")) != -1) {
+ switch (c)
+ {
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ optind = 1;
+
+ return verbose;
+}
+
+#define __T_START \
+do { \
+ tests_verbose = get_verbose(argv, argc); \
+ fprintf(stdout, "- running %s...", __FILE__); \
+ fflush(NULL); \
+ tests_failed = 0; \
+} while (0)
+
+#define __T_END \
+do { \
+ if (tests_failed) \
+ fprintf(stdout, " FAILED (%d)\n", tests_failed); \
+ else \
+ fprintf(stdout, "OK\n"); \
+} while (0)
+
+#define __T(text, cond) \
+do { \
+ if (!(cond)) { \
+ fprintf(stderr, "FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
+ tests_failed++; \
+ return -1; \
+ } \
+} while (0)
+
+#define __T_VERBOSE(...) \
+do { \
+ if (tests_verbose) { \
+ if (tests_verbose == 1) { \
+ fputc('\n', stderr); \
+ tests_verbose++; \
+ } \
+ fprintf(stderr, ##__VA_ARGS__); \
+ } \
+} while (0)
+
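+/*
+ * A minimal test sketch using the macros above (illustrative only):
+ *
+ *    int tests_failed, tests_verbose;
+ *
+ *    int main(int argc, char **argv)
+ *    {
+ *        __T_START;
+ *        __T("1 + 1 must be 2", 1 + 1 == 2);
+ *        __T_END;
+ *        return tests_failed ? 1 : 0;
+ *    }
+ */
+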
+#endif /* __LIBPERF_INTERNAL_TESTS_H */
diff --git a/tools/lib/perf/include/internal/threadmap.h b/tools/lib/perf/include/internal/threadmap.h
new file mode 100644
index 0000000000..df748baf9e
--- /dev/null
+++ b/tools/lib/perf/include/internal/threadmap.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_THREADMAP_H
+#define __LIBPERF_INTERNAL_THREADMAP_H
+
+#include <linux/refcount.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+struct thread_map_data {
+ pid_t pid;
+ char *comm;
+};
+
+struct perf_thread_map {
+ refcount_t refcnt;
+ int nr;
+ int err_thread;
+ struct thread_map_data map[];
+};
+
+struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr);
+
+#endif /* __LIBPERF_INTERNAL_THREADMAP_H */
diff --git a/tools/lib/perf/include/internal/xyarray.h b/tools/lib/perf/include/internal/xyarray.h
new file mode 100644
index 0000000000..f10af3da7b
--- /dev/null
+++ b/tools/lib/perf/include/internal/xyarray.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_XYARRAY_H
+#define __LIBPERF_INTERNAL_XYARRAY_H
+
+#include <linux/compiler.h>
+#include <sys/types.h>
+
+struct xyarray {
+ size_t row_size;
+ size_t entry_size;
+ size_t entries;
+ size_t max_x;
+ size_t max_y;
+ char contents[] __aligned(8);
+};
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
+void xyarray__delete(struct xyarray *xy);
+void xyarray__reset(struct xyarray *xy);
+
+static inline void *__xyarray__entry(struct xyarray *xy, int x, int y)
+{
+ return &xy->contents[x * xy->row_size + y * xy->entry_size];
+}
+
+static inline void *xyarray__entry(struct xyarray *xy, size_t x, size_t y)
+{
+ if (x >= xy->max_x || y >= xy->max_y)
+ return NULL;
+ return __xyarray__entry(xy, x, y);
+}
+
+static inline int xyarray__max_y(struct xyarray *xy)
+{
+ return xy->max_y;
+}
+
+static inline int xyarray__max_x(struct xyarray *xy)
+{
+ return xy->max_x;
+}
+
+#endif /* __LIBPERF_INTERNAL_XYARRAY_H */
diff --git a/tools/lib/perf/include/perf/bpf_perf.h b/tools/lib/perf/include/perf/bpf_perf.h
new file mode 100644
index 0000000000..e7cf6ba7b6
--- /dev/null
+++ b/tools/lib/perf/include/perf/bpf_perf.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_BPF_PERF_H
+#define __LIBPERF_BPF_PERF_H
+
+#include <linux/types.h> /* for __u32 */
+
+/*
+ * bpf_perf uses a hashmap, the attr_map, to track all the leader programs.
+ * The hashmap is pinned in bpffs. flock() on this file is used to ensure
+ * no concurrent access to the attr_map. The key of attr_map is struct
+ * perf_event_attr, and the value is struct perf_event_attr_map_entry.
+ *
+ * struct perf_event_attr_map_entry contains two __u32 IDs, bpf_link of the
+ * leader prog, and the diff_map. Each perf-stat session holds a reference
+ * to the bpf_link to make sure the leader prog is attached to sched_switch
+ * tracepoint.
+ *
+ * Since the hashmap only contains IDs of the bpf_link and diff_map, it
+ * does not hold any references to the leader program. Once all perf-stat
+ * sessions of these events exit, the leader prog, its maps, and the
+ * perf_events will be freed.
+ */
+struct perf_event_attr_map_entry {
+ __u32 link_id;
+ __u32 diff_map_id;
+};
+
+/* default attr_map name */
+#define BPF_PERF_DEFAULT_ATTR_MAP_PATH "perf_attr_map"
+
+#endif /* __LIBPERF_BPF_PERF_H */
diff --git a/tools/lib/perf/include/perf/core.h b/tools/lib/perf/include/perf/core.h
new file mode 100644
index 0000000000..a3f6d68eda
--- /dev/null
+++ b/tools/lib/perf/include/perf/core.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_CORE_H
+#define __LIBPERF_CORE_H
+
+#include <stdarg.h>
+
+#ifndef LIBPERF_API
+#define LIBPERF_API __attribute__((visibility("default")))
+#endif
+
+enum libperf_print_level {
+ LIBPERF_ERR,
+ LIBPERF_WARN,
+ LIBPERF_INFO,
+ LIBPERF_DEBUG,
+ LIBPERF_DEBUG2,
+ LIBPERF_DEBUG3,
+};
+
+typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
+ const char *, va_list ap);
+
+LIBPERF_API void libperf_init(libperf_print_fn_t fn);
+
+#endif /* __LIBPERF_CORE_H */
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
new file mode 100644
index 0000000000..e38d859a38
--- /dev/null
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_CPUMAP_H
+#define __LIBPERF_CPUMAP_H
+
+#include <perf/core.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+/** A wrapper around a CPU to avoid confusion with indices into the perf_cpu_map's map array. */
+struct perf_cpu {
+ int cpu;
+};
+
+struct perf_cache {
+ int cache_lvl;
+ int cache;
+};
+
+struct perf_cpu_map;
+
+/**
+ * perf_cpu_map__dummy_new - a map with a singular "any CPU"/dummy -1 value.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
+ struct perf_cpu_map *other);
+LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+/**
+ * perf_cpu_map__empty - is the map either empty or the "any CPU"/dummy value?
+ */
+LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
+LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,
+ const struct perf_cpu_map *rhs);
+/**
+ * perf_cpu_map__has_any_cpu - Does the map contain the "any CPU"/dummy -1 value?
+ */
+LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map);
+
+#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
+ for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
+ (idx) < perf_cpu_map__nr(cpus); \
+ (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
+
+#define perf_cpu_map__for_each_idx(idx, cpus) \
+ for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
+
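+/*
+ * A minimal iteration sketch (illustrative only; the "0-3" CPU list is an
+ * assumed input):
+ *
+ *    struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
+ *    struct perf_cpu cpu;
+ *    int idx;
+ *
+ *    perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ *        printf("idx %d: CPU %d\n", idx, cpu.cpu);
+ *    perf_cpu_map__put(cpus);
+ */
+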
+#endif /* __LIBPERF_CPUMAP_H */
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
new file mode 100644
index 0000000000..ae64090184
--- /dev/null
+++ b/tools/lib/perf/include/perf/event.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_EVENT_H
+#define __LIBPERF_EVENT_H
+
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/bpf.h>
+#include <sys/types.h> /* pid_t */
+
+#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
+
+struct perf_record_mmap {
+ struct perf_event_header header;
+ __u32 pid, tid;
+ __u64 start;
+ __u64 len;
+ __u64 pgoff;
+ char filename[PATH_MAX];
+};
+
+struct perf_record_mmap2 {
+ struct perf_event_header header;
+ __u32 pid, tid;
+ __u64 start;
+ __u64 len;
+ __u64 pgoff;
+ union {
+ struct {
+ __u32 maj;
+ __u32 min;
+ __u64 ino;
+ __u64 ino_generation;
+ };
+ struct {
+ __u8 build_id_size;
+ __u8 __reserved_1;
+ __u16 __reserved_2;
+ __u8 build_id[20];
+ };
+ };
+ __u32 prot;
+ __u32 flags;
+ char filename[PATH_MAX];
+};
+
+struct perf_record_comm {
+ struct perf_event_header header;
+ __u32 pid, tid;
+ char comm[16];
+};
+
+struct perf_record_namespaces {
+ struct perf_event_header header;
+ __u32 pid, tid;
+ __u64 nr_namespaces;
+ struct perf_ns_link_info link_info[];
+};
+
+struct perf_record_fork {
+ struct perf_event_header header;
+ __u32 pid, ppid;
+ __u32 tid, ptid;
+ __u64 time;
+};
+
+struct perf_record_lost {
+ struct perf_event_header header;
+ __u64 id;
+ __u64 lost;
+};
+
+#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)
+
+struct perf_record_lost_samples {
+ struct perf_event_header header;
+ __u64 lost;
+};
+
+/*
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
+ */
+struct perf_record_read {
+ struct perf_event_header header;
+ __u32 pid, tid;
+ __u64 value;
+ __u64 time_enabled;
+ __u64 time_running;
+ __u64 id;
+ __u64 lost;
+};
+
+struct perf_record_throttle {
+ struct perf_event_header header;
+ __u64 time;
+ __u64 id;
+ __u64 stream_id;
+};
+
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 512
+#endif
+
+struct perf_record_ksymbol {
+ struct perf_event_header header;
+ __u64 addr;
+ __u32 len;
+ __u16 ksym_type;
+ __u16 flags;
+ char name[KSYM_NAME_LEN];
+};
+
+struct perf_record_bpf_event {
+ struct perf_event_header header;
+ __u16 type;
+ __u16 flags;
+ __u32 id;
+
+ /* for bpf_prog types */
+ __u8 tag[BPF_TAG_SIZE]; // prog tag
+};
+
+struct perf_record_cgroup {
+ struct perf_event_header header;
+ __u64 id;
+ char path[PATH_MAX];
+};
+
+struct perf_record_text_poke_event {
+ struct perf_event_header header;
+ __u64 addr;
+ __u16 old_len;
+ __u16 new_len;
+ __u8 bytes[];
+};
+
+struct perf_record_sample {
+ struct perf_event_header header;
+ __u64 array[];
+};
+
+struct perf_record_switch {
+ struct perf_event_header header;
+ __u32 next_prev_pid;
+ __u32 next_prev_tid;
+};
+
+struct perf_record_header_attr {
+ struct perf_event_header header;
+ struct perf_event_attr attr;
+ /*
+	 * An array of u64 ids follows here, but we cannot use a flexible array
+	 * because the size of attr in the data can differ from the current
+ * version. Please use perf_record_header_attr_id() below.
+ *
+ * __u64 id[]; // do not use this
+ */
+};
+
+/* Returns a pointer to the id array based on the actual attr size. */
+#define perf_record_header_attr_id(evt) \
+ ((void *)&(evt)->attr.attr + (evt)->attr.attr.size)
+
+enum {
+ PERF_CPU_MAP__CPUS = 0,
+ PERF_CPU_MAP__MASK = 1,
+ PERF_CPU_MAP__RANGE_CPUS = 2,
+};
+
+/*
+ * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
+ * and each entry is a value for a CPU in the map.
+ */
+struct cpu_map_entries {
+ __u16 nr;
+ __u16 cpu[];
+};
+
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
+struct perf_record_mask_cpu_map32 {
+ /* Number of mask values. */
+ __u16 nr;
+ /* Constant 4. */
+ __u16 long_size;
+ /* Bitmap data. */
+ __u32 mask[];
+};
+
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
+struct perf_record_mask_cpu_map64 {
+ /* Number of mask values. */
+ __u16 nr;
+ /* Constant 8. */
+ __u16 long_size;
+ /* Legacy padding. */
+ char __pad[4];
+ /* Bitmap data. */
+ __u64 mask[];
+};
+
+/*
+ * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
+ * version had unaligned data and we wish to retain file format compatibility.
+ * -irogers
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+/*
+ * An encoding of a CPU map for a range starting at start_cpu through to
+ * end_cpu. If any_cpu is 1, the "any CPU" (-1) value (aka dummy value) is also present.
+ */
+struct perf_record_range_cpu_map {
+ __u8 any_cpu;
+ __u8 __pad;
+ __u16 start_cpu;
+ __u16 end_cpu;
+};
+
+struct perf_record_cpu_map_data {
+ __u16 type;
+ union {
+ /* Used when type == PERF_CPU_MAP__CPUS. */
+ struct cpu_map_entries cpus_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
+ struct perf_record_mask_cpu_map32 mask32_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
+ struct perf_record_mask_cpu_map64 mask64_data;
+ /* Used when type == PERF_CPU_MAP__RANGE_CPUS. */
+ struct perf_record_range_cpu_map range_cpu_data;
+ };
+} __attribute__((packed));
+
+#pragma GCC diagnostic pop
+
+struct perf_record_cpu_map {
+ struct perf_event_header header;
+ struct perf_record_cpu_map_data data;
+};
+
+enum {
+ PERF_EVENT_UPDATE__UNIT = 0,
+ PERF_EVENT_UPDATE__SCALE = 1,
+ PERF_EVENT_UPDATE__NAME = 2,
+ PERF_EVENT_UPDATE__CPUS = 3,
+};
+
+struct perf_record_event_update_cpus {
+ struct perf_record_cpu_map_data cpus;
+};
+
+struct perf_record_event_update_scale {
+ double scale;
+};
+
+struct perf_record_event_update {
+ struct perf_event_header header;
+ __u64 type;
+ __u64 id;
+ union {
+ /* Used when type == PERF_EVENT_UPDATE__SCALE. */
+ struct perf_record_event_update_scale scale;
+ /* Used when type == PERF_EVENT_UPDATE__UNIT. */
+ char unit[0];
+ /* Used when type == PERF_EVENT_UPDATE__NAME. */
+ char name[0];
+ /* Used when type == PERF_EVENT_UPDATE__CPUS. */
+ struct perf_record_event_update_cpus cpus;
+ };
+};
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+ __u64 event_id;
+ char name[MAX_EVENT_NAME];
+};
+
+struct perf_record_header_event_type {
+ struct perf_event_header header;
+ struct perf_trace_event_type event_type;
+};
+
+struct perf_record_header_tracing_data {
+ struct perf_event_header header;
+ __u32 size;
+};
+
+#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)
+
+struct perf_record_header_build_id {
+ struct perf_event_header header;
+ pid_t pid;
+ union {
+ __u8 build_id[24];
+ struct {
+ __u8 data[20];
+ __u8 size;
+ __u8 reserved1__;
+ __u16 reserved2__;
+ };
+ };
+ char filename[];
+};
+
+struct id_index_entry {
+ __u64 id;
+ __u64 idx;
+ __u64 cpu;
+ __u64 tid;
+};
+
+struct id_index_entry_2 {
+ __u64 machine_pid;
+ __u64 vcpu;
+};
+
+struct perf_record_id_index {
+ struct perf_event_header header;
+ __u64 nr;
+ struct id_index_entry entries[];
+};
+
+struct perf_record_auxtrace_info {
+ struct perf_event_header header;
+ __u32 type;
+ __u32 reserved__; /* For alignment */
+ __u64 priv[];
+};
+
+struct perf_record_auxtrace {
+ struct perf_event_header header;
+ __u64 size;
+ __u64 offset;
+ __u64 reference;
+ __u32 idx;
+ __u32 tid;
+ __u32 cpu;
+ __u32 reserved__; /* For alignment */
+};
+
+#define MAX_AUXTRACE_ERROR_MSG 64
+
+struct perf_record_auxtrace_error {
+ struct perf_event_header header;
+ __u32 type;
+ __u32 code;
+ __u32 cpu;
+ __u32 pid;
+ __u32 tid;
+ __u32 fmt;
+ __u64 ip;
+ __u64 time;
+ char msg[MAX_AUXTRACE_ERROR_MSG];
+ __u32 machine_pid;
+ __u32 vcpu;
+};
+
+struct perf_record_aux {
+ struct perf_event_header header;
+ __u64 aux_offset;
+ __u64 aux_size;
+ __u64 flags;
+};
+
+struct perf_record_itrace_start {
+ struct perf_event_header header;
+ __u32 pid;
+ __u32 tid;
+};
+
+struct perf_record_aux_output_hw_id {
+ struct perf_event_header header;
+ __u64 hw_id;
+};
+
+struct perf_record_thread_map_entry {
+ __u64 pid;
+ char comm[16];
+};
+
+struct perf_record_thread_map {
+ struct perf_event_header header;
+ __u64 nr;
+ struct perf_record_thread_map_entry entries[];
+};
+
+enum {
+ PERF_STAT_CONFIG_TERM__AGGR_MODE = 0,
+ PERF_STAT_CONFIG_TERM__INTERVAL = 1,
+ PERF_STAT_CONFIG_TERM__SCALE = 2,
+ PERF_STAT_CONFIG_TERM__AGGR_LEVEL = 3,
+ PERF_STAT_CONFIG_TERM__MAX = 4,
+};
+
+struct perf_record_stat_config_entry {
+ __u64 tag;
+ __u64 val;
+};
+
+struct perf_record_stat_config {
+ struct perf_event_header header;
+ __u64 nr;
+ struct perf_record_stat_config_entry data[];
+};
+
+struct perf_record_stat {
+ struct perf_event_header header;
+
+ __u64 id;
+ __u32 cpu;
+ __u32 thread;
+
+ union {
+ struct {
+ __u64 val;
+ __u64 ena;
+ __u64 run;
+ };
+ __u64 values[3];
+ };
+};
+
+struct perf_record_stat_round {
+ struct perf_event_header header;
+ __u64 type;
+ __u64 time;
+};
+
+struct perf_record_time_conv {
+ struct perf_event_header header;
+ __u64 time_shift;
+ __u64 time_mult;
+ __u64 time_zero;
+ __u64 time_cycles;
+ __u64 time_mask;
+ __u8 cap_user_time_zero;
+ __u8 cap_user_time_short;
+ __u8 reserved[6]; /* For alignment */
+};
+
+struct perf_record_header_feature {
+ struct perf_event_header header;
+ __u64 feat_id;
+ char data[];
+};
+
+struct perf_record_compressed {
+ struct perf_event_header header;
+ char data[];
+};
+
+enum perf_user_event_type { /* above any possible kernel type */
+ PERF_RECORD_USER_TYPE_START = 64,
+ PERF_RECORD_HEADER_ATTR = 64,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
+ PERF_RECORD_HEADER_TRACING_DATA = 66,
+ PERF_RECORD_HEADER_BUILD_ID = 67,
+ PERF_RECORD_FINISHED_ROUND = 68,
+ PERF_RECORD_ID_INDEX = 69,
+ PERF_RECORD_AUXTRACE_INFO = 70,
+ PERF_RECORD_AUXTRACE = 71,
+ PERF_RECORD_AUXTRACE_ERROR = 72,
+ PERF_RECORD_THREAD_MAP = 73,
+ PERF_RECORD_CPU_MAP = 74,
+ PERF_RECORD_STAT_CONFIG = 75,
+ PERF_RECORD_STAT = 76,
+ PERF_RECORD_STAT_ROUND = 77,
+ PERF_RECORD_EVENT_UPDATE = 78,
+ PERF_RECORD_TIME_CONV = 79,
+ PERF_RECORD_HEADER_FEATURE = 80,
+ PERF_RECORD_COMPRESSED = 81,
+ PERF_RECORD_FINISHED_INIT = 82,
+ PERF_RECORD_HEADER_MAX
+};
+
+union perf_event {
+ struct perf_event_header header;
+ struct perf_record_mmap mmap;
+ struct perf_record_mmap2 mmap2;
+ struct perf_record_comm comm;
+ struct perf_record_namespaces namespaces;
+ struct perf_record_cgroup cgroup;
+ struct perf_record_fork fork;
+ struct perf_record_lost lost;
+ struct perf_record_lost_samples lost_samples;
+ struct perf_record_read read;
+ struct perf_record_throttle throttle;
+ struct perf_record_sample sample;
+ struct perf_record_bpf_event bpf;
+ struct perf_record_ksymbol ksymbol;
+ struct perf_record_text_poke_event text_poke;
+ struct perf_record_header_attr attr;
+ struct perf_record_event_update event_update;
+ struct perf_record_header_event_type event_type;
+ struct perf_record_header_tracing_data tracing_data;
+ struct perf_record_header_build_id build_id;
+ struct perf_record_id_index id_index;
+ struct perf_record_auxtrace_info auxtrace_info;
+ struct perf_record_auxtrace auxtrace;
+ struct perf_record_auxtrace_error auxtrace_error;
+ struct perf_record_aux aux;
+ struct perf_record_itrace_start itrace_start;
+ struct perf_record_aux_output_hw_id aux_output_hw_id;
+ struct perf_record_switch context_switch;
+ struct perf_record_thread_map thread_map;
+ struct perf_record_cpu_map cpu_map;
+ struct perf_record_stat_config stat_config;
+ struct perf_record_stat stat;
+ struct perf_record_stat_round stat_round;
+ struct perf_record_time_conv time_conv;
+ struct perf_record_header_feature feat;
+ struct perf_record_compressed pack;
+};
+
+#endif /* __LIBPERF_EVENT_H */
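[Editor's note: consumers of 'union perf_event' typically switch on header.type and pick the matching view. A minimal dispatcher sketch — the kernel record types come from linux/perf_event.h, the user types from the enum above:

    #include <stdio.h>
    #include <perf/event.h>

    /* Illustrative handler for events pulled off a ring buffer. */
    static void handle_event(union perf_event *event)
    {
            switch (event->header.type) {
            case PERF_RECORD_MMAP2:
                    printf("mmap2: %s\n", event->mmap2.filename);
                    break;
            case PERF_RECORD_COMM:
                    printf("comm: %s\n", event->comm.comm);
                    break;
            case PERF_RECORD_CPU_MAP:       /* user type 74 */
                    printf("cpu map encoding: %u\n", event->cpu_map.data.type);
                    break;
            default:
                    break;
            }
    }
]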
diff --git a/tools/lib/perf/include/perf/evlist.h b/tools/lib/perf/include/perf/evlist.h
new file mode 100644
index 0000000000..e894b77077
--- /dev/null
+++ b/tools/lib/perf/include/perf/evlist.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_EVLIST_H
+#define __LIBPERF_EVLIST_H
+
+#include <perf/core.h>
+#include <stdbool.h>
+
+struct perf_evlist;
+struct perf_evsel;
+struct perf_cpu_map;
+struct perf_thread_map;
+
+LIBPERF_API void perf_evlist__add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+LIBPERF_API void perf_evlist__remove(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+LIBPERF_API struct perf_evlist *perf_evlist__new(void);
+LIBPERF_API void perf_evlist__delete(struct perf_evlist *evlist);
+LIBPERF_API struct perf_evsel* perf_evlist__next(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+LIBPERF_API int perf_evlist__open(struct perf_evlist *evlist);
+LIBPERF_API void perf_evlist__close(struct perf_evlist *evlist);
+LIBPERF_API void perf_evlist__enable(struct perf_evlist *evlist);
+LIBPERF_API void perf_evlist__disable(struct perf_evlist *evlist);
+
+#define perf_evlist__for_each_evsel(evlist, pos) \
+ for ((pos) = perf_evlist__next((evlist), NULL); \
+ (pos) != NULL; \
+ (pos) = perf_evlist__next((evlist), (pos)))
+
+LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads);
+LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
+LIBPERF_API int perf_evlist__filter_pollfd(struct perf_evlist *evlist,
+ short revents_and_mask);
+
+LIBPERF_API int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
+LIBPERF_API void perf_evlist__munmap(struct perf_evlist *evlist);
+
+LIBPERF_API struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
+ struct perf_mmap *map,
+ bool overwrite);
+#define perf_evlist__for_each_mmap(evlist, pos, overwrite) \
+ for ((pos) = perf_evlist__next_mmap((evlist), NULL, overwrite); \
+ (pos) != NULL; \
+ (pos) = perf_evlist__next_mmap((evlist), (pos), overwrite))
+
+LIBPERF_API void perf_evlist__set_leader(struct perf_evlist *evlist);
+LIBPERF_API int perf_evlist__nr_groups(struct perf_evlist *evlist);
+#endif /* __LIBPERF_EVLIST_H */
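[Editor's note: the evlist calls above compose into a fixed lifecycle — new, add, set_maps, open, enable, ..., disable, close, delete. A sketch under the same conventions as the tests below, with error checks elided and the helper name illustrative:

    #include <linux/perf_event.h>
    #include <perf/cpumap.h>
    #include <perf/evlist.h>
    #include <perf/evsel.h>

    static void count_on_all_cpus(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_SOFTWARE,
                    .config = PERF_COUNT_SW_CPU_CLOCK,
            };
            struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
            struct perf_evlist *evlist = perf_evlist__new();

            perf_evlist__add(evlist, perf_evsel__new(&attr));
            perf_evlist__set_maps(evlist, cpus, NULL);

            if (perf_evlist__open(evlist) == 0) {
                    perf_evlist__enable(evlist);
                    /* ... read counters via perf_evsel__read() ... */
                    perf_evlist__disable(evlist);
                    perf_evlist__close(evlist);
            }

            perf_evlist__delete(evlist);
            perf_cpu_map__put(cpus);
    }
]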
diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h
new file mode 100644
index 0000000000..6f92204075
--- /dev/null
+++ b/tools/lib/perf/include/perf/evsel.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_EVSEL_H
+#define __LIBPERF_EVSEL_H
+
+#include <stdint.h>
+#include <perf/core.h>
+#include <stdbool.h>
+#include <linux/types.h>
+
+struct perf_evsel;
+struct perf_event_attr;
+struct perf_cpu_map;
+struct perf_thread_map;
+
+struct perf_counts_values {
+ union {
+ struct {
+ uint64_t val;
+ uint64_t ena;
+ uint64_t run;
+ uint64_t id;
+ uint64_t lost;
+ };
+ uint64_t values[5];
+ };
+};
+
+LIBPERF_API struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr);
+LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel);
+LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads);
+LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel);
+LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+LIBPERF_API void perf_evsel__munmap(struct perf_evsel *evsel);
+LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
+LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
+ struct perf_counts_values *count);
+LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
+LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread);
+LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel);
+LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
+LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
+LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
+LIBPERF_API void perf_counts_values__scale(struct perf_counts_values *count,
+ bool scale, __s8 *pscaled);
+
+#endif /* __LIBPERF_EVSEL_H */
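[Editor's note: a single-event counting sketch for the evsel API above, monitoring the calling process via a dummy thread map pinned to pid 0; the helper name is illustrative and error checks are elided:

    #include <stdio.h>
    #include <linux/perf_event.h>
    #include <perf/evsel.h>
    #include <perf/threadmap.h>

    static void count_self(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_SOFTWARE,
                    .config = PERF_COUNT_SW_TASK_CLOCK,
            };
            struct perf_thread_map *threads = perf_thread_map__new_dummy();
            struct perf_evsel *evsel = perf_evsel__new(&attr);
            struct perf_counts_values counts = { .val = 0 };

            perf_thread_map__set_pid(threads, 0, 0); /* 0 == current process */

            if (perf_evsel__open(evsel, NULL, threads) == 0) {
                    perf_evsel__read(evsel, 0, 0, &counts);
                    printf("val=%llu ena=%llu run=%llu\n",
                           (unsigned long long)counts.val,
                           (unsigned long long)counts.ena,
                           (unsigned long long)counts.run);
                    perf_evsel__close(evsel);
            }

            perf_evsel__delete(evsel);
            perf_thread_map__put(threads);
    }
]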
diff --git a/tools/lib/perf/include/perf/mmap.h b/tools/lib/perf/include/perf/mmap.h
new file mode 100644
index 0000000000..9508ad90d8
--- /dev/null
+++ b/tools/lib/perf/include/perf/mmap.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_MMAP_H
+#define __LIBPERF_MMAP_H
+
+#include <perf/core.h>
+
+struct perf_mmap;
+union perf_event;
+
+LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
+LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
+LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
+LIBPERF_API union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+
+#endif /* __LIBPERF_MMAP_H */
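[Editor's note: the four mmap calls above form one fixed read sequence, documented again in mmap.c below. A sketch of the canonical per-buffer drain loop:

    #include <perf/event.h>
    #include <perf/mmap.h>

    /* 'map' would come from perf_evlist__for_each_mmap(). */
    static void drain(struct perf_mmap *map)
    {
            union perf_event *event;

            if (perf_mmap__read_init(map) < 0)
                    return;

            while ((event = perf_mmap__read_event(map)) != NULL) {
                    /* ... process the event ... */
                    perf_mmap__consume(map);
            }

            perf_mmap__read_done(map);
    }
]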
diff --git a/tools/lib/perf/include/perf/threadmap.h b/tools/lib/perf/include/perf/threadmap.h
new file mode 100644
index 0000000000..8b40e7777c
--- /dev/null
+++ b/tools/lib/perf/include/perf/threadmap.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_THREADMAP_H
+#define __LIBPERF_THREADMAP_H
+
+#include <perf/core.h>
+#include <sys/types.h>
+
+struct perf_thread_map;
+
+LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void);
+LIBPERF_API struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
+
+LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
+LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
+LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads);
+LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
+
+LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
+LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
+
+#endif /* __LIBPERF_THREADMAP_H */
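[Editor's note: a threadmap sketch — allocate an array-backed map, fill in pids, and read them back; the pid values are illustrative:

    #include <stdio.h>
    #include <perf/threadmap.h>

    static void show_threads(void)
    {
            pid_t pids[] = { 1234, 5678 };  /* illustrative pids */
            struct perf_thread_map *threads =
                    perf_thread_map__new_array(2, pids);
            int i;

            if (!threads)
                    return;

            for (i = 0; i < perf_thread_map__nr(threads); i++)
                    printf("thread %d: pid %d\n", i,
                           (int)perf_thread_map__pid(threads, i));

            perf_thread_map__put(threads);
    }
]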
diff --git a/tools/lib/perf/internal.h b/tools/lib/perf/internal.h
new file mode 100644
index 0000000000..2c27e158de
--- /dev/null
+++ b/tools/lib/perf/internal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_H
+#define __LIBPERF_INTERNAL_H
+
+#include <perf/core.h>
+
+void libperf_print(enum libperf_print_level level,
+ const char *format, ...)
+ __attribute__((format(printf, 2, 3)));
+
+#define __pr(level, fmt, ...) \
+do { \
+ libperf_print(level, "libperf: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_err(fmt, ...) __pr(LIBPERF_ERR, fmt, ##__VA_ARGS__)
+#define pr_warning(fmt, ...) __pr(LIBPERF_WARN, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) __pr(LIBPERF_INFO, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) __pr(LIBPERF_DEBUG, fmt, ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) __pr(LIBPERF_DEBUG2, fmt, ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) __pr(LIBPERF_DEBUG3, fmt, ##__VA_ARGS__)
+
+#endif /* __LIBPERF_INTERNAL_H */
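[Editor's note: the pr_* wrappers behave like printf and prepend "libperf: " before handing the message to the callback registered via libperf_init(). A sketch for library-internal code; the function name is illustrative:

    #include <stddef.h>
    #include "internal.h"   /* libperf-internal code only */

    static void report_mmap_failure(size_t len, int cpu)
    {
            pr_debug("mmap of %zu bytes failed on cpu %d\n", len, cpu);
            pr_err("cannot map ring buffer\n");
    }
]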
diff --git a/tools/lib/perf/lib.c b/tools/lib/perf/lib.c
new file mode 100644
index 0000000000..696fb0ea67
--- /dev/null
+++ b/tools/lib/perf/lib.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <linux/kernel.h>
+#include <internal/lib.h>
+
+unsigned int page_size;
+
+static ssize_t ion(bool is_read, int fd, void *buf, size_t n)
+{
+ void *buf_start = buf;
+ size_t left = n;
+
+ while (left) {
+ /* buf must be treated as const if !is_read. */
+ ssize_t ret = is_read ? read(fd, buf, left) :
+ write(fd, buf, left);
+
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ return ret;
+
+ left -= ret;
+ buf += ret;
+ }
+
+ BUG_ON((size_t)(buf - buf_start) != n);
+ return n;
+}
+
+/*
+ * Read exactly 'n' bytes or return an error.
+ */
+ssize_t readn(int fd, void *buf, size_t n)
+{
+ return ion(true, fd, buf, n);
+}
+
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs)
+{
+ size_t left = n;
+
+ while (left) {
+ ssize_t ret = pread(fd, buf, left, offs);
+
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ return ret;
+
+ left -= ret;
+ buf += ret;
+ offs += ret;
+ }
+
+ return n;
+}
+
+/*
+ * Write exactly 'n' bytes or return an error.
+ */
+ssize_t writen(int fd, const void *buf, size_t n)
+{
+ /* ion does not modify buf. */
+ return ion(false, fd, (void *)buf, n);
+}
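[Editor's note: because readn()/writen() retry on EINTR and loop over short transfers, callers can treat anything other than the full length as failure. A hedged sketch — the helper is illustrative; the prototypes live in internal/lib.h:

    #include <internal/lib.h>

    /* Copy a fixed-size header between two descriptors, all or nothing. */
    static int copy_header(int in_fd, int out_fd)
    {
            char buf[64];

            if (readn(in_fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
                    return -1;      /* EOF or error before 64 bytes */

            return writen(out_fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf) ? 0 : -1;
    }
]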
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
new file mode 100644
index 0000000000..190b56ae92
--- /dev/null
+++ b/tools/lib/perf/libperf.map
@@ -0,0 +1,59 @@
+LIBPERF_0.0.1 {
+ global:
+ libperf_init;
+ perf_cpu_map__dummy_new;
+ perf_cpu_map__default_new;
+ perf_cpu_map__get;
+ perf_cpu_map__put;
+ perf_cpu_map__new;
+ perf_cpu_map__read;
+ perf_cpu_map__nr;
+ perf_cpu_map__cpu;
+ perf_cpu_map__empty;
+ perf_cpu_map__max;
+ perf_cpu_map__has;
+ perf_thread_map__new_array;
+ perf_thread_map__new_dummy;
+ perf_thread_map__set_pid;
+ perf_thread_map__comm;
+ perf_thread_map__nr;
+ perf_thread_map__pid;
+ perf_thread_map__get;
+ perf_thread_map__put;
+ perf_evsel__new;
+ perf_evsel__delete;
+ perf_evsel__enable;
+ perf_evsel__disable;
+ perf_evsel__open;
+ perf_evsel__close;
+ perf_evsel__mmap;
+ perf_evsel__munmap;
+ perf_evsel__mmap_base;
+ perf_evsel__read;
+ perf_evsel__cpus;
+ perf_evsel__threads;
+ perf_evsel__attr;
+ perf_evlist__new;
+ perf_evlist__delete;
+ perf_evlist__open;
+ perf_evlist__close;
+ perf_evlist__enable;
+ perf_evlist__disable;
+ perf_evlist__add;
+ perf_evlist__remove;
+ perf_evlist__next;
+ perf_evlist__set_maps;
+ perf_evlist__poll;
+ perf_evlist__mmap;
+ perf_evlist__munmap;
+ perf_evlist__filter_pollfd;
+ perf_evlist__next_mmap;
+ perf_evlist__set_leader;
+ perf_mmap__consume;
+ perf_mmap__read_init;
+ perf_mmap__read_done;
+ perf_mmap__read_event;
+ perf_counts_values__scale;
+ local:
+ *;
+};
diff --git a/tools/lib/perf/libperf.pc.template b/tools/lib/perf/libperf.pc.template
new file mode 100644
index 0000000000..117e4a237b
--- /dev/null
+++ b/tools/lib/perf/libperf.pc.template
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+prefix=@PREFIX@
+libdir=@LIBDIR@
+includedir=${prefix}/include
+
+Name: libperf
+Description: perf library
+Version: @VERSION@
+Libs: -L${libdir} -lperf
+Cflags: -I${includedir}
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
new file mode 100644
index 0000000000..2184814b37
--- /dev/null
+++ b/tools/lib/perf/mmap.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/mman.h>
+#include <inttypes.h>
+#include <asm/bug.h>
+#include <errno.h>
+#include <string.h>
+#include <linux/ring_buffer.h>
+#include <linux/perf_event.h>
+#include <perf/mmap.h>
+#include <perf/event.h>
+#include <perf/evsel.h>
+#include <internal/mmap.h>
+#include <internal/lib.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/stringify.h>
+#include "internal.h"
+
+void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
+ bool overwrite, libperf_unmap_cb_t unmap_cb)
+{
+ map->fd = -1;
+ map->overwrite = overwrite;
+ map->unmap_cb = unmap_cb;
+ refcount_set(&map->refcnt, 0);
+ if (prev)
+ prev->next = map;
+}
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map)
+{
+ return map->mask + 1 + page_size;
+}
+
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int fd, struct perf_cpu cpu)
+{
+ map->prev = 0;
+ map->mask = mp->mask;
+ map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+ MAP_SHARED, fd, 0);
+ if (map->base == MAP_FAILED) {
+ map->base = NULL;
+ return -1;
+ }
+
+ map->fd = fd;
+ map->cpu = cpu;
+ return 0;
+}
+
+void perf_mmap__munmap(struct perf_mmap *map)
+{
+ if (map && map->base != NULL) {
+ munmap(map->base, perf_mmap__mmap_len(map));
+ map->base = NULL;
+ map->fd = -1;
+ refcount_set(&map->refcnt, 0);
+ }
+ if (map && map->unmap_cb)
+ map->unmap_cb(map);
+}
+
+void perf_mmap__get(struct perf_mmap *map)
+{
+ refcount_inc(&map->refcnt);
+}
+
+void perf_mmap__put(struct perf_mmap *map)
+{
+ BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+
+ if (refcount_dec_and_test(&map->refcnt))
+ perf_mmap__munmap(map);
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+ ring_buffer_write_tail(md->base, tail);
+}
+
+u64 perf_mmap__read_head(struct perf_mmap *map)
+{
+ return ring_buffer_read_head(map->base);
+}
+
+static bool perf_mmap__empty(struct perf_mmap *map)
+{
+ struct perf_event_mmap_page *pc = map->base;
+
+ return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
+}
+
+void perf_mmap__consume(struct perf_mmap *map)
+{
+ if (!map->overwrite) {
+ u64 old = map->prev;
+
+ perf_mmap__write_tail(map, old);
+ }
+
+ if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ perf_mmap__put(map);
+}
+
+static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
+{
+ struct perf_event_header *pheader;
+ u64 evt_head = *start;
+ int size = mask + 1;
+
+ pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
+ pheader = (struct perf_event_header *)(buf + (*start & mask));
+ while (true) {
+ if (evt_head - *start >= (unsigned int)size) {
+ pr_debug("Finished reading overwrite ring buffer: rewind\n");
+ if (evt_head - *start > (unsigned int)size)
+ evt_head -= pheader->size;
+ *end = evt_head;
+ return 0;
+ }
+
+ pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+ if (pheader->size == 0) {
+ pr_debug("Finished reading overwrite ring buffer: get start\n");
+ *end = evt_head;
+ return 0;
+ }
+
+ evt_head += pheader->size;
+ pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+ }
+ WARN_ONCE(1, "Shouldn't get here\n");
+ return -1;
+}
+
+/*
+ * Report the start and end of the available data in the ring buffer.
+ */
+static int __perf_mmap__read_init(struct perf_mmap *md)
+{
+ u64 head = perf_mmap__read_head(md);
+ u64 old = md->prev;
+ unsigned char *data = md->base + page_size;
+ unsigned long size;
+
+ md->start = md->overwrite ? head : old;
+ md->end = md->overwrite ? old : head;
+
+ if ((md->end - md->start) < md->flush)
+ return -EAGAIN;
+
+ size = md->end - md->start;
+ if (size > (unsigned long)(md->mask) + 1) {
+ if (!md->overwrite) {
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+ md->prev = head;
+ perf_mmap__consume(md);
+ return -EAGAIN;
+ }
+
+ /*
+		 * The backward ring buffer is full. We still have a chance to
+		 * read most of the data from it.
+ */
+ if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int perf_mmap__read_init(struct perf_mmap *map)
+{
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return -ENOENT;
+
+ return __perf_mmap__read_init(map);
+}
+
+/*
+ * Mandatory for overwrite mode.
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set the tail to map->prev.
+ * We need to correct map->prev to head, which is the end of the next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return;
+
+ map->prev = perf_mmap__read_head(map);
+}
+
+/* When check_messup is true, 'end' must point to a good entry. */
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
+ u64 *startp, u64 end)
+{
+ unsigned char *data = map->base + page_size;
+ union perf_event *event = NULL;
+ int diff = end - *startp;
+
+ if (diff >= (int)sizeof(event->header)) {
+ size_t size;
+
+ event = (union perf_event *)&data[*startp & map->mask];
+ size = event->header.size;
+
+ if (size < sizeof(event->header) || diff < (int)size)
+ return NULL;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+ unsigned int offset = *startp;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = map->event_copy;
+
+ do {
+ cpy = min(map->mask + 1 - (offset & map->mask), len);
+ memcpy(dst, &data[offset & map->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = (union perf_event *)map->event_copy;
+ }
+
+ *startp += size;
+ }
+
+ return event;
+}
+
+/*
+ * Read event from ring buffer one by one.
+ * Return one event for each call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ * //process the event
+ * perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+{
+ union perf_event *event;
+
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return NULL;
+
+	/* non-overwrite mode doesn't pause the ring buffer */
+ if (!map->overwrite)
+ map->end = perf_mmap__read_head(map);
+
+ event = perf_mmap__read(map, &map->start, map->end);
+
+ if (!map->overwrite)
+ map->prev = map->start;
+
+ return event;
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+static u64 read_perf_counter(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((u64)high) << 32;
+}
+
+static u64 read_timestamp(void)
+{
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((u64)high) << 32;
+}
+#elif defined(__aarch64__)
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+static u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+#define PMEVCNTR_READ(idx) \
+ static u64 read_pmevcntr_##idx(void) { \
+ return read_sysreg(pmevcntr##idx##_el0); \
+ }
+
+PMEVCNTR_READ(0);
+PMEVCNTR_READ(1);
+PMEVCNTR_READ(2);
+PMEVCNTR_READ(3);
+PMEVCNTR_READ(4);
+PMEVCNTR_READ(5);
+PMEVCNTR_READ(6);
+PMEVCNTR_READ(7);
+PMEVCNTR_READ(8);
+PMEVCNTR_READ(9);
+PMEVCNTR_READ(10);
+PMEVCNTR_READ(11);
+PMEVCNTR_READ(12);
+PMEVCNTR_READ(13);
+PMEVCNTR_READ(14);
+PMEVCNTR_READ(15);
+PMEVCNTR_READ(16);
+PMEVCNTR_READ(17);
+PMEVCNTR_READ(18);
+PMEVCNTR_READ(19);
+PMEVCNTR_READ(20);
+PMEVCNTR_READ(21);
+PMEVCNTR_READ(22);
+PMEVCNTR_READ(23);
+PMEVCNTR_READ(24);
+PMEVCNTR_READ(25);
+PMEVCNTR_READ(26);
+PMEVCNTR_READ(27);
+PMEVCNTR_READ(28);
+PMEVCNTR_READ(29);
+PMEVCNTR_READ(30);
+
+/*
+ * Read a value directly from PMEVCNTR<idx>.
+ */
+static u64 read_perf_counter(unsigned int counter)
+{
+ static u64 (* const read_f[])(void) = {
+ read_pmevcntr_0,
+ read_pmevcntr_1,
+ read_pmevcntr_2,
+ read_pmevcntr_3,
+ read_pmevcntr_4,
+ read_pmevcntr_5,
+ read_pmevcntr_6,
+ read_pmevcntr_7,
+ read_pmevcntr_8,
+ read_pmevcntr_9,
+ read_pmevcntr_10,
+ read_pmevcntr_11,
+		read_pmevcntr_12,
+		read_pmevcntr_13,
+ read_pmevcntr_14,
+ read_pmevcntr_15,
+ read_pmevcntr_16,
+ read_pmevcntr_17,
+ read_pmevcntr_18,
+ read_pmevcntr_19,
+ read_pmevcntr_20,
+ read_pmevcntr_21,
+ read_pmevcntr_22,
+ read_pmevcntr_23,
+ read_pmevcntr_24,
+ read_pmevcntr_25,
+ read_pmevcntr_26,
+ read_pmevcntr_27,
+ read_pmevcntr_28,
+ read_pmevcntr_29,
+ read_pmevcntr_30,
+ read_pmccntr
+ };
+
+ if (counter < ARRAY_SIZE(read_f))
+ return (read_f[counter])();
+
+ return 0;
+}
+
+static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+
+/* __riscv_xlen contains the width of the native base integer, here 64-bit */
+#elif defined(__riscv) && __riscv_xlen == 64
+
+/* TODO: implement rv32 support */
+
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, %1" \
+ : "=r" (__v) \
+ : "i" (csr) : ); \
+ __v; \
+})
+
+static unsigned long csr_read_num(int csr_num)
+{
+#define switchcase_csr_read(__csr_num, __val) {\
+ case __csr_num: \
+ __val = csr_read(__csr_num); \
+ break; }
+#define switchcase_csr_read_2(__csr_num, __val) {\
+ switchcase_csr_read(__csr_num + 0, __val) \
+ switchcase_csr_read(__csr_num + 1, __val)}
+#define switchcase_csr_read_4(__csr_num, __val) {\
+ switchcase_csr_read_2(__csr_num + 0, __val) \
+ switchcase_csr_read_2(__csr_num + 2, __val)}
+#define switchcase_csr_read_8(__csr_num, __val) {\
+ switchcase_csr_read_4(__csr_num + 0, __val) \
+ switchcase_csr_read_4(__csr_num + 4, __val)}
+#define switchcase_csr_read_16(__csr_num, __val) {\
+ switchcase_csr_read_8(__csr_num + 0, __val) \
+ switchcase_csr_read_8(__csr_num + 8, __val)}
+#define switchcase_csr_read_32(__csr_num, __val) {\
+ switchcase_csr_read_16(__csr_num + 0, __val) \
+ switchcase_csr_read_16(__csr_num + 16, __val)}
+
+ unsigned long ret = 0;
+
+ switch (csr_num) {
+ switchcase_csr_read_32(CSR_CYCLE, ret)
+ default:
+ break;
+ }
+
+ return ret;
+#undef switchcase_csr_read_32
+#undef switchcase_csr_read_16
+#undef switchcase_csr_read_8
+#undef switchcase_csr_read_4
+#undef switchcase_csr_read_2
+#undef switchcase_csr_read
+}
+
+static u64 read_perf_counter(unsigned int counter)
+{
+ return csr_read_num(CSR_CYCLE + counter);
+}
+
+static u64 read_timestamp(void)
+{
+ return csr_read_num(CSR_TIME);
+}
+
+#else
+static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
+static u64 read_timestamp(void) { return 0; }
+#endif
+
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
+{
+ struct perf_event_mmap_page *pc = map->base;
+ u32 seq, idx, time_mult = 0, time_shift = 0;
+ u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;
+
+ if (!pc || !pc->cap_user_rdpmc)
+ return -1;
+
+ do {
+ seq = READ_ONCE(pc->lock);
+ barrier();
+
+ count->ena = READ_ONCE(pc->time_enabled);
+ count->run = READ_ONCE(pc->time_running);
+
+ if (pc->cap_user_time && count->ena != count->run) {
+ cyc = read_timestamp();
+ time_mult = READ_ONCE(pc->time_mult);
+ time_shift = READ_ONCE(pc->time_shift);
+ time_offset = READ_ONCE(pc->time_offset);
+
+ if (pc->cap_user_time_short) {
+ time_cycles = READ_ONCE(pc->time_cycles);
+ time_mask = READ_ONCE(pc->time_mask);
+ }
+ }
+
+ idx = READ_ONCE(pc->index);
+ cnt = READ_ONCE(pc->offset);
+ if (pc->cap_user_rdpmc && idx) {
+ s64 evcnt = read_perf_counter(idx - 1);
+ u16 width = READ_ONCE(pc->pmc_width);
+
+ evcnt <<= 64 - width;
+ evcnt >>= 64 - width;
+ cnt += evcnt;
+ } else
+ return -1;
+
+ barrier();
+ } while (READ_ONCE(pc->lock) != seq);
+
+ if (count->ena != count->run) {
+ u64 delta;
+
+ /* Adjust for cap_usr_time_short, a nop if not */
+ cyc = time_cycles + ((cyc - time_cycles) & time_mask);
+
+ delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);
+
+ count->ena += delta;
+ if (idx)
+ count->run += delta;
+ }
+
+ count->val = cnt;
+
+ return 0;
+}
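[Editor's note: perf_mmap__read_self() above is the syscall-free read path — it needs the event mmapped and user rdpmc access enabled, and scales for multiplexing using the time fields from the mmap page. A sketch of how a caller reaches it through the public API; it assumes the evsel is already open on the calling thread, and perf_evsel__read() is expected to take this fast path once the event is mapped, as exercised by test_stat_user_read() below:

    #include <perf/evsel.h>

    static int user_read_self(struct perf_evsel *evsel,
                              struct perf_counts_values *counts)
    {
            if (perf_evsel__mmap(evsel, 0))
                    return -1;

            /* Falls back to the read(2) path if rdpmc is unavailable. */
            return perf_evsel__read(evsel, 0, 0, counts);
    }
]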
diff --git a/tools/lib/perf/tests/Build b/tools/lib/perf/tests/Build
new file mode 100644
index 0000000000..56e81378d4
--- /dev/null
+++ b/tools/lib/perf/tests/Build
@@ -0,0 +1,5 @@
+tests-y += main.o
+tests-y += test-evsel.o
+tests-y += test-evlist.o
+tests-y += test-cpumap.o
+tests-y += test-threadmap.o
diff --git a/tools/lib/perf/tests/main.c b/tools/lib/perf/tests/main.c
new file mode 100644
index 0000000000..56423fd4db
--- /dev/null
+++ b/tools/lib/perf/tests/main.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <internal/tests.h>
+#include "tests.h"
+
+int tests_failed;
+int tests_verbose;
+
+int main(int argc, char **argv)
+{
+ __T("test cpumap", !test_cpumap(argc, argv));
+ __T("test threadmap", !test_threadmap(argc, argv));
+ __T("test evlist", !test_evlist(argc, argv));
+ __T("test evsel", !test_evsel(argc, argv));
+ return 0;
+}
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
new file mode 100644
index 0000000000..87b0510a55
--- /dev/null
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdarg.h>
+#include <stdio.h>
+#include <perf/cpumap.h>
+#include <internal/tests.h>
+#include "tests.h"
+
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
+int test_cpumap(int argc, char **argv)
+{
+ struct perf_cpu_map *cpus;
+ struct perf_cpu cpu;
+ int idx;
+
+ __T_START;
+
+ libperf_init(libperf_print);
+
+ cpus = perf_cpu_map__dummy_new();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__get(cpus);
+ perf_cpu_map__put(cpus);
+ perf_cpu_map__put(cpus);
+
+ cpus = perf_cpu_map__default_new();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ __T("wrong cpu number", cpu.cpu != -1);
+
+ perf_cpu_map__put(cpus);
+
+ __T_END;
+ return tests_failed == 0 ? 0 : -1;
+}
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
new file mode 100644
index 0000000000..ed616fc19b
--- /dev/null
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <linux/perf_event.h>
+#include <linux/limits.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/mmap.h>
+#include <perf/event.h>
+#include <internal/tests.h>
+#include <api/fs/fs.h>
+#include "tests.h"
+#include <internal/evsel.h>
+
+#define EVENT_NUM 15
+#define WAIT_COUNT 100000000UL
+
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
+static int test_stat_cpu(void)
+{
+ struct perf_cpu_map *cpus;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel, *leader;
+ struct perf_event_attr attr1 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+ struct perf_event_attr attr2 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err, idx;
+
+ cpus = perf_cpu_map__new(NULL);
+ __T("failed to create cpus", cpus);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = leader = perf_evsel__new(&attr1);
+ __T("failed to create evsel1", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ evsel = perf_evsel__new(&attr2);
+ __T("failed to create evsel2", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
+ perf_evlist__set_maps(evlist, cpus, NULL);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ cpus = perf_evsel__cpus(evsel);
+
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
+ struct perf_counts_values counts = { .val = 0 };
+
+ perf_evsel__read(evsel, idx, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+ }
+ }
+
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+
+ perf_cpu_map__put(cpus);
+ return 0;
+}
+
+static int test_stat_thread(void)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel, *leader;
+ struct perf_event_attr attr1 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+ struct perf_event_attr attr2 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = leader = perf_evsel__new(&attr1);
+ __T("failed to create evsel1", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ evsel = perf_evsel__new(&attr2);
+ __T("failed to create evsel2", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
+ perf_evlist__set_maps(evlist, NULL, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+ }
+
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_stat_thread_enable(void)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel, *leader;
+ struct perf_event_attr attr1 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .disabled = 1,
+ };
+ struct perf_event_attr attr2 = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ .disabled = 1,
+ };
+ int err;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = leader = perf_evsel__new(&attr1);
+ __T("failed to create evsel1", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ evsel = perf_evsel__new(&attr2);
+ __T("failed to create evsel2", evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
+ perf_evlist__set_maps(evlist, NULL, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val == 0);
+ }
+
+ perf_evlist__enable(evlist);
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+ }
+
+ perf_evlist__disable(evlist);
+
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_mmap_thread(void)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_mmap *map;
+ struct perf_cpu_map *cpus;
+ struct perf_thread_map *threads;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_period = 1,
+ .wakeup_watermark = 1,
+ .disabled = 1,
+ };
+ char path[PATH_MAX];
+ int id, err, pid, go_pipe[2];
+ union perf_event *event;
+ int count = 0;
+
+ snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
+ sysfs__mountpoint());
+
+ if (filename__read_int(path, &id)) {
+ tests_failed++;
+ fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
+ return -1;
+ }
+
+ attr.config = id;
+
+ err = pipe(go_pipe);
+ __T("failed to create pipe", err == 0);
+
+ fflush(NULL);
+
+ pid = fork();
+ if (!pid) {
+ int i;
+ char bf;
+
+ read(go_pipe[0], &bf, 1);
+
+ /* Generate 100 prctl calls. */
+ for (i = 0; i < 100; i++)
+ prctl(0, 0, 0, 0, 0);
+
+ exit(0);
+ }
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ cpus = perf_cpu_map__dummy_new();
+ __T("failed to create cpus", cpus);
+
+ perf_thread_map__set_pid(threads, 0, pid);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ err = perf_evlist__mmap(evlist, 4);
+ __T("failed to mmap evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ /* kick the child and wait for it to finish */
+ write(go_pipe[1], "A", 1);
+ waitpid(pid, NULL, 0);
+
+ /*
+	 * There's no need to call perf_evlist__disable; the
+	 * monitored process is dead now.
+ */
+
+ perf_evlist__for_each_mmap(evlist, map, false) {
+ if (perf_mmap__read_init(map) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ count++;
+ perf_mmap__consume(map);
+ }
+
+ perf_mmap__read_done(map);
+ }
+
+ /* calls perf_evlist__munmap/perf_evlist__close */
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+ perf_cpu_map__put(cpus);
+
+ /*
+ * The generated prctl calls should match the
+ * number of events in the buffer.
+ */
+ __T("failed count", count == 100);
+
+ return 0;
+}
+
+static int test_mmap_cpus(void)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_mmap *map;
+ struct perf_cpu_map *cpus;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_period = 1,
+ .wakeup_watermark = 1,
+ .disabled = 1,
+ };
+ cpu_set_t saved_mask;
+ char path[PATH_MAX];
+ int id, err, tmp;
+ struct perf_cpu cpu;
+ union perf_event *event;
+ int count = 0;
+
+ snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
+ sysfs__mountpoint());
+
+ if (filename__read_int(path, &id)) {
+ fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
+ return -1;
+ }
+
+ attr.config = id;
+
+ cpus = perf_cpu_map__new(NULL);
+ __T("failed to create cpus", cpus);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
+
+ perf_evlist__add(evlist, evsel);
+
+ perf_evlist__set_maps(evlist, cpus, NULL);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ err = perf_evlist__mmap(evlist, 4);
+ __T("failed to mmap evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ err = sched_getaffinity(0, sizeof(saved_mask), &saved_mask);
+ __T("sched_getaffinity failed", err == 0);
+
+ perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ cpu_set_t mask;
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu.cpu, &mask);
+
+ err = sched_setaffinity(0, sizeof(mask), &mask);
+ __T("sched_setaffinity failed", err == 0);
+
+ prctl(0, 0, 0, 0, 0);
+ }
+
+ err = sched_setaffinity(0, sizeof(saved_mask), &saved_mask);
+ __T("sched_setaffinity failed", err == 0);
+
+ perf_evlist__disable(evlist);
+
+ perf_evlist__for_each_mmap(evlist, map, false) {
+ if (perf_mmap__read_init(map) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ count++;
+ perf_mmap__consume(map);
+ }
+
+ perf_mmap__read_done(map);
+ }
+
+ /* calls perf_evlist__munmap/perf_evlist__close */
+ perf_evlist__delete(evlist);
+
+ /*
+	 * The number of generated prctl events should match the
+	 * number of CPUs, or be bigger (we are monitoring system-wide).
+ */
+ __T("failed count", count >= perf_cpu_map__nr(cpus));
+
+ perf_cpu_map__put(cpus);
+
+ return 0;
+}
+
+static double display_error(long long average,
+ long long high,
+ long long low,
+ long long expected)
+{
+ double error;
+
+ error = (((double)average - expected) / expected) * 100.0;
+
+ __T_VERBOSE(" Expected: %lld\n", expected);
+ __T_VERBOSE(" High: %lld Low: %lld Average: %lld\n",
+ high, low, average);
+
+ __T_VERBOSE(" Average Error = %.2f%%\n", error);
+
+ return error;
+}
+
+static int test_stat_multiplexing(void)
+{
+ struct perf_counts_values expected_counts = { .val = 0 };
+ struct perf_counts_values counts[EVENT_NUM] = {{ .val = 0 },};
+ struct perf_thread_map *threads;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING,
+ .disabled = 1,
+ };
+ int err, i, nonzero = 0;
+ unsigned long count;
+ long long max = 0, min = 0, avg = 0;
+ double error = 0.0;
+ s8 scaled = 0;
+
+ /* read for non-multiplexing event count */
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__enable(evsel);
+ __T("failed to enable evsel", err == 0);
+
+ /* wait loop */
+ count = WAIT_COUNT;
+ while (count--)
+ ;
+
+ perf_evsel__read(evsel, 0, 0, &expected_counts);
+ __T("failed to read value for evsel", expected_counts.val != 0);
+ __T("failed to read non-multiplexing event count",
+ expected_counts.ena == expected_counts.run);
+
+ err = perf_evsel__disable(evsel);
+	__T("failed to disable evsel", err == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+
+ /* read for multiplexing event count */
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ for (i = 0; i < EVENT_NUM; i++) {
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ perf_evlist__add(evlist, evsel);
+ }
+ perf_evlist__set_maps(evlist, NULL, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ /* wait loop */
+ count = WAIT_COUNT;
+ while (count--)
+ ;
+
+ i = 0;
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts[i]);
+ __T("failed to read value for evsel", counts[i].val != 0);
+ i++;
+ }
+
+ perf_evlist__disable(evlist);
+
+ min = counts[0].val;
+ for (i = 0; i < EVENT_NUM; i++) {
+ __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
+ i, counts[i].val, counts[i].run, counts[i].ena);
+
+ perf_counts_values__scale(&counts[i], true, &scaled);
+ if (scaled == 1) {
+ __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
+ counts[i].val,
+ (double)counts[i].run / (double)counts[i].ena * 100.0,
+ counts[i].run, counts[i].ena);
+ } else if (scaled == -1) {
+ __T_VERBOSE("\t Not Running\n");
+ } else {
+ __T_VERBOSE("\t Not Scaling\n");
+ }
+
+ if (counts[i].val > max)
+ max = counts[i].val;
+
+ if (counts[i].val < min)
+ min = counts[i].val;
+
+ avg += counts[i].val;
+
+ if (counts[i].val != 0)
+ nonzero++;
+ }
+
+ if (nonzero != 0)
+ avg = avg / nonzero;
+ else
+ avg = 0;
+
+ error = display_error(avg, max, min, expected_counts.val);
+
+ __T("Error out of range!", ((error <= 1.0) && (error >= -1.0)));
+
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+
+ return 0;
+}
+
+int test_evlist(int argc, char **argv)
+{
+ __T_START;
+
+ libperf_init(libperf_print);
+
+ test_stat_cpu();
+ test_stat_thread();
+ test_stat_thread_enable();
+ test_mmap_thread();
+ test_mmap_cpus();
+ test_stat_multiplexing();
+
+ __T_END;
+ return tests_failed == 0 ? 0 : -1;
+}
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
new file mode 100644
index 0000000000..a11fc51bfb
--- /dev/null
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <linux/kernel.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <perf/evsel.h>
+#include <internal/evsel.h>
+#include <internal/tests.h>
+#include "tests.h"
+
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
+static int test_stat_cpu(void)
+{
+ struct perf_cpu_map *cpus;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+ int err, idx;
+
+ cpus = perf_cpu_map__new(NULL);
+ __T("failed to create cpus", cpus);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, cpus, NULL);
+ __T("failed to open evsel", err == 0);
+
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
+ struct perf_counts_values counts = { .val = 0 };
+
+ perf_evsel__read(evsel, idx, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+ }
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_cpu_map__put(cpus);
+ return 0;
+}
+
+static int test_stat_thread(void)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_stat_thread_enable(void)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ .disabled = 1,
+ };
+ int err;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val == 0);
+
+ err = perf_evsel__enable(evsel);
+ __T("failed to enable evsel", err == 0);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+
+ err = perf_evsel__disable(evsel);
+	__T("failed to disable evsel", err == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_stat_user_read(int event)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_mmap_page *pc;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
+ };
+ int err, i;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__mmap(evsel, 0);
+ __T("failed to mmap evsel", err == 0);
+
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+ __T("failed to get mmapped address", pc);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ __T("userspace counter access not supported", pc->cap_user_rdpmc);
+ __T("userspace counter access not enabled", pc->index);
+ __T("userspace counter width not set", pc->pmc_width >= 32);
+#endif
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+
+ for (i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
+
+ __T_VERBOSE("\tloop = %u, ", count);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+
+ __T("invalid counter data", (end - start) > last);
+ last = end - start;
+ __T_VERBOSE("count = %llu\n", end - start);
+ }
+
+ perf_evsel__munmap(evsel);
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_stat_read_format_single(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *evsel;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ evsel = perf_evsel__new(attr);
+ __T("failed to create evsel", evsel);
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(evsel, 0, 0, &counts);
+
+ __T("failed to read value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read LOST", counts.lost == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+ return 0;
+}
+
+static int test_stat_read_format_group(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *leader, *member;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ attr->read_format |= PERF_FORMAT_GROUP;
+ leader = perf_evsel__new(attr);
+ __T("failed to create leader", leader);
+
+ attr->read_format &= ~PERF_FORMAT_GROUP;
+ member = perf_evsel__new(attr);
+ __T("failed to create member", member);
+
+ member->leader = leader;
+ leader->nr_members = 2;
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(leader, NULL, threads);
+ if (err < 0)
+ return 0;
+ err = perf_evsel__open(member, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(leader, 0, 0, &counts);
+
+ __T("failed to read leader value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read leader TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read leader TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read leader ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read leader LOST", counts.lost == 0);
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(member, 0, 0, &counts);
+
+ __T("failed to read member value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read member TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read member TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read member ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read member LOST", counts.lost == 0);
+
+ perf_evsel__close(member);
+ perf_evsel__close(leader);
+ perf_evsel__delete(member);
+ perf_evsel__delete(leader);
+ return 0;
+}
+
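+/*
+ * Walk the read_format combinations below twice: once for a single
+ * event on one thread, once for a two-member group on two threads.
+ */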
+static int test_stat_read_format(void)
+{
+ struct perf_thread_map *threads;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err, i;
+
+#define FMT(_fmt) PERF_FORMAT_ ## _fmt
+#define FMT_TIME (FMT(TOTAL_TIME_ENABLED) | FMT(TOTAL_TIME_RUNNING))
+
+	uint64_t test_formats[] = {
+ 0,
+ FMT_TIME,
+ FMT(ID),
+ FMT(LOST),
+ FMT_TIME | FMT(ID),
+ FMT_TIME | FMT(LOST),
+ FMT_TIME | FMT(ID) | FMT(LOST),
+ FMT(ID) | FMT(LOST),
+ };
+
+#undef FMT
+#undef FMT_TIME
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing single read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_single(&attr, threads);
+ __T("failed to read single format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+
+ threads = perf_thread_map__new_array(2, NULL);
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+ perf_thread_map__set_pid(threads, 1, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing group read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_group(&attr, threads);
+ __T("failed to read group format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+int test_evsel(int argc, char **argv)
+{
+ __T_START;
+
+ libperf_init(libperf_print);
+
+ test_stat_cpu();
+ test_stat_thread();
+ test_stat_thread_enable();
+ test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+ test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+ test_stat_read_format();
+
+ __T_END;
+ return tests_failed == 0 ? 0 : -1;
+}
diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c
new file mode 100644
index 0000000000..f728ad7002
--- /dev/null
+++ b/tools/lib/perf/tests/test-threadmap.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdarg.h>
+#include <stdio.h>
+#include <perf/threadmap.h>
+#include <internal/tests.h>
+#include "tests.h"
+
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
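+/*
+ * Build a map from an optional pid array (NULL leaves every entry at
+ * -1), then overwrite all but entry 0 and check both the untouched
+ * and the updated slots.
+ */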
+static int test_threadmap_array(int nr, pid_t *array)
+{
+ struct perf_thread_map *threads;
+ int i;
+
+ threads = perf_thread_map__new_array(nr, array);
+ __T("Failed to allocate new thread map", threads);
+
+ __T("Unexpected number of threads", perf_thread_map__nr(threads) == nr);
+
+ for (i = 0; i < nr; i++) {
+ __T("Unexpected initial value of thread",
+ perf_thread_map__pid(threads, i) == (array ? array[i] : -1));
+ }
+
+ for (i = 1; i < nr; i++)
+ perf_thread_map__set_pid(threads, i, i * 100);
+
+ __T("Unexpected value of thread 0",
+ perf_thread_map__pid(threads, 0) == (array ? array[0] : -1));
+
+ for (i = 1; i < nr; i++) {
+ __T("Unexpected thread value",
+ perf_thread_map__pid(threads, i) == i * 100);
+ }
+
+ perf_thread_map__put(threads);
+
+ return 0;
+}
+
+#define THREADS_NR 10
+int test_threadmap(int argc, char **argv)
+{
+ struct perf_thread_map *threads;
+ pid_t thr_array[THREADS_NR];
+ int i;
+
+ __T_START;
+
+ libperf_init(libperf_print);
+
+ threads = perf_thread_map__new_dummy();
+ if (!threads)
+ return -1;
+
+ perf_thread_map__get(threads);
+ perf_thread_map__put(threads);
+ perf_thread_map__put(threads);
+
+ test_threadmap_array(THREADS_NR, NULL);
+
+ for (i = 0; i < THREADS_NR; i++)
+ thr_array[i] = i + 100;
+
+ test_threadmap_array(THREADS_NR, thr_array);
+
+ __T_END;
+ return tests_failed == 0 ? 0 : -1;
+}
diff --git a/tools/lib/perf/tests/tests.h b/tools/lib/perf/tests/tests.h
new file mode 100644
index 0000000000..604838f21b
--- /dev/null
+++ b/tools/lib/perf/tests/tests.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TESTS_H
+#define TESTS_H
+
+int test_cpumap(int argc, char **argv);
+int test_threadmap(int argc, char **argv);
+int test_evlist(int argc, char **argv);
+int test_evsel(int argc, char **argv);
+
+#endif /* TESTS_H */
diff --git a/tools/lib/perf/threadmap.c b/tools/lib/perf/threadmap.c
new file mode 100644
index 0000000000..07968f3ea0
--- /dev/null
+++ b/tools/lib/perf/threadmap.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <perf/threadmap.h>
+#include <stdlib.h>
+#include <linux/refcount.h>
+#include <internal/threadmap.h>
+#include <string.h>
+#include <asm/bug.h>
+#include <stdio.h>
+
+static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr)
+{
+ size_t size = (nr - start) * sizeof(map->map[0]);
+
+ memset(&map->map[start], 0, size);
+ map->err_thread = -1;
+}
+
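+/*
+ * Grow (or, when passed NULL, create) a map with room for nr entries.
+ * The usual realloc caveat applies: on failure this returns NULL and
+ * the caller's original pointer is left as the only live reference.
+ */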
+struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr)
+{
+ size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
+ int start = map ? map->nr : 0;
+
+ map = realloc(map, size);
+ /*
+	 * We only realloc to add more items, so reset the new ones.
+ */
+ if (map)
+ perf_thread_map__reset(map, start, nr);
+
+ return map;
+}
+
+#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
+
+void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid)
+{
+ map->map[idx].pid = pid;
+}
+
+char *perf_thread_map__comm(struct perf_thread_map *map, int idx)
+{
+ return map->map[idx].comm;
+}
+
+struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array)
+{
+ struct perf_thread_map *threads = thread_map__alloc(nr_threads);
+ int i;
+
+ if (!threads)
+ return NULL;
+
+ for (i = 0; i < nr_threads; i++)
+ perf_thread_map__set_pid(threads, i, array ? array[i] : -1);
+
+ threads->nr = nr_threads;
+ refcount_set(&threads->refcnt, 1);
+
+ return threads;
+}
+
+struct perf_thread_map *perf_thread_map__new_dummy(void)
+{
+ return perf_thread_map__new_array(1, NULL);
+}
+
+static void perf_thread_map__delete(struct perf_thread_map *threads)
+{
+ if (threads) {
+ int i;
+
+ WARN_ONCE(refcount_read(&threads->refcnt) != 0,
+ "thread map refcnt unbalanced\n");
+ for (i = 0; i < threads->nr; i++)
+ free(perf_thread_map__comm(threads, i));
+ free(threads);
+ }
+}
+
+struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map)
+{
+ if (map)
+ refcount_inc(&map->refcnt);
+ return map;
+}
+
+void perf_thread_map__put(struct perf_thread_map *map)
+{
+ if (map && refcount_dec_and_test(&map->refcnt))
+ perf_thread_map__delete(map);
+}
+
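+/*
+ * A NULL map counts as a single (dummy) entry so callers can iterate
+ * without special-casing it.
+ */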
+int perf_thread_map__nr(struct perf_thread_map *threads)
+{
+ return threads ? threads->nr : 1;
+}
+
+pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx)
+{
+ return map->map[idx].pid;
+}
diff --git a/tools/lib/perf/xyarray.c b/tools/lib/perf/xyarray.c
new file mode 100644
index 0000000000..dcd901d154
--- /dev/null
+++ b/tools/lib/perf/xyarray.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <internal/xyarray.h>
+#include <linux/zalloc.h>
+#include <stdlib.h>
+#include <string.h>
+
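+/*
+ * A 2D array flattened into one allocation: xlen rows of row_size
+ * bytes each, so entry (x, y) sits at contents[x * row_size +
+ * y * entry_size] (see the accessors in internal/xyarray.h).
+ */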
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
+{
+ size_t row_size = ylen * entry_size;
+ struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
+
+ if (xy != NULL) {
+ xy->entry_size = entry_size;
+ xy->row_size = row_size;
+ xy->entries = xlen * ylen;
+ xy->max_x = xlen;
+ xy->max_y = ylen;
+ }
+
+ return xy;
+}
+
+void xyarray__reset(struct xyarray *xy)
+{
+ size_t n = xy->entries * xy->entry_size;
+
+ memset(xy->contents, 0, n);
+}
+
+void xyarray__delete(struct xyarray *xy)
+{
+ free(xy);
+}