author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194  /tools/perf/scripts
parent     Initial commit. (diff)
Adding upstream version 5.10.209. (tag: upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/scripts')
-rw-r--r--  tools/perf/scripts/Build | 2
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Build | 5
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.c | 122
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.xs | 42
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL | 18
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/README | 59
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm | 55
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm | 192
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm | 94
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/typemap | 1
-rw-r--r--  tools/perf/scripts/perl/bin/check-perf-trace-record | 2
-rw-r--r--  tools/perf/scripts/perl/bin/failed-syscalls-record | 3
-rw-r--r--  tools/perf/scripts/perl/bin/failed-syscalls-report | 10
-rw-r--r--  tools/perf/scripts/perl/bin/rw-by-file-record | 3
-rw-r--r--  tools/perf/scripts/perl/bin/rw-by-file-report | 10
-rw-r--r--  tools/perf/scripts/perl/bin/rw-by-pid-record | 2
-rw-r--r--  tools/perf/scripts/perl/bin/rw-by-pid-report | 3
-rw-r--r--  tools/perf/scripts/perl/bin/rwtop-record | 2
-rw-r--r--  tools/perf/scripts/perl/bin/rwtop-report | 20
-rw-r--r--  tools/perf/scripts/perl/bin/wakeup-latency-record | 6
-rw-r--r--  tools/perf/scripts/perl/bin/wakeup-latency-report | 3
-rw-r--r--  tools/perf/scripts/perl/check-perf-trace.pl | 106
-rw-r--r--  tools/perf/scripts/perl/failed-syscalls.pl | 47
-rw-r--r--  tools/perf/scripts/perl/rw-by-file.pl | 106
-rw-r--r--  tools/perf/scripts/perl/rw-by-pid.pl | 184
-rw-r--r--  tools/perf/scripts/perl/rwtop.pl | 203
-rw-r--r--  tools/perf/scripts/perl/wakeup-latency.pl | 107
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/Build | 3
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/Context.c | 101
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 116
-rwxr-xr-x  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 97
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 184
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 91
-rw-r--r--  tools/perf/scripts/python/bin/compaction-times-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/compaction-times-report | 4
-rw-r--r--  tools/perf/scripts/python/bin/event_analyzing_sample-record | 8
-rw-r--r--  tools/perf/scripts/python/bin/event_analyzing_sample-report | 3
-rw-r--r--  tools/perf/scripts/python/bin/export-to-postgresql-record | 8
-rw-r--r--  tools/perf/scripts/python/bin/export-to-postgresql-report | 29
-rw-r--r--  tools/perf/scripts/python/bin/export-to-sqlite-record | 8
-rw-r--r--  tools/perf/scripts/python/bin/export-to-sqlite-report | 29
-rw-r--r--  tools/perf/scripts/python/bin/failed-syscalls-by-pid-record | 3
-rw-r--r--  tools/perf/scripts/python/bin/failed-syscalls-by-pid-report | 10
-rwxr-xr-x  tools/perf/scripts/python/bin/flamegraph-record | 2
-rwxr-xr-x  tools/perf/scripts/python/bin/flamegraph-report | 3
-rw-r--r--  tools/perf/scripts/python/bin/futex-contention-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/futex-contention-report | 4
-rw-r--r--  tools/perf/scripts/python/bin/intel-pt-events-record | 13
-rw-r--r--  tools/perf/scripts/python/bin/intel-pt-events-report | 3
-rw-r--r--  tools/perf/scripts/python/bin/mem-phys-addr-record | 19
-rw-r--r--  tools/perf/scripts/python/bin/mem-phys-addr-report | 3
-rwxr-xr-x  tools/perf/scripts/python/bin/net_dropmonitor-record | 2
-rwxr-xr-x  tools/perf/scripts/python/bin/net_dropmonitor-report | 4
-rw-r--r--  tools/perf/scripts/python/bin/netdev-times-record | 8
-rw-r--r--  tools/perf/scripts/python/bin/netdev-times-report | 5
-rw-r--r--  tools/perf/scripts/python/bin/powerpc-hcalls-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/powerpc-hcalls-report | 2
-rw-r--r--  tools/perf/scripts/python/bin/sched-migration-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/sched-migration-report | 3
-rw-r--r--  tools/perf/scripts/python/bin/sctop-record | 3
-rw-r--r--  tools/perf/scripts/python/bin/sctop-report | 24
-rwxr-xr-x  tools/perf/scripts/python/bin/stackcollapse-record | 8
-rwxr-xr-x  tools/perf/scripts/python/bin/stackcollapse-report | 3
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-by-pid-record | 3
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-by-pid-report | 10
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-record | 3
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-report | 10
-rw-r--r--  tools/perf/scripts/python/check-perf-trace.py | 84
-rw-r--r--  tools/perf/scripts/python/compaction-times.py | 311
-rw-r--r--  tools/perf/scripts/python/event_analyzing_sample.py | 192
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py | 1111
-rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py | 796
-rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py | 5114
-rw-r--r--  tools/perf/scripts/python/failed-syscalls-by-pid.py | 79
-rwxr-xr-x  tools/perf/scripts/python/flamegraph.py | 126
-rw-r--r--  tools/perf/scripts/python/futex-contention.py | 57
-rw-r--r--  tools/perf/scripts/python/intel-pt-events.py | 134
-rw-r--r--  tools/perf/scripts/python/mem-phys-addr.py | 100
-rwxr-xr-x  tools/perf/scripts/python/net_dropmonitor.py | 78
-rw-r--r--  tools/perf/scripts/python/netdev-times.py | 472
-rw-r--r--  tools/perf/scripts/python/powerpc-hcalls.py | 202
-rw-r--r--  tools/perf/scripts/python/sched-migration.py | 462
-rw-r--r--  tools/perf/scripts/python/sctop.py | 89
-rw-r--r--  tools/perf/scripts/python/stackcollapse.py | 127
-rw-r--r--  tools/perf/scripts/python/stat-cpi.py | 79
-rw-r--r--  tools/perf/scripts/python/syscall-counts-by-pid.py | 75
-rw-r--r--  tools/perf/scripts/python/syscall-counts.py | 65
87 files changed, 11997 insertions(+), 0 deletions(-)
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
new file mode 100644
index 000000000..68d4b5457
--- /dev/null
+++ b/tools/perf/scripts/Build
@@ -0,0 +1,2 @@
+perf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
new file mode 100644
index 000000000..db0036129
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -0,0 +1,5 @@
+perf-y += Context.o
+
+CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
+CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
+CFLAGS_Context.o += -Wno-switch-default -Wno-shadow
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
new file mode 100644
index 000000000..25c47d23a
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the
+ * contents of Context.xs. Do not edit this file, edit Context.xs instead.
+ *
+ * ANY CHANGES MADE HERE WILL BE LOST!
+ */
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
+#line 1 "Context.xs"
+/*
+ * Context.xs. XS interfaces for perf script.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ */
+
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+#include "../../../util/trace-event.h"
+
+#ifndef PERL_UNUSED_VAR
+# define PERL_UNUSED_VAR(var) if (0) var = var
+#endif
+
+#line 42 "Context.c"
+
+XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
+XS(XS_Perf__Trace__Context_common_pc)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ if (items != 1)
+ Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context");
+ PERL_UNUSED_VAR(cv); /* -W */
+ {
+ struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
+ int RETVAL;
+ dXSTARG;
+
+ RETVAL = common_pc(context);
+ XSprePUSH; PUSHi((IV)RETVAL);
+ }
+ XSRETURN(1);
+}
+
+
+XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */
+XS(XS_Perf__Trace__Context_common_flags)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ if (items != 1)
+ Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context");
+ PERL_UNUSED_VAR(cv); /* -W */
+ {
+ struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
+ int RETVAL;
+ dXSTARG;
+
+ RETVAL = common_flags(context);
+ XSprePUSH; PUSHi((IV)RETVAL);
+ }
+ XSRETURN(1);
+}
+
+
+XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */
+XS(XS_Perf__Trace__Context_common_lock_depth)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ if (items != 1)
+ Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context");
+ PERL_UNUSED_VAR(cv); /* -W */
+ {
+ struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
+ int RETVAL;
+ dXSTARG;
+
+ RETVAL = common_lock_depth(context);
+ XSprePUSH; PUSHi((IV)RETVAL);
+ }
+ XSRETURN(1);
+}
+
+#ifdef __cplusplus
+extern "C"
+#endif
+XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */
+XS(boot_Perf__Trace__Context)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ const char* file = __FILE__;
+
+ PERL_UNUSED_VAR(cv); /* -W */
+ PERL_UNUSED_VAR(items); /* -W */
+ XS_VERSION_BOOTCHECK ;
+
+ newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$");
+ newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$");
+ newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$");
+ if (PL_unitcheckav)
+ call_list(PL_scopestack_ix, PL_unitcheckav);
+ XSRETURN_YES;
+}
+
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
new file mode 100644
index 000000000..8c7ea4244
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
@@ -0,0 +1,42 @@
+/*
+ * Context.xs. XS interfaces for perf script.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
+
+MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context
+PROTOTYPES: ENABLE
+
+int
+common_pc(context)
+ struct scripting_context * context
+
+int
+common_flags(context)
+ struct scripting_context * context
+
+int
+common_lock_depth(context)
+ struct scripting_context * context
+
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL
new file mode 100644
index 000000000..e8994332d
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+use 5.010000;
+use ExtUtils::MakeMaker;
+# See lib/ExtUtils/MakeMaker.pm for details of how to influence
+# the contents of the Makefile that is written.
+WriteMakefile(
+ NAME => 'Perf::Trace::Context',
+ VERSION_FROM => 'lib/Perf/Trace/Context.pm', # finds $VERSION
+ PREREQ_PM => {}, # e.g., Module::Name => 1.1
+ ($] >= 5.005 ? ## Add these new keywords supported since 5.005
+ (ABSTRACT_FROM => 'lib/Perf/Trace/Context.pm', # retrieve abstract from module
+ AUTHOR => 'Tom Zanussi <tzanussi@gmail.com>') : ()),
+ LIBS => [''], # e.g., '-lm'
+ DEFINE => '-I ../..', # e.g., '-DHAVE_SOMETHING'
+ INC => '-I.', # e.g., '-I. -I/usr/include/other'
+ # Un-comment this if you add C files to link with later:
+ OBJECT => 'Context.o', # link all the C files too
+);
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/README b/tools/perf/scripts/perl/Perf-Trace-Util/README
new file mode 100644
index 000000000..2f0c7f304
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/README
@@ -0,0 +1,59 @@
+Perf-Trace-Util version 0.01
+============================
+
+This module contains utility functions for use with perf script.
+
+Core.pm and Util.pm are pure Perl modules; Core.pm contains routines
+that the core perf support for Perl calls on and should always be
+'used', while Util.pm contains useful but optional utility functions
+that scripts may want to use. Context.pm contains the Perl->C
+interface that allows scripts to access data in the embedding perf
+executable; scripts wishing to do that should 'use Context.pm'.
+
+The Perl->C perf interface is completely driven by Context.xs. If you
+want to add new Perl functions that end up accessing C data in the
+perf executable, you add descriptions of the new functions here.
+scripting_context is a pointer to the perf data in the perf executable
+that you want to access - it's passed as the second parameter,
+$context, to all handler functions.
+
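+As a sketch (this accessor and its backing C function are hypothetical),
+a new function is declared with the same three-line pattern as the
+existing entries in Context.xs:
+
+    int
+    common_cpu(context)
+            struct scripting_context * context
+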
+After you do that:
+
+ perl Makefile.PL # to create a Makefile for the next step
+ make # to create Context.c
+
+ edit Context.c to add const to the char* file = __FILE__ line in
+ XS(boot_Perf__Trace__Context) to silence a warning/error.
+
+  You can delete the Makefile, object files and anything else that was
+  generated (e.g. blib and the shared library), except of course for
+  Context.c.
+
+ You should then be able to run the normal perf make as usual.
+
+INSTALLATION
+
+Building perf with perf script Perl scripting should install this
+module in the right place.
+
+You should make sure libperl and ExtUtils/Embed.pm are installed first
+e.g. apt-get install libperl-dev or yum install perl-ExtUtils-Embed.
+
+DEPENDENCIES
+
+This module requires these other modules and libraries:
+
+ None
+
+COPYRIGHT AND LICENCE
+
+Copyright (C) 2009 by Tom Zanussi <tzanussi@gmail.com>
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm
new file mode 100644
index 000000000..4e2f6039a
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm
@@ -0,0 +1,55 @@
+package Perf::Trace::Context;
+
+use 5.010000;
+use strict;
+use warnings;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw(
+) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+ common_pc common_flags common_lock_depth
+);
+
+our $VERSION = '0.01';
+
+require XSLoader;
+XSLoader::load('Perf::Trace::Context', $VERSION);
+
+1;
+__END__
+=head1 NAME
+
+Perf::Trace::Context - Perl extension for accessing functions in perf.
+
+=head1 SYNOPSIS
+
+ use Perf::Trace::Context;
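+
+  # a minimal usage sketch: in an event handler, $context is the
+  # second argument that perf script passes to the handler
+  my $pc    = common_pc($context);
+  my $flags = common_flags($context);
+  my $depth = common_lock_depth($context);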
+
+=head1 SEE ALSO
+
+Perf (script) documentation
+
+=head1 AUTHOR
+
+Tom Zanussi, E<lt>tzanussi@gmail.comE<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Tom Zanussi
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
+=cut
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm
new file mode 100644
index 000000000..9158458d3
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm
@@ -0,0 +1,192 @@
+package Perf::Trace::Core;
+
+use 5.010000;
+use strict;
+use warnings;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw(
+) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+define_flag_field define_flag_value flag_str dump_flag_fields
+define_symbolic_field define_symbolic_value symbol_str dump_symbolic_fields
+trace_flag_str
+);
+
+our $VERSION = '0.01';
+
+my %trace_flags = (0x00 => "NONE",
+ 0x01 => "IRQS_OFF",
+ 0x02 => "IRQS_NOSUPPORT",
+ 0x04 => "NEED_RESCHED",
+ 0x08 => "HARDIRQ",
+ 0x10 => "SOFTIRQ");
+
+sub trace_flag_str
+{
+ my ($value) = @_;
+
+ my $string;
+
+ my $print_delim = 0;
+
+ foreach my $idx (sort {$a <=> $b} keys %trace_flags) {
+ if (!$value && !$idx) {
+ $string .= "NONE";
+ last;
+ }
+
+ if ($idx && ($value & $idx) == $idx) {
+ if ($print_delim) {
+ $string .= " | ";
+ }
+ $string .= "$trace_flags{$idx}";
+ $print_delim = 1;
+ $value &= ~$idx;
+ }
+ }
+
+ return $string;
+}
+
+my %flag_fields;
+my %symbolic_fields;
+
+sub flag_str
+{
+ my ($event_name, $field_name, $value) = @_;
+
+ my $string;
+
+ if ($flag_fields{$event_name}{$field_name}) {
+ my $print_delim = 0;
+ foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event_name}{$field_name}{"values"}}) {
+ if (!$value && !$idx) {
+ $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}";
+ last;
+ }
+ if ($idx && ($value & $idx) == $idx) {
+ if ($print_delim && $flag_fields{$event_name}{$field_name}{'delim'}) {
+ $string .= " $flag_fields{$event_name}{$field_name}{'delim'} ";
+ }
+ $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}";
+ $print_delim = 1;
+ $value &= ~$idx;
+ }
+ }
+ }
+
+ return $string;
+}
+
+sub define_flag_field
+{
+ my ($event_name, $field_name, $delim) = @_;
+
+ $flag_fields{$event_name}{$field_name}{"delim"} = $delim;
+}
+
+sub define_flag_value
+{
+ my ($event_name, $field_name, $value, $field_str) = @_;
+
+ $flag_fields{$event_name}{$field_name}{"values"}{$value} = $field_str;
+}
+
+sub dump_flag_fields
+{
+ for my $event (keys %flag_fields) {
+ print "event $event:\n";
+ for my $field (keys %{$flag_fields{$event}}) {
+ print " field: $field:\n";
+ print " delim: $flag_fields{$event}{$field}{'delim'}\n";
+ foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) {
+ print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n";
+ }
+ }
+ }
+}
+
+sub symbol_str
+{
+ my ($event_name, $field_name, $value) = @_;
+
+ if ($symbolic_fields{$event_name}{$field_name}) {
+ foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event_name}{$field_name}{"values"}}) {
+ if (!$value && !$idx) {
+ return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}";
+ last;
+ }
+ if ($value == $idx) {
+ return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}";
+ }
+ }
+ }
+
+ return undef;
+}
+
+sub define_symbolic_field
+{
+ my ($event_name, $field_name) = @_;
+
+ # nothing to do, really
+}
+
+sub define_symbolic_value
+{
+ my ($event_name, $field_name, $value, $field_str) = @_;
+
+ $symbolic_fields{$event_name}{$field_name}{"values"}{$value} = $field_str;
+}
+
+sub dump_symbolic_fields
+{
+ for my $event (keys %symbolic_fields) {
+ print "event $event:\n";
+ for my $field (keys %{$symbolic_fields{$event}}) {
+ print " field: $field:\n";
+ foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) {
+ print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n";
+ }
+ }
+ }
+}
+
+1;
+__END__
+=head1 NAME
+
+Perf::Trace::Core - Perl extension for perf script
+
+=head1 SYNOPSIS
+
+  use Perf::Trace::Core;
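+
+  # a minimal sketch with illustrative event/field names and values:
+  define_flag_field("my::event", "flags", "|");
+  define_flag_value("my::event", "flags", 1, "BIT0");
+  print flag_str("my::event", "flags", 1), "\n"; # prints "BIT0"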
+
+=head1 SEE ALSO
+
+Perf (script) documentation
+
+=head1 AUTHOR
+
+Tom Zanussi, E<lt>tzanussi@gmail.comE<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Tom Zanussi
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
+=cut
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
new file mode 100644
index 000000000..053500114
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
@@ -0,0 +1,94 @@
+package Perf::Trace::Util;
+
+use 5.010000;
+use strict;
+use warnings;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw(
+) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+avg nsecs nsecs_secs nsecs_nsecs nsecs_str
+clear_term
+);
+
+our $VERSION = '0.01';
+
+sub avg
+{
+ my ($total, $n) = @_;
+
+ return $total / $n;
+}
+
+my $NSECS_PER_SEC = 1000000000;
+
+sub nsecs
+{
+ my ($secs, $nsecs) = @_;
+
+ return $secs * $NSECS_PER_SEC + $nsecs;
+}
+
+sub nsecs_secs {
+ my ($nsecs) = @_;
+
+ return $nsecs / $NSECS_PER_SEC;
+}
+
+sub nsecs_nsecs {
+ my ($nsecs) = @_;
+
+ return $nsecs % $NSECS_PER_SEC;
+}
+
+sub nsecs_str {
+ my ($nsecs) = @_;
+
+ my $str = sprintf("%5u.%09u", nsecs_secs($nsecs), nsecs_nsecs($nsecs));
+
+ return $str;
+}
+
+sub clear_term
+{
+ print "\x1b[H\x1b[2J";
+}
+
+1;
+__END__
+=head1 NAME
+
+Perf::Trace::Util - Perl extension for perf script
+
+=head1 SYNOPSIS
+
+ use Perf::Trace::Util;
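+
+  # a minimal sketch: combine an event's seconds/nanoseconds fields
+  # into one nanosecond timestamp and format it
+  my $ts = nsecs($common_secs, $common_nsecs);
+  print nsecs_str($ts), "\n";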
+
+=head1 SEE ALSO
+
+Perf (script) documentation
+
+=head1 AUTHOR
+
+Tom Zanussi, E<lt>tzanussi@gmail.comE<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Tom Zanussi
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
+=cut
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/typemap b/tools/perf/scripts/perl/Perf-Trace-Util/typemap
new file mode 100644
index 000000000..840836804
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/typemap
@@ -0,0 +1 @@
+struct scripting_context * T_PTR
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record
new file mode 100644
index 000000000..423ad6aed
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/check-perf-trace-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
new file mode 100644
index 000000000..74685f318
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report
new file mode 100644
index 000000000..9f83cc1ad
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide failed syscalls
+# args: [comm]
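+# e.g. (comm name is illustrative): failed-syscalls-report firefox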
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/failed-syscalls.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
new file mode 100644
index 000000000..33efc8673
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report
new file mode 100644
index 000000000..77200b3f3
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-file-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: r/w activity for a program, by file
+# args: <comm>
+if [ $# -lt 1 ] ; then
+ echo "usage: rw-by-file <comm>"
+ exit
+fi
+comm=$1
+shift
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-file.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
new file mode 100644
index 000000000..7cb9db230
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report
new file mode 100644
index 000000000..a27b9f311
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: system-wide r/w activity
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-pid.pl
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
new file mode 100644
index 000000000..7cb9db230
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-report b/tools/perf/scripts/perl/bin/rwtop-report
new file mode 100644
index 000000000..83e11ec2e
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rwtop-report
@@ -0,0 +1,20 @@
+#!/bin/bash
+# description: system-wide r/w top
+# args: [interval]
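+# e.g. (interval in seconds, illustrative): rwtop-report 5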
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 1 ] ; then
+ echo "usage: rwtop-report [interval]"
+ exit
+fi
+if [ "$n_args" -gt 0 ] ; then
+ interval=$1
+ shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/rwtop.pl $interval
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
new file mode 100644
index 000000000..464251a1b
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -0,0 +1,6 @@
+#!/bin/bash
+perf record -e sched:sched_switch -e sched:sched_wakeup $@
+
+
+
+
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report
new file mode 100644
index 000000000..889e8130c
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: system-wide min/max/avg wakeup latency
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/wakeup-latency.pl
diff --git a/tools/perf/scripts/perl/check-perf-trace.pl b/tools/perf/scripts/perl/check-perf-trace.pl
new file mode 100644
index 000000000..d307ce8fd
--- /dev/null
+++ b/tools/perf/scripts/perl/check-perf-trace.pl
@@ -0,0 +1,106 @@
+# perf script event handlers, generated by perf script -g perl
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc. Basically, if this script runs successfully and
+# displays expected results, perl scripting support should be ok.
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Context;
+use Perf::Trace::Util;
+
+sub trace_begin
+{
+ print "trace_begin\n";
+}
+
+sub trace_end
+{
+ print "trace_end\n";
+
+ print_unhandled();
+}
+
+sub irq::softirq_entry
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $vec) = @_;
+
+ print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm);
+
+ print_uncommon($context);
+
+ printf("vec=%s\n",
+ symbol_str("irq::softirq_entry", "vec", $vec));
+}
+
+sub kmem::kmalloc
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $call_site, $ptr, $bytes_req, $bytes_alloc,
+ $gfp_flags) = @_;
+
+ print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm);
+
+ print_uncommon($context);
+
+ printf("call_site=%p, ptr=%p, bytes_req=%u, bytes_alloc=%u, ".
+ "gfp_flags=%s\n",
+ $call_site, $ptr, $bytes_req, $bytes_alloc,
+
+ flag_str("kmem::kmalloc", "gfp_flags", $gfp_flags));
+}
+
+# print trace fields not included in handler args
+sub print_uncommon
+{
+ my ($context) = @_;
+
+ printf("common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, ",
+ common_pc($context), trace_flag_str(common_flags($context)),
+ common_lock_depth($context));
+
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain) = @_;
+
+ $unhandled{$event_name}++;
+}
+
+sub print_header
+{
+ my ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;
+
+ printf("%-20s %5u %05u.%09u %8u %-20s ",
+ $event_name, $cpu, $secs, $nsecs, $pid, $comm);
+}
diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl
new file mode 100644
index 000000000..05954a8f3
--- /dev/null
+++ b/tools/perf/scripts/perl/failed-syscalls.pl
@@ -0,0 +1,47 @@
+# failed system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Context;
+use Perf::Trace::Util;
+
+my $for_comm = shift;
+
+my %failed_syscalls;
+
+sub raw_syscalls::sys_exit
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $id, $ret) = @_;
+
+ if ($ret < 0) {
+ $failed_syscalls{$common_comm}++;
+ }
+}
+
+sub syscalls::sys_exit
+{
+ raw_syscalls::sys_exit(@_)
+}
+
+sub trace_end
+{
+ printf("\nfailed syscalls by comm:\n\n");
+
+ printf("%-20s %10s\n", "comm", "# errors");
+    printf("%-20s %10s\n", "--------------------", "----------");
+
+ foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
+ keys %failed_syscalls) {
+ next if ($for_comm && $comm ne $for_comm);
+
+ printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
+ }
+}
diff --git a/tools/perf/scripts/perl/rw-by-file.pl b/tools/perf/scripts/perl/rw-by-file.pl
new file mode 100644
index 000000000..92a750b85
--- /dev/null
+++ b/tools/perf/scripts/perl/rw-by-file.pl
@@ -0,0 +1,106 @@
+#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+
+# Display r/w activity for files read/written to for a given program
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the status files. Those fields not available as handler params can
+# be retrieved via script functions of the form get_common_*().
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my $usage = "perf script -s rw-by-file.pl <comm>\n";
+
+my $for_comm = shift or die $usage;
+
+my %reads;
+my %writes;
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain, $nr, $fd, $buf, $count) = @_;
+
+ if ($common_comm eq $for_comm) {
+ $reads{$fd}{bytes_requested} += $count;
+ $reads{$fd}{total_reads}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain, $nr, $fd, $buf, $count) = @_;
+
+ if ($common_comm eq $for_comm) {
+ $writes{$fd}{bytes_written} += $count;
+ $writes{$fd}{total_writes}++;
+ }
+}
+
+sub trace_end
+{
+ printf("file read counts for $for_comm:\n\n");
+
+ printf("%6s %10s %10s\n", "fd", "# reads", "bytes_requested");
+ printf("%6s %10s %10s\n", "------", "----------", "-----------");
+
+ foreach my $fd (sort {$reads{$b}{bytes_requested} <=>
+ $reads{$a}{bytes_requested}} keys %reads) {
+ my $total_reads = $reads{$fd}{total_reads};
+ my $bytes_requested = $reads{$fd}{bytes_requested};
+ printf("%6u %10u %10u\n", $fd, $total_reads, $bytes_requested);
+ }
+
+ printf("\nfile write counts for $for_comm:\n\n");
+
+ printf("%6s %10s %10s\n", "fd", "# writes", "bytes_written");
+ printf("%6s %10s %10s\n", "------", "----------", "-----------");
+
+ foreach my $fd (sort {$writes{$b}{bytes_written} <=>
+ $writes{$a}{bytes_written}} keys %writes) {
+ my $total_writes = $writes{$fd}{total_writes};
+ my $bytes_written = $writes{$fd}{bytes_written};
+ printf("%6u %10u %10u\n", $fd, $total_writes, $bytes_written);
+ }
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain) = @_;
+
+ $unhandled{$event_name}++;
+}
+
+
diff --git a/tools/perf/scripts/perl/rw-by-pid.pl b/tools/perf/scripts/perl/rw-by-pid.pl
new file mode 100644
index 000000000..d789fe39c
--- /dev/null
+++ b/tools/perf/scripts/perl/rw-by-pid.pl
@@ -0,0 +1,184 @@
+#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+
+# Display r/w activity for all processes
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the status files. Those fields not available as handler params can
+# be retrieved via script functions of the form get_common_*().
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my %reads;
+my %writes;
+
+sub syscalls::sys_exit_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $ret) = @_;
+
+ if ($ret > 0) {
+ $reads{$common_pid}{bytes_read} += $ret;
+ } else {
+ if (!defined ($reads{$common_pid}{bytes_read})) {
+ $reads{$common_pid}{bytes_read} = 0;
+ }
+ $reads{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $fd, $buf, $count) = @_;
+
+ $reads{$common_pid}{bytes_requested} += $count;
+ $reads{$common_pid}{total_reads}++;
+ $reads{$common_pid}{comm} = $common_comm;
+}
+
+sub syscalls::sys_exit_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $ret) = @_;
+
+ if ($ret <= 0) {
+ $writes{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $fd, $buf, $count) = @_;
+
+ $writes{$common_pid}{bytes_written} += $count;
+ $writes{$common_pid}{total_writes}++;
+ $writes{$common_pid}{comm} = $common_comm;
+}
+
+sub trace_end
+{
+ printf("read counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s %10s\n", "pid", "comm",
+ "# reads", "bytes_requested", "bytes_read");
+ printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
+ "-----------", "----------", "----------");
+
+ foreach my $pid (sort { ($reads{$b}{bytes_read} || 0) <=>
+ ($reads{$a}{bytes_read} || 0) } keys %reads) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $total_reads = $reads{$pid}{total_reads} || 0;
+ my $bytes_requested = $reads{$pid}{bytes_requested} || 0;
+ my $bytes_read = $reads{$pid}{bytes_read} || 0;
+
+ printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
+ $total_reads, $bytes_requested, $bytes_read);
+ }
+
+ printf("\nfailed reads by pid:\n\n");
+
+ printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors");
+ printf("%6s %20s %6s %10s\n", "------", "--------------------",
+ "------", "----------");
+
+ my @errcounts = ();
+
+ foreach my $pid (keys %reads) {
+ foreach my $error (keys %{$reads{$pid}{errors}}) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $errcount = $reads{$pid}{errors}{$error} || 0;
+ push @errcounts, [$pid, $comm, $error, $errcount];
+ }
+ }
+
+ @errcounts = sort { $b->[3] <=> $a->[3] } @errcounts;
+
+ for my $i (0 .. $#errcounts) {
+ printf("%6d %-20s %6d %10s\n", $errcounts[$i][0],
+ $errcounts[$i][1], $errcounts[$i][2], $errcounts[$i][3]);
+ }
+
+ printf("\nwrite counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s\n", "pid", "comm",
+ "# writes", "bytes_written");
+ printf("%6s %-20s %10s %10s\n", "------", "--------------------",
+ "-----------", "----------");
+
+ foreach my $pid (sort { ($writes{$b}{bytes_written} || 0) <=>
+ ($writes{$a}{bytes_written} || 0)} keys %writes) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $total_writes = $writes{$pid}{total_writes} || 0;
+ my $bytes_written = $writes{$pid}{bytes_written} || 0;
+
+ printf("%6s %-20s %10s %10s\n", $pid, $comm,
+ $total_writes, $bytes_written);
+ }
+
+ printf("\nfailed writes by pid:\n\n");
+
+ printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors");
+ printf("%6s %20s %6s %10s\n", "------", "--------------------",
+ "------", "----------");
+
+ @errcounts = ();
+
+ foreach my $pid (keys %writes) {
+ foreach my $error (keys %{$writes{$pid}{errors}}) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $errcount = $writes{$pid}{errors}{$error} || 0;
+ push @errcounts, [$pid, $comm, $error, $errcount];
+ }
+ }
+
+ @errcounts = sort { $b->[3] <=> $a->[3] } @errcounts;
+
+ for my $i (0 .. $#errcounts) {
+ printf("%6d %-20s %6d %10s\n", $errcounts[$i][0],
+ $errcounts[$i][1], $errcounts[$i][2], $errcounts[$i][3]);
+ }
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl
new file mode 100644
index 000000000..eba4df67a
--- /dev/null
+++ b/tools/perf/scripts/perl/rwtop.pl
@@ -0,0 +1,203 @@
+#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+
+# read/write top
+#
+# Periodically displays system-wide r/w call activity, broken down by
+# pid. If an [interval] arg is specified, the display will be
+# refreshed every [interval] seconds. The default interval is 3
+# seconds.
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+use POSIX qw/SIGALRM SA_RESTART/;
+
+my $default_interval = 3;
+my $nlines = 20;
+my $print_thread;
+my $print_pending = 0;
+
+my %reads;
+my %writes;
+
+my $interval = shift;
+if (!$interval) {
+ $interval = $default_interval;
+}
+
+sub syscalls::sys_exit_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $ret) = @_;
+
+ print_check();
+
+ if ($ret > 0) {
+ $reads{$common_pid}{bytes_read} += $ret;
+ } else {
+ if (!defined ($reads{$common_pid}{bytes_read})) {
+ $reads{$common_pid}{bytes_read} = 0;
+ }
+ $reads{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $fd, $buf, $count) = @_;
+
+ print_check();
+
+ $reads{$common_pid}{bytes_requested} += $count;
+ $reads{$common_pid}{total_reads}++;
+ $reads{$common_pid}{comm} = $common_comm;
+}
+
+sub syscalls::sys_exit_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $ret) = @_;
+
+ print_check();
+
+ if ($ret <= 0) {
+ $writes{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $nr, $fd, $buf, $count) = @_;
+
+ print_check();
+
+ $writes{$common_pid}{bytes_written} += $count;
+ $writes{$common_pid}{total_writes}++;
+ $writes{$common_pid}{comm} = $common_comm;
+}
+
+sub trace_begin
+{
+ my $sa = POSIX::SigAction->new(\&set_print_pending);
+ $sa->flags(SA_RESTART);
+ $sa->safe(1);
+ POSIX::sigaction(SIGALRM, $sa) or die "Can't set SIGALRM handler: $!\n";
+ alarm 1;
+}
+
+sub trace_end
+{
+ print_unhandled();
+ print_totals();
+}
+
+sub print_check()
+{
+ if ($print_pending == 1) {
+ $print_pending = 0;
+ print_totals();
+ }
+}
+
+sub set_print_pending()
+{
+ $print_pending = 1;
+ alarm $interval;
+}
+
+sub print_totals
+{
+ my $count;
+
+ $count = 0;
+
+ clear_term();
+
+ printf("\nread counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s %10s\n", "pid", "comm",
+ "# reads", "bytes_req", "bytes_read");
+ printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
+ "----------", "----------", "----------");
+
+ foreach my $pid (sort { ($reads{$b}{bytes_read} || 0) <=>
+ ($reads{$a}{bytes_read} || 0) } keys %reads) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $total_reads = $reads{$pid}{total_reads} || 0;
+ my $bytes_requested = $reads{$pid}{bytes_requested} || 0;
+ my $bytes_read = $reads{$pid}{bytes_read} || 0;
+
+ printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
+ $total_reads, $bytes_requested, $bytes_read);
+
+ if (++$count == $nlines) {
+ last;
+ }
+ }
+
+ $count = 0;
+
+ printf("\nwrite counts by pid:\n\n");
+
+ printf("%6s %20s %10s %13s\n", "pid", "comm",
+ "# writes", "bytes_written");
+ printf("%6s %-20s %10s %13s\n", "------", "--------------------",
+ "----------", "-------------");
+
+ foreach my $pid (sort { ($writes{$b}{bytes_written} || 0) <=>
+ ($writes{$a}{bytes_written} || 0)} keys %writes) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $total_writes = $writes{$pid}{total_writes} || 0;
+ my $bytes_written = $writes{$pid}{bytes_written} || 0;
+
+ printf("%6s %-20s %10s %13s\n", $pid, $comm,
+ $total_writes, $bytes_written);
+
+ if (++$count == $nlines) {
+ last;
+ }
+ }
+
+ %reads = ();
+ %writes = ();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/perl/wakeup-latency.pl b/tools/perf/scripts/perl/wakeup-latency.pl
new file mode 100644
index 000000000..53444ff4e
--- /dev/null
+++ b/tools/perf/scripts/perl/wakeup-latency.pl
@@ -0,0 +1,107 @@
+#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+
+# Display avg/min/max wakeup latency
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the status files. Those fields not available as handler params can
+# be retrieved via script functions of the form get_common_*().
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my %last_wakeup;
+
+my $max_wakeup_latency;
+my $min_wakeup_latency;
+my $total_wakeup_latency = 0;
+my $total_wakeups = 0;
+
+sub sched::sched_switch
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $prev_comm, $prev_pid, $prev_prio, $prev_state, $next_comm, $next_pid,
+ $next_prio) = @_;
+
+ my $wakeup_ts = $last_wakeup{$common_cpu}{ts};
+ if ($wakeup_ts) {
+ my $switch_ts = nsecs($common_secs, $common_nsecs);
+ my $wakeup_latency = $switch_ts - $wakeup_ts;
+ if ($wakeup_latency > $max_wakeup_latency) {
+ $max_wakeup_latency = $wakeup_latency;
+ }
+ if ($wakeup_latency < $min_wakeup_latency) {
+ $min_wakeup_latency = $wakeup_latency;
+ }
+ $total_wakeup_latency += $wakeup_latency;
+ $total_wakeups++;
+ }
+ $last_wakeup{$common_cpu}{ts} = 0;
+}
+
+sub sched::sched_wakeup
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain,
+ $comm, $pid, $prio, $success, $target_cpu) = @_;
+
+ $last_wakeup{$target_cpu}{ts} = nsecs($common_secs, $common_nsecs);
+}
+
+sub trace_begin
+{
+ $min_wakeup_latency = 1000000000;
+ $max_wakeup_latency = 0;
+}
+
+sub trace_end
+{
+ printf("wakeup_latency stats:\n\n");
+ print "total_wakeups: $total_wakeups\n";
+ if ($total_wakeups) {
+ printf("avg_wakeup_latency (ns): %u\n",
+ avg($total_wakeup_latency, $total_wakeups));
+ } else {
+ printf("avg_wakeup_latency (ns): N/A\n");
+ }
+ printf("min_wakeup_latency (ns): %u\n", $min_wakeup_latency);
+ printf("max_wakeup_latency (ns): %u\n", $max_wakeup_latency);
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $common_callchain) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
new file mode 100644
index 000000000..7d0e33ce6
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -0,0 +1,3 @@
+perf-y += Context.o
+
+CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
new file mode 100644
index 000000000..0b7096847
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Context.c. Python interfaces for perf script.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ */
+
+#include <Python.h>
+#include "../../../util/trace-event.h"
+
+#if PY_MAJOR_VERSION < 3
+#define _PyCapsule_GetPointer(arg1, arg2) \
+ PyCObject_AsVoidPtr(arg1)
+
+PyMODINIT_FUNC initperf_trace_context(void);
+#else
+#define _PyCapsule_GetPointer(arg1, arg2) \
+ PyCapsule_GetPointer((arg1), (arg2))
+
+PyMODINIT_FUNC PyInit_perf_trace_context(void);
+#endif
+
+static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = _PyCapsule_GetPointer(context, NULL);
+ retval = common_pc(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_flags(PyObject *obj,
+ PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = _PyCapsule_GetPointer(context, NULL);
+ retval = common_flags(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
+ PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = _PyCapsule_GetPointer(context, NULL);
+ retval = common_lock_depth(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyMethodDef ContextMethods[] = {
+ { "common_pc", perf_trace_context_common_pc, METH_VARARGS,
+ "Get the common preempt count event field value."},
+ { "common_flags", perf_trace_context_common_flags, METH_VARARGS,
+ "Get the common flags event field value."},
+ { "common_lock_depth", perf_trace_context_common_lock_depth,
+ METH_VARARGS, "Get the common lock depth event field value."},
+ { NULL, NULL, 0, NULL}
+};
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initperf_trace_context(void)
+{
+ (void) Py_InitModule("perf_trace_context", ContextMethods);
+}
+#else
+PyMODINIT_FUNC PyInit_perf_trace_context(void)
+{
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "perf_trace_context", /* m_name */
+ "", /* m_doc */
+ -1, /* m_size */
+ ContextMethods, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL, /* m_free */
+ };
+ return PyModule_Create(&moduledef);
+}
+#endif
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
new file mode 100644
index 000000000..54ace2f6b
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -0,0 +1,116 @@
+# Core.py - Python extension for perf script, core functions
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+from collections import defaultdict
+
+def autodict():
+ return defaultdict(autodict)
+
+flag_fields = autodict()
+symbolic_fields = autodict()
+
+def define_flag_field(event_name, field_name, delim):
+ flag_fields[event_name][field_name]['delim'] = delim
+
+def define_flag_value(event_name, field_name, value, field_str):
+ flag_fields[event_name][field_name]['values'][value] = field_str
+
+def define_symbolic_field(event_name, field_name):
+ # nothing to do, really
+ pass
+
+def define_symbolic_value(event_name, field_name, value, field_str):
+ symbolic_fields[event_name][field_name]['values'][value] = field_str
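+
+# a minimal usage sketch with illustrative names and values:
+#   define_symbolic_field("my::event", "state")
+#   define_symbolic_value("my::event", "state", 0, "RUNNING")
+#   symbol_str("my::event", "state", 0)   # -> "RUNNING"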
+
+def flag_str(event_name, field_name, value):
+ string = ""
+
+ if flag_fields[event_name][field_name]:
+ print_delim = 0
+ for idx in sorted(flag_fields[event_name][field_name]['values']):
+ if not value and not idx:
+ string += flag_fields[event_name][field_name]['values'][idx]
+ break
+ if idx and (value & idx) == idx:
+ if print_delim and flag_fields[event_name][field_name]['delim']:
+ string += " " + flag_fields[event_name][field_name]['delim'] + " "
+ string += flag_fields[event_name][field_name]['values'][idx]
+ print_delim = 1
+ value &= ~idx
+
+ return string
+
+def symbol_str(event_name, field_name, value):
+ string = ""
+
+ if symbolic_fields[event_name][field_name]:
+ for idx in sorted(symbolic_fields[event_name][field_name]['values']):
+ if not value and not idx:
+ string = symbolic_fields[event_name][field_name]['values'][idx]
+ break
+ if (value == idx):
+ string = symbolic_fields[event_name][field_name]['values'][idx]
+ break
+
+ return string
+
+trace_flags = { 0x00: "NONE", \
+ 0x01: "IRQS_OFF", \
+ 0x02: "IRQS_NOSUPPORT", \
+ 0x04: "NEED_RESCHED", \
+ 0x08: "HARDIRQ", \
+ 0x10: "SOFTIRQ" }
+
+def trace_flag_str(value):
+ string = ""
+ print_delim = 0
+
+ for idx in trace_flags:
+ if not value and not idx:
+ string += "NONE"
+ break
+
+ if idx and (value & idx) == idx:
+ if print_delim:
+ string += " | ";
+ string += trace_flags[idx]
+ print_delim = 1
+ value &= ~idx
+
+ return string
+
+
+def taskState(state):
+ states = {
+ 0 : "R",
+ 1 : "S",
+ 2 : "D",
+ 64: "DEAD"
+ }
+
+ if state not in states:
+ return "Unknown"
+
+ return states[state]
+
+
+class EventHeaders:
+ def __init__(self, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm, common_callchain):
+ self.cpu = common_cpu
+ self.secs = common_secs
+ self.nsecs = common_nsecs
+ self.pid = common_pid
+ self.comm = common_comm
+ self.callchain = common_callchain
+
+ def ts(self):
+ return (self.secs * (10 ** 9)) + self.nsecs
+
+ def ts_format(self):
+ return "%d.%d" % (self.secs, int(self.nsecs / 1000))
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
new file mode 100755
index 000000000..21a7a1298
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
@@ -0,0 +1,97 @@
+# EventClass.py
+# SPDX-License-Identifier: GPL-2.0
+#
+# This is a library defining some event type classes, which can
+# be used by other scripts to analyze perf samples.
+#
+# Currently there are just a few example classes defined:
+# PerfEvent is the base class for all perf event samples, PebsEvent
+# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
+# event classes based on their requirements.
+from __future__ import print_function
+
+import struct
+
+# Event types, user could add more here
+EVTYPE_GENERIC = 0
+EVTYPE_PEBS = 1 # Basic PEBS event
+EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
+EVTYPE_IBS = 3
+
+#
+# Currently we don't have a good way to tell the event type other
+# than by the size of the raw buffer: a raw PEBS event with load
+# latency data is 176 bytes, while a pure PEBS event is 144 bytes.
+#
+def create_event(name, comm, dso, symbol, raw_buf):
+ if (len(raw_buf) == 144):
+ event = PebsEvent(name, comm, dso, symbol, raw_buf)
+ elif (len(raw_buf) == 176):
+ event = PebsNHM(name, comm, dso, symbol, raw_buf)
+ else:
+ event = PerfEvent(name, comm, dso, symbol, raw_buf)
+
+ return event
+
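+# A usage sketch (sample values are hypothetical); dispatch is purely
+# by raw buffer size, as noted above:
+#
+#   ev = create_event("cycles", "bash", "[kernel]", "sys_read", b"\x00" * 144)
+#   ev.show()   # constructed as a PebsEvent
+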
+class PerfEvent(object):
+ event_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
+ self.name = name
+ self.comm = comm
+ self.dso = dso
+ self.symbol = symbol
+ self.raw_buf = raw_buf
+ self.ev_type = ev_type
+ PerfEvent.event_num += 1
+
+ def show(self):
+ print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+ (self.name, self.symbol, self.comm, self.dso))
+
+#
+# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
+# contains the context info when that event happened: the EFLAGS and
+# linear IP info, as well as all the registers.
+#
+class PebsEvent(PerfEvent):
+ pebs_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
+ tmp_buf=raw_buf[0:80]
+ flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
+ self.flags = flags
+ self.ip = ip
+ self.ax = ax
+ self.bx = bx
+ self.cx = cx
+ self.dx = dx
+ self.si = si
+ self.di = di
+ self.bp = bp
+ self.sp = sp
+
+ PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+ PebsEvent.pebs_num += 1
+ del tmp_buf
+
+#
+# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
+# in the four 64-bit words written after the PEBS data:
+# Status: records the IA32_PERF_GLOBAL_STATUS register value
+# DLA: Data Linear Address (EIP)
+# DSE: Data Source Encoding, where the latency happens, hit or miss
+# in L1/L2/L3 or IO operations
+# LAT: the actual latency in cycles
+#
+class PebsNHM(PebsEvent):
+ pebs_nhm_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
+ tmp_buf=raw_buf[144:176]
+ status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
+ self.status = status
+ self.dla = dla
+ self.dse = dse
+ self.lat = lat
+
+ PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+ PebsNHM.pebs_nhm_num += 1
+ del tmp_buf
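+
+# A minimal sketch of the dispatch done by create_event() above: the raw
+# buffer length alone selects the class, so a 176-byte buffer decodes as
+# PEBS plus load latency (the zero-filled buffer here is a placeholder,
+# not real PEBS data):
+#
+#   ev = create_event("cycles", "bash", "[kernel]", "noploop", b'\x00' * 176)
+#   assert isinstance(ev, PebsNHM) and ev.ev_type == EVTYPE_PEBS_LL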
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
new file mode 100644
index 000000000..cac7b2542
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
@@ -0,0 +1,184 @@
+# SchedGui.py - Python extension for perf script, basic GUI code for
+# traces drawing and overview.
+#
+# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+
+try:
+ import wx
+except ImportError:
+ raise ImportError("You need to install the wxpython lib for this script")
+
+
+class RootFrame(wx.Frame):
+ Y_OFFSET = 100
+ RECT_HEIGHT = 100
+ RECT_SPACE = 50
+ EVENT_MARKING_WIDTH = 5
+
+ def __init__(self, sched_tracer, title, parent = None, id = -1):
+ wx.Frame.__init__(self, parent, id, title)
+
+ (self.screen_width, self.screen_height) = wx.GetDisplaySize()
+ self.screen_width -= 10
+ self.screen_height -= 10
+ self.zoom = 0.5
+ self.scroll_scale = 20
+ self.sched_tracer = sched_tracer
+ self.sched_tracer.set_root_win(self)
+ (self.ts_start, self.ts_end) = sched_tracer.interval()
+ self.update_width_virtual()
+ self.nr_rects = sched_tracer.nr_rectangles() + 1
+ self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+
+ # whole window panel
+ self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
+
+ # scrollable container
+ self.scroll = wx.ScrolledWindow(self.panel)
+ self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
+ self.scroll.EnableScrolling(True, True)
+ self.scroll.SetFocus()
+
+ # scrollable drawing area
+ self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
+ self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
+ self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+ self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+ self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
+ self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+ self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+
+ self.scroll.Fit()
+ self.Fit()
+
+ self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
+
+ self.txt = None
+
+ self.Show(True)
+
+ def us_to_px(self, val):
+ return val / (10 ** 3) * self.zoom
+
+ def px_to_us(self, val):
+ return (val / self.zoom) * (10 ** 3)
+
+ def scroll_start(self):
+ (x, y) = self.scroll.GetViewStart()
+ return (x * self.scroll_scale, y * self.scroll_scale)
+
+ def scroll_start_us(self):
+ (x, y) = self.scroll_start()
+ return self.px_to_us(x)
+
+ def paint_rectangle_zone(self, nr, color, top_color, start, end):
+ offset_px = self.us_to_px(start - self.ts_start)
+ width_px = self.us_to_px(end - self.ts_start)
+
+ offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+ width_py = RootFrame.RECT_HEIGHT
+
+ dc = self.dc
+
+ if top_color is not None:
+ (r, g, b) = top_color
+ top_color = wx.Colour(r, g, b)
+ brush = wx.Brush(top_color, wx.SOLID)
+ dc.SetBrush(brush)
+ dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
+ width_py -= RootFrame.EVENT_MARKING_WIDTH
+ offset_py += RootFrame.EVENT_MARKING_WIDTH
+
+ (r ,g, b) = color
+ color = wx.Colour(r, g, b)
+ brush = wx.Brush(color, wx.SOLID)
+ dc.SetBrush(brush)
+ dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
+
+ def update_rectangles(self, dc, start, end):
+ start += self.ts_start
+ end += self.ts_start
+ self.sched_tracer.fill_zone(start, end)
+
+ def on_paint(self, event):
+ dc = wx.PaintDC(self.scroll_panel)
+ self.dc = dc
+
+ width = min(self.width_virtual, self.screen_width)
+ (x, y) = self.scroll_start()
+ start = self.px_to_us(x)
+ end = self.px_to_us(x + width)
+ self.update_rectangles(dc, start, end)
+
+ def rect_from_ypixel(self, y):
+ y -= RootFrame.Y_OFFSET
+		rect = y // (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+ height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+
+ if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
+ return -1
+
+ return rect
+
+ def update_summary(self, txt):
+ if self.txt:
+ self.txt.Destroy()
+ self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
+
+
+ def on_mouse_down(self, event):
+ (x, y) = event.GetPositionTuple()
+ rect = self.rect_from_ypixel(y)
+ if rect == -1:
+ return
+
+ t = self.px_to_us(x) + self.ts_start
+
+ self.sched_tracer.mouse_down(rect, t)
+
+
+ def update_width_virtual(self):
+ self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
+
+ def __zoom(self, x):
+ self.update_width_virtual()
+ (xpos, ypos) = self.scroll.GetViewStart()
+ xpos = self.us_to_px(x) / self.scroll_scale
+ self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
+ self.Refresh()
+
+ def zoom_in(self):
+ x = self.scroll_start_us()
+ self.zoom *= 2
+ self.__zoom(x)
+
+ def zoom_out(self):
+ x = self.scroll_start_us()
+ self.zoom /= 2
+ self.__zoom(x)
+
+
+ def on_key_press(self, event):
+ key = event.GetRawKeyCode()
+ if key == ord("+"):
+ self.zoom_in()
+ return
+ if key == ord("-"):
+ self.zoom_out()
+ return
+
+ key = event.GetKeyCode()
+ (x, y) = self.scroll.GetViewStart()
+ if key == wx.WXK_RIGHT:
+ self.scroll.Scroll(x + 1, y)
+ elif key == wx.WXK_LEFT:
+ self.scroll.Scroll(x - 1, y)
+ elif key == wx.WXK_DOWN:
+ self.scroll.Scroll(x, y + 1)
+ elif key == wx.WXK_UP:
+ self.scroll.Scroll(x, y - 1)
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
new file mode 100644
index 000000000..7384dcb62
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -0,0 +1,91 @@
+# Util.py - Python extension for perf script, miscellaneous utility code
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+from __future__ import print_function
+
+import errno, os
+
+FUTEX_WAIT = 0
+FUTEX_WAKE = 1
+FUTEX_PRIVATE_FLAG = 128
+FUTEX_CLOCK_REALTIME = 256
+FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
+NSECS_PER_SEC = 1000000000
+
+def avg(total, n):
+ return total / n
+
+def nsecs(secs, nsecs):
+ return secs * NSECS_PER_SEC + nsecs
+
+def nsecs_secs(nsecs):
+	return nsecs // NSECS_PER_SEC
+
+def nsecs_nsecs(nsecs):
+ return nsecs % NSECS_PER_SEC
+
+def nsecs_str(nsecs):
+	return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
+
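+# Track (min, max, avg, count) per key. Note that the average is an
+# exponential smoothing of recent values rather than a true mean.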
+def add_stats(dict, key, value):
+ if key not in dict:
+ dict[key] = (value, value, value, 1)
+ else:
+ min, max, avg, count = dict[key]
+ if value < min:
+ min = value
+ if value > max:
+ max = value
+ avg = (avg + value) / 2
+ dict[key] = (min, max, avg, count + 1)
+
+def clear_term():
+ print("\x1b[H\x1b[2J")
+
+audit_package_warned = False
+
+try:
+ import audit
+ machine_to_id = {
+ 'x86_64': audit.MACH_86_64,
+ 'alpha' : audit.MACH_ALPHA,
+ 'ia64' : audit.MACH_IA64,
+ 'ppc' : audit.MACH_PPC,
+ 'ppc64' : audit.MACH_PPC64,
+ 'ppc64le' : audit.MACH_PPC64LE,
+ 's390' : audit.MACH_S390,
+ 's390x' : audit.MACH_S390X,
+ 'i386' : audit.MACH_X86,
+ 'i586' : audit.MACH_X86,
+ 'i686' : audit.MACH_X86,
+ }
+ try:
+ machine_to_id['armeb'] = audit.MACH_ARMEB
+ except:
+ pass
+ machine_id = machine_to_id[os.uname()[4]]
+except:
+ if not audit_package_warned:
+ audit_package_warned = True
+ print("Install the audit-libs-python package to get syscall names.\n"
+ "For example:\n # apt-get install python-audit (Ubuntu)"
+ "\n # yum install audit-libs-python (Fedora)"
+ "\n etc.\n")
+
+def syscall_name(id):
+ try:
+ return audit.audit_syscall_to_name(id, machine_id)
+ except:
+ return str(id)
+
+def strerror(nr):
+ try:
+ return errno.errorcode[abs(nr)]
+ except:
+ return "Unknown %d errno" % nr
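+
+# For example, on x86_64 with audit-libs-python installed, syscall_name(2)
+# returns "open", and strerror(-2) returns "ENOENT".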
diff --git a/tools/perf/scripts/python/bin/compaction-times-record b/tools/perf/scripts/python/bin/compaction-times-record
new file mode 100644
index 000000000..6edcd40e1
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e compaction:mm_compaction_begin -e compaction:mm_compaction_end -e compaction:mm_compaction_migratepages -e compaction:mm_compaction_isolate_migratepages -e compaction:mm_compaction_isolate_freepages $@
diff --git a/tools/perf/scripts/python/bin/compaction-times-report b/tools/perf/scripts/python/bin/compaction-times-report
new file mode 100644
index 000000000..3dc13897c
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: display time taken by mm compaction
+# args: [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]
+perf script -s "$PERF_EXEC_PATH"/scripts/python/compaction-times.py $@
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-record b/tools/perf/scripts/python/bin/event_analyzing_sample-record
new file mode 100644
index 000000000..5ce652dab
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# event_analyzing_sample.py can cover all types of perf samples, including
+# tracepoints, so there are no special record requirements; just record
+# what you want to analyze.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-report b/tools/perf/scripts/python/bin/event_analyzing_sample-report
new file mode 100644
index 000000000..0941fc94e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: analyze all perf samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
diff --git a/tools/perf/scripts/python/bin/export-to-postgresql-record b/tools/perf/scripts/python/bin/export-to-postgresql-record
new file mode 100644
index 000000000..221d66e05
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-postgresql-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# Export perf data to a postgresql database. This covers perf ip
+# samples (excluding tracepoints). There are no special record
+# requirements; just record what you want to export.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/export-to-postgresql-report b/tools/perf/scripts/python/bin/export-to-postgresql-report
new file mode 100644
index 000000000..cd335b6e2
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-postgresql-report
@@ -0,0 +1,29 @@
+#!/bin/bash
+# description: export perf data to a postgresql database
+# args: [database name] [columns] [calls]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 3 ] ; then
+ echo "usage: export-to-postgresql-report [database name] [columns] [calls]"
+ exit
+fi
+if [ "$n_args" -gt 2 ] ; then
+ dbname=$1
+ columns=$2
+ calls=$3
+ shift 3
+elif [ "$n_args" -gt 1 ] ; then
+ dbname=$1
+ columns=$2
+ shift 2
+elif [ "$n_args" -gt 0 ] ; then
+ dbname=$1
+ shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-postgresql.py $dbname $columns $calls
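+
+# Example invocation (hypothetical database name):
+#   export-to-postgresql-report pt_example branches calls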
diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-record b/tools/perf/scripts/python/bin/export-to-sqlite-record
new file mode 100644
index 000000000..070204fd6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-sqlite-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# Export perf data to a sqlite3 database. This covers perf ip
+# samples (excluding tracepoints). There are no special record
+# requirements; just record what you want to export.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-report b/tools/perf/scripts/python/bin/export-to-sqlite-report
new file mode 100644
index 000000000..5ff6033e7
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-sqlite-report
@@ -0,0 +1,29 @@
+#!/bin/bash
+# description: export perf data to a sqlite3 database
+# args: [database name] [columns] [calls]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 3 ] ; then
+ echo "usage: export-to-sqlite-report [database name] [columns] [calls]"
+ exit
+fi
+if [ "$n_args" -gt 2 ] ; then
+ dbname=$1
+ columns=$2
+ calls=$3
+ shift 3
+elif [ "$n_args" -gt 1 ] ; then
+ dbname=$1
+ columns=$2
+ shift 2
+elif [ "$n_args" -gt 0 ] ; then
+ dbname=$1
+ shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-sqlite.py $dbname $columns $calls
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
new file mode 100644
index 000000000..74685f318
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
new file mode 100644
index 000000000..fda5096d0
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide failed syscalls, by pid
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/flamegraph-record b/tools/perf/scripts/python/bin/flamegraph-record
new file mode 100755
index 000000000..7df5a19c0
--- /dev/null
+++ b/tools/perf/scripts/python/bin/flamegraph-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -g "$@"
diff --git a/tools/perf/scripts/python/bin/flamegraph-report b/tools/perf/scripts/python/bin/flamegraph-report
new file mode 100755
index 000000000..53c5dc90c
--- /dev/null
+++ b/tools/perf/scripts/python/bin/flamegraph-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: create flame graphs
+perf script -s "$PERF_EXEC_PATH"/scripts/python/flamegraph.py -- "$@"
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
new file mode 100644
index 000000000..b1495c9a9
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-report b/tools/perf/scripts/python/bin/futex-contention-report
new file mode 100644
index 000000000..6c4427109
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: futex contention measurement
+
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-record b/tools/perf/scripts/python/bin/intel-pt-events-record
new file mode 100644
index 000000000..10fe2b697
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-record
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+#
+# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
+# to be specified with appropriate config terms.
+#
+if ! echo "$@" | grep -q intel_pt ; then
+ echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
+ echo "and for power events it probably needs to be system wide i.e. -a option"
+ echo "For example: -a -e intel_pt/pwr_evt,branch=0/ sleep 1"
+ exit 1
+fi
+perf record $@
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-report b/tools/perf/scripts/python/bin/intel-pt-events-report
new file mode 100644
index 000000000..9a9c92fcd
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: print Intel PT Power Events and PTWRITE
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py \ No newline at end of file
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-record b/tools/perf/scripts/python/bin/mem-phys-addr-record
new file mode 100644
index 000000000..5a875122a
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-record
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+#
+# Profile physical memory accesses using the all-retired-loads
+# instructions/uops event, MEM_INST_RETIRED.ALL_LOADS or
+# MEM_UOPS_RETIRED.ALL_LOADS
+#
+
+load=`perf list | grep mem_inst_retired.all_loads`
+if [ -z "$load" ]; then
+ load=`perf list | grep mem_uops_retired.all_loads`
+fi
+if [ -z "$load" ]; then
+ echo "There is no event to count all retired load instructions/uops."
+ exit 1
+fi
+
+arg=$(echo $load | tr -d ' ')
+arg="$arg:P"
+perf record --phys-data -e $arg $@
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-report b/tools/perf/scripts/python/bin/mem-phys-addr-report
new file mode 100644
index 000000000..3f2b847e2
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: resolve physical address samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/mem-phys-addr.py
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-record b/tools/perf/scripts/python/bin/net_dropmonitor-record
new file mode 100755
index 000000000..423fb81da
--- /dev/null
+++ b/tools/perf/scripts/python/bin/net_dropmonitor-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e skb:kfree_skb $@
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-report b/tools/perf/scripts/python/bin/net_dropmonitor-report
new file mode 100755
index 000000000..8d698f5a0
--- /dev/null
+++ b/tools/perf/scripts/python/bin/net_dropmonitor-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: display a table of dropped frames
+
+perf script -s "$PERF_EXEC_PATH"/scripts/python/net_dropmonitor.py $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
new file mode 100644
index 000000000..558754b84
--- /dev/null
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+perf record -e net:net_dev_xmit -e net:net_dev_queue \
+ -e net:netif_receive_skb -e net:netif_rx \
+ -e skb:consume_skb -e skb:kfree_skb \
+ -e skb:skb_copy_datagram_iovec -e napi:napi_poll \
+ -e irq:irq_handler_entry -e irq:irq_handler_exit \
+ -e irq:softirq_entry -e irq:softirq_exit \
+ -e irq:softirq_raise $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report
new file mode 100644
index 000000000..8f759291d
--- /dev/null
+++ b/tools/perf/scripts/python/bin/netdev-times-report
@@ -0,0 +1,5 @@
+#!/bin/bash
+# description: display the flow of packets and per-stage processing times
+# args: [tx] [rx] [dev=] [debug]
+
+perf script -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-record b/tools/perf/scripts/python/bin/powerpc-hcalls-record
new file mode 100644
index 000000000..b7402aa91
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e "{powerpc:hcall_entry,powerpc:hcall_exit}" $@
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-report b/tools/perf/scripts/python/bin/powerpc-hcalls-report
new file mode 100644
index 000000000..dd32ad746
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-report
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/powerpc-hcalls.py
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
new file mode 100644
index 000000000..7493fddbe
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sched-migration-report b/tools/perf/scripts/python/bin/sched-migration-report
new file mode 100644
index 000000000..68b037a18
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: sched migration overview
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
new file mode 100644
index 000000000..d6940841e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/sctop-report b/tools/perf/scripts/python/bin/sctop-report
new file mode 100644
index 000000000..c32db2941
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-report
@@ -0,0 +1,24 @@
+#!/bin/bash
+# description: syscall top
+# args: [comm] [interval]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 2 ] ; then
+ echo "usage: sctop-report [comm] [interval]"
+ exit
+fi
+if [ "$n_args" -gt 1 ] ; then
+ comm=$1
+ interval=$2
+ shift 2
+elif [ "$n_args" -gt 0 ] ; then
+ interval=$1
+ shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval
diff --git a/tools/perf/scripts/python/bin/stackcollapse-record b/tools/perf/scripts/python/bin/stackcollapse-record
new file mode 100755
index 000000000..9d8f9f0f3
--- /dev/null
+++ b/tools/perf/scripts/python/bin/stackcollapse-record
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+#
+# stackcollapse.py can cover all types of perf samples, including
+# tracepoints, so there are no special record requirements; just record
+# what you want to analyze.
+#
+perf record "$@"
diff --git a/tools/perf/scripts/python/bin/stackcollapse-report b/tools/perf/scripts/python/bin/stackcollapse-report
new file mode 100755
index 000000000..356b96563
--- /dev/null
+++ b/tools/perf/scripts/python/bin/stackcollapse-report
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: produce callgraphs in short form for scripting use
+perf script -s "$PERF_EXEC_PATH"/scripts/python/stackcollapse.py -- "$@"
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
new file mode 100644
index 000000000..d6940841e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
new file mode 100644
index 000000000..16eb8d65c
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide syscall counts, by pid
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
new file mode 100644
index 000000000..d6940841e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
new file mode 100644
index 000000000..0f0e9d453
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide syscall counts
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 000000000..d2c229548
--- /dev/null
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,84 @@
+# perf script event handlers, generated by perf script -g python
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc. Basically, if this script runs successfully and
+# displays expected results, Python scripting support should be ok.
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from Core import *
+from perf_trace_context import *
+
+unhandled = autodict()
+
+def trace_begin():
+	print("trace_begin")
+
+def trace_end():
+ print_unhandled()
+
+def irq__softirq_entry(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print_uncommon(context)
+
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
+
+def kmem__kmalloc(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print_uncommon(context)
+
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
+ (call_site, ptr, bytes_req, bytes_alloc,
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
+
+def trace_unhandled(event_name, context, event_fields_dict):
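+	# unhandled is an autodict: a missing key yields an empty dict, so the
+	# += below raises TypeError the first time and we fall back to = 1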
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
+
+def print_header(event_name, cpu, secs, nsecs, pid, comm):
+ print("%-20s %5u %05u.%09u %8u %-20s " %
+ (event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')
+
+# print trace fields not included in handler args
+def print_uncommon(context):
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context)))
+
+def print_unhandled():
+ keys = unhandled.keys()
+ if not keys:
+ return
+
+ print("\nunhandled events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for event_name in keys:
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
new file mode 100644
index 000000000..2560a042d
--- /dev/null
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -0,0 +1,311 @@
+# report time spent in compaction
+# Licensed under the terms of the GNU GPL License version 2
+
+# testing:
+# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
+
+import os
+import sys
+import re
+
+import signal
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
+
+class popt:
+ DISP_DFL = 0
+ DISP_PROC = 1
+ DISP_PROC_VERBOSE=2
+
+class topt:
+ DISP_TIME = 0
+ DISP_MIG = 1
+ DISP_ISOLFREE = 2
+ DISP_ISOLMIG = 4
+ DISP_ALL = 7
+
+class comm_filter:
+ def __init__(self, re):
+ self.re = re
+
+ def filter(self, pid, comm):
+ m = self.re.search(comm)
+ return m == None or m.group() == ""
+
+class pid_filter:
+ def __init__(self, low, high):
+ self.low = (0 if low == "" else int(low))
+ self.high = (0 if high == "" else int(high))
+
+ def filter(self, pid, comm):
+ return not (pid >= self.low and (self.high == 0 or pid <= self.high))
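+
+# Note the filter() methods return True when a sample should be dropped;
+# e.g. pid_filter(1000, 2000).filter(1500, "x") returns False (kept), while
+# pids outside 1000-2000 return True (filtered out).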
+
+def set_type(t):
+ global opt_disp
+ opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
+
+def ns(sec, nsec):
+ return (sec * 1000000000) + nsec
+
+def time(ns):
+ return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
+
+class pair:
+ def __init__(self, aval, bval, alabel = None, blabel = None):
+ self.alabel = alabel
+ self.blabel = blabel
+ self.aval = aval
+ self.bval = bval
+
+ def __add__(self, rhs):
+ self.aval += rhs.aval
+ self.bval += rhs.bval
+ return self
+
+ def __str__(self):
+ return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
+
+class cnode:
+ def __init__(self, ns):
+ self.ns = ns
+ self.migrated = pair(0, 0, "moved", "failed")
+ self.fscan = pair(0,0, "scanned", "isolated")
+ self.mscan = pair(0,0, "scanned", "isolated")
+
+ def __add__(self, rhs):
+ self.ns += rhs.ns
+ self.migrated += rhs.migrated
+ self.fscan += rhs.fscan
+ self.mscan += rhs.mscan
+ return self
+
+ def __str__(self):
+ prev = 0
+ s = "%s " % time(self.ns)
+ if (opt_disp & topt.DISP_MIG):
+ s += "migration: %s" % self.migrated
+ prev = 1
+ if (opt_disp & topt.DISP_ISOLFREE):
+ s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
+ prev = 1
+ if (opt_disp & topt.DISP_ISOLMIG):
+ s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
+ return s
+
+ def complete(self, secs, nsecs):
+ self.ns = ns(secs, nsecs) - self.ns
+
+ def increment(self, migrated, fscan, mscan):
+ if (migrated != None):
+ self.migrated += migrated
+ if (fscan != None):
+ self.fscan += fscan
+ if (mscan != None):
+ self.mscan += mscan
+
+
+class chead:
+ heads = {}
+	val = cnode(0)
+ fobj = None
+
+ @classmethod
+ def add_filter(cls, filter):
+ cls.fobj = filter
+
+ @classmethod
+ def create_pending(cls, pid, comm, start_secs, start_nsecs):
+ filtered = 0
+ try:
+ head = cls.heads[pid]
+ filtered = head.is_filtered()
+ except KeyError:
+ if cls.fobj != None:
+ filtered = cls.fobj.filter(pid, comm)
+ head = cls.heads[pid] = chead(comm, pid, filtered)
+
+ if not filtered:
+ head.mark_pending(start_secs, start_nsecs)
+
+ @classmethod
+ def increment_pending(cls, pid, migrated, fscan, mscan):
+ head = cls.heads[pid]
+ if not head.is_filtered():
+ if head.is_pending():
+ head.do_increment(migrated, fscan, mscan)
+ else:
+ sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+ @classmethod
+ def complete_pending(cls, pid, secs, nsecs):
+ head = cls.heads[pid]
+ if not head.is_filtered():
+ if head.is_pending():
+ head.make_complete(secs, nsecs)
+ else:
+ sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+ @classmethod
+ def gen(cls):
+ if opt_proc != popt.DISP_DFL:
+ for i in cls.heads:
+ yield cls.heads[i]
+
+ @classmethod
+ def str(cls):
+ return cls.val
+
+ def __init__(self, comm, pid, filtered):
+ self.comm = comm
+ self.pid = pid
+ self.val = cnode(0)
+ self.pending = None
+ self.filtered = filtered
+ self.list = []
+
+	def __add__(self, rhs):
+		self.val += rhs.val
+		return self
+
+ def mark_pending(self, secs, nsecs):
+ self.pending = cnode(ns(secs, nsecs))
+
+ def do_increment(self, migrated, fscan, mscan):
+ self.pending.increment(migrated, fscan, mscan)
+
+ def make_complete(self, secs, nsecs):
+ self.pending.complete(secs, nsecs)
+ chead.val += self.pending
+
+ if opt_proc != popt.DISP_DFL:
+ self.val += self.pending
+
+ if opt_proc == popt.DISP_PROC_VERBOSE:
+ self.list.append(self.pending)
+ self.pending = None
+
+ def enumerate(self):
+ if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
+ for i, pelem in enumerate(self.list):
+ sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
+
+ def is_pending(self):
+ return self.pending != None
+
+ def is_filtered(self):
+ return self.filtered
+
+ def display(self):
+ if not self.is_filtered():
+ sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
+
+
+def trace_end():
+ sys.stdout.write("total: %s\n" % chead.str())
+ for i in chead.gen():
+		i.display()
+ i.enumerate()
+
+def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, nr_migrated, nr_failed):
+
+ chead.increment_pending(common_pid,
+ pair(nr_migrated, nr_failed), None, None)
+
+def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+ chead.increment_pending(common_pid,
+ None, pair(nr_scanned, nr_taken), None)
+
+def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+ chead.increment_pending(common_pid,
+ None, None, pair(nr_scanned, nr_taken))
+
+def compaction__mm_compaction_end(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, zone_start, migrate_start, free_start, zone_end,
+ sync, status):
+
+ chead.complete_pending(common_pid, common_secs, common_nsecs)
+
+def compaction__mm_compaction_begin(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, zone_start, migrate_start, free_start, zone_end,
+ sync):
+
+ chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
+
+def pr_help():
+ global usage
+
+ sys.stdout.write(usage)
+ sys.stdout.write("\n")
+ sys.stdout.write("-h display this help\n")
+ sys.stdout.write("-p display by process\n")
+ sys.stdout.write("-pv display by process (verbose)\n")
+ sys.stdout.write("-t display stall times only\n")
+ sys.stdout.write("-m display stats for migration\n")
+ sys.stdout.write("-fs display stats for free scanner\n")
+ sys.stdout.write("-ms display stats for migration scanner\n")
+ sys.stdout.write("-u display results in microseconds (default nanoseconds)\n")
+
+
+comm_re = None
+pid_re = None
+pid_regex = r"^(\d*)-(\d*)$|^(\d*)$"
+
+opt_proc = popt.DISP_DFL
+opt_disp = topt.DISP_ALL
+
+opt_ns = True
+
+argc = len(sys.argv) - 1
+if argc >= 1:
+ pid_re = re.compile(pid_regex)
+
+ for i, opt in enumerate(sys.argv[1:]):
+ if opt[0] == "-":
+ if opt == "-h":
+ pr_help()
+				sys.exit(0)
+ elif opt == "-p":
+ opt_proc = popt.DISP_PROC
+ elif opt == "-pv":
+ opt_proc = popt.DISP_PROC_VERBOSE
+ elif opt == '-u':
+ opt_ns = False
+ elif opt == "-t":
+ set_type(topt.DISP_TIME)
+ elif opt == "-m":
+ set_type(topt.DISP_MIG)
+ elif opt == "-fs":
+ set_type(topt.DISP_ISOLFREE)
+ elif opt == "-ms":
+ set_type(topt.DISP_ISOLMIG)
+ else:
+ sys.exit(usage)
+
+ elif i == argc - 1:
+ m = pid_re.search(opt)
+ if m != None and m.group() != "":
+ if m.group(3) != None:
+ f = pid_filter(m.group(3), m.group(3))
+ else:
+ f = pid_filter(m.group(1), m.group(2))
+ else:
+			try:
+				comm_re = re.compile(opt)
+			except re.error:
+				sys.stderr.write("invalid regex '%s'\n" % opt)
+				sys.exit(usage)
+ f = comm_filter(comm_re)
+
+ chead.add_filter(f)
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
new file mode 100644
index 000000000..aa1e2cfa2
--- /dev/null
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -0,0 +1,192 @@
+# event_analyzing_sample.py: general event handler in python
+# SPDX-License-Identifier: GPL-2.0
+#
+# The current perf report is already very powerful with annotation integrated,
+# and this script is not trying to be as powerful as perf report; rather, it
+# provides end users/developers a flexible way to analyze events other
+# than tracepoints.
+#
+# The two database-related functions in this script just show how to gather
+# the basic information, and users can modify them or write their own
+# functions according to their specific requirements.
+#
+# The first function, "show_general_events", just does a basic grouping of
+# all generic events with the help of sqlite, and the second, "show_pebs_ll",
+# is for an x86 HW PMU event: PEBS with load latency data.
+#
+
+from __future__ import print_function
+
+import os
+import sys
+import math
+import struct
+import sqlite3
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from EventClass import *
+
+#
+# If the perf.data file has a large number of samples, the insert operation
+# will be very time consuming (about 10+ minutes for 10000 samples) if the
+# .db database is on disk. Move the .db file to a RAM-based FS to speed up
+# the handling, which cuts the time down to several seconds.
+#
+con = sqlite3.connect("/dev/shm/perf.db")
+con.isolation_level = None
+
+def trace_begin():
+ print("In trace_begin:\n")
+
+	#
+	# Create several tables at the start: pebs_ll is for PEBS data with
+	# load latency info, while gen_events is for general events.
+	#
+ con.execute("""
+ create table if not exists gen_events (
+ name text,
+ symbol text,
+ comm text,
+ dso text
+ );""")
+ con.execute("""
+ create table if not exists pebs_ll (
+ name text,
+ symbol text,
+ comm text,
+ dso text,
+ flags integer,
+ ip integer,
+ status integer,
+ dse integer,
+ dla integer,
+ lat integer
+ );""")
+
+#
+# Create an event object and insert it into the database so that users
+# can do more analysis with simple database commands.
+#
+def process_event(param_dict):
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if ("dso" in param_dict):
+ dso = param_dict["dso"]
+ else:
+ dso = "Unknown_dso"
+
+ if ("symbol" in param_dict):
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "Unknown_symbol"
+
+ # Create the event object and insert it to the right table in database
+ event = create_event(name, comm, dso, symbol, raw_buf)
+ insert_db(event)
+
+def insert_db(event):
+ if event.ev_type == EVTYPE_GENERIC:
+ con.execute("insert into gen_events values(?, ?, ?, ?)",
+ (event.name, event.symbol, event.comm, event.dso))
+ elif event.ev_type == EVTYPE_PEBS_LL:
+ event.ip &= 0x7fffffffffffffff
+ event.dla &= 0x7fffffffffffffff
+ con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+ (event.name, event.symbol, event.comm, event.dso, event.flags,
+ event.ip, event.status, event.dse, event.dla, event.lat))
+
+def trace_end():
+ print("In trace_end:\n")
+	# We show the basic info for the two types of event classes
+ show_general_events()
+ show_pebs_ll()
+ con.close()
+
+#
+# As the event counts may be very large, we can't show the histogram
+# on a linear scale, so we use a log2 scale instead.
+#
+
+def num2sym(num):
+ # Each number will have at least one '#'
+ snum = '#' * (int)(math.log(num, 2) + 1)
+ return snum
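+
+# For example, num2sym(1) == "#" and num2sym(1024) == "#" * 11, so each
+# extra '#' roughly doubles the count.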
+
+def show_general_events():
+
+ # Check the total record number in the table
+ count = con.execute("select count(*) from gen_events")
+ for t in count:
+		print("There are %d records in the gen_events table" % t[0])
+ if t[0] == 0:
+ return
+
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")
+
+ # Group by thread
+ commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
+ for row in commq:
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+ # Group by symbol
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
+ symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
+ for row in symbolq:
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+ # Group by dso
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
+ dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
+ for row in dsoq:
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+#
+# This function just shows the basic info, and we could do more with the
+# data in the tables, like checking the function parameters when some
+# big latency events happen.
+#
+def show_pebs_ll():
+
+ count = con.execute("select count(*) from pebs_ll")
+ for t in count:
+		print("There are %d records in the pebs_ll table" % t[0])
+ if t[0] == 0:
+ return
+
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
+
+ # Group by thread
+ commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
+ for row in commq:
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+ # Group by symbol
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
+ symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
+ for row in symbolq:
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+ # Group by dse
+ dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
+ for row in dseq:
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+ # Group by latency
+ latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
+ for row in latq:
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
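+
+# After a run, the tables can also be inspected interactively, e.g.
+# (a hypothetical query):
+#   $ sqlite3 /dev/shm/perf.db "select comm, count(*) from gen_events group by comm"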
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
new file mode 100644
index 000000000..d187e46c2
--- /dev/null
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -0,0 +1,1111 @@
+# export-to-postgresql.py: export perf data to a postgresql database
+# Copyright (c) 2014, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+from __future__ import print_function
+
+import os
+import sys
+import struct
+import datetime
+
+# To use this script you will need to have installed the python-pyside package,
+# which provides LGPL-licensed Python bindings for Qt. You will also need the
+# libqt4-sql-psql package for Qt postgresql support.
+#
+# The script assumes postgresql is running on the local machine and that the
+# user has postgresql permissions to create databases. Examples of installing
+# postgresql and adding such a user are:
+#
+# fedora:
+#
+# $ sudo yum install postgresql postgresql-server qt-postgresql
+# $ sudo su - postgres -c initdb
+# $ sudo service postgresql start
+# $ sudo su - postgres
+# $ createuser -s <your user id here> # Older versions may not support -s, in which case answer the prompt below:
+# Shall the new role be a superuser? (y/n) y
+# $ sudo yum install python-pyside
+#
+# Alternately, to use Python3 and/or pyside 2, one of the following:
+# $ sudo yum install python3-pyside
+# $ pip install --user PySide2
+# $ pip3 install --user PySide2
+#
+# ubuntu:
+#
+# $ sudo apt-get install postgresql
+# $ sudo su - postgres
+# $ createuser -s <your user id here>
+# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql
+#
+# Alternately, to use Python3 and/or pyside 2, one of the following:
+#
+# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql
+# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql
+# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql
+#
+# An example of using this script with Intel PT:
+#
+# $ perf record -e intel_pt//u ls
+# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
+# 2015-05-29 12:49:23.464364 Creating database...
+# 2015-05-29 12:49:26.281717 Writing to intermediate files...
+# 2015-05-29 12:49:27.190383 Copying to database...
+# 2015-05-29 12:49:28.140451 Removing intermediate files...
+# 2015-05-29 12:49:28.147451 Adding primary keys
+# 2015-05-29 12:49:28.655683 Adding foreign keys
+# 2015-05-29 12:49:29.365350 Done
+#
+# To browse the database, psql can be used e.g.
+#
+# $ psql pt_example
+# pt_example=# select * from samples_view where id < 100;
+# pt_example=# \d+
+# pt_example=# \d+ samples_view
+# pt_example=# \q
+#
+# An example of using the database is provided by the script
+# exported-sql-viewer.py. Refer to that script for details.
+#
+# Tables:
+#
+# The tables largely correspond to perf tools' data structures. They are mostly self-explanatory.
+#
+# samples
+#
+# 'samples' is the main table. It represents what instruction was executing at a point in time
+# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
+#
+# calls
+#
+# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
+# 'calls' is only created when the 'calls' option to this script is specified.
+#
+# call_paths
+#
+# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
+# 'calls_paths' is only created when the 'calls' option to this script is specified.
+#
+# branch_types
+#
+# 'branch_types' provides descriptions for each type of branch.
+#
+# comm_threads
+#
+# 'comm_threads' shows how 'comms' relates to 'threads'.
+#
+# comms
+#
+# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
+#
+# dsos
+#
+# 'dsos' contains a record for each executable file or library.
+#
+# machines
+#
+# 'machines' can be used to distinguish virtual machines if virtualization is supported.
+#
+# selected_events
+#
+# 'selected_events' contains a record for each kind of event that has been sampled.
+#
+# symbols
+#
+# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
+#
+# threads
+#
+# 'threads' contains a record for each thread.
+#
+# Views:
+#
+# Most of the tables have views for more friendly display. The views are:
+#
+# calls_view
+# call_paths_view
+# comm_threads_view
+# dsos_view
+# machines_view
+# samples_view
+# symbols_view
+# threads_view
+#
+# More examples of browsing the database with psql:
+# Note that some of the examples are not the most optimal SQL queries.
+# Note that call information is only available if the script's 'calls' option has been used.
+#
+# Top 10 function calls (not aggregated by symbol):
+#
+# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
+#
+# Top 10 function calls (aggregated by symbol):
+#
+# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
+# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
+# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
+#
+# Note that the branch count gives a rough estimation of cpu usage, so functions
+# that took a long time but have a relatively low branch count must have spent time
+# waiting.
+#
+# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
+#
+# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
+#
+# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
+#
+# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
+#
+# Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
+#
+# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
+#
+# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
+#
+# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
+#
+# Show transactions:
+#
+# SELECT * FROM samples_view WHERE event = 'transactions';
+#
+# Note transaction start has 'in_tx' true, whereas transaction end has 'in_tx' false.
+# Transaction aborts have branch_type_name 'transaction abort'.
+#
+# Show transaction aborts:
+#
+# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
+#
+# Printing a call stack requires walking the call_paths table, as in this example python script:
+# #!/usr/bin/python2
+#
+# import sys
+# from PySide.QtSql import *
+#
+# if __name__ == '__main__':
+# if (len(sys.argv) < 3):
+# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
+# raise Exception("Too few arguments")
+# dbname = sys.argv[1]
+# call_path_id = sys.argv[2]
+# db = QSqlDatabase.addDatabase('QPSQL')
+# db.setDatabaseName(dbname)
+# if not db.open():
+# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+# query = QSqlQuery(db)
+# print " id ip symbol_id symbol dso_id dso_short_name"
+# while call_path_id != 0 and call_path_id != 1:
+# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
+# if not ret:
+# raise Exception("Query failed: " + query.lastError().text())
+# if not query.next():
+# raise Exception("Query failed")
+# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
+# call_path_id = query.value(6)
+
+pyside_version_1 = True
+if not "pyside-version-1" in sys.argv:
+ try:
+ from PySide2.QtSql import *
+ pyside_version_1 = False
+ except:
+ pass
+
+if pyside_version_1:
+ from PySide.QtSql import *
+
+if sys.version_info < (3, 0):
+ def toserverstr(str):
+ return str
+ def toclientstr(str):
+ return str
+else:
+ # Assume UTF-8 server_encoding and client_encoding
+ def toserverstr(str):
+ return bytes(str, "UTF_8")
+ def toclientstr(str):
+ return bytes(str, "UTF_8")
+
+# Need to access PostgreSQL C library directly to use COPY FROM STDIN
+from ctypes import *
+libpq = CDLL("libpq.so.5")
+PQconnectdb = libpq.PQconnectdb
+PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
+PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
+PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
+PQexec = libpq.PQexec
+PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
+PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
+PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
+PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
+PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
+PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
+
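+# A minimal sketch (with a hypothetical table) of how these bindings drive
+# COPY FROM STDIN, which this script uses to bulk-load the intermediate files:
+#
+#   conn = PQconnectdb(toclientstr("dbname = postgres"))
+#   if PQstatus(conn):                       # CONNECTION_OK is 0
+#       raise Exception("connection failed")
+#   PQexec(conn, toclientstr("COPY mytable FROM STDIN"))
+#   row = toserverstr("1\t2\n")              # tab-separated, newline-terminated
+#   PQputCopyData(conn, row, len(row))
+#   PQputCopyEnd(conn, None)
+#   PQfinish(conn)
+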
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = False
+
+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
+def usage():
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
+ printerr("where: columns 'all' or 'branches'");
+ printerr(" calls 'calls' => create calls and call_paths table");
+ printerr(" callchains 'callchains' => create call_paths table");
+ printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1");
+ raise Exception("Too few or bad arguments")
+
+if (len(sys.argv) < 2):
+ usage()
+
+dbname = sys.argv[1]
+
+if (len(sys.argv) >= 3):
+ columns = sys.argv[2]
+else:
+ columns = "all"
+
+if columns not in ("all", "branches"):
+ usage()
+
+branches = (columns == "branches")
+
+for i in range(3,len(sys.argv)):
+ if (sys.argv[i] == "calls"):
+ perf_db_export_calls = True
+ elif (sys.argv[i] == "callchains"):
+ perf_db_export_callchains = True
+ elif (sys.argv[i] == "pyside-version-1"):
+ pass
+ else:
+ usage()
+
+output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
+os.mkdir(output_dir_name)
+
+def do_query(q, s):
+ if (q.exec_(s)):
+ return
+ raise Exception("Query failed: " + q.lastError().text())
+
+printdate("Creating database...")
+
+db = QSqlDatabase.addDatabase('QPSQL')
+query = QSqlQuery(db)
+db.setDatabaseName('postgres')
+db.open()
+try:
+ do_query(query, 'CREATE DATABASE ' + dbname)
+except:
+ os.rmdir(output_dir_name)
+ raise
+query.finish()
+query.clear()
+db.close()
+
+db.setDatabaseName(dbname)
+db.open()
+
+query = QSqlQuery(db)
+do_query(query, 'SET client_min_messages TO WARNING')
+
+do_query(query, 'CREATE TABLE selected_events ('
+ 'id bigint NOT NULL,'
+ 'name varchar(80))')
+do_query(query, 'CREATE TABLE machines ('
+ 'id bigint NOT NULL,'
+ 'pid integer,'
+ 'root_dir varchar(4096))')
+do_query(query, 'CREATE TABLE threads ('
+ 'id bigint NOT NULL,'
+ 'machine_id bigint,'
+ 'process_id bigint,'
+ 'pid integer,'
+ 'tid integer)')
+do_query(query, 'CREATE TABLE comms ('
+ 'id bigint NOT NULL,'
+ 'comm varchar(16),'
+ 'c_thread_id bigint,'
+ 'c_time bigint,'
+ 'exec_flag boolean)')
+do_query(query, 'CREATE TABLE comm_threads ('
+ 'id bigint NOT NULL,'
+ 'comm_id bigint,'
+ 'thread_id bigint)')
+do_query(query, 'CREATE TABLE dsos ('
+ 'id bigint NOT NULL,'
+ 'machine_id bigint,'
+ 'short_name varchar(256),'
+ 'long_name varchar(4096),'
+ 'build_id varchar(64))')
+do_query(query, 'CREATE TABLE symbols ('
+ 'id bigint NOT NULL,'
+ 'dso_id bigint,'
+ 'sym_start bigint,'
+ 'sym_end bigint,'
+ 'binding integer,'
+ 'name varchar(2048))')
+do_query(query, 'CREATE TABLE branch_types ('
+ 'id integer NOT NULL,'
+ 'name varchar(80))')
+
+if branches:
+ do_query(query, 'CREATE TABLE samples ('
+ 'id bigint NOT NULL,'
+ 'evsel_id bigint,'
+ 'machine_id bigint,'
+ 'thread_id bigint,'
+ 'comm_id bigint,'
+ 'dso_id bigint,'
+ 'symbol_id bigint,'
+ 'sym_offset bigint,'
+ 'ip bigint,'
+ 'time bigint,'
+ 'cpu integer,'
+ 'to_dso_id bigint,'
+ 'to_symbol_id bigint,'
+ 'to_sym_offset bigint,'
+ 'to_ip bigint,'
+ 'branch_type integer,'
+ 'in_tx boolean,'
+ 'call_path_id bigint,'
+ 'insn_count bigint,'
+ 'cyc_count bigint)')
+else:
+ do_query(query, 'CREATE TABLE samples ('
+ 'id bigint NOT NULL,'
+ 'evsel_id bigint,'
+ 'machine_id bigint,'
+ 'thread_id bigint,'
+ 'comm_id bigint,'
+ 'dso_id bigint,'
+ 'symbol_id bigint,'
+ 'sym_offset bigint,'
+ 'ip bigint,'
+ 'time bigint,'
+ 'cpu integer,'
+ 'to_dso_id bigint,'
+ 'to_symbol_id bigint,'
+ 'to_sym_offset bigint,'
+ 'to_ip bigint,'
+ 'period bigint,'
+ 'weight bigint,'
+ 'transaction bigint,'
+ 'data_src bigint,'
+ 'branch_type integer,'
+ 'in_tx boolean,'
+ 'call_path_id bigint,'
+ 'insn_count bigint,'
+ 'cyc_count bigint)')
+
+if perf_db_export_calls or perf_db_export_callchains:
+ do_query(query, 'CREATE TABLE call_paths ('
+ 'id bigint NOT NULL,'
+ 'parent_id bigint,'
+ 'symbol_id bigint,'
+ 'ip bigint)')
+if perf_db_export_calls:
+ do_query(query, 'CREATE TABLE calls ('
+ 'id bigint NOT NULL,'
+ 'thread_id bigint,'
+ 'comm_id bigint,'
+ 'call_path_id bigint,'
+ 'call_time bigint,'
+ 'return_time bigint,'
+ 'branch_count bigint,'
+ 'call_id bigint,'
+ 'return_id bigint,'
+ 'parent_call_path_id bigint,'
+ 'flags integer,'
+ 'parent_id bigint,'
+ 'insn_count bigint,'
+ 'cyc_count bigint)')
+
+do_query(query, 'CREATE TABLE ptwrite ('
+ 'id bigint NOT NULL,'
+ 'payload bigint,'
+ 'exact_ip boolean)')
+
+do_query(query, 'CREATE TABLE cbr ('
+ 'id bigint NOT NULL,'
+ 'cbr integer,'
+ 'mhz integer,'
+ 'percent integer)')
+
+do_query(query, 'CREATE TABLE mwait ('
+ 'id bigint NOT NULL,'
+ 'hints integer,'
+ 'extensions integer)')
+
+do_query(query, 'CREATE TABLE pwre ('
+ 'id bigint NOT NULL,'
+ 'cstate integer,'
+ 'subcstate integer,'
+ 'hw boolean)')
+
+do_query(query, 'CREATE TABLE exstop ('
+ 'id bigint NOT NULL,'
+ 'exact_ip boolean)')
+
+do_query(query, 'CREATE TABLE pwrx ('
+ 'id bigint NOT NULL,'
+ 'deepest_cstate integer,'
+ 'last_cstate integer,'
+ 'wake_reason integer)')
+
+do_query(query, 'CREATE TABLE context_switches ('
+ 'id bigint NOT NULL,'
+ 'machine_id bigint,'
+ 'time bigint,'
+ 'cpu integer,'
+ 'thread_out_id bigint,'
+ 'comm_out_id bigint,'
+ 'thread_in_id bigint,'
+ 'comm_in_id bigint,'
+ 'flags integer)')
+
+do_query(query, 'CREATE VIEW machines_view AS '
+ 'SELECT '
+ 'id,'
+ 'pid,'
+ 'root_dir,'
+ 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+ ' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+ 'SELECT '
+ 'id,'
+ 'machine_id,'
+ '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+ 'short_name,'
+ 'long_name,'
+ 'build_id'
+ ' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+ 'SELECT '
+ 'id,'
+ 'name,'
+ '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+ 'dso_id,'
+ 'sym_start,'
+ 'sym_end,'
+ 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+ ' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+ 'SELECT '
+ 'id,'
+ 'machine_id,'
+ '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+ 'process_id,'
+ 'pid,'
+ 'tid'
+ ' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+ 'SELECT '
+ 'comm_id,'
+ '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+ 'thread_id,'
+ '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+ '(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+ ' FROM comm_threads')
+
+if perf_db_export_calls or perf_db_export_callchains:
+ do_query(query, 'CREATE VIEW call_paths_view AS '
+ 'SELECT '
+ 'c.id,'
+ 'to_hex(c.ip) AS ip,'
+ 'c.symbol_id,'
+ '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+ '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+ '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
+ 'c.parent_id,'
+ 'to_hex(p.ip) AS parent_ip,'
+ 'p.symbol_id AS parent_symbol_id,'
+ '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+ '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+ '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
+ ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
+ do_query(query, 'CREATE VIEW calls_view AS '
+ 'SELECT '
+ 'calls.id,'
+ 'thread_id,'
+ '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+ '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+ '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+ 'call_path_id,'
+ 'to_hex(ip) AS ip,'
+ 'symbol_id,'
+ '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+ 'call_time,'
+ 'return_time,'
+ 'return_time - call_time AS elapsed_time,'
+ 'branch_count,'
+ 'insn_count,'
+ 'cyc_count,'
+ 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,'
+ 'call_id,'
+ 'return_id,'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
+ ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
+do_query(query, 'CREATE VIEW samples_view AS '
+ 'SELECT '
+ 'id,'
+ 'time,'
+ 'cpu,'
+ '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+ '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+ '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+ '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+ 'to_hex(ip) AS ip_hex,'
+ '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+ 'sym_offset,'
+ '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+ 'to_hex(to_ip) AS to_ip_hex,'
+ '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
+ 'to_sym_offset,'
+ '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
+ '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
+ 'in_tx,'
+ 'insn_count,'
+ 'cyc_count,'
+ 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC'
+ ' FROM samples')
+
+do_query(query, 'CREATE VIEW ptwrite_view AS '
+ 'SELECT '
+ 'ptwrite.id,'
+ 'time,'
+ 'cpu,'
+ 'to_hex(payload) AS payload_hex,'
+ 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip'
+ ' FROM ptwrite'
+ ' INNER JOIN samples ON samples.id = ptwrite.id')
+
+do_query(query, 'CREATE VIEW cbr_view AS '
+ 'SELECT '
+ 'cbr.id,'
+ 'time,'
+ 'cpu,'
+ 'cbr,'
+ 'mhz,'
+ 'percent'
+ ' FROM cbr'
+ ' INNER JOIN samples ON samples.id = cbr.id')
+
+do_query(query, 'CREATE VIEW mwait_view AS '
+ 'SELECT '
+ 'mwait.id,'
+ 'time,'
+ 'cpu,'
+ 'to_hex(hints) AS hints_hex,'
+ 'to_hex(extensions) AS extensions_hex'
+ ' FROM mwait'
+ ' INNER JOIN samples ON samples.id = mwait.id')
+
+do_query(query, 'CREATE VIEW pwre_view AS '
+ 'SELECT '
+ 'pwre.id,'
+ 'time,'
+ 'cpu,'
+ 'cstate,'
+ 'subcstate,'
+ 'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw'
+ ' FROM pwre'
+ ' INNER JOIN samples ON samples.id = pwre.id')
+
+do_query(query, 'CREATE VIEW exstop_view AS '
+ 'SELECT '
+ 'exstop.id,'
+ 'time,'
+ 'cpu,'
+ 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip'
+ ' FROM exstop'
+ ' INNER JOIN samples ON samples.id = exstop.id')
+
+do_query(query, 'CREATE VIEW pwrx_view AS '
+ 'SELECT '
+ 'pwrx.id,'
+ 'time,'
+ 'cpu,'
+ 'deepest_cstate,'
+ 'last_cstate,'
+ 'CASE WHEN wake_reason=1 THEN \'Interrupt\''
+ ' WHEN wake_reason=2 THEN \'Timer Deadline\''
+ ' WHEN wake_reason=4 THEN \'Monitored Address\''
+ ' WHEN wake_reason=8 THEN \'HW\''
+ ' ELSE CAST ( wake_reason AS VARCHAR(2) )'
+ 'END AS wake_reason'
+ ' FROM pwrx'
+ ' INNER JOIN samples ON samples.id = pwrx.id')
+
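+# A note on the view below (editorial addition): each power event sub-table
+# shares its id with the samples table, and any given sample id appears in at
+# most one of them, so FULL JOINing the tables on id and coalescing the ids
+# back together yields exactly one row per power event.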
+do_query(query, 'CREATE VIEW power_events_view AS '
+ 'SELECT '
+ 'samples.id,'
+ 'samples.time,'
+ 'samples.cpu,'
+ 'selected_events.name AS event,'
+ 'FORMAT(\'%6s\', cbr.cbr) AS cbr,'
+ 'FORMAT(\'%6s\', cbr.mhz) AS MHz,'
+ 'FORMAT(\'%5s\', cbr.percent) AS percent,'
+ 'to_hex(mwait.hints) AS hints_hex,'
+ 'to_hex(mwait.extensions) AS extensions_hex,'
+ 'FORMAT(\'%3s\', pwre.cstate) AS cstate,'
+ 'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,'
+ 'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,'
+ 'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' ELSE NULL END AS exact_ip,'
+ 'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,'
+ 'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,'
+ 'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\''
+ ' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\''
+ ' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\''
+ ' WHEN pwrx.wake_reason=8 THEN \'HW\''
+ ' ELSE FORMAT(\'%2s\', pwrx.wake_reason)'
+ 'END AS wake_reason'
+ ' FROM cbr'
+ ' FULL JOIN mwait ON mwait.id = cbr.id'
+ ' FULL JOIN pwre ON pwre.id = cbr.id'
+ ' FULL JOIN exstop ON exstop.id = cbr.id'
+ ' FULL JOIN pwrx ON pwrx.id = cbr.id'
+ ' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)'
+ ' INNER JOIN selected_events ON selected_events.id = samples.evsel_id'
+ ' ORDER BY samples.id')
+
+do_query(query, 'CREATE VIEW context_switches_view AS '
+ 'SELECT '
+ 'context_switches.id,'
+ 'context_switches.machine_id,'
+ 'context_switches.time,'
+ 'context_switches.cpu,'
+ 'th_out.pid AS pid_out,'
+ 'th_out.tid AS tid_out,'
+ 'comm_out.comm AS comm_out,'
+ 'th_in.pid AS pid_in,'
+ 'th_in.tid AS tid_in,'
+ 'comm_in.comm AS comm_in,'
+ 'CASE WHEN context_switches.flags = 0 THEN \'in\''
+ ' WHEN context_switches.flags = 1 THEN \'out\''
+ ' WHEN context_switches.flags = 3 THEN \'out preempt\''
+ ' ELSE CAST ( context_switches.flags AS VARCHAR(11) )'
+ 'END AS flags'
+ ' FROM context_switches'
+ ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id'
+ ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id'
+ ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id'
+ ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id')
+
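+# The intermediate files use PostgreSQL's binary COPY format: an 11-byte
+# signature ("PGCOPY\n\377\r\n\0"), a 32-bit flags word and a 32-bit header
+# extension length (both zero here), then one record per tuple consisting of
+# a 16-bit field count followed by, for each field, a 32-bit byte length and
+# the raw value, and finally a 0xffff trailer. As an illustration (not part
+# of the script), evsel_table(1, "cycles") below would emit:
+#
+#   0002                              field count = 2
+#   00000008 0000000000000001         8-byte id = 1
+#   00000006 6379636c6573             6-byte name = "cycles"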
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
+
+def open_output_file(file_name):
+ path_name = output_dir_name + "/" + file_name
+ file = open(path_name, "wb+")
+ file.write(file_header)
+ return file
+
+def close_output_file(file):
+ file.write(file_trailer)
+ file.close()
+
+def copy_output_file_direct(file, table_name):
+ close_output_file(file)
+ sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
+ do_query(query, sql)
+
+# Use COPY FROM STDIN because server-side security settings may prevent postgres from reading the intermediate files directly
+def copy_output_file(file, table_name):
+ conn = PQconnectdb(toclientstr("dbname = " + dbname))
+ if (PQstatus(conn)): # non-zero status means the connection failed
+ raise Exception("COPY FROM STDIN PQconnectdb failed")
+ file.write(file_trailer)
+ file.seek(0)
+ sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
+ res = PQexec(conn, toclientstr(sql))
+ if (PQresultStatus(res) != 4): # 4 == PGRES_COPY_IN
+ raise Exception("COPY FROM STDIN PQexec failed")
+ data = file.read(65536)
+ while (len(data)):
+ ret = PQputCopyData(conn, data, len(data))
+ if (ret != 1):
+ raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
+ data = file.read(65536)
+ ret = PQputCopyEnd(conn, None)
+ if (ret != 1):
+ raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
+ PQfinish(conn)
+
+def remove_output_file(file):
+ name = file.name
+ file.close()
+ os.unlink(name)
+
+evsel_file = open_output_file("evsel_table.bin")
+machine_file = open_output_file("machine_table.bin")
+thread_file = open_output_file("thread_table.bin")
+comm_file = open_output_file("comm_table.bin")
+comm_thread_file = open_output_file("comm_thread_table.bin")
+dso_file = open_output_file("dso_table.bin")
+symbol_file = open_output_file("symbol_table.bin")
+branch_type_file = open_output_file("branch_type_table.bin")
+sample_file = open_output_file("sample_table.bin")
+if perf_db_export_calls or perf_db_export_callchains:
+ call_path_file = open_output_file("call_path_table.bin")
+if perf_db_export_calls:
+ call_file = open_output_file("call_table.bin")
+ptwrite_file = open_output_file("ptwrite_table.bin")
+cbr_file = open_output_file("cbr_table.bin")
+mwait_file = open_output_file("mwait_table.bin")
+pwre_file = open_output_file("pwre_table.bin")
+exstop_file = open_output_file("exstop_table.bin")
+pwrx_file = open_output_file("pwrx_table.bin")
+context_switches_file = open_output_file("context_switches_table.bin")
+
+def trace_begin():
+ printdate("Writing to intermediate files...")
+ # id == 0 means unknown. It is easier to create records for the unknowns than to replace the zeroes with NULLs
+ evsel_table(0, "unknown")
+ machine_table(0, 0, "unknown")
+ thread_table(0, 0, 0, -1, -1)
+ comm_table(0, "unknown", 0, 0, 0)
+ dso_table(0, 0, "unknown", "unknown", "")
+ symbol_table(0, 0, 0, 0, 0, "unknown")
+ sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ if perf_db_export_calls or perf_db_export_callchains:
+ call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+unhandled_count = 0
+
+def is_table_empty(table_name):
+ do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1')
+ if query.next():
+ return False
+ return True
+
+def drop(table_name):
+ do_query(query, 'DROP VIEW ' + table_name + '_view')
+ do_query(query, 'DROP TABLE ' + table_name)
+
+def trace_end():
+ printdate("Copying to database...")
+ copy_output_file(evsel_file, "selected_events")
+ copy_output_file(machine_file, "machines")
+ copy_output_file(thread_file, "threads")
+ copy_output_file(comm_file, "comms")
+ copy_output_file(comm_thread_file, "comm_threads")
+ copy_output_file(dso_file, "dsos")
+ copy_output_file(symbol_file, "symbols")
+ copy_output_file(branch_type_file, "branch_types")
+ copy_output_file(sample_file, "samples")
+ if perf_db_export_calls or perf_db_export_callchains:
+ copy_output_file(call_path_file, "call_paths")
+ if perf_db_export_calls:
+ copy_output_file(call_file, "calls")
+ copy_output_file(ptwrite_file, "ptwrite")
+ copy_output_file(cbr_file, "cbr")
+ copy_output_file(mwait_file, "mwait")
+ copy_output_file(pwre_file, "pwre")
+ copy_output_file(exstop_file, "exstop")
+ copy_output_file(pwrx_file, "pwrx")
+ copy_output_file(context_switches_file, "context_switches")
+
+ printdate("Removing intermediate files...")
+ remove_output_file(evsel_file)
+ remove_output_file(machine_file)
+ remove_output_file(thread_file)
+ remove_output_file(comm_file)
+ remove_output_file(comm_thread_file)
+ remove_output_file(dso_file)
+ remove_output_file(symbol_file)
+ remove_output_file(branch_type_file)
+ remove_output_file(sample_file)
+ if perf_db_export_calls or perf_db_export_callchains:
+ remove_output_file(call_path_file)
+ if perf_db_export_calls:
+ remove_output_file(call_file)
+ remove_output_file(ptwrite_file)
+ remove_output_file(cbr_file)
+ remove_output_file(mwait_file)
+ remove_output_file(pwre_file)
+ remove_output_file(exstop_file)
+ remove_output_file(pwrx_file)
+ remove_output_file(context_switches_file)
+ os.rmdir(output_dir_name)
+ printdate("Adding primary keys")
+ do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
+ if perf_db_export_calls or perf_db_export_callchains:
+ do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
+ if perf_db_export_calls:
+ do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)')
+ do_query(query, 'ALTER TABLE context_switches ADD PRIMARY KEY (id)')
+
+ printdate("Adding foreign keys")
+ do_query(query, 'ALTER TABLE threads '
+ 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
+ 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
+ do_query(query, 'ALTER TABLE comms '
+ 'ADD CONSTRAINT threadfk FOREIGN KEY (c_thread_id) REFERENCES threads (id)')
+ do_query(query, 'ALTER TABLE comm_threads '
+ 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
+ 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
+ do_query(query, 'ALTER TABLE dsos '
+ 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
+ do_query(query, 'ALTER TABLE symbols '
+ 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
+ do_query(query, 'ALTER TABLE samples '
+ 'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
+ 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
+ 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
+ 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
+ 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
+ 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
+ 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
+ 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
+ if perf_db_export_calls or perf_db_export_callchains:
+ do_query(query, 'ALTER TABLE call_paths '
+ 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
+ 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
+ if perf_db_export_calls:
+ do_query(query, 'ALTER TABLE calls '
+ 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
+ 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
+ 'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
+ 'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
+ 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
+ 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
+ do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
+ do_query(query, 'ALTER TABLE comms ADD has_calls boolean')
+ do_query(query, 'UPDATE comms SET has_calls = TRUE WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)')
+ do_query(query, 'ALTER TABLE ptwrite '
+ 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
+ do_query(query, 'ALTER TABLE cbr '
+ 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
+ do_query(query, 'ALTER TABLE mwait '
+ 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
+ do_query(query, 'ALTER TABLE pwre '
+ 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
+ do_query(query, 'ALTER TABLE exstop '
+ 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
+ do_query(query, 'ALTER TABLE pwrx '
+ 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)')
+ do_query(query, 'ALTER TABLE context_switches '
+ 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
+ 'ADD CONSTRAINT toutfk FOREIGN KEY (thread_out_id) REFERENCES threads (id),'
+ 'ADD CONSTRAINT tinfk FOREIGN KEY (thread_in_id) REFERENCES threads (id),'
+ 'ADD CONSTRAINT coutfk FOREIGN KEY (comm_out_id) REFERENCES comms (id),'
+ 'ADD CONSTRAINT cinfk FOREIGN KEY (comm_in_id) REFERENCES comms (id)')
+
+ printdate("Dropping unused tables")
+ if is_table_empty("ptwrite"):
+ drop("ptwrite")
+ if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
+ do_query(query, 'DROP VIEW power_events_view')
+ drop("mwait")
+ drop("pwre")
+ drop("exstop")
+ drop("pwrx")
+ if is_table_empty("cbr"):
+ drop("cbr")
+ if is_table_empty("context_switches"):
+ drop("context_switches")
+
+ if (unhandled_count):
+ printdate("Warning:", unhandled_count, "unhandled events")
+ printdate("Done")
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ global unhandled_count
+ unhandled_count += 1
+
+def sched__sched_switch(*x):
+ pass
+
+def evsel_table(evsel_id, evsel_name, *x):
+ evsel_name = toserverstr(evsel_name)
+ n = len(evsel_name)
+ fmt = "!hiqi" + str(n) + "s"
+ value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
+ evsel_file.write(value)
+
+def machine_table(machine_id, pid, root_dir, *x):
+ root_dir = toserverstr(root_dir)
+ n = len(root_dir)
+ fmt = "!hiqiii" + str(n) + "s"
+ value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
+ machine_file.write(value)
+
+def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
+ value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
+ thread_file.write(value)
+
+def comm_table(comm_id, comm_str, thread_id, time, exec_flag, *x):
+ comm_str = toserverstr(comm_str)
+ n = len(comm_str)
+ fmt = "!hiqi" + str(n) + "s" + "iqiqiB"
+ value = struct.pack(fmt, 5, 8, comm_id, n, comm_str, 8, thread_id, 8, time, 1, exec_flag)
+ comm_file.write(value)
+
+def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
+ fmt = "!hiqiqiq"
+ value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
+ comm_thread_file.write(value)
+
+def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+ short_name = toserverstr(short_name)
+ long_name = toserverstr(long_name)
+ build_id = toserverstr(build_id)
+ n1 = len(short_name)
+ n2 = len(long_name)
+ n3 = len(build_id)
+ fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
+ value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
+ dso_file.write(value)
+
+def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+ symbol_name = toserverstr(symbol_name)
+ n = len(symbol_name)
+ fmt = "!hiqiqiqiqiii" + str(n) + "s"
+ value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
+ symbol_file.write(value)
+
+def branch_type_table(branch_type, name, *x):
+ name = toserverstr(name)
+ n = len(name)
+ fmt = "!hiii" + str(n) + "s"
+ value = struct.pack(fmt, 2, 4, branch_type, n, name)
+ branch_type_file.write(value)
+
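+# The leading 20 (or 24) below is the COPY field count; each column is then
+# written as a 32-bit length plus the value, matching the branches and full
+# samples schemas above (the branches schema omits period, weight,
+# transaction and data_src).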
+def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x):
+ if branches:
+ value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
+ else:
+ value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
+ sample_file.write(value)
+
+def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
+ fmt = "!hiqiqiqiq"
+ value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
+ call_path_file.write(value)
+
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x):
+ fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq"
+ value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt)
+ call_file.write(value)
+
+def ptwrite(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ flags = data[0]
+ payload = data[1]
+ exact_ip = flags & 1
+ value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip)
+ ptwrite_file.write(value)
+
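+# The "+ 500) / 1000" and "+ 5) / 10" below round to the nearest MHz and
+# percent respectively; the arithmetic suggests data[4] is a frequency in kHz
+# and data[2] the maximum (non-turbo) ratio, though that is inferred from the
+# computation rather than documented here.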
+def cbr(id, raw_buf):
+ data = struct.unpack_from("<BBBBII", raw_buf)
+ cbr = data[0]
+ MHz = (data[4] + 500) / 1000
+ percent = ((cbr * 1000 / data[2]) + 5) / 10
+ value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
+ cbr_file.write(value)
+
+def mwait(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ hints = payload & 0xff
+ extensions = (payload >> 32) & 0x3
+ value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions)
+ mwait_file.write(value)
+
+def pwre(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ hw = (payload >> 7) & 1
+ cstate = (payload >> 12) & 0xf
+ subcstate = (payload >> 8) & 0xf
+ value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw)
+ pwre_file.write(value)
+
+def exstop(id, raw_buf):
+ data = struct.unpack_from("<I", raw_buf)
+ flags = data[0]
+ exact_ip = flags & 1
+ value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip)
+ exstop_file.write(value)
+
+def pwrx(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ deepest_cstate = payload & 0xf
+ last_cstate = (payload >> 4) & 0xf
+ wake_reason = (payload >> 8) & 0xf
+ value = struct.pack("!hiqiiiiii", 4, 8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason)
+ pwrx_file.write(value)
+
+def synth_data(id, config, raw_buf, *x):
+ if config == 0:
+ ptwrite(id, raw_buf)
+ elif config == 1:
+ mwait(id, raw_buf)
+ elif config == 2:
+ pwre(id, raw_buf)
+ elif config == 3:
+ exstop(id, raw_buf)
+ elif config == 4:
+ pwrx(id, raw_buf)
+ elif config == 5:
+ cbr(id, raw_buf)
+
+def context_switch_table(id, machine_id, time, cpu, thread_out_id, comm_out_id, thread_in_id, comm_in_id, flags, *x):
+ fmt = "!hiqiqiqiiiqiqiqiqii"
+ value = struct.pack(fmt, 9, 8, id, 8, machine_id, 8, time, 4, cpu, 8, thread_out_id, 8, comm_out_id, 8, thread_in_id, 8, comm_in_id, 4, flags)
+ context_switches_file.write(value)
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
new file mode 100644
index 000000000..8043a7272
--- /dev/null
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -0,0 +1,796 @@
+# export-to-sqlite.py: export perf data to a sqlite3 database
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+from __future__ import print_function
+
+import os
+import sys
+import struct
+import datetime
+
+# To use this script you will need to have installed the package python-pyside,
+# which provides LGPL-licensed Python bindings for Qt. You will also need the package
+# libqt4-sql-sqlite for Qt sqlite3 support.
+#
+# Examples of installing pyside:
+#
+# ubuntu:
+#
+# $ sudo apt-get install python-pyside.qtsql libqt4-sql-sqlite
+#
+# Alternatively, to use Python3 and/or pyside 2, one of the following:
+#
+# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-sqlite
+# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-sqlite
+# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-sqlite
+#
+# fedora:
+#
+# $ sudo yum install python-pyside
+#
+# Alternatively, to use Python3 and/or pyside 2, one of the following:
+# $ sudo yum install python3-pyside
+# $ pip install --user PySide2
+# $ pip3 install --user PySide2
+#
+# An example of using this script with Intel PT:
+#
+# $ perf record -e intel_pt//u ls
+# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
+# 2017-07-31 14:26:07.326913 Creating database...
+# 2017-07-31 14:26:07.538097 Writing records...
+# 2017-07-31 14:26:09.889292 Adding indexes
+# 2017-07-31 14:26:09.958746 Done
+#
+# To browse the database, sqlite3 can be used e.g.
+#
+# $ sqlite3 pt_example
+# sqlite> .header on
+# sqlite> select * from samples_view where id < 10;
+# sqlite> .mode column
+# sqlite> select * from samples_view where id < 10;
+# sqlite> .tables
+# sqlite> .schema samples_view
+# sqlite> .quit
+#
+# An example of using the database is provided by the script
+# exported-sql-viewer.py. Refer to that script for details.
+#
+# The database structure is practically the same as that created by the script
+# export-to-postgresql.py. Refer to that script for details. A notable
+# difference is the 'transaction' column of the 'samples' table which is
+# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
+
+pyside_version_1 = True
+if "pyside-version-1" not in sys.argv:
+ try:
+ from PySide2.QtSql import *
+ pyside_version_1 = False
+ except:
+ pass
+
+if pyside_version_1:
+ from PySide.QtSql import *
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = False
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
+def usage():
+ printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
+ printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1")
+ raise Exception("Too few or bad arguments")
+
+if (len(sys.argv) < 2):
+ usage()
+
+dbname = sys.argv[1]
+
+if (len(sys.argv) >= 3):
+ columns = sys.argv[2]
+else:
+ columns = "all"
+
+if columns not in ("all", "branches"):
+ usage()
+
+branches = (columns == "branches")
+
+for i in range(3,len(sys.argv)):
+ if (sys.argv[i] == "calls"):
+ perf_db_export_calls = True
+ elif (sys.argv[i] == "callchains"):
+ perf_db_export_callchains = True
+ elif (sys.argv[i] == "pyside-version-1"):
+ pass
+ else:
+ usage()
+
+def do_query(q, s):
+ if (q.exec_(s)):
+ return
+ raise Exception("Query failed: " + q.lastError().text())
+
+def do_query_(q):
+ if (q.exec_()):
+ return
+ raise Exception("Query failed: " + q.lastError().text())
+
+printdate("Creating database...")
+
+db_exists = False
+try:
+ f = open(dbname)
+ f.close()
+ db_exists = True
+except:
+ pass
+
+if db_exists:
+ raise Exception(dbname + " already exists")
+
+db = QSqlDatabase.addDatabase('QSQLITE')
+db.setDatabaseName(dbname)
+db.open()
+
+query = QSqlQuery(db)
+
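+# Bulk-load friendly settings: turning the journal off and wrapping all of
+# the inserts in a single transaction avoids a sync after every statement.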
+do_query(query, 'PRAGMA journal_mode = OFF')
+do_query(query, 'BEGIN TRANSACTION')
+
+do_query(query, 'CREATE TABLE selected_events ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'name varchar(80))')
+do_query(query, 'CREATE TABLE machines ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'pid integer,'
+ 'root_dir varchar(4096))')
+do_query(query, 'CREATE TABLE threads ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'machine_id bigint,'
+ 'process_id bigint,'
+ 'pid integer,'
+ 'tid integer)')
+do_query(query, 'CREATE TABLE comms ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'comm varchar(16),'
+ 'c_thread_id bigint,'
+ 'c_time bigint,'
+ 'exec_flag boolean)')
+do_query(query, 'CREATE TABLE comm_threads ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'comm_id bigint,'
+ 'thread_id bigint)')
+do_query(query, 'CREATE TABLE dsos ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'machine_id bigint,'
+ 'short_name varchar(256),'
+ 'long_name varchar(4096),'
+ 'build_id varchar(64))')
+do_query(query, 'CREATE TABLE symbols ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'dso_id bigint,'
+ 'sym_start bigint,'
+ 'sym_end bigint,'
+ 'binding integer,'
+ 'name varchar(2048))')
+do_query(query, 'CREATE TABLE branch_types ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'name varchar(80))')
+
+if branches:
+ do_query(query, 'CREATE TABLE samples ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'evsel_id bigint,'
+ 'machine_id bigint,'
+ 'thread_id bigint,'
+ 'comm_id bigint,'
+ 'dso_id bigint,'
+ 'symbol_id bigint,'
+ 'sym_offset bigint,'
+ 'ip bigint,'
+ 'time bigint,'
+ 'cpu integer,'
+ 'to_dso_id bigint,'
+ 'to_symbol_id bigint,'
+ 'to_sym_offset bigint,'
+ 'to_ip bigint,'
+ 'branch_type integer,'
+ 'in_tx boolean,'
+ 'call_path_id bigint,'
+ 'insn_count bigint,'
+ 'cyc_count bigint)')
+else:
+ do_query(query, 'CREATE TABLE samples ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'evsel_id bigint,'
+ 'machine_id bigint,'
+ 'thread_id bigint,'
+ 'comm_id bigint,'
+ 'dso_id bigint,'
+ 'symbol_id bigint,'
+ 'sym_offset bigint,'
+ 'ip bigint,'
+ 'time bigint,'
+ 'cpu integer,'
+ 'to_dso_id bigint,'
+ 'to_symbol_id bigint,'
+ 'to_sym_offset bigint,'
+ 'to_ip bigint,'
+ 'period bigint,'
+ 'weight bigint,'
+ 'transaction_ bigint,'
+ 'data_src bigint,'
+ 'branch_type integer,'
+ 'in_tx boolean,'
+ 'call_path_id bigint,'
+ 'insn_count bigint,'
+ 'cyc_count bigint)')
+
+if perf_db_export_calls or perf_db_export_callchains:
+ do_query(query, 'CREATE TABLE call_paths ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'parent_id bigint,'
+ 'symbol_id bigint,'
+ 'ip bigint)')
+if perf_db_export_calls:
+ do_query(query, 'CREATE TABLE calls ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'thread_id bigint,'
+ 'comm_id bigint,'
+ 'call_path_id bigint,'
+ 'call_time bigint,'
+ 'return_time bigint,'
+ 'branch_count bigint,'
+ 'call_id bigint,'
+ 'return_id bigint,'
+ 'parent_call_path_id bigint,'
+ 'flags integer,'
+ 'parent_id bigint,'
+ 'insn_count bigint,'
+ 'cyc_count bigint)')
+
+do_query(query, 'CREATE TABLE ptwrite ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'payload bigint,'
+ 'exact_ip integer)')
+
+do_query(query, 'CREATE TABLE cbr ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'cbr integer,'
+ 'mhz integer,'
+ 'percent integer)')
+
+do_query(query, 'CREATE TABLE mwait ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'hints integer,'
+ 'extensions integer)')
+
+do_query(query, 'CREATE TABLE pwre ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'cstate integer,'
+ 'subcstate integer,'
+ 'hw integer)')
+
+do_query(query, 'CREATE TABLE exstop ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'exact_ip integer)')
+
+do_query(query, 'CREATE TABLE pwrx ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'deepest_cstate integer,'
+ 'last_cstate integer,'
+ 'wake_reason integer)')
+
+do_query(query, 'CREATE TABLE context_switches ('
+ 'id integer NOT NULL PRIMARY KEY,'
+ 'machine_id bigint,'
+ 'time bigint,'
+ 'cpu integer,'
+ 'thread_out_id bigint,'
+ 'comm_out_id bigint,'
+ 'thread_in_id bigint,'
+ 'comm_in_id bigint,'
+ 'flags integer)')
+
+# printf was added to sqlite in version 3.8.3
+sqlite_has_printf = False
+try:
+ do_query(query, 'SELECT printf("") FROM machines')
+ sqlite_has_printf = True
+except:
+ pass
+
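+# Wrap a column in printf("%x", ...) when available so that, for example,
+# emit_to_hex('ip') renders ip in hex in the views below; on sqlite older
+# than 3.8.3 the plain decimal column is used instead.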
+def emit_to_hex(x):
+ if sqlite_has_printf:
+ return 'printf("%x", ' + x + ')'
+ else:
+ return x
+
+do_query(query, 'CREATE VIEW machines_view AS '
+ 'SELECT '
+ 'id,'
+ 'pid,'
+ 'root_dir,'
+ 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+ ' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+ 'SELECT '
+ 'id,'
+ 'machine_id,'
+ '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+ 'short_name,'
+ 'long_name,'
+ 'build_id'
+ ' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+ 'SELECT '
+ 'id,'
+ 'name,'
+ '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+ 'dso_id,'
+ 'sym_start,'
+ 'sym_end,'
+ 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+ ' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+ 'SELECT '
+ 'id,'
+ 'machine_id,'
+ '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+ 'process_id,'
+ 'pid,'
+ 'tid'
+ ' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+ 'SELECT '
+ 'comm_id,'
+ '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+ 'thread_id,'
+ '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+ '(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+ ' FROM comm_threads')
+
+if perf_db_export_calls or perf_db_export_callchains:
+ do_query(query, 'CREATE VIEW call_paths_view AS '
+ 'SELECT '
+ 'c.id,'
+ + emit_to_hex('c.ip') + ' AS ip,'
+ 'c.symbol_id,'
+ '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+ '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+ '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
+ 'c.parent_id,'
+ + emit_to_hex('p.ip') + ' AS parent_ip,'
+ 'p.symbol_id AS parent_symbol_id,'
+ '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+ '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+ '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
+ ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
+ do_query(query, 'CREATE VIEW calls_view AS '
+ 'SELECT '
+ 'calls.id,'
+ 'thread_id,'
+ '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+ '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+ '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+ 'call_path_id,'
+ + emit_to_hex('ip') + ' AS ip,'
+ 'symbol_id,'
+ '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+ 'call_time,'
+ 'return_time,'
+ 'return_time - call_time AS elapsed_time,'
+ 'branch_count,'
+ 'insn_count,'
+ 'cyc_count,'
+ 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,'
+ 'call_id,'
+ 'return_id,'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
+ ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
+do_query(query, 'CREATE VIEW samples_view AS '
+ 'SELECT '
+ 'id,'
+ 'time,'
+ 'cpu,'
+ '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+ '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+ '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+ '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+ + emit_to_hex('ip') + ' AS ip_hex,'
+ '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+ 'sym_offset,'
+ '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+ + emit_to_hex('to_ip') + ' AS to_ip_hex,'
+ '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
+ 'to_sym_offset,'
+ '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
+ '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
+ 'in_tx,'
+ 'insn_count,'
+ 'cyc_count,'
+ 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC'
+ ' FROM samples')
+
+do_query(query, 'CREATE VIEW ptwrite_view AS '
+ 'SELECT '
+ 'ptwrite.id,'
+ 'time,'
+ 'cpu,'
+ + emit_to_hex('payload') + ' AS payload_hex,'
+ 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
+ ' FROM ptwrite'
+ ' INNER JOIN samples ON samples.id = ptwrite.id')
+
+do_query(query, 'CREATE VIEW cbr_view AS '
+ 'SELECT '
+ 'cbr.id,'
+ 'time,'
+ 'cpu,'
+ 'cbr,'
+ 'mhz,'
+ 'percent'
+ ' FROM cbr'
+ ' INNER JOIN samples ON samples.id = cbr.id')
+
+do_query(query, 'CREATE VIEW mwait_view AS '
+ 'SELECT '
+ 'mwait.id,'
+ 'time,'
+ 'cpu,'
+ + emit_to_hex('hints') + ' AS hints_hex,'
+ + emit_to_hex('extensions') + ' AS extensions_hex'
+ ' FROM mwait'
+ ' INNER JOIN samples ON samples.id = mwait.id')
+
+do_query(query, 'CREATE VIEW pwre_view AS '
+ 'SELECT '
+ 'pwre.id,'
+ 'time,'
+ 'cpu,'
+ 'cstate,'
+ 'subcstate,'
+ 'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw'
+ ' FROM pwre'
+ ' INNER JOIN samples ON samples.id = pwre.id')
+
+do_query(query, 'CREATE VIEW exstop_view AS '
+ 'SELECT '
+ 'exstop.id,'
+ 'time,'
+ 'cpu,'
+ 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
+ ' FROM exstop'
+ ' INNER JOIN samples ON samples.id = exstop.id')
+
+do_query(query, 'CREATE VIEW pwrx_view AS '
+ 'SELECT '
+ 'pwrx.id,'
+ 'time,'
+ 'cpu,'
+ 'deepest_cstate,'
+ 'last_cstate,'
+ 'CASE WHEN wake_reason=1 THEN \'Interrupt\''
+ ' WHEN wake_reason=2 THEN \'Timer Deadline\''
+ ' WHEN wake_reason=4 THEN \'Monitored Address\''
+ ' WHEN wake_reason=8 THEN \'HW\''
+ ' ELSE wake_reason '
+ 'END AS wake_reason'
+ ' FROM pwrx'
+ ' INNER JOIN samples ON samples.id = pwrx.id')
+
+do_query(query, 'CREATE VIEW power_events_view AS '
+ 'SELECT '
+ 'samples.id,'
+ 'time,'
+ 'cpu,'
+ 'selected_events.name AS event,'
+ 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,'
+ 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,'
+ 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,'
+ 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,'
+ 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,'
+ 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,'
+ 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,'
+ 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,'
+ 'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,'
+ 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,'
+ 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,'
+ 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT '
+ 'CASE WHEN wake_reason=1 THEN \'Interrupt\''
+ ' WHEN wake_reason=2 THEN \'Timer Deadline\''
+ ' WHEN wake_reason=4 THEN \'Monitored Address\''
+ ' WHEN wake_reason=8 THEN \'HW\''
+ ' ELSE wake_reason '
+ 'END'
+ ' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason'
+ ' FROM samples'
+ ' INNER JOIN selected_events ON selected_events.id = evsel_id'
+ ' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')')
+
+do_query(query, 'CREATE VIEW context_switches_view AS '
+ 'SELECT '
+ 'context_switches.id,'
+ 'context_switches.machine_id,'
+ 'context_switches.time,'
+ 'context_switches.cpu,'
+ 'th_out.pid AS pid_out,'
+ 'th_out.tid AS tid_out,'
+ 'comm_out.comm AS comm_out,'
+ 'th_in.pid AS pid_in,'
+ 'th_in.tid AS tid_in,'
+ 'comm_in.comm AS comm_in,'
+ 'CASE WHEN context_switches.flags = 0 THEN \'in\''
+ ' WHEN context_switches.flags = 1 THEN \'out\''
+ ' WHEN context_switches.flags = 3 THEN \'out preempt\''
+ ' ELSE context_switches.flags '
+ 'END AS flags'
+ ' FROM context_switches'
+ ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id'
+ ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id'
+ ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id'
+ ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id')
+
+do_query(query, 'END TRANSACTION')
+
+evsel_query = QSqlQuery(db)
+evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
+machine_query = QSqlQuery(db)
+machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
+thread_query = QSqlQuery(db)
+thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
+comm_query = QSqlQuery(db)
+comm_query.prepare("INSERT INTO comms VALUES (?, ?, ?, ?, ?)")
+comm_thread_query = QSqlQuery(db)
+comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
+dso_query = QSqlQuery(db)
+dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
+symbol_query = QSqlQuery(db)
+symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
+branch_type_query = QSqlQuery(db)
+branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
+sample_query = QSqlQuery(db)
+if branches:
+ sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+else:
+ sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+if perf_db_export_calls or perf_db_export_callchains:
+ call_path_query = QSqlQuery(db)
+ call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
+if perf_db_export_calls:
+ call_query = QSqlQuery(db)
+ call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+ptwrite_query = QSqlQuery(db)
+ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)")
+cbr_query = QSqlQuery(db)
+cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)")
+mwait_query = QSqlQuery(db)
+mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)")
+pwre_query = QSqlQuery(db)
+pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)")
+exstop_query = QSqlQuery(db)
+exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)")
+pwrx_query = QSqlQuery(db)
+pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)")
+context_switch_query = QSqlQuery(db)
+context_switch_query.prepare("INSERT INTO context_switches VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+def trace_begin():
+ printdate("Writing records...")
+ do_query(query, 'BEGIN TRANSACTION')
+ # id == 0 means unknown. It is easier to create records for the unknowns than to replace the zeroes with NULLs
+ evsel_table(0, "unknown")
+ machine_table(0, 0, "unknown")
+ thread_table(0, 0, 0, -1, -1)
+ comm_table(0, "unknown", 0, 0, 0)
+ dso_table(0, 0, "unknown", "unknown", "")
+ symbol_table(0, 0, 0, 0, 0, "unknown")
+ sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ if perf_db_export_calls or perf_db_export_callchains:
+ call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+unhandled_count = 0
+
+def is_table_empty(table_name):
+ do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1')
+ if query.next():
+ return False
+ return True
+
+def drop(table_name):
+ do_query(query, 'DROP VIEW ' + table_name + '_view')
+ do_query(query, 'DROP TABLE ' + table_name)
+
+def trace_end():
+ do_query(query, 'END TRANSACTION')
+
+ printdate("Adding indexes")
+ if perf_db_export_calls:
+ do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
+ do_query(query, 'ALTER TABLE comms ADD has_calls boolean')
+ do_query(query, 'UPDATE comms SET has_calls = 1 WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)')
+
+ printdate("Dropping unused tables")
+ if is_table_empty("ptwrite"):
+ drop("ptwrite")
+ if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
+ do_query(query, 'DROP VIEW power_events_view')
+ drop("mwait")
+ drop("pwre")
+ drop("exstop")
+ drop("pwrx")
+ if is_table_empty("cbr"):
+ drop("cbr")
+ if is_table_empty("context_switches"):
+ drop("context_switches")
+
+ if (unhandled_count):
+ printdate("Warning:", unhandled_count, "unhandled events")
+ printdate("Done")
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ global unhandled_count
+ unhandled_count += 1
+
+def sched__sched_switch(*x):
+ pass
+
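+# Bind the first n values of x to the prepared query q and execute it; for
+# example, evsel_table(0, "unknown") effectively executes
+#   INSERT INTO selected_events VALUES ('0', 'unknown')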
+def bind_exec(q, n, x):
+ for xx in x[0:n]:
+ q.addBindValue(str(xx))
+ do_query_(q)
+
+def evsel_table(*x):
+ bind_exec(evsel_query, 2, x)
+
+def machine_table(*x):
+ bind_exec(machine_query, 3, x)
+
+def thread_table(*x):
+ bind_exec(thread_query, 5, x)
+
+def comm_table(*x):
+ bind_exec(comm_query, 5, x)
+
+def comm_thread_table(*x):
+ bind_exec(comm_thread_query, 3, x)
+
+def dso_table(*x):
+ bind_exec(dso_query, 5, x)
+
+def symbol_table(*x):
+ bind_exec(symbol_query, 6, x)
+
+def branch_type_table(*x):
+ bind_exec(branch_type_query, 2, x)
+
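+# In branches mode, x[15:19] (period, weight, transaction and data_src) are
+# skipped so that the bound values match the 20-column branches schema.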
+def sample_table(*x):
+ if branches:
+ for xx in x[0:15]:
+ sample_query.addBindValue(str(xx))
+ for xx in x[19:24]:
+ sample_query.addBindValue(str(xx))
+ do_query_(sample_query)
+ else:
+ bind_exec(sample_query, 24, x)
+
+def call_path_table(*x):
+ bind_exec(call_path_query, 4, x)
+
+def call_return_table(*x):
+ bind_exec(call_query, 14, x)
+
+def ptwrite(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ flags = data[0]
+ payload = data[1]
+ exact_ip = flags & 1
+ ptwrite_query.addBindValue(str(id))
+ ptwrite_query.addBindValue(str(payload))
+ ptwrite_query.addBindValue(str(exact_ip))
+ do_query_(ptwrite_query)
+
+def cbr(id, raw_buf):
+ data = struct.unpack_from("<BBBBII", raw_buf)
+ cbr = data[0]
+ MHz = (data[4] + 500) / 1000
+ percent = ((cbr * 1000 / data[2]) + 5) / 10
+ cbr_query.addBindValue(str(id))
+ cbr_query.addBindValue(str(cbr))
+ cbr_query.addBindValue(str(int(MHz))) # int() for Python 3, where / yields a float
+ cbr_query.addBindValue(str(int(percent)))
+ do_query_(cbr_query)
+
+def mwait(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ hints = payload & 0xff
+ extensions = (payload >> 32) & 0x3
+ mwait_query.addBindValue(str(id))
+ mwait_query.addBindValue(str(hints))
+ mwait_query.addBindValue(str(extensions))
+ do_query_(mwait_query)
+
+def pwre(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ hw = (payload >> 7) & 1
+ cstate = (payload >> 12) & 0xf
+ subcstate = (payload >> 8) & 0xf
+ pwre_query.addBindValue(str(id))
+ pwre_query.addBindValue(str(cstate))
+ pwre_query.addBindValue(str(subcstate))
+ pwre_query.addBindValue(str(hw))
+ do_query_(pwre_query)
+
+def exstop(id, raw_buf):
+ data = struct.unpack_from("<I", raw_buf)
+ flags = data[0]
+ exact_ip = flags & 1
+ exstop_query.addBindValue(str(id))
+ exstop_query.addBindValue(str(exact_ip))
+ do_query_(exstop_query)
+
+def pwrx(id, raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ deepest_cstate = payload & 0xf
+ last_cstate = (payload >> 4) & 0xf
+ wake_reason = (payload >> 8) & 0xf
+ pwrx_query.addBindValue(str(id))
+ pwrx_query.addBindValue(str(deepest_cstate))
+ pwrx_query.addBindValue(str(last_cstate))
+ pwrx_query.addBindValue(str(wake_reason))
+ do_query_(pwrx_query)
+
+def synth_data(id, config, raw_buf, *x):
+ if config == 0:
+ ptwrite(id, raw_buf)
+ elif config == 1:
+ mwait(id, raw_buf)
+ elif config == 2:
+ pwre(id, raw_buf)
+ elif config == 3:
+ exstop(id, raw_buf)
+ elif config == 4:
+ pwrx(id, raw_buf)
+ elif config == 5:
+ cbr(id, raw_buf)
+
+def context_switch_table(*x):
+ bind_exec(context_switch_query, 9, x)
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
new file mode 100755
index 000000000..711d4f9f5
--- /dev/null
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -0,0 +1,5114 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# exported-sql-viewer.py: view data from sql database
+# Copyright (c) 2014-2018, Intel Corporation.
+
+# To use this script you will need to have exported data using either the
+# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
+# scripts for details.
+#
+# Following on from the example in the export scripts, a
+# call-graph can be displayed for the pt_example database like this:
+#
+# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
+#
+# Note that for PostgreSQL, this script supports connecting to remote databases
+# by setting hostname, port, username, password, and dbname, e.g.
+#
+# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
+#
+# The result is a GUI window with a tree representing a context-sensitive
+# call-graph. Expanding a couple of levels of the tree and adjusting column
+# widths to suit will display something like:
+#
+# Call Graph: pt_example
+# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
+# v- ls
+# v- 2638:2638
+# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
+# |- unknown unknown 1 13198 0.1 1 0.0
+# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
+# >- _dl_init_internal ld-2.19.so 1 448152 4.4 11094 5.3
+# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
+# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
+# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
+# >- __libc_csu_init ls 1 10354 0.1 10 0.0
+# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
+# v- main ls 1 8182043 99.6 180254 99.9
+#
+# Points to note:
+# The top level is a command name (comm)
+# The next level is a thread (pid:tid)
+# Subsequent levels are functions
+# 'Count' is the number of calls
+# 'Time' is the elapsed time until the function returns
+# Percentages are relative to the level above
+# 'Branch Count' is the total number of branches for that function and all
+# functions that it calls
+
+# There is also an "All branches" report, which displays branches and
+# possibly disassembly. However, at present the only supported disassembler is
+# Intel XED, and additionally the object code must be present in the perf
+# build ID cache. To use Intel XED, libxed.so must be present. To build and
+# install libxed.so:
+# git clone https://github.com/intelxed/mbuild.git mbuild
+# git clone https://github.com/intelxed/xed
+# cd xed
+# ./mfile.py --share
+# sudo ./mfile.py --prefix=/usr/local install
+# sudo ldconfig
+#
+# Example report:
+#
+# Time CPU Command PID TID Branch Type In Tx Branch
+# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
+# 7fab593ea260 48 89 e7 mov %rsp, %rdi
+# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
+# 7fab593ea260 48 89 e7 mov %rsp, %rdi
+# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
+# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
+# 7fab593ea930 55 pushq %rbp
+# 7fab593ea931 48 89 e5 mov %rsp, %rbp
+# 7fab593ea934 41 57 pushq %r15
+# 7fab593ea936 41 56 pushq %r14
+# 7fab593ea938 41 55 pushq %r13
+# 7fab593ea93a 41 54 pushq %r12
+# 7fab593ea93c 53 pushq %rbx
+# 7fab593ea93d 48 89 fb mov %rdi, %rbx
+# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
+# 7fab593ea944 0f 31 rdtsc
+# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
+# 7fab593ea94a 89 c0 mov %eax, %eax
+# 7fab593ea94c 48 09 c2 or %rax, %rdx
+# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
+# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
+# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
+# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
+# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+
+from __future__ import print_function
+
+import sys
+# Only change warnings if the python -W option was not used
+if not sys.warnoptions:
+ import warnings
+ # PySide2 causes deprecation warnings, ignore them.
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
+import argparse
+import weakref
+import threading
+import string
+try:
+ # Python2
+ import cPickle as pickle
+ # size of a pickled integer, big enough to hold a record size
+ glb_nsz = 8
+except ImportError:
+ import pickle
+ glb_nsz = 16
+import re
+import os
+import random
+import copy
+import math
+
+pyside_version_1 = True
+if "--pyside-version-1" not in sys.argv:
+ try:
+ from PySide2.QtCore import *
+ from PySide2.QtGui import *
+ from PySide2.QtSql import *
+ from PySide2.QtWidgets import *
+ pyside_version_1 = False
+ except:
+ pass
+
+if pyside_version_1:
+ from PySide.QtCore import *
+ from PySide.QtGui import *
+ from PySide.QtSql import *
+
+from decimal import Decimal, ROUND_HALF_UP
+from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
+ c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
+from multiprocessing import Process, Array, Value, Event
+
+# xrange is range in Python3
+try:
+ xrange
+except NameError:
+ xrange = range
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
+# Data formatting helpers
+
+def tohex(ip):
+ if ip < 0:
+ ip += 1 << 64
+ return "%x" % ip
+
+def offstr(offset):
+ if offset:
+ return "+0x%x" % offset
+ return ""
+
+def dsoname(name):
+ if name == "[kernel.kallsyms]":
+ return "[kernel]"
+ return name
+
+def findnth(s, sub, n, offs=0):
+ pos = s.find(sub)
+ if pos < 0:
+ return pos
+ if n <= 1:
+ return offs + pos
+ return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
+
+# Percent to one decimal place
+
+def PercentToOneDP(n, d):
+ if not d:
+ return "0.0"
+ x = (n * Decimal(100)) / d
+ return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
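+
+# Illustrative examples of the helpers above:
+#   tohex(-1)                -> "ffffffffffffffff" (wrapped modulo 2**64)
+#   offstr(0x1f)             -> "+0x1f"; offstr(0) -> ""
+#   findnth("a.b.c", ".", 2) -> 3 (index of the second occurrence)
+#   PercentToOneDP(1, 3)     -> "33.3" (ROUND_HALF_UP to one decimal place)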
+
+# Helper for queries that must not fail
+
+def QueryExec(query, stmt):
+ ret = query.exec_(stmt)
+ if not ret:
+ raise Exception("Query failed: " + query.lastError().text())
+
+# Background thread
+
+class Thread(QThread):
+
+ done = Signal(object)
+
+ def __init__(self, task, param=None, parent=None):
+ super(Thread, self).__init__(parent)
+ self.task = task
+ self.param = param
+
+ def run(self):
+ while True:
+ if self.param is None:
+ done, result = self.task()
+ else:
+ done, result = self.task(self.param)
+ self.done.emit(result)
+ if done:
+ break
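+
+# Usage sketch (illustrative): the task function returns (done, result) and
+# is called repeatedly, with each result emitted to the UI thread via the
+# 'done' signal, until done is True:
+#   thread = Thread(self.FindThread, context[0])
+#   thread.done.connect(handler, Qt.QueuedConnection)
+#   thread.start()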
+
+# Tree data model
+
+class TreeModel(QAbstractItemModel):
+
+ def __init__(self, glb, params, parent=None):
+ super(TreeModel, self).__init__(parent)
+ self.glb = glb
+ self.params = params
+ self.root = self.GetRoot()
+ self.last_row_read = 0
+
+ def Item(self, parent):
+ if parent.isValid():
+ return parent.internalPointer()
+ else:
+ return self.root
+
+ def rowCount(self, parent):
+ result = self.Item(parent).childCount()
+ if result < 0:
+ result = 0
+ self.dataChanged.emit(parent, parent)
+ return result
+
+ def hasChildren(self, parent):
+ return self.Item(parent).hasChildren()
+
+ def headerData(self, section, orientation, role):
+ if role == Qt.TextAlignmentRole:
+ return self.columnAlignment(section)
+ if role != Qt.DisplayRole:
+ return None
+ if orientation != Qt.Horizontal:
+ return None
+ return self.columnHeader(section)
+
+ def parent(self, child):
+ child_item = child.internalPointer()
+ if child_item is self.root:
+ return QModelIndex()
+ parent_item = child_item.getParentItem()
+ return self.createIndex(parent_item.getRow(), 0, parent_item)
+
+ def index(self, row, column, parent):
+ child_item = self.Item(parent).getChildItem(row)
+ return self.createIndex(row, column, child_item)
+
+ def DisplayData(self, item, index):
+ return item.getData(index.column())
+
+ def FetchIfNeeded(self, row):
+ if row > self.last_row_read:
+ self.last_row_read = row
+ if row + 10 >= self.root.child_count:
+ self.fetcher.Fetch(glb_chunk_sz)
+
+ def columnAlignment(self, column):
+ return Qt.AlignLeft
+
+ def columnFont(self, column):
+ return None
+
+ def data(self, index, role):
+ if role == Qt.TextAlignmentRole:
+ return self.columnAlignment(index.column())
+ if role == Qt.FontRole:
+ return self.columnFont(index.column())
+ if role != Qt.DisplayRole:
+ return None
+ item = index.internalPointer()
+ return self.DisplayData(item, index)
+
+# Table data model
+
+class TableModel(QAbstractTableModel):
+
+ def __init__(self, parent=None):
+ super(TableModel, self).__init__(parent)
+ self.child_count = 0
+ self.child_items = []
+ self.last_row_read = 0
+
+ def Item(self, parent):
+ if parent.isValid():
+ return parent.internalPointer()
+ else:
+ return self
+
+ def rowCount(self, parent):
+ return self.child_count
+
+ def headerData(self, section, orientation, role):
+ if role == Qt.TextAlignmentRole:
+ return self.columnAlignment(section)
+ if role != Qt.DisplayRole:
+ return None
+ if orientation != Qt.Horizontal:
+ return None
+ return self.columnHeader(section)
+
+ def index(self, row, column, parent):
+ return self.createIndex(row, column, self.child_items[row])
+
+ def DisplayData(self, item, index):
+ return item.getData(index.column())
+
+ def FetchIfNeeded(self, row):
+ if row > self.last_row_read:
+ self.last_row_read = row
+ if row + 10 >= self.child_count:
+ self.fetcher.Fetch(glb_chunk_sz)
+
+ def columnAlignment(self, column):
+ return Qt.AlignLeft
+
+ def columnFont(self, column):
+ return None
+
+ def data(self, index, role):
+ if role == Qt.TextAlignmentRole:
+ return self.columnAlignment(index.column())
+ if role == Qt.FontRole:
+ return self.columnFont(index.column())
+ if role != Qt.DisplayRole:
+ return None
+ item = index.internalPointer()
+ return self.DisplayData(item, index)
+
+# Model cache
+
+model_cache = weakref.WeakValueDictionary()
+model_cache_lock = threading.Lock()
+
+def LookupCreateModel(model_name, create_fn):
+ model_cache_lock.acquire()
+ try:
+ model = model_cache[model_name]
+ except KeyError:
+ model = None
+ if model is None:
+ model = create_fn()
+ model_cache[model_name] = model
+ model_cache_lock.release()
+ return model
+
+def LookupModel(model_name):
+ model_cache_lock.acquire()
+ try:
+ model = model_cache[model_name]
+ except KeyError:
+ model = None
+ model_cache_lock.release()
+ return model
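+
+# For example (illustrative), a window wanting to share one model instance
+# per name would do:
+#   model = LookupCreateModel("Call Tree", lambda: CallTreeModel(glb))
+# The WeakValueDictionary drops a cached model once no view references it.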
+
+# Find bar
+
+class FindBar():
+
+ def __init__(self, parent, finder, is_reg_expr=False):
+ self.finder = finder
+ self.context = []
+ self.last_value = None
+ self.last_pattern = None
+
+ label = QLabel("Find:")
+ label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.textbox = QComboBox()
+ self.textbox.setEditable(True)
+ self.textbox.currentIndexChanged.connect(self.ValueChanged)
+
+ self.progress = QProgressBar()
+ self.progress.setRange(0, 0)
+ self.progress.hide()
+
+ if is_reg_expr:
+ self.pattern = QCheckBox("Regular Expression")
+ else:
+ self.pattern = QCheckBox("Pattern")
+ self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.next_button = QToolButton()
+ self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
+ self.next_button.released.connect(lambda: self.NextPrev(1))
+
+ self.prev_button = QToolButton()
+ self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
+ self.prev_button.released.connect(lambda: self.NextPrev(-1))
+
+ self.close_button = QToolButton()
+ self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
+ self.close_button.released.connect(self.Deactivate)
+
+ self.hbox = QHBoxLayout()
+ self.hbox.setContentsMargins(0, 0, 0, 0)
+
+ self.hbox.addWidget(label)
+ self.hbox.addWidget(self.textbox)
+ self.hbox.addWidget(self.progress)
+ self.hbox.addWidget(self.pattern)
+ self.hbox.addWidget(self.next_button)
+ self.hbox.addWidget(self.prev_button)
+ self.hbox.addWidget(self.close_button)
+
+ self.bar = QWidget()
+ self.bar.setLayout(self.hbox)
+ self.bar.hide()
+
+ def Widget(self):
+ return self.bar
+
+ def Activate(self):
+ self.bar.show()
+ self.textbox.lineEdit().selectAll()
+ self.textbox.setFocus()
+
+ def Deactivate(self):
+ self.bar.hide()
+
+ def Busy(self):
+ self.textbox.setEnabled(False)
+ self.pattern.hide()
+ self.next_button.hide()
+ self.prev_button.hide()
+ self.progress.show()
+
+ def Idle(self):
+ self.textbox.setEnabled(True)
+ self.progress.hide()
+ self.pattern.show()
+ self.next_button.show()
+ self.prev_button.show()
+
+ def Find(self, direction):
+ value = self.textbox.currentText()
+ pattern = self.pattern.isChecked()
+ self.last_value = value
+ self.last_pattern = pattern
+ self.finder.Find(value, direction, pattern, self.context)
+
+ def ValueChanged(self):
+ value = self.textbox.currentText()
+ pattern = self.pattern.isChecked()
+ index = self.textbox.currentIndex()
+ data = self.textbox.itemData(index)
+ # Store the pattern in the combo box to keep it with the text value
+ if data is None:
+ self.textbox.setItemData(index, pattern)
+ else:
+ self.pattern.setChecked(data)
+ self.Find(0)
+
+ def NextPrev(self, direction):
+ value = self.textbox.currentText()
+ pattern = self.pattern.isChecked()
+ if value != self.last_value:
+ index = self.textbox.findText(value)
+ # Allow for a button press before the value has been added to the combo box
+ if index < 0:
+ index = self.textbox.count()
+ self.textbox.addItem(value, pattern)
+ self.textbox.setCurrentIndex(index)
+ return
+ else:
+ self.textbox.setItemData(index, pattern)
+ elif pattern != self.last_pattern:
+ # Keep the pattern recorded in the combo box up to date
+ index = self.textbox.currentIndex()
+ self.textbox.setItemData(index, pattern)
+ self.Find(direction)
+
+ def NotFound(self):
+ QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
+
+# Context-sensitive call graph data model item base
+
+class CallGraphLevelItemBase(object):
+
+ def __init__(self, glb, params, row, parent_item):
+ self.glb = glb
+ self.params = params
+ self.row = row
+ self.parent_item = parent_item
+ self.query_done = False
+ self.child_count = 0
+ self.child_items = []
+ if parent_item:
+ self.level = parent_item.level + 1
+ else:
+ self.level = 0
+
+ def getChildItem(self, row):
+ return self.child_items[row]
+
+ def getParentItem(self):
+ return self.parent_item
+
+ def getRow(self):
+ return self.row
+
+ def childCount(self):
+ if not self.query_done:
+ self.Select()
+ if not self.child_count:
+ return -1
+ return self.child_count
+
+ def hasChildren(self):
+ if not self.query_done:
+ return True
+ return self.child_count > 0
+
+ def getData(self, column):
+ return self.data[column]
+
+# Context-sensitive call graph data model level 2+ item base
+
+class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
+ super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.call_path_id = call_path_id
+ self.insn_cnt = insn_cnt
+ self.cyc_cnt = cyc_cnt
+ self.branch_count = branch_count
+ self.time = time
+
+ def Select(self):
+ self.query_done = True
+ query = QSqlQuery(self.glb.db)
+ if self.params.have_ipc:
+ ipc_str = ", SUM(insn_count), SUM(cyc_count)"
+ else:
+ ipc_str = ""
+ QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE parent_call_path_id = " + str(self.call_path_id) +
+ " AND comm_id = " + str(self.comm_id) +
+ " AND thread_id = " + str(self.thread_id) +
+ " GROUP BY call_path_id, name, short_name"
+ " ORDER BY call_path_id")
+ while query.next():
+ if self.params.have_ipc:
+ insn_cnt = int(query.value(5))
+ cyc_cnt = int(query.value(6))
+ branch_count = int(query.value(7))
+ else:
+ insn_cnt = 0
+ cyc_cnt = 0
+ branch_count = int(query.value(5))
+ child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
+ self.child_items.append(child_item)
+ self.child_count += 1
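+
+# For illustration, with have_ipc true and e.g. call_path_id=1, comm_id=2
+# and thread_id=3, the statement built above is equivalent to:
+#   SELECT call_path_id, name, short_name, COUNT(calls.id),
+#          SUM(return_time - call_time), SUM(insn_count), SUM(cyc_count),
+#          SUM(branch_count)
+#   FROM calls
+#   INNER JOIN call_paths ON calls.call_path_id = call_paths.id
+#   INNER JOIN symbols ON call_paths.symbol_id = symbols.id
+#   INNER JOIN dsos ON symbols.dso_id = dsos.id
+#   WHERE parent_call_path_id = 1 AND comm_id = 2 AND thread_id = 3
+#   GROUP BY call_path_id, name, short_name
+#   ORDER BY call_path_id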
+
+# Context-sensitive call graph data model level three item
+
+class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
+
+ def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
+ super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
+ dso = dsoname(dso)
+ if self.params.have_ipc:
+ insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
+ cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
+ br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
+ ipc = CalcIPC(cyc_cnt, insn_cnt)
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
+ else:
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = call_path_id
+
+# Context-sensitive call graph data model level two item
+
+class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
+
+ def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item)
+ if self.params.have_ipc:
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
+ else:
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallGraphLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.insn_cnt += child_item.insn_cnt
+ self.cyc_cnt += child_item.cyc_cnt
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ if self.params.have_ipc:
+ child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
+ child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
+ child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
+ else:
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Context-sensitive call graph data model level one item
+
+class CallGraphLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, params, row, comm_id, comm, parent_item):
+ super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item)
+ if self.params.have_ipc:
+ self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
+ else:
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+ self.query_done = True
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Context-sensitive call graph data model root item
+
+class CallGraphRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, params):
+ super(CallGraphRootItem, self).__init__(glb, params, 0, None)
+ self.dbid = 0
+ self.query_done = True
+ if_has_calls = ""
+ if IsSelectable(glb.db, "comms", columns = "has_calls"):
+ if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call graph model parameters
+
+class CallGraphModelParams():
+
+ def __init__(self, glb, parent=None):
+ self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count")
+
+# Context-sensitive call graph data model base
+
+class CallGraphModelBase(TreeModel):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent)
+
+ def FindSelect(self, value, pattern, query):
+ if pattern:
+ # postgresql and sqlite pattern matching differences:
+ # postgresql LIKE is case sensitive but sqlite LIKE is not
+ # postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
+ # postgresql supports ILIKE which is case insensitive
+ # sqlite supports GLOB (text only) which uses * and ? and is case sensitive
+ if not self.glb.dbref.is_sqlite3:
+ # Escape % and _
+ s = value.replace("%", "\\%")
+ s = s.replace("_", "\\_")
+ # Translate * and ? into SQL LIKE pattern characters % and _
+ # (string.maketrans exists only on Python 2, str.maketrans on Python 3)
+ try:
+ trans = string.maketrans("*?", "%_")
+ except AttributeError:
+ trans = str.maketrans("*?", "%_")
+ match = " LIKE '" + str(s).translate(trans) + "'"
+ else:
+ match = " GLOB '" + str(value) + "'"
+ else:
+ match = " = '" + str(value) + "'"
+ self.DoFindSelect(query, match)
+
+ def Found(self, query, found):
+ if found:
+ return self.FindPath(query)
+ return []
+
+ def FindValue(self, value, pattern, query, last_value, last_pattern):
+ if last_value == value and pattern == last_pattern:
+ found = query.first()
+ else:
+ self.FindSelect(value, pattern, query)
+ found = query.next()
+ return self.Found(query, found)
+
+ def FindNext(self, query):
+ found = query.next()
+ if not found:
+ found = query.first()
+ return self.Found(query, found)
+
+ def FindPrev(self, query):
+ found = query.previous()
+ if not found:
+ found = query.last()
+ return self.Found(query, found)
+
+ def FindThread(self, c):
+ if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
+ ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
+ elif c.direction > 0:
+ ids = self.FindNext(c.query)
+ else:
+ ids = self.FindPrev(c.query)
+ return (True, ids)
+
+ def Find(self, value, direction, pattern, context, callback):
+ class Context():
+ def __init__(self, *x):
+ self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
+ def Update(self, *x):
+ self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
+ if len(context):
+ context[0].Update(value, direction, pattern)
+ else:
+ context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
+ # Use a thread so the UI is not blocked during the SELECT
+ thread = Thread(self.FindThread, context[0])
+ thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
+ thread.start()
+
+ def FindDone(self, thread, callback, ids):
+ callback(ids)
+
+# Context-sensitive call graph data model
+
+class CallGraphModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallGraphRootItem(self.glb, self.params)
+
+ def columnCount(self, parent=None):
+ if self.params.have_ipc:
+ return 12
+ else:
+ return 7
+
+ def columnHeader(self, column):
+ if self.params.have_ipc:
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
+ else:
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ if self.params.have_ipc:
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ else:
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE calls.id <> 0"
+ " AND symbols.name" + match +
+ " GROUP BY comm_id, thread_id, call_path_id"
+ " ORDER BY comm_id, thread_id, call_path_id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM call_paths"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ # The call path root is not used
+ if ids[0] == 1:
+ del ids[0]
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Call tree data model level 2+ item base
+
+class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
+ super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.calls_id = calls_id
+ self.call_time = call_time
+ self.time = time
+ self.insn_cnt = insn_cnt
+ self.cyc_cnt = cyc_cnt
+ self.branch_count = branch_count
+
+ def Select(self):
+ self.query_done = True
+ if self.calls_id == 0:
+ comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
+ else:
+ comm_thread = ""
+ if self.params.have_ipc:
+ ipc_str = ", insn_count, cyc_count"
+ else:
+ ipc_str = ""
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
+ " ORDER BY call_time, calls.id")
+ while query.next():
+ if self.params.have_ipc:
+ insn_cnt = int(query.value(5))
+ cyc_cnt = int(query.value(6))
+ branch_count = int(query.value(7))
+ else:
+ insn_cnt = 0
+ cyc_cnt = 0
+ branch_count = int(query.value(5))
+ child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model level three item
+
+class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item)
+ dso = dsoname(dso)
+ if self.params.have_ipc:
+ insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
+ cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
+ br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
+ ipc = CalcIPC(cyc_cnt, insn_cnt)
+ self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
+ else:
+ self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = calls_id
+
+# Call tree data model level two item
+
+class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, 0, parent_item)
+ if self.params.have_ipc:
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
+ else:
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallTreeLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.insn_cnt += child_item.insn_cnt
+ self.cyc_cnt += child_item.cyc_cnt
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ if self.params.have_ipc:
+ child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
+ child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
+ child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
+ else:
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Call tree data model level one item
+
+class CallTreeLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, params, row, comm_id, comm, parent_item):
+ super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item)
+ if self.params.have_ipc:
+ self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
+ else:
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+ self.query_done = True
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model root item
+
+class CallTreeRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, params):
+ super(CallTreeRootItem, self).__init__(glb, params, 0, None)
+ self.dbid = 0
+ self.query_done = True
+ if_has_calls = ""
+ if IsSelectable(glb.db, "comms", columns = "has_calls"):
+ if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call Tree data model
+
+class CallTreeModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallTreeRootItem(self.glb, self.params)
+
+ def columnCount(self, parent=None):
+ if self.params.have_ipc:
+ return 12
+ else:
+ return 7
+
+ def columnHeader(self, column):
+ if self.params.have_ipc:
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
+ else:
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ if self.params.have_ipc:
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ else:
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT calls.id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE calls.id <> 0"
+ " AND symbols.name" + match +
+ " ORDER BY comm_id, thread_id, call_time, calls.id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM calls"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Horizontal layout
+
+class HBoxLayout(QHBoxLayout):
+
+ def __init__(self, *children):
+ super(HBoxLayout, self).__init__()
+
+ self.layout().setContentsMargins(0, 0, 0, 0)
+ for child in children:
+ if child.isWidgetType():
+ self.layout().addWidget(child)
+ else:
+ self.layout().addLayout(child)
+
+# Vertical layout
+
+class VBoxLayout(QVBoxLayout):
+
+ def __init__(self, *children):
+ super(VBoxLayout, self).__init__()
+
+ self.layout().setContentsMargins(0, 0, 0, 0)
+ for child in children:
+ if child.isWidgetType():
+ self.layout().addWidget(child)
+ else:
+ self.layout().addLayout(child)
+
+# Vertical layout widget
+
+class VBox():
+
+ def __init__(self, *children):
+ self.vbox = QWidget()
+ self.vbox.setLayout(VBoxLayout(*children))
+
+ def Widget(self):
+ return self.vbox
+
+# Tree window base
+
+class TreeWindowBase(QMdiSubWindow):
+
+ def __init__(self, parent=None):
+ super(TreeWindowBase, self).__init__(parent)
+
+ self.model = None
+ self.find_bar = None
+
+ self.view = QTreeView()
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
+
+ self.context_menu = TreeContextMenu(self.view)
+
+ def DisplayFound(self, ids):
+ if not len(ids):
+ return False
+ parent = QModelIndex()
+ for dbid in ids:
+ found = False
+ n = self.model.rowCount(parent)
+ for row in xrange(n):
+ child = self.model.index(row, 0, parent)
+ if child.internalPointer().dbid == dbid:
+ found = True
+ self.view.setExpanded(parent, True)
+ self.view.setCurrentIndex(child)
+ parent = child
+ break
+ if not found:
+ break
+ return found
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.model.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, ids):
+ found = self.DisplayFound(ids)
+ self.find_bar.Idle()
+ if not found:
+ self.find_bar.NotFound()
+
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+# Call tree window
+
+class CallTreeWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None, thread_at_time=None):
+ super(CallTreeWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
+
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+
+ if thread_at_time:
+ self.DisplayThreadAtTime(*thread_at_time)
+
+ def DisplayThreadAtTime(self, comm_id, thread_id, time):
+ parent = QModelIndex()
+ for dbid in (comm_id, thread_id):
+ found = False
+ n = self.model.rowCount(parent)
+ for row in xrange(n):
+ child = self.model.index(row, 0, parent)
+ if child.internalPointer().dbid == dbid:
+ found = True
+ self.view.setExpanded(parent, True)
+ self.view.setCurrentIndex(child)
+ parent = child
+ break
+ if not found:
+ return
+ found = False
+ while True:
+ n = self.model.rowCount(parent)
+ if not n:
+ return
+ last_child = None
+ for row in xrange(n):
+ self.view.setExpanded(parent, True)
+ child = self.model.index(row, 0, parent)
+ child_call_time = child.internalPointer().call_time
+ if child_call_time < time:
+ last_child = child
+ elif child_call_time == time:
+ self.view.setCurrentIndex(child)
+ return
+ elif child_call_time > time:
+ break
+ if not last_child:
+ if not found:
+ child = self.model.index(0, 0, parent)
+ self.view.setExpanded(parent, True)
+ self.view.setCurrentIndex(child)
+ return
+ found = True
+ self.view.setExpanded(parent, True)
+ self.view.setCurrentIndex(last_child)
+ parent = last_child
+
+# ExecComm() gets the comm_id of the command string that was set when the process exec'd, i.e. the program name
+
+def ExecComm(db, thread_id, time):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT comm_threads.comm_id, comms.c_time, comms.exec_flag"
+ " FROM comm_threads"
+ " INNER JOIN comms ON comms.id = comm_threads.comm_id"
+ " WHERE comm_threads.thread_id = " + str(thread_id) +
+ " ORDER BY comms.c_time, comms.id")
+ first = None
+ last = None
+ while query.next():
+ if first is None:
+ first = query.value(0)
+ if query.value(2) and Decimal(query.value(1)) <= Decimal(time):
+ last = query.value(0)
+ if last is not None:
+ return last
+ return first
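+
+# Illustrative example: if a thread exec'd "bash" at time 100 and then "ls"
+# at time 200, ExecComm(db, thread_id, 150) returns the comm_id of "bash"
+# and ExecComm(db, thread_id, 250) returns the comm_id of "ls".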
+
+# Container for (x, y) data
+
+class XY():
+ def __init__(self, x=0, y=0):
+ self.x = x
+ self.y = y
+
+ def __str__(self):
+ return "XY({}, {})".format(str(self.x), str(self.y))
+
+# Container for sub-range data
+
+class Subrange():
+ def __init__(self, lo=0, hi=0):
+ self.lo = lo
+ self.hi = hi
+
+ def __str__(self):
+ return "Subrange({}, {})".format(str(self.lo), str(self.hi))
+
+# Graph data region base class
+
+class GraphDataRegion(object):
+
+ def __init__(self, key, title = "", ordinal = ""):
+ self.key = key
+ self.title = title
+ self.ordinal = ordinal
+
+# Function to sort GraphDataRegion
+
+def GraphDataRegionOrdinal(data_region):
+ return data_region.ordinal
+
+# Attributes for a graph region
+
+class GraphRegionAttribute():
+
+ def __init__(self, colour):
+ self.colour = colour
+
+# Switch graph data region represents a task
+
+class SwitchGraphDataRegion(GraphDataRegion):
+
+ def __init__(self, key, exec_comm_id, pid, tid, comm, thread_id, comm_id):
+ super(SwitchGraphDataRegion, self).__init__(key)
+
+ self.title = str(pid) + " / " + str(tid) + " " + comm
+ # Order graph legend within exec comm by pid / tid / time
+ self.ordinal = str(pid).rjust(16) + str(exec_comm_id).rjust(8) + str(tid).rjust(16)
+ self.exec_comm_id = exec_comm_id
+ self.pid = pid
+ self.tid = tid
+ self.comm = comm
+ self.thread_id = thread_id
+ self.comm_id = comm_id
+
+# Graph data point
+
+class GraphDataPoint():
+
+ def __init__(self, data, index, x, y, altx=None, alty=None, hregion=None, vregion=None):
+ self.data = data
+ self.index = index
+ self.x = x
+ self.y = y
+ self.altx = altx
+ self.alty = alty
+ self.hregion = hregion
+ self.vregion = vregion
+
+# Graph data (single graph) base class
+
+class GraphData(object):
+
+ def __init__(self, collection, xbase=Decimal(0), ybase=Decimal(0)):
+ self.collection = collection
+ self.points = []
+ self.xbase = xbase
+ self.ybase = ybase
+ self.title = ""
+
+ def AddPoint(self, x, y, altx=None, alty=None, hregion=None, vregion=None):
+ index = len(self.points)
+
+ x = float(Decimal(x) - self.xbase)
+ y = float(Decimal(y) - self.ybase)
+
+ self.points.append(GraphDataPoint(self, index, x, y, altx, alty, hregion, vregion))
+
+ def XToData(self, x):
+ return Decimal(x) + self.xbase
+
+ def YToData(self, y):
+ return Decimal(y) + self.ybase
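+
+# Note on the rebasing above: points are stored as floats relative to xbase
+# and ybase, so that large absolute values, such as nanosecond timestamps,
+# lose less precision in the float conversion; XToData() / YToData() add
+# the base back to recover the original Decimal value.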
+
+# Switch graph data (for one CPU)
+
+class SwitchGraphData(GraphData):
+
+ def __init__(self, db, collection, cpu, xbase):
+ super(SwitchGraphData, self).__init__(collection, xbase)
+
+ self.cpu = cpu
+ self.title = "CPU " + str(cpu)
+ self.SelectSwitches(db)
+
+ def SelectComms(self, db, thread_id, last_comm_id, start_time, end_time):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT id, c_time"
+ " FROM comms"
+ " WHERE c_thread_id = " + str(thread_id) +
+ " AND exec_flag = " + self.collection.glb.dbref.TRUE +
+ " AND c_time >= " + str(start_time) +
+ " AND c_time <= " + str(end_time) +
+ " ORDER BY c_time, id")
+ while query.next():
+ comm_id = query.value(0)
+ if comm_id == last_comm_id:
+ continue
+ time = query.value(1)
+ hregion = self.HRegion(db, thread_id, comm_id, time)
+ self.AddPoint(time, 1000, None, None, hregion)
+
+ def SelectSwitches(self, db):
+ last_time = None
+ last_comm_id = None
+ last_thread_id = None
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT time, thread_out_id, thread_in_id, comm_out_id, comm_in_id, flags"
+ " FROM context_switches"
+ " WHERE machine_id = " + str(self.collection.machine_id) +
+ " AND cpu = " + str(self.cpu) +
+ " ORDER BY time, id")
+ while query.next():
+ flags = int(query.value(5))
+ if flags & 1:
+ # Schedule-out: detect and add exec's
+ if last_thread_id == query.value(1) and last_comm_id is not None and last_comm_id != query.value(3):
+ self.SelectComms(db, last_thread_id, last_comm_id, last_time, query.value(0))
+ continue
+ # Schedule-in: add data point
+ if len(self.points) == 0:
+ start_time = self.collection.glb.StartTime(self.collection.machine_id)
+ hregion = self.HRegion(db, query.value(1), query.value(3), start_time)
+ self.AddPoint(start_time, 1000, None, None, hregion)
+ time = query.value(0)
+ comm_id = query.value(4)
+ thread_id = query.value(2)
+ hregion = self.HRegion(db, thread_id, comm_id, time)
+ self.AddPoint(time, 1000, None, None, hregion)
+ last_time = time
+ last_comm_id = comm_id
+ last_thread_id = thread_id
+
+ def NewHRegion(self, db, key, thread_id, comm_id, time):
+ exec_comm_id = ExecComm(db, thread_id, time)
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT pid, tid FROM threads WHERE id = " + str(thread_id))
+ if query.next():
+ pid = query.value(0)
+ tid = query.value(1)
+ else:
+ pid = -1
+ tid = -1
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT comm FROM comms WHERE id = " + str(comm_id))
+ if query.next():
+ comm = query.value(0)
+ else:
+ comm = ""
+ return SwitchGraphDataRegion(key, exec_comm_id, pid, tid, comm, thread_id, comm_id)
+
+ def HRegion(self, db, thread_id, comm_id, time):
+ key = str(thread_id) + ":" + str(comm_id)
+ hregion = self.collection.LookupHRegion(key)
+ if hregion is None:
+ hregion = self.NewHRegion(db, key, thread_id, comm_id, time)
+ self.collection.AddHRegion(key, hregion)
+ return hregion
+
+# Graph data collection (multiple related graphs) base class
+
+class GraphDataCollection(object):
+
+ def __init__(self, glb):
+ self.glb = glb
+ self.data = []
+ self.hregions = {}
+ self.xrangelo = None
+ self.xrangehi = None
+ self.yrangelo = None
+ self.yrangehi = None
+ self.dp = XY(0, 0)
+
+ def AddGraphData(self, data):
+ self.data.append(data)
+
+ def LookupHRegion(self, key):
+ if key in self.hregions:
+ return self.hregions[key]
+ return None
+
+ def AddHRegion(self, key, hregion):
+ self.hregions[key] = hregion
+
+# Switch graph data collection (SwitchGraphData for each CPU)
+
+class SwitchGraphDataCollection(GraphDataCollection):
+
+ def __init__(self, glb, db, machine_id):
+ super(SwitchGraphDataCollection, self).__init__(glb)
+
+ self.machine_id = machine_id
+ self.cpus = self.SelectCPUs(db)
+
+ self.xrangelo = glb.StartTime(machine_id)
+ self.xrangehi = glb.FinishTime(machine_id)
+
+ self.yrangelo = Decimal(0)
+ self.yrangehi = Decimal(1000)
+
+ for cpu in self.cpus:
+ self.AddGraphData(SwitchGraphData(db, self, cpu, self.xrangelo))
+
+ def SelectCPUs(self, db):
+ cpus = []
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT DISTINCT cpu"
+ " FROM context_switches"
+ " WHERE machine_id = " + str(self.machine_id))
+ while query.next():
+ cpus.append(int(query.value(0)))
+ return sorted(cpus)
+
+# Switch graph data graphics item displays the graphed data
+
+class SwitchGraphDataGraphicsItem(QGraphicsItem):
+
+ def __init__(self, data, graph_width, graph_height, attrs, event_handler, parent=None):
+ super(SwitchGraphDataGraphicsItem, self).__init__(parent)
+
+ self.data = data
+ self.graph_width = graph_width
+ self.graph_height = graph_height
+ self.attrs = attrs
+ self.event_handler = event_handler
+ self.setAcceptHoverEvents(True)
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.graph_width, self.graph_height)
+
+ def PaintPoint(self, painter, last, x):
+ if not(last is None or last.hregion.pid == 0 or x < self.attrs.subrange.x.lo):
+ if last.x < self.attrs.subrange.x.lo:
+ x0 = self.attrs.subrange.x.lo
+ else:
+ x0 = last.x
+ if x > self.attrs.subrange.x.hi:
+ x1 = self.attrs.subrange.x.hi
+ else:
+ x1 = x - 1
+ x0 = self.attrs.XToPixel(x0)
+ x1 = self.attrs.XToPixel(x1)
+
+ y0 = self.attrs.YToPixel(last.y)
+
+ colour = self.attrs.region_attributes[last.hregion.key].colour
+
+ width = x1 - x0 + 1
+ if width < 2:
+ painter.setPen(colour)
+ painter.drawLine(x0, self.graph_height - y0, x0, self.graph_height)
+ else:
+ painter.fillRect(x0, self.graph_height - y0, width, self.graph_height - 1, colour)
+
+ def paint(self, painter, option, widget):
+ last = None
+ for point in self.data.points:
+ self.PaintPoint(painter, last, point.x)
+ if point.x > self.attrs.subrange.x.hi:
+ break
+ last = point
+ self.PaintPoint(painter, last, self.attrs.subrange.x.hi + 1)
+
+ def BinarySearchPoint(self, target):
+ lower_pos = 0
+ higher_pos = len(self.data.points)
+ while True:
+ pos = int((lower_pos + higher_pos) / 2)
+ val = self.data.points[pos].x
+ if target >= val:
+ lower_pos = pos
+ else:
+ higher_pos = pos
+ if higher_pos <= lower_pos + 1:
+ return lower_pos
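+
+ # Illustrative example: with point x values [0, 10, 20], a target of 15
+ # returns index 1, i.e. the last point at or before the target. Targets
+ # below points[0].x are handled by the caller (XPixelToData).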
+
+ def XPixelToData(self, x):
+ x = self.attrs.PixelToX(x)
+ if x < self.data.points[0].x:
+ x = 0
+ pos = 0
+ low = True
+ else:
+ pos = self.BinarySearchPoint(x)
+ low = False
+ return (low, pos, self.data.XToData(x))
+
+ def EventToData(self, event):
+ no_data = (None,) * 4
+ if len(self.data.points) < 1:
+ return no_data
+ x = event.pos().x()
+ if x < 0:
+ return no_data
+ low0, pos0, time_from = self.XPixelToData(x)
+ low1, pos1, time_to = self.XPixelToData(x + 1)
+ hregions = set()
+ hregion_times = []
+ if not low1:
+ for i in xrange(pos0, pos1 + 1):
+ hregion = self.data.points[i].hregion
+ hregions.add(hregion)
+ if i == pos0:
+ time = time_from
+ else:
+ time = self.data.XToData(self.data.points[i].x)
+ hregion_times.append((hregion, time))
+ return (time_from, time_to, hregions, hregion_times)
+
+ def hoverMoveEvent(self, event):
+ time_from, time_to, hregions, hregion_times = self.EventToData(event)
+ if time_from is not None:
+ self.event_handler.PointEvent(self.data.cpu, time_from, time_to, hregions)
+
+ def hoverLeaveEvent(self, event):
+ self.event_handler.NoPointEvent()
+
+ def mousePressEvent(self, event):
+ if event.button() != Qt.RightButton:
+ super(SwitchGraphDataGraphicsItem, self).mousePressEvent(event)
+ return
+ time_from, time_to, hregions, hregion_times = self.EventToData(event)
+ if hregion_times:
+ self.event_handler.RightClickEvent(self.data.cpu, hregion_times, event.screenPos())
+
+# X-axis graphics item
+
+class XAxisGraphicsItem(QGraphicsItem):
+
+ def __init__(self, width, parent=None):
+ super(XAxisGraphicsItem, self).__init__(parent)
+
+ self.width = width
+ self.max_mark_sz = 4
+ self.height = self.max_mark_sz + 1
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def Step(self):
+ attrs = self.parentItem().attrs
+ subrange = attrs.subrange.x
+ t = subrange.hi - subrange.lo
+ s = (3.0 * t) / self.width
+ n = 1.0
+ while s > n:
+ n = n * 10.0
+ return n
+
+ def PaintMarks(self, painter, at_y, lo, hi, step, i):
+ attrs = self.parentItem().attrs
+ x = lo
+ while x <= hi:
+ xp = attrs.XToPixel(x)
+ if i % 10:
+ if i % 5:
+ sz = 1
+ else:
+ sz = 2
+ else:
+ sz = self.max_mark_sz
+ i = 0
+ painter.drawLine(xp, at_y, xp, at_y + sz)
+ x += step
+ i += 1
+
+ def paint(self, painter, option, widget):
+ # Using QPainter::drawLine(int x1, int y1, int x2, int y2) so x2 = width - 1
+ painter.drawLine(0, 0, self.width - 1, 0)
+ n = self.Step()
+ attrs = self.parentItem().attrs
+ subrange = attrs.subrange.x
+ if subrange.lo:
+ x_offset = n - (subrange.lo % n)
+ else:
+ x_offset = 0.0
+ x = subrange.lo + x_offset
+ i = (x / n) % 10
+ self.PaintMarks(painter, 0, x, subrange.hi, n, i)
+
+ def ScaleDimensions(self):
+ n = self.Step()
+ attrs = self.parentItem().attrs
+ lo = attrs.subrange.x.lo
+ hi = (n * 10.0) + lo
+ width = attrs.XToPixel(hi)
+ if width > 500:
+ width = 0
+ return (n, lo, hi, width)
+
+ def PaintScale(self, painter, at_x, at_y):
+ n, lo, hi, width = self.ScaleDimensions()
+ if not width:
+ return
+ painter.drawLine(at_x, at_y, at_x + width, at_y)
+ self.PaintMarks(painter, at_y, lo, hi, n, 0)
+
+ def ScaleWidth(self):
+ n, lo, hi, width = self.ScaleDimensions()
+ return width
+
+ def ScaleHeight(self):
+ return self.height
+
+ def ScaleUnit(self):
+ return self.Step() * 10
+
+# Scale graphics item base class
+
+class ScaleGraphicsItem(QGraphicsItem):
+
+ def __init__(self, axis, parent=None):
+ super(ScaleGraphicsItem, self).__init__(parent)
+ self.axis = axis
+
+ def boundingRect(self):
+ scale_width = self.axis.ScaleWidth()
+ if not scale_width:
+ return QRectF()
+ return QRectF(0, 0, self.axis.ScaleWidth() + 100, self.axis.ScaleHeight())
+
+ def paint(self, painter, option, widget):
+ scale_width = self.axis.ScaleWidth()
+ if not scale_width:
+ return
+ self.axis.PaintScale(painter, 0, 5)
+ x = scale_width + 4
+ painter.drawText(QPointF(x, 10), self.Text())
+
+ def Unit(self):
+ return self.axis.ScaleUnit()
+
+ def Text(self):
+ return ""
+
+# Switch graph scale graphics item
+
+class SwitchScaleGraphicsItem(ScaleGraphicsItem):
+
+ def __init__(self, axis, parent=None):
+ super(SwitchScaleGraphicsItem, self).__init__(axis, parent)
+
+ def Text(self):
+ unit = self.Unit()
+ if unit >= 1000000000:
+ unit = int(unit / 1000000000)
+ us = "s"
+ elif unit >= 1000000:
+ unit = int(unit / 1000000)
+ us = "ms"
+ elif unit >= 1000:
+ unit = int(unit / 1000)
+ us = "us"
+ else:
+ unit = int(unit)
+ us = "ns"
+ return " = " + str(unit) + " " + us
+
+# Switch graph graphics item contains graph title, scale, x/y-axis, and the graphed data
+
+class SwitchGraphGraphicsItem(QGraphicsItem):
+
+ def __init__(self, collection, data, attrs, event_handler, first, parent=None):
+ super(SwitchGraphGraphicsItem, self).__init__(parent)
+ self.collection = collection
+ self.data = data
+ self.attrs = attrs
+ self.event_handler = event_handler
+
+ margin = 20
+ title_width = 50
+
+ self.title_graphics = QGraphicsSimpleTextItem(data.title, self)
+
+ self.title_graphics.setPos(margin, margin)
+ graph_width = attrs.XToPixel(attrs.subrange.x.hi) + 1
+ graph_height = attrs.YToPixel(attrs.subrange.y.hi) + 1
+
+ self.graph_origin_x = margin + title_width + margin
+ self.graph_origin_y = graph_height + margin
+
+ x_axis_size = 1
+ y_axis_size = 1
+ self.yline = QGraphicsLineItem(0, 0, 0, graph_height, self)
+
+ self.x_axis = XAxisGraphicsItem(graph_width, self)
+ self.x_axis.setPos(self.graph_origin_x, self.graph_origin_y + 1)
+
+ if first:
+ self.scale_item = SwitchScaleGraphicsItem(self.x_axis, self)
+ self.scale_item.setPos(self.graph_origin_x, self.graph_origin_y + 10)
+
+ self.yline.setPos(self.graph_origin_x - y_axis_size, self.graph_origin_y - graph_height)
+
+ self.axis_point = QGraphicsLineItem(0, 0, 0, 0, self)
+ self.axis_point.setPos(self.graph_origin_x - 1, self.graph_origin_y + 1)
+
+ self.width = self.graph_origin_x + graph_width + margin
+ self.height = self.graph_origin_y + margin
+
+ self.graph = SwitchGraphDataGraphicsItem(data, graph_width, graph_height, attrs, event_handler, self)
+ self.graph.setPos(self.graph_origin_x, self.graph_origin_y - graph_height)
+
+ if parent and 'EnableRubberBand' in dir(parent):
+ parent.EnableRubberBand(self.graph_origin_x, self.graph_origin_x + graph_width - 1, self)
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def paint(self, painter, option, widget):
+ pass
+
+ def RBXToPixel(self, x):
+ return self.attrs.PixelToX(x - self.graph_origin_x)
+
+ def RBXRangeToPixel(self, x0, x1):
+ return (self.RBXToPixel(x0), self.RBXToPixel(x1 + 1))
+
+ def RBPixelToTime(self, x):
+ if x < self.data.points[0].x:
+ return self.data.XToData(0)
+ return self.data.XToData(x)
+
+ def RBEventTimes(self, x0, x1):
+ x0, x1 = self.RBXRangeToPixel(x0, x1)
+ time_from = self.RBPixelToTime(x0)
+ time_to = self.RBPixelToTime(x1)
+ return (time_from, time_to)
+
+ def RBEvent(self, x0, x1):
+ time_from, time_to = self.RBEventTimes(x0, x1)
+ self.event_handler.RangeEvent(time_from, time_to)
+
+ def RBMoveEvent(self, x0, x1):
+ if x1 < x0:
+ x0, x1 = x1, x0
+ self.RBEvent(x0, x1)
+
+ def RBReleaseEvent(self, x0, x1, selection_state):
+ if x1 < x0:
+ x0, x1 = x1, x0
+ x0, x1 = self.RBXRangeToPixel(x0, x1)
+ self.event_handler.SelectEvent(x0, x1, selection_state)
+
+# Graphics item to draw a vertical bracket (used to highlight "forward" sub-range)
+
+class VerticalBracketGraphicsItem(QGraphicsItem):
+
+ def __init__(self, parent=None):
+ super(VerticalBracketGraphicsItem, self).__init__(parent)
+
+ self.width = 0
+ self.height = 0
+ self.hide()
+
+ def SetSize(self, width, height):
+ self.width = width + 1
+ self.height = height + 1
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def paint(self, painter, option, widget):
+ colour = QColor(255, 255, 0, 32)
+ painter.fillRect(0, 0, self.width, self.height, colour)
+ x1 = self.width - 1
+ y1 = self.height - 1
+ painter.drawLine(0, 0, x1, 0)
+ painter.drawLine(0, 0, 0, 3)
+ painter.drawLine(x1, 0, x1, 3)
+ painter.drawLine(0, y1, x1, y1)
+ painter.drawLine(0, y1, 0, y1 - 3)
+ painter.drawLine(x1, y1, x1, y1 - 3)
+
+# Graphics item to contain graphs arranged vertically
+
+class VertcalGraphSetGraphicsItem(QGraphicsItem):
+
+ def __init__(self, collection, attrs, event_handler, child_class, parent=None):
+ super(VertcalGraphSetGraphicsItem, self).__init__(parent)
+
+ self.collection = collection
+
+ self.top = 10
+
+ self.width = 0
+ self.height = self.top
+
+ self.rubber_band = None
+ self.rb_enabled = False
+
+ first = True
+ for data in collection.data:
+ child = child_class(collection, data, attrs, event_handler, first, self)
+ child.setPos(0, self.height + 1)
+ rect = child.boundingRect()
+ if rect.right() > self.width:
+ self.width = rect.right()
+ self.height = self.height + rect.bottom() + 1
+ first = False
+
+ self.bracket = VerticalBracketGraphicsItem(self)
+
+ def EnableRubberBand(self, xlo, xhi, rb_event_handler):
+ if self.rb_enabled:
+ return
+ self.rb_enabled = True
+ self.rb_in_view = False
+ self.setAcceptedMouseButtons(Qt.LeftButton)
+ self.rb_xlo = xlo
+ self.rb_xhi = xhi
+ self.rb_event_handler = rb_event_handler
+ self.mousePressEvent = self.MousePressEvent
+ self.mouseMoveEvent = self.MouseMoveEvent
+ self.mouseReleaseEvent = self.MouseReleaseEvent
+
+ def boundingRect(self):
+ return QRectF(0, 0, self.width, self.height)
+
+ def paint(self, painter, option, widget):
+ pass
+
+ def RubberBandParent(self):
+ scene = self.scene()
+ view = scene.views()[0]
+ viewport = view.viewport()
+ return viewport
+
+ def RubberBandSetGeometry(self, rect):
+ scene_rectf = self.mapRectToScene(QRectF(rect))
+ scene = self.scene()
+ view = scene.views()[0]
+ poly = view.mapFromScene(scene_rectf)
+ self.rubber_band.setGeometry(poly.boundingRect())
+
+ def SetSelection(self, selection_state):
+ if self.rubber_band:
+ if selection_state:
+ self.RubberBandSetGeometry(selection_state)
+ self.rubber_band.show()
+ else:
+ self.rubber_band.hide()
+
+ def SetBracket(self, rect):
+ if rect:
+ x, y, width, height = rect.x(), rect.y(), rect.width(), rect.height()
+ self.bracket.setPos(x, y)
+ self.bracket.SetSize(width, height)
+ self.bracket.show()
+ else:
+ self.bracket.hide()
+
+ def RubberBandX(self, event):
+ x = event.pos().toPoint().x()
+ if x < self.rb_xlo:
+ x = self.rb_xlo
+ elif x > self.rb_xhi:
+ x = self.rb_xhi
+ else:
+ self.rb_in_view = True
+ return x
+
+ def RubberBandRect(self, x):
+ if self.rb_origin.x() <= x:
+ width = x - self.rb_origin.x()
+ rect = QRect(self.rb_origin, QSize(width, self.height))
+ else:
+ width = self.rb_origin.x() - x
+ top_left = QPoint(self.rb_origin.x() - width, self.rb_origin.y())
+ rect = QRect(top_left, QSize(width, self.height))
+ return rect
+
+ def MousePressEvent(self, event):
+ self.rb_in_view = False
+ x = self.RubberBandX(event)
+ self.rb_origin = QPoint(x, self.top)
+ if self.rubber_band is None:
+ self.rubber_band = QRubberBand(QRubberBand.Rectangle, self.RubberBandParent())
+ self.RubberBandSetGeometry(QRect(self.rb_origin, QSize(0, self.height)))
+ if self.rb_in_view:
+ self.rubber_band.show()
+ self.rb_event_handler.RBMoveEvent(x, x)
+ else:
+ self.rubber_band.hide()
+
+ def MouseMoveEvent(self, event):
+ x = self.RubberBandX(event)
+ rect = self.RubberBandRect(x)
+ self.RubberBandSetGeometry(rect)
+ if self.rb_in_view:
+ self.rubber_band.show()
+ self.rb_event_handler.RBMoveEvent(self.rb_origin.x(), x)
+
+ def MouseReleaseEvent(self, event):
+ x = self.RubberBandX(event)
+ if self.rb_in_view:
+ selection_state = self.RubberBandRect(x)
+ else:
+ selection_state = None
+ self.rb_event_handler.RBReleaseEvent(self.rb_origin.x(), x, selection_state)
+
+# Switch graph legend data model
+
+class SwitchGraphLegendModel(QAbstractTableModel):
+
+ def __init__(self, collection, region_attributes, parent=None):
+ super(SwitchGraphLegendModel, self).__init__(parent)
+
+ self.region_attributes = region_attributes
+
+ self.child_items = sorted(collection.hregions.values(), key=GraphDataRegionOrdinal)
+ self.child_count = len(self.child_items)
+
+ self.highlight_set = set()
+
+ self.column_headers = ("pid", "tid", "comm")
+
+ def rowCount(self, parent):
+ return self.child_count
+
+ def headerData(self, section, orientation, role):
+ if role != Qt.DisplayRole:
+ return None
+ if orientation != Qt.Horizontal:
+ return None
+ return self.columnHeader(section)
+
+ def index(self, row, column, parent):
+ return self.createIndex(row, column, self.child_items[row])
+
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
+ def data(self, index, role):
+ if role == Qt.BackgroundRole:
+ child = self.child_items[index.row()]
+ if child in self.highlight_set:
+ return self.region_attributes[child.key].colour
+ return None
+ if role == Qt.ForegroundRole:
+ child = self.child_items[index.row()]
+ if child in self.highlight_set:
+ return QColor(255, 255, 255)
+ return self.region_attributes[child.key].colour
+ if role != Qt.DisplayRole:
+ return None
+ hregion = self.child_items[index.row()]
+ col = index.column()
+ if col == 0:
+ return hregion.pid
+ if col == 1:
+ return hregion.tid
+ if col == 2:
+ return hregion.comm
+ return None
+
+ def SetHighlight(self, row, set_highlight):
+ child = self.child_items[row]
+ top_left = self.createIndex(row, 0, child)
+ bottom_right = self.createIndex(row, len(self.column_headers) - 1, child)
+ self.dataChanged.emit(top_left, bottom_right)
+
+ def Highlight(self, highlight_set):
+ for row in xrange(self.child_count):
+ child = self.child_items[row]
+ if child in self.highlight_set:
+ if child not in highlight_set:
+ self.SetHighlight(row, False)
+ elif child in highlight_set:
+ self.SetHighlight(row, True)
+ self.highlight_set = highlight_set
+
+# Switch graph legend is a table
+
+class SwitchGraphLegend(QWidget):
+
+ def __init__(self, collection, region_attributes, parent=None):
+ super(SwitchGraphLegend, self).__init__(parent)
+
+ self.data_model = SwitchGraphLegendModel(collection, region_attributes)
+
+ self.model = QSortFilterProxyModel()
+ self.model.setSourceModel(self.data_model)
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+ self.view.sortByColumn(-1, Qt.AscendingOrder)
+ self.view.setSortingEnabled(True)
+ self.view.resizeColumnsToContents()
+ self.view.resizeRowsToContents()
+
+ self.vbox = VBoxLayout(self.view)
+ self.setLayout(self.vbox)
+
+ sz1 = self.view.columnWidth(0) + self.view.columnWidth(1) + self.view.columnWidth(2) + 2
+ sz1 = sz1 + self.view.verticalScrollBar().sizeHint().width()
+ self.saved_size = sz1
+
+ def resizeEvent(self, event):
+ self.saved_size = self.size().width()
+ super(SwitchGraphLegend, self).resizeEvent(event)
+
+ def Highlight(self, highlight_set):
+ self.data_model.Highlight(highlight_set)
+ self.update()
+
+ def changeEvent(self, event):
+ if event.type() == QEvent.FontChange:
+ self.view.resizeRowsToContents()
+ self.view.resizeColumnsToContents()
+ # Need to resize rows again after column resize
+ self.view.resizeRowsToContents()
+ super(SwitchGraphLegend, self).changeEvent(event)
+
+# Random colour generation
+
+def RGBColourTooLight(r, g, b):
+ if g > 230:
+ return True
+ if g <= 160:
+ return False
+ if r <= 180 and g <= 180:
+ return False
+ if r < 60:
+ return False
+ return True
+
+def GenerateColours(x):
+ cs = [0]
+ for i in xrange(1, x):
+ cs.append(int((255.0 / i) + 0.5))
+ colours = []
+ for r in cs:
+ for g in cs:
+ for b in cs:
+ # Exclude black and colours that look too light against a white background
+ if (r, g, b) == (0, 0, 0) or RGBColourTooLight(r, g, b):
+ continue
+ colours.append(QColor(r, g, b))
+ return colours
+
+def GenerateNColours(n):
+ for x in xrange(2, n + 2):
+ colours = GenerateColours(x)
+ if len(colours) >= n:
+ return colours
+ return []
+
+def GenerateNRandomColours(n, seed):
+ colours = GenerateNColours(n)
+ random.seed(seed)
+ random.shuffle(colours)
+ return colours
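+
+ # Illustrative trace of the above (hand-computed, small n): GenerateNColours(3)
+ # first tries GenerateColours(2), i.e. channel values [0, 255]; of the 8
+ # combinations, black and the 4 with g = 255 (too light) are excluded,
+ # leaving 3 colours, which GenerateNRandomColours() then shuffles with a
+ # fixed seed so the same collection always gets the same colours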
+
+# Graph attributes, in particular the scale and subrange that change when zooming
+
+class GraphAttributes():
+
+ def __init__(self, scale, subrange, region_attributes, dp):
+ self.scale = scale
+ self.subrange = subrange
+ self.region_attributes = region_attributes
+ # Rounding avoids errors due to finite floating point precision
+ self.dp = dp # data decimal places
+ self.Update()
+
+ def XToPixel(self, x):
+ return int(round((x - self.subrange.x.lo) * self.scale.x, self.pdp.x))
+
+ def YToPixel(self, y):
+ return int(round((y - self.subrange.y.lo) * self.scale.y, self.pdp.y))
+
+ def PixelToXRounded(self, px):
+ return round((round(px, 0) / self.scale.x), self.dp.x) + self.subrange.x.lo
+
+ def PixelToYRounded(self, py):
+ return round((round(py, 0) / self.scale.y), self.dp.y) + self.subrange.y.lo
+
+ def PixelToX(self, px):
+ x = self.PixelToXRounded(px)
+ if self.pdp.x == 0:
+ rt = self.XToPixel(x)
+ if rt > px:
+ return x - 1
+ return x
+
+ def PixelToY(self, py):
+ y = self.PixelToYRounded(py)
+ if self.pdp.y == 0:
+ rt = self.YToPixel(y)
+ if rt > py:
+ return y - 1
+ return y
+
+ def ToPDP(self, dp, scale):
+ # Calculate pixel decimal places:
+ # (10 ** dp) is the minimum delta in the data
+ # scale it to get the minimum delta in pixels
+ # log10 gives the number of decimal places negatively
+ # subtract 1 to divide by 10
+ # round to the lower negative number
+ # change the sign to get the number of decimal places as a positive number
+ x = math.log10((10 ** dp) * scale)
+ if x < 0:
+ x -= 1
+ x = -int(math.floor(x) - 0.1)
+ else:
+ x = 0
+ return x
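+
+ # Worked example for ToPDP() (hand-computed, illustrative values): with
+ # dp = 0 (integer data) and scale = 0.001 pixels per unit,
+ # log10(1 * 0.001) = -3; subtract 1 to get -4, floor and negate to get
+ # pdp = 4, i.e. pixel positions are rounded to 4 decimal places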
+
+ def Update(self):
+ x = self.ToPDP(self.dp.x, self.scale.x)
+ y = self.ToPDP(self.dp.y, self.scale.y)
+ self.pdp = XY(x, y) # pixel decimal places
+
+# Switch graph splitter which divides the CPU graphs from the legend
+
+class SwitchGraphSplitter(QSplitter):
+
+ def __init__(self, parent=None):
+ super(SwitchGraphSplitter, self).__init__(parent)
+
+ self.first_time = False
+
+ def resizeEvent(self, ev):
+ if self.first_time:
+ self.first_time = False
+ sz1 = self.widget(1).view.columnWidth(0) + self.widget(1).view.columnWidth(1) + self.widget(1).view.columnWidth(2) + 2
+ sz1 = sz1 + self.widget(1).view.verticalScrollBar().sizeHint().width()
+ sz0 = self.size().width() - self.handleWidth() - sz1
+ self.setSizes([sz0, sz1])
+ elif self.widget(1).saved_size is not None:
+ sz1 = self.widget(1).saved_size
+ sz0 = self.size().width() - self.handleWidth() - sz1
+ self.setSizes([sz0, sz1])
+ super(SwitchGraphSplitter, self).resizeEvent(ev)
+
+# Graph widget base class
+
+class GraphWidget(QWidget):
+
+ graph_title_changed = Signal(object)
+
+ def __init__(self, parent=None):
+ super(GraphWidget, self).__init__(parent)
+
+ def GraphTitleChanged(self, title):
+ self.graph_title_changed.emit(title)
+
+ def Title(self):
+ return ""
+
+# Display time in s, ms, us or ns
+
+def ToTimeStr(val):
+ val = Decimal(val)
+ if val >= 1000000000:
+ return "{} s".format((val / 1000000000).quantize(Decimal("0.000000001")))
+ if val >= 1000000:
+ return "{} ms".format((val / 1000000).quantize(Decimal("0.000001")))
+ if val >= 1000:
+ return "{} us".format((val / 1000).quantize(Decimal("0.001")))
+ return "{} ns".format(val.quantize(Decimal("1")))
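+
+ # e.g. (hand-computed): ToTimeStr(999) -> "999 ns", ToTimeStr(1500) ->
+ # "1.500 us", ToTimeStr(2500000000) -> "2.500000000 s"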
+
+# Switch (i.e. context switch, i.e. Time Chart by CPU) graph widget, which contains the CPU graphs, the legend and the control buttons
+
+class SwitchGraphWidget(GraphWidget):
+
+ def __init__(self, glb, collection, parent=None):
+ super(SwitchGraphWidget, self).__init__(parent)
+
+ self.glb = glb
+ self.collection = collection
+
+ self.back_state = []
+ self.forward_state = []
+ self.selection_state = (None, None)
+ self.fwd_rect = None
+ self.start_time = self.glb.StartTime(collection.machine_id)
+
+ i = 0
+ hregions = collection.hregions.values()
+ colours = GenerateNRandomColours(len(hregions), 1013)
+ region_attributes = {}
+ for hregion in hregions:
+ if hregion.pid == 0 and hregion.tid == 0:
+ region_attributes[hregion.key] = GraphRegionAttribute(QColor(0, 0, 0))
+ else:
+ region_attributes[hregion.key] = GraphRegionAttribute(colours[i])
+ i = i + 1
+
+ # Default to entire range
+ xsubrange = Subrange(0.0, float(collection.xrangehi - collection.xrangelo) + 1.0)
+ ysubrange = Subrange(0.0, float(collection.yrangehi - collection.yrangelo) + 1.0)
+ subrange = XY(xsubrange, ysubrange)
+
+ scale = self.GetScaleForRange(subrange)
+
+ self.attrs = GraphAttributes(scale, subrange, region_attributes, collection.dp)
+
+ self.item = VertcalGraphSetGraphicsItem(collection, self.attrs, self, SwitchGraphGraphicsItem)
+
+ self.scene = QGraphicsScene()
+ self.scene.addItem(self.item)
+
+ self.view = QGraphicsView(self.scene)
+ self.view.centerOn(0, 0)
+ self.view.setAlignment(Qt.AlignLeft | Qt.AlignTop)
+
+ self.legend = SwitchGraphLegend(collection, region_attributes)
+
+ self.splitter = SwitchGraphSplitter()
+ self.splitter.addWidget(self.view)
+ self.splitter.addWidget(self.legend)
+
+ self.point_label = QLabel("")
+ self.point_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
+
+ self.back_button = QToolButton()
+ self.back_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))
+ self.back_button.setDisabled(True)
+ self.back_button.released.connect(lambda: self.Back())
+
+ self.forward_button = QToolButton()
+ self.forward_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight))
+ self.forward_button.setDisabled(True)
+ self.forward_button.released.connect(lambda: self.Forward())
+
+ self.zoom_button = QToolButton()
+ self.zoom_button.setText("Zoom")
+ self.zoom_button.setDisabled(True)
+ self.zoom_button.released.connect(lambda: self.Zoom())
+
+ self.hbox = HBoxLayout(self.back_button, self.forward_button, self.zoom_button, self.point_label)
+
+ self.vbox = VBoxLayout(self.splitter, self.hbox)
+
+ self.setLayout(self.vbox)
+
+ def GetScaleForRangeX(self, xsubrange):
+ # Default graph 1000 pixels wide
+ dflt = 1000.0
+ r = xsubrange.hi - xsubrange.lo
+ return dflt / r
+
+ def GetScaleForRangeY(self, ysubrange):
+ # Default graph 50 pixels high
+ dflt = 50.0
+ r = ysubrange.hi - ysubrange.lo
+ return dflt / r
+
+ def GetScaleForRange(self, subrange):
+ # Default graph 1000 pixels wide, 50 pixels high
+ xscale = self.GetScaleForRangeX(subrange.x)
+ yscale = self.GetScaleForRangeY(subrange.y)
+ return XY(xscale, yscale)
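+
+ # e.g. a 2 second (2e9 ns) x subrange gives xscale = 1000.0 / 2e9 = 5e-7
+ # pixels per ns, so a point 1 second into the range lands at pixel 500
+ # (illustrative numbers only)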
+
+ def PointEvent(self, cpu, time_from, time_to, hregions):
+ text = "CPU: " + str(cpu)
+ time_from = time_from.quantize(Decimal(1))
+ rel_time_from = time_from - self.glb.StartTime(self.collection.machine_id)
+ text = text + " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ")"
+ self.point_label.setText(text)
+ self.legend.Highlight(hregions)
+
+ def RightClickEvent(self, cpu, hregion_times, pos):
+ if not IsSelectable(self.glb.db, "calls", "WHERE parent_id >= 0"):
+ return
+ menu = QMenu(self.view)
+ for hregion, time in hregion_times:
+ thread_at_time = (hregion.exec_comm_id, hregion.thread_id, time)
+ menu_text = "Show Call Tree for {} {}:{} at {}".format(hregion.comm, hregion.pid, hregion.tid, time)
+ menu.addAction(CreateAction(menu_text, "Show Call Tree", lambda a=None, args=thread_at_time: self.RightClickSelect(args), self.view))
+ menu.exec_(pos)
+
+ def RightClickSelect(self, args):
+ CallTreeWindow(self.glb, self.glb.mainwindow, thread_at_time=args)
+
+ def NoPointEvent(self):
+ self.point_label.setText("")
+ self.legend.Highlight({})
+
+ def RangeEvent(self, time_from, time_to):
+ time_from = time_from.quantize(Decimal(1))
+ time_to = time_to.quantize(Decimal(1))
+ if time_to <= time_from:
+ self.point_label.setText("")
+ return
+ rel_time_from = time_from - self.start_time
+ rel_time_to = time_to - self.start_time
+ text = " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ") to: " + str(time_to) + " (+" + ToTimeStr(rel_time_to) + ")"
+ text = text + " duration: " + ToTimeStr(time_to - time_from)
+ self.point_label.setText(text)
+
+ def BackState(self):
+ return (self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect)
+
+ def PushBackState(self):
+ state = copy.deepcopy(self.BackState())
+ self.back_state.append(state)
+ self.back_button.setEnabled(True)
+
+ def PopBackState(self):
+ self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.back_state.pop()
+ self.attrs.Update()
+ if not self.back_state:
+ self.back_button.setDisabled(True)
+
+ def PushForwardState(self):
+ state = copy.deepcopy(self.BackState())
+ self.forward_state.append(state)
+ self.forward_button.setEnabled(True)
+
+ def PopForwardState(self):
+ self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.forward_state.pop()
+ self.attrs.Update()
+ if not self.forward_state:
+ self.forward_button.setDisabled(True)
+
+ def Title(self):
+ time_from = self.collection.xrangelo + Decimal(self.attrs.subrange.x.lo)
+ time_to = self.collection.xrangelo + Decimal(self.attrs.subrange.x.hi)
+ rel_time_from = time_from - self.start_time
+ rel_time_to = time_to - self.start_time
+ title = "+" + ToTimeStr(rel_time_from) + " to +" + ToTimeStr(rel_time_to)
+ title = title + " (" + ToTimeStr(time_to - time_from) + ")"
+ return title
+
+ def Update(self):
+ selected_subrange, selection_state = self.selection_state
+ self.item.SetSelection(selection_state)
+ self.item.SetBracket(self.fwd_rect)
+ self.zoom_button.setDisabled(selected_subrange is None)
+ self.GraphTitleChanged(self.Title())
+ self.item.update(self.item.boundingRect())
+
+ def Back(self):
+ if not self.back_state:
+ return
+ self.PushForwardState()
+ self.PopBackState()
+ self.Update()
+
+ def Forward(self):
+ if not self.forward_state:
+ return
+ self.PushBackState()
+ self.PopForwardState()
+ self.Update()
+
+ def SelectEvent(self, x0, x1, selection_state):
+ if selection_state is None:
+ selected_subrange = None
+ else:
+ if x1 - x0 < 1.0:
+ x1 += 1.0
+ selected_subrange = Subrange(x0, x1)
+ self.selection_state = (selected_subrange, selection_state)
+ self.zoom_button.setDisabled(selected_subrange is None)
+
+ def Zoom(self):
+ selected_subrange, selection_state = self.selection_state
+ if selected_subrange is None:
+ return
+ self.fwd_rect = selection_state
+ self.item.SetSelection(None)
+ self.PushBackState()
+ self.attrs.subrange.x = selected_subrange
+ self.forward_state = []
+ self.forward_button.setDisabled(True)
+ self.selection_state = (None, None)
+ self.fwd_rect = None
+ self.attrs.scale.x = self.GetScaleForRangeX(self.attrs.subrange.x)
+ self.attrs.Update()
+ self.Update()
+
+# Slow initialization - perform non-GUI initialization in a separate thread and put up a modal message box while waiting
+
+class SlowInitClass():
+
+ def __init__(self, glb, title, init_fn):
+ self.init_fn = init_fn
+ self.done = False
+ self.result = None
+
+ self.msg_box = QMessageBox(glb.mainwindow)
+ self.msg_box.setText("Initializing " + title + ". Please wait.")
+ self.msg_box.setWindowTitle("Initializing " + title)
+ self.msg_box.setWindowIcon(glb.mainwindow.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+ self.init_thread = Thread(self.ThreadFn, glb)
+ self.init_thread.done.connect(lambda: self.Done(), Qt.QueuedConnection)
+
+ self.init_thread.start()
+
+ def Done(self):
+ self.msg_box.done(0)
+
+ def ThreadFn(self, glb):
+ conn_name = "SlowInitClass" + str(os.getpid())
+ db, dbname = glb.dbref.Open(conn_name)
+ self.result = self.init_fn(db)
+ self.done = True
+ return (True, 0)
+
+ def Result(self):
+ while not self.done:
+ self.msg_box.exec_()
+ self.init_thread.wait()
+ return self.result
+
+def SlowInit(glb, title, init_fn):
+ init = SlowInitClass(glb, title, init_fn)
+ return init.Result()
+
+# Time chart by CPU window
+
+class TimeChartByCPUWindow(QMdiSubWindow):
+
+ def __init__(self, glb, parent=None):
+ super(TimeChartByCPUWindow, self).__init__(parent)
+
+ self.glb = glb
+ self.machine_id = glb.HostMachineId()
+ self.collection_name = "SwitchGraphDataCollection " + str(self.machine_id)
+
+ collection = LookupModel(self.collection_name)
+ if collection is None:
+ collection = SlowInit(glb, "Time Chart", self.Init)
+
+ self.widget = SwitchGraphWidget(glb, collection, self)
+ self.view = self.widget
+
+ self.base_title = "Time Chart by CPU"
+ self.setWindowTitle(self.base_title + self.widget.Title())
+ self.widget.graph_title_changed.connect(self.GraphTitleChanged)
+
+ self.setWidget(self.widget)
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, self.windowTitle())
+
+ def Init(self, db):
+ return LookupCreateModel(self.collection_name, lambda : SwitchGraphDataCollection(self.glb, db, self.machine_id))
+
+ def GraphTitleChanged(self, title):
+ self.setWindowTitle(self.base_title + " : " + title)
+
+# Child data item finder
+
+class ChildDataItemFinder():
+
+ def __init__(self, root):
+ self.root = root
+ self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
+ self.rows = []
+ self.pos = 0
+
+ def FindSelect(self):
+ self.rows = []
+ if self.pattern:
+ pattern = re.compile(self.value)
+ for child in self.root.child_items:
+ for column_data in child.data:
+ if re.search(pattern, str(column_data)) is not None:
+ self.rows.append(child.row)
+ break
+ else:
+ for child in self.root.child_items:
+ for column_data in child.data:
+ if self.value in str(column_data):
+ self.rows.append(child.row)
+ break
+
+ def FindValue(self):
+ self.pos = 0
+ if self.last_value != self.value or self.pattern != self.last_pattern:
+ self.FindSelect()
+ if not len(self.rows):
+ return -1
+ return self.rows[self.pos]
+
+ def FindThread(self):
+ if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
+ row = self.FindValue()
+ elif len(self.rows):
+ if self.direction > 0:
+ self.pos += 1
+ if self.pos >= len(self.rows):
+ self.pos = 0
+ else:
+ self.pos -= 1
+ if self.pos < 0:
+ self.pos = len(self.rows) - 1
+ row = self.rows[self.pos]
+ else:
+ row = -1
+ return (True, row)
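+
+ # Direction semantics, as implemented above: 0 starts a new search, > 0
+ # steps to the next match (wrapping to the first), and < 0 steps to the
+ # previous match (wrapping to the last)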
+
+ def Find(self, value, direction, pattern, context, callback):
+ self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction, pattern, self.value, self.pattern)
+ # Use a thread so the UI is not blocked
+ thread = Thread(self.FindThread)
+ thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
+ thread.start()
+
+ def FindDone(self, thread, callback, row):
+ callback(row)
+
+# Number of database records to fetch in one go
+
+glb_chunk_sz = 10000
+
+# Background process for SQL data fetcher
+
+class SQLFetcherProcess():
+
+ def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
+ # Need a unique connection name
+ conn_name = "SQLFetcher" + str(os.getpid())
+ self.db, dbname = dbref.Open(conn_name)
+ self.sql = sql
+ self.buffer = buffer
+ self.head = head
+ self.tail = tail
+ self.fetch_count = fetch_count
+ self.fetching_done = fetching_done
+ self.process_target = process_target
+ self.wait_event = wait_event
+ self.fetched_event = fetched_event
+ self.prep = prep
+ self.query = QSqlQuery(self.db)
+ self.query_limit = 0 if "$$last_id$$" in sql else 2
+ self.last_id = -1
+ self.fetched = 0
+ self.more = True
+ self.local_head = self.head.value
+ self.local_tail = self.tail.value
+
+ def Select(self):
+ if self.query_limit:
+ if self.query_limit == 1:
+ return
+ self.query_limit -= 1
+ stmt = self.sql.replace("$$last_id$$", str(self.last_id))
+ QueryExec(self.query, stmt)
+
+ def Next(self):
+ if not self.query.next():
+ self.Select()
+ if not self.query.next():
+ return None
+ self.last_id = self.query.value(0)
+ return self.prep(self.query)
+
+ def WaitForTarget(self):
+ while True:
+ self.wait_event.clear()
+ target = self.process_target.value
+ if target > self.fetched or target < 0:
+ break
+ self.wait_event.wait()
+ return target
+
+ def HasSpace(self, sz):
+ if self.local_tail <= self.local_head:
+ space = len(self.buffer) - self.local_head
+ if space > sz:
+ return True
+ if space >= glb_nsz:
+ # Use a pickled 0 (or remaining space < glb_nsz) to mean there is no more data at the top of the buffer, so the reader wraps to the start
+ nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
+ self.buffer[self.local_head : self.local_head + len(nd)] = nd
+ self.local_head = 0
+ if self.local_tail - self.local_head > sz:
+ return True
+ return False
+
+ def WaitForSpace(self, sz):
+ if self.HasSpace(sz):
+ return
+ while True:
+ self.wait_event.clear()
+ self.local_tail = self.tail.value
+ if self.HasSpace(sz):
+ return
+ self.wait_event.wait()
+
+ def AddToBuffer(self, obj):
+ d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
+ n = len(d)
+ nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
+ sz = n + glb_nsz
+ self.WaitForSpace(sz)
+ pos = self.local_head
+ self.buffer[pos : pos + len(nd)] = nd
+ self.buffer[pos + glb_nsz : pos + sz] = d
+ self.local_head += sz
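+
+ # Buffer layout sketch: each record is the pickled length n (glb_nsz bytes,
+ # a constant defined elsewhere in this script) followed by the n pickled
+ # bytes of the object; a pickled 0 at the head tells the reader to wrap to
+ # the start of the buffer. RemoveFromBuffer() below unpacks the same way.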
+
+ def FetchBatch(self, batch_size):
+ fetched = 0
+ while batch_size > fetched:
+ obj = self.Next()
+ if obj is None:
+ self.more = False
+ break
+ self.AddToBuffer(obj)
+ fetched += 1
+ if fetched:
+ self.fetched += fetched
+ with self.fetch_count.get_lock():
+ self.fetch_count.value += fetched
+ self.head.value = self.local_head
+ self.fetched_event.set()
+
+ def Run(self):
+ while self.more:
+ target = self.WaitForTarget()
+ if target < 0:
+ break
+ batch_size = min(glb_chunk_sz, target - self.fetched)
+ self.FetchBatch(batch_size)
+ self.fetching_done.value = True
+ self.fetched_event.set()
+
+def SQLFetcherFn(*x):
+ process = SQLFetcherProcess(*x)
+ process.Run()
+
+# SQL data fetcher
+
+class SQLFetcher(QObject):
+
+ done = Signal(object)
+
+ def __init__(self, glb, sql, prep, process_data, parent=None):
+ super(SQLFetcher, self).__init__(parent)
+ self.process_data = process_data
+ self.more = True
+ self.target = 0
+ self.last_target = 0
+ self.fetched = 0
+ self.buffer_size = 16 * 1024 * 1024
+ self.buffer = Array(c_char, self.buffer_size, lock=False)
+ self.head = Value(c_longlong)
+ self.tail = Value(c_longlong)
+ self.local_tail = 0
+ self.fetch_count = Value(c_longlong)
+ self.fetching_done = Value(c_bool)
+ self.last_count = 0
+ self.process_target = Value(c_longlong)
+ self.wait_event = Event()
+ self.fetched_event = Event()
+ glb.AddInstanceToShutdownOnExit(self)
+ self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
+ self.process.start()
+ self.thread = Thread(self.Thread)
+ self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
+ self.thread.start()
+
+ def Shutdown(self):
+ # Tell the thread and process to exit
+ self.process_target.value = -1
+ self.wait_event.set()
+ self.more = False
+ self.fetching_done.value = True
+ self.fetched_event.set()
+
+ def Thread(self):
+ if not self.more:
+ return True, 0
+ while True:
+ self.fetched_event.clear()
+ fetch_count = self.fetch_count.value
+ if fetch_count != self.last_count:
+ break
+ if self.fetching_done.value:
+ self.more = False
+ return True, 0
+ self.fetched_event.wait()
+ count = fetch_count - self.last_count
+ self.last_count = fetch_count
+ self.fetched += count
+ return False, count
+
+ def Fetch(self, nr):
+ if not self.more:
+ # -1 indicates there are no more
+ return -1
+ result = self.fetched
+ extra = result + nr - self.target
+ if extra > 0:
+ self.target += extra
+ # process_target < 0 indicates shutting down
+ if self.process_target.value >= 0:
+ self.process_target.value = self.target
+ self.wait_event.set()
+ return result
+
+ def RemoveFromBuffer(self):
+ pos = self.local_tail
+ if len(self.buffer) - pos < glb_nsz:
+ pos = 0
+ n = pickle.loads(self.buffer[pos : pos + glb_nsz])
+ if n == 0:
+ pos = 0
+ n = pickle.loads(self.buffer[0 : glb_nsz])
+ pos += glb_nsz
+ obj = pickle.loads(self.buffer[pos : pos + n])
+ self.local_tail = pos + n
+ return obj
+
+ def ProcessData(self, count):
+ for i in xrange(count):
+ obj = self.RemoveFromBuffer()
+ self.process_data(obj)
+ self.tail.value = self.local_tail
+ self.wait_event.set()
+ self.done.emit(count)
+
+# Fetch more records bar
+
+class FetchMoreRecordsBar():
+
+ def __init__(self, model, parent):
+ self.model = model
+
+ self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
+ self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.fetch_count = QSpinBox()
+ self.fetch_count.setRange(1, 1000000)
+ self.fetch_count.setValue(10)
+ self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.fetch = QPushButton("Go!")
+ self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+ self.fetch.released.connect(self.FetchMoreRecords)
+
+ self.progress = QProgressBar()
+ self.progress.setRange(0, 100)
+ self.progress.hide()
+
+ self.done_label = QLabel("All records fetched")
+ self.done_label.hide()
+
+ self.spacer = QLabel("")
+
+ self.close_button = QToolButton()
+ self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
+ self.close_button.released.connect(self.Deactivate)
+
+ self.hbox = QHBoxLayout()
+ self.hbox.setContentsMargins(0, 0, 0, 0)
+
+ self.hbox.addWidget(self.label)
+ self.hbox.addWidget(self.fetch_count)
+ self.hbox.addWidget(self.fetch)
+ self.hbox.addWidget(self.spacer)
+ self.hbox.addWidget(self.progress)
+ self.hbox.addWidget(self.done_label)
+ self.hbox.addWidget(self.close_button)
+
+ self.bar = QWidget()
+ self.bar.setLayout(self.hbox)
+ self.bar.show()
+
+ self.in_progress = False
+ self.model.progress.connect(self.Progress)
+
+ self.done = False
+
+ if not model.HasMoreRecords():
+ self.Done()
+
+ def Widget(self):
+ return self.bar
+
+ def Activate(self):
+ self.bar.show()
+ self.fetch.setFocus()
+
+ def Deactivate(self):
+ self.bar.hide()
+
+ def Enable(self, enable):
+ self.fetch.setEnabled(enable)
+ self.fetch_count.setEnabled(enable)
+
+ def Busy(self):
+ self.Enable(False)
+ self.fetch.hide()
+ self.spacer.hide()
+ self.progress.show()
+
+ def Idle(self):
+ self.in_progress = False
+ self.Enable(True)
+ self.progress.hide()
+ self.fetch.show()
+ self.spacer.show()
+
+ def Target(self):
+ return self.fetch_count.value() * glb_chunk_sz
+
+ def Done(self):
+ self.done = True
+ self.Idle()
+ self.label.hide()
+ self.fetch_count.hide()
+ self.fetch.hide()
+ self.spacer.hide()
+ self.done_label.show()
+
+ def Progress(self, count):
+ if self.in_progress:
+ if count:
+ percent = ((count - self.start) * 100) / self.Target()
+ if percent >= 100:
+ self.Idle()
+ else:
+ self.progress.setValue(percent)
+ if not count:
+ # Count value of zero means no more records
+ self.Done()
+
+ def FetchMoreRecords(self):
+ if self.done:
+ return
+ self.progress.setValue(0)
+ self.Busy()
+ self.in_progress = True
+ self.start = self.model.FetchMoreRecords(self.Target())
+
+# Branch data model level two item
+
+class BranchLevelTwoItem():
+
+ def __init__(self, row, col, text, parent_item):
+ self.row = row
+ self.parent_item = parent_item
+ self.data = [""] * (col + 1)
+ self.data[col] = text
+ self.level = 2
+
+ def getParentItem(self):
+ return self.parent_item
+
+ def getRow(self):
+ return self.row
+
+ def childCount(self):
+ return 0
+
+ def hasChildren(self):
+ return False
+
+ def getData(self, column):
+ return self.data[column]
+
+# Branch data model level one item
+
+class BranchLevelOneItem():
+
+ def __init__(self, glb, row, data, parent_item):
+ self.glb = glb
+ self.row = row
+ self.parent_item = parent_item
+ self.child_count = 0
+ self.child_items = []
+ self.data = data[1:]
+ self.dbid = data[0]
+ self.level = 1
+ self.query_done = False
+ self.br_col = len(self.data) - 1
+
+ def getChildItem(self, row):
+ return self.child_items[row]
+
+ def getParentItem(self):
+ return self.parent_item
+
+ def getRow(self):
+ return self.row
+
+ def Select(self):
+ self.query_done = True
+
+ if not self.glb.have_disassembler:
+ return
+
+ query = QSqlQuery(self.glb.db)
+
+ QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
+ " FROM samples"
+ " INNER JOIN dsos ON samples.to_dso_id = dsos.id"
+ " INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
+ " WHERE samples.id = " + str(self.dbid))
+ if not query.next():
+ return
+ cpu = query.value(0)
+ dso = query.value(1)
+ sym = query.value(2)
+ if dso == 0 or sym == 0:
+ return
+ off = query.value(3)
+ short_name = query.value(4)
+ long_name = query.value(5)
+ build_id = query.value(6)
+ sym_start = query.value(7)
+ ip = query.value(8)
+
+ QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
+ " FROM samples"
+ " INNER JOIN symbols ON samples.symbol_id = symbols.id"
+ " WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
+ " ORDER BY samples.id"
+ " LIMIT 1")
+ if not query.next():
+ return
+ if query.value(0) != dso:
+ # Cannot disassemble from one dso to another
+ return
+ bsym = query.value(1)
+ boff = query.value(2)
+ bsym_start = query.value(3)
+ if bsym == 0:
+ return
+ tot = bsym_start + boff + 1 - sym_start - off
+ if tot <= 0 or tot > 16384:
+ return
+
+ inst = self.glb.disassembler.Instruction()
+ f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
+ if not f:
+ return
+ mode = 0 if Is64Bit(f) else 1
+ self.glb.disassembler.SetMode(inst, mode)
+
+ buf_sz = tot + 16
+ buf = create_string_buffer(tot + 16)
+ f.seek(sym_start + off)
+ buf.value = f.read(buf_sz)
+ buf_ptr = addressof(buf)
+ i = 0
+ while tot > 0:
+ cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
+ if cnt:
+ byte_str = tohex(ip).rjust(16)
+ for k in xrange(cnt):
+ byte_str += " %02x" % ord(buf[i])
+ i += 1
+ while k < 15:
+ byte_str += " "
+ k += 1
+ self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
+ self.child_count += 1
+ else:
+ return
+ buf_ptr += cnt
+ tot -= cnt
+ buf_sz -= cnt
+ ip += cnt
+
+ def childCount(self):
+ if not self.query_done:
+ self.Select()
+ if not self.child_count:
+ return -1
+ return self.child_count
+
+ def hasChildren(self):
+ if not self.query_done:
+ return True
+ return self.child_count > 0
+
+ def getData(self, column):
+ return self.data[column]
+
+# Branch data model root item
+
+class BranchRootItem():
+
+ def __init__(self):
+ self.child_count = 0
+ self.child_items = []
+ self.level = 0
+
+ def getChildItem(self, row):
+ return self.child_items[row]
+
+ def getParentItem(self):
+ return None
+
+ def getRow(self):
+ return 0
+
+ def childCount(self):
+ return self.child_count
+
+ def hasChildren(self):
+ return self.child_count > 0
+
+ def getData(self, column):
+ return ""
+
+# Calculate instructions per cycle
+
+def CalcIPC(cyc_cnt, insn_cnt):
+ if cyc_cnt and insn_cnt:
+ ipc = Decimal(float(insn_cnt) / cyc_cnt)
+ ipc = str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
+ else:
+ ipc = "0"
+ return ipc
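+
+ # e.g. CalcIPC(4, 10) is Decimal(10.0 / 4) quantized to 2 places, i.e.
+ # "2.50"; a zero cycle or instruction count yields "0"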
+
+# Branch data preparation
+
+def BranchDataPrepBr(query, data):
+ data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+ " (" + dsoname(query.value(11)) + ")" + " -> " +
+ tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+ " (" + dsoname(query.value(15)) + ")")
+
+def BranchDataPrepIPC(query, data):
+ insn_cnt = query.value(16)
+ cyc_cnt = query.value(17)
+ ipc = CalcIPC(cyc_cnt, insn_cnt)
+ data.append(insn_cnt)
+ data.append(cyc_cnt)
+ data.append(ipc)
+
+def BranchDataPrep(query):
+ data = []
+ for i in xrange(0, 8):
+ data.append(query.value(i))
+ BranchDataPrepBr(query, data)
+ return data
+
+def BranchDataPrepWA(query):
+ data = []
+ data.append(query.value(0))
+ # Work around PySide failing to handle large integers (i.e. time) in Python 3 by converting them to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, 8):
+ data.append(query.value(i))
+ BranchDataPrepBr(query, data)
+ return data
+
+def BranchDataWithIPCPrep(query):
+ data = []
+ for i in xrange(0, 8):
+ data.append(query.value(i))
+ BranchDataPrepIPC(query, data)
+ BranchDataPrepBr(query, data)
+ return data
+
+def BranchDataWithIPCPrepWA(query):
+ data = []
+ data.append(query.value(0))
+ # Work around PySide failing to handle large integers (i.e. time) in Python 3 by converting them to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, 8):
+ data.append(query.value(i))
+ BranchDataPrepIPC(query, data)
+ BranchDataPrepBr(query, data)
+ return data
+
+# Branch data model
+
+class BranchModel(TreeModel):
+
+ progress = Signal(object)
+
+ def __init__(self, glb, event_id, where_clause, parent=None):
+ super(BranchModel, self).__init__(glb, None, parent)
+ self.event_id = event_id
+ self.more = True
+ self.populated = 0
+ self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
+ if self.have_ipc:
+ select_ipc = ", insn_count, cyc_count"
+ prep_fn = BranchDataWithIPCPrep
+ prep_wa_fn = BranchDataWithIPCPrepWA
+ else:
+ select_ipc = ""
+ prep_fn = BranchDataPrep
+ prep_wa_fn = BranchDataPrepWA
+ sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
+ " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
+ " ip, symbols.name, sym_offset, dsos.short_name,"
+ " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
+ + select_ipc +
+ " FROM samples"
+ " INNER JOIN comms ON comm_id = comms.id"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " INNER JOIN branch_types ON branch_type = branch_types.id"
+ " INNER JOIN symbols ON symbol_id = symbols.id"
+ " INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
+ " INNER JOIN dsos ON samples.dso_id = dsos.id"
+ " INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
+ " WHERE samples.id > $$last_id$$" + where_clause +
+ " AND evsel_id = " + str(self.event_id) +
+ " ORDER BY samples.id"
+ " LIMIT " + str(glb_chunk_sz))
+ if pyside_version_1 and sys.version_info[0] == 3:
+ prep = prep_wa_fn
+ else:
+ prep = prep_fn
+ self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
+ self.fetcher.done.connect(self.Update)
+ self.fetcher.Fetch(glb_chunk_sz)
+
+ def GetRoot(self):
+ return BranchRootItem()
+
+ def columnCount(self, parent=None):
+ if self.have_ipc:
+ return 11
+ else:
+ return 8
+
+ def columnHeader(self, column):
+ if self.have_ipc:
+ return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
+ else:
+ return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
+
+ def columnFont(self, column):
+ if self.have_ipc:
+ br_col = 10
+ else:
+ br_col = 7
+ if column != br_col:
+ return None
+ return QFont("Monospace")
+
+ def DisplayData(self, item, index):
+ if item.level == 1:
+ self.FetchIfNeeded(item.row)
+ return item.getData(index.column())
+
+ def AddSample(self, data):
+ child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
+ self.root.child_items.append(child)
+ self.populated += 1
+
+ def Update(self, fetched):
+ if not fetched:
+ self.more = False
+ self.progress.emit(0)
+ child_count = self.root.child_count
+ count = self.populated - child_count
+ if count > 0:
+ parent = QModelIndex()
+ self.beginInsertRows(parent, child_count, child_count + count - 1)
+ self.insertRows(child_count, count, parent)
+ self.root.child_count += count
+ self.endInsertRows()
+ self.progress.emit(self.root.child_count)
+
+ def FetchMoreRecords(self, count):
+ current = self.root.child_count
+ if self.more:
+ self.fetcher.Fetch(count)
+ else:
+ self.progress.emit(0)
+ return current
+
+ def HasMoreRecords(self):
+ return self.more
+
+# Report Variables
+
+class ReportVars():
+
+ def __init__(self, name = "", where_clause = "", limit = ""):
+ self.name = name
+ self.where_clause = where_clause
+ self.limit = limit
+
+ def UniqueId(self):
+ return str(self.where_clause + ";" + self.limit)
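+
+ # e.g. ReportVars(where_clause="WHERE cpu = 1", limit="10").UniqueId() is
+ # "WHERE cpu = 1;10", used below to key cached models per report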
+
+# Branch window
+
+class BranchWindow(QMdiSubWindow):
+
+ def __init__(self, glb, event_id, report_vars, parent=None):
+ super(BranchWindow, self).__init__(parent)
+
+ model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
+
+ self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
+
+ self.view = QTreeView()
+ self.view.setUniformRowHeights(True)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
+ self.view.setModel(self.model)
+
+ self.ResizeColumnsToContents()
+
+ self.context_menu = TreeContextMenu(self.view)
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.model.root)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
+
+ def ResizeColumnToContents(self, column, n):
+ # Using the view's resizeColumnToContents() here is extremely slow
+ # so implement a crude alternative
+ mm = "MM" if column else "MMMM"
+ font = self.view.font()
+ metrics = QFontMetrics(font)
+ max = 0
+ for row in xrange(n):
+ val = self.model.root.child_items[row].data[column]
+ len = metrics.width(str(val) + mm)
+ max = len if len > max else max
+ val = self.model.columnHeader(column)
+ len = metrics.width(str(val) + mm)
+ max = len if len > max else max
+ self.view.setColumnWidth(column, max)
+
+ def ResizeColumnsToContents(self):
+ n = min(self.model.root.child_count, 100)
+ if n < 1:
+ # No data yet, so connect a signal to notify when there is
+ self.model.rowsInserted.connect(self.UpdateColumnWidths)
+ return
+ columns = self.model.columnCount()
+ for i in xrange(columns):
+ self.ResizeColumnToContents(i, n)
+
+ def UpdateColumnWidths(self, *x):
+ # This only needs to be done once, so disconnect the signal now
+ self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
+ self.ResizeColumnsToContents()
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ else:
+ self.find_bar.NotFound()
+
+# Line edit data item
+
+class LineEditDataItem(object):
+
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+ self.glb = glb
+ self.label = label
+ self.placeholder_text = placeholder_text
+ self.parent = parent
+ self.id = id
+
+ self.value = default
+
+ self.widget = QLineEdit(default)
+ self.widget.editingFinished.connect(self.Validate)
+ self.widget.textChanged.connect(self.Invalidate)
+ self.red = False
+ self.error = ""
+ self.validated = True
+
+ if placeholder_text:
+ self.widget.setPlaceholderText(placeholder_text)
+
+ def TurnTextRed(self):
+ if not self.red:
+ palette = QPalette()
+ palette.setColor(QPalette.Text, Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+
+ def TurnTextNormal(self):
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ self.TurnTextRed()
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def Invalidate(self):
+ self.validated = False
+
+ def DoValidate(self, input_string):
+ self.value = input_string.strip()
+
+ def Validate(self):
+ self.validated = True
+ self.error = ""
+ self.TurnTextNormal()
+ self.parent.ClearMessage()
+ input_string = self.widget.text()
+ if not len(input_string.strip()):
+ self.value = ""
+ return
+ self.DoValidate(input_string)
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except:
+ x = 0
+ return str(x) == value
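+
+ # Note the round-trip check also rejects padded forms: IsNumber("42") is
+ # True, but IsNumber("007") is False because str(int("007")) is "7"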
+
+# Non-negative integer ranges dialog data item
+
+class NonNegativeIntegerRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.column_name = column_name
+
+ def DoValidate(self, input_string):
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
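+
+ # e.g. with column_name "cpu", the input "0,5-6" produces the clause
+ # "(cpu >= 5 AND cpu <= 6) OR cpu IN (0)"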
+
+# Positive integer dialog data item
+
+class PositiveIntegerDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+ super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
+
+ def DoValidate(self, input_string):
+ if not self.IsNumber(input_string.strip()):
+ return self.InvalidValue(input_string)
+ value = int(input_string.strip())
+ if value <= 0:
+ return self.InvalidValue(input_string)
+ self.value = str(value)
+
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+
+ def ValueToIds(self, value):
+ ids = []
+ query = QSqlQuery(self.glb.db)
+ stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
+ ret = query.exec_(stmt)
+ if ret:
+ while query.next():
+ ids.append(str(query.value(0)))
+ return ids
+
+ def DoValidate(self, input_string):
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+
+# Sample time ranges dialog data item converted and validated using 'samples' SQL table
+
+class SampleTimeRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ self.column_name = column_name
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.first_time = int(glb.HostStartTime())
+ self.last_time = int(glb.HostFinishTime())
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ def IdBetween(self, query, lower_id, higher_id, order):
+ QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
+ if query.next():
+ return True, int(query.value(0))
+ else:
+ return False, 0
+
+ def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
+ query = QSqlQuery(self.glb.db)
+ while True:
+ next_id = int((lower_id + higher_id) / 2)
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ if not query.next():
+ ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
+ if not ok:
+ ok, dbid = self.IdBetween(query, next_id, higher_id, "")
+ if not ok:
+ return str(higher_id)
+ next_id = dbid
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ next_time = int(query.value(0))
+ if get_floor:
+ if target_time > next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(higher_id)
+ else:
+ if target_time >= next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(lower_id)
+
+ def ConvertRelativeTime(self, val):
+ mult = 1
+ suffix = val[-2:]
+ if suffix == "ms":
+ mult = 1000000
+ elif suffix == "us":
+ mult = 1000
+ elif suffix == "ns":
+ mult = 1
+ else:
+ return val
+ val = val[:-2].strip()
+ if not self.IsNumber(val):
+ return val
+ val = int(val) * mult
+ if val >= 0:
+ val += self.first_time
+ else:
+ val += self.last_time
+ return str(val)
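+
+ # e.g. assuming a hypothetical first_time of 1000000000: "10ms" becomes
+ # str(10 * 1000000 + 1000000000) = "1010000000", while a negative value
+ # such as "-10ms" is taken relative to last_time instead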
+
+ def ConvertTimeRange(self, vrange):
+ if vrange[0] == "":
+ vrange[0] = str(self.first_time)
+ if vrange[1] == "":
+ vrange[1] = str(self.last_time)
+ vrange[0] = self.ConvertRelativeTime(vrange[0])
+ vrange[1] = self.ConvertRelativeTime(vrange[1])
+ if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return False
+ beg_range = max(int(vrange[0]), self.first_time)
+ end_range = min(int(vrange[1]), self.last_time)
+ if beg_range > self.last_time or end_range < self.first_time:
+ return False
+ vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
+ vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
+ return True
+
+ def AddTimeRange(self, value, ranges):
+ n = value.count("-")
+ if n == 1:
+ pass
+ elif n == 2:
+ if value.split("-")[1].strip() == "":
+ n = 1
+ elif n == 3:
+ n = 2
+ else:
+ return False
+ pos = findnth(value, "-", n)
+ vrange = [value[:pos].strip(), value[pos+1:].strip()]
+ if self.ConvertTimeRange(vrange):
+ ranges.append(vrange)
+ return True
+ return False
+
+ def DoValidate(self, input_string):
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
+
+# Report Dialog Base
+
+class ReportDialogBase(QDialog):
+
+ def __init__(self, glb, title, items, partial, parent=None):
+ super(ReportDialogBase, self).__init__(parent)
+
+ self.glb = glb
+
+ self.report_vars = ReportVars()
+
+ self.setWindowTitle(title)
+ self.setMinimumWidth(600)
+
+ self.data_items = [x(glb, self) for x in items]
+
+ self.partial = partial
+
+ self.grid = QGridLayout()
+
+ for row in xrange(len(self.data_items)):
+ self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
+ self.grid.addWidget(self.data_items[row].widget, row, 1)
+
+ self.status = QLabel()
+
+ self.ok_button = QPushButton("Ok", self)
+ self.ok_button.setDefault(True)
+ self.ok_button.released.connect(self.Ok)
+ self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.cancel_button = QPushButton("Cancel", self)
+ self.cancel_button.released.connect(self.reject)
+ self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.hbox = QHBoxLayout()
+ #self.hbox.addStretch()
+ self.hbox.addWidget(self.status)
+ self.hbox.addWidget(self.ok_button)
+ self.hbox.addWidget(self.cancel_button)
+
+ self.vbox = QVBoxLayout()
+ self.vbox.addLayout(self.grid)
+ self.vbox.addLayout(self.hbox)
+
+ self.setLayout(self.vbox)
+
+ def Ok(self):
+ vars = self.report_vars
+ for d in self.data_items:
+ if d.id == "REPORTNAME":
+ vars.name = d.value
+ if not vars.name:
+ self.ShowMessage("Report name is required")
+ return
+ for d in self.data_items:
+ if not d.IsValid():
+ return
+ for d in self.data_items[1:]:
+ if d.id == "LIMIT":
+ vars.limit = d.value
+ elif len(d.value):
+ if len(vars.where_clause):
+ vars.where_clause += " AND "
+ vars.where_clause += d.value
+ if len(vars.where_clause):
+ if self.partial:
+ vars.where_clause = " AND ( " + vars.where_clause + " ) "
+ else:
+ vars.where_clause = " WHERE " + vars.where_clause + " "
+ self.accept()
+
+ def ShowMessage(self, msg):
+ self.status.setText("<font color=#FF0000>" + msg + "</font>")
+
+ def ClearMessage(self):
+ self.status.setText("")
+
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Selected Branches"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
+ lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
+ super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
+
+# Event list
+
+def GetEventList(db):
+ events = []
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
+ while query.next():
+ events.append(query.value(0))
+ return events
+
+# Is a table selectable
+
+def IsSelectable(db, table, sql = "", columns = "*"):
+ query = QSqlQuery(db)
+ try:
+ QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
+ except:
+ return False
+ return True
+
+# SQL table data model item
+
+class SQLTableItem():
+
+ def __init__(self, row, data):
+ self.row = row
+ self.data = data
+
+ def getData(self, column):
+ return self.data[column]
+
+# SQL table data model
+
+class SQLTableModel(TableModel):
+
+ progress = Signal(object)
+
+ def __init__(self, glb, sql, column_headers, parent=None):
+ super(SQLTableModel, self).__init__(parent)
+ self.glb = glb
+ self.more = True
+ self.populated = 0
+ self.column_headers = column_headers
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
+ self.fetcher.done.connect(self.Update)
+ self.fetcher.Fetch(glb_chunk_sz)
+
+ def DisplayData(self, item, index):
+ self.FetchIfNeeded(item.row)
+ return item.getData(index.column())
+
+ def AddSample(self, data):
+ child = SQLTableItem(self.populated, data)
+ self.child_items.append(child)
+ self.populated += 1
+
+ def Update(self, fetched):
+ if not fetched:
+ self.more = False
+ self.progress.emit(0)
+ child_count = self.child_count
+ count = self.populated - child_count
+ if count > 0:
+ parent = QModelIndex()
+ self.beginInsertRows(parent, child_count, child_count + count - 1)
+ self.insertRows(child_count, count, parent)
+ self.child_count += count
+ self.endInsertRows()
+ self.progress.emit(self.child_count)
+
+ def FetchMoreRecords(self, count):
+ current = self.child_count
+ if self.more:
+ self.fetcher.Fetch(count)
+ else:
+ self.progress.emit(0)
+ return current
+
+ def HasMoreRecords(self):
+ return self.more
+
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
+ def SQLTableDataPrep(self, query, count):
+ data = []
+ for i in xrange(count):
+ data.append(query.value(i))
+ return data
+
+# SQL automatic table data model
+
+class SQLAutoTableModel(SQLTableModel):
+
+ def __init__(self, glb, table_name, parent=None):
+ sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
+ if table_name == "comm_threads_view":
+ # For now, comm_threads_view has no id column
+ sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
+ column_headers = []
+ query = QSqlQuery(glb.db)
+ if glb.dbref.is_sqlite3:
+ QueryExec(query, "PRAGMA table_info(" + table_name + ")")
+ while query.next():
+ column_headers.append(query.value(1))
+ if table_name == "sqlite_master":
+ sql = "SELECT * FROM " + table_name
+ else:
+ if table_name[:19] == "information_schema.":
+ sql = "SELECT * FROM " + table_name
+ select_table_name = table_name[19:]
+ schema = "information_schema"
+ else:
+ select_table_name = table_name
+ schema = "public"
+ QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
+ while query.next():
+ column_headers.append(query.value(0))
+ if pyside_version_1 and sys.version_info[0] == 3:
+ if table_name == "samples_view":
+ self.SQLTableDataPrep = self.samples_view_DataPrep
+ if table_name == "samples":
+ self.SQLTableDataPrep = self.samples_DataPrep
+ super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
+
+ def samples_view_DataPrep(self, query, count):
+ data = []
+ data.append(query.value(0))
+ # Work around PySide failing to handle large integers (i.e. time) in Python 3 by converting them to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, count):
+ data.append(query.value(i))
+ return data
+
+ def samples_DataPrep(self, query, count):
+ data = []
+ for i in xrange(9):
+ data.append(query.value(i))
+ # Work around PySide failing to handle large integers (i.e. time) in Python 3 by converting them to a string
+ data.append("{:>19}".format(query.value(9)))
+ for i in xrange(10, count):
+ data.append(query.value(i))
+ return data
+
+# Base class for custom ResizeColumnsToContents
+
+class ResizeColumnsToContentsBase(QObject):
+
+ def __init__(self, parent=None):
+ super(ResizeColumnsToContentsBase, self).__init__(parent)
+
+ def ResizeColumnToContents(self, column, n):
+ # Using the view's resizeColumnToContents() here is extremely slow
+ # so implement a crude alternative
+ font = self.view.font()
+ metrics = QFontMetrics(font)
+ max = 0
+ for row in xrange(n):
+ val = self.data_model.child_items[row].data[column]
+ len = metrics.width(str(val) + "MM")
+ max = len if len > max else max
+ val = self.data_model.columnHeader(column)
+ len = metrics.width(str(val) + "MM")
+ max = len if len > max else max
+ self.view.setColumnWidth(column, max)
+
+ def ResizeColumnsToContents(self):
+ n = min(self.data_model.child_count, 100)
+ if n < 1:
+ # No data yet, so connect a signal to notify when there is
+ self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
+ return
+ columns = self.data_model.columnCount()
+ for i in xrange(columns):
+ self.ResizeColumnToContents(i, n)
+
+ def UpdateColumnWidths(self, *x):
+ # This only needs to be done once, so disconnect the signal now
+ self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
+ self.ResizeColumnsToContents()
+
+# Convert value to CSV
+
+def ToCSValue(val):
+ if '"' in val:
+ val = val.replace('"', '""')
+ if "," in val or '"' in val:
+ val = '"' + val + '"'
+ return val
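+
+ # e.g. ToCSValue('a,"b"') returns '"a,""b"""' - the usual CSV rules of
+ # doubling embedded quotes and wrapping fields that contain commas or quotes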
+
+# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
+
+glb_max_cols = 1000
+
+def RowColumnKey(a):
+ return a.row() * glb_max_cols + a.column()
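+
+ # e.g. an index at row 2, column 3 gets key 2 * 1000 + 3 = 2003, ordering
+ # selections row-major provided there are fewer than glb_max_cols columns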
+
+# Copy selected table cells to clipboard
+
+def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
+ indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
+ idx_cnt = len(indexes)
+ if not idx_cnt:
+ return
+ if idx_cnt == 1:
+ with_hdr=False
+ min_row = indexes[0].row()
+ max_row = indexes[0].row()
+ min_col = indexes[0].column()
+ max_col = indexes[0].column()
+ for i in indexes:
+ min_row = min(min_row, i.row())
+ max_row = max(max_row, i.row())
+ min_col = min(min_col, i.column())
+ max_col = max(max_col, i.column())
+ if max_col > glb_max_cols:
+ raise RuntimeError("glb_max_cols is too low")
+ max_width = [0] * (1 + max_col - min_col)
+ for i in indexes:
+ c = i.column() - min_col
+ max_width[c] = max(max_width[c], len(str(i.data())))
+ text = ""
+ pad = ""
+ sep = ""
+ if with_hdr:
+ model = indexes[0].model()
+ for col in range(min_col, max_col + 1):
+ val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
+ if as_csv:
+ text += sep + ToCSValue(val)
+ sep = ","
+ else:
+ c = col - min_col
+ max_width[c] = max(max_width[c], len(val))
+ width = max_width[c]
+ align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
+ if align & Qt.AlignRight:
+ val = val.rjust(width)
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ text += "\n"
+ pad = ""
+ sep = ""
+ last_row = min_row
+ for i in indexes:
+ if i.row() > last_row:
+ last_row = i.row()
+ text += "\n"
+ pad = ""
+ sep = ""
+ if as_csv:
+ text += sep + ToCSValue(str(i.data()))
+ sep = ","
+ else:
+ width = max_width[i.column() - min_col]
+ if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
+ val = str(i.data()).rjust(width)
+ else:
+ val = str(i.data())
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ QApplication.clipboard().setText(text)
+
+def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
+ indexes = view.selectedIndexes()
+ if not len(indexes):
+ return
+
+ selection = view.selectionModel()
+
+ first = None
+ for i in indexes:
+ above = view.indexAbove(i)
+ if not selection.isSelected(above):
+ first = i
+ break
+
+ if first is None:
+ raise RuntimeError("CopyTreeCellsToClipboard internal error")
+
+ model = first.model()
+ row_cnt = 0
+ col_cnt = model.columnCount(first)
+ max_width = [0] * col_cnt
+
+ indent_sz = 2
+ indent_str = " " * indent_sz
+
+ expanded_mark_sz = 2
+ if sys.version_info[0] == 3:
+ expanded_mark = "\u25BC "
+ not_expanded_mark = "\u25B6 "
+ else:
+ expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
+ not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
+ leaf_mark = " "
+
+ if not as_csv:
+ pos = first
+ while True:
+ row_cnt += 1
+ row = pos.row()
+ for c in range(col_cnt):
+ i = pos.sibling(row, c)
+ if c:
+ n = len(str(i.data()))
+ else:
+ n = len(str(i.data()).strip())
+ n += (i.internalPointer().level - 1) * indent_sz
+ n += expanded_mark_sz
+ max_width[c] = max(max_width[c], n)
+ pos = view.indexBelow(pos)
+ if not selection.isSelected(pos):
+ break
+
+ text = ""
+ pad = ""
+ sep = ""
+ if with_hdr:
+ for c in range(col_cnt):
+ val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
+ if as_csv:
+ text += sep + ToCSValue(val)
+ sep = ","
+ else:
+ max_width[c] = max(max_width[c], len(val))
+ width = max_width[c]
+ align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
+ if align & Qt.AlignRight:
+ val = val.rjust(width)
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ text += "\n"
+ pad = ""
+ sep = ""
+
+ pos = first
+ while True:
+ row = pos.row()
+ for c in range(col_cnt):
+ i = pos.sibling(row, c)
+ val = str(i.data())
+ if not c:
+ if model.hasChildren(i):
+ if view.isExpanded(i):
+ mark = expanded_mark
+ else:
+ mark = not_expanded_mark
+ else:
+ mark = leaf_mark
+ val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
+ if as_csv:
+ text += sep + ToCSValue(val)
+ sep = ","
+ else:
+ width = max_width[c]
+ if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
+ val = val.rjust(width)
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ pos = view.indexBelow(pos)
+ if not selection.isSelected(pos):
+ break
+ text = text.rstrip() + "\n"
+ pad = ""
+ sep = ""
+
+ QApplication.clipboard().setText(text)
+
+def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
+ view.CopyCellsToClipboard(view, as_csv, with_hdr)
+
+def CopyCellsToClipboardHdr(view):
+ CopyCellsToClipboard(view, False, True)
+
+def CopyCellsToClipboardCSV(view):
+ CopyCellsToClipboard(view, True, True)
+
+# Context menu
+
+class ContextMenu(object):
+
+ def __init__(self, view):
+ self.view = view
+ self.view.setContextMenuPolicy(Qt.CustomContextMenu)
+ self.view.customContextMenuRequested.connect(self.ShowContextMenu)
+
+ def ShowContextMenu(self, pos):
+ menu = QMenu(self.view)
+ self.AddActions(menu)
+ menu.exec_(self.view.mapToGlobal(pos))
+
+ def AddCopy(self, menu):
+ menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
+ menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
+
+ def AddActions(self, menu):
+ self.AddCopy(menu)
+
+class TreeContextMenu(ContextMenu):
+
+ def __init__(self, view):
+ super(TreeContextMenu, self).__init__(view)
+
+ def AddActions(self, menu):
+ i = self.view.currentIndex()
+ text = str(i.data()).strip()
+ if len(text):
+ menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
+ self.AddCopy(menu)
+
+# Table window
+
+class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+ def __init__(self, glb, table_name, parent=None):
+ super(TableWindow, self).__init__(parent)
+
+ self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
+
+ self.model = QSortFilterProxyModel()
+ self.model.setSourceModel(self.data_model)
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+ self.view.sortByColumn(-1, Qt.AscendingOrder)
+ self.view.setSortingEnabled(True)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
+
+ self.ResizeColumnsToContents()
+
+ self.context_menu = ContextMenu(self.view)
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.data_model)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
+ else:
+ self.find_bar.NotFound()
+
+# Table list
+
+def GetTableList(glb):
+ tables = []
+ query = QSqlQuery(glb.db)
+ if glb.dbref.is_sqlite3:
+ QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
+ else:
+ QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
+ while query.next():
+ tables.append(query.value(0))
+ if glb.dbref.is_sqlite3:
+ tables.append("sqlite_master")
+ else:
+ tables.append("information_schema.tables")
+ tables.append("information_schema.views")
+ tables.append("information_schema.columns")
+ return tables
+
+# Top Calls data model
+
+class TopCallsModel(SQLTableModel):
+
+ def __init__(self, glb, report_vars, parent=None):
+ text = ""
+ if not glb.dbref.is_sqlite3:
+ text = "::text"
+ limit = ""
+ if len(report_vars.limit):
+ limit = " LIMIT " + report_vars.limit
+ sql = ("SELECT comm, pid, tid, name,"
+ " CASE"
+ " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
+ " ELSE short_name"
+ " END AS dso,"
+ " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
+ " CASE"
+ " WHEN (calls.flags = 1) THEN 'no call'" + text +
+ " WHEN (calls.flags = 2) THEN 'no return'" + text +
+ " WHEN (calls.flags = 3) THEN 'no call/return'" + text +
+ " ELSE ''" + text +
+ " END AS flags"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " INNER JOIN comms ON calls.comm_id = comms.id"
+ " INNER JOIN threads ON calls.thread_id = threads.id" +
+ report_vars.where_clause +
+ " ORDER BY elapsed_time DESC" +
+ limit
+ )
+ column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
+ self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
+ super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
+
+ def columnAlignment(self, column):
+ return self.alignment[column]
+
+# Top Calls report creation dialog
+
+class TopCallsDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Top Calls by Elapsed Time"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
+ lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
+ super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
+
+# Top Calls window
+
+class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+ def __init__(self, glb, report_vars, parent=None):
+ super(TopCallsWindow, self).__init__(parent)
+
+ self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
+ self.model = self.data_model
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
+
+ self.context_menu = ContextMenu(self.view)
+
+ self.ResizeColumnsToContents()
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.model)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ else:
+ self.find_bar.NotFound()
+
+# Action Definition
+
+def CreateAction(label, tip, callback, parent=None, shortcut=None):
+ action = QAction(label, parent)
+ if shortcut is not None:
+ action.setShortcuts(shortcut)
+ action.setStatusTip(tip)
+ action.triggered.connect(callback)
+ return action
+
+# Typical application actions
+
+def CreateExitAction(app, parent=None):
+ return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
+
+# Typical MDI actions
+
+def CreateCloseActiveWindowAction(mdi_area):
+ return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
+
+def CreateCloseAllWindowsAction(mdi_area):
+ return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
+
+def CreateTileWindowsAction(mdi_area):
+ return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
+
+def CreateCascadeWindowsAction(mdi_area):
+ return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
+
+def CreateNextWindowAction(mdi_area):
+ return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
+
+def CreatePreviousWindowAction(mdi_area):
+ return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
+
+# Typical MDI window menu
+
+class WindowMenu():
+
+ def __init__(self, mdi_area, menu):
+ self.mdi_area = mdi_area
+ self.window_menu = menu.addMenu("&Windows")
+ self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
+ self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
+ self.tile_windows = CreateTileWindowsAction(mdi_area)
+ self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
+ self.next_window = CreateNextWindowAction(mdi_area)
+ self.previous_window = CreatePreviousWindowAction(mdi_area)
+ self.window_menu.aboutToShow.connect(self.Update)
+
+ def Update(self):
+ self.window_menu.clear()
+ sub_window_count = len(self.mdi_area.subWindowList())
+ have_sub_windows = sub_window_count != 0
+ self.close_active_window.setEnabled(have_sub_windows)
+ self.close_all_windows.setEnabled(have_sub_windows)
+ self.tile_windows.setEnabled(have_sub_windows)
+ self.cascade_windows.setEnabled(have_sub_windows)
+ self.next_window.setEnabled(have_sub_windows)
+ self.previous_window.setEnabled(have_sub_windows)
+ self.window_menu.addAction(self.close_active_window)
+ self.window_menu.addAction(self.close_all_windows)
+ self.window_menu.addSeparator()
+ self.window_menu.addAction(self.tile_windows)
+ self.window_menu.addAction(self.cascade_windows)
+ self.window_menu.addSeparator()
+ self.window_menu.addAction(self.next_window)
+ self.window_menu.addAction(self.previous_window)
+ if sub_window_count == 0:
+ return
+ self.window_menu.addSeparator()
+ nr = 1
+ for sub_window in self.mdi_area.subWindowList():
+ label = str(nr) + " " + sub_window.name
+ if nr < 10:
+ label = "&" + label
+ action = self.window_menu.addAction(label)
+ action.setCheckable(True)
+ action.setChecked(sub_window == self.mdi_area.activeSubWindow())
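+ # the default argument binds the current value of nr; without it every
+ # lambda would see the final loop value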
+ action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x))
+ self.window_menu.addAction(action)
+ nr += 1
+
+ def setActiveSubWindow(self, nr):
+ self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
+
+# Help text
+
+glb_help_text = """
+<h1>Contents</h1>
+<style>
+p.c1 {
+ text-indent: 40px;
+}
+p.c2 {
+ text-indent: 80px;
+}
+</style>
+<p class=c1><a href=#reports>1. Reports</a></p>
+<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
+<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
+<p class=c2><a href=#allbranches>1.3 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
+<p class=c1><a href=#charts>2. Charts</a></p>
+<p class=c2><a href=#timechartbycpu>2.1 Time chart by CPU</a></p>
+<p class=c1><a href=#tables>3. Tables</a></p>
+<h1 id=reports>1. Reports</h1>
+<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+<pre>
+ Call Graph: pt_example
+Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
+v- ls
+ v- 2638:2638
+ v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
+ |- unknown unknown 1 13198 0.1 1 0.0
+ >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
+ >- _dl_init_internal ld-2.19.so 1 448152 4.4 11094 5.3
+ v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
+ >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
+ >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
+ >- __libc_csu_init ls 1 10354 0.1 10 0.0
+ |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
+ v- main ls 1 8182043 99.6 180254 99.9
+</pre>
+<h3>Points to note:</h3>
+<ul>
+<li>The top level is a command name (comm)</li>
+<li>The next level is a thread (pid:tid)</li>
+<li>Subsequent levels are functions</li>
+<li>'Count' is the number of calls</li>
+<li>'Time' is the elapsed time until the function returns</li>
+<li>Percentages are relative to the level above</li>
+<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
+</ul>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
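+For example, the pattern "*alloc*" matches any function name containing "alloc".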
+<h2 id=calltree>1.2 Call Tree</h2>
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
+<h2 id=allbranches>1.3 All branches</h2>
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+<h3>Disassembly</h3>
+Open a branch to display disassembly. This only works if:
+<ol>
+<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
+<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
+The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
+One exception is kcore, where the DSO long name is used (refer to dsos_view on the Tables menu),
+or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
+</ol>
+<h4 id=xed>Intel XED Setup</h4>
+To use Intel XED, libxed.so must be present. To build and install libxed.so:
+<pre>
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+</pre>
+<h3>Instructions per Cycle (IPC)</h3>
+If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
+<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
+Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
+In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
+since the previous displayed 'IPC'.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<h2 id=selectedbranches>1.4 Selected branches</h2>
+This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
+by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+<h3>1.4.1 Time ranges</h3>
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Also, negative values are relative to the end of the trace. Examples:
+<pre>
+ 81073085947329-81073085958238 From 81073085947329 to 81073085958238
+ 100us-200us From 100us to 200us
+ 10ms- From 10ms to the end
+ -100ns The first 100ns
+ -10ms- The last 10ms
+</pre>
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
+The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
+The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
+<h1 id=charts>2. Charts</h1>
+<h2 id=timechartbycpu>2.1 Time chart by CPU</h2>
+This chart displays context switch information when that data is available. Refer to context_switches_view on the Tables menu.
+<h3>Features</h3>
+<ol>
+<li>Mouse over to highlight the task and show the time</li>
+<li>Drag the mouse to select a region and zoom by pushing the Zoom button</li>
+<li>Go back and forward by pressing the arrow buttons</li>
+<li>If call information is available, right-click to show a call tree opened to that task and time.
+Note, the call tree may take some time to appear, and there may not be call information for the task or time selected.
+</li>
+</ol>
+<h3>Important</h3>
+The graph can be misleading in the following respects:
+<ol>
+<li>The graph shows the first task on each CPU as running from the beginning of the time range.
+Because tracing might start on different CPUs at different times, that is not necessarily the case.
+Refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
+<li>Similarly, the last task on each CPU can be shown running longer than it really did.
+Again, refer to context_switches_view on the Tables menu to understand what data the graph is based upon.</li>
+<li>When the mouse is over a task, the highlighted task might not be visible on the legend without scrolling if the legend does not fit fully in the window</li>
+</ol>
+<h1 id=tables>3. Tables</h1>
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+<p>There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
+will go to the next/previous result in id order, instead of display order.
+"""
+
+# Help window
+
+class HelpWindow(QMdiSubWindow):
+
+ def __init__(self, glb, parent=None):
+ super(HelpWindow, self).__init__(parent)
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setWidget(self.text)
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
+
+# Main window that only displays the help text
+
+class HelpOnlyWindow(QMainWindow):
+
+ def __init__(self, parent=None):
+ super(HelpOnlyWindow, self).__init__(parent)
+
+ self.setMinimumSize(200, 100)
+ self.resize(800, 600)
+ self.setWindowTitle("Exported SQL Viewer Help")
+ self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setCentralWidget(self.text)
+
+# PostgreSQL server version
+
+def PostgreSQLServerVersion(db):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT VERSION()")
+ if query.next():
+ v_str = query.value(0)
+ v_list = v_str.strip().split(" ")
+ if v_list[0] == "PostgreSQL" and v_list[2] == "on":
+ return v_list[1]
+ return v_str
+ return "Unknown"
+
+# SQLite version
+
+def SQLiteVersion(db):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT sqlite_version()")
+ if query.next():
+ return query.value(0)
+ return "Unknown"
+
+# About dialog
+
+class AboutDialog(QDialog):
+
+ def __init__(self, glb, parent=None):
+ super(AboutDialog, self).__init__(parent)
+
+ self.setWindowTitle("About Exported SQL Viewer")
+ self.setMinimumWidth(300)
+
+ pyside_version = "1" if pyside_version_1 else "2"
+
+ text = "<pre>"
+ text += "Python version: " + sys.version.split(" ")[0] + "\n"
+ text += "PySide version: " + pyside_version + "\n"
+ text += "Qt version: " + qVersion() + "\n"
+ if glb.dbref.is_sqlite3:
+ text += "SQLite version: " + SQLiteVersion(glb.db) + "\n"
+ else:
+ text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n"
+ text += "</pre>"
+
+ self.text = QTextBrowser()
+ self.text.setHtml(text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.vbox = QVBoxLayout()
+ self.vbox.addWidget(self.text)
+
+ self.setLayout(self.vbox)
+
+# Font resize
+
+def ResizeFont(widget, diff):
+ font = widget.font()
+ sz = font.pointSize()
+ font.setPointSize(sz + diff)
+ widget.setFont(font)
+
+def ShrinkFont(widget):
+ ResizeFont(widget, -1)
+
+def EnlargeFont(widget):
+ ResizeFont(widget, 1)
+
+# Unique name for sub-windows
+
+def NumberedWindowName(name, nr):
+ if nr > 1:
+ name += " <" + str(nr) + ">"
+ return name
+
+def UniqueSubWindowName(mdi_area, name):
+ nr = 1
+ while True:
+ unique_name = NumberedWindowName(name, nr)
+ ok = True
+ for sub_window in mdi_area.subWindowList():
+ if sub_window.name == unique_name:
+ ok = False
+ break
+ if ok:
+ return unique_name
+ nr += 1
+
+# Add a sub-window
+
+def AddSubWindow(mdi_area, sub_window, name):
+ unique_name = UniqueSubWindowName(mdi_area, name)
+ sub_window.setMinimumSize(200, 100)
+ sub_window.resize(800, 600)
+ sub_window.setWindowTitle(unique_name)
+ sub_window.setAttribute(Qt.WA_DeleteOnClose)
+ sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
+ sub_window.name = unique_name
+ mdi_area.addSubWindow(sub_window)
+ sub_window.show()
+
+# Main window
+
+class MainWindow(QMainWindow):
+
+ def __init__(self, glb, parent=None):
+ super(MainWindow, self).__init__(parent)
+
+ self.glb = glb
+
+ self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
+ self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
+ self.setMinimumSize(200, 100)
+
+ self.mdi_area = QMdiArea()
+ self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
+ self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
+
+ self.setCentralWidget(self.mdi_area)
+
+ menu = self.menuBar()
+
+ file_menu = menu.addMenu("&File")
+ file_menu.addAction(CreateExitAction(glb.app, self))
+
+ edit_menu = menu.addMenu("&Edit")
+ edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
+ edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
+ edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
+ edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
+ edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
+ edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
+
+ reports_menu = menu.addMenu("&Reports")
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+
+ if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
+ reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
+
+ self.EventMenu(GetEventList(glb.db), reports_menu)
+
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
+
+ if IsSelectable(glb.db, "context_switches"):
+ charts_menu = menu.addMenu("&Charts")
+ charts_menu.addAction(CreateAction("&Time chart by CPU", "Create a new window displaying time charts by CPU", self.TimeChartByCPU, self))
+
+ self.TableMenu(GetTableList(glb), menu)
+
+ self.window_menu = WindowMenu(self.mdi_area, menu)
+
+ help_menu = menu.addMenu("&Help")
+ help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+ help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
+
+ def Try(self, fn):
+ win = self.mdi_area.activeSubWindow()
+ if win:
+ try:
+ fn(win.view)
+ except:
+ pass
+
+ def CopyToClipboard(self):
+ self.Try(CopyCellsToClipboardHdr)
+
+ def CopyToClipboardCSV(self):
+ self.Try(CopyCellsToClipboardCSV)
+
+ def Find(self):
+ win = self.mdi_area.activeSubWindow()
+ if win:
+ try:
+ win.find_bar.Activate()
+ except:
+ pass
+
+ def FetchMoreRecords(self):
+ win = self.mdi_area.activeSubWindow()
+ if win:
+ try:
+ win.fetch_bar.Activate()
+ except:
+ pass
+
+ def ShrinkFont(self):
+ self.Try(ShrinkFont)
+
+ def EnlargeFont(self):
+ self.Try(EnlargeFont)
+
+ def EventMenu(self, events, reports_menu):
+ branches_events = 0
+ for event in events:
+ event = event.split(":")[0]
+ if event == "branches":
+ branches_events += 1
+ dbid = 0
+ for event in events:
+ dbid += 1
+ event = event.split(":")[0]
+ if event == "branches":
+ label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
+ reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self))
+ label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
+ reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self))
+
+ def TimeChartByCPU(self):
+ TimeChartByCPUWindow(self.glb, self)
+
+ def TableMenu(self, tables, menu):
+ table_menu = menu.addMenu("&Tables")
+ for table in tables:
+ table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self))
+
+ def NewCallGraph(self):
+ CallGraphWindow(self.glb, self)
+
+ def NewCallTree(self):
+ CallTreeWindow(self.glb, self)
+
+ def NewTopCalls(self):
+ dialog = TopCallsDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ TopCallsWindow(self.glb, dialog.report_vars, self)
+
+ def NewBranchView(self, event_id):
+ BranchWindow(self.glb, event_id, ReportVars(), self)
+
+ def NewSelectedBranchView(self, event_id):
+ dialog = SelectedBranchDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ BranchWindow(self.glb, event_id, dialog.report_vars, self)
+
+ def NewTableView(self, table_name):
+ TableWindow(self.glb, table_name, self)
+
+ def Help(self):
+ HelpWindow(self.glb, self)
+
+ def About(self):
+ dialog = AboutDialog(self.glb, self)
+ dialog.exec_()
+
+# XED Disassembler
+
+class xed_state_t(Structure):
+
+ _fields_ = [
+ ("mode", c_int),
+ ("width", c_int)
+ ]
+
+class XEDInstruction():
+
+ def __init__(self, libxed):
+ # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
+ xedd_t = c_byte * 512
+ self.xedd = xedd_t()
+ self.xedp = addressof(self.xedd)
+ libxed.xed_decoded_inst_zero(self.xedp)
+ self.state = xed_state_t()
+ self.statep = addressof(self.state)
+ # Buffer for disassembled instruction text
+ self.buffer = create_string_buffer(256)
+ self.bufferp = addressof(self.buffer)
+
+class LibXED():
+
+ def __init__(self):
+ try:
+ self.libxed = CDLL("libxed.so")
+ except:
+ self.libxed = None
+ if not self.libxed:
+ self.libxed = CDLL("/usr/local/lib/libxed.so")
+
+ self.xed_tables_init = self.libxed.xed_tables_init
+ self.xed_tables_init.restype = None
+ self.xed_tables_init.argtypes = []
+
+ self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
+ self.xed_decoded_inst_zero.restype = None
+ self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
+
+ self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
+ self.xed_operand_values_set_mode.restype = None
+ self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
+
+ self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
+ self.xed_decoded_inst_zero_keep_mode.restype = None
+ self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
+
+ self.xed_decode = self.libxed.xed_decode
+ self.xed_decode.restype = c_int
+ self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
+
+ self.xed_format_context = self.libxed.xed_format_context
+ self.xed_format_context.restype = c_uint
+ self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
+
+ self.xed_tables_init()
+
+ def Instruction(self):
+ return XEDInstruction(self)
+
+ def SetMode(self, inst, mode):
+ if mode:
+ inst.state.mode = 4 # 32-bit
+ inst.state.width = 4 # 4 bytes
+ else:
+ inst.state.mode = 1 # 64-bit
+ inst.state.width = 8 # 8 bytes
+ self.xed_operand_values_set_mode(inst.xedp, inst.statep)
+
+ def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
+ self.xed_decoded_inst_zero_keep_mode(inst.xedp)
+ err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
+ if err:
+ return 0, ""
+ # Use AT&T mode (2), alternative is Intel (3)
+ ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
+ if not ok:
+ return 0, ""
+ if sys.version_info[0] == 2:
+ result = inst.buffer.value
+ else:
+ result = inst.buffer.value.decode()
+ # Return instruction length and the disassembled instruction text
+ # For now, assume the length is in byte 166
+ return inst.xedd[166], result
+
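+# A minimal illustrative sketch of driving LibXED directly (the viewer calls
+# DisassembleOne from its branch display code elsewhere in this script). The
+# bytes below are an example x86-64 instruction, not anything read from a
+# database:
+#
+#   libxed = LibXED()
+#   inst = libxed.Instruction()
+#   libxed.SetMode(inst, 0)  # select 64-bit mode
+#   buf = create_string_buffer(b"\x48\x89\xe5")  # mov %rsp,%rbp
+#   cnt, text = libxed.DisassembleOne(inst, addressof(buf), 3, 0x1000)
+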
+def TryOpen(file_name):
+ try:
+ return open(file_name, "rb")
+ except:
+ return None
+
+def Is64Bit(f):
+ # Default to the host pointer size when the file format is not recognized
+ result = sizeof(c_void_p) == 8
+ # ELF support only
+ pos = f.tell()
+ f.seek(0)
+ header = f.read(7)
+ f.seek(pos)
+ magic = header[0:4]
+ if sys.version_info[0] == 2:
+ eclass = ord(header[4])
+ encoding = ord(header[5])
+ version = ord(header[6])
+ else:
+ eclass = header[4]
+ encoding = header[5]
+ version = header[6]
+ # the file is opened in binary mode, so compare against a bytes literal
+ # (b"" is str on Python 2, bytes on Python 3)
+ if magic == b"\x7fELF" and 0 < eclass < 3 and 0 < encoding < 3 and version == 1:
+ result = eclass == 2
+ return result
+
+# Global data
+
+class Glb():
+
+ def __init__(self, dbref, db, dbname):
+ self.dbref = dbref
+ self.db = db
+ self.dbname = dbname
+ self.home_dir = os.path.expanduser("~")
+ self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
+ if self.buildid_dir:
+ self.buildid_dir += "/.build-id/"
+ else:
+ self.buildid_dir = self.home_dir + "/.debug/.build-id/"
+ self.app = None
+ self.mainwindow = None
+ self.instances_to_shutdown_on_exit = weakref.WeakSet()
+ try:
+ self.disassembler = LibXED()
+ self.have_disassembler = True
+ except:
+ self.have_disassembler = False
+ self.host_machine_id = 0
+ self.host_start_time = 0
+ self.host_finish_time = 0
+
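+ # The build ID cache lays files out as e.g.
+ # ~/.debug/.build-id/ab/cdef0123.../elf, where the first two hex digits
+ # of the build ID name the subdirectory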
+ def FileFromBuildId(self, build_id):
+ file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
+ return TryOpen(file_name)
+
+ def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
+ # Assume current machine i.e. no support for virtualization
+ if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
+ file_name = os.getenv("PERF_KCORE")
+ f = TryOpen(file_name) if file_name else None
+ if f:
+ return f
+ # For now, no special handling if long_name is /proc/kcore
+ f = TryOpen(long_name)
+ if f:
+ return f
+ f = self.FileFromBuildId(build_id)
+ if f:
+ return f
+ return None
+
+ def AddInstanceToShutdownOnExit(self, instance):
+ self.instances_to_shutdown_on_exit.add(instance)
+
+ # Shutdown any background processes or threads
+ def ShutdownInstances(self):
+ for x in self.instances_to_shutdown_on_exit:
+ try:
+ x.Shutdown()
+ except:
+ pass
+
+ def GetHostMachineId(self):
+ query = QSqlQuery(self.db)
+ QueryExec(query, "SELECT id FROM machines WHERE pid = -1")
+ if query.next():
+ self.host_machine_id = query.value(0)
+ else:
+ self.host_machine_id = 0
+ return self.host_machine_id
+
+ def HostMachineId(self):
+ if self.host_machine_id:
+ return self.host_machine_id
+ return self.GetHostMachineId()
+
+ def SelectValue(self, sql):
+ query = QSqlQuery(self.db)
+ try:
+ QueryExec(query, sql)
+ except:
+ return None
+ if query.next():
+ return Decimal(query.value(0))
+ return None
+
+ def SwitchesMinTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM context_switches"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id LIMIT 1")
+
+ def SwitchesMaxTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM context_switches"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id DESC LIMIT 1")
+
+ def SamplesMinTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM samples"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id LIMIT 1")
+
+ def SamplesMaxTime(self, machine_id):
+ return self.SelectValue("SELECT time"
+ " FROM samples"
+ " WHERE time != 0 AND machine_id = " + str(machine_id) +
+ " ORDER BY id DESC LIMIT 1")
+
+ def CallsMinTime(self, machine_id):
+ return self.SelectValue("SELECT calls.call_time"
+ " FROM calls"
+ " INNER JOIN threads ON threads.thread_id = calls.thread_id"
+ " WHERE calls.call_time != 0 AND threads.machine_id = " + str(machine_id) +
+ " ORDER BY calls.id LIMIT 1")
+
+ def CallsMaxTime(self, machine_id):
+ return self.SelectValue("SELECT calls.return_time"
+ " FROM calls"
+ " INNER JOIN threads ON threads.thread_id = calls.thread_id"
+ " WHERE calls.return_time != 0 AND threads.machine_id = " + str(machine_id) +
+ " ORDER BY calls.return_time DESC LIMIT 1")
+
+ def GetStartTime(self, machine_id):
+ t0 = self.SwitchesMinTime(machine_id)
+ t1 = self.SamplesMinTime(machine_id)
+ t2 = self.CallsMinTime(machine_id)
+ if t0 is None or (not(t1 is None) and t1 < t0):
+ t0 = t1
+ if t0 is None or (not(t2 is None) and t2 < t0):
+ t0 = t2
+ return t0
+
+ def GetFinishTime(self, machine_id):
+ t0 = self.SwitchesMaxTime(machine_id)
+ t1 = self.SamplesMaxTime(machine_id)
+ t2 = self.CallsMaxTime(machine_id)
+ if t0 is None or (not(t1 is None) and t1 > t0):
+ t0 = t1
+ if t0 is None or (not(t2 is None) and t2 > t0):
+ t0 = t2
+ return t0
+
+ def HostStartTime(self):
+ if self.host_start_time:
+ return self.host_start_time
+ self.host_start_time = self.GetStartTime(self.HostMachineId())
+ return self.host_start_time
+
+ def HostFinishTime(self):
+ if self.host_finish_time:
+ return self.host_finish_time
+ self.host_finish_time = self.GetFinishTime(self.HostMachineId())
+ return self.host_finish_time
+
+ def StartTime(self, machine_id):
+ if machine_id == self.HostMachineId():
+ return self.HostStartTime()
+ return self.GetStartTime(machine_id)
+
+ def FinishTime(self, machine_id):
+ if machine_id == self.HostMachineId():
+ return self.HostFinishTime()
+ return self.GetFinishTime(machine_id)
+
+# Database reference
+
+class DBRef():
+
+ def __init__(self, is_sqlite3, dbname):
+ self.is_sqlite3 = is_sqlite3
+ self.dbname = dbname
+ self.TRUE = "TRUE"
+ self.FALSE = "FALSE"
+ # SQLite prior to version 3.23 does not support TRUE and FALSE
+ if self.is_sqlite3:
+ self.TRUE = "1"
+ self.FALSE = "0"
+
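+ # For PostgreSQL, the database name may be an option string such as
+ # "hostname=localhost port=5432 username=perf dbname=pt_example";
+ # any unrecognized bare word is taken as the database name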
+ def Open(self, connection_name):
+ dbname = self.dbname
+ if self.is_sqlite3:
+ db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
+ else:
+ db = QSqlDatabase.addDatabase("QPSQL", connection_name)
+ opts = dbname.split()
+ for opt in opts:
+ if "=" in opt:
+ opt = opt.split("=")
+ if opt[0] == "hostname":
+ db.setHostName(opt[1])
+ elif opt[0] == "port":
+ db.setPort(int(opt[1]))
+ elif opt[0] == "username":
+ db.setUserName(opt[1])
+ elif opt[0] == "password":
+ db.setPassword(opt[1])
+ elif opt[0] == "dbname":
+ dbname = opt[1]
+ else:
+ dbname = opt
+
+ db.setDatabaseName(dbname)
+ if not db.open():
+ raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+ return db, dbname
+
+# Main
+
+def Main():
+ usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \
+ " or: exported-sql-viewer.py --help-only"
+ ap = argparse.ArgumentParser(usage = usage_str, add_help = False)
+ ap.add_argument("--pyside-version-1", action='store_true')
+ ap.add_argument("dbname", nargs="?")
+ ap.add_argument("--help-only", action='store_true')
+ args = ap.parse_args()
+
+ if args.help_only:
+ app = QApplication(sys.argv)
+ mainwindow = HelpOnlyWindow()
+ mainwindow.show()
+ err = app.exec_()
+ sys.exit(err)
+
+ dbname = args.dbname
+ if dbname is None:
+ ap.print_usage()
+ print("Too few arguments")
+ sys.exit(1)
+
+ is_sqlite3 = False
+ try:
+ f = open(dbname, "rb")
+ if f.read(15) == b'SQLite format 3':
+ is_sqlite3 = True
+ f.close()
+ except:
+ pass
+
+ dbref = DBRef(is_sqlite3, dbname)
+ db, dbname = dbref.Open("main")
+ glb = Glb(dbref, db, dbname)
+ app = QApplication(sys.argv)
+ glb.app = app
+ mainwindow = MainWindow(glb)
+ glb.mainwindow = mainwindow
+ mainwindow.show()
+ err = app.exec_()
+ glb.ShutdownInstances()
+ db.close()
+ sys.exit(err)
+
+if __name__ == "__main__":
+ Main()
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
new file mode 100644
index 000000000..310efe5e7
--- /dev/null
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -0,0 +1,79 @@
+# failed system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals, broken down by pid.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
+
+for_comm = None
+for_pid = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ try:
+ for_pid = int(sys.argv[1])
+ except:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ print("Press control+C to stop and show the summary")
+
+def trace_end():
+ print_error_totals()
+
+def raw_syscalls__sys_exit(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, ret):
+ if (for_comm and common_comm != for_comm) or \
+ (for_pid and common_pid != for_pid):
+ return
+
+ if ret < 0:
+ try:
+ syscalls[common_comm][common_pid][id][ret] += 1
+ except TypeError:
+ syscalls[common_comm][common_pid][id][ret] = 1
+
+def syscalls__sys_exit(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, ret):
+ common_callchain = [] # this tracepoint form has no callchain argument
+ raw_syscalls__sys_exit(**locals())
+
+def print_error_totals():
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/flamegraph.py b/tools/perf/scripts/python/flamegraph.py
new file mode 100755
index 000000000..65780013f
--- /dev/null
+++ b/tools/perf/scripts/python/flamegraph.py
@@ -0,0 +1,126 @@
+# flamegraph.py - create flame graphs from perf samples
+# SPDX-License-Identifier: GPL-2.0
+#
+# Usage:
+#
+# perf record -a -g -F 99 sleep 60
+# perf script report flamegraph
+#
+# Combined:
+#
+# perf script flamegraph -a -F 99 sleep 60
+#
+# Written by Andreas Gerstmayr <agerstmayr@redhat.com>
+# Flame Graphs invented by Brendan Gregg <bgregg@netflix.com>
+# Works in tandem with d3-flame-graph by Martin Spier <mspier@netflix.com>
+
+from __future__ import print_function
+import sys
+import os
+import io
+import argparse
+import json
+
+
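+# Each node serializes compactly as {"n": name, "l": libtype, "v": value,
+# "c": children}, the shape consumed by the flame graph template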
+class Node:
+ def __init__(self, name, libtype=""):
+ self.name = name
+ self.libtype = libtype
+ self.value = 0
+ self.children = []
+
+ def toJSON(self):
+ return {
+ "n": self.name,
+ "l": self.libtype,
+ "v": self.value,
+ "c": self.children
+ }
+
+
+class FlameGraphCLI:
+ def __init__(self, args):
+ self.args = args
+ self.stack = Node("root")
+
+ if self.args.format == "html" and \
+ not os.path.isfile(self.args.template):
+ print("Flame Graph template {} does not exist. Please install "
+ "the js-d3-flame-graph (RPM) or libjs-d3-flame-graph (deb) "
+ "package, specify an existing flame graph template "
+ "(--template PATH) or another output format "
+ "(--format FORMAT).".format(self.args.template),
+ file=sys.stderr)
+ sys.exit(1)
+
+ def find_or_create_node(self, node, name, dso):
+ libtype = "kernel" if dso == "[kernel.kallsyms]" else ""
+ if name is None:
+ name = "[unknown]"
+
+ for child in node.children:
+ if child.name == name and child.libtype == libtype:
+ return child
+
+ child = Node(name, libtype)
+ node.children.append(child)
+ return child
+
+ def process_event(self, event):
+ node = self.find_or_create_node(self.stack, event["comm"], None)
+ if "callchain" in event:
+ for entry in reversed(event['callchain']):
+ node = self.find_or_create_node(
+ node, entry.get("sym", {}).get("name"), event.get("dso"))
+ else:
+ node = self.find_or_create_node(
+ node, entry.get("symbol"), event.get("dso"))
+ node.value += 1
+
+ def trace_end(self):
+ json_str = json.dumps(self.stack, default=lambda x: x.toJSON())
+
+ if self.args.format == "html":
+ try:
+ with io.open(self.args.template, encoding="utf-8") as f:
+ output_str = f.read().replace("/** @flamegraph_json **/",
+ json_str)
+ except IOError as e:
+ print("Error reading template file: {}".format(e), file=sys.stderr)
+ sys.exit(1)
+ output_fn = self.args.output or "flamegraph.html"
+ else:
+ output_str = json_str
+ output_fn = self.args.output or "stacks.json"
+
+ if output_fn == "-":
+ with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
+ out.write(output_str)
+ else:
+ print("dumping data to {}".format(output_fn))
+ try:
+ with io.open(output_fn, "w", encoding="utf-8") as out:
+ out.write(output_str)
+ except IOError as e:
+ print("Error writing output file: {}".format(e), file=sys.stderr)
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Create flame graphs.")
+ parser.add_argument("-f", "--format",
+ default="html", choices=["json", "html"],
+ help="output file format")
+ parser.add_argument("-o", "--output",
+ help="output file name")
+ parser.add_argument("--template",
+ default="/usr/share/d3-flame-graph/d3-flamegraph-base.html",
+ help="path to flamegraph HTML template")
+ parser.add_argument("-i", "--input",
+ help=argparse.SUPPRESS)
+
+ args = parser.parse_args()
+ cli = FlameGraphCLI(args)
+
+ process_event = cli.process_event
+ trace_end = cli.trace_end
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
new file mode 100644
index 000000000..7e884d46f
--- /dev/null
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -0,0 +1,57 @@
+# futex contention
+# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Translation of:
+#
+# http://sourceware.org/systemtap/wiki/WSFutexContention
+#
+# to perf python scripting.
+#
+# Measures futex contention
+
+from __future__ import print_function
+
+import os
+import sys
+sys.path.append(os.environ['PERF_EXEC_PATH'] +
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+from Util import *
+
+process_names = {} # long-lived pid-to-execname mapping
+thread_thislock = {}
+thread_blocktime = {}
+
+lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
+
+
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
+ nr, uaddr, op, val, utime, uaddr2, val3):
+ cmd = op & FUTEX_CMD_MASK
+ if cmd != FUTEX_WAIT:
+ return # we don't care about originators of WAKE events
+
+ process_names[tid] = comm
+ thread_thislock[tid] = uaddr
+ thread_blocktime[tid] = nsecs(s, ns)
+
+
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
+ nr, ret):
+ if tid in thread_blocktime:
+ elapsed = nsecs(s, ns) - thread_blocktime[tid]
+ add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
+ del thread_blocktime[tid]
+ del thread_thislock[tid]
+
+
+def trace_begin():
+ print("Press control+C to stop and show the summary")
+
+
+def trace_end():
+ for (tid, lock) in lock_waits:
+ min_ns, max_ns, avg_ns, count = lock_waits[tid, lock]
+ print("%s[%d] lock %x contended %d times, %d avg ns [max: %d ns, min %d ns]" %
+ (process_names[tid], tid, lock, count, avg_ns, max_ns, min_ns))
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
new file mode 100644
index 000000000..a73847c8f
--- /dev/null
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -0,0 +1,134 @@
+# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+from __future__ import print_function
+
+import os
+import sys
+import struct
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+def trace_begin():
+ print("Intel PT Power Events and PTWRITE")
+
+def trace_end():
+ print("End")
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ print(' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())]))
+
+def print_ptwrite(raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ flags = data[0]
+ payload = data[1]
+ exact_ip = flags & 1
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
+
+def print_cbr(raw_buf):
+ data = struct.unpack_from("<BBBBII", raw_buf)
+ cbr = data[0]
+ f = (data[4] + 500) // 1000
+ p = ((cbr * 1000 // data[2]) + 5) // 10
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
+
+def print_mwait(raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ hints = payload & 0xff
+ extensions = (payload >> 32) & 0x3
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
+
+def print_pwre(raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ hw = (payload >> 7) & 1
+ cstate = (payload >> 12) & 0xf
+ subcstate = (payload >> 8) & 0xf
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')
+
+def print_exstop(raw_buf):
+ data = struct.unpack_from("<I", raw_buf)
+ flags = data[0]
+ exact_ip = flags & 1
+ print("IP: %u" % (exact_ip), end=' ')
+
+def print_pwrx(raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ payload = data[1]
+ deepest_cstate = payload & 0xf
+ last_cstate = (payload >> 4) & 0xf
+ wake_reason = (payload >> 8) & 0xf
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')
+
+def print_common_start(comm, sample, name):
+ ts = sample["time"]
+ cpu = sample["cpu"]
+ pid = sample["pid"]
+ tid = sample["tid"]
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts // 1000000000, ts % 1000000000, name),
+ end=' ')
+
+def print_common_ip(sample, symbol, dso):
+ ip = sample["ip"]
+ print("%16x %s (%s)" % (ip, symbol, dso))
+
+def process_event(param_dict):
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if "dso" in param_dict:
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if "symbol" in param_dict:
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"
+
+ if name == "ptwrite":
+ print_common_start(comm, sample, name)
+ print_ptwrite(raw_buf)
+ print_common_ip(sample, symbol, dso)
+ elif name == "cbr":
+ print_common_start(comm, sample, name)
+ print_cbr(raw_buf)
+ print_common_ip(sample, symbol, dso)
+ elif name == "mwait":
+ print_common_start(comm, sample, name)
+ print_mwait(raw_buf)
+ print_common_ip(sample, symbol, dso)
+ elif name == "pwre":
+ print_common_start(comm, sample, name)
+ print_pwre(raw_buf)
+ print_common_ip(sample, symbol, dso)
+ elif name == "exstop":
+ print_common_start(comm, sample, name)
+ print_exstop(raw_buf)
+ print_common_ip(sample, symbol, dso)
+ elif name == "pwrx":
+ print_common_start(comm, sample, name)
+ print_pwrx(raw_buf)
+ print_common_ip(sample, symbol, dso)
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
new file mode 100644
index 000000000..1f332e72b
--- /dev/null
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -0,0 +1,100 @@
+# mem-phys-addr.py: Resolve physical address samples
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2018, Intel Corporation.
+
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import struct
+import re
+import bisect
+import collections
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+#physical address ranges for System RAM
+system_ram = []
+#physical address ranges for Persistent Memory
+pmem = []
+#file object for proc iomem
+f = None
+#Count for each type of memory
+load_mem_type_cnt = collections.Counter()
+#perf event name
+event_name = None
+
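+# A /proc/iomem line looks like, for example:
+#   100000000-25fffffff : System RAM
+# each matching region contributes its start and end to a sorted flat list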
+def parse_iomem():
+ global f
+ f = open('/proc/iomem', 'r')
+ for j in f:
+ m = re.split('-|:', j, 2)
+ if m[2].strip() == 'System RAM':
+ system_ram.append(int(m[0], 16))
+ system_ram.append(int(m[1], 16))
+ if m[2].strip() == 'Persistent Memory':
+ pmem.append(int(m[0], 16))
+ pmem.append(int(m[1], 16))
+
+def print_memory_type():
+ print("Event: %s" % (event_name))
+ print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
+ print("%-40s %10s %10s\n" % ("----------------------------------------",
+ "-----------", "-----------"),
+ end='');
+ total = sum(load_mem_type_cnt.values())
+ for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')
+
+def trace_begin():
+ parse_iomem()
+
+def trace_end():
+ print_memory_type()
+ f.close()
+
+def is_system_ram(phys_addr):
+ #/proc/iomem is sorted
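+ # ranges are stored flat as [start0, end0, start1, end1, ...]; an odd
+ # insertion point means phys_addr falls inside one of the ranges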
+ position = bisect.bisect(system_ram, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def is_persistent_mem(phys_addr):
+ position = bisect.bisect(pmem, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def find_memory_type(phys_addr):
+ if phys_addr == 0:
+ return "N/A"
+ if is_system_ram(phys_addr):
+ return "System RAM"
+
+ if is_persistent_mem(phys_addr):
+ return "Persistent Memory"
+
+ #slow path, search all
+ f.seek(0, 0)
+ for j in f:
+ m = re.split('-|:', j, 2)
+ if int(m[0], 16) <= phys_addr <= int(m[1], 16):
+ return m[2].strip()
+ return "N/A"
+
+def process_event(param_dict):
+ name = param_dict["ev_name"]
+ sample = param_dict["sample"]
+ phys_addr = sample["phys_addr"]
+
+ global event_name
+ if event_name is None:
+ event_name = name
+ load_mem_type_cnt[find_memory_type(phys_addr)] += 1
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
new file mode 100755
index 000000000..101059971
--- /dev/null
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -0,0 +1,78 @@
+# Monitor the system for dropped packets and produce a report of drop locations and counts
+# SPDX-License-Identifier: GPL-2.0
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+drop_log = {}
+kallsyms = []
+
+def get_kallsyms_table():
+ global kallsyms
+
+ try:
+ f = open("/proc/kallsyms", "r")
+ except:
+ return
+
+ for line in f:
+ loc = int(line.split()[0], 16)
+ name = line.split()[2]
+ kallsyms.append((loc, name))
+ kallsyms.sort()
+
+def get_sym(sloc):
+ loc = int(sloc)
+
+ # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
+ # kallsyms[i][0] > loc for all end <= i < len(kallsyms)
+ start, end = -1, len(kallsyms)
+ while end != start + 1:
+ pivot = (start + end) // 2
+ if loc < kallsyms[pivot][0]:
+ end = pivot
+ else:
+ start = pivot
+
+ # Now (start == -1 or kallsyms[start][0] <= loc)
+ # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
+ if start >= 0:
+ symloc, name = kallsyms[start]
+ return (name, loc - symloc)
+ else:
+ return (None, 0)
+
+def print_drop_table():
+ print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
+ for i in drop_log.keys():
+ (sym, off) = get_sym(i)
+ if sym is None:
+ sym = i
+ print("%25s %25s %25s" % (sym, off, drop_log[i]))
+
+
+def trace_begin():
+ print("Starting trace (Ctrl-C to dump results)")
+
+def trace_end():
+ print("Gathering kallsyms data")
+ get_kallsyms_table()
+ print_drop_table()
+
+# called from perf, when it finds a corresponding event
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
+ skbaddr, location, protocol):
+ slocation = str(location)
+ try:
+ drop_log[slocation] = drop_log[slocation] + 1
+ except:
+ drop_log[slocation] = 1
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
new file mode 100644
index 000000000..ea0c8b90a
--- /dev/null
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -0,0 +1,472 @@
+# Display the processing flow of packets and the time spent in each stage.
+# SPDX-License-Identifier: GPL-2.0
+# It helps to investigate networking and network-device behavior.
+#
+# options
+# tx: show only tx chart
+# rx: show only rx chart
+# dev=: show only thing related to specified device
+# debug: work with debug mode. It shows buffer status.
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+from functools import cmp_to_key
+
+all_event_list = [] # all tracepoint events related to this script
+irq_dic = {} # key is cpu; value is a list which stacks irqs
+ # which raise the NET_RX softirq
+net_rx_dic = {} # key is cpu; value includes the entry time of the NET_RX
+ # softirq and a list which stacks receive events
+receive_hunk_list = [] # a list of sequences of receive events
+rx_skb_list = [] # received packet list for matching
+ # skb_copy_datagram_iovec
+
+buffer_budget = 65536 # the budget of rx_skb_list, tx_queue_list and
+ # tx_xmit_list
+of_count_rx_skb_list = 0 # overflow count
+
+tx_queue_list = [] # list of packets which pass through dev_queue_xmit
+of_count_tx_queue_list = 0 # overflow count
+
+tx_xmit_list = [] # list of packets which pass through dev_hard_start_xmit
+of_count_tx_xmit_list = 0 # overflow count
+
+tx_free_list = [] # list of packets which are freed
+
+# options
+show_tx = 0
+show_rx = 0
+dev = 0 # store the name of the device specified by the "dev=" option
+debug = 0
+
+# indices of event_info tuple
+EINFO_IDX_NAME= 0
+EINFO_IDX_CONTEXT=1
+EINFO_IDX_CPU= 2
+EINFO_IDX_TIME= 3
+EINFO_IDX_PID= 4
+EINFO_IDX_COMM= 5
+
+# Calculate a time interval(msec) from src(nsec) to dst(nsec)
+def diff_msec(src, dst):
+ return (dst - src) / 1000000.0
+
+# Display a process of transmitting a packet
+def print_transmit(hunk):
+ if dev != 0 and hunk['dev'].find(dev) < 0:
+ return
+ print("%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" %
+ (hunk['dev'], hunk['len'],
+ nsecs_secs(hunk['queue_t']),
+ nsecs_nsecs(hunk['queue_t']) // 1000,
+ diff_msec(hunk['queue_t'], hunk['xmit_t']),
+ diff_msec(hunk['xmit_t'], hunk['free_t'])))
+
+# Format for displaying rx packet processing
+PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
+PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
+PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
+PF_JOINT= " |"
+PF_WJOINT= " | |"
+PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
+PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
+PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
+PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
+PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
+
+# Display the processing of received packets and the interrupts associated
+# with a NET_RX softirq
+def print_receive(hunk):
+ show_hunk = 0
+ irq_list = hunk['irq_list']
+ cpu = irq_list[0]['cpu']
+ base_t = irq_list[0]['irq_ent_t']
+ # check if this hunk should be shown
+ if dev != 0:
+ for i in range(len(irq_list)):
+ if irq_list[i]['name'].find(dev) >= 0:
+ show_hunk = 1
+ break
+ else:
+ show_hunk = 1
+ if show_hunk == 0:
+ return
+
+ print("%d.%06dsec cpu=%d" %
+ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu))
+ for i in range(len(irq_list)):
+ print(PF_IRQ_ENTRY %
+ (diff_msec(base_t, irq_list[i]['irq_ent_t']),
+ irq_list[i]['irq'], irq_list[i]['name']))
+ print(PF_JOINT)
+ irq_event_list = irq_list[i]['event_list']
+ for j in range(len(irq_event_list)):
+ irq_event = irq_event_list[j]
+ if irq_event['event'] == 'netif_rx':
+ print(PF_NET_RX %
+ (diff_msec(base_t, irq_event['time']),
+ irq_event['skbaddr']))
+ print(PF_JOINT)
+ print(PF_SOFT_ENTRY %
+ diff_msec(base_t, hunk['sirq_ent_t']))
+ print(PF_JOINT)
+ event_list = hunk['event_list']
+ for i in range(len(event_list)):
+ event = event_list[i]
+ if event['event_name'] == 'napi_poll':
+ print(PF_NAPI_POLL %
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
+ if i == len(event_list) - 1:
+ print("")
+ else:
+ print(PF_JOINT)
+ else:
+ print(PF_NET_RECV %
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
+ event['len']))
+ if 'comm' in event.keys():
+ print(PF_WJOINT)
+ print(PF_CPY_DGRAM %
+ (diff_msec(base_t, event['comm_t']),
+ event['pid'], event['comm']))
+ elif 'handle' in event.keys():
+ print(PF_WJOINT)
+ if event['handle'] == "kfree_skb":
+ print(PF_KFREE_SKB %
+ (diff_msec(base_t,
+ event['comm_t']),
+ event['location']))
+ elif event['handle'] == "consume_skb":
+ print(PF_CONS_SKB %
+ diff_msec(base_t,
+ event['comm_t']))
+ print(PF_JOINT)
+
+def trace_begin():
+ global show_tx
+ global show_rx
+ global dev
+ global debug
+
+ for i in range(len(sys.argv)):
+ if i == 0:
+ continue
+ arg = sys.argv[i]
+ if arg == 'tx':
+ show_tx = 1
+ elif arg == 'rx':
+ show_rx = 1
+ elif arg.startswith('dev='):
+ dev = arg[4:]
+ elif arg == 'debug':
+ debug = 1
+ if show_tx == 0 and show_rx == 0:
+ show_tx = 1
+ show_rx = 1
+
+def trace_end():
+ # order all events in time
+ all_event_list.sort(key=cmp_to_key(lambda a, b: a[EINFO_IDX_TIME] - b[EINFO_IDX_TIME]))
+ # process all events
+ for i in range(len(all_event_list)):
+ event_info = all_event_list[i]
+ name = event_info[EINFO_IDX_NAME]
+ if name == 'irq__softirq_exit':
+ handle_irq_softirq_exit(event_info)
+ elif name == 'irq__softirq_entry':
+ handle_irq_softirq_entry(event_info)
+ elif name == 'irq__softirq_raise':
+ handle_irq_softirq_raise(event_info)
+ elif name == 'irq__irq_handler_entry':
+ handle_irq_handler_entry(event_info)
+ elif name == 'irq__irq_handler_exit':
+ handle_irq_handler_exit(event_info)
+ elif name == 'napi__napi_poll':
+ handle_napi_poll(event_info)
+ elif name == 'net__netif_receive_skb':
+ handle_netif_receive_skb(event_info)
+ elif name == 'net__netif_rx':
+ handle_netif_rx(event_info)
+ elif name == 'skb__skb_copy_datagram_iovec':
+ handle_skb_copy_datagram_iovec(event_info)
+ elif name == 'net__net_dev_queue':
+ handle_net_dev_queue(event_info)
+ elif name == 'net__net_dev_xmit':
+ handle_net_dev_xmit(event_info)
+ elif name == 'skb__kfree_skb':
+ handle_kfree_skb(event_info)
+ elif name == 'skb__consume_skb':
+ handle_consume_skb(event_info)
+ # display receive hunks
+ if show_rx:
+ for i in range(len(receive_hunk_list)):
+ print_receive(receive_hunk_list[i])
+ # display transmit hunks
+ if show_tx:
+ print(" dev len Qdisc "
+ " netdevice free")
+ for i in range(len(tx_free_list)):
+ print_transmit(tx_free_list[i])
+ if debug:
+ print("debug buffer status")
+ print("----------------------------")
+ print("xmit Qdisc:remain:%d overflow:%d" %
+ (len(tx_queue_list), of_count_tx_queue_list))
+ print("xmit netdevice:remain:%d overflow:%d" %
+ (len(tx_xmit_list), of_count_tx_xmit_list))
+ print("receive:remain:%d overflow:%d" %
+ (len(rx_skb_list), of_count_rx_skb_list))
+
+# called from perf, when it finds a corresponding event
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
+ if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+ return
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+ all_event_list.append(event_info)
+
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
+ if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+ return
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+ all_event_list.append(event_info)
+
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
+ if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+ return
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+ all_event_list.append(event_info)
+
+def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
+ callchain, irq, irq_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ irq, irq_name)
+ all_event_list.append(event_info)
+
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
+ all_event_list.append(event_info)
+
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
+ dev_name, work=None, budget=None):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ napi, dev_name, work, budget)
+ all_event_list.append(event_info)
+
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
+ skblen, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, dev_name)
+ all_event_list.append(event_info)
+
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
+ skblen, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, dev_name)
+ all_event_list.append(event_info)
+
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
+ skbaddr, skblen, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, dev_name)
+ all_event_list.append(event_info)
+
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
+ skbaddr, skblen, rc, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, rc, dev_name)
+ all_event_list.append(event_info)
+
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
+ skbaddr, protocol, location):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, protocol, location)
+ all_event_list.append(event_info)
+
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr)
+ all_event_list.append(event_info)
+
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
+ skbaddr, skblen):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen)
+ all_event_list.append(event_info)
+
+def handle_irq_handler_entry(event_info):
+ (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
+ if cpu not in irq_dic.keys():
+ irq_dic[cpu] = []
+ irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
+ irq_dic[cpu].append(irq_record)
+
+def handle_irq_handler_exit(event_info):
+ (name, context, cpu, time, pid, comm, irq, ret) = event_info
+ if cpu not in irq_dic.keys():
+ return
+ irq_record = irq_dic[cpu].pop()
+ if irq != irq_record['irq']:
+ return
+ irq_record.update({'irq_ext_t':time})
+ # if an irq doesn't include NET_RX softirq, drop.
+ if 'event_list' in irq_record.keys():
+ irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_raise(event_info):
+ (name, context, cpu, time, pid, comm, vec) = event_info
+ if cpu not in irq_dic.keys() \
+ or len(irq_dic[cpu]) == 0:
+ return
+ irq_record = irq_dic[cpu].pop()
+ if 'event_list' in irq_record.keys():
+ irq_event_list = irq_record['event_list']
+ else:
+ irq_event_list = []
+ irq_event_list.append({'time':time, 'event':'sirq_raise'})
+ irq_record.update({'event_list':irq_event_list})
+ irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_entry(event_info):
+ (name, context, cpu, time, pid, comm, vec) = event_info
+ net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
+
+def handle_irq_softirq_exit(event_info):
+ (name, context, cpu, time, pid, comm, vec) = event_info
+ irq_list = []
+ event_list = 0
+ if cpu in irq_dic.keys():
+ irq_list = irq_dic[cpu]
+ del irq_dic[cpu]
+ if cpu in net_rx_dic.keys():
+ sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
+ event_list = net_rx_dic[cpu]['event_list']
+ del net_rx_dic[cpu]
+ if irq_list == [] or event_list == 0:
+ return
+ rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
+ 'irq_list':irq_list, 'event_list':event_list}
+ # merge information related to a NET_RX softirq
+ receive_hunk_list.append(rec_data)
+
+def handle_napi_poll(event_info):
+ (name, context, cpu, time, pid, comm, napi, dev_name,
+ work, budget) = event_info
+ if cpu in net_rx_dic.keys():
+ event_list = net_rx_dic[cpu]['event_list']
+ rec_data = {'event_name':'napi_poll',
+ 'dev':dev_name, 'event_t':time,
+ 'work':work, 'budget':budget}
+ event_list.append(rec_data)
+
+def handle_netif_rx(event_info):
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, dev_name) = event_info
+ if cpu not in irq_dic.keys() \
+ or len(irq_dic[cpu]) == 0:
+ return
+ irq_record = irq_dic[cpu].pop()
+ if 'event_list' in irq_record.keys():
+ irq_event_list = irq_record['event_list']
+ else:
+ irq_event_list = []
+ irq_event_list.append({'time':time, 'event':'netif_rx',
+ 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
+ irq_record.update({'event_list':irq_event_list})
+ irq_dic[cpu].append(irq_record)
+
+def handle_netif_receive_skb(event_info):
+ global of_count_rx_skb_list
+
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, dev_name) = event_info
+ if cpu in net_rx_dic.keys():
+ rec_data = {'event_name':'netif_receive_skb',
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ event_list = net_rx_dic[cpu]['event_list']
+ event_list.append(rec_data)
+ rx_skb_list.insert(0, rec_data)
+ if len(rx_skb_list) > buffer_budget:
+ rx_skb_list.pop()
+ of_count_rx_skb_list += 1
+
+def handle_net_dev_queue(event_info):
+ global of_count_tx_queue_list
+
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, dev_name) = event_info
+ skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
+ tx_queue_list.insert(0, skb)
+ if len(tx_queue_list) > buffer_budget:
+ tx_queue_list.pop()
+ of_count_tx_queue_list += 1
+
+def handle_net_dev_xmit(event_info):
+ global of_count_tx_xmit_list
+
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, rc, dev_name) = event_info
+ if rc == 0: # NETDEV_TX_OK
+ for i in range(len(tx_queue_list)):
+ skb = tx_queue_list[i]
+ if skb['skbaddr'] == skbaddr:
+ skb['xmit_t'] = time
+ tx_xmit_list.insert(0, skb)
+ del tx_queue_list[i]
+ if len(tx_xmit_list) > buffer_budget:
+ tx_xmit_list.pop()
+ of_count_tx_xmit_list += 1
+ return
+
+def handle_kfree_skb(event_info):
+ (name, context, cpu, time, pid, comm,
+ skbaddr, protocol, location) = event_info
+ for i in range(len(tx_queue_list)):
+ skb = tx_queue_list[i]
+ if skb['skbaddr'] == skbaddr:
+ del tx_queue_list[i]
+ return
+ for i in range(len(tx_xmit_list)):
+ skb = tx_xmit_list[i]
+ if skb['skbaddr'] == skbaddr:
+ skb['free_t'] = time
+ tx_free_list.append(skb)
+ del tx_xmit_list[i]
+ return
+ for i in range(len(rx_skb_list)):
+ rec_data = rx_skb_list[i]
+ if rec_data['skbaddr'] == skbaddr:
+ rec_data.update({'handle':"kfree_skb",
+ 'comm':comm, 'pid':pid, 'comm_t':time})
+ del rx_skb_list[i]
+ return
+
+def handle_consume_skb(event_info):
+ (name, context, cpu, time, pid, comm, skbaddr) = event_info
+ for i in range(len(tx_xmit_list)):
+ skb = tx_xmit_list[i]
+ if skb['skbaddr'] == skbaddr:
+ skb['free_t'] = time
+ tx_free_list.append(skb)
+ del tx_xmit_list[i]
+ return
+
+def handle_skb_copy_datagram_iovec(event_info):
+ (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
+ for i in range(len(rx_skb_list)):
+ rec_data = rx_skb_list[i]
+ if skbaddr == rec_data['skbaddr']:
+ rec_data.update({'handle':"skb_copy_datagram_iovec",
+ 'comm':comm, 'pid':pid, 'comm_t':time})
+ del rx_skb_list[i]
+ return
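
The tx side of netdev-times.py matches one packet across three tracepoints (net_dev_queue, net_dev_xmit, then kfree_skb or consume_skb) by its skbaddr, and caps every pending list at buffer_budget so a missed free cannot grow memory without bound. A minimal standalone sketch of that bounded matching, with invented skbaddr values and timestamps:

    buffer_budget = 4          # tiny cap for illustration; the script uses 65536

    tx_queue_list = []         # packets seen at dev_queue_xmit time
    tx_xmit_list = []          # packets seen at dev_hard_start_xmit time

    def queue(skbaddr, t):
        tx_queue_list.insert(0, {'skbaddr': skbaddr, 'queue_t': t})
        if len(tx_queue_list) > buffer_budget:
            tx_queue_list.pop()        # overflow: drop the oldest entry

    def xmit(skbaddr, t):
        for i, skb in enumerate(tx_queue_list):
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = t      # promote the packet to the next stage
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                return

    queue(0xdead, 0)
    xmit(0xdead, 150000)
    print(tx_xmit_list)
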
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
new file mode 100644
index 000000000..8b78dc790
--- /dev/null
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -0,0 +1,202 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Ravi Bangoria, IBM Corporation
+#
+# Hypervisor call statistics
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+# output: {
+# opcode: {
+# 'min': minimum time nsec
+# 'max': maximum time nsec
+# 'time': average time nsec
+# 'cnt': counter
+# } ...
+# }
+output = {}
+
+# d_enter: {
+# cpu: {
+# opcode: nsec
+# } ...
+# }
+d_enter = {}
+
+hcall_table = {
+ 4: 'H_REMOVE',
+ 8: 'H_ENTER',
+ 12: 'H_READ',
+ 16: 'H_CLEAR_MOD',
+ 20: 'H_CLEAR_REF',
+ 24: 'H_PROTECT',
+ 28: 'H_GET_TCE',
+ 32: 'H_PUT_TCE',
+ 36: 'H_SET_SPRG0',
+ 40: 'H_SET_DABR',
+ 44: 'H_PAGE_INIT',
+ 48: 'H_SET_ASR',
+ 52: 'H_ASR_ON',
+ 56: 'H_ASR_OFF',
+ 60: 'H_LOGICAL_CI_LOAD',
+ 64: 'H_LOGICAL_CI_STORE',
+ 68: 'H_LOGICAL_CACHE_LOAD',
+ 72: 'H_LOGICAL_CACHE_STORE',
+ 76: 'H_LOGICAL_ICBI',
+ 80: 'H_LOGICAL_DCBF',
+ 84: 'H_GET_TERM_CHAR',
+ 88: 'H_PUT_TERM_CHAR',
+ 92: 'H_REAL_TO_LOGICAL',
+ 96: 'H_HYPERVISOR_DATA',
+ 100: 'H_EOI',
+ 104: 'H_CPPR',
+ 108: 'H_IPI',
+ 112: 'H_IPOLL',
+ 116: 'H_XIRR',
+ 120: 'H_MIGRATE_DMA',
+ 124: 'H_PERFMON',
+ 220: 'H_REGISTER_VPA',
+ 224: 'H_CEDE',
+ 228: 'H_CONFER',
+ 232: 'H_PROD',
+ 236: 'H_GET_PPP',
+ 240: 'H_SET_PPP',
+ 244: 'H_PURR',
+ 248: 'H_PIC',
+ 252: 'H_REG_CRQ',
+ 256: 'H_FREE_CRQ',
+ 260: 'H_VIO_SIGNAL',
+ 264: 'H_SEND_CRQ',
+ 272: 'H_COPY_RDMA',
+ 276: 'H_REGISTER_LOGICAL_LAN',
+ 280: 'H_FREE_LOGICAL_LAN',
+ 284: 'H_ADD_LOGICAL_LAN_BUFFER',
+ 288: 'H_SEND_LOGICAL_LAN',
+ 292: 'H_BULK_REMOVE',
+ 304: 'H_MULTICAST_CTRL',
+ 308: 'H_SET_XDABR',
+ 312: 'H_STUFF_TCE',
+ 316: 'H_PUT_TCE_INDIRECT',
+ 332: 'H_CHANGE_LOGICAL_LAN_MAC',
+ 336: 'H_VTERM_PARTNER_INFO',
+ 340: 'H_REGISTER_VTERM',
+ 344: 'H_FREE_VTERM',
+ 348: 'H_RESET_EVENTS',
+ 352: 'H_ALLOC_RESOURCE',
+ 356: 'H_FREE_RESOURCE',
+ 360: 'H_MODIFY_QP',
+ 364: 'H_QUERY_QP',
+ 368: 'H_REREGISTER_PMR',
+ 372: 'H_REGISTER_SMR',
+ 376: 'H_QUERY_MR',
+ 380: 'H_QUERY_MW',
+ 384: 'H_QUERY_HCA',
+ 388: 'H_QUERY_PORT',
+ 392: 'H_MODIFY_PORT',
+ 396: 'H_DEFINE_AQP1',
+ 400: 'H_GET_TRACE_BUFFER',
+ 404: 'H_DEFINE_AQP0',
+ 408: 'H_RESIZE_MR',
+ 412: 'H_ATTACH_MCQP',
+ 416: 'H_DETACH_MCQP',
+ 420: 'H_CREATE_RPT',
+ 424: 'H_REMOVE_RPT',
+ 428: 'H_REGISTER_RPAGES',
+ 432: 'H_DISABLE_AND_GETC',
+ 436: 'H_ERROR_DATA',
+ 440: 'H_GET_HCA_INFO',
+ 444: 'H_GET_PERF_COUNT',
+ 448: 'H_MANAGE_TRACE',
+ 468: 'H_FREE_LOGICAL_LAN_BUFFER',
+ 472: 'H_POLL_PENDING',
+ 484: 'H_QUERY_INT_STATE',
+ 580: 'H_ILLAN_ATTRIBUTES',
+ 592: 'H_MODIFY_HEA_QP',
+ 596: 'H_QUERY_HEA_QP',
+ 600: 'H_QUERY_HEA',
+ 604: 'H_QUERY_HEA_PORT',
+ 608: 'H_MODIFY_HEA_PORT',
+ 612: 'H_REG_BCMC',
+ 616: 'H_DEREG_BCMC',
+ 620: 'H_REGISTER_HEA_RPAGES',
+ 624: 'H_DISABLE_AND_GET_HEA',
+ 628: 'H_GET_HEA_INFO',
+ 632: 'H_ALLOC_HEA_RESOURCE',
+ 644: 'H_ADD_CONN',
+ 648: 'H_DEL_CONN',
+ 664: 'H_JOIN',
+ 676: 'H_VASI_STATE',
+ 688: 'H_ENABLE_CRQ',
+ 696: 'H_GET_EM_PARMS',
+ 720: 'H_SET_MPP',
+ 724: 'H_GET_MPP',
+ 748: 'H_HOME_NODE_ASSOCIATIVITY',
+ 756: 'H_BEST_ENERGY',
+ 764: 'H_XIRR_X',
+ 768: 'H_RANDOM',
+ 772: 'H_COP',
+ 788: 'H_GET_MPP_X',
+ 796: 'H_SET_MODE',
+ 61440: 'H_RTAS',
+}
+
+def hcall_table_lookup(opcode):
+ return hcall_table.get(opcode, opcode)
+
+print_ptrn = '%-28s%10s%10s%10s%10s'
+
+def trace_end():
+ print(print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)'))
+ print('-' * 68)
+ for opcode in output:
+ h_name = hcall_table_lookup(opcode)
+ time = output[opcode]['time']
+ cnt = output[opcode]['cnt']
+ min_t = output[opcode]['min']
+ max_t = output[opcode]['max']
+
+ print(print_ptrn % (h_name, cnt, min_t, max_t, time//cnt))
+
+def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
+ opcode, retval):
+ if (cpu in d_enter and opcode in d_enter[cpu]):
+ diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
+
+ if (opcode in output):
+ output[opcode]['time'] += diff
+ output[opcode]['cnt'] += 1
+ if (output[opcode]['min'] > diff):
+ output[opcode]['min'] = diff
+ if (output[opcode]['max'] < diff):
+ output[opcode]['max'] = diff
+ else:
+ output[opcode] = {
+ 'time': diff,
+ 'cnt': 1,
+ 'min': diff,
+ 'max': diff,
+ }
+
+ del d_enter[cpu][opcode]
+# else:
+# print("Can't find matching hcall_enter event. Ignoring sample")
+
+def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
+ callchain, opcode):
+ if (cpu in d_enter):
+ d_enter[cpu][opcode] = nsecs(sec, nsec)
+ else:
+ d_enter[cpu] = {opcode: nsecs(sec, nsec)}
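
powerpc-hcalls.py keeps one running record per opcode and only updates it when a matching hcall_entry was seen on the same cpu. The accumulation itself is a plain min/max/sum-and-count pattern; a standalone sketch with made-up (opcode, latency) samples:

    output = {}

    def account(opcode, diff):
        # the first sample creates the record, later samples fold into it
        if opcode in output:
            rec = output[opcode]
            rec['time'] += diff
            rec['cnt'] += 1
            rec['min'] = min(rec['min'], diff)
            rec['max'] = max(rec['max'], diff)
        else:
            output[opcode] = {'time': diff, 'cnt': 1, 'min': diff, 'max': diff}

    for opcode, diff in ((4, 1200), (4, 800), (8, 5000)):
        account(opcode, diff)

    for opcode, rec in sorted(output.items()):
        print(opcode, rec['cnt'], rec['min'], rec['max'], rec['time'] // rec['cnt'])
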
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
new file mode 100644
index 000000000..8196e3087
--- /dev/null
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -0,0 +1,462 @@
+# Cpu task migration overview toy
+#
+# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
+#
+# perf script event handlers have been generated by perf script -g python
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+from __future__ import print_function
+
+import os
+import sys
+
+from collections import defaultdict
+try:
+ from UserList import UserList
+except ImportError:
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from SchedGui import *
+
+
+threads = { 0 : "idle"}
+
+def thread_name(pid):
+ return "%s:%d" % (threads[pid], pid)
+
+class RunqueueEventUnknown:
+ @staticmethod
+ def color():
+ return None
+
+ def __repr__(self):
+ return "unknown"
+
+class RunqueueEventSleep:
+ @staticmethod
+ def color():
+ return (0, 0, 0xff)
+
+ def __init__(self, sleeper):
+ self.sleeper = sleeper
+
+ def __repr__(self):
+ return "%s gone to sleep" % thread_name(self.sleeper)
+
+class RunqueueEventWakeup:
+ @staticmethod
+ def color():
+ return (0xff, 0xff, 0)
+
+ def __init__(self, wakee):
+ self.wakee = wakee
+
+ def __repr__(self):
+ return "%s woke up" % thread_name(self.wakee)
+
+class RunqueueEventFork:
+ @staticmethod
+ def color():
+ return (0, 0xff, 0)
+
+ def __init__(self, child):
+ self.child = child
+
+ def __repr__(self):
+ return "new forked task %s" % thread_name(self.child)
+
+class RunqueueMigrateIn:
+ @staticmethod
+ def color():
+ return (0, 0xf0, 0xff)
+
+ def __init__(self, new):
+ self.new = new
+
+ def __repr__(self):
+ return "task migrated in %s" % thread_name(self.new)
+
+class RunqueueMigrateOut:
+ @staticmethod
+ def color():
+ return (0xff, 0, 0xff)
+
+ def __init__(self, old):
+ self.old = old
+
+ def __repr__(self):
+ return "task migrated out %s" % thread_name(self.old)
+
+class RunqueueSnapshot:
+ def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
+ self.tasks = tuple(tasks)
+ self.event = event
+
+ def sched_switch(self, prev, prev_state, next):
+ event = RunqueueEventUnknown()
+
+ if taskState(prev_state) == "R" and next in self.tasks \
+ and prev in self.tasks:
+ return self
+
+ if taskState(prev_state) != "R":
+ event = RunqueueEventSleep(prev)
+
+ next_tasks = list(self.tasks[:])
+ if prev in self.tasks:
+ if taskState(prev_state) != "R":
+ next_tasks.remove(prev)
+ elif taskState(prev_state) == "R":
+ next_tasks.append(prev)
+
+ if next not in next_tasks:
+ next_tasks.append(next)
+
+ return RunqueueSnapshot(next_tasks, event)
+
+ def migrate_out(self, old):
+ if old not in self.tasks:
+ return self
+ next_tasks = [task for task in self.tasks if task != old]
+
+ return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
+
+ def __migrate_in(self, new, event):
+ if new in self.tasks:
+ self.event = event
+ return self
+ next_tasks = self.tasks[:] + tuple([new])
+
+ return RunqueueSnapshot(next_tasks, event)
+
+ def migrate_in(self, new):
+ return self.__migrate_in(new, RunqueueMigrateIn(new))
+
+ def wake_up(self, new):
+ return self.__migrate_in(new, RunqueueEventWakeup(new))
+
+ def wake_up_new(self, new):
+ return self.__migrate_in(new, RunqueueEventFork(new))
+
+ def load(self):
+ """ Provide the number of tasks on the runqueue.
+ Don't count idle"""
+ return len(self.tasks) - 1
+
+ def __repr__(self):
+ ret = self.tasks.__repr__()
+ ret += " (last event: %s)" % self.event.__repr__()
+
+ return ret
+
+class TimeSlice:
+ def __init__(self, start, prev):
+ self.start = start
+ self.prev = prev
+ self.end = start
+ # cpus that triggered the event
+ self.event_cpus = []
+ if prev is not None:
+ self.total_load = prev.total_load
+ self.rqs = prev.rqs.copy()
+ else:
+ self.rqs = defaultdict(RunqueueSnapshot)
+ self.total_load = 0
+
+ def __update_total_load(self, old_rq, new_rq):
+ diff = new_rq.load() - old_rq.load()
+ self.total_load += diff
+
+ def sched_switch(self, ts_list, prev, prev_state, next, cpu):
+ old_rq = self.prev.rqs[cpu]
+ new_rq = old_rq.sched_switch(prev, prev_state, next)
+
+ if old_rq is new_rq:
+ return
+
+ self.rqs[cpu] = new_rq
+ self.__update_total_load(old_rq, new_rq)
+ ts_list.append(self)
+ self.event_cpus = [cpu]
+
+ def migrate(self, ts_list, new, old_cpu, new_cpu):
+ if old_cpu == new_cpu:
+ return
+ old_rq = self.prev.rqs[old_cpu]
+ out_rq = old_rq.migrate_out(new)
+ self.rqs[old_cpu] = out_rq
+ self.__update_total_load(old_rq, out_rq)
+
+ new_rq = self.prev.rqs[new_cpu]
+ in_rq = new_rq.migrate_in(new)
+ self.rqs[new_cpu] = in_rq
+ self.__update_total_load(new_rq, in_rq)
+
+ ts_list.append(self)
+
+ if old_rq is not out_rq:
+ self.event_cpus.append(old_cpu)
+ self.event_cpus.append(new_cpu)
+
+ def wake_up(self, ts_list, pid, cpu, fork):
+ old_rq = self.prev.rqs[cpu]
+ if fork:
+ new_rq = old_rq.wake_up_new(pid)
+ else:
+ new_rq = old_rq.wake_up(pid)
+
+ if new_rq is old_rq:
+ return
+ self.rqs[cpu] = new_rq
+ self.__update_total_load(old_rq, new_rq)
+ ts_list.append(self)
+ self.event_cpus = [cpu]
+
+ def next(self, t):
+ self.end = t
+ return TimeSlice(t, self)
+
+class TimeSliceList(UserList):
+ def __init__(self, arg=None):
+ self.data = [] if arg is None else arg
+
+ def get_time_slice(self, ts):
+ if len(self.data) == 0:
+ slice = TimeSlice(ts, TimeSlice(-1, None))
+ else:
+ slice = self.data[-1].next(ts)
+ return slice
+
+ def find_time_slice(self, ts):
+ start = 0
+ end = len(self.data)
+ found = -1
+ searching = True
+ while searching:
+ if start == end or start == end - 1:
+ searching = False
+
+ i = (end + start) // 2
+ if self.data[i].start <= ts and self.data[i].end >= ts:
+ found = i
+ end = i
+ continue
+
+ if self.data[i].end < ts:
+ start = i
+
+ elif self.data[i].start > ts:
+ end = i
+
+ return found
+
+ def set_root_win(self, win):
+ self.root_win = win
+
+ def mouse_down(self, cpu, t):
+ idx = self.find_time_slice(t)
+ if idx == -1:
+ return
+
+ ts = self[idx]
+ rq = ts.rqs[cpu]
+ raw = "CPU: %d\n" % cpu
+ raw += "Last event : %s\n" % rq.event.__repr__()
+ raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
+ raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
+ raw += "Load = %d\n" % rq.load()
+ for t in rq.tasks:
+ raw += "%s \n" % thread_name(t)
+
+ self.root_win.update_summary(raw)
+
+ def update_rectangle_cpu(self, slice, cpu):
+ rq = slice.rqs[cpu]
+
+ if slice.total_load != 0:
+ load_rate = rq.load() / float(slice.total_load)
+ else:
+ load_rate = 0
+
+ red_power = int(0xff - (0xff * load_rate))
+ color = (0xff, red_power, red_power)
+
+ top_color = None
+
+ if cpu in slice.event_cpus:
+ top_color = rq.event.color()
+
+ self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
+
+ def fill_zone(self, start, end):
+ i = self.find_time_slice(start)
+ if i == -1:
+ return
+
+ for i in range(i, len(self.data)):
+ timeslice = self.data[i]
+ if timeslice.start > end:
+ return
+
+ for cpu in timeslice.rqs:
+ self.update_rectangle_cpu(timeslice, cpu)
+
+ def interval(self):
+ if len(self.data) == 0:
+ return (0, 0)
+
+ return (self.data[0].start, self.data[-1].end)
+
+ def nr_rectangles(self):
+ last_ts = self.data[-1]
+ max_cpu = 0
+ for cpu in last_ts.rqs:
+ if cpu > max_cpu:
+ max_cpu = cpu
+ return max_cpu
+
+
+class SchedEventProxy:
+ def __init__(self):
+ self.current_tsk = defaultdict(lambda : -1)
+ self.timeslices = TimeSliceList()
+
+ def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
+ next_comm, next_pid, next_prio):
+ """ Ensure the task we sched out this cpu is really the one
+ we logged. Otherwise we may have missed traces """
+
+ on_cpu_task = self.current_tsk[headers.cpu]
+
+ if on_cpu_task != -1 and on_cpu_task != prev_pid:
+ print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+ headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+
+ threads[prev_pid] = prev_comm
+ threads[next_pid] = next_comm
+ self.current_tsk[headers.cpu] = next_pid
+
+ ts = self.timeslices.get_time_slice(headers.ts())
+ ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
+
+ def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
+ ts = self.timeslices.get_time_slice(headers.ts())
+ ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
+
+ def wake_up(self, headers, comm, pid, success, target_cpu, fork):
+ if success == 0:
+ return
+ ts = self.timeslices.get_time_slice(headers.ts())
+ ts.wake_up(self.timeslices, pid, target_cpu, fork)
+
+
+def trace_begin():
+ global parser
+ parser = SchedEventProxy()
+
+def trace_end():
+ app = wx.App(False)
+ timeslices = parser.timeslices
+ frame = RootFrame(timeslices, "Migration")
+ app.MainLoop()
+
+def sched__sched_stat_runtime(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, runtime, vruntime):
+ pass
+
+def sched__sched_stat_iowait(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, delay):
+ pass
+
+def sched__sched_stat_sleep(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, delay):
+ pass
+
+def sched__sched_stat_wait(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, delay):
+ pass
+
+def sched__sched_process_fork(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, parent_comm, parent_pid, child_comm, child_pid):
+ pass
+
+def sched__sched_process_wait(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio):
+ pass
+
+def sched__sched_process_exit(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio):
+ pass
+
+def sched__sched_process_free(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio):
+ pass
+
+def sched__sched_migrate_task(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio, orig_cpu,
+ dest_cpu):
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm, common_callchain)
+ parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
+
+def sched__sched_switch(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm, common_callchain,
+ prev_comm, prev_pid, prev_prio, prev_state,
+ next_comm, next_pid, next_prio):
+
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm, common_callchain)
+ parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
+ next_comm, next_pid, next_prio)
+
+def sched__sched_wakeup_new(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio, success,
+ target_cpu):
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm, common_callchain)
+ parser.wake_up(headers, comm, pid, success, target_cpu, 1)
+
+def sched__sched_wakeup(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio, success,
+ target_cpu):
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm, common_callchain)
+ parser.wake_up(headers, comm, pid, success, target_cpu, 0)
+
+def sched__sched_wait_task(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid, prio):
+ pass
+
+def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, ret):
+ pass
+
+def sched__sched_kthread_stop(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, comm, pid):
+ pass
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ pass
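
The central idea in sched-migration.py is that RunqueueSnapshot is immutable: every event builds a new snapshot rather than mutating the old one, so each TimeSlice keeps a consistent per-cpu picture of the past. A reduced standalone sketch of that pattern, with invented pids:

    class Snapshot(object):
        def __init__(self, tasks=(0,)):
            self.tasks = tuple(tasks)   # pid 0 stands for idle

        def migrate_out(self, pid):
            if pid not in self.tasks:
                return self             # unchanged: reuse the old snapshot
            return Snapshot(t for t in self.tasks if t != pid)

        def migrate_in(self, pid):
            if pid in self.tasks:
                return self
            return Snapshot(self.tasks + (pid,))

        def load(self):
            return len(self.tasks) - 1  # don't count idle

    rq = Snapshot((0, 101))
    rq_after = rq.migrate_out(101)
    print(rq.load(), rq_after.load())   # the old snapshot is untouched: 1 0
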
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
new file mode 100644
index 000000000..6e0278dcb
--- /dev/null
+++ b/tools/perf/scripts/python/sctop.py
@@ -0,0 +1,89 @@
+# system call top
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Periodically displays system-wide system call totals, broken down by
+# syscall. If a [comm] arg is specified, only syscalls called by
+# [comm] are displayed. If an [interval] arg is specified, the display
+# will be refreshed every [interval] seconds. The default interval is
+# 3 seconds.
+
+from __future__ import print_function
+
+import os, sys, time
+
+try:
+ import thread
+except ImportError:
+ import _thread as thread
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+usage = "perf script -s sctop.py [comm] [interval]\n";
+
+for_comm = None
+default_interval = 3
+interval = default_interval
+
+if len(sys.argv) > 3:
+ sys.exit(usage)
+
+if len(sys.argv) > 2:
+ for_comm = sys.argv[1]
+ interval = int(sys.argv[2])
+elif len(sys.argv) > 1:
+ try:
+ interval = int(sys.argv[1])
+ except ValueError:
+ for_comm = sys.argv[1]
+ interval = default_interval
+
+syscalls = autodict()
+
+def trace_begin():
+ thread.start_new_thread(print_syscall_totals, (interval,))
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ raw_syscalls__sys_enter(**locals())
+
+def print_syscall_totals(interval):
+ while 1:
+ clear_term()
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" %
+ ("----------------------------------------",
+ "----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
+ try:
+ print("%-40s %10d" % (syscall_name(id), val))
+ except TypeError:
+ pass
+ syscalls.clear()
+ time.sleep(interval)
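
The top-like refresh comes from the thread started in trace_begin(): the event handlers only increment counters, while a background thread wakes every [interval] seconds, prints the totals and clears them. A standalone sketch of that split, using the higher-level threading module and invented counter names:

    import threading
    import time

    totals = {}     # stands in for the syscalls autodict

    def count(name):
        totals[name] = totals.get(name, 0) + 1

    def refresher(interval, ticks):
        for _ in range(ticks):          # bounded here so the sketch terminates
            time.sleep(interval)
            print("totals:", dict(totals))
            totals.clear()

    t = threading.Thread(target=refresher, args=(1, 2))
    t.start()
    for i in range(10):                 # stands in for the event callbacks
        count('read' if i % 2 else 'write')
        time.sleep(0.2)
    t.join()
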
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
new file mode 100755
index 000000000..b1c4def14
--- /dev/null
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -0,0 +1,127 @@
+# stackcollapse.py - format perf samples with one line per distinct call stack
+# SPDX-License-Identifier: GPL-2.0
+#
+# This script's output has two space-separated fields. The first is a semicolon
+# separated stack including the program name (from the "comm" field) and the
+# function names from the call stack. The second is a count:
+#
+# swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2
+#
+# The file is sorted according to the first field.
+#
+# Input may be created and processed using:
+#
+# perf record -a -g -F 99 sleep 60
+# perf script report stackcollapse > out.stacks-folded
+#
+# (perf script record stackcollapse works too).
+#
+# Written by Paolo Bonzini <pbonzini@redhat.com>
+# Based on Brendan Gregg's stackcollapse-perf.pl script.
+
+from __future__ import print_function
+
+import os
+import sys
+from collections import defaultdict
+from optparse import OptionParser, make_option
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from EventClass import *
+
+# command line parsing
+
+option_list = [
+ # formatting options for the bottom entry of the stack
+ make_option("--include-tid", dest="include_tid",
+ action="store_true", default=False,
+ help="include thread id in stack"),
+ make_option("--include-pid", dest="include_pid",
+ action="store_true", default=False,
+ help="include process id in stack"),
+ make_option("--no-comm", dest="include_comm",
+ action="store_false", default=True,
+ help="do not separate stacks according to comm"),
+ make_option("--tidy-java", dest="tidy_java",
+ action="store_true", default=False,
+ help="beautify Java signatures"),
+ make_option("--kernel", dest="annotate_kernel",
+ action="store_true", default=False,
+ help="annotate kernel functions with _[k]")
+]
+
+parser = OptionParser(option_list=option_list)
+(opts, args) = parser.parse_args()
+
+if len(args) != 0:
+ parser.error("unexpected command line argument")
+if opts.include_tid and not opts.include_comm:
+ parser.error("requesting tid but not comm is invalid")
+if opts.include_pid and not opts.include_comm:
+ parser.error("requesting pid but not comm is invalid")
+
+# event handlers
+
+lines = defaultdict(lambda: 0)
+
+def process_event(param_dict):
+ def tidy_function_name(sym, dso):
+ if sym is None:
+ sym = '[unknown]'
+
+ sym = sym.replace(';', ':')
+ if opts.tidy_java:
+ # the original stackcollapse-perf.pl script gives the
+ # example of converting this:
+ # Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V
+ # to this:
+ # org/mozilla/javascript/MemberBox:.init
+ sym = sym.replace('<', '')
+ sym = sym.replace('>', '')
+ if sym[0] == 'L' and '/' in sym:
+ sym = sym[1:]
+ try:
+ sym = sym[:sym.index('(')]
+ except ValueError:
+ pass
+
+ if opts.annotate_kernel and dso == '[kernel.kallsyms]':
+ return sym + '_[k]'
+ else:
+ return sym
+
+ stack = list()
+ if 'callchain' in param_dict:
+ for entry in param_dict['callchain']:
+ entry.setdefault('sym', dict())
+ entry['sym'].setdefault('name', None)
+ entry.setdefault('dso', None)
+ stack.append(tidy_function_name(entry['sym']['name'],
+ entry['dso']))
+ else:
+ param_dict.setdefault('symbol', None)
+ param_dict.setdefault('dso', None)
+ stack.append(tidy_function_name(param_dict['symbol'],
+ param_dict['dso']))
+
+ if opts.include_comm:
+ comm = param_dict["comm"].replace(' ', '_')
+ sep = "-"
+ if opts.include_pid:
+ comm = comm + sep + str(param_dict['sample']['pid'])
+ sep = "/"
+ if opts.include_tid:
+ comm = comm + sep + str(param_dict['sample']['tid'])
+ stack.append(comm)
+
+ stack_string = ';'.join(reversed(stack))
+ lines[stack_string] = lines[stack_string] + 1
+
+def trace_end():
+ for stack in sorted(lines):
+ print("%s %d" % (stack, lines[stack]))
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
new file mode 100644
index 000000000..01fa933ff
--- /dev/null
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from __future__ import print_function
+
+data = {}
+times = []
+threads = []
+cpus = []
+
+def get_key(time, event, cpu, thread):
+ return "%d-%s-%d-%d" % (time, event, cpu, thread)
+
+def store_key(time, cpu, thread):
+ if (time not in times):
+ times.append(time)
+
+ if (cpu not in cpus):
+ cpus.append(cpu)
+
+ if (thread not in threads):
+ threads.append(thread)
+
+def store(time, event, cpu, thread, val, ena, run):
+ #print("event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" %
+ # (event, cpu, thread, time, val, ena, run))
+
+ store_key(time, cpu, thread)
+ key = get_key(time, event, cpu, thread)
+ data[key] = [val, ena, run]
+
+def get(time, event, cpu, thread):
+ key = get_key(time, event, cpu, thread)
+ return data[key][0]
+
+def stat__cycles_k(cpu, thread, time, val, ena, run):
+ store(time, "cycles", cpu, thread, val, ena, run)
+
+def stat__instructions_k(cpu, thread, time, val, ena, run):
+ store(time, "instructions", cpu, thread, val, ena, run)
+
+def stat__cycles_u(cpu, thread, time, val, ena, run):
+ store(time, "cycles", cpu, thread, val, ena, run)
+
+def stat__instructions_u(cpu, thread, time, val, ena, run):
+ store(time, "instructions", cpu, thread, val, ena, run)
+
+def stat__cycles(cpu, thread, time, val, ena, run):
+ store(time, "cycles", cpu, thread, val, ena, run)
+
+def stat__instructions(cpu, thread, time, val, ena, run):
+ store(time, "instructions", cpu, thread, val, ena, run)
+
+def stat__interval(time):
+ for cpu in cpus:
+ for thread in threads:
+ cyc = get(time, "cycles", cpu, thread)
+ ins = get(time, "instructions", cpu, thread)
+ cpi = 0
+
+ if ins != 0:
+ cpi = cyc/float(ins)
+
+ print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins))
+
+def trace_end():
+ pass
+# XXX trace_end callback could be used as an alternative place
+# to compute same values as in the script above:
+#
+# for time in times:
+# for cpu in cpus:
+# for thread in threads:
+# cyc = get(time, "cycles", cpu, thread)
+# ins = get(time, "instructions", cpu, thread)
+#
+# if ins != 0:
+# cpi = cyc/float(ins)
+#
+# print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
new file mode 100644
index 000000000..f254e40c6
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -0,0 +1,75 @@
+# system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+from __future__ import print_function
+
+import os, sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import syscall_name
+
+usage = "perf script -s syscall-counts-by-pid.py [comm]\n"
+
+for_comm = None
+for_pid = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ try:
+ for_pid = int(sys.argv[1])
+ except ValueError:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ print("Press control+C to stop and show the summary")
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
+ if (for_comm and common_comm != for_comm) or \
+ (for_pid and common_pid != for_pid):
+ return
+ try:
+ syscalls[common_comm][common_pid][id] += 1
+ except TypeError:
+ syscalls[common_comm][common_pid][id] = 1
+
+def syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ raw_syscalls__sys_enter(**locals())
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
new file mode 100644
index 000000000..8adb95ff1
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -0,0 +1,65 @@
+# system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import syscall_name
+
+usage = "perf script -s syscall-counts.py [comm]\n"
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ print("Press control+C to stop and show the summary")
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
+ raw_syscalls__sys_enter(**locals())
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))