From 109be507377fe7f6e8819ac94041d3fdcdf6fd2f Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 28 Apr 2024 15:18:25 +0200 Subject: Adding upstream version 1.19.8. Signed-off-by: Daniel Baumann --- src/cmd/vendor/github.com/google/pprof/AUTHORS | 7 + .../vendor/github.com/google/pprof/CONTRIBUTORS | 16 + src/cmd/vendor/github.com/google/pprof/LICENSE | 202 + .../github.com/google/pprof/driver/driver.go | 298 + .../google/pprof/internal/binutils/addr2liner.go | 238 + .../pprof/internal/binutils/addr2liner_llvm.go | 181 + .../pprof/internal/binutils/addr2liner_nm.go | 144 + .../google/pprof/internal/binutils/binutils.go | 738 ++ .../google/pprof/internal/binutils/disasm.go | 180 + .../github.com/google/pprof/internal/driver/cli.go | 367 + .../google/pprof/internal/driver/commands.go | 459 + .../google/pprof/internal/driver/config.go | 371 + .../google/pprof/internal/driver/driver.go | 367 + .../google/pprof/internal/driver/driver_focus.go | 219 + .../google/pprof/internal/driver/fetch.go | 591 ++ .../google/pprof/internal/driver/flags.go | 71 + .../google/pprof/internal/driver/flamegraph.go | 106 + .../google/pprof/internal/driver/html/common.css | 272 + .../google/pprof/internal/driver/html/common.js | 693 ++ .../pprof/internal/driver/html/flamegraph.html | 103 + .../google/pprof/internal/driver/html/graph.html | 16 + .../google/pprof/internal/driver/html/header.html | 113 + .../pprof/internal/driver/html/plaintext.html | 18 + .../google/pprof/internal/driver/html/source.html | 18 + .../google/pprof/internal/driver/html/top.html | 114 + .../google/pprof/internal/driver/interactive.go | 418 + .../google/pprof/internal/driver/options.go | 100 + .../google/pprof/internal/driver/settings.go | 159 + .../github.com/google/pprof/internal/driver/svg.go | 80 + .../google/pprof/internal/driver/tagroot.go | 129 + .../google/pprof/internal/driver/tempfile.go | 60 + .../google/pprof/internal/driver/webhtml.go | 68 + .../google/pprof/internal/driver/webui.go | 465 + 
.../google/pprof/internal/elfexec/elfexec.go | 383 + .../google/pprof/internal/graph/dotgraph.go | 494 + .../google/pprof/internal/graph/graph.go | 1170 +++ .../pprof/internal/measurement/measurement.go | 293 + .../google/pprof/internal/plugin/plugin.go | 216 + .../google/pprof/internal/report/report.go | 1321 +++ .../google/pprof/internal/report/source.go | 1114 +++ .../google/pprof/internal/report/source_html.go | 75 + .../google/pprof/internal/report/synth.go | 39 + .../google/pprof/internal/symbolizer/symbolizer.go | 379 + .../google/pprof/internal/symbolz/symbolz.go | 200 + .../google/pprof/internal/transport/transport.go | 131 + .../github.com/google/pprof/profile/encode.go | 576 ++ .../github.com/google/pprof/profile/filter.go | 270 + .../github.com/google/pprof/profile/index.go | 64 + .../google/pprof/profile/legacy_java_profile.go | 315 + .../google/pprof/profile/legacy_profile.go | 1229 +++ .../github.com/google/pprof/profile/merge.go | 482 + .../github.com/google/pprof/profile/profile.go | 814 ++ .../github.com/google/pprof/profile/proto.go | 370 + .../github.com/google/pprof/profile/prune.go | 178 + .../d3flamegraph/D3_FLAME_GRAPH_LICENSE | 201 + .../pprof/third_party/d3flamegraph/D3_LICENSE | 13 + .../pprof/third_party/d3flamegraph/README.md | 33 + .../third_party/d3flamegraph/d3_flame_graph.go | 65 + .../google/pprof/third_party/d3flamegraph/index.js | 13 + .../third_party/d3flamegraph/package-lock.json | 1106 +++ .../pprof/third_party/d3flamegraph/package.json | 17 + .../pprof/third_party/d3flamegraph/update.sh | 62 + .../third_party/d3flamegraph/webpack.config.js | 13 + .../google/pprof/third_party/svgpan/LICENSE | 27 + .../google/pprof/third_party/svgpan/svgpan.go | 297 + .../github.com/ianlancetaylor/demangle/.gitignore | 13 + .../github.com/ianlancetaylor/demangle/LICENSE | 27 + .../github.com/ianlancetaylor/demangle/README.md | 3 + .../github.com/ianlancetaylor/demangle/ast.go | 4119 ++++++++ .../github.com/ianlancetaylor/demangle/demangle.go | 
3315 +++++++ .../github.com/ianlancetaylor/demangle/rust.go | 1119 +++ src/cmd/vendor/golang.org/x/arch/AUTHORS | 3 + src/cmd/vendor/golang.org/x/arch/CONTRIBUTORS | 3 + src/cmd/vendor/golang.org/x/arch/LICENSE | 27 + src/cmd/vendor/golang.org/x/arch/PATENTS | 22 + .../vendor/golang.org/x/arch/arm/armasm/Makefile | 2 + .../vendor/golang.org/x/arch/arm/armasm/decode.go | 567 ++ src/cmd/vendor/golang.org/x/arch/arm/armasm/gnu.go | 164 + .../vendor/golang.org/x/arch/arm/armasm/inst.go | 438 + .../vendor/golang.org/x/arch/arm/armasm/plan9x.go | 398 + .../vendor/golang.org/x/arch/arm/armasm/tables.go | 9552 +++++++++++++++++++ .../vendor/golang.org/x/arch/arm64/arm64asm/arg.go | 494 + .../golang.org/x/arch/arm64/arm64asm/condition.go | 329 + .../x/arch/arm64/arm64asm/condition_util.go | 81 + .../golang.org/x/arch/arm64/arm64asm/decode.go | 2777 ++++++ .../vendor/golang.org/x/arch/arm64/arm64asm/gnu.go | 35 + .../golang.org/x/arch/arm64/arm64asm/inst.go | 1128 +++ .../golang.org/x/arch/arm64/arm64asm/inst.json | 1219 +++ .../golang.org/x/arch/arm64/arm64asm/plan9x.go | 742 ++ .../golang.org/x/arch/arm64/arm64asm/tables.go | 3366 +++++++ .../golang.org/x/arch/ppc64/ppc64asm/decode.go | 209 + .../vendor/golang.org/x/arch/ppc64/ppc64asm/doc.go | 6 + .../golang.org/x/arch/ppc64/ppc64asm/field.go | 88 + .../vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go | 471 + .../golang.org/x/arch/ppc64/ppc64asm/inst.go | 355 + .../golang.org/x/arch/ppc64/ppc64asm/plan9.go | 365 + .../golang.org/x/arch/ppc64/ppc64asm/tables.go | 5771 ++++++++++++ .../vendor/golang.org/x/arch/x86/x86asm/Makefile | 3 + .../vendor/golang.org/x/arch/x86/x86asm/decode.go | 1724 ++++ src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go | 956 ++ .../vendor/golang.org/x/arch/x86/x86asm/inst.go | 649 ++ .../vendor/golang.org/x/arch/x86/x86asm/intel.go | 560 ++ .../vendor/golang.org/x/arch/x86/x86asm/plan9x.go | 386 + .../vendor/golang.org/x/arch/x86/x86asm/tables.go | 9924 ++++++++++++++++++++ 
src/cmd/vendor/golang.org/x/crypto/AUTHORS | 3 + src/cmd/vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + src/cmd/vendor/golang.org/x/crypto/LICENSE | 27 + src/cmd/vendor/golang.org/x/crypto/PATENTS | 22 + .../vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 + src/cmd/vendor/golang.org/x/mod/LICENSE | 27 + src/cmd/vendor/golang.org/x/mod/PATENTS | 22 + .../golang.org/x/mod/internal/lazyregexp/lazyre.go | 78 + src/cmd/vendor/golang.org/x/mod/modfile/print.go | 174 + src/cmd/vendor/golang.org/x/mod/modfile/read.go | 958 ++ src/cmd/vendor/golang.org/x/mod/modfile/rule.go | 1556 +++ src/cmd/vendor/golang.org/x/mod/modfile/work.go | 234 + src/cmd/vendor/golang.org/x/mod/module/module.go | 841 ++ src/cmd/vendor/golang.org/x/mod/module/pseudo.go | 250 + src/cmd/vendor/golang.org/x/mod/semver/semver.go | 401 + src/cmd/vendor/golang.org/x/mod/sumdb/cache.go | 59 + src/cmd/vendor/golang.org/x/mod/sumdb/client.go | 671 ++ .../vendor/golang.org/x/mod/sumdb/dirhash/hash.go | 132 + src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go | 679 ++ src/cmd/vendor/golang.org/x/mod/sumdb/server.go | 180 + src/cmd/vendor/golang.org/x/mod/sumdb/test.go | 124 + src/cmd/vendor/golang.org/x/mod/sumdb/tlog/note.go | 135 + src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tile.go | 435 + src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tlog.go | 597 ++ src/cmd/vendor/golang.org/x/mod/zip/zip.go | 972 ++ src/cmd/vendor/golang.org/x/sync/AUTHORS | 3 + src/cmd/vendor/golang.org/x/sync/CONTRIBUTORS | 3 + src/cmd/vendor/golang.org/x/sync/LICENSE | 27 + src/cmd/vendor/golang.org/x/sync/PATENTS | 22 + .../golang.org/x/sync/semaphore/semaphore.go | 136 + src/cmd/vendor/golang.org/x/sys/AUTHORS | 3 + src/cmd/vendor/golang.org/x/sys/CONTRIBUTORS | 3 + src/cmd/vendor/golang.org/x/sys/LICENSE | 27 + src/cmd/vendor/golang.org/x/sys/PATENTS | 22 + .../x/sys/internal/unsafeheader/unsafeheader.go | 30 + src/cmd/vendor/golang.org/x/sys/plan9/asm.s | 8 + .../vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 + 
.../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 + .../vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 + .../vendor/golang.org/x/sys/plan9/const_plan9.go | 70 + src/cmd/vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 + src/cmd/vendor/golang.org/x/sys/plan9/env_plan9.go | 31 + .../vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 + src/cmd/vendor/golang.org/x/sys/plan9/mkall.sh | 150 + src/cmd/vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 + .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 + .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 22 + src/cmd/vendor/golang.org/x/sys/plan9/pwd_plan9.go | 24 + src/cmd/vendor/golang.org/x/sys/plan9/race.go | 31 + src/cmd/vendor/golang.org/x/sys/plan9/race0.go | 26 + src/cmd/vendor/golang.org/x/sys/plan9/str.go | 23 + src/cmd/vendor/golang.org/x/sys/plan9/syscall.go | 118 + .../vendor/golang.org/x/sys/plan9/syscall_plan9.go | 361 + .../golang.org/x/sys/plan9/zsyscall_plan9_386.go | 285 + .../golang.org/x/sys/plan9/zsyscall_plan9_amd64.go | 285 + .../golang.org/x/sys/plan9/zsyscall_plan9_arm.go | 285 + .../vendor/golang.org/x/sys/plan9/zsysnum_plan9.go | 49 + src/cmd/vendor/golang.org/x/sys/unix/.gitignore | 2 + src/cmd/vendor/golang.org/x/sys/unix/README.md | 184 + .../vendor/golang.org/x/sys/unix/affinity_linux.go | 86 + src/cmd/vendor/golang.org/x/sys/unix/aliases.go | 15 + .../vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 18 + src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_386.s | 29 + .../vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 29 + src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 29 + .../vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 29 + .../vendor/golang.org/x/sys/unix/asm_linux_386.s | 66 + .../vendor/golang.org/x/sys/unix/asm_linux_amd64.s | 58 + .../vendor/golang.org/x/sys/unix/asm_linux_arm.s | 57 + .../vendor/golang.org/x/sys/unix/asm_linux_arm64.s | 53 + .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 57 + 
.../vendor/golang.org/x/sys/unix/asm_linux_mipsx.s | 55 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 45 + .../golang.org/x/sys/unix/asm_linux_riscv64.s | 49 + .../vendor/golang.org/x/sys/unix/asm_linux_s390x.s | 57 + .../golang.org/x/sys/unix/asm_openbsd_mips64.s | 30 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 18 + .../vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 + .../golang.org/x/sys/unix/bluetooth_linux.go | 36 + .../vendor/golang.org/x/sys/unix/cap_freebsd.go | 196 + src/cmd/vendor/golang.org/x/sys/unix/constants.go | 14 + .../vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 + .../vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 + src/cmd/vendor/golang.org/x/sys/unix/dev_darwin.go | 24 + .../vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 + .../vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 + src/cmd/vendor/golang.org/x/sys/unix/dev_linux.go | 42 + src/cmd/vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 + .../vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 + src/cmd/vendor/golang.org/x/sys/unix/dev_zos.go | 29 + src/cmd/vendor/golang.org/x/sys/unix/dirent.go | 103 + src/cmd/vendor/golang.org/x/sys/unix/endian_big.go | 10 + .../vendor/golang.org/x/sys/unix/endian_little.go | 10 + src/cmd/vendor/golang.org/x/sys/unix/env_unix.go | 32 + src/cmd/vendor/golang.org/x/sys/unix/epoll_zos.go | 221 + .../golang.org/x/sys/unix/errors_freebsd_386.go | 233 + .../golang.org/x/sys/unix/errors_freebsd_amd64.go | 233 + .../golang.org/x/sys/unix/errors_freebsd_arm.go | 226 + .../golang.org/x/sys/unix/errors_freebsd_arm64.go | 17 + src/cmd/vendor/golang.org/x/sys/unix/fcntl.go | 37 + .../vendor/golang.org/x/sys/unix/fcntl_darwin.go | 24 + .../golang.org/x/sys/unix/fcntl_linux_32bit.go | 14 + src/cmd/vendor/golang.org/x/sys/unix/fdset.go | 30 + .../vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 + src/cmd/vendor/golang.org/x/sys/unix/gccgo.go | 60 + src/cmd/vendor/golang.org/x/sys/unix/gccgo_c.c | 45 + .../golang.org/x/sys/unix/gccgo_linux_amd64.go | 21 
+ .../vendor/golang.org/x/sys/unix/ifreq_linux.go | 142 + src/cmd/vendor/golang.org/x/sys/unix/ioctl.go | 75 + .../vendor/golang.org/x/sys/unix/ioctl_linux.go | 219 + src/cmd/vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 + src/cmd/vendor/golang.org/x/sys/unix/mkall.sh | 231 + src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh | 772 ++ .../vendor/golang.org/x/sys/unix/pagesize_unix.go | 16 + .../vendor/golang.org/x/sys/unix/pledge_openbsd.go | 163 + .../vendor/golang.org/x/sys/unix/ptrace_darwin.go | 12 + src/cmd/vendor/golang.org/x/sys/unix/ptrace_ios.go | 12 + src/cmd/vendor/golang.org/x/sys/unix/race.go | 31 + src/cmd/vendor/golang.org/x/sys/unix/race0.go | 26 + .../golang.org/x/sys/unix/readdirent_getdents.go | 13 + .../x/sys/unix/readdirent_getdirentries.go | 20 + .../golang.org/x/sys/unix/sockcmsg_dragonfly.go | 16 + .../vendor/golang.org/x/sys/unix/sockcmsg_linux.go | 85 + .../vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 93 + .../golang.org/x/sys/unix/sockcmsg_unix_other.go | 47 + src/cmd/vendor/golang.org/x/sys/unix/str.go | 27 + src/cmd/vendor/golang.org/x/sys/unix/syscall.go | 95 + .../vendor/golang.org/x/sys/unix/syscall_aix.go | 551 ++ .../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 + .../golang.org/x/sys/unix/syscall_aix_ppc64.go | 85 + .../vendor/golang.org/x/sys/unix/syscall_bsd.go | 625 ++ .../golang.org/x/sys/unix/syscall_darwin.1_12.go | 32 + .../golang.org/x/sys/unix/syscall_darwin.1_13.go | 108 + .../vendor/golang.org/x/sys/unix/syscall_darwin.go | 733 ++ .../golang.org/x/sys/unix/syscall_darwin_amd64.go | 51 + .../golang.org/x/sys/unix/syscall_darwin_arm64.go | 51 + .../x/sys/unix/syscall_darwin_libSystem.go | 27 + .../golang.org/x/sys/unix/syscall_dragonfly.go | 544 ++ .../x/sys/unix/syscall_dragonfly_amd64.go | 57 + .../golang.org/x/sys/unix/syscall_freebsd.go | 869 ++ .../golang.org/x/sys/unix/syscall_freebsd_386.go | 67 + .../golang.org/x/sys/unix/syscall_freebsd_amd64.go | 67 + .../golang.org/x/sys/unix/syscall_freebsd_arm.go | 63 + 
.../golang.org/x/sys/unix/syscall_freebsd_arm64.go | 63 + .../golang.org/x/sys/unix/syscall_illumos.go | 186 + .../vendor/golang.org/x/sys/unix/syscall_linux.go | 2456 +++++ .../golang.org/x/sys/unix/syscall_linux_386.go | 346 + .../golang.org/x/sys/unix/syscall_linux_alarm.go | 14 + .../golang.org/x/sys/unix/syscall_linux_amd64.go | 151 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../golang.org/x/sys/unix/syscall_linux_arm.go | 248 + .../golang.org/x/sys/unix/syscall_linux_arm64.go | 199 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 15 + .../golang.org/x/sys/unix/syscall_linux_gc_386.go | 17 + .../golang.org/x/sys/unix/syscall_linux_gc_arm.go | 14 + .../x/sys/unix/syscall_linux_gccgo_386.go | 31 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 21 + .../golang.org/x/sys/unix/syscall_linux_loong64.go | 191 + .../golang.org/x/sys/unix/syscall_linux_mips64x.go | 195 + .../golang.org/x/sys/unix/syscall_linux_mipsx.go | 207 + .../golang.org/x/sys/unix/syscall_linux_ppc.go | 236 + .../golang.org/x/sys/unix/syscall_linux_ppc64x.go | 122 + .../golang.org/x/sys/unix/syscall_linux_riscv64.go | 183 + .../golang.org/x/sys/unix/syscall_linux_s390x.go | 302 + .../golang.org/x/sys/unix/syscall_linux_sparc64.go | 118 + .../vendor/golang.org/x/sys/unix/syscall_netbsd.go | 609 ++ .../golang.org/x/sys/unix/syscall_netbsd_386.go | 38 + .../golang.org/x/sys/unix/syscall_netbsd_amd64.go | 38 + .../golang.org/x/sys/unix/syscall_netbsd_arm.go | 38 + .../golang.org/x/sys/unix/syscall_netbsd_arm64.go | 38 + .../golang.org/x/sys/unix/syscall_openbsd.go | 389 + .../golang.org/x/sys/unix/syscall_openbsd_386.go | 42 + .../golang.org/x/sys/unix/syscall_openbsd_amd64.go | 42 + .../golang.org/x/sys/unix/syscall_openbsd_arm.go | 42 + .../golang.org/x/sys/unix/syscall_openbsd_arm64.go | 42 + .../x/sys/unix/syscall_openbsd_mips64.go | 35 + .../golang.org/x/sys/unix/syscall_solaris.go | 1005 ++ .../golang.org/x/sys/unix/syscall_solaris_amd64.go | 28 + 
.../vendor/golang.org/x/sys/unix/syscall_unix.go | 486 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 18 + .../x/sys/unix/syscall_unix_gc_ppc64x.go | 25 + .../golang.org/x/sys/unix/syscall_zos_s390x.go | 1823 ++++ .../vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 + .../vendor/golang.org/x/sys/unix/sysvshm_unix.go | 61 + .../golang.org/x/sys/unix/sysvshm_unix_other.go | 14 + src/cmd/vendor/golang.org/x/sys/unix/timestruct.go | 77 + .../vendor/golang.org/x/sys/unix/unveil_openbsd.go | 42 + src/cmd/vendor/golang.org/x/sys/unix/xattr_bsd.go | 241 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1385 +++ .../golang.org/x/sys/unix/zerrors_aix_ppc64.go | 1386 +++ .../golang.org/x/sys/unix/zerrors_darwin_amd64.go | 1892 ++++ .../golang.org/x/sys/unix/zerrors_darwin_arm64.go | 1892 ++++ .../x/sys/unix/zerrors_dragonfly_amd64.go | 1738 ++++ .../golang.org/x/sys/unix/zerrors_freebsd_386.go | 1948 ++++ .../golang.org/x/sys/unix/zerrors_freebsd_amd64.go | 1947 ++++ .../golang.org/x/sys/unix/zerrors_freebsd_arm.go | 1846 ++++ .../golang.org/x/sys/unix/zerrors_freebsd_arm64.go | 1948 ++++ .../vendor/golang.org/x/sys/unix/zerrors_linux.go | 3084 ++++++ .../golang.org/x/sys/unix/zerrors_linux_386.go | 826 ++ .../golang.org/x/sys/unix/zerrors_linux_amd64.go | 826 ++ .../golang.org/x/sys/unix/zerrors_linux_arm.go | 832 ++ .../golang.org/x/sys/unix/zerrors_linux_arm64.go | 823 ++ .../golang.org/x/sys/unix/zerrors_linux_loong64.go | 818 ++ .../golang.org/x/sys/unix/zerrors_linux_mips.go | 833 ++ .../golang.org/x/sys/unix/zerrors_linux_mips64.go | 833 ++ .../x/sys/unix/zerrors_linux_mips64le.go | 833 ++ .../golang.org/x/sys/unix/zerrors_linux_mipsle.go | 833 ++ .../golang.org/x/sys/unix/zerrors_linux_ppc.go | 885 ++ .../golang.org/x/sys/unix/zerrors_linux_ppc64.go | 889 ++ .../golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 889 ++ .../golang.org/x/sys/unix/zerrors_linux_riscv64.go | 813 ++ .../golang.org/x/sys/unix/zerrors_linux_s390x.go | 888 ++ 
.../golang.org/x/sys/unix/zerrors_linux_sparc64.go | 883 ++ .../golang.org/x/sys/unix/zerrors_netbsd_386.go | 1780 ++++ .../golang.org/x/sys/unix/zerrors_netbsd_amd64.go | 1770 ++++ .../golang.org/x/sys/unix/zerrors_netbsd_arm.go | 1759 ++++ .../golang.org/x/sys/unix/zerrors_netbsd_arm64.go | 1770 ++++ .../golang.org/x/sys/unix/zerrors_openbsd_386.go | 1668 ++++ .../golang.org/x/sys/unix/zerrors_openbsd_amd64.go | 1775 ++++ .../golang.org/x/sys/unix/zerrors_openbsd_arm.go | 1670 ++++ .../golang.org/x/sys/unix/zerrors_openbsd_arm64.go | 1798 ++++ .../x/sys/unix/zerrors_openbsd_mips64.go | 1863 ++++ .../golang.org/x/sys/unix/zerrors_solaris_amd64.go | 1557 +++ .../golang.org/x/sys/unix/zerrors_zos_s390x.go | 860 ++ .../golang.org/x/sys/unix/zptrace_armnn_linux.go | 42 + .../golang.org/x/sys/unix/zptrace_linux_arm64.go | 17 + .../golang.org/x/sys/unix/zptrace_mipsnn_linux.go | 51 + .../x/sys/unix/zptrace_mipsnnle_linux.go | 51 + .../golang.org/x/sys/unix/zptrace_x86_linux.go | 81 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1485 +++ .../golang.org/x/sys/unix/zsyscall_aix_ppc64.go | 1443 +++ .../golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 +++ .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 +++ .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 25 + .../golang.org/x/sys/unix/zsyscall_darwin_amd64.go | 2519 +++++ .../golang.org/x/sys/unix/zsyscall_darwin_amd64.s | 889 ++ .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 25 + .../golang.org/x/sys/unix/zsyscall_darwin_arm64.go | 2519 +++++ .../golang.org/x/sys/unix/zsyscall_darwin_arm64.s | 889 ++ .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1679 ++++ .../golang.org/x/sys/unix/zsyscall_freebsd_386.go | 2016 ++++ .../x/sys/unix/zsyscall_freebsd_amd64.go | 2016 ++++ .../golang.org/x/sys/unix/zsyscall_freebsd_arm.go | 2016 ++++ .../x/sys/unix/zsyscall_freebsd_arm64.go | 2016 ++++ 
.../x/sys/unix/zsyscall_illumos_amd64.go | 128 + .../vendor/golang.org/x/sys/unix/zsyscall_linux.go | 2153 +++++ .../golang.org/x/sys/unix/zsyscall_linux_386.go | 537 ++ .../golang.org/x/sys/unix/zsyscall_linux_amd64.go | 704 ++ .../golang.org/x/sys/unix/zsyscall_linux_arm.go | 652 ++ .../golang.org/x/sys/unix/zsyscall_linux_arm64.go | 603 ++ .../x/sys/unix/zsyscall_linux_loong64.go | 552 ++ .../golang.org/x/sys/unix/zsyscall_linux_mips.go | 704 ++ .../golang.org/x/sys/unix/zsyscall_linux_mips64.go | 698 ++ .../x/sys/unix/zsyscall_linux_mips64le.go | 687 ++ .../golang.org/x/sys/unix/zsyscall_linux_mipsle.go | 704 ++ .../golang.org/x/sys/unix/zsyscall_linux_ppc.go | 709 ++ .../golang.org/x/sys/unix/zsyscall_linux_ppc64.go | 755 ++ .../x/sys/unix/zsyscall_linux_ppc64le.go | 755 ++ .../x/sys/unix/zsyscall_linux_riscv64.go | 572 ++ .../golang.org/x/sys/unix/zsyscall_linux_s390x.go | 546 ++ .../x/sys/unix/zsyscall_linux_sparc64.go | 699 ++ .../golang.org/x/sys/unix/zsyscall_netbsd_386.go | 1850 ++++ .../golang.org/x/sys/unix/zsyscall_netbsd_amd64.go | 1850 ++++ .../golang.org/x/sys/unix/zsyscall_netbsd_arm.go | 1850 ++++ .../golang.org/x/sys/unix/zsyscall_netbsd_arm64.go | 1850 ++++ .../golang.org/x/sys/unix/zsyscall_openbsd_386.go | 1693 ++++ .../x/sys/unix/zsyscall_openbsd_amd64.go | 1693 ++++ .../golang.org/x/sys/unix/zsyscall_openbsd_arm.go | 1693 ++++ .../x/sys/unix/zsyscall_openbsd_arm64.go | 1693 ++++ .../x/sys/unix/zsyscall_openbsd_mips64.go | 1693 ++++ .../x/sys/unix/zsyscall_solaris_amd64.go | 2067 ++++ .../golang.org/x/sys/unix/zsyscall_zos_s390x.go | 1255 +++ .../golang.org/x/sys/unix/zsysctl_openbsd_386.go | 274 + .../golang.org/x/sys/unix/zsysctl_openbsd_amd64.go | 272 + .../golang.org/x/sys/unix/zsysctl_openbsd_arm.go | 274 + .../golang.org/x/sys/unix/zsysctl_openbsd_arm64.go | 276 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 280 + .../golang.org/x/sys/unix/zsysnum_darwin_amd64.go | 440 + .../golang.org/x/sys/unix/zsysnum_darwin_arm64.go | 438 + 
.../x/sys/unix/zsysnum_dragonfly_amd64.go | 317 + .../golang.org/x/sys/unix/zsysnum_freebsd_386.go | 397 + .../golang.org/x/sys/unix/zsysnum_freebsd_amd64.go | 397 + .../golang.org/x/sys/unix/zsysnum_freebsd_arm.go | 397 + .../golang.org/x/sys/unix/zsysnum_freebsd_arm64.go | 397 + .../golang.org/x/sys/unix/zsysnum_linux_386.go | 450 + .../golang.org/x/sys/unix/zsysnum_linux_amd64.go | 372 + .../golang.org/x/sys/unix/zsysnum_linux_arm.go | 414 + .../golang.org/x/sys/unix/zsysnum_linux_arm64.go | 317 + .../golang.org/x/sys/unix/zsysnum_linux_loong64.go | 313 + .../golang.org/x/sys/unix/zsysnum_linux_mips.go | 434 + .../golang.org/x/sys/unix/zsysnum_linux_mips64.go | 364 + .../x/sys/unix/zsysnum_linux_mips64le.go | 364 + .../golang.org/x/sys/unix/zsysnum_linux_mipsle.go | 434 + .../golang.org/x/sys/unix/zsysnum_linux_ppc.go | 441 + .../golang.org/x/sys/unix/zsysnum_linux_ppc64.go | 413 + .../golang.org/x/sys/unix/zsysnum_linux_ppc64le.go | 413 + .../golang.org/x/sys/unix/zsysnum_linux_riscv64.go | 315 + .../golang.org/x/sys/unix/zsysnum_linux_s390x.go | 378 + .../golang.org/x/sys/unix/zsysnum_linux_sparc64.go | 392 + .../golang.org/x/sys/unix/zsysnum_netbsd_386.go | 275 + .../golang.org/x/sys/unix/zsysnum_netbsd_amd64.go | 275 + .../golang.org/x/sys/unix/zsysnum_netbsd_arm.go | 275 + .../golang.org/x/sys/unix/zsysnum_netbsd_arm64.go | 275 + .../golang.org/x/sys/unix/zsysnum_openbsd_386.go | 219 + .../golang.org/x/sys/unix/zsysnum_openbsd_amd64.go | 219 + .../golang.org/x/sys/unix/zsysnum_openbsd_arm.go | 219 + .../golang.org/x/sys/unix/zsysnum_openbsd_arm64.go | 218 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 221 + .../golang.org/x/sys/unix/zsysnum_zos_s390x.go | 2670 ++++++ .../vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go | 354 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 358 + .../golang.org/x/sys/unix/ztypes_darwin_amd64.go | 768 ++ .../golang.org/x/sys/unix/ztypes_darwin_arm64.go | 768 ++ .../x/sys/unix/ztypes_dragonfly_amd64.go | 474 + 
.../golang.org/x/sys/unix/ztypes_freebsd_386.go | 723 ++ .../golang.org/x/sys/unix/ztypes_freebsd_amd64.go | 726 ++ .../golang.org/x/sys/unix/ztypes_freebsd_arm.go | 707 ++ .../golang.org/x/sys/unix/ztypes_freebsd_arm64.go | 704 ++ .../golang.org/x/sys/unix/ztypes_illumos_amd64.go | 42 + .../vendor/golang.org/x/sys/unix/ztypes_linux.go | 5590 +++++++++++ .../golang.org/x/sys/unix/ztypes_linux_386.go | 683 ++ .../golang.org/x/sys/unix/ztypes_linux_amd64.go | 699 ++ .../golang.org/x/sys/unix/ztypes_linux_arm.go | 678 ++ .../golang.org/x/sys/unix/ztypes_linux_arm64.go | 678 ++ .../golang.org/x/sys/unix/ztypes_linux_loong64.go | 679 ++ .../golang.org/x/sys/unix/ztypes_linux_mips.go | 683 ++ .../golang.org/x/sys/unix/ztypes_linux_mips64.go | 681 ++ .../golang.org/x/sys/unix/ztypes_linux_mips64le.go | 681 ++ .../golang.org/x/sys/unix/ztypes_linux_mipsle.go | 683 ++ .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 691 ++ .../golang.org/x/sys/unix/ztypes_linux_ppc64.go | 687 ++ .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 687 ++ .../golang.org/x/sys/unix/ztypes_linux_riscv64.go | 706 ++ .../golang.org/x/sys/unix/ztypes_linux_s390x.go | 701 ++ .../golang.org/x/sys/unix/ztypes_linux_sparc64.go | 682 ++ .../golang.org/x/sys/unix/ztypes_netbsd_386.go | 502 + .../golang.org/x/sys/unix/ztypes_netbsd_amd64.go | 510 + .../golang.org/x/sys/unix/ztypes_netbsd_arm.go | 507 + .../golang.org/x/sys/unix/ztypes_netbsd_arm64.go | 510 + .../golang.org/x/sys/unix/ztypes_openbsd_386.go | 574 ++ .../golang.org/x/sys/unix/ztypes_openbsd_amd64.go | 574 ++ .../golang.org/x/sys/unix/ztypes_openbsd_arm.go | 575 ++ .../golang.org/x/sys/unix/ztypes_openbsd_arm64.go | 568 ++ .../golang.org/x/sys/unix/ztypes_openbsd_mips64.go | 568 ++ .../golang.org/x/sys/unix/ztypes_solaris_amd64.go | 482 + .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 406 + src/cmd/vendor/golang.org/x/sys/windows/aliases.go | 13 + .../vendor/golang.org/x/sys/windows/dll_windows.go | 416 + 
src/cmd/vendor/golang.org/x/sys/windows/empty.s | 9 + .../vendor/golang.org/x/sys/windows/env_windows.go | 54 + .../vendor/golang.org/x/sys/windows/eventlog.go | 21 + .../golang.org/x/sys/windows/exec_windows.go | 178 + .../golang.org/x/sys/windows/memory_windows.go | 48 + .../vendor/golang.org/x/sys/windows/mkerrors.bash | 70 + .../golang.org/x/sys/windows/mkknownfolderids.bash | 27 + .../vendor/golang.org/x/sys/windows/mksyscall.go | 10 + src/cmd/vendor/golang.org/x/sys/windows/race.go | 31 + src/cmd/vendor/golang.org/x/sys/windows/race0.go | 26 + .../golang.org/x/sys/windows/security_windows.go | 1444 +++ src/cmd/vendor/golang.org/x/sys/windows/service.go | 247 + .../golang.org/x/sys/windows/setupapi_windows.go | 1425 +++ src/cmd/vendor/golang.org/x/sys/windows/str.go | 23 + src/cmd/vendor/golang.org/x/sys/windows/syscall.go | 113 + .../golang.org/x/sys/windows/syscall_windows.go | 1700 ++++ .../golang.org/x/sys/windows/types_windows.go | 3176 +++++++ .../golang.org/x/sys/windows/types_windows_386.go | 35 + .../x/sys/windows/types_windows_amd64.go | 34 + .../golang.org/x/sys/windows/types_windows_arm.go | 35 + .../x/sys/windows/types_windows_arm64.go | 34 + .../golang.org/x/sys/windows/zerrors_windows.go | 9468 +++++++++++++++++++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../golang.org/x/sys/windows/zsyscall_windows.go | 4196 +++++++++ src/cmd/vendor/golang.org/x/term/AUTHORS | 3 + src/cmd/vendor/golang.org/x/term/CONTRIBUTING.md | 26 + src/cmd/vendor/golang.org/x/term/CONTRIBUTORS | 3 + src/cmd/vendor/golang.org/x/term/LICENSE | 27 + src/cmd/vendor/golang.org/x/term/PATENTS | 22 + src/cmd/vendor/golang.org/x/term/README.md | 19 + src/cmd/vendor/golang.org/x/term/codereview.cfg | 1 + src/cmd/vendor/golang.org/x/term/term.go | 60 + src/cmd/vendor/golang.org/x/term/term_plan9.go | 42 + src/cmd/vendor/golang.org/x/term/term_unix.go | 92 + src/cmd/vendor/golang.org/x/term/term_unix_bsd.go | 13 + .../vendor/golang.org/x/term/term_unix_other.go | 13 + 
.../vendor/golang.org/x/term/term_unsupported.go | 39 + src/cmd/vendor/golang.org/x/term/term_windows.go | 79 + src/cmd/vendor/golang.org/x/term/terminal.go | 987 ++ src/cmd/vendor/golang.org/x/tools/AUTHORS | 3 + src/cmd/vendor/golang.org/x/tools/CONTRIBUTORS | 3 + src/cmd/vendor/golang.org/x/tools/LICENSE | 27 + src/cmd/vendor/golang.org/x/tools/PATENTS | 22 + src/cmd/vendor/golang.org/x/tools/cover/profile.go | 266 + .../golang.org/x/tools/go/analysis/analysis.go | 242 + .../golang.org/x/tools/go/analysis/diagnostic.go | 65 + .../vendor/golang.org/x/tools/go/analysis/doc.go | 313 + .../go/analysis/internal/analysisflags/flags.go | 388 + .../go/analysis/internal/analysisflags/help.go | 96 + .../x/tools/go/analysis/internal/facts/facts.go | 322 + .../x/tools/go/analysis/internal/facts/imports.go | 118 + .../go/analysis/passes/asmdecl/arches_go118.go | 12 + .../go/analysis/passes/asmdecl/arches_go119.go | 14 + .../x/tools/go/analysis/passes/asmdecl/asmdecl.go | 824 ++ .../x/tools/go/analysis/passes/assign/assign.go | 76 + .../x/tools/go/analysis/passes/atomic/atomic.go | 96 + .../x/tools/go/analysis/passes/bools/bools.go | 225 + .../tools/go/analysis/passes/buildtag/buildtag.go | 367 + .../go/analysis/passes/buildtag/buildtag_old.go | 174 + .../x/tools/go/analysis/passes/cgocall/cgocall.go | 376 + .../go/analysis/passes/composite/composite.go | 140 + .../go/analysis/passes/composite/whitelist.go | 35 + .../tools/go/analysis/passes/copylock/copylock.go | 353 + .../tools/go/analysis/passes/ctrlflow/ctrlflow.go | 230 + .../tools/go/analysis/passes/errorsas/errorsas.go | 89 + .../analysis/passes/framepointer/framepointer.go | 91 + .../analysis/passes/httpresponse/httpresponse.go | 182 + .../go/analysis/passes/ifaceassert/ifaceassert.go | 111 + .../analysis/passes/ifaceassert/parameterized.go | 112 + .../x/tools/go/analysis/passes/inspect/inspect.go | 48 + .../analysis/passes/internal/analysisutil/util.go | 120 + .../go/analysis/passes/loopclosure/loopclosure.go | 165 + 
.../go/analysis/passes/lostcancel/lostcancel.go | 330 + .../x/tools/go/analysis/passes/nilfunc/nilfunc.go | 81 + .../x/tools/go/analysis/passes/printf/printf.go | 1139 +++ .../x/tools/go/analysis/passes/printf/types.go | 311 + .../x/tools/go/analysis/passes/shift/dead.go | 101 + .../x/tools/go/analysis/passes/shift/shift.go | 131 + .../go/analysis/passes/sigchanyzer/sigchanyzer.go | 154 + .../go/analysis/passes/stdmethods/stdmethods.go | 206 + .../go/analysis/passes/stringintconv/string.go | 219 + .../go/analysis/passes/structtag/structtag.go | 313 + .../passes/testinggoroutine/testinggoroutine.go | 192 + .../x/tools/go/analysis/passes/tests/tests.go | 484 + .../go/analysis/passes/unmarshal/unmarshal.go | 101 + .../go/analysis/passes/unreachable/unreachable.go | 325 + .../go/analysis/passes/unsafeptr/unsafeptr.go | 168 + .../analysis/passes/unusedresult/unusedresult.go | 137 + .../x/tools/go/analysis/unitchecker/unitchecker.go | 400 + .../go/analysis/unitchecker/unitchecker112.go | 14 + .../golang.org/x/tools/go/analysis/validate.go | 135 + .../golang.org/x/tools/go/ast/astutil/enclosing.go | 635 ++ .../golang.org/x/tools/go/ast/astutil/imports.go | 485 + .../golang.org/x/tools/go/ast/astutil/rewrite.go | 488 + .../golang.org/x/tools/go/ast/astutil/util.go | 18 + .../x/tools/go/ast/inspector/inspector.go | 186 + .../golang.org/x/tools/go/ast/inspector/typeof.go | 228 + .../vendor/golang.org/x/tools/go/cfg/builder.go | 507 + src/cmd/vendor/golang.org/x/tools/go/cfg/cfg.go | 149 + .../x/tools/go/types/objectpath/objectpath.go | 720 ++ .../golang.org/x/tools/go/types/typeutil/callee.go | 69 + .../x/tools/go/types/typeutil/imports.go | 30 + .../golang.org/x/tools/go/types/typeutil/map.go | 436 + .../x/tools/go/types/typeutil/methodsetcache.go | 71 + .../golang.org/x/tools/go/types/typeutil/ui.go | 51 + .../x/tools/internal/analysisinternal/analysis.go | 428 + .../golang.org/x/tools/internal/lsp/fuzzy/input.go | 183 + .../x/tools/internal/lsp/fuzzy/matcher.go | 407 + 
.../x/tools/internal/lsp/fuzzy/symbol.go | 237 + .../x/tools/internal/typeparams/common.go | 179 + .../x/tools/internal/typeparams/enabled_go117.go | 12 + .../x/tools/internal/typeparams/enabled_go118.go | 15 + .../x/tools/internal/typeparams/normalize.go | 218 + .../x/tools/internal/typeparams/termlist.go | 163 + .../tools/internal/typeparams/typeparams_go117.go | 197 + .../tools/internal/typeparams/typeparams_go118.go | 151 + .../x/tools/internal/typeparams/typeterm.go | 170 + src/cmd/vendor/modules.txt | 96 + 573 files changed, 289395 insertions(+) create mode 100644 src/cmd/vendor/github.com/google/pprof/AUTHORS create mode 100644 src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS create mode 100644 src/cmd/vendor/github.com/google/pprof/LICENSE create mode 100644 src/cmd/vendor/github.com/google/pprof/driver/driver.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/config.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go create mode 100644 
src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/options.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/report/report.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/report/source.go create mode 
100644 src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/report/synth.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/transport/transport.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/encode.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/filter.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/index.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/merge.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/profile.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/proto.go create mode 100644 src/cmd/vendor/github.com/google/pprof/profile/prune.go create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/index.js create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/package-lock.json create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/package.json create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/update.sh create mode 100644 
src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/svgpan/LICENSE create mode 100644 src/cmd/vendor/github.com/google/pprof/third_party/svgpan/svgpan.go create mode 100644 src/cmd/vendor/github.com/ianlancetaylor/demangle/.gitignore create mode 100644 src/cmd/vendor/github.com/ianlancetaylor/demangle/LICENSE create mode 100644 src/cmd/vendor/github.com/ianlancetaylor/demangle/README.md create mode 100644 src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go create mode 100644 src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go create mode 100644 src/cmd/vendor/github.com/ianlancetaylor/demangle/rust.go create mode 100644 src/cmd/vendor/golang.org/x/arch/AUTHORS create mode 100644 src/cmd/vendor/golang.org/x/arch/CONTRIBUTORS create mode 100644 src/cmd/vendor/golang.org/x/arch/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/arch/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/arch/arm/armasm/Makefile create mode 100644 src/cmd/vendor/golang.org/x/arch/arm/armasm/decode.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm/armasm/gnu.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm/armasm/inst.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm/armasm/tables.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/arg.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.json create mode 100644 
src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/plan9x.go create mode 100644 src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/tables.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/decode.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/doc.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/field.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/inst.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/plan9.go create mode 100644 src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/tables.go create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/Makefile create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/decode.go create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/inst.go create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/intel.go create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/plan9x.go create mode 100644 src/cmd/vendor/golang.org/x/arch/x86/x86asm/tables.go create mode 100644 src/cmd/vendor/golang.org/x/crypto/AUTHORS create mode 100644 src/cmd/vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 src/cmd/vendor/golang.org/x/crypto/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/crypto/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 src/cmd/vendor/golang.org/x/mod/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/mod/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go create mode 100644 src/cmd/vendor/golang.org/x/mod/modfile/print.go create mode 100644 src/cmd/vendor/golang.org/x/mod/modfile/read.go create mode 100644 src/cmd/vendor/golang.org/x/mod/modfile/rule.go create mode 100644 src/cmd/vendor/golang.org/x/mod/modfile/work.go create mode 100644 
src/cmd/vendor/golang.org/x/mod/module/module.go create mode 100644 src/cmd/vendor/golang.org/x/mod/module/pseudo.go create mode 100644 src/cmd/vendor/golang.org/x/mod/semver/semver.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/cache.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/client.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/dirhash/hash.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/server.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/test.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/tlog/note.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tile.go create mode 100644 src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tlog.go create mode 100644 src/cmd/vendor/golang.org/x/mod/zip/zip.go create mode 100644 src/cmd/vendor/golang.org/x/sync/AUTHORS create mode 100644 src/cmd/vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 src/cmd/vendor/golang.org/x/sync/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/sync/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 src/cmd/vendor/golang.org/x/sys/AUTHORS create mode 100644 src/cmd/vendor/golang.org/x/sys/CONTRIBUTORS create mode 100644 src/cmd/vendor/golang.org/x/sys/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/sys/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/asm.s create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/asm_plan9_386.s create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/const_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/dir_plan9.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/plan9/env_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/errors_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/mkall.sh create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/mkerrors.sh create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/pwd_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/race.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/race0.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/str.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/syscall.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/syscall_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/.gitignore create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/README.md create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/aliases.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_386.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_zos_s390x.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/constants.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_darwin.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_dragonfly.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_freebsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_netbsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_openbsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dev_zos.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/dirent.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/epoll_zos.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/errors_freebsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/fcntl.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/fcntl_darwin.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/fdset.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ioctl.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ioctl_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ioctl_zos.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/mkall.sh create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/pagesize_unix.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/pledge_openbsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ptrace_darwin.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ptrace_ios.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/race.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/race0.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_linux.go 
create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/str.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_aix.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create 
mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sysvshm_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/unveil_openbsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/xattr_bsd.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 
100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go 
create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go 
create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/aliases.go create mode 100644 
src/cmd/vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/empty.s create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/race.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/race0.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/service.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/setupapi_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/str.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/syscall.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 src/cmd/vendor/golang.org/x/term/AUTHORS create mode 100644 src/cmd/vendor/golang.org/x/term/CONTRIBUTING.md create 
mode 100644 src/cmd/vendor/golang.org/x/term/CONTRIBUTORS create mode 100644 src/cmd/vendor/golang.org/x/term/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/term/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/term/README.md create mode 100644 src/cmd/vendor/golang.org/x/term/codereview.cfg create mode 100644 src/cmd/vendor/golang.org/x/term/term.go create mode 100644 src/cmd/vendor/golang.org/x/term/term_plan9.go create mode 100644 src/cmd/vendor/golang.org/x/term/term_unix.go create mode 100644 src/cmd/vendor/golang.org/x/term/term_unix_bsd.go create mode 100644 src/cmd/vendor/golang.org/x/term/term_unix_other.go create mode 100644 src/cmd/vendor/golang.org/x/term/term_unsupported.go create mode 100644 src/cmd/vendor/golang.org/x/term/term_windows.go create mode 100644 src/cmd/vendor/golang.org/x/term/terminal.go create mode 100644 src/cmd/vendor/golang.org/x/tools/AUTHORS create mode 100644 src/cmd/vendor/golang.org/x/tools/CONTRIBUTORS create mode 100644 src/cmd/vendor/golang.org/x/tools/LICENSE create mode 100644 src/cmd/vendor/golang.org/x/tools/PATENTS create mode 100644 src/cmd/vendor/golang.org/x/tools/cover/profile.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/analysis.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/doc.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/internal/facts/imports.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go create mode 100644 
src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag_old.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/composite/whitelist.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go create mode 100644 
src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/dead.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker112.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/analysis/validate.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/ast/inspector/inspector.go create mode 100644 
src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/cfg/builder.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/cfg/cfg.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/types/typeutil/callee.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/types/typeutil/imports.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go create mode 100644 src/cmd/vendor/golang.org/x/tools/go/types/typeutil/ui.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go create mode 100644 src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 src/cmd/vendor/modules.txt (limited to 'src/cmd/vendor') diff --git a/src/cmd/vendor/github.com/google/pprof/AUTHORS b/src/cmd/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 0000000..fd736cb --- /dev/null +++ 
b/src/cmd/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS b/src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 0000000..8c8c37d --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/src/cmd/vendor/github.com/google/pprof/LICENSE b/src/cmd/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/vendor/github.com/google/pprof/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/driver/driver.go new file mode 100644 index 0000000..5a8222f --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/driver/driver.go @@ -0,0 +1,298 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package driver provides an external entry point to the pprof driver. +package driver + +import ( + "io" + "net/http" + "regexp" + "time" + + internaldriver "github.com/google/pprof/internal/driver" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// PProf acquires a profile, and symbolizes it using a profile +// manager. Then it generates a report formatted according to the +// options selected through the flags package. 
+func PProf(o *Options) error { + return internaldriver.PProf(o.internalOptions()) +} + +func (o *Options) internalOptions() *plugin.Options { + var obj plugin.ObjTool + if o.Obj != nil { + obj = &internalObjTool{o.Obj} + } + var sym plugin.Symbolizer + if o.Sym != nil { + sym = &internalSymbolizer{o.Sym} + } + var httpServer func(args *plugin.HTTPServerArgs) error + if o.HTTPServer != nil { + httpServer = func(args *plugin.HTTPServerArgs) error { + return o.HTTPServer(((*HTTPServerArgs)(args))) + } + } + return &plugin.Options{ + Writer: o.Writer, + Flagset: o.Flagset, + Fetch: o.Fetch, + Sym: sym, + Obj: obj, + UI: o.UI, + HTTPServer: httpServer, + HTTPTransport: o.HTTPTransport, + } +} + +// HTTPServerArgs contains arguments needed by an HTTP server that +// is exporting a pprof web interface. +type HTTPServerArgs plugin.HTTPServerArgs + +// Options groups all the optional plugins into pprof. +type Options struct { + Writer Writer + Flagset FlagSet + Fetch Fetcher + Sym Symbolizer + Obj ObjTool + UI UI + HTTPServer func(*HTTPServerArgs) error + HTTPTransport http.RoundTripper +} + +// Writer provides a mechanism to write data under a certain name, +// typically a filename. +type Writer interface { + Open(name string) (io.WriteCloser, error) +} + +// A FlagSet creates and parses command-line flags. +// It is similar to the standard flag.FlagSet. +type FlagSet interface { + // Bool, Int, Float64, and String define new flags, + // like the functions of the same name in package flag. + Bool(name string, def bool, usage string) *bool + Int(name string, def int, usage string) *int + Float64(name string, def float64, usage string) *float64 + String(name string, def string, usage string) *string + + // StringList is similar to String but allows multiple values for a + // single flag + StringList(name string, def string, usage string) *[]*string + + // ExtraUsage returns any additional text that should be printed after the + // standard usage message. 
The extra usage message returned includes all text + // added with AddExtraUsage(). + // The typical use of ExtraUsage is to show any custom flags defined by the + // specific pprof plugins being used. + ExtraUsage() string + + // AddExtraUsage appends additional text to the end of the extra usage message. + AddExtraUsage(eu string) + + // Parse initializes the flags with their values for this run + // and returns the non-flag command line arguments. + // If an unknown flag is encountered or there are no arguments, + // Parse should call usage and return nil. + Parse(usage func()) []string +} + +// A Fetcher reads and returns the profile named by src, using +// the specified duration and timeout. It returns the fetched +// profile and a string indicating a URL from where the profile +// was fetched, which may be different than src. +type Fetcher interface { + Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) +} + +// A Symbolizer introduces symbol information into a profile. +type Symbolizer interface { + Symbolize(mode string, srcs MappingSources, prof *profile.Profile) error +} + +// MappingSources map each profile.Mapping to the source of the profile. +// The key is either Mapping.File or Mapping.BuildId. +type MappingSources map[string][]struct { + Source string // URL of the source the mapping was collected from + Start uint64 // delta applied to addresses from this source (to represent Merge adjustments) +} + +// An ObjTool inspects shared libraries and executable files. +type ObjTool interface { + // Open opens the named object file. If the object is a shared + // library, start/limit/offset are the addresses where it is mapped + // into memory in the address space being inspected. If the object + // is a linux kernel, relocationSymbol is the name of the symbol + // corresponding to the start address. 
+ Open(file string, start, limit, offset uint64, relocationSymbol string) (ObjFile, error) + + // Disasm disassembles the named object file, starting at + // the start address and stopping at (before) the end address. + Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error) +} + +// An Inst is a single instruction in an assembly listing. +type Inst struct { + Addr uint64 // virtual address of instruction + Text string // instruction text + Function string // function name + File string // source file + Line int // source line +} + +// An ObjFile is a single object file: a shared library or executable. +type ObjFile interface { + // Name returns the underlying file name, if available. + Name() string + + // ObjAddr returns the objdump address corresponding to a runtime address. + ObjAddr(addr uint64) (uint64, error) + + // BuildID returns the GNU build ID of the file, or an empty string. + BuildID() string + + // SourceLine reports the source line information for a given + // address in the file. Due to inlining, the source line information + // is in general a list of positions representing a call stack, + // with the leaf function first. + SourceLine(addr uint64) ([]Frame, error) + + // Symbols returns a list of symbols in the object file. + // If r is not nil, Symbols restricts the list to symbols + // with names matching the regular expression. + // If addr is not zero, Symbols restricts the list to symbols + // containing that address. + Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error) + + // Close closes the file, releasing associated resources. + Close() error +} + +// A Frame describes a single line in a source file. +type Frame struct { + Func string // name of function + File string // source file name + Line int // line in file +} + +// A Sym describes a single symbol in an object file. 
+type Sym struct { + Name []string // names of symbol (many if symbol was dedup'ed) + File string // object file containing symbol + Start uint64 // start virtual address + End uint64 // virtual address of last byte in sym (Start+size-1) +} + +// A UI manages user interactions. +type UI interface { + // Read returns a line of text (a command) read from the user. + // prompt is printed before reading the command. + ReadLine(prompt string) (string, error) + + // Print shows a message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, Print writes to standard error. + // (Standard output is reserved for report data.) + Print(...interface{}) + + // PrintErr shows an error message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, PrintErr writes to standard error. + PrintErr(...interface{}) + + // IsTerminal returns whether the UI is known to be tied to an + // interactive terminal (as opposed to being redirected to a file). + IsTerminal() bool + + // WantBrowser indicates whether browser should be opened with the -http option. + WantBrowser() bool + + // SetAutoComplete instructs the UI to call complete(cmd) to obtain + // the auto-completion of cmd, if the UI supports auto-completion at all. + SetAutoComplete(complete func(string) string) +} + +// internalObjTool is a wrapper to map from the pprof external +// interface to the internal interface. 
+type internalObjTool struct { + ObjTool +} + +func (o *internalObjTool) Open(file string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + f, err := o.ObjTool.Open(file, start, limit, offset, relocationSymbol) + if err != nil { + return nil, err + } + return &internalObjFile{f}, err +} + +type internalObjFile struct { + ObjFile +} + +func (f *internalObjFile) SourceLine(frame uint64) ([]plugin.Frame, error) { + frames, err := f.ObjFile.SourceLine(frame) + if err != nil { + return nil, err + } + var pluginFrames []plugin.Frame + for _, f := range frames { + pluginFrames = append(pluginFrames, plugin.Frame(f)) + } + return pluginFrames, nil +} + +func (f *internalObjFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + syms, err := f.ObjFile.Symbols(r, addr) + if err != nil { + return nil, err + } + var pluginSyms []*plugin.Sym + for _, s := range syms { + ps := plugin.Sym(*s) + pluginSyms = append(pluginSyms, &ps) + } + return pluginSyms, nil +} + +func (o *internalObjTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + insts, err := o.ObjTool.Disasm(file, start, end, intelSyntax) + if err != nil { + return nil, err + } + var pluginInst []plugin.Inst + for _, inst := range insts { + pluginInst = append(pluginInst, plugin.Inst(inst)) + } + return pluginInst, nil +} + +// internalSymbolizer is a wrapper to map from the pprof external +// interface to the internal interface. 
+type internalSymbolizer struct { + Symbolizer +} + +func (s *internalSymbolizer) Symbolize(mode string, srcs plugin.MappingSources, prof *profile.Profile) error { + isrcs := MappingSources{} + for m, s := range srcs { + isrcs[m] = s + } + return s.Symbolizer.Symbolize(mode, isrcs, prof) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go new file mode 100644 index 0000000..0c70239 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go @@ -0,0 +1,238 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "github.com/google/pprof/internal/plugin" +) + +const ( + defaultAddr2line = "addr2line" + + // addr2line may produce multiple lines of output. We + // use this sentinel to identify the end of the output. + sentinel = ^uint64(0) +) + +// addr2Liner is a connection to an addr2line command for obtaining +// address and line number information from a binary. +type addr2Liner struct { + mu sync.Mutex + rw lineReaderWriter + base uint64 + + // nm holds an addr2Liner using nm tool. Certain versions of addr2line + // produce incomplete names due to + // https://sourceware.org/bugzilla/show_bug.cgi?id=17541. 
As a workaround, + // the names from nm are used when they look more complete. See addrInfo() + // code below for the exact heuristic. + nm *addr2LinerNM +} + +// lineReaderWriter is an interface to abstract the I/O to an addr2line +// process. It writes a line of input to the job, and reads its output +// one line at a time. +type lineReaderWriter interface { + write(string) error + readLine() (string, error) + close() +} + +type addr2LinerJob struct { + cmd *exec.Cmd + in io.WriteCloser + out *bufio.Reader +} + +func (a *addr2LinerJob) write(s string) error { + _, err := fmt.Fprint(a.in, s+"\n") + return err +} + +func (a *addr2LinerJob) readLine() (string, error) { + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil +} + +// close releases any resources used by the addr2liner object. +func (a *addr2LinerJob) close() { + a.in.Close() + a.cmd.Wait() +} + +// newAddr2liner starts the given addr2liner command reporting +// information about the given executable file. If file is a shared +// library, base should be the address at which it was mapped in the +// program under consideration. +func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) { + if cmd == "" { + cmd = defaultAddr2line + } + + j := &addr2LinerJob{ + cmd: exec.Command(cmd, "-aif", "-e", file), + } + + var err error + if j.in, err = j.cmd.StdinPipe(); err != nil { + return nil, err + } + + outPipe, err := j.cmd.StdoutPipe() + if err != nil { + return nil, err + } + + j.out = bufio.NewReader(outPipe) + if err := j.cmd.Start(); err != nil { + return nil, err + } + + a := &addr2Liner{ + rw: j, + base: base, + } + + return a, nil +} + +// readFrame parses the addr2line output for a single address. It +// returns a populated plugin.Frame and whether it has reached the end of the +// data. 
+func (d *addr2Liner) readFrame() (plugin.Frame, bool) { + funcname, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + if strings.HasPrefix(funcname, "0x") { + // If addr2line returns a hex address we can assume it is the + // sentinel. Read and ignore next two lines of output from + // addr2line + d.rw.readLine() + d.rw.readLine() + return plugin.Frame{}, true + } + + fileline, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + + linenumber := 0 + + if funcname == "??" { + funcname = "" + } + + if fileline == "??:0" { + fileline = "" + } else { + if i := strings.LastIndex(fileline, ":"); i >= 0 { + // Remove discriminator, if present + if disc := strings.Index(fileline, " (discriminator"); disc > 0 { + fileline = fileline[:disc] + } + // If we cannot parse a number after the last ":", keep it as + // part of the filename. + if line, err := strconv.Atoi(fileline[i+1:]); err == nil { + linenumber = line + fileline = fileline[:i] + } + } + } + + return plugin.Frame{ + Func: funcname, + File: fileline, + Line: linenumber}, false +} + +func (d *addr2Liner) rawAddrInfo(addr uint64) ([]plugin.Frame, error) { + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.rw.write(fmt.Sprintf("%x", addr-d.base)); err != nil { + return nil, err + } + + if err := d.rw.write(fmt.Sprintf("%x", sentinel)); err != nil { + return nil, err + } + + resp, err := d.rw.readLine() + if err != nil { + return nil, err + } + + if !strings.HasPrefix(resp, "0x") { + return nil, fmt.Errorf("unexpected addr2line output: %s", resp) + } + + var stack []plugin.Frame + for { + frame, end := d.readFrame() + if end { + break + } + + if frame != (plugin.Frame{}) { + stack = append(stack, frame) + } + } + return stack, err +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. 
+func (d *addr2Liner) addrInfo(addr uint64) ([]plugin.Frame, error) {
+	stack, err := d.rawAddrInfo(addr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Certain versions of addr2line produce incomplete names due to
+	// https://sourceware.org/bugzilla/show_bug.cgi?id=17541. Attempt to replace
+	// the name with a better one from nm.
+	if len(stack) > 0 && d.nm != nil {
+		nm, err := d.nm.addrInfo(addr)
+		if err == nil && len(nm) > 0 {
+			// Last entry in frame list should match since it is non-inlined. As a
+			// simple heuristic, we only switch to the nm-based name if it is longer
+			// by 2 or more characters. We consider nm names that are longer by 1
+			// character insignificant to avoid replacing foo with _foo on MacOS (for
+			// unknown reasons addr2line produces the former and nm produces the
+			// latter on MacOS even though both tools are asked to produce mangled
+			// names).
+			nmName := nm[len(nm)-1].Func
+			a2lName := stack[len(stack)-1].Func
+			if len(nmName) > len(a2lName)+1 {
+				stack[len(stack)-1].Func = nmName
+			}
+		}
+	}
+
+	return stack, nil
+}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
new file mode 100644
index 0000000..844c7a4
--- /dev/null
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
@@ -0,0 +1,181 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "github.com/google/pprof/internal/plugin" +) + +const ( + defaultLLVMSymbolizer = "llvm-symbolizer" +) + +// llvmSymbolizer is a connection to an llvm-symbolizer command for +// obtaining address and line number information from a binary. +type llvmSymbolizer struct { + sync.Mutex + filename string + rw lineReaderWriter + base uint64 +} + +type llvmSymbolizerJob struct { + cmd *exec.Cmd + in io.WriteCloser + out *bufio.Reader + // llvm-symbolizer requires the symbol type, CODE or DATA, for symbolization. + symType string +} + +func (a *llvmSymbolizerJob) write(s string) error { + _, err := fmt.Fprintln(a.in, a.symType, s) + return err +} + +func (a *llvmSymbolizerJob) readLine() (string, error) { + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil +} + +// close releases any resources used by the llvmSymbolizer object. +func (a *llvmSymbolizerJob) close() { + a.in.Close() + a.cmd.Wait() +} + +// newLlvmSymbolizer starts the given llvmSymbolizer command reporting +// information about the given executable file. If file is a shared +// library, base should be the address at which it was mapped in the +// program under consideration. 
+func newLLVMSymbolizer(cmd, file string, base uint64, isData bool) (*llvmSymbolizer, error) {
+	if cmd == "" {
+		cmd = defaultLLVMSymbolizer
+	}
+
+	j := &llvmSymbolizerJob{
+		cmd:     exec.Command(cmd, "--inlining", "-demangle=false"),
+		symType: "CODE",
+	}
+	if isData {
+		j.symType = "DATA"
+	}
+
+	var err error
+	if j.in, err = j.cmd.StdinPipe(); err != nil {
+		return nil, err
+	}
+
+	outPipe, err := j.cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	j.out = bufio.NewReader(outPipe)
+	if err := j.cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	a := &llvmSymbolizer{
+		filename: file,
+		rw:       j,
+		base:     base,
+	}
+
+	return a, nil
+}
+
+// readFrame parses the llvm-symbolizer output for a single address. It
+// returns a populated plugin.Frame and whether it has reached the end of the
+// data.
+func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) {
+	funcname, err := d.rw.readLine()
+	if err != nil {
+		return plugin.Frame{}, true
+	}
+
+	switch funcname {
+	case "":
+		return plugin.Frame{}, true
+	case "??":
+		funcname = ""
+	}
+
+	fileline, err := d.rw.readLine()
+	if err != nil {
+		return plugin.Frame{Func: funcname}, true
+	}
+
+	linenumber := 0
+	// The llvm-symbolizer outputs the <file_name>:<line_number>:<column_number>.
+	// When it cannot identify the source code location, it outputs "??:0:0".
+	// Older versions output just the filename and line number, so we check for
+	// both conditions here.
+	if fileline == "??:0" || fileline == "??:0:0" {
+		fileline = ""
+	} else {
+		switch split := strings.Split(fileline, ":"); len(split) {
+		case 1:
+			// filename
+			fileline = split[0]
+		case 2, 3:
+			// filename:line , or
+			// filename:line:disc , or
+			fileline = split[0]
+			if line, err := strconv.Atoi(split[1]); err == nil {
+				linenumber = line
+			}
+		default:
+			// Unrecognized, ignore
+		}
+	}
+
+	return plugin.Frame{Func: funcname, File: fileline, Line: linenumber}, false
+}
+
+// addrInfo returns the stack frame information for a specific program
+// address.
It returns nil if the address could not be identified. +func (d *llvmSymbolizer) addrInfo(addr uint64) ([]plugin.Frame, error) { + d.Lock() + defer d.Unlock() + + if err := d.rw.write(fmt.Sprintf("%s 0x%x", d.filename, addr-d.base)); err != nil { + return nil, err + } + + var stack []plugin.Frame + for { + frame, end := d.readFrame() + if end { + break + } + + if frame != (plugin.Frame{}) { + stack = append(stack, frame) + } + } + + return stack, nil +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go new file mode 100644 index 0000000..8e0ccc7 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go @@ -0,0 +1,144 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "strconv" + "strings" + + "github.com/google/pprof/internal/plugin" +) + +const ( + defaultNM = "nm" +) + +// addr2LinerNM is a connection to an nm command for obtaining symbol +// information from a binary. +type addr2LinerNM struct { + m []symbolInfo // Sorted list of symbol addresses from binary. +} + +type symbolInfo struct { + address uint64 + size uint64 + name string + symType string +} + +// isData returns if the symbol has a known data object symbol type. 
+func (s *symbolInfo) isData() bool { + // The following symbol types are taken from https://linux.die.net/man/1/nm: + // Lowercase letter means local symbol, uppercase denotes a global symbol. + // - b or B: the symbol is in the uninitialized data section, e.g. .bss; + // - d or D: the symbol is in the initialized data section; + // - r or R: the symbol is in a read only data section; + // - v or V: the symbol is a weak object; + // - W: the symbol is a weak symbol that has not been specifically tagged as a + // weak object symbol. Experiments with some binaries, showed these to be + // mostly data objects. + return strings.ContainsAny(s.symType, "bBdDrRvVW") +} + +// newAddr2LinerNM starts the given nm command reporting information about the +// given executable file. If file is a shared library, base should be the +// address at which it was mapped in the program under consideration. +func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) { + if cmd == "" { + cmd = defaultNM + } + var b bytes.Buffer + c := exec.Command(cmd, "--numeric-sort", "--print-size", "--format=posix", file) + c.Stdout = &b + if err := c.Run(); err != nil { + return nil, err + } + return parseAddr2LinerNM(base, &b) +} + +func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) { + a := &addr2LinerNM{ + m: []symbolInfo{}, + } + + // Parse nm output and populate symbol map. + // Skip lines we fail to parse. 
+ buf := bufio.NewReader(nm) + for { + line, err := buf.ReadString('\n') + if line == "" && err != nil { + if err == io.EOF { + break + } + return nil, err + } + line = strings.TrimSpace(line) + fields := strings.Split(line, " ") + if len(fields) != 4 { + continue + } + address, err := strconv.ParseUint(fields[2], 16, 64) + if err != nil { + continue + } + size, err := strconv.ParseUint(fields[3], 16, 64) + if err != nil { + continue + } + a.m = append(a.m, symbolInfo{ + address: address + base, + size: size, + name: fields[0], + symType: fields[1], + }) + } + + return a, nil +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. +func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) { + if len(a.m) == 0 || addr < a.m[0].address || addr >= (a.m[len(a.m)-1].address+a.m[len(a.m)-1].size) { + return nil, nil + } + + // Binary search. Search until low, high are separated by 1. + low, high := 0, len(a.m) + for low+1 < high { + mid := (low + high) / 2 + v := a.m[mid].address + if addr == v { + low = mid + break + } else if addr > v { + low = mid + } else { + high = mid + } + } + + // Address is between a.m[low] and a.m[high]. Pick low, as it represents + // [low, high). For data symbols, we use a strict check that the address is in + // the [start, start + size) range of a.m[low]. + if a.m[low].isData() && addr >= (a.m[low].address+a.m[low].size) { + return nil, nil + } + return []plugin.Frame{{Func: a.m[low].name}}, nil +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go new file mode 100644 index 0000000..efa9167 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go @@ -0,0 +1,738 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package binutils provides access to the GNU binutils. +package binutils + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/google/pprof/internal/elfexec" + "github.com/google/pprof/internal/plugin" +) + +// A Binutils implements plugin.ObjTool by invoking the GNU binutils. +type Binutils struct { + mu sync.Mutex + rep *binrep +} + +var ( + objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`) + + // Defined for testing + elfOpen = elf.Open +) + +// binrep is an immutable representation for Binutils. It is atomically +// replaced on every mutation to provide thread-safe access. +type binrep struct { + // Commands to invoke. + llvmSymbolizer string + llvmSymbolizerFound bool + addr2line string + addr2lineFound bool + nm string + nmFound bool + objdump string + objdumpFound bool + isLLVMObjdump bool + + // if fast, perform symbolization using nm (symbol names only), + // instead of file-line detail from the slower addr2line. + fast bool +} + +// get returns the current representation for bu, initializing it if necessary. 
+func (bu *Binutils) get() *binrep { + bu.mu.Lock() + r := bu.rep + if r == nil { + r = &binrep{} + initTools(r, "") + bu.rep = r + } + bu.mu.Unlock() + return r +} + +// update modifies the rep for bu via the supplied function. +func (bu *Binutils) update(fn func(r *binrep)) { + r := &binrep{} + bu.mu.Lock() + defer bu.mu.Unlock() + if bu.rep == nil { + initTools(r, "") + } else { + *r = *bu.rep + } + fn(r) + bu.rep = r +} + +// String returns string representation of the binutils state for debug logging. +func (bu *Binutils) String() string { + r := bu.get() + var llvmSymbolizer, addr2line, nm, objdump string + if r.llvmSymbolizerFound { + llvmSymbolizer = r.llvmSymbolizer + } + if r.addr2lineFound { + addr2line = r.addr2line + } + if r.nmFound { + nm = r.nm + } + if r.objdumpFound { + objdump = r.objdump + } + return fmt.Sprintf("llvm-symbolizer=%q addr2line=%q nm=%q objdump=%q fast=%t", + llvmSymbolizer, addr2line, nm, objdump, r.fast) +} + +// SetFastSymbolization sets a toggle that makes binutils use fast +// symbolization (using nm), which is much faster than addr2line but +// provides only symbol name information (no file/line). +func (bu *Binutils) SetFastSymbolization(fast bool) { + bu.update(func(r *binrep) { r.fast = fast }) +} + +// SetTools processes the contents of the tools option. It +// expects a set of entries separated by commas; each entry is a pair +// of the form t:path, where cmd will be used to look only for the +// tool named t. If t is not specified, the path is searched for all +// tools. +func (bu *Binutils) SetTools(config string) { + bu.update(func(r *binrep) { initTools(r, config) }) +} + +func initTools(b *binrep, config string) { + // paths collect paths per tool; Key "" contains the default. 
+	paths := make(map[string][]string)
+	for _, t := range strings.Split(config, ",") {
+		name, path := "", t
+		if ct := strings.SplitN(t, ":", 2); len(ct) == 2 {
+			name, path = ct[0], ct[1]
+		}
+		paths[name] = append(paths[name], path)
+	}
+
+	defaultPath := paths[""]
+	b.llvmSymbolizer, b.llvmSymbolizerFound = chooseExe([]string{"llvm-symbolizer"}, []string{}, append(paths["llvm-symbolizer"], defaultPath...))
+	b.addr2line, b.addr2lineFound = chooseExe([]string{"addr2line"}, []string{"gaddr2line"}, append(paths["addr2line"], defaultPath...))
+	// The "-n" option is supported by LLVM since 2011. The output of llvm-nm
+	// and GNU nm with "-n" option is interchangeable for our purposes, so we do
+	// not need to differentiate them.
+	b.nm, b.nmFound = chooseExe([]string{"llvm-nm", "nm"}, []string{"gnm"}, append(paths["nm"], defaultPath...))
+	b.objdump, b.objdumpFound, b.isLLVMObjdump = findObjdump(append(paths["objdump"], defaultPath...))
+}
+
+// findObjdump finds and returns path to preferred objdump binary.
+// Order of preference is: llvm-objdump, objdump.
+// On MacOS only, also looks for gobjdump with least preference.
+// Accepts a list of paths and returns:
+// a string with path to the preferred objdump binary if found,
+// or an empty string if not found;
+// a boolean if any acceptable objdump was found;
+// a boolean indicating if it is an LLVM objdump.
+func findObjdump(paths []string) (string, bool, bool) { + objdumpNames := []string{"llvm-objdump", "objdump"} + if runtime.GOOS == "darwin" { + objdumpNames = append(objdumpNames, "gobjdump") + } + + for _, objdumpName := range objdumpNames { + if objdump, objdumpFound := findExe(objdumpName, paths); objdumpFound { + cmdOut, err := exec.Command(objdump, "--version").Output() + if err != nil { + continue + } + if isLLVMObjdump(string(cmdOut)) { + return objdump, true, true + } + if isBuObjdump(string(cmdOut)) { + return objdump, true, false + } + } + } + return "", false, false +} + +// chooseExe finds and returns path to preferred binary. names is a list of +// names to search on both Linux and OSX. osxNames is a list of names specific +// to OSX. names always has a higher priority than osxNames. The order of +// the name within each list decides its priority (e.g. the first name has a +// higher priority than the second name in the list). +// +// It returns a string with path to the binary and a boolean indicating if any +// acceptable binary was found. +func chooseExe(names, osxNames []string, paths []string) (string, bool) { + if runtime.GOOS == "darwin" { + names = append(names, osxNames...) + } + for _, name := range names { + if binary, found := findExe(name, paths); found { + return binary, true + } + } + return "", false +} + +// isLLVMObjdump accepts a string with path to an objdump binary, +// and returns a boolean indicating if the given binary is an LLVM +// objdump binary of an acceptable version. +func isLLVMObjdump(output string) bool { + fields := objdumpLLVMVerRE.FindStringSubmatch(output) + if len(fields) != 5 { + return false + } + if fields[4] == "trunk" { + return true + } + verMajor, err := strconv.Atoi(fields[1]) + if err != nil { + return false + } + verPatch, err := strconv.Atoi(fields[3]) + if err != nil { + return false + } + if runtime.GOOS == "linux" && verMajor >= 8 { + // Ensure LLVM objdump is at least version 8.0 on Linux. 
+ // Some flags, like --demangle, and double dashes for options are + // not supported by previous versions. + return true + } + if runtime.GOOS == "darwin" { + // Ensure LLVM objdump is at least version 10.0.1 on MacOS. + return verMajor > 10 || (verMajor == 10 && verPatch >= 1) + } + return false +} + +// isBuObjdump accepts a string with path to an objdump binary, +// and returns a boolean indicating if the given binary is a GNU +// binutils objdump binary. No version check is performed. +func isBuObjdump(output string) bool { + return strings.Contains(output, "GNU objdump") +} + +// findExe looks for an executable command on a set of paths. +// If it cannot find it, returns cmd. +func findExe(cmd string, paths []string) (string, bool) { + for _, p := range paths { + cp := filepath.Join(p, cmd) + if c, err := exec.LookPath(cp); err == nil { + return c, true + } + } + return cmd, false +} + +// Disasm returns the assembly instructions for the specified address range +// of a binary. +func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + b := bu.get() + if !b.objdumpFound { + return nil, errors.New("cannot disasm: no objdump tool available") + } + args := []string{"--disassemble", "--demangle", "--no-show-raw-insn", + "--line-numbers", fmt.Sprintf("--start-address=%#x", start), + fmt.Sprintf("--stop-address=%#x", end)} + + if intelSyntax { + if b.isLLVMObjdump { + args = append(args, "--x86-asm-syntax=intel") + } else { + args = append(args, "-M", "intel") + } + } + + args = append(args, file) + cmd := exec.Command(b.objdump, args...) + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("%v: %v", cmd.Args, err) + } + + return disassemble(out) +} + +// Open satisfies the plugin.ObjTool interface. +func (bu *Binutils) Open(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + b := bu.get() + + // Make sure file is a supported executable. 
+ // This uses magic numbers, mainly to provide better error messages but + // it should also help speed. + + if _, err := os.Stat(name); err != nil { + // For testing, do not require file name to exist. + if strings.Contains(b.addr2line, "testdata/") { + return &fileAddr2Line{file: file{b: b, name: name}}, nil + } + return nil, err + } + + // Read the first 4 bytes of the file. + + f, err := os.Open(name) + if err != nil { + return nil, fmt.Errorf("error opening %s: %v", name, err) + } + defer f.Close() + + var header [4]byte + if _, err = io.ReadFull(f, header[:]); err != nil { + return nil, fmt.Errorf("error reading magic number from %s: %v", name, err) + } + + elfMagic := string(header[:]) + + // Match against supported file types. + if elfMagic == elf.ELFMAG { + f, err := b.openELF(name, start, limit, offset, relocationSymbol) + if err != nil { + return nil, fmt.Errorf("error reading ELF file %s: %v", name, err) + } + return f, nil + } + + // Mach-O magic numbers can be big or little endian. 
+ machoMagicLittle := binary.LittleEndian.Uint32(header[:]) + machoMagicBig := binary.BigEndian.Uint32(header[:]) + + if machoMagicLittle == macho.Magic32 || machoMagicLittle == macho.Magic64 || + machoMagicBig == macho.Magic32 || machoMagicBig == macho.Magic64 { + f, err := b.openMachO(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading Mach-O file %s: %v", name, err) + } + return f, nil + } + if machoMagicLittle == macho.MagicFat || machoMagicBig == macho.MagicFat { + f, err := b.openFatMachO(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading fat Mach-O file %s: %v", name, err) + } + return f, nil + } + + peMagic := string(header[:2]) + if peMagic == "MZ" { + f, err := b.openPE(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading PE file %s: %v", name, err) + } + return f, nil + } + + return nil, fmt.Errorf("unrecognized binary format: %s", name) +} + +func (b *binrep) openMachOCommon(name string, of *macho.File, start, limit, offset uint64) (plugin.ObjFile, error) { + + // Subtract the load address of the __TEXT section. Usually 0 for shared + // libraries or 0x100000000 for executables. You can check this value by + // running `objdump -private-headers `. 
+ + textSegment := of.Segment("__TEXT") + if textSegment == nil { + return nil, fmt.Errorf("could not identify base for %s: no __TEXT segment", name) + } + if textSegment.Addr > start { + return nil, fmt.Errorf("could not identify base for %s: __TEXT segment address (0x%x) > mapping start address (0x%x)", + name, textSegment.Addr, start) + } + + base := start - textSegment.Addr + + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{b: b, name: name, base: base}}, nil + } + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +func (b *binrep) openFatMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.OpenFat(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + if len(of.Arches) == 0 { + return nil, fmt.Errorf("empty fat Mach-O file: %s", name) + } + + var arch macho.Cpu + // Use the host architecture. + // TODO: This is not ideal because the host architecture may not be the one + // that was profiled. E.g. an amd64 host can profile a 386 program. 
+ switch runtime.GOARCH { + case "386": + arch = macho.Cpu386 + case "amd64", "amd64p32": + arch = macho.CpuAmd64 + case "arm", "armbe", "arm64", "arm64be": + arch = macho.CpuArm + case "ppc": + arch = macho.CpuPpc + case "ppc64", "ppc64le": + arch = macho.CpuPpc64 + default: + return nil, fmt.Errorf("unsupported host architecture for %s: %s", name, runtime.GOARCH) + } + for i := range of.Arches { + if of.Arches[i].Cpu == arch { + return b.openMachOCommon(name, of.Arches[i].File, start, limit, offset) + } + } + return nil, fmt.Errorf("architecture not found in %s: %s", name, runtime.GOARCH) +} + +func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + return b.openMachOCommon(name, of, start, limit, offset) +} + +func (b *binrep) openELF(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + ef, err := elfOpen(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer ef.Close() + + buildID := "" + if f, err := os.Open(name); err == nil { + if id, err := elfexec.GetBuildID(f); err == nil { + buildID = fmt.Sprintf("%x", id) + } + } + + var ( + kernelOffset *uint64 + pageAligned = func(addr uint64) bool { return addr%4096 == 0 } + ) + if strings.Contains(name, "vmlinux") || !pageAligned(start) || !pageAligned(limit) || !pageAligned(offset) { + // Reading all Symbols is expensive, and we only rarely need it so + // we don't want to do it every time. But if _stext happens to be + // page-aligned but isn't the same as Vaddr, we would symbolize + // wrong. So if the name the addresses aren't page aligned, or if + // the name is "vmlinux" we read _stext. 
We can be wrong if: (1) + // someone passes a kernel path that doesn't contain "vmlinux" AND + // (2) _stext is page-aligned AND (3) _stext is not at Vaddr + symbols, err := ef.Symbols() + if err != nil && err != elf.ErrNoSymbols { + return nil, err + } + + // The kernel relocation symbol (the mapping start address) can be either + // _text or _stext. When profiles are generated by `perf`, which one was used is + // distinguished by the mapping name for the kernel image: + // '[kernel.kallsyms]_text' or '[kernel.kallsyms]_stext', respectively. If we haven't + // been able to parse it from the mapping, we default to _stext. + if relocationSymbol == "" { + relocationSymbol = "_stext" + } + for _, s := range symbols { + if s.Name == relocationSymbol { + kernelOffset = &s.Value + break + } + } + } + + // Check that we can compute a base for the binary. This may not be the + // correct base value, so we don't save it. We delay computing the actual base + // value until we have a sample address for this mapping, so that we can + // correctly identify the associated program segment that is needed to compute + // the base. 
+ if _, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), kernelOffset, start, limit, offset); err != nil { + return nil, fmt.Errorf("could not identify base for %s: %v", name, err) + } + + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset}, + }}, nil + } + return &fileAddr2Line{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset}, + }}, nil +} + +func (b *binrep) openPE(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + pf, err := pe.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer pf.Close() + + var imageBase uint64 + switch h := pf.OptionalHeader.(type) { + case *pe.OptionalHeader32: + imageBase = uint64(h.ImageBase) + case *pe.OptionalHeader64: + imageBase = uint64(h.ImageBase) + default: + return nil, fmt.Errorf("unknown OptionalHeader %T", pf.OptionalHeader) + } + + var base uint64 + if start > 0 { + base = start - imageBase + } + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{b: b, name: name, base: base}}, nil + } + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +// elfMapping stores the parameters of a runtime mapping that are needed to +// identify the ELF segment associated with a mapping. +type elfMapping struct { + // Runtime mapping parameters. + start, limit, offset uint64 + // Offset of kernel relocation symbol. Only defined for kernel images, nil otherwise. + kernelOffset *uint64 +} + +// findProgramHeader returns the program segment that matches the current +// mapping and the given address, or an error if it cannot find a unique program +// header. 
+func (m *elfMapping) findProgramHeader(ef *elf.File, addr uint64) (*elf.ProgHeader, error) { + // For user space executables, we try to find the actual program segment that + // is associated with the given mapping. Skip this search if limit <= start. + // We cannot use just a check on the start address of the mapping to tell if + // it's a kernel / .ko module mapping, because with quipper address remapping + // enabled, the address would be in the lower half of the address space. + + if m.kernelOffset != nil || m.start >= m.limit || m.limit >= (uint64(1)<<63) { + // For the kernel, find the program segment that includes the .text section. + return elfexec.FindTextProgHeader(ef), nil + } + + // Fetch all the loadable segments. + var phdrs []elf.ProgHeader + for i := range ef.Progs { + if ef.Progs[i].Type == elf.PT_LOAD { + phdrs = append(phdrs, ef.Progs[i].ProgHeader) + } + } + // Some ELF files don't contain any loadable program segments, e.g. .ko + // kernel modules. It's not an error to have no header in such cases. + if len(phdrs) == 0 { + return nil, nil + } + // Get all program headers associated with the mapping. + headers := elfexec.ProgramHeadersForMapping(phdrs, m.offset, m.limit-m.start) + if len(headers) == 0 { + return nil, errors.New("no program header matches mapping info") + } + if len(headers) == 1 { + return headers[0], nil + } + + // Use the file offset corresponding to the address to symbolize, to narrow + // down the header. + return elfexec.HeaderForFileOffset(headers, addr-m.start+m.offset) +} + +// file implements the binutils.ObjFile interface. +type file struct { + b *binrep + name string + buildID string + + baseOnce sync.Once // Ensures the base, baseErr and isData are computed once. + base uint64 + baseErr error // Any eventual error while computing the base. + isData bool + // Mapping information. Relevant only for ELF files, nil otherwise. 
+ m *elfMapping +} + +// computeBase computes the relocation base for the given binary file only if +// the elfMapping field is set. It populates the base and isData fields and +// returns an error. +func (f *file) computeBase(addr uint64) error { + if f == nil || f.m == nil { + return nil + } + if addr < f.m.start || addr >= f.m.limit { + return fmt.Errorf("specified address %x is outside the mapping range [%x, %x] for file %q", addr, f.m.start, f.m.limit, f.name) + } + ef, err := elfOpen(f.name) + if err != nil { + return fmt.Errorf("error parsing %s: %v", f.name, err) + } + defer ef.Close() + + ph, err := f.m.findProgramHeader(ef, addr) + if err != nil { + return fmt.Errorf("failed to find program header for file %q, ELF mapping %#v, address %x: %v", f.name, *f.m, addr, err) + } + + base, err := elfexec.GetBase(&ef.FileHeader, ph, f.m.kernelOffset, f.m.start, f.m.limit, f.m.offset) + if err != nil { + return err + } + f.base = base + f.isData = ph != nil && ph.Flags&elf.PF_X == 0 + return nil +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) ObjAddr(addr uint64) (uint64, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return 0, f.baseErr + } + return addr - f.base, nil +} + +func (f *file) BuildID() string { + return f.buildID +} + +func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + return nil, nil +} + +func (f *file) Close() error { + return nil +} + +func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + // Get from nm a list of symbols sorted by address. 
+ cmd := exec.Command(f.b.nm, "-n", f.name) + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("%v: %v", cmd.Args, err) + } + + return findSymbols(out, f.name, r, addr) +} + +// fileNM implements the binutils.ObjFile interface, using 'nm' to map +// addresses to symbols (without file/line number information). It is +// faster than fileAddr2Line. +type fileNM struct { + file + addr2linernm *addr2LinerNM +} + +func (f *fileNM) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + if f.addr2linernm == nil { + addr2liner, err := newAddr2LinerNM(f.b.nm, f.name, f.base) + if err != nil { + return nil, err + } + f.addr2linernm = addr2liner + } + return f.addr2linernm.addrInfo(addr) +} + +// fileAddr2Line implements the binutils.ObjFile interface, using +// llvm-symbolizer, if that's available, or addr2line to map addresses to +// symbols (with file/line number information). It can be slow for large +// binaries with debug information. 
+type fileAddr2Line struct { + once sync.Once + file + addr2liner *addr2Liner + llvmSymbolizer *llvmSymbolizer + isData bool +} + +func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + f.once.Do(f.init) + if f.llvmSymbolizer != nil { + return f.llvmSymbolizer.addrInfo(addr) + } + if f.addr2liner != nil { + return f.addr2liner.addrInfo(addr) + } + return nil, fmt.Errorf("could not find local addr2liner") +} + +func (f *fileAddr2Line) init() { + if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base, f.isData); err == nil { + f.llvmSymbolizer = llvmSymbolizer + return + } + + if addr2liner, err := newAddr2Liner(f.b.addr2line, f.name, f.base); err == nil { + f.addr2liner = addr2liner + + // When addr2line encounters some gcc compiled binaries, it + // drops interesting parts of names in anonymous namespaces. + // Fallback to NM for better function names. + if nm, err := newAddr2LinerNM(f.b.nm, f.name, f.base); err == nil { + f.addr2liner.nm = nm + } + } +} + +func (f *fileAddr2Line) Close() error { + if f.llvmSymbolizer != nil { + f.llvmSymbolizer.rw.close() + f.llvmSymbolizer = nil + } + if f.addr2liner != nil { + f.addr2liner.rw.close() + f.addr2liner = nil + } + return nil +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go new file mode 100644 index 0000000..e64adf5 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go @@ -0,0 +1,180 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bytes" + "io" + "regexp" + "strconv" + "strings" + + "github.com/google/pprof/internal/plugin" + "github.com/ianlancetaylor/demangle" +) + +var ( + nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`) + objdumpAsmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`) + objdumpOutputFileLine = regexp.MustCompile(`^;?\s?(.*):([0-9]+)`) + objdumpOutputFunction = regexp.MustCompile(`^;?\s?(\S.*)\(\):`) + objdumpOutputFunctionLLVM = regexp.MustCompile(`^([[:xdigit:]]+)?\s?(.*):`) +) + +func findSymbols(syms []byte, file string, r *regexp.Regexp, address uint64) ([]*plugin.Sym, error) { + // Collect all symbols from the nm output, grouping names mapped to + // the same address into a single symbol. + + // The symbols to return. + var symbols []*plugin.Sym + + // The current group of symbol names, and the address they are all at. + names, start := []string{}, uint64(0) + + buf := bytes.NewBuffer(syms) + + for { + symAddr, name, err := nextSymbol(buf) + if err == io.EOF { + // Done. If there was an unfinished group, append it. + if len(names) != 0 { + if match := matchSymbol(names, start, symAddr-1, r, address); match != nil { + symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1}) + } + } + + // And return the symbols. + return symbols, nil + } + + if err != nil { + // There was some kind of serious error reading nm's output. + return nil, err + } + + // If this symbol is at the same address as the current group, add it to the group. 
+ if symAddr == start { + names = append(names, name) + continue + } + + // Otherwise append the current group to the list of symbols. + if match := matchSymbol(names, start, symAddr-1, r, address); match != nil { + symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1}) + } + + // And start a new group. + names, start = []string{name}, symAddr + } +} + +// matchSymbol checks if a symbol is to be selected by checking its +// name to the regexp and optionally its address. It returns the name(s) +// to be used for the matched symbol, or nil if no match +func matchSymbol(names []string, start, end uint64, r *regexp.Regexp, address uint64) []string { + if address != 0 && address >= start && address <= end { + return names + } + for _, name := range names { + if r == nil || r.MatchString(name) { + return []string{name} + } + + // Match all possible demangled versions of the name. + for _, o := range [][]demangle.Option{ + {demangle.NoClones}, + {demangle.NoParams}, + {demangle.NoParams, demangle.NoTemplateParams}, + } { + if demangled, err := demangle.ToString(name, o...); err == nil && r.MatchString(demangled) { + return []string{demangled} + } + } + } + return nil +} + +// disassemble parses the output of the objdump command and returns +// the assembly instructions in a slice. 
+func disassemble(asm []byte) ([]plugin.Inst, error) { + buf := bytes.NewBuffer(asm) + function, file, line := "", "", 0 + var assembly []plugin.Inst + for { + input, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return nil, err + } + if input == "" { + break + } + } + input = strings.TrimSpace(input) + + if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 { + if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { + assembly = append(assembly, + plugin.Inst{ + Addr: address, + Text: fields[2], + Function: function, + File: file, + Line: line, + }) + continue + } + } + if fields := objdumpOutputFileLine.FindStringSubmatch(input); len(fields) == 3 { + if l, err := strconv.ParseUint(fields[2], 10, 32); err == nil { + file, line = fields[1], int(l) + } + continue + } + if fields := objdumpOutputFunction.FindStringSubmatch(input); len(fields) == 2 { + function = fields[1] + continue + } else { + if fields := objdumpOutputFunctionLLVM.FindStringSubmatch(input); len(fields) == 3 { + function = fields[2] + continue + } + } + // Reset on unrecognized lines. + function, file, line = "", "", 0 + } + + return assembly, nil +} + +// nextSymbol parses the nm output to find the next symbol listed. +// Skips over any output it cannot recognize. 
+func nextSymbol(buf *bytes.Buffer) (uint64, string, error) { + for { + line, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF || line == "" { + return 0, "", err + } + } + line = strings.TrimSpace(line) + + if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 { + if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { + return address, fields[3], nil + } + } + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go new file mode 100644 index 0000000..237cc33 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "errors" + "fmt" + "os" + "strings" + + "github.com/google/pprof/internal/binutils" + "github.com/google/pprof/internal/plugin" +) + +type source struct { + Sources []string + ExecName string + BuildID string + Base []string + DiffBase bool + Normalize bool + + Seconds int + Timeout int + Symbolize string + HTTPHostport string + HTTPDisableBrowser bool + Comment string +} + +// parseFlags parses the command lines through the specified flags package +// and returns the source of the profile and optionally the command +// for the kind of report to generate (nil for interactive use). 
+func parseFlags(o *plugin.Options) (*source, []string, error) { + flag := o.Flagset + // Comparisons. + flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison") + flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction") + // Source options. + flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization") + flagBuildID := flag.String("buildid", "", "Override build id for first mapping") + flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile") + flagAddComment := flag.String("add_comment", "", "Annotation string to record in the profile") + // CPU profile options + flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles") + // Heap profile options + flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size") + flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts") + flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size") + flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts") + // Contention profile options + flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region") + flagContentions := flag.Bool("contentions", false, "Display number of delays at each region") + flagMeanDelay := flag.Bool("mean_delay", false, "Display mean delay at each region") + flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames") + + flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port") + flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browswer for the interactive web UI") + + // Flags that set configuration properties. 
+ cfg := currentConfig() + configFlagSetter := installConfigFlags(flag, &cfg) + + flagCommands := make(map[string]*bool) + flagParamCommands := make(map[string]*string) + for name, cmd := range pprofCommands { + if cmd.hasParam { + flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp") + } else { + flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format") + } + } + + args := flag.Parse(func() { + o.UI.Print(usageMsgHdr + + usage(true) + + usageMsgSrc + + flag.ExtraUsage() + + usageMsgVars) + }) + if len(args) == 0 { + return nil, nil, errors.New("no profile source specified") + } + + var execName string + // Recognize first argument as an executable or buildid override. + if len(args) > 1 { + arg0 := args[0] + if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0, ""); err == nil { + file.Close() + execName = arg0 + args = args[1:] + } else if *flagBuildID == "" && isBuildID(arg0) { + *flagBuildID = arg0 + args = args[1:] + } + } + + // Apply any specified flags to cfg. 
+ if err := configFlagSetter(); err != nil { + return nil, nil, err + } + + cmd, err := outputFormat(flagCommands, flagParamCommands) + if err != nil { + return nil, nil, err + } + if cmd != nil && *flagHTTP != "" { + return nil, nil, errors.New("-http is not compatible with an output format on the command line") + } + + if *flagNoBrowser && *flagHTTP == "" { + return nil, nil, errors.New("-no_browser only makes sense with -http") + } + + si := cfg.SampleIndex + si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI) + si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI) + si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI) + si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI) + si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI) + si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI) + si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI) + cfg.SampleIndex = si + + if *flagMeanDelay { + cfg.Mean = true + } + + source := &source{ + Sources: args, + ExecName: execName, + BuildID: *flagBuildID, + Seconds: *flagSeconds, + Timeout: *flagTimeout, + Symbolize: *flagSymbolize, + HTTPHostport: *flagHTTP, + HTTPDisableBrowser: *flagNoBrowser, + Comment: *flagAddComment, + } + + if err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil { + return nil, nil, err + } + + normalize := cfg.Normalize + if normalize && len(source.Base) == 0 { + return nil, nil, errors.New("must have base profile to normalize by") + } + source.Normalize = normalize + + if bu, ok := o.Obj.(*binutils.Binutils); ok { + bu.SetTools(*flagTools) + } + + setCurrentConfig(cfg) + return source, cmd, nil +} + +// addBaseProfiles adds the list of base profiles or diff base profiles to +// the source. This function will return an error if both base and diff base +// profiles are specified. 
+func (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error { + base, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase) + if len(base) > 0 && len(diffBase) > 0 { + return errors.New("-base and -diff_base flags cannot both be specified") + } + + source.Base = base + if len(diffBase) > 0 { + source.Base, source.DiffBase = diffBase, true + } + return nil +} + +// dropEmpty list takes a slice of string pointers, and outputs a slice of +// non-empty strings associated with the flag. +func dropEmpty(list []*string) []string { + var l []string + for _, s := range list { + if *s != "" { + l = append(l, *s) + } + } + return l +} + +// installConfigFlags creates command line flags for configuration +// fields and returns a function which can be called after flags have +// been parsed to copy any flags specified on the command line to +// *cfg. +func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error { + // List of functions for setting the different parts of a config. + var setters []func() + var err error // Holds any errors encountered while running setters. + + for _, field := range configFields { + n := field.name + help := configHelp[n] + var setter func() + switch ptr := cfg.fieldPtr(field).(type) { + case *bool: + f := flag.Bool(n, *ptr, help) + setter = func() { *ptr = *f } + case *int: + f := flag.Int(n, *ptr, help) + setter = func() { *ptr = *f } + case *float64: + f := flag.Float64(n, *ptr, help) + setter = func() { *ptr = *f } + case *string: + if len(field.choices) == 0 { + f := flag.String(n, *ptr, help) + setter = func() { *ptr = *f } + } else { + // Make a separate flag per possible choice. + // Set all flags to initially false so we can + // identify conflicts. 
+ bools := make(map[string]*bool) + for _, choice := range field.choices { + bools[choice] = flag.Bool(choice, false, configHelp[choice]) + } + setter = func() { + var set []string + for k, v := range bools { + if *v { + set = append(set, k) + } + } + switch len(set) { + case 0: + // Leave as default value. + case 1: + *ptr = set[0] + default: + err = fmt.Errorf("conflicting options set: %v", set) + } + } + } + } + setters = append(setters, setter) + } + + return func() error { + // Apply the setter for every flag. + for _, setter := range setters { + setter() + if err != nil { + return err + } + } + return nil + } +} + +// isBuildID determines if the profile may contain a build ID, by +// checking that it is a string of hex digits. +func isBuildID(id string) bool { + return strings.Trim(id, "0123456789abcdefABCDEF") == "" +} + +func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string { + if *flag { + if si == "" { + return sampleType + } + ui.PrintErr("Multiple value selections, ignoring ", option) + } + return si +} + +func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) { + for n, b := range bcmd { + if *b { + if cmd != nil { + return nil, errors.New("must set at most one output format") + } + cmd = []string{n} + } + } + for n, s := range acmd { + if *s != "" { + if cmd != nil { + return nil, errors.New("must set at most one output format") + } + cmd = []string{n, *s} + } + } + return cmd, nil +} + +var usageMsgHdr = `usage: + +Produce output in the specified format. + + pprof [options] [binary] ... + +Omit the format to get an interactive shell whose commands can be used +to generate various views of a profile + + pprof [options] [binary] ... + +Omit the format and provide the "-http" flag to get an interactive web +interface at the specified host:port that can be used to navigate through +various views of a profile. + + pprof -http [host]:[port] [options] [binary] ... 
+ +Details: +` + +var usageMsgSrc = "\n\n" + + " Source options:\n" + + " -seconds Duration for time-based profile collection\n" + + " -timeout Timeout in seconds for profile collection\n" + + " -buildid Override build id for main binary\n" + + " -add_comment Free-form annotation to add to the profile\n" + + " Displayed on some reports or with pprof -comments\n" + + " -diff_base source Source of base profile for comparison\n" + + " -base source Source of base profile for profile subtraction\n" + + " profile.pb.gz Profile in compressed protobuf format\n" + + " legacy_profile Profile in legacy pprof format\n" + + " http://host/profile URL for profile handler to retrieve\n" + + " -symbolize= Controls source of symbol information\n" + + " none Do not attempt symbolization\n" + + " local Examine only local binaries\n" + + " fastlocal Only get function names from local binaries\n" + + " remote Do not examine local binaries\n" + + " force Force re-symbolization\n" + + " Binary Local path or build id of binary for symbolization\n" + +var usageMsgVars = "\n\n" + + " Misc options:\n" + + " -http Provide web interface at host:port.\n" + + " Host is optional and 'localhost' by default.\n" + + " Port is optional and a randomly available port by default.\n" + + " -no_browser Skip opening a browser for the interactive web UI.\n" + + " -tools Search path for object tools\n" + + "\n" + + " Legacy convenience options:\n" + + " -inuse_space Same as -sample_index=inuse_space\n" + + " -inuse_objects Same as -sample_index=inuse_objects\n" + + " -alloc_space Same as -sample_index=alloc_space\n" + + " -alloc_objects Same as -sample_index=alloc_objects\n" + + " -total_delay Same as -sample_index=delay\n" + + " -contentions Same as -sample_index=contentions\n" + + " -mean_delay Same as -mean -sample_index=delay\n" + + "\n" + + " Environment Variables:\n" + + " PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" + + " PPROF_TOOLS Search path for object-level tools\n" + + " 
PPROF_BINARY_PATH Search path for local binary files\n" + + " default: $HOME/pprof/binaries\n" + + " searches $name, $path, $buildid/$name, $path/$buildid\n" + + " * On Windows, %USERPROFILE% is used instead of $HOME" diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go new file mode 100644 index 0000000..c9edf10 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go @@ -0,0 +1,459 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "sort" + "strings" + "time" + + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" +) + +// commands describes the commands accepted by pprof. +type commands map[string]*command + +// command describes the actions for a pprof command. Includes a +// function for command-line completion, the report format to use +// during report generation, any postprocessing functions, and whether +// the command expects a regexp parameter (typically a function name). 
+type command struct { + format int // report format to generate + postProcess PostProcessor // postprocessing to run on report + visualizer PostProcessor // display output using some callback + hasParam bool // collect a parameter from the CLI + description string // single-line description text saying what the command does + usage string // multi-line help text saying how the command is used +} + +// help returns a help string for a command. +func (c *command) help(name string) string { + message := c.description + "\n" + if c.usage != "" { + message += " Usage:\n" + lines := strings.Split(c.usage, "\n") + for _, line := range lines { + message += fmt.Sprintf(" %s\n", line) + } + } + return message + "\n" +} + +// AddCommand adds an additional command to the set of commands +// accepted by pprof. This enables extensions to add new commands for +// specialized visualization formats. If the command specified already +// exists, it is overwritten. +func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) { + pprofCommands[cmd] = &command{format, post, nil, false, desc, usage} +} + +// SetVariableDefault sets the default value for a pprof +// variable. This enables extensions to set their own defaults. +func SetVariableDefault(variable, value string) { + configure(variable, value) +} + +// PostProcessor is a function that applies post-processing to the report output +type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error + +// interactiveMode is true if pprof is running on interactive mode, reading +// commands from its shell. +var interactiveMode = false + +// pprofCommands are the report generation commands recognized by pprof. +var pprofCommands = commands{ + // Commands that require no post-processing. 
+ "comments": {report.Comments, nil, nil, false, "Output all profile comments", ""}, + "disasm": {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)}, + "dot": {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)}, + "list": {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)}, + "peek": {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."}, + "raw": {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""}, + "tags": {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."}, + "text": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)}, + "top": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)}, + "traces": {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""}, + "tree": {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)}, + + // Save binary formats to a file + "callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)}, + "proto": {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""}, + "topproto": {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""}, + + // Generate report in DOT format and postprocess with dot + "gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)}, + "pdf": {report.Dot, 
invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)}, + "png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)}, + "ps": {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)}, + + // Save SVG output into a file + "svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)}, + + // Visualize postprocessed dot output + "eog": {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)}, + "evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)}, + "gv": {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)}, + "web": {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)}, + + // Visualize callgrind output + "kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)}, + + // Visualize HTML directly generated by report. + "weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)}, +} + +// configHelp contains help text per configuration parameter. +var configHelp = map[string]string{ + // Filename for file-based output formats, stdout by default. + "output": helpText("Output filename for file-based outputs"), + + // Comparisons. 
+ "drop_negative": helpText( + "Ignore negative differences", + "Do not show any locations with values <0."), + + // Graph handling options. + "call_tree": helpText( + "Create a context-sensitive call tree", + "Treat locations reached through different paths as separate."), + + // Display options. + "relative_percentages": helpText( + "Show percentages relative to focused subgraph", + "If unset, percentages are relative to full graph before focusing", + "to facilitate comparison with original graph."), + "unit": helpText( + "Measurement units to display", + "Scale the sample values to this unit.", + "For time-based profiles, use seconds, milliseconds, nanoseconds, etc.", + "For memory profiles, use megabytes, kilobytes, bytes, etc.", + "Using auto will scale each value independently to the most natural unit."), + "compact_labels": "Show minimal headers", + "source_path": "Search path for source files", + "trim_path": "Path to trim from source paths before search", + "intel_syntax": helpText( + "Show assembly in Intel syntax", + "Only applicable to commands `disasm` and `weblist`"), + + // Filtering options + "nodecount": helpText( + "Max number of nodes to show", + "Uses heuristics to limit the number of locations to be displayed.", + "On graphs, dotted edges represent paths through nodes that have been removed."), + "nodefraction": "Hide nodes below *total", + "edgefraction": "Hide edges below *total", + "trim": helpText( + "Honor nodefraction/edgefraction/nodecount defaults", + "Set to false to get the full profile, without any trimming."), + "focus": helpText( + "Restricts to samples going through a node matching regexp", + "Discard samples that do not include a node matching this regexp.", + "Matching includes the function name, filename or object name."), + "ignore": helpText( + "Skips paths going through any nodes matching regexp", + "If set, discard samples that include a node matching this regexp.", + "Matching includes the function name, filename or object 
name."), + "prune_from": helpText( + "Drops any functions below the matched frame.", + "If set, any frames matching the specified regexp and any frames", + "below it will be dropped from each sample."), + "hide": helpText( + "Skips nodes matching regexp", + "Discard nodes that match this location.", + "Other nodes from samples that include this location will be shown.", + "Matching includes the function name, filename or object name."), + "show": helpText( + "Only show nodes matching regexp", + "If set, only show nodes that match this location.", + "Matching includes the function name, filename or object name."), + "show_from": helpText( + "Drops functions above the highest matched frame.", + "If set, all frames above the highest match are dropped from every sample.", + "Matching includes the function name, filename or object name."), + "tagroot": helpText( + "Adds pseudo stack frames for labels key/value pairs at the callstack root.", + "A comma-separated list of label keys.", + "The first key creates frames at the new root."), + "tagleaf": helpText( + "Adds pseudo stack frames for labels key/value pairs at the callstack leaf.", + "A comma-separated list of label keys.", + "The last key creates frames at the new leaf."), + "tagfocus": helpText( + "Restricts to samples with tags in range or matched by regexp", + "Use name=value syntax to limit the matching to a specific tag.", + "Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:", + "String tag filter examples: foo, foo.*bar, mytag=foo.*bar"), + "tagignore": helpText( + "Discard samples with tags in range or matched by regexp", + "Use name=value syntax to limit the matching to a specific tag.", + "Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:", + "String tag filter examples: foo, foo.*bar, mytag=foo.*bar"), + "tagshow": helpText( + "Only consider tags matching this regexp", + "Discard tags that do not match this regexp"), + "taghide": helpText( + "Skip tags matching this regexp", + "Discard tags 
that match this regexp"), + // Heap profile options + "divide_by": helpText( + "Ratio to divide all samples before visualization", + "Divide all samples values by a constant, eg the number of processors or jobs."), + "mean": helpText( + "Average sample value over first value (count)", + "For memory profiles, report average memory per allocation.", + "For time-based profiles, report average time per event."), + "sample_index": helpText( + "Sample value to report (0-based index or name)", + "Profiles contain multiple values per sample.", + "Use sample_index=i to select the ith value (starting at 0)."), + "normalize": helpText( + "Scales profile based on the base profile."), + + // Data sorting criteria + "flat": helpText("Sort entries based on own weight"), + "cum": helpText("Sort entries based on cumulative weight"), + + // Output granularity + "functions": helpText( + "Aggregate at the function level.", + "Ignores the filename where the function was defined."), + "filefunctions": helpText( + "Aggregate at the function level.", + "Takes into account the filename where the function was defined."), + "files": "Aggregate at the file level.", + "lines": "Aggregate at the source code line level.", + "addresses": helpText( + "Aggregate at the address level.", + "Includes functions' addresses in the output."), + "noinlines": helpText( + "Ignore inlines.", + "Attributes inlined functions to their first out-of-line caller."), +} + +func helpText(s ...string) string { + return strings.Join(s, "\n") + "\n" +} + +// usage returns a string describing the pprof commands and configuration +// options. if commandLine is set, the output reflect cli usage. 
func usage(commandLine bool) string {
	// In command-line mode options are rendered as flags with a leading
	// "-"; in interactive mode they are bare names.
	var prefix string
	if commandLine {
		prefix = "-"
	}
	// fmtHelp formats one "name description" row, keeping only the first
	// line of a multi-line description.
	fmtHelp := func(c, d string) string {
		return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0])
	}

	var commands []string
	for name, cmd := range pprofCommands {
		commands = append(commands, fmtHelp(prefix+name, cmd.description))
	}
	sort.Strings(commands)

	var help string
	if commandLine {
		help = " Output formats (select at most one):\n"
	} else {
		help = " Commands:\n"
		// Interactive-only pseudo commands.
		commands = append(commands, fmtHelp("o/options", "List options and their current values"))
		commands = append(commands, fmtHelp("q/quit/exit/^D", "Exit pprof"))
	}

	help = help + strings.Join(commands, "\n") + "\n\n" +
		" Options:\n"

	// Print help for configuration options after sorting them.
	// Collect choices for multi-choice options print them together.
	var variables []string
	var radioStrings []string
	for _, f := range configFields {
		if len(f.choices) == 0 {
			variables = append(variables, fmtHelp(prefix+f.name, configHelp[f.name]))
			continue
		}
		// Format help for this group.
		s := []string{fmtHelp(f.name, "")}
		for _, choice := range f.choices {
			s = append(s, " "+fmtHelp(prefix+choice, configHelp[choice]))
		}
		radioStrings = append(radioStrings, strings.Join(s, "\n"))
	}
	sort.Strings(variables)
	sort.Strings(radioStrings)
	return help + strings.Join(variables, "\n") + "\n\n" +
		" Option groups (only set one per group):\n" +
		strings.Join(radioStrings, "\n")
}

// reportHelp builds the help text for a report-producing command c: a
// usage line followed by one explanatory line per accepted argument.
// cum adds the [-cum] sort flag; redirect adds the ">f" file redirect.
func reportHelp(c string, cum, redirect bool) string {
	h := []string{
		c + " [n] [focus_regex]* [-ignore_regex]*",
		"Include up to n samples",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if cum {
		h[0] += " [-cum]"
		h = append(h, "-cum sorts the output by cumulative weight")
	}
	if redirect {
		h[0] += " >f"
		h = append(h, "Optionally save the report on the file f")
	}
	return strings.Join(h, "\n")
}

// listHelp builds the help text for a source-listing command c.
// redirect adds the ">f" file redirect to the usage line.
func listHelp(c string, redirect bool) string {
	h := []string{
		c + " [-focus_regex]* [-ignore_regex]*",
		"Include functions matching func_regex, or including the address specified.",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if redirect {
		h[0] += " >f"
		h = append(h, "Optionally save the report on the file f")
	}
	return strings.Join(h, "\n")
}

// browsers returns a list of commands to attempt for web visualization.
func browsers() []string {
	var cmds []string
	// $BROWSER, when set, takes precedence over all platform defaults.
	if userBrowser := os.Getenv("BROWSER"); userBrowser != "" {
		cmds = append(cmds, userBrowser)
	}
	switch runtime.GOOS {
	case "darwin":
		cmds = append(cmds, "/usr/bin/open")
	case "windows":
		cmds = append(cmds, "cmd /c start")
	default:
		// Commands opening browsers are prioritized over xdg-open, so browser()
		// command can be used on linux to open the .svg file generated by the -web
		// command (the .svg file includes embedded javascript so is best viewed in
		// a browser).
		cmds = append(cmds, []string{"chrome", "google-chrome", "chromium", "firefox", "sensible-browser"}...)
		if os.Getenv("DISPLAY") != "" {
			// xdg-open is only for use in a desktop environment.
			cmds = append(cmds, "xdg-open")
		}
	}
	return cmds
}

// kcachegrind is the visualizer command list used for callgrind output.
var kcachegrind = []string{"kcachegrind"}

// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		if output == os.Stdout && (ui.IsTerminal() || interactiveMode) {
			tempFile, err := newTempFile("", "profile", "."+format)
			if err != nil {
				return err
			}
			ui.PrintErr("Generating report in ", tempFile.Name())
			// Redirect the report into the temp file instead of the TTY.
			output = tempFile
		}
		_, err := io.Copy(output, input)
		return err
	}
}

// invokeDot returns a PostProcessor that pipes its input through the
// Graphviz "dot" tool to produce the requested output format.
func invokeDot(format string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		cmd := exec.Command("dot", "-T"+format)
		cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("failed to execute dot. Is Graphviz installed? Error: %v", err)
		}
		return nil
	}
}

// massageDotSVG invokes the dot tool to generate an SVG image and alters
// the image to have panning capabilities when viewed in a browser.
func massageDotSVG() PostProcessor {
	generateSVG := invokeDot("svg")
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		// Render the base SVG into memory first, then rewrite it via
		// massageSVG before emitting it to the real output.
		baseSVG := new(bytes.Buffer)
		if err := generateSVG(input, baseSVG, ui); err != nil {
			return err
		}
		_, err := output.Write([]byte(massageSVG(baseSVG.String())))
		return err
	}
}

// invokeVisualizer returns a PostProcessor that copies its input to a
// temporary file with the given suffix and launches the first of the
// given visualizer commands that starts successfully on that file.
func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
		if err != nil {
			return err
		}
		// Schedule the temp file for cleanup on pprof exit, not here,
		// since the visualizer may still be reading it.
		deferDeleteTempFile(tempFile.Name())
		if _, err := io.Copy(tempFile, input); err != nil {
			return err
		}
		tempFile.Close()
		// Try visualizers until one is successful
		for _, v := range visualizers {
			// Separate command and arguments for exec.Command.
			args := strings.Split(v, " ")
			if len(args) == 0 {
				continue
			}
			viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
			viewer.Stderr = os.Stderr
			if err = viewer.Start(); err == nil {
				// Wait for a second so that the visualizer has a chance to
				// open the input file. This needs to be done even if we're
				// waiting for the visualizer as it can be just a wrapper that
				// spawns a browser tab and returns right away.
				defer func(t <-chan time.Time) {
					<-t
				}(time.After(time.Second))
				// On interactive mode, let the visualizer run in the background
				// so other commands can be issued.
				if !interactiveMode {
					return viewer.Wait()
				}
				return nil
			}
		}
		// err carries the Start failure from the last visualizer tried
		// (nil only if the visualizer list was empty).
		return err
	}
}

// stringToBool is a custom parser for bools. We avoid using strconv.ParseBool
// to remain compatible with old pprof behavior (e.g., treating "" as true).
func stringToBool(s string) (bool, error) {
	switch strings.ToLower(s) {
	case "true", "t", "yes", "y", "1", "":
		return true, nil
	case "false", "f", "no", "n", "0":
		return false, nil
	default:
		return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
	}
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
new file mode 100644
index 0000000..9fcdd45
--- /dev/null
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
@@ -0,0 +1,371 @@
package driver

import (
	"fmt"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"sync"
)

// config holds settings for a single named config.
// The JSON tag name for a field is used both for JSON encoding and as
// a named variable.
type config struct {
	// Filename for file-based output formats, stdout by default.
	Output string `json:"-"`

	// Display options.
	CallTree            bool    `json:"call_tree,omitempty"`
	RelativePercentages bool    `json:"relative_percentages,omitempty"`
	Unit                string  `json:"unit,omitempty"`
	CompactLabels       bool    `json:"compact_labels,omitempty"`
	SourcePath          string  `json:"-"`
	TrimPath            string  `json:"-"`
	IntelSyntax         bool    `json:"intel_syntax,omitempty"`
	Mean                bool    `json:"mean,omitempty"`
	SampleIndex         string  `json:"-"`
	DivideBy            float64 `json:"-"`
	Normalize           bool    `json:"normalize,omitempty"`
	Sort                string  `json:"sort,omitempty"`

	// Label pseudo stack frame generation options
	TagRoot string `json:"tagroot,omitempty"`
	TagLeaf string `json:"tagleaf,omitempty"`

	// Filtering options
	DropNegative bool    `json:"drop_negative,omitempty"`
	NodeCount    int     `json:"nodecount,omitempty"`
	NodeFraction float64 `json:"nodefraction,omitempty"`
	EdgeFraction float64 `json:"edgefraction,omitempty"`
	Trim         bool    `json:"trim,omitempty"`
	Focus        string  `json:"focus,omitempty"`
	Ignore       string  `json:"ignore,omitempty"`
	PruneFrom    string  `json:"prune_from,omitempty"`
	Hide         string  `json:"hide,omitempty"`
	Show         string  `json:"show,omitempty"`
	ShowFrom     string  `json:"show_from,omitempty"`
	TagFocus     string  `json:"tagfocus,omitempty"`
	TagIgnore    string  `json:"tagignore,omitempty"`
	TagShow      string  `json:"tagshow,omitempty"`
	TagHide      string  `json:"taghide,omitempty"`
	NoInlines    bool    `json:"noinlines,omitempty"`

	// Output granularity
	Granularity string `json:"granularity,omitempty"`
}

// defaultConfig returns the default configuration values; it is unaffected by
// flags and interactive assignments.
func defaultConfig() config {
	return config{
		Unit:         "minimum",
		NodeCount:    -1, // Default, distinct from 0; resolved per-command elsewhere.
		NodeFraction: 0.005,
		EdgeFraction: 0.001,
		Trim:         true,
		DivideBy:     1.0,
		Sort:         "flat",
		Granularity:  "functions",
	}
}

// currentConfig holds the current configuration values; it is affected by
// flags and interactive assignments.
var currentCfg = defaultConfig()
var currentMu sync.Mutex // guards currentCfg

// currentConfig returns a copy of the current configuration under the lock.
func currentConfig() config {
	currentMu.Lock()
	defer currentMu.Unlock()
	return currentCfg
}

// setCurrentConfig replaces the current configuration under the lock.
func setCurrentConfig(cfg config) {
	currentMu.Lock()
	defer currentMu.Unlock()
	currentCfg = cfg
}

// configField contains metadata for a single configuration field.
type configField struct {
	name         string              // JSON field name/key in variables
	urlparam     string              // URL parameter name
	saved        bool                // Is field saved in settings?
	field        reflect.StructField // Field in config
	choices      []string            // Names of variables in group
	defaultValue string              // Default value for this field.
}

var (
	configFields []configField // Precomputed metadata per config field

	// configFieldMap holds an entry for every config field as well as an
	// entry for every valid choice for a multi-choice field.
	configFieldMap map[string]configField
)

// init builds configFields and configFieldMap from the config struct's
// JSON tags via reflection.
func init() {
	// Config names for fields that are not saved in settings and therefore
	// do not have a JSON name.
	notSaved := map[string]string{
		// Not saved in settings, but present in URLs.
		"SampleIndex": "sample_index",

		// Following fields are also not placed in URLs.
		"Output":     "output",
		"SourcePath": "source_path",
		"TrimPath":   "trim_path",
		"DivideBy":   "divide_by",
	}

	// choices holds the list of allowed values for config fields that can
	// take on one of a bounded set of values.
	choices := map[string][]string{
		"sort":        {"cum", "flat"},
		"granularity": {"functions", "filefunctions", "files", "lines", "addresses"},
	}

	// urlparam holds the mapping from a config field name to the URL
	// parameter used to hold that config field. If no entry is present for
	// a name, the corresponding field is not saved in URLs.
	urlparam := map[string]string{
		"drop_negative":        "dropneg",
		"call_tree":            "calltree",
		"relative_percentages": "rel",
		"unit":                 "unit",
		"compact_labels":       "compact",
		"intel_syntax":         "intel",
		"nodecount":            "n",
		"nodefraction":         "nf",
		"edgefraction":         "ef",
		"trim":                 "trim",
		"focus":                "f",
		"ignore":               "i",
		"prune_from":           "prunefrom",
		"hide":                 "h",
		"show":                 "s",
		"show_from":            "sf",
		"tagfocus":             "tf",
		"tagignore":            "ti",
		"tagshow":              "ts",
		"taghide":              "th",
		"mean":                 "mean",
		"sample_index":         "si",
		"normalize":            "norm",
		"sort":                 "sort",
		"granularity":          "g",
		"noinlines":            "noinlines",
	}

	def := defaultConfig()
	configFieldMap = map[string]configField{}
	t := reflect.TypeOf(config{})
	for i, n := 0, t.NumField(); i < n; i++ {
		field := t.Field(i)
		js := strings.Split(field.Tag.Get("json"), ",")
		if len(js) == 0 {
			continue
		}
		// Get the configuration name for this field.
		name := js[0]
		if name == "-" {
			name = notSaved[field.Name]
			if name == "" {
				// Not a configurable field.
				continue
			}
		}
		f := configField{
			name:     name,
			urlparam: urlparam[name],
			saved:    (name == js[0]),
			field:    field,
			choices:  choices[name],
		}
		f.defaultValue = def.get(f)
		configFields = append(configFields, f)
		configFieldMap[f.name] = f
		// Multi-choice values also map back to their owning field.
		for _, choice := range f.choices {
			configFieldMap[choice] = f
		}
	}
}

// fieldPtr returns a pointer to the field identified by f in *cfg.
func (cfg *config) fieldPtr(f configField) interface{} {
	// reflect.ValueOf: converts to reflect.Value
	// Elem: dereferences cfg to make *cfg
	// FieldByIndex: fetches the field
	// Addr: takes address of field
	// Interface: converts back from reflect.Value to a regular value
	return reflect.ValueOf(cfg).Elem().FieldByIndex(f.field.Index).Addr().Interface()
}

// get returns the value of field f in cfg, rendered as a string.
func (cfg *config) get(f configField) string {
	switch ptr := cfg.fieldPtr(f).(type) {
	case *string:
		return *ptr
	case *int:
		return fmt.Sprint(*ptr)
	case *float64:
		return fmt.Sprint(*ptr)
	case *bool:
		return fmt.Sprint(*ptr)
	}
	panic(fmt.Sprintf("unsupported config field type %v", f.field.Type))
}

// set sets the value of field f in cfg to value, parsing it according to
// the field's Go type. For multi-choice fields the value must be one of
// the registered choices.
func (cfg *config) set(f configField, value string) error {
	switch ptr := cfg.fieldPtr(f).(type) {
	case *string:
		if len(f.choices) > 0 {
			// Verify that value is one of the allowed choices.
			for _, choice := range f.choices {
				if choice == value {
					*ptr = value
					return nil
				}
			}
			return fmt.Errorf("invalid %q value %q", f.name, value)
		}
		*ptr = value
	case *int:
		v, err := strconv.Atoi(value)
		if err != nil {
			return err
		}
		*ptr = v
	case *float64:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return err
		}
		*ptr = v
	case *bool:
		v, err := stringToBool(value)
		if err != nil {
			return err
		}
		*ptr = v
	default:
		panic(fmt.Sprintf("unsupported config field type %v", f.field.Type))
	}
	return nil
}

// isConfigurable returns true if name is either the name of a config field, or
// a valid value for a multi-choice config field.
func isConfigurable(name string) bool {
	_, ok := configFieldMap[name]
	return ok
}

// isBoolConfig returns true if name is either name of a boolean config field,
// or a valid value for a multi-choice config field.
func isBoolConfig(name string) bool {
	f, ok := configFieldMap[name]
	if !ok {
		return false
	}
	if name != f.name {
		return true // name must be one possible value for the field
	}
	var cfg config
	_, ok = cfg.fieldPtr(f).(*bool)
	return ok
}

// completeConfig returns the list of configurable names starting with prefix.
func completeConfig(prefix string) []string {
	var result []string
	for v := range configFieldMap {
		if strings.HasPrefix(v, prefix) {
			result = append(result, v)
		}
	}
	return result
}

// configure stores the name=value mapping into the current config, correctly
// handling the case when name identifies a particular choice in a field.
func configure(name, value string) error {
	currentMu.Lock()
	defer currentMu.Unlock()
	f, ok := configFieldMap[name]
	if !ok {
		return fmt.Errorf("unknown config field %q", name)
	}
	if f.name == name {
		return currentCfg.set(f, value)
	}
	// name must be one of the choices. If value is true, set field-value
	// to name.
	if v, err := strconv.ParseBool(value); v && err == nil {
		return currentCfg.set(f, name)
	}
	return fmt.Errorf("unknown config field %q", name)
}

// resetTransient sets all transient fields in *cfg to their currently
// configured values.
func (cfg *config) resetTransient() {
	current := currentConfig()
	cfg.Output = current.Output
	cfg.SourcePath = current.SourcePath
	cfg.TrimPath = current.TrimPath
	cfg.DivideBy = current.DivideBy
	cfg.SampleIndex = current.SampleIndex
}

// applyURL updates *cfg based on params.
func (cfg *config) applyURL(params url.Values) error {
	for _, f := range configFields {
		var value string
		if f.urlparam != "" {
			value = params.Get(f.urlparam)
		}
		if value == "" {
			// Absent parameters leave the field untouched.
			continue
		}
		if err := cfg.set(f, value); err != nil {
			return fmt.Errorf("error setting config field %s: %v", f.name, err)
		}
	}
	return nil
}

// makeURL returns a URL based on initialURL that contains the config contents
// as parameters. The second result is true iff a parameter value was changed.
func (cfg *config) makeURL(initialURL url.URL) (url.URL, bool) {
	q := initialURL.Query()
	changed := false
	for _, f := range configFields {
		if f.urlparam == "" || !f.saved {
			continue
		}
		v := cfg.get(f)
		if v == f.defaultValue {
			v = "" // URL form of default value is the empty string.
		} else if f.field.Type.Kind() == reflect.Bool {
			// Shorten bool values to "f" or "t"
			v = v[:1]
		}
		if q.Get(f.urlparam) == v {
			continue
		}
		changed = true
		if v == "" {
			q.Del(f.urlparam)
		} else {
			q.Set(f.urlparam, v)
		}
	}
	if changed {
		initialURL.RawQuery = q.Encode()
	}
	return initialURL, changed
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go
new file mode 100644
index 0000000..6a1e64c
--- /dev/null
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go
@@ -0,0 +1,367 @@
// Copyright 2014 Google Inc.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package driver implements the core pprof functionality. It can be
// parameterized with a flag implementation, fetch and symbolize
// mechanisms.
package driver

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/google/pprof/internal/plugin"
	"github.com/google/pprof/internal/report"
	"github.com/google/pprof/profile"
)

// PProf acquires a profile, and symbolizes it using a profile
// manager. Then it generates a report formatted according to the
// options selected through the flags package.
func PProf(eo *plugin.Options) error {
	// Remove any temporary files created during pprof processing.
	defer cleanupTempFiles()

	o := setDefaults(eo)

	src, cmd, err := parseFlags(o)
	if err != nil {
		return err
	}

	p, err := fetchProfiles(src, o)
	if err != nil {
		return err
	}

	// Three modes: one-shot report, web UI server, or interactive shell.
	if cmd != nil {
		return generateReport(p, cmd, currentConfig(), o)
	}

	if src.HTTPHostport != "" {
		return serveWebInterface(src.HTTPHostport, p, o, src.HTTPDisableBrowser)
	}
	return interactive(p, o)
}

// generateRawReport prepares a report.Report for cmd (applying command
// overrides, tag pseudo frames, focus/ignore filtering and aggregation)
// without rendering it. It returns the command descriptor alongside the
// report.
func generateRawReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) (*command, *report.Report, error) {
	p = p.Copy() // Prevent modification to the incoming profile.

	// Identify units of numeric tags in profile.
	numLabelUnits := identifyNumLabelUnits(p, o.UI)

	// Get report output format
	c := pprofCommands[cmd[0]]
	if c == nil {
		panic("unexpected nil command")
	}

	cfg = applyCommandOverrides(cmd[0], c.format, cfg)

	// Create label pseudo nodes before filtering, in case the filters use
	// the generated nodes.
	generateTagRootsLeaves(p, cfg, o.UI)

	// Delay focus after configuring report to get percentages on all samples.
	relative := cfg.RelativePercentages
	if relative {
		if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil {
			return nil, nil, err
		}
	}
	ropt, err := reportOptions(p, numLabelUnits, cfg)
	if err != nil {
		return nil, nil, err
	}
	ropt.OutputFormat = c.format
	if len(cmd) == 2 {
		// Optional second argument is a symbol-selection regexp.
		s, err := regexp.Compile(cmd[1])
		if err != nil {
			return nil, nil, fmt.Errorf("parsing argument regexp %s: %v", cmd[1], err)
		}
		ropt.Symbol = s
	}

	rpt := report.New(p, ropt)
	if !relative {
		if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil {
			return nil, nil, err
		}
	}
	if err := aggregate(p, cfg); err != nil {
		return nil, nil, err
	}

	return c, rpt, nil
}

// generateReport renders the report for cmd, runs any command-specific
// post-processing, and writes the result to cfg.Output (or the command's
// visualizer / stdout when no output file is set).
func generateReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) error {
	c, rpt, err := generateRawReport(p, cmd, cfg, o)
	if err != nil {
		return err
	}

	// Generate the report.
	dst := new(bytes.Buffer)
	if err := report.Generate(dst, rpt, o.Obj); err != nil {
		return err
	}
	src := dst

	// If necessary, perform any data post-processing.
	if c.postProcess != nil {
		dst = new(bytes.Buffer)
		if err := c.postProcess(src, dst, o.UI); err != nil {
			return err
		}
		src = dst
	}

	// If no output is specified, use default visualizer.
	output := cfg.Output
	if output == "" {
		if c.visualizer != nil {
			return c.visualizer(src, os.Stdout, o.UI)
		}
		_, err := src.WriteTo(os.Stdout)
		return err
	}

	// Output to specified file.
	o.UI.PrintErr("Generating report in ", output)
	out, err := o.Writer.Open(output)
	if err != nil {
		return err
	}
	if _, err := src.WriteTo(out); err != nil {
		out.Close()
		return err
	}
	return out.Close()
}

// applyCommandOverrides adjusts cfg for the given command and output
// format, returning the adjusted copy.
func applyCommandOverrides(cmd string, outputFormat int, cfg config) config {
	// Some report types override the trim flag to false below. This is to make
	// sure the default heuristics of excluding insignificant nodes and edges
	// from the call graph do not apply. One example where it is important is
	// annotated source or disassembly listing. Those reports run on a specific
	// function (or functions), but the trimming is applied before the function
	// data is selected. So, with trimming enabled, the report could end up
	// showing no data if the specified function is "uninteresting" as far as the
	// trimming is concerned.
	trim := cfg.Trim

	switch cmd {
	case "disasm":
		trim = false
		cfg.Granularity = "addresses"
		// Force the 'noinlines' mode so that source locations for a given address
		// collapse and there is only one for the given address. Without this
		// cumulative metrics would be double-counted when annotating the assembly.
		// This is because the merge is done by address and in case of an inlined
		// stack each of the inlined entries is a separate callgraph node.
		cfg.NoInlines = true
	case "weblist":
		trim = false
		cfg.Granularity = "addresses"
		cfg.NoInlines = false // Need inline info to support call expansion
	case "peek":
		trim = false
	case "list":
		trim = false
		cfg.Granularity = "lines"
		// Do not force 'noinlines' to be false so that specifying
		// "-list foo -noinlines" is supported and works as expected.
	case "text", "top", "topproto":
		if cfg.NodeCount == -1 {
			cfg.NodeCount = 0
		}
	default:
		if cfg.NodeCount == -1 {
			cfg.NodeCount = 80
		}
	}

	switch outputFormat {
	case report.Proto, report.Raw, report.Callgrind:
		trim = false
		cfg.Granularity = "addresses"
		cfg.NoInlines = false
	}

	if !trim {
		// Disabling trim zeroes all trimming thresholds.
		cfg.NodeCount = 0
		cfg.NodeFraction = 0
		cfg.EdgeFraction = 0
	}
	return cfg
}

// generateTagRootsLeaves generates extra nodes from the tagroot and tagleaf options.
func generateTagRootsLeaves(prof *profile.Profile, cfg config, ui plugin.UI) {
	tagRootLabelKeys := dropEmptyStrings(strings.Split(cfg.TagRoot, ","))
	tagLeafLabelKeys := dropEmptyStrings(strings.Split(cfg.TagLeaf, ","))
	rootm, leafm := addLabelNodes(prof, tagRootLabelKeys, tagLeafLabelKeys, cfg.Unit)
	warnNoMatches(cfg.TagRoot == "" || rootm, "TagRoot", ui)
	warnNoMatches(cfg.TagLeaf == "" || leafm, "TagLeaf", ui)
}

// dropEmptyStrings filters a slice to only non-empty strings
func dropEmptyStrings(in []string) (out []string) {
	for _, s := range in {
		if s != "" {
			out = append(out, s)
		}
	}
	return
}

// aggregate merges profile samples according to cfg.Granularity and
// cfg.NoInlines by selecting which location attributes are kept.
func aggregate(prof *profile.Profile, cfg config) error {
	var function, filename, linenumber, address bool
	inlines := !cfg.NoInlines
	switch cfg.Granularity {
	case "addresses":
		if inlines {
			// Keeping everything: no aggregation needed.
			return nil
		}
		function = true
		filename = true
		linenumber = true
		address = true
	case "lines":
		function = true
		filename = true
		linenumber = true
	case "files":
		filename = true
	case "functions":
		function = true
	case "filefunctions":
		function = true
		filename = true
	default:
		return fmt.Errorf("unexpected granularity")
	}
	return prof.Aggregate(inlines, function, filename, linenumber, address)
}

// reportOptions builds report.Options from cfg for profile p, including
// the sample value extractors and the list of active filters.
func reportOptions(p *profile.Profile, numLabelUnits map[string]string, cfg config) (*report.Options, error) {
	si, mean := cfg.SampleIndex, cfg.Mean
	value, meanDiv, sample, err := sampleFormat(p, si, mean)
	if err != nil {
		return nil, err
	}

	stype := sample.Type
	if mean {
		stype = "mean_" + stype
	}

	if cfg.DivideBy == 0 {
		return nil, fmt.Errorf("zero divisor specified")
	}

	var filters []string
	addFilter := func(k string, v string) {
		if v != "" {
			filters = append(filters, k+"="+v)
		}
	}
	addFilter("focus", cfg.Focus)
	addFilter("ignore", cfg.Ignore)
	addFilter("hide", cfg.Hide)
	addFilter("show", cfg.Show)
	addFilter("show_from", cfg.ShowFrom)
	addFilter("tagfocus", cfg.TagFocus)
	addFilter("tagignore", cfg.TagIgnore)
	addFilter("tagshow", cfg.TagShow)
	addFilter("taghide", cfg.TagHide)

	ropt := &report.Options{
		CumSort:      cfg.Sort == "cum",
		CallTree:     cfg.CallTree,
		DropNegative: cfg.DropNegative,

		CompactLabels: cfg.CompactLabels,
		Ratio:         1 / cfg.DivideBy,

		NodeCount:    cfg.NodeCount,
		NodeFraction: cfg.NodeFraction,
		EdgeFraction: cfg.EdgeFraction,

		ActiveFilters: filters,
		NumLabelUnits: numLabelUnits,

		SampleValue:       value,
		SampleMeanDivisor: meanDiv,
		SampleType:        stype,
		SampleUnit:        sample.Unit,

		OutputUnit: cfg.Unit,

		SourcePath: cfg.SourcePath,
		TrimPath:   cfg.TrimPath,

		IntelSyntax: cfg.IntelSyntax,
	}

	// Title defaults to the basename of the first mapped binary.
	if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
		ropt.Title = filepath.Base(p.Mapping[0].File)
	}

	return ropt, nil
}

// identifyNumLabelUnits returns a map of numeric label keys to the units
// associated with those keys.
func identifyNumLabelUnits(p *profile.Profile, ui plugin.UI) map[string]string {
	numLabelUnits, ignoredUnits := p.NumLabelUnits()

	// Print errors for tags with multiple units associated with
	// a single key.
	for k, units := range ignoredUnits {
		ui.PrintErr(fmt.Sprintf("For tag %s used unit %s, also encountered unit(s) %s", k, numLabelUnits[k], strings.Join(units, ", ")))
	}
	return numLabelUnits
}

// sampleValueFunc extracts a single int64 value from a sample's values.
type sampleValueFunc func([]int64) int64

// sampleFormat returns a function to extract values out of a profile.Sample,
// and the type/units of those values.
func sampleFormat(p *profile.Profile, sampleIndex string, mean bool) (value, meanDiv sampleValueFunc, v *profile.ValueType, err error) {
	if len(p.SampleType) == 0 {
		return nil, nil, nil, fmt.Errorf("profile has no samples")
	}
	index, err := p.SampleIndexByName(sampleIndex)
	if err != nil {
		return nil, nil, nil, err
	}
	value = valueExtractor(index)
	if mean {
		// Mean reports divide by the first sample value (the count).
		meanDiv = valueExtractor(0)
	}
	v = p.SampleType[index]
	return
}

// valueExtractor returns a sampleValueFunc reading the ix'th value of a
// sample's value vector.
func valueExtractor(ix int) sampleValueFunc {
	return func(v []int64) int64 {
		return v[ix]
	}
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go
new file mode 100644
index 0000000..fd05adb
--- /dev/null
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go
@@ -0,0 +1,219 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package driver + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +var tagFilterRangeRx = regexp.MustCompile("([+-]?[[:digit:]]+)([[:alpha:]]+)?") + +// applyFocus filters samples based on the focus/ignore options +func applyFocus(prof *profile.Profile, numLabelUnits map[string]string, cfg config, ui plugin.UI) error { + focus, err := compileRegexOption("focus", cfg.Focus, nil) + ignore, err := compileRegexOption("ignore", cfg.Ignore, err) + hide, err := compileRegexOption("hide", cfg.Hide, err) + show, err := compileRegexOption("show", cfg.Show, err) + showfrom, err := compileRegexOption("show_from", cfg.ShowFrom, err) + tagfocus, err := compileTagFilter("tagfocus", cfg.TagFocus, numLabelUnits, ui, err) + tagignore, err := compileTagFilter("tagignore", cfg.TagIgnore, numLabelUnits, ui, err) + prunefrom, err := compileRegexOption("prune_from", cfg.PruneFrom, err) + if err != nil { + return err + } + + fm, im, hm, hnm := prof.FilterSamplesByName(focus, ignore, hide, show) + warnNoMatches(focus == nil || fm, "Focus", ui) + warnNoMatches(ignore == nil || im, "Ignore", ui) + warnNoMatches(hide == nil || hm, "Hide", ui) + warnNoMatches(show == nil || hnm, "Show", ui) + + sfm := prof.ShowFrom(showfrom) + warnNoMatches(showfrom == nil || sfm, "ShowFrom", ui) + + tfm, tim := prof.FilterSamplesByTag(tagfocus, tagignore) + warnNoMatches(tagfocus == nil || tfm, "TagFocus", ui) + warnNoMatches(tagignore == nil || tim, "TagIgnore", ui) + + tagshow, err := compileRegexOption("tagshow", cfg.TagShow, err) + taghide, err := compileRegexOption("taghide", cfg.TagHide, err) + tns, tnh := prof.FilterTagsByName(tagshow, taghide) + warnNoMatches(tagshow == nil || tns, "TagShow", ui) + warnNoMatches(taghide == nil || tnh, "TagHide", ui) + + if prunefrom != nil { + prof.PruneFrom(prunefrom) + } + return err +} + +func compileRegexOption(name, value 
string, err error) (*regexp.Regexp, error) { + if value == "" || err != nil { + return nil, err + } + rx, err := regexp.Compile(value) + if err != nil { + return nil, fmt.Errorf("parsing %s regexp: %v", name, err) + } + return rx, nil +} + +func compileTagFilter(name, value string, numLabelUnits map[string]string, ui plugin.UI, err error) (func(*profile.Sample) bool, error) { + if value == "" || err != nil { + return nil, err + } + + tagValuePair := strings.SplitN(value, "=", 2) + var wantKey string + if len(tagValuePair) == 2 { + wantKey = tagValuePair[0] + value = tagValuePair[1] + } + + if numFilter := parseTagFilterRange(value); numFilter != nil { + ui.PrintErr(name, ":Interpreted '", value, "' as range, not regexp") + labelFilter := func(vals []int64, unit string) bool { + for _, val := range vals { + if numFilter(val, unit) { + return true + } + } + return false + } + numLabelUnit := func(key string) string { + return numLabelUnits[key] + } + if wantKey == "" { + return func(s *profile.Sample) bool { + for key, vals := range s.NumLabel { + if labelFilter(vals, numLabelUnit(key)) { + return true + } + } + return false + }, nil + } + return func(s *profile.Sample) bool { + if vals, ok := s.NumLabel[wantKey]; ok { + return labelFilter(vals, numLabelUnit(wantKey)) + } + return false + }, nil + } + + var rfx []*regexp.Regexp + for _, tagf := range strings.Split(value, ",") { + fx, err := regexp.Compile(tagf) + if err != nil { + return nil, fmt.Errorf("parsing %s regexp: %v", name, err) + } + rfx = append(rfx, fx) + } + if wantKey == "" { + return func(s *profile.Sample) bool { + matchedrx: + for _, rx := range rfx { + for key, vals := range s.Label { + for _, val := range vals { + // TODO: Match against val, not key:val in future + if rx.MatchString(key + ":" + val) { + continue matchedrx + } + } + } + return false + } + return true + }, nil + } + return func(s *profile.Sample) bool { + if vals, ok := s.Label[wantKey]; ok { + for _, rx := range rfx { + for _, val 
:= range vals { + if rx.MatchString(val) { + return true + } + } + } + } + return false + }, nil +} + +// parseTagFilterRange returns a function to checks if a value is +// contained on the range described by a string. It can recognize +// strings of the form: +// "32kb" -- matches values == 32kb +// ":64kb" -- matches values <= 64kb +// "4mb:" -- matches values >= 4mb +// "12kb:64mb" -- matches values between 12kb and 64mb (both included). +func parseTagFilterRange(filter string) func(int64, string) bool { + ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2) + if len(ranges) == 0 { + return nil // No ranges were identified + } + v, err := strconv.ParseInt(ranges[0][1], 10, 64) + if err != nil { + panic(fmt.Errorf("failed to parse int %s: %v", ranges[0][1], err)) + } + scaledValue, unit := measurement.Scale(v, ranges[0][2], ranges[0][2]) + if len(ranges) == 1 { + switch match := ranges[0][0]; filter { + case match: + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv == scaledValue + } + case match + ":": + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv >= scaledValue + } + case ":" + match: + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv <= scaledValue + } + } + return nil + } + if filter != ranges[0][0]+":"+ranges[1][0] { + return nil + } + if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil { + panic(fmt.Errorf("failed to parse int %s: %v", ranges[1][1], err)) + } + scaledValue2, unit2 := measurement.Scale(v, ranges[1][2], unit) + if unit != unit2 { + return nil + } + return func(v int64, u string) bool { + sv, su := measurement.Scale(v, u, unit) + return su == unit && sv >= scaledValue && sv <= scaledValue2 + } +} + +func warnNoMatches(match bool, option string, ui plugin.UI) { + if !match { + ui.PrintErr(option + " expression matched no samples") + } +} diff --git 
a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go new file mode 100644 index 0000000..0b36165 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go @@ -0,0 +1,591 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// fetchProfiles fetches and symbolizes the profiles specified by s. +// It will merge all the profiles it is able to retrieve, even if +// there are some failures. It will return an error if it is unable to +// fetch any profiles. 
+func fetchProfiles(s *source, o *plugin.Options) (*profile.Profile, error) { + sources := make([]profileSource, 0, len(s.Sources)) + for _, src := range s.Sources { + sources = append(sources, profileSource{ + addr: src, + source: s, + }) + } + + bases := make([]profileSource, 0, len(s.Base)) + for _, src := range s.Base { + bases = append(bases, profileSource{ + addr: src, + source: s, + }) + } + + p, pbase, m, mbase, save, err := grabSourcesAndBases(sources, bases, o.Fetch, o.Obj, o.UI, o.HTTPTransport) + if err != nil { + return nil, err + } + + if pbase != nil { + if s.DiffBase { + pbase.SetLabel("pprof::base", []string{"true"}) + } + if s.Normalize { + err := p.Normalize(pbase) + if err != nil { + return nil, err + } + } + pbase.Scale(-1) + p, m, err = combineProfiles([]*profile.Profile{p, pbase}, []plugin.MappingSources{m, mbase}) + if err != nil { + return nil, err + } + } + + // Symbolize the merged profile. + if err := o.Sym.Symbolize(s.Symbolize, m, p); err != nil { + return nil, err + } + p.RemoveUninteresting() + unsourceMappings(p) + + if s.Comment != "" { + p.Comments = append(p.Comments, s.Comment) + } + + // Save a copy of the merged profile if there is at least one remote source. + if save { + dir, err := setTmpDir(o.UI) + if err != nil { + return nil, err + } + + prefix := "pprof." + if len(p.Mapping) > 0 && p.Mapping[0].File != "" { + prefix += filepath.Base(p.Mapping[0].File) + "." + } + for _, s := range p.SampleType { + prefix += s.Type + "." 
+ } + + tempFile, err := newTempFile(dir, prefix, ".pb.gz") + if err == nil { + if err = p.Write(tempFile); err == nil { + o.UI.PrintErr("Saved profile in ", tempFile.Name()) + } + } + if err != nil { + o.UI.PrintErr("Could not save profile: ", err) + } + } + + if err := p.CheckValid(); err != nil { + return nil, err + } + + return p, nil +} + +func grabSourcesAndBases(sources, bases []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, *profile.Profile, plugin.MappingSources, plugin.MappingSources, bool, error) { + wg := sync.WaitGroup{} + wg.Add(2) + var psrc, pbase *profile.Profile + var msrc, mbase plugin.MappingSources + var savesrc, savebase bool + var errsrc, errbase error + var countsrc, countbase int + go func() { + defer wg.Done() + psrc, msrc, savesrc, countsrc, errsrc = chunkedGrab(sources, fetch, obj, ui, tr) + }() + go func() { + defer wg.Done() + pbase, mbase, savebase, countbase, errbase = chunkedGrab(bases, fetch, obj, ui, tr) + }() + wg.Wait() + save := savesrc || savebase + + if errsrc != nil { + return nil, nil, nil, nil, false, fmt.Errorf("problem fetching source profiles: %v", errsrc) + } + if errbase != nil { + return nil, nil, nil, nil, false, fmt.Errorf("problem fetching base profiles: %v,", errbase) + } + if countsrc == 0 { + return nil, nil, nil, nil, false, fmt.Errorf("failed to fetch any source profiles") + } + if countbase == 0 && len(bases) > 0 { + return nil, nil, nil, nil, false, fmt.Errorf("failed to fetch any base profiles") + } + if want, got := len(sources), countsrc; want != got { + ui.PrintErr(fmt.Sprintf("Fetched %d source profiles out of %d", got, want)) + } + if want, got := len(bases), countbase; want != got { + ui.PrintErr(fmt.Sprintf("Fetched %d base profiles out of %d", got, want)) + } + + return psrc, pbase, msrc, mbase, save, nil +} + +// chunkedGrab fetches the profiles described in source and merges them into +// a single profile. 
It fetches a chunk of profiles concurrently, with a maximum +// chunk size to limit its memory usage. +func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { + const chunkSize = 64 + + var p *profile.Profile + var msrc plugin.MappingSources + var save bool + var count int + + for start := 0; start < len(sources); start += chunkSize { + end := start + chunkSize + if end > len(sources) { + end = len(sources) + } + chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui, tr) + switch { + case chunkErr != nil: + return nil, nil, false, 0, chunkErr + case chunkP == nil: + continue + case p == nil: + p, msrc, save, count = chunkP, chunkMsrc, chunkSave, chunkCount + default: + p, msrc, chunkErr = combineProfiles([]*profile.Profile{p, chunkP}, []plugin.MappingSources{msrc, chunkMsrc}) + if chunkErr != nil { + return nil, nil, false, 0, chunkErr + } + if chunkSave { + save = true + } + count += chunkCount + } + } + + return p, msrc, save, count, nil +} + +// concurrentGrab fetches multiple profiles concurrently +func concurrentGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { + wg := sync.WaitGroup{} + wg.Add(len(sources)) + for i := range sources { + go func(s *profileSource) { + defer wg.Done() + s.p, s.msrc, s.remote, s.err = grabProfile(s.source, s.addr, fetch, obj, ui, tr) + }(&sources[i]) + } + wg.Wait() + + var save bool + profiles := make([]*profile.Profile, 0, len(sources)) + msrcs := make([]plugin.MappingSources, 0, len(sources)) + for i := range sources { + s := &sources[i] + if err := s.err; err != nil { + ui.PrintErr(s.addr + ": " + err.Error()) + continue + } + save = save || s.remote + profiles = append(profiles, s.p) + msrcs = append(msrcs, s.msrc) + *s = 
profileSource{} + } + + if len(profiles) == 0 { + return nil, nil, false, 0, nil + } + + p, msrc, err := combineProfiles(profiles, msrcs) + if err != nil { + return nil, nil, false, 0, err + } + return p, msrc, save, len(profiles), nil +} + +func combineProfiles(profiles []*profile.Profile, msrcs []plugin.MappingSources) (*profile.Profile, plugin.MappingSources, error) { + // Merge profiles. + if err := measurement.ScaleProfiles(profiles); err != nil { + return nil, nil, err + } + + p, err := profile.Merge(profiles) + if err != nil { + return nil, nil, err + } + + // Combine mapping sources. + msrc := make(plugin.MappingSources) + for _, ms := range msrcs { + for m, s := range ms { + msrc[m] = append(msrc[m], s...) + } + } + return p, msrc, nil +} + +type profileSource struct { + addr string + source *source + + p *profile.Profile + msrc plugin.MappingSources + remote bool + err error +} + +func homeEnv() string { + switch runtime.GOOS { + case "windows": + return "USERPROFILE" + case "plan9": + return "home" + default: + return "HOME" + } +} + +// setTmpDir prepares the directory to use to save profiles retrieved +// remotely. It is selected from PPROF_TMPDIR, defaults to $HOME/pprof, and, if +// $HOME is not set, falls back to os.TempDir(). +func setTmpDir(ui plugin.UI) (string, error) { + var dirs []string + if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir != "" { + dirs = append(dirs, profileDir) + } + if homeDir := os.Getenv(homeEnv()); homeDir != "" { + dirs = append(dirs, filepath.Join(homeDir, "pprof")) + } + dirs = append(dirs, os.TempDir()) + for _, tmpDir := range dirs { + if err := os.MkdirAll(tmpDir, 0755); err != nil { + ui.PrintErr("Could not use temp dir ", tmpDir, ": ", err.Error()) + continue + } + return tmpDir, nil + } + return "", fmt.Errorf("failed to identify temp dir") +} + +const testSourceAddress = "pproftest.local" + +// grabProfile fetches a profile. 
Returns the profile, sources for the +// profile mappings, a bool indicating if the profile was fetched +// remotely, and an error. +func grabProfile(s *source, source string, fetcher plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, msrc plugin.MappingSources, remote bool, err error) { + var src string + duration, timeout := time.Duration(s.Seconds)*time.Second, time.Duration(s.Timeout)*time.Second + if fetcher != nil { + p, src, err = fetcher.Fetch(source, duration, timeout) + if err != nil { + return + } + } + if err != nil || p == nil { + // Fetch the profile over HTTP or from a file. + p, src, err = fetch(source, duration, timeout, ui, tr) + if err != nil { + return + } + } + + if err = p.CheckValid(); err != nil { + return + } + + // Update the binary locations from command line and paths. + locateBinaries(p, s, obj, ui) + + // Collect the source URL for all mappings. + if src != "" { + msrc = collectMappingSources(p, src) + remote = true + if strings.HasPrefix(src, "http://"+testSourceAddress) { + // Treat test inputs as local to avoid saving + // testcase profiles during driver testing. + remote = false + } + } + return +} + +// collectMappingSources saves the mapping sources of a profile. +func collectMappingSources(p *profile.Profile, source string) plugin.MappingSources { + ms := plugin.MappingSources{} + for _, m := range p.Mapping { + src := struct { + Source string + Start uint64 + }{ + source, m.Start, + } + key := m.BuildID + if key == "" { + key = m.File + } + if key == "" { + // If there is no build id or source file, use the source as the + // mapping file. This will enable remote symbolization for this + // mapping, in particular for Go profiles on the legacy format. + // The source is reset back to empty string by unsourceMapping + // which is called after symbolization is finished. 
+ m.File = source + key = source + } + ms[key] = append(ms[key], src) + } + return ms +} + +// unsourceMappings iterates over the mappings in a profile and replaces file +// set to the remote source URL by collectMappingSources back to empty string. +func unsourceMappings(p *profile.Profile) { + for _, m := range p.Mapping { + if m.BuildID == "" { + if u, err := url.Parse(m.File); err == nil && u.IsAbs() { + m.File = "" + } + } + } +} + +// locateBinaries searches for binary files listed in the profile and, if found, +// updates the profile accordingly. +func locateBinaries(p *profile.Profile, s *source, obj plugin.ObjTool, ui plugin.UI) { + // Construct search path to examine + searchPath := os.Getenv("PPROF_BINARY_PATH") + if searchPath == "" { + // Use $HOME/pprof/binaries as default directory for local symbolization binaries + searchPath = filepath.Join(os.Getenv(homeEnv()), "pprof", "binaries") + } +mapping: + for _, m := range p.Mapping { + var baseName string + if m.File != "" { + baseName = filepath.Base(m.File) + } + + for _, path := range filepath.SplitList(searchPath) { + var fileNames []string + if m.BuildID != "" { + fileNames = []string{filepath.Join(path, m.BuildID, baseName)} + if matches, err := filepath.Glob(filepath.Join(path, m.BuildID, "*")); err == nil { + fileNames = append(fileNames, matches...) + } + fileNames = append(fileNames, filepath.Join(path, m.File, m.BuildID)) // perf path format + } + if m.File != "" { + // Try both the basename and the full path, to support the same directory + // structure as the perf symfs option. 
+ if baseName != "" { + fileNames = append(fileNames, filepath.Join(path, baseName)) + } + fileNames = append(fileNames, filepath.Join(path, m.File)) + } + for _, name := range fileNames { + if f, err := obj.Open(name, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol); err == nil { + defer f.Close() + fileBuildID := f.BuildID() + if m.BuildID != "" && m.BuildID != fileBuildID { + ui.PrintErr("Ignoring local file " + name + ": build-id mismatch (" + m.BuildID + " != " + fileBuildID + ")") + } else { + // Explicitly do not update KernelRelocationSymbol -- + // the new local file name is most likely missing it. + m.File = name + continue mapping + } + } + } + } + } + if len(p.Mapping) == 0 { + // If there are no mappings, add a fake mapping to attempt symbolization. + // This is useful for some profiles generated by the golang runtime, which + // do not include any mappings. Symbolization with a fake mapping will only + // be successful against a non-PIE binary. + m := &profile.Mapping{ID: 1} + p.Mapping = []*profile.Mapping{m} + for _, l := range p.Location { + l.Mapping = m + } + } + // Replace executable filename/buildID with the overrides from source. + // Assumes the executable is the first Mapping entry. + if execName, buildID := s.ExecName, s.BuildID; execName != "" || buildID != "" { + m := p.Mapping[0] + if execName != "" { + // Explicitly do not update KernelRelocationSymbol -- + // the source override is most likely missing it. + m.File = execName + } + if buildID != "" { + m.BuildID = buildID + } + } +} + +// fetch fetches a profile from source, within the timeout specified, +// producing messages through the ui. It returns the profile and the +// url of the actual source of the profile for remote profiles. 
+func fetch(source string, duration, timeout time.Duration, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, src string, err error) { + var f io.ReadCloser + + if sourceURL, timeout := adjustURL(source, duration, timeout); sourceURL != "" { + ui.Print("Fetching profile over HTTP from " + sourceURL) + if duration > 0 { + ui.Print(fmt.Sprintf("Please wait... (%v)", duration)) + } + f, err = fetchURL(sourceURL, timeout, tr) + src = sourceURL + } else if isPerfFile(source) { + f, err = convertPerfData(source, ui) + } else { + f, err = os.Open(source) + } + if err == nil { + defer f.Close() + p, err = profile.Parse(f) + } + return +} + +// fetchURL fetches a profile from a URL using HTTP. +func fetchURL(source string, timeout time.Duration, tr http.RoundTripper) (io.ReadCloser, error) { + client := &http.Client{ + Transport: tr, + Timeout: timeout + 5*time.Second, + } + resp, err := client.Get(source) + if err != nil { + return nil, fmt.Errorf("http fetch: %v", err) + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + return nil, statusCodeError(resp) + } + + return resp.Body, nil +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := ioutil.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// isPerfFile checks if a file is in perf.data format. It also returns false +// if it encounters an error during the check. +func isPerfFile(path string) bool { + sourceFile, openErr := os.Open(path) + if openErr != nil { + return false + } + defer sourceFile.Close() + + // If the file is the output of a perf record command, it should begin + // with the string PERFILE2. 
+ perfHeader := []byte("PERFILE2") + actualHeader := make([]byte, len(perfHeader)) + if _, readErr := sourceFile.Read(actualHeader); readErr != nil { + return false + } + return bytes.Equal(actualHeader, perfHeader) +} + +// convertPerfData converts the file at path which should be in perf.data format +// using the perf_to_profile tool and returns the file containing the +// profile.proto formatted data. +func convertPerfData(perfPath string, ui plugin.UI) (*os.File, error) { + ui.Print(fmt.Sprintf( + "Converting %s to a profile.proto... (May take a few minutes)", + perfPath)) + profile, err := newTempFile(os.TempDir(), "pprof_", ".pb.gz") + if err != nil { + return nil, err + } + deferDeleteTempFile(profile.Name()) + cmd := exec.Command("perf_to_profile", "-i", perfPath, "-o", profile.Name(), "-f") + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + if err := cmd.Run(); err != nil { + profile.Close() + return nil, fmt.Errorf("failed to convert perf.data file. Try github.com/google/perf_data_converter: %v", err) + } + return profile, nil +} + +// adjustURL validates if a profile source is a URL and returns an +// cleaned up URL and the timeout to use for retrieval over HTTP. +// If the source cannot be recognized as a URL it returns an empty string. +func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) { + u, err := url.Parse(source) + if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") { + // Try adding http:// to catch sources of the form hostname:port/path. + // url.Parse treats "hostname" as the scheme. + u, err = url.Parse("http://" + source) + } + if err != nil || u.Host == "" { + return "", 0 + } + + // Apply duration/timeout overrides to URL. 
+ values := u.Query() + if duration > 0 { + values.Set("seconds", fmt.Sprint(int(duration.Seconds()))) + } else { + if urlSeconds := values.Get("seconds"); urlSeconds != "" { + if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil { + duration = time.Duration(us) * time.Second + } + } + } + if timeout <= 0 { + if duration > 0 { + timeout = duration + duration/2 + } else { + timeout = 60 * time.Second + } + } + u.RawQuery = values.Encode() + return u.String(), timeout +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go new file mode 100644 index 0000000..5390319 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "flag" + "strings" +) + +// GoFlags implements the plugin.FlagSet interface. +type GoFlags struct { + UsageMsgs []string +} + +// Bool implements the plugin.FlagSet interface. +func (*GoFlags) Bool(o string, d bool, c string) *bool { + return flag.Bool(o, d, c) +} + +// Int implements the plugin.FlagSet interface. +func (*GoFlags) Int(o string, d int, c string) *int { + return flag.Int(o, d, c) +} + +// Float64 implements the plugin.FlagSet interface. 
+func (*GoFlags) Float64(o string, d float64, c string) *float64 { + return flag.Float64(o, d, c) +} + +// String implements the plugin.FlagSet interface. +func (*GoFlags) String(o, d, c string) *string { + return flag.String(o, d, c) +} + +// StringList implements the plugin.FlagSet interface. +func (*GoFlags) StringList(o, d, c string) *[]*string { + return &[]*string{flag.String(o, d, c)} +} + +// ExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) ExtraUsage() string { + return strings.Join(f.UsageMsgs, "\n") +} + +// AddExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) AddExtraUsage(eu string) { + f.UsageMsgs = append(f.UsageMsgs, eu) +} + +// Parse implements the plugin.FlagSet interface. +func (*GoFlags) Parse(usage func()) []string { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + usage() + } + return args +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go new file mode 100644 index 0000000..fbeb765 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go @@ -0,0 +1,106 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package driver + +import ( + "encoding/json" + "html/template" + "net/http" + "strings" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/report" +) + +type treeNode struct { + Name string `json:"n"` + FullName string `json:"f"` + Cum int64 `json:"v"` + CumFormat string `json:"l"` + Percent string `json:"p"` + Children []*treeNode `json:"c"` +} + +// flamegraph generates a web page containing a flamegraph. +func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { + // Force the call tree so that the graph is a tree. + // Also do not trim the tree so that the flame graph contains all functions. + rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { + cfg.CallTree = true + cfg.Trim = false + }) + if rpt == nil { + return // error already reported + } + + // Generate dot graph. + g, config := report.GetDOT(rpt) + var nodes []*treeNode + nroots := 0 + rootValue := int64(0) + nodeArr := []string{} + nodeMap := map[*graph.Node]*treeNode{} + // Make all nodes and the map, collect the roots. + for _, n := range g.Nodes { + v := n.CumValue() + fullName := n.Info.PrintableName() + node := &treeNode{ + Name: graph.ShortenFunctionName(fullName), + FullName: fullName, + Cum: v, + CumFormat: config.FormatValue(v), + Percent: strings.TrimSpace(measurement.Percentage(v, config.Total)), + } + nodes = append(nodes, node) + if len(n.In) == 0 { + nodes[nroots], nodes[len(nodes)-1] = nodes[len(nodes)-1], nodes[nroots] + nroots++ + rootValue += v + } + nodeMap[n] = node + // Get all node names into an array. + nodeArr = append(nodeArr, n.Info.Name) + } + // Populate the child links. 
+ for _, n := range g.Nodes { + node := nodeMap[n] + for child := range n.Out { + node.Children = append(node.Children, nodeMap[child]) + } + } + + rootNode := &treeNode{ + Name: "root", + FullName: "root", + Cum: rootValue, + CumFormat: config.FormatValue(rootValue), + Percent: strings.TrimSpace(measurement.Percentage(rootValue, config.Total)), + Children: nodes[0:nroots], + } + + // JSON marshalling flame graph + b, err := json.Marshal(rootNode) + if err != nil { + http.Error(w, "error serializing flame graph", http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + + ui.render(w, req, "flamegraph", rpt, errList, config.Labels, webArgs{ + FlameGraph: template.JS(b), + Nodes: nodeArr, + }) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css new file mode 100644 index 0000000..03755ab --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css @@ -0,0 +1,272 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} +html, body { + height: 100%; +} +body { + font-family: 'Roboto', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + font-size: 13px; + line-height: 1.4; + display: flex; + flex-direction: column; +} +a { + color: #2a66d9; +} +.header { + display: flex; + align-items: center; + height: 44px; + min-height: 44px; + background-color: #eee; + color: #212121; + padding: 0 1rem; +} +.header > div { + margin: 0 0.125em; +} +.header .title h1 { + font-size: 1.75em; + margin-right: 1rem; + margin-bottom: 4px; +} +.header .title a { + color: #212121; + text-decoration: none; +} +.header .title a:hover { + text-decoration: underline; +} +.header .description { + width: 100%; + text-align: right; + white-space: nowrap; +} +@media screen and (max-width: 799px) { + .header input { + display: none; + } +} +#detailsbox { + 
display: none; + z-index: 1; + position: fixed; + top: 40px; + right: 20px; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); + line-height: 24px; + padding: 1em; + text-align: left; +} +.header input { + background: white url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' style='pointer-events:none;display:block;width:100%25;height:100%25;fill:%23757575'%3E%3Cpath d='M15.5 14h-.79l-.28-.27C15.41 12.59 16 11.11 16 9.5 16 5.91 13.09 3 9.5 3S3 5.91 3 9.5 5.91 16 9.5 16c1.61.0 3.09-.59 4.23-1.57l.27.28v.79l5 4.99L20.49 19l-4.99-5zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 11.99 14 9.5 14z'/%3E%3C/svg%3E") no-repeat 4px center/20px 20px; + border: 1px solid #d1d2d3; + border-radius: 2px 0 0 2px; + padding: 0.25em; + padding-left: 28px; + margin-left: 1em; + font-family: 'Roboto', 'Noto', sans-serif; + font-size: 1em; + line-height: 24px; + color: #212121; +} +.downArrow { + border-top: .36em solid #ccc; + border-left: .36em solid transparent; + border-right: .36em solid transparent; + margin-bottom: .05em; + margin-left: .5em; + transition: border-top-color 200ms; +} +.menu-item { + height: 100%; + text-transform: uppercase; + font-family: 'Roboto Medium', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + position: relative; +} +.menu-item .menu-name:hover { + opacity: 0.75; +} +.menu-item .menu-name:hover .downArrow { + border-top-color: #666; +} +.menu-name { + height: 100%; + padding: 0 0.5em; + display: flex; + align-items: center; + justify-content: center; +} +.menu-name a { + text-decoration: none; + color: #212121; +} +.submenu { + display: none; + z-index: 1; + margin-top: -4px; + min-width: 10em; + position: absolute; + left: 0px; + background-color: white; + box-shadow: 0 1px 5px rgba(0,0,0,.3); + font-size: 100%; + text-transform: none; +} +.menu-item, .submenu { + user-select: none; + -moz-user-select: 
none; + -ms-user-select: none; + -webkit-user-select: none; +} +.submenu hr { + border: 0; + border-top: 2px solid #eee; +} +.submenu a { + display: block; + padding: .5em 1em; + text-decoration: none; +} +.submenu a:hover, .submenu a.active { + color: white; + background-color: #6b82d6; +} +.submenu a.disabled { + color: gray; + pointer-events: none; +} +.menu-check-mark { + position: absolute; + left: 2px; +} +.menu-delete-btn { + position: absolute; + right: 2px; +} + +{{/* Used to disable events when a modal dialog is displayed */}} +#dialog-overlay { + display: none; + position: fixed; + left: 0px; + top: 0px; + width: 100%; + height: 100%; + background-color: rgba(1,1,1,0.1); +} + +.dialog { + {{/* Displayed centered horizontally near the top */}} + display: none; + position: fixed; + margin: 0px; + top: 60px; + left: 50%; + transform: translateX(-50%); + + z-index: 3; + font-size: 125%; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); +} +.dialog-header { + font-size: 120%; + border-bottom: 1px solid #CCCCCC; + width: 100%; + text-align: center; + background: #EEEEEE; + user-select: none; +} +.dialog-footer { + border-top: 1px solid #CCCCCC; + width: 100%; + text-align: right; + padding: 10px; +} +.dialog-error { + margin: 10px; + color: red; +} +.dialog input { + margin: 10px; + font-size: inherit; +} +.dialog button { + margin-left: 10px; + font-size: inherit; +} +#save-dialog, #delete-dialog { + width: 50%; + max-width: 20em; +} +#delete-prompt { + padding: 10px; +} + +#content { + overflow-y: scroll; + padding: 1em; +} +#top { + overflow-y: scroll; +} +#graph { + overflow: hidden; +} +#graph svg { + width: 100%; + height: auto; + padding: 10px; +} +#content.source .filename { + margin-top: 0; + margin-bottom: 1em; + font-size: 120%; +} +#content.source pre { + margin-bottom: 3em; +} +table { + border-spacing: 0px; + width: 100%; + padding-bottom: 1em; + white-space: nowrap; +} +table thead { + font-family: 'Roboto Medium', 
-apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; +} +table tr th { + position: sticky; + top: 0; + background-color: #ddd; + text-align: right; + padding: .3em .5em; +} +table tr td { + padding: .3em .5em; + text-align: right; +} +#top table tr th:nth-child(6), +#top table tr th:nth-child(7), +#top table tr td:nth-child(6), +#top table tr td:nth-child(7) { + text-align: left; +} +#top table tr td:nth-child(6) { + width: 100%; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; +} +#flathdr1, #flathdr2, #cumhdr1, #cumhdr2, #namehdr { + cursor: ns-resize; +} +.hilite { + background-color: #ebf5fb; + font-weight: bold; +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js new file mode 100644 index 0000000..4fe3caa --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js @@ -0,0 +1,693 @@ +// Make svg pannable and zoomable. +// Call clickHandler(t) if a click event is caught by the pan event handlers. +function initPanAndZoom(svg, clickHandler) { + 'use strict'; + + // Current mouse/touch handling mode + const IDLE = 0; + const MOUSEPAN = 1; + const TOUCHPAN = 2; + const TOUCHZOOM = 3; + let mode = IDLE; + + // State needed to implement zooming. + let currentScale = 1.0; + const initWidth = svg.viewBox.baseVal.width; + const initHeight = svg.viewBox.baseVal.height; + + // State needed to implement panning. 
+ let panLastX = 0; // Last event X coordinate + let panLastY = 0; // Last event Y coordinate + let moved = false; // Have we seen significant movement + let touchid = null; // Current touch identifier + + // State needed for pinch zooming + let touchid2 = null; // Second id for pinch zooming + let initGap = 1.0; // Starting gap between two touches + let initScale = 1.0; // currentScale when pinch zoom started + let centerPoint = null; // Center point for scaling + + // Convert event coordinates to svg coordinates. + function toSvg(x, y) { + const p = svg.createSVGPoint(); + p.x = x; + p.y = y; + let m = svg.getCTM(); + if (m == null) m = svg.getScreenCTM(); // Firefox workaround. + return p.matrixTransform(m.inverse()); + } + + // Change the scaling for the svg to s, keeping the point denoted + // by u (in svg coordinates]) fixed at the same screen location. + function rescale(s, u) { + // Limit to a good range. + if (s < 0.2) s = 0.2; + if (s > 10.0) s = 10.0; + + currentScale = s; + + // svg.viewBox defines the visible portion of the user coordinate + // system. So to magnify by s, divide the visible portion by s, + // which will then be stretched to fit the viewport. + const vb = svg.viewBox; + const w1 = vb.baseVal.width; + const w2 = initWidth / s; + const h1 = vb.baseVal.height; + const h2 = initHeight / s; + vb.baseVal.width = w2; + vb.baseVal.height = h2; + + // We also want to adjust vb.baseVal.x so that u.x remains at same + // screen X coordinate. In other words, want to change it from x1 to x2 + // so that: + // (u.x - x1) / w1 = (u.x - x2) / w2 + // Simplifying that, we get + // (u.x - x1) * (w2 / w1) = u.x - x2 + // x2 = u.x - (u.x - x1) * (w2 / w1) + vb.baseVal.x = u.x - (u.x - vb.baseVal.x) * (w2 / w1); + vb.baseVal.y = u.y - (u.y - vb.baseVal.y) * (h2 / h1); + } + + function handleWheel(e) { + if (e.deltaY == 0) return; + // Change scale factor by 1.1 or 1/1.1 + rescale(currentScale * (e.deltaY < 0 ? 
1.1 : (1/1.1)), + toSvg(e.offsetX, e.offsetY)); + } + + function setMode(m) { + mode = m; + touchid = null; + touchid2 = null; + } + + function panStart(x, y) { + moved = false; + panLastX = x; + panLastY = y; + } + + function panMove(x, y) { + let dx = x - panLastX; + let dy = y - panLastY; + if (Math.abs(dx) <= 2 && Math.abs(dy) <= 2) return; // Ignore tiny moves + + moved = true; + panLastX = x; + panLastY = y; + + // Firefox workaround: get dimensions from parentNode. + const swidth = svg.clientWidth || svg.parentNode.clientWidth; + const sheight = svg.clientHeight || svg.parentNode.clientHeight; + + // Convert deltas from screen space to svg space. + dx *= (svg.viewBox.baseVal.width / swidth); + dy *= (svg.viewBox.baseVal.height / sheight); + + svg.viewBox.baseVal.x -= dx; + svg.viewBox.baseVal.y -= dy; + } + + function handleScanStart(e) { + if (e.button != 0) return; // Do not catch right-clicks etc. + setMode(MOUSEPAN); + panStart(e.clientX, e.clientY); + e.preventDefault(); + svg.addEventListener('mousemove', handleScanMove); + } + + function handleScanMove(e) { + if (e.buttons == 0) { + // Missed an end event, perhaps because mouse moved outside window. + setMode(IDLE); + svg.removeEventListener('mousemove', handleScanMove); + return; + } + if (mode == MOUSEPAN) panMove(e.clientX, e.clientY); + } + + function handleScanEnd(e) { + if (mode == MOUSEPAN) panMove(e.clientX, e.clientY); + setMode(IDLE); + svg.removeEventListener('mousemove', handleScanMove); + if (!moved) clickHandler(e.target); + } + + // Find touch object with specified identifier. 
+ function findTouch(tlist, id) { + for (const t of tlist) { + if (t.identifier == id) return t; + } + return null; + } + + // Return distance between two touch points + function touchGap(t1, t2) { + const dx = t1.clientX - t2.clientX; + const dy = t1.clientY - t2.clientY; + return Math.hypot(dx, dy); + } + + function handleTouchStart(e) { + if (mode == IDLE && e.changedTouches.length == 1) { + // Start touch based panning + const t = e.changedTouches[0]; + setMode(TOUCHPAN); + touchid = t.identifier; + panStart(t.clientX, t.clientY); + e.preventDefault(); + } else if (mode == TOUCHPAN && e.touches.length == 2) { + // Start pinch zooming + setMode(TOUCHZOOM); + const t1 = e.touches[0]; + const t2 = e.touches[1]; + touchid = t1.identifier; + touchid2 = t2.identifier; + initScale = currentScale; + initGap = touchGap(t1, t2); + centerPoint = toSvg((t1.clientX + t2.clientX) / 2, + (t1.clientY + t2.clientY) / 2); + e.preventDefault(); + } + } + + function handleTouchMove(e) { + if (mode == TOUCHPAN) { + const t = findTouch(e.changedTouches, touchid); + if (t == null) return; + if (e.touches.length != 1) { + setMode(IDLE); + return; + } + panMove(t.clientX, t.clientY); + e.preventDefault(); + } else if (mode == TOUCHZOOM) { + // Get two touches; new gap; rescale to ratio. 
+ const t1 = findTouch(e.touches, touchid); + const t2 = findTouch(e.touches, touchid2); + if (t1 == null || t2 == null) return; + const gap = touchGap(t1, t2); + rescale(initScale * gap / initGap, centerPoint); + e.preventDefault(); + } + } + + function handleTouchEnd(e) { + if (mode == TOUCHPAN) { + const t = findTouch(e.changedTouches, touchid); + if (t == null) return; + panMove(t.clientX, t.clientY); + setMode(IDLE); + e.preventDefault(); + if (!moved) clickHandler(t.target); + } else if (mode == TOUCHZOOM) { + setMode(IDLE); + e.preventDefault(); + } + } + + svg.addEventListener('mousedown', handleScanStart); + svg.addEventListener('mouseup', handleScanEnd); + svg.addEventListener('touchstart', handleTouchStart); + svg.addEventListener('touchmove', handleTouchMove); + svg.addEventListener('touchend', handleTouchEnd); + svg.addEventListener('wheel', handleWheel, true); +} + +function initMenus() { + 'use strict'; + + let activeMenu = null; + let activeMenuHdr = null; + + function cancelActiveMenu() { + if (activeMenu == null) return; + activeMenu.style.display = 'none'; + activeMenu = null; + activeMenuHdr = null; + } + + // Set click handlers on every menu header. + for (const menu of document.getElementsByClassName('submenu')) { + const hdr = menu.parentElement; + if (hdr == null) return; + if (hdr.classList.contains('disabled')) return; + function showMenu(e) { + // menu is a child of hdr, so this event can fire for clicks + // inside menu. Ignore such clicks. + if (e.target.parentElement != hdr) return; + activeMenu = menu; + activeMenuHdr = hdr; + menu.style.display = 'block'; + } + hdr.addEventListener('mousedown', showMenu); + hdr.addEventListener('touchstart', showMenu); + } + + // If there is an active menu and a down event outside, retract the menu. 
+ for (const t of ['mousedown', 'touchstart']) { + document.addEventListener(t, (e) => { + // Note: to avoid unnecessary flicker, if the down event is inside + // the active menu header, do not retract the menu. + if (activeMenuHdr != e.target.closest('.menu-item')) { + cancelActiveMenu(); + } + }, { passive: true, capture: true }); + } + + // If there is an active menu and an up event inside, retract the menu. + document.addEventListener('mouseup', (e) => { + if (activeMenu == e.target.closest('.submenu')) { + cancelActiveMenu(); + } + }, { passive: true, capture: true }); +} + +function sendURL(method, url, done) { + fetch(url.toString(), {method: method}) + .then((response) => { done(response.ok); }) + .catch((error) => { done(false); }); +} + +// Initialize handlers for saving/loading configurations. +function initConfigManager() { + 'use strict'; + + // Initialize various elements. + function elem(id) { + const result = document.getElementById(id); + if (!result) console.warn('element ' + id + ' not found'); + return result; + } + const overlay = elem('dialog-overlay'); + const saveDialog = elem('save-dialog'); + const saveInput = elem('save-name'); + const saveError = elem('save-error'); + const delDialog = elem('delete-dialog'); + const delPrompt = elem('delete-prompt'); + const delError = elem('delete-error'); + + let currentDialog = null; + let currentDeleteTarget = null; + + function showDialog(dialog) { + if (currentDialog != null) { + overlay.style.display = 'none'; + currentDialog.style.display = 'none'; + } + currentDialog = dialog; + if (dialog != null) { + overlay.style.display = 'block'; + dialog.style.display = 'block'; + } + } + + function cancelDialog(e) { + showDialog(null); + } + + // Show dialog for saving the current config. + function showSaveDialog(e) { + saveError.innerText = ''; + showDialog(saveDialog); + saveInput.focus(); + } + + // Commit save config. 
+ function commitSave(e) { + const name = saveInput.value; + const url = new URL(document.URL); + // Set path relative to existing path. + url.pathname = new URL('./saveconfig', document.URL).pathname; + url.searchParams.set('config', name); + saveError.innerText = ''; + sendURL('POST', url, (ok) => { + if (!ok) { + saveError.innerText = 'Save failed'; + } else { + showDialog(null); + location.reload(); // Reload to show updated config menu + } + }); + } + + function handleSaveInputKey(e) { + if (e.key === 'Enter') commitSave(e); + } + + function deleteConfig(e, elem) { + e.preventDefault(); + const config = elem.dataset.config; + delPrompt.innerText = 'Delete ' + config + '?'; + currentDeleteTarget = elem; + showDialog(delDialog); + } + + function commitDelete(e, elem) { + if (!currentDeleteTarget) return; + const config = currentDeleteTarget.dataset.config; + const url = new URL('./deleteconfig', document.URL); + url.searchParams.set('config', config); + delError.innerText = ''; + sendURL('DELETE', url, (ok) => { + if (!ok) { + delError.innerText = 'Delete failed'; + return; + } + showDialog(null); + // Remove menu entry for this config. + if (currentDeleteTarget && currentDeleteTarget.parentElement) { + currentDeleteTarget.parentElement.remove(); + } + }); + } + + // Bind event on elem to fn. + function bind(event, elem, fn) { + if (elem == null) return; + elem.addEventListener(event, fn); + if (event == 'click') { + // Also enable via touch. + elem.addEventListener('touchstart', fn); + } + } + + bind('click', elem('save-config'), showSaveDialog); + bind('click', elem('save-cancel'), cancelDialog); + bind('click', elem('save-confirm'), commitSave); + bind('keydown', saveInput, handleSaveInputKey); + + bind('click', elem('delete-cancel'), cancelDialog); + bind('click', elem('delete-confirm'), commitDelete); + + // Activate deletion button for all config entries in menu. 
+ for (const del of Array.from(document.getElementsByClassName('menu-delete-btn'))) { + bind('click', del, (e) => { + deleteConfig(e, del); + }); + } +} + +function viewer(baseUrl, nodes) { + 'use strict'; + + // Elements + const search = document.getElementById('search'); + const graph0 = document.getElementById('graph0'); + const svg = (graph0 == null ? null : graph0.parentElement); + const toptable = document.getElementById('toptable'); + + let regexpActive = false; + let selected = new Map(); + let origFill = new Map(); + let searchAlarm = null; + let buttonsEnabled = true; + + function handleDetails(e) { + e.preventDefault(); + const detailsText = document.getElementById('detailsbox'); + if (detailsText != null) { + if (detailsText.style.display === 'block') { + detailsText.style.display = 'none'; + } else { + detailsText.style.display = 'block'; + } + } + } + + function handleKey(e) { + if (e.keyCode != 13) return; + setHrefParams(window.location, function (params) { + params.set('f', search.value); + }); + e.preventDefault(); + } + + function handleSearch() { + // Delay expensive processing so a flurry of key strokes is handled once. + if (searchAlarm != null) { + clearTimeout(searchAlarm); + } + searchAlarm = setTimeout(selectMatching, 300); + + regexpActive = true; + updateButtons(); + } + + function selectMatching() { + searchAlarm = null; + let re = null; + if (search.value != '') { + try { + re = new RegExp(search.value); + } catch (e) { + // TODO: Display error state in search box + return; + } + } + + function match(text) { + return re != null && re.test(text); + } + + // drop currently selected items that do not match re. + selected.forEach(function(v, n) { + if (!match(nodes[n])) { + unselect(n, document.getElementById('node' + n)); + } + }) + + // add matching items that are not currently selected. 
+ if (nodes) { + for (let n = 0; n < nodes.length; n++) { + if (!selected.has(n) && match(nodes[n])) { + select(n, document.getElementById('node' + n)); + } + } + } + + updateButtons(); + } + + function toggleSvgSelect(elem) { + // Walk up to immediate child of graph0 + while (elem != null && elem.parentElement != graph0) { + elem = elem.parentElement; + } + if (!elem) return; + + // Disable regexp mode. + regexpActive = false; + + const n = nodeId(elem); + if (n < 0) return; + if (selected.has(n)) { + unselect(n, elem); + } else { + select(n, elem); + } + updateButtons(); + } + + function unselect(n, elem) { + if (elem == null) return; + selected.delete(n); + setBackground(elem, false); + } + + function select(n, elem) { + if (elem == null) return; + selected.set(n, true); + setBackground(elem, true); + } + + function nodeId(elem) { + const id = elem.id; + if (!id) return -1; + if (!id.startsWith('node')) return -1; + const n = parseInt(id.slice(4), 10); + if (isNaN(n)) return -1; + if (n < 0 || n >= nodes.length) return -1; + return n; + } + + function setBackground(elem, set) { + // Handle table row highlighting. + if (elem.nodeName == 'TR') { + elem.classList.toggle('hilite', set); + return; + } + + // Handle svg element highlighting. + const p = findPolygon(elem); + if (p != null) { + if (set) { + origFill.set(p, p.style.fill); + p.style.fill = '#ccccff'; + } else if (origFill.has(p)) { + p.style.fill = origFill.get(p); + } + } + } + + function findPolygon(elem) { + if (elem.localName == 'polygon') return elem; + for (const c of elem.children) { + const p = findPolygon(c); + if (p != null) return p; + } + return null; + } + + // convert a string to a regexp that matches that string. 
+ function quotemeta(str) { + return str.replace(/([\\\.?+*\[\](){}|^$])/g, '\\$1'); + } + + function setSampleIndexLink(id) { + const elem = document.getElementById(id); + if (elem != null) { + setHrefParams(elem, function (params) { + params.set("si", id); + }); + } + } + + // Update id's href to reflect current selection whenever it is + // liable to be followed. + function makeSearchLinkDynamic(id) { + const elem = document.getElementById(id); + if (elem == null) return; + + // Most links copy current selection into the 'f' parameter, + // but Refine menu links are different. + let param = 'f'; + if (id == 'ignore') param = 'i'; + if (id == 'hide') param = 'h'; + if (id == 'show') param = 's'; + if (id == 'show-from') param = 'sf'; + + // We update on mouseenter so middle-click/right-click work properly. + elem.addEventListener('mouseenter', updater); + elem.addEventListener('touchstart', updater); + + function updater() { + // The selection can be in one of two modes: regexp-based or + // list-based. Construct regular expression depending on mode. + let re = regexpActive + ? search.value + : Array.from(selected.keys()).map(key => quotemeta(nodes[key])).join('|'); + + setHrefParams(elem, function (params) { + if (re != '') { + // For focus/show/show-from, forget old parameter. For others, add to re. + if (param != 'f' && param != 's' && param != 'sf' && params.has(param)) { + const old = params.get(param); + if (old != '') { + re += '|' + old; + } + } + params.set(param, re); + } else { + params.delete(param); + } + }); + } + } + + function setHrefParams(elem, paramSetter) { + let url = new URL(elem.href); + url.hash = ''; + + // Copy params from this page's URL. + const params = url.searchParams; + for (const p of new URLSearchParams(window.location.search)) { + params.set(p[0], p[1]); + } + + // Give the params to the setter to modify. 
+ paramSetter(params); + + elem.href = url.toString(); + } + + function handleTopClick(e) { + // Walk back until we find TR and then get the Name column (index 5) + let elem = e.target; + while (elem != null && elem.nodeName != 'TR') { + elem = elem.parentElement; + } + if (elem == null || elem.children.length < 6) return; + + e.preventDefault(); + const tr = elem; + const td = elem.children[5]; + if (td.nodeName != 'TD') return; + const name = td.innerText; + const index = nodes.indexOf(name); + if (index < 0) return; + + // Disable regexp mode. + regexpActive = false; + + if (selected.has(index)) { + unselect(index, elem); + } else { + select(index, elem); + } + updateButtons(); + } + + function updateButtons() { + const enable = (search.value != '' || selected.size != 0); + if (buttonsEnabled == enable) return; + buttonsEnabled = enable; + for (const id of ['focus', 'ignore', 'hide', 'show', 'show-from']) { + const link = document.getElementById(id); + if (link != null) { + link.classList.toggle('disabled', !enable); + } + } + } + + // Initialize button states + updateButtons(); + + // Setup event handlers + initMenus(); + if (svg != null) { + initPanAndZoom(svg, toggleSvgSelect); + } + if (toptable != null) { + toptable.addEventListener('mousedown', handleTopClick); + toptable.addEventListener('touchstart', handleTopClick); + } + + const ids = ['topbtn', 'graphbtn', 'flamegraph', 'peek', 'list', 'disasm', + 'focus', 'ignore', 'hide', 'show', 'show-from']; + ids.forEach(makeSearchLinkDynamic); + + const sampleIDs = [{{range .SampleTypes}}'{{.}}', {{end}}]; + sampleIDs.forEach(setSampleIndexLink); + + // Bind action to button with specified id. 
+ function addAction(id, action) { + const btn = document.getElementById(id); + if (btn != null) { + btn.addEventListener('click', action); + btn.addEventListener('touchstart', action); + } + } + + addAction('details', handleDetails); + initConfigManager(); + + search.addEventListener('input', handleSearch); + search.addEventListener('keydown', handleKey); + + // Give initial focus to main container so it can be scrolled using keys. + const main = document.getElementById('bodycontainer'); + if (main) { + main.focus(); + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html new file mode 100644 index 0000000..9866755 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html @@ -0,0 +1,103 @@ + + + + + {{.Title}} + {{template "css" .}} + + + + + {{template "header" .}} +
+
+
+
+
+
+ {{template "script" .}} + + + + + diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html new file mode 100644 index 0000000..a113549 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/graph.html @@ -0,0 +1,16 @@ + + + + + {{.Title}} + {{template "css" .}} + + + {{template "header" .}} +
+ {{.HTMLBody}} +
+ {{template "script" .}} + + + diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html new file mode 100644 index 0000000..66cabbb --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html @@ -0,0 +1,113 @@ +
+
+

pprof

+
+ + + + {{$sampleLen := len .SampleTypes}} + {{if gt $sampleLen 1}} + + {{end}} + + + + + + + +
+ +
+ +
+ {{.Title}} +
+ {{range .Legend}}
{{.}}
{{end}} +
+
+
+ +
+ +
+
Save options as
+ + {{range .Configs}}{{if .UserConfig}} + + +
+ +
+
Delete config
+
+ +
+ +
{{range .Errors}}
{{.}}
{{end}}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html new file mode 100644 index 0000000..9791cc7 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/plaintext.html @@ -0,0 +1,18 @@ + + + + + {{.Title}} + {{template "css" .}} + + + {{template "header" .}} +
+
+      {{.TextBody}}
+    
+
+ {{template "script" .}} + + + diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html new file mode 100644 index 0000000..3212bee --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/source.html @@ -0,0 +1,18 @@ + + + + + {{.Title}} + {{template "css" .}} + {{template "weblistcss" .}} + {{template "weblistjs" .}} + + + {{template "header" .}} +
+ {{.HTMLBody}} +
+ {{template "script" .}} + + + diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html new file mode 100644 index 0000000..86d9fcb --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/top.html @@ -0,0 +1,114 @@ + + + + + {{.Title}} + {{template "css" .}} + + + + {{template "header" .}} +
+ + + + + + + + + + + + + +
FlatFlat%Sum%CumCum%NameInlined?
+
+ {{template "script" .}} + + + diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go new file mode 100644 index 0000000..777fb90 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go @@ -0,0 +1,418 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "io" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" + "github.com/google/pprof/profile" +) + +var commentStart = "//:" // Sentinel for comments on options +var tailDigitsRE = regexp.MustCompile("[0-9]+$") + +// interactive starts a shell to read pprof commands. +func interactive(p *profile.Profile, o *plugin.Options) error { + // Enter command processing loop. + o.UI.SetAutoComplete(newCompleter(functionNames(p))) + configure("compact_labels", "true") + configHelp["sample_index"] += fmt.Sprintf("Or use sample_index=name, with name in %v.\n", sampleTypes(p)) + + // Do not wait for the visualizer to complete, to allow multiple + // graphs to be visualized simultaneously. 
+ interactiveMode = true + shortcuts := profileShortcuts(p) + + greetings(p, o.UI) + for { + input, err := o.UI.ReadLine("(pprof) ") + if err != nil { + if err != io.EOF { + return err + } + if input == "" { + return nil + } + } + + for _, input := range shortcuts.expand(input) { + // Process assignments of the form variable=value + if s := strings.SplitN(input, "=", 2); len(s) > 0 { + name := strings.TrimSpace(s[0]) + var value string + if len(s) == 2 { + value = s[1] + if comment := strings.LastIndex(value, commentStart); comment != -1 { + value = value[:comment] + } + value = strings.TrimSpace(value) + } + if isConfigurable(name) { + // All non-bool options require inputs + if len(s) == 1 && !isBoolConfig(name) { + o.UI.PrintErr(fmt.Errorf("please specify a value, e.g. %s=", name)) + continue + } + if name == "sample_index" { + // Error check sample_index=xxx to ensure xxx is a valid sample type. + index, err := p.SampleIndexByName(value) + if err != nil { + o.UI.PrintErr(err) + continue + } + if index < 0 || index >= len(p.SampleType) { + o.UI.PrintErr(fmt.Errorf("invalid sample_index %q", value)) + continue + } + value = p.SampleType[index].Type + } + if err := configure(name, value); err != nil { + o.UI.PrintErr(err) + } + continue + } + } + + tokens := strings.Fields(input) + if len(tokens) == 0 { + continue + } + + switch tokens[0] { + case "o", "options": + printCurrentOptions(p, o.UI) + continue + case "exit", "quit", "q": + return nil + case "help": + commandHelp(strings.Join(tokens[1:], " "), o.UI) + continue + } + + args, cfg, err := parseCommandLine(tokens) + if err == nil { + err = generateReportWrapper(p, args, cfg, o) + } + + if err != nil { + o.UI.PrintErr(err) + } + } + } +} + +var generateReportWrapper = generateReport // For testing purposes. + +// greetings prints a brief welcome and some overall profile +// information before accepting interactive commands. 
+func greetings(p *profile.Profile, ui plugin.UI) { + numLabelUnits := identifyNumLabelUnits(p, ui) + ropt, err := reportOptions(p, numLabelUnits, currentConfig()) + if err == nil { + rpt := report.New(p, ropt) + ui.Print(strings.Join(report.ProfileLabels(rpt), "\n")) + if rpt.Total() == 0 && len(p.SampleType) > 1 { + ui.Print(`No samples were found with the default sample value type.`) + ui.Print(`Try "sample_index" command to analyze different sample values.`, "\n") + } + } + ui.Print(`Entering interactive mode (type "help" for commands, "o" for options)`) +} + +// shortcuts represents composite commands that expand into a sequence +// of other commands. +type shortcuts map[string][]string + +func (a shortcuts) expand(input string) []string { + input = strings.TrimSpace(input) + if a != nil { + if r, ok := a[input]; ok { + return r + } + } + return []string{input} +} + +var pprofShortcuts = shortcuts{ + ":": []string{"focus=", "ignore=", "hide=", "tagfocus=", "tagignore="}, +} + +// profileShortcuts creates macros for convenience and backward compatibility. +func profileShortcuts(p *profile.Profile) shortcuts { + s := pprofShortcuts + // Add shortcuts for sample types + for _, st := range p.SampleType { + command := fmt.Sprintf("sample_index=%s", st.Type) + s[st.Type] = []string{command} + s["total_"+st.Type] = []string{"mean=0", command} + s["mean_"+st.Type] = []string{"mean=1", command} + } + return s +} + +func sampleTypes(p *profile.Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} + +func printCurrentOptions(p *profile.Profile, ui plugin.UI) { + var args []string + current := currentConfig() + for _, f := range configFields { + n := f.name + v := current.get(f) + comment := "" + switch { + case len(f.choices) > 0: + values := append([]string{}, f.choices...) 
+ sort.Strings(values) + comment = "[" + strings.Join(values, " | ") + "]" + case n == "sample_index": + st := sampleTypes(p) + if v == "" { + // Apply default (last sample index). + v = st[len(st)-1] + } + // Add comments for all sample types in profile. + comment = "[" + strings.Join(st, " | ") + "]" + case n == "source_path": + continue + case n == "nodecount" && v == "-1": + comment = "default" + case v == "": + // Add quotes for empty values. + v = `""` + } + if comment != "" { + comment = commentStart + " " + comment + } + args = append(args, fmt.Sprintf(" %-25s = %-20s %s", n, v, comment)) + } + sort.Strings(args) + ui.Print(strings.Join(args, "\n")) +} + +// parseCommandLine parses a command and returns the pprof command to +// execute and the configuration to use for the report. +func parseCommandLine(input []string) ([]string, config, error) { + cmd, args := input[:1], input[1:] + name := cmd[0] + + c := pprofCommands[name] + if c == nil { + // Attempt splitting digits on abbreviated commands (eg top10) + if d := tailDigitsRE.FindString(name); d != "" && d != name { + name = name[:len(name)-len(d)] + cmd[0], args = name, append([]string{d}, args...) + c = pprofCommands[name] + } + } + if c == nil { + if _, ok := configHelp[name]; ok { + value := "" + if len(args) > 0 { + value = args[0] + } + return nil, config{}, fmt.Errorf("did you mean: %s=%s", name, value) + } + return nil, config{}, fmt.Errorf("unrecognized command: %q", name) + } + + if c.hasParam { + if len(args) == 0 { + return nil, config{}, fmt.Errorf("command %s requires an argument", name) + } + cmd = append(cmd, args[0]) + args = args[1:] + } + + // Copy config since options set in the command line should not persist. 
+ vcopy := currentConfig() + + var focus, ignore string + for i := 0; i < len(args); i++ { + t := args[i] + if n, err := strconv.ParseInt(t, 10, 32); err == nil { + vcopy.NodeCount = int(n) + continue + } + switch t[0] { + case '>': + outputFile := t[1:] + if outputFile == "" { + i++ + if i >= len(args) { + return nil, config{}, fmt.Errorf("unexpected end of line after >") + } + outputFile = args[i] + } + vcopy.Output = outputFile + case '-': + if t == "--cum" || t == "-cum" { + vcopy.Sort = "cum" + continue + } + ignore = catRegex(ignore, t[1:]) + default: + focus = catRegex(focus, t) + } + } + + if name == "tags" { + if focus != "" { + vcopy.TagFocus = focus + } + if ignore != "" { + vcopy.TagIgnore = ignore + } + } else { + if focus != "" { + vcopy.Focus = focus + } + if ignore != "" { + vcopy.Ignore = ignore + } + } + if vcopy.NodeCount == -1 && (name == "text" || name == "top") { + vcopy.NodeCount = 10 + } + + return cmd, vcopy, nil +} + +func catRegex(a, b string) string { + if a != "" && b != "" { + return a + "|" + b + } + return a + b +} + +// commandHelp displays help and usage information for all Commands +// and Variables or a specific Command or Variable. +func commandHelp(args string, ui plugin.UI) { + if args == "" { + help := usage(false) + help = help + ` + : Clear focus/ignore/hide/tagfocus/tagignore + + type "help " for more information +` + + ui.Print(help) + return + } + + if c := pprofCommands[args]; c != nil { + ui.Print(c.help(args)) + return + } + + if help, ok := configHelp[args]; ok { + ui.Print(help + "\n") + return + } + + ui.PrintErr("Unknown command: " + args) +} + +// newCompleter creates an autocompletion function for a set of commands. 
+func newCompleter(fns []string) func(string) string { + return func(line string) string { + switch tokens := strings.Fields(line); len(tokens) { + case 0: + // Nothing to complete + case 1: + // Single token -- complete command name + if match := matchVariableOrCommand(tokens[0]); match != "" { + return match + } + case 2: + if tokens[0] == "help" { + if match := matchVariableOrCommand(tokens[1]); match != "" { + return tokens[0] + " " + match + } + return line + } + fallthrough + default: + // Multiple tokens -- complete using functions, except for tags + if cmd := pprofCommands[tokens[0]]; cmd != nil && tokens[0] != "tags" { + lastTokenIdx := len(tokens) - 1 + lastToken := tokens[lastTokenIdx] + if strings.HasPrefix(lastToken, "-") { + lastToken = "-" + functionCompleter(lastToken[1:], fns) + } else { + lastToken = functionCompleter(lastToken, fns) + } + return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ") + } + } + return line + } +} + +// matchVariableOrCommand attempts to match a string token to the prefix of a Command. +func matchVariableOrCommand(token string) string { + token = strings.ToLower(token) + var matches []string + for cmd := range pprofCommands { + if strings.HasPrefix(cmd, token) { + matches = append(matches, cmd) + } + } + matches = append(matches, completeConfig(token)...) + if len(matches) == 1 { + return matches[0] + } + return "" +} + +// functionCompleter replaces provided substring with a function +// name retrieved from a profile if a single match exists. Otherwise, +// it returns unchanged substring. It defaults to no-op if the profile +// is not specified. 
+func functionCompleter(substring string, fns []string) string { + found := "" + for _, fName := range fns { + if strings.Contains(fName, substring) { + if found != "" { + return substring + } + found = fName + } + } + if found != "" { + return found + } + return substring +} + +func functionNames(p *profile.Profile) []string { + var fns []string + for _, fn := range p.Function { + fns = append(fns, fn.Name) + } + return fns +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go new file mode 100644 index 0000000..6e8f9fc --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go @@ -0,0 +1,100 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/google/pprof/internal/binutils" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/symbolizer" + "github.com/google/pprof/internal/transport" +) + +// setDefaults returns a new plugin.Options with zero fields sets to +// sensible defaults. 
+func setDefaults(o *plugin.Options) *plugin.Options { + d := &plugin.Options{} + if o != nil { + *d = *o + } + if d.Writer == nil { + d.Writer = oswriter{} + } + if d.Flagset == nil { + d.Flagset = &GoFlags{} + } + if d.Obj == nil { + d.Obj = &binutils.Binutils{} + } + if d.UI == nil { + d.UI = &stdUI{r: bufio.NewReader(os.Stdin)} + } + if d.HTTPTransport == nil { + d.HTTPTransport = transport.New(d.Flagset) + } + if d.Sym == nil { + d.Sym = &symbolizer.Symbolizer{Obj: d.Obj, UI: d.UI, Transport: d.HTTPTransport} + } + return d +} + +type stdUI struct { + r *bufio.Reader +} + +func (ui *stdUI) ReadLine(prompt string) (string, error) { + os.Stdout.WriteString(prompt) + return ui.r.ReadString('\n') +} + +func (ui *stdUI) Print(args ...interface{}) { + ui.fprint(os.Stderr, args) +} + +func (ui *stdUI) PrintErr(args ...interface{}) { + ui.fprint(os.Stderr, args) +} + +func (ui *stdUI) IsTerminal() bool { + return false +} + +func (ui *stdUI) WantBrowser() bool { + return true +} + +func (ui *stdUI) SetAutoComplete(func(string) string) { +} + +func (ui *stdUI) fprint(f *os.File, args []interface{}) { + text := fmt.Sprint(args...) + if !strings.HasSuffix(text, "\n") { + text += "\n" + } + f.WriteString(text) +} + +// oswriter implements the Writer interface using a regular file. +type oswriter struct{} + +func (oswriter) Open(name string) (io.WriteCloser, error) { + f, err := os.Create(name) + return f, err +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go new file mode 100644 index 0000000..1e9154c --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go @@ -0,0 +1,159 @@ +package driver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" +) + +// settings holds pprof settings. +type settings struct { + // Configs holds a list of named UI configurations. 
+ Configs []namedConfig `json:"configs"` +} + +// namedConfig associates a name with a config. +type namedConfig struct { + Name string `json:"name"` + config +} + +// settingsFileName returns the name of the file where settings should be saved. +func settingsFileName() (string, error) { + // Return "pprof/settings.json" under os.UserConfigDir(). + dir, err := os.UserConfigDir() + if err != nil { + return "", err + } + return filepath.Join(dir, "pprof", "settings.json"), nil +} + +// readSettings reads settings from fname. +func readSettings(fname string) (*settings, error) { + data, err := ioutil.ReadFile(fname) + if err != nil { + if os.IsNotExist(err) { + return &settings{}, nil + } + return nil, fmt.Errorf("could not read settings: %w", err) + } + settings := &settings{} + if err := json.Unmarshal(data, settings); err != nil { + return nil, fmt.Errorf("could not parse settings: %w", err) + } + for i := range settings.Configs { + settings.Configs[i].resetTransient() + } + return settings, nil +} + +// writeSettings saves settings to fname. +func writeSettings(fname string, settings *settings) error { + data, err := json.MarshalIndent(settings, "", " ") + if err != nil { + return fmt.Errorf("could not encode settings: %w", err) + } + + // create the settings directory if it does not exist + // XDG specifies permissions 0700 when creating settings dirs: + // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return fmt.Errorf("failed to create settings directory: %w", err) + } + + if err := ioutil.WriteFile(fname, data, 0644); err != nil { + return fmt.Errorf("failed to write settings: %w", err) + } + return nil +} + +// configMenuEntry holds information for a single config menu entry. +type configMenuEntry struct { + Name string + URL string + Current bool // Is this the currently selected config? + UserConfig bool // Is this a user-provided config? 
+} + +// configMenu returns a list of items to add to a menu in the web UI. +func configMenu(fname string, u url.URL) []configMenuEntry { + // Start with system configs. + configs := []namedConfig{{Name: "Default", config: defaultConfig()}} + if settings, err := readSettings(fname); err == nil { + // Add user configs. + configs = append(configs, settings.Configs...) + } + + // Convert to menu entries. + result := make([]configMenuEntry, len(configs)) + lastMatch := -1 + for i, cfg := range configs { + dst, changed := cfg.config.makeURL(u) + if !changed { + lastMatch = i + } + // Use a relative URL to work in presence of stripping/redirects in webui.go. + rel := &url.URL{RawQuery: dst.RawQuery, ForceQuery: true} + result[i] = configMenuEntry{ + Name: cfg.Name, + URL: rel.String(), + UserConfig: (i != 0), + } + } + // Mark the last matching config as currennt + if lastMatch >= 0 { + result[lastMatch].Current = true + } + return result +} + +// editSettings edits settings by applying fn to them. +func editSettings(fname string, fn func(s *settings) error) error { + settings, err := readSettings(fname) + if err != nil { + return err + } + if err := fn(settings); err != nil { + return err + } + return writeSettings(fname, settings) +} + +// setConfig saves the config specified in request to fname. +func setConfig(fname string, request url.URL) error { + q := request.Query() + name := q.Get("config") + if name == "" { + return fmt.Errorf("invalid config name") + } + cfg := currentConfig() + if err := cfg.applyURL(q); err != nil { + return err + } + return editSettings(fname, func(s *settings) error { + for i, c := range s.Configs { + if c.Name == name { + s.Configs[i].config = cfg + return nil + } + } + s.Configs = append(s.Configs, namedConfig{Name: name, config: cfg}) + return nil + }) +} + +// removeConfig removes config from fname. 
+func removeConfig(fname, config string) error { + return editSettings(fname, func(s *settings) error { + for i, c := range s.Configs { + if c.Name == config { + s.Configs = append(s.Configs[:i], s.Configs[i+1:]...) + return nil + } + } + return fmt.Errorf("config %s not found", config) + }) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go new file mode 100644 index 0000000..62767e7 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go @@ -0,0 +1,80 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "regexp" + "strings" + + "github.com/google/pprof/third_party/svgpan" +) + +var ( + viewBox = regexp.MustCompile(``) +) + +// massageSVG enhances the SVG output from DOT to provide better +// panning inside a web browser. It uses the svgpan library, which is +// embedded into the svgpan.JSSource variable. +func massageSVG(svg string) string { + // Work around for dot bug which misses quoting some ampersands, + // resulting on unparsable SVG. + svg = strings.Replace(svg, "&;", "&;", -1) + + // Dot's SVG output is + // + // + // + // ... + // + // + // + // Change it to + // + // + + // ` + // + // + // ... 
+ // + // + // + + if loc := viewBox.FindStringIndex(svg); loc != nil { + svg = svg[:loc[0]] + + `` + string(svgpan.JSSource) + `` + + `` + + svg[loc[0]:] + } + + if loc := svgClose.FindStringIndex(svg); loc != nil { + svg = svg[:loc[0]] + + `` + + svg[loc[0]:] + } + + return svg +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go new file mode 100644 index 0000000..c43d599 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/tagroot.go @@ -0,0 +1,129 @@ +package driver + +import ( + "strings" + + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/profile" +) + +// addLabelNodes adds pseudo stack frames "label:value" to each Sample with +// labels matching the supplied keys. +// +// rootKeys adds frames at the root of the callgraph (first key becomes new root). +// leafKeys adds frames at the leaf of the callgraph (last key becomes new leaf). +// +// Returns whether there were matches found for the label keys. +func addLabelNodes(p *profile.Profile, rootKeys, leafKeys []string, outputUnit string) (rootm, leafm bool) { + // Find where to insert the new locations and functions at the end of + // their ID spaces. + var maxLocID uint64 + var maxFunctionID uint64 + for _, loc := range p.Location { + if loc.ID > maxLocID { + maxLocID = loc.ID + } + } + for _, f := range p.Function { + if f.ID > maxFunctionID { + maxFunctionID = f.ID + } + } + nextLocID := maxLocID + 1 + nextFuncID := maxFunctionID + 1 + + // Intern the new locations and functions we are generating. 
+ type locKey struct { + functionName, fileName string + } + locs := map[locKey]*profile.Location{} + + internLoc := func(locKey locKey) *profile.Location { + loc, found := locs[locKey] + if found { + return loc + } + + function := &profile.Function{ + ID: nextFuncID, + Name: locKey.functionName, + Filename: locKey.fileName, + } + nextFuncID++ + p.Function = append(p.Function, function) + + loc = &profile.Location{ + ID: nextLocID, + Line: []profile.Line{ + { + Function: function, + }, + }, + } + nextLocID++ + p.Location = append(p.Location, loc) + locs[locKey] = loc + return loc + } + + makeLabelLocs := func(s *profile.Sample, keys []string) ([]*profile.Location, bool) { + var locs []*profile.Location + var match bool + for i := range keys { + // Loop backwards, ensuring the first tag is closest to the root, + // and the last tag is closest to the leaves. + k := keys[len(keys)-1-i] + values := formatLabelValues(s, k, outputUnit) + if len(values) > 0 { + match = true + } + locKey := locKey{ + functionName: strings.Join(values, ","), + fileName: k, + } + loc := internLoc(locKey) + locs = append(locs, loc) + } + return locs, match + } + + for _, s := range p.Sample { + rootsToAdd, sampleMatchedRoot := makeLabelLocs(s, rootKeys) + if sampleMatchedRoot { + rootm = true + } + leavesToAdd, sampleMatchedLeaf := makeLabelLocs(s, leafKeys) + if sampleMatchedLeaf { + leafm = true + } + + var newLocs []*profile.Location + newLocs = append(newLocs, leavesToAdd...) + newLocs = append(newLocs, s.Location...) + newLocs = append(newLocs, rootsToAdd...) + s.Location = newLocs + } + return +} + +// formatLabelValues returns all the string and numeric labels in Sample, with +// the numeric labels formatted according to outputUnit. +func formatLabelValues(s *profile.Sample, k string, outputUnit string) []string { + var values []string + values = append(values, s.Label[k]...) 
+ numLabels := s.NumLabel[k] + numUnits := s.NumUnit[k] + if len(numLabels) != len(numUnits) && len(numUnits) != 0 { + return values + } + for i, numLabel := range numLabels { + var value string + if len(numUnits) != 0 { + value = measurement.ScaledLabel(numLabel, numUnits[i], outputUnit) + } else { + value = measurement.ScaledLabel(numLabel, "", "") + } + values = append(values, value) + } + return values +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go new file mode 100644 index 0000000..b6c8776 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "os" + "path/filepath" + "sync" +) + +// newTempFile returns a new output file in dir with the provided prefix and suffix. 
+func newTempFile(dir, prefix, suffix string) (*os.File, error) { + for index := 1; index < 10000; index++ { + switch f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix)), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666); { + case err == nil: + return f, nil + case !os.IsExist(err): + return nil, err + } + } + // Give up + return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix) +} + +var tempFiles []string +var tempFilesMu = sync.Mutex{} + +// deferDeleteTempFile marks a file to be deleted by next call to Cleanup() +func deferDeleteTempFile(path string) { + tempFilesMu.Lock() + tempFiles = append(tempFiles, path) + tempFilesMu.Unlock() +} + +// cleanupTempFiles removes any temporary files selected for deferred cleaning. +func cleanupTempFiles() error { + tempFilesMu.Lock() + defer tempFilesMu.Unlock() + var lastErr error + for _, f := range tempFiles { + if err := os.Remove(f); err != nil { + lastErr = err + } + } + tempFiles = nil + return lastErr +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go new file mode 100644 index 0000000..94f32e3 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go @@ -0,0 +1,68 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package driver + +import ( + "embed" + "fmt" + "html/template" + "os" + + "github.com/google/pprof/third_party/d3flamegraph" +) + +//go:embed html +var embeddedFiles embed.FS + +// addTemplates adds a set of template definitions to templates. +func addTemplates(templates *template.Template) { + // Load specified file. + loadFile := func(fname string) string { + data, err := embeddedFiles.ReadFile(fname) + if err != nil { + fmt.Fprintf(os.Stderr, "internal/driver: embedded file %q not found\n", + fname) + os.Exit(1) + } + return string(data) + } + loadCSS := func(fname string) string { + return `` + "\n" + } + loadJS := func(fname string) string { + return `` + "\n" + } + + // Define a named template with specified contents. + def := func(name, contents string) { + sub := template.New(name) + template.Must(sub.Parse(contents)) + template.Must(templates.AddParseTree(name, sub.Tree)) + } + + // Pre-packaged third-party files. + def("d3flamegraphscript", d3flamegraph.JSSource) + def("d3flamegraphcss", d3flamegraph.CSSSource) + + // Embeded files. + def("css", loadCSS("html/common.css")) + def("header", loadFile("html/header.html")) + def("graph", loadFile("html/graph.html")) + def("script", loadJS("html/common.js")) + def("top", loadFile("html/top.html")) + def("sourcelisting", loadFile("html/source.html")) + def("plaintext", loadFile("html/plaintext.html")) + def("flamegraph", loadFile("html/flamegraph.html")) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go new file mode 100644 index 0000000..0f3e8bf --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go @@ -0,0 +1,465 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "html/template" + "net" + "net/http" + gourl "net/url" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" + "github.com/google/pprof/profile" +) + +// webInterface holds the state needed for serving a browser based interface. +type webInterface struct { + prof *profile.Profile + options *plugin.Options + help map[string]string + templates *template.Template + settingsFile string +} + +func makeWebInterface(p *profile.Profile, opt *plugin.Options) (*webInterface, error) { + settingsFile, err := settingsFileName() + if err != nil { + return nil, err + } + templates := template.New("templategroup") + addTemplates(templates) + report.AddSourceTemplates(templates) + return &webInterface{ + prof: p, + options: opt, + help: make(map[string]string), + templates: templates, + settingsFile: settingsFile, + }, nil +} + +// maxEntries is the maximum number of entries to print for text interfaces. +const maxEntries = 50 + +// errorCatcher is a UI that captures errors for reporting to the browser. +type errorCatcher struct { + plugin.UI + errors []string +} + +func (ec *errorCatcher) PrintErr(args ...interface{}) { + ec.errors = append(ec.errors, strings.TrimSuffix(fmt.Sprintln(args...), "\n")) + ec.UI.PrintErr(args...) +} + +// webArgs contains arguments passed to templates in webhtml.go. 
+type webArgs struct { + Title string + Errors []string + Total int64 + SampleTypes []string + Legend []string + Help map[string]string + Nodes []string + HTMLBody template.HTML + TextBody string + Top []report.TextItem + FlameGraph template.JS + Configs []configMenuEntry +} + +func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, disableBrowser bool) error { + host, port, err := getHostAndPort(hostport) + if err != nil { + return err + } + interactiveMode = true + ui, err := makeWebInterface(p, o) + if err != nil { + return err + } + for n, c := range pprofCommands { + ui.help[n] = c.description + } + for n, help := range configHelp { + ui.help[n] = help + } + ui.help["details"] = "Show information about the profile and this view" + ui.help["graph"] = "Display profile as a directed graph" + ui.help["reset"] = "Show the entire profile" + ui.help["save_config"] = "Save current settings" + + server := o.HTTPServer + if server == nil { + server = defaultWebServer + } + args := &plugin.HTTPServerArgs{ + Hostport: net.JoinHostPort(host, strconv.Itoa(port)), + Host: host, + Port: port, + Handlers: map[string]http.Handler{ + "/": http.HandlerFunc(ui.dot), + "/top": http.HandlerFunc(ui.top), + "/disasm": http.HandlerFunc(ui.disasm), + "/source": http.HandlerFunc(ui.source), + "/peek": http.HandlerFunc(ui.peek), + "/flamegraph": http.HandlerFunc(ui.flamegraph), + "/saveconfig": http.HandlerFunc(ui.saveConfig), + "/deleteconfig": http.HandlerFunc(ui.deleteConfig), + "/download": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/vnd.google.protobuf+gzip") + w.Header().Set("Content-Disposition", "attachment;filename=profile.pb.gz") + p.Write(w) + }), + }, + } + + url := "http://" + args.Hostport + + o.UI.Print("Serving web UI on ", url) + + if o.UI.WantBrowser() && !disableBrowser { + go openBrowser(url, o) + } + return server(args) +} + +func getHostAndPort(hostport string) (string, int, 
error) { + host, portStr, err := net.SplitHostPort(hostport) + if err != nil { + return "", 0, fmt.Errorf("could not split http address: %v", err) + } + if host == "" { + host = "localhost" + } + var port int + if portStr == "" { + ln, err := net.Listen("tcp", net.JoinHostPort(host, "0")) + if err != nil { + return "", 0, fmt.Errorf("could not generate random port: %v", err) + } + port = ln.Addr().(*net.TCPAddr).Port + err = ln.Close() + if err != nil { + return "", 0, fmt.Errorf("could not generate random port: %v", err) + } + } else { + port, err = strconv.Atoi(portStr) + if err != nil { + return "", 0, fmt.Errorf("invalid port number: %v", err) + } + } + return host, port, nil +} +func defaultWebServer(args *plugin.HTTPServerArgs) error { + ln, err := net.Listen("tcp", args.Hostport) + if err != nil { + return err + } + isLocal := isLocalhost(args.Host) + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if isLocal { + // Only allow local clients + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil || !isLocalhost(host) { + http.Error(w, "permission denied", http.StatusForbidden) + return + } + } + h := args.Handlers[req.URL.Path] + if h == nil { + // Fall back to default behavior + h = http.DefaultServeMux + } + h.ServeHTTP(w, req) + }) + + // We serve the ui at /ui/ and redirect there from the root. This is done + // to surface any problems with serving the ui at a non-root early. 
See: + // + // https://github.com/google/pprof/pull/348 + mux := http.NewServeMux() + mux.Handle("/ui/", http.StripPrefix("/ui", handler)) + mux.Handle("/", redirectWithQuery("/ui")) + s := &http.Server{Handler: mux} + return s.Serve(ln) +} + +func redirectWithQuery(path string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + pathWithQuery := &gourl.URL{Path: path, RawQuery: r.URL.RawQuery} + http.Redirect(w, r, pathWithQuery.String(), http.StatusTemporaryRedirect) + } +} + +func isLocalhost(host string) bool { + for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} { + if host == v { + return true + } + } + return false +} + +func openBrowser(url string, o *plugin.Options) { + // Construct URL. + baseURL, _ := gourl.Parse(url) + current := currentConfig() + u, _ := current.makeURL(*baseURL) + + // Give server a little time to get ready. + time.Sleep(time.Millisecond * 500) + + for _, b := range browsers() { + args := strings.Split(b, " ") + if len(args) == 0 { + continue + } + viewer := exec.Command(args[0], append(args[1:], u.String())...) + viewer.Stderr = os.Stderr + if err := viewer.Start(); err == nil { + return + } + } + // No visualizer succeeded, so just print URL. + o.UI.PrintErr(u.String()) +} + +// makeReport generates a report for the specified command. +// If configEditor is not null, it is used to edit the config used for the report. 
+func (ui *webInterface) makeReport(w http.ResponseWriter, req *http.Request, + cmd []string, configEditor func(*config)) (*report.Report, []string) { + cfg := currentConfig() + if err := cfg.applyURL(req.URL.Query()); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + if configEditor != nil { + configEditor(&cfg) + } + catcher := &errorCatcher{UI: ui.options.UI} + options := *ui.options + options.UI = catcher + _, rpt, err := generateRawReport(ui.prof, cmd, cfg, &options) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + return rpt, catcher.errors +} + +// render generates html using the named template based on the contents of data. +func (ui *webInterface) render(w http.ResponseWriter, req *http.Request, tmpl string, + rpt *report.Report, errList, legend []string, data webArgs) { + file := getFromLegend(legend, "File: ", "unknown") + profile := getFromLegend(legend, "Type: ", "unknown") + data.Title = file + " " + profile + data.Errors = errList + data.Total = rpt.Total() + data.SampleTypes = sampleTypes(ui.prof) + data.Legend = legend + data.Help = ui.help + data.Configs = configMenu(ui.settingsFile, *req.URL) + + html := &bytes.Buffer{} + if err := ui.templates.ExecuteTemplate(html, tmpl, data); err != nil { + http.Error(w, "internal template error", http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Write(html.Bytes()) +} + +// dot generates a web page containing an svg diagram. +func (ui *webInterface) dot(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"svg"}, nil) + if rpt == nil { + return // error already reported + } + + // Generate dot graph. 
+ g, config := report.GetDOT(rpt) + legend := config.Labels + config.Labels = nil + dot := &bytes.Buffer{} + graph.ComposeDot(dot, g, &graph.DotAttributes{}, config) + + // Convert to svg. + svg, err := dotToSvg(dot.Bytes()) + if err != nil { + http.Error(w, "Could not execute dot; may need to install graphviz.", + http.StatusNotImplemented) + ui.options.UI.PrintErr("Failed to execute dot. Is Graphviz installed?\n", err) + return + } + + // Get all node names into an array. + nodes := []string{""} // dot starts with node numbered 1 + for _, n := range g.Nodes { + nodes = append(nodes, n.Info.Name) + } + + ui.render(w, req, "graph", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(string(svg)), + Nodes: nodes, + }) +} + +func dotToSvg(dot []byte) ([]byte, error) { + cmd := exec.Command("dot", "-Tsvg") + out := &bytes.Buffer{} + cmd.Stdin, cmd.Stdout, cmd.Stderr = bytes.NewBuffer(dot), out, os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + // Fix dot bug related to unquoted ampersands. + svg := bytes.Replace(out.Bytes(), []byte("&;"), []byte("&;"), -1) + + // Cleanup for embedding by dropping stuff before the start. + if pos := bytes.Index(svg, []byte("= 0 { + svg = svg[pos:] + } + return svg, nil +} + +func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"top"}, func(cfg *config) { + cfg.NodeCount = 500 + }) + if rpt == nil { + return // error already reported + } + top, legend := report.TextItems(rpt) + var nodes []string + for _, item := range top { + nodes = append(nodes, item.Name) + } + + ui.render(w, req, "top", rpt, errList, legend, webArgs{ + Top: top, + Nodes: nodes, + }) +} + +// disasm generates a web page containing disassembly. 
+func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { + args := []string{"disasm", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, nil) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.PrintAssembly(out, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) + +} + +// source generates a web page containing source code annotated with profile +// data. +func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { + args := []string{"weblist", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, nil) + if rpt == nil { + return // error already reported + } + + // Generate source listing. + var body bytes.Buffer + if err := report.PrintWebList(&body, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "sourcelisting", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(body.String()), + }) +} + +// peek generates a web page listing callers/callers. 
+func (ui *webInterface) peek(w http.ResponseWriter, req *http.Request) { + args := []string{"peek", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, func(cfg *config) { + cfg.Granularity = "lines" + }) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.Generate(out, rpt, ui.options.Obj); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) +} + +// saveConfig saves URL configuration. +func (ui *webInterface) saveConfig(w http.ResponseWriter, req *http.Request) { + if err := setConfig(ui.settingsFile, *req.URL); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// deleteConfig deletes a configuration. +func (ui *webInterface) deleteConfig(w http.ResponseWriter, req *http.Request) { + name := req.URL.Query().Get("config") + if err := removeConfig(ui.settingsFile, name); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// getFromLegend returns the suffix of an entry in legend that starts +// with param. It returns def if no such entry is found. +func getFromLegend(legend []string, param, def string) string { + for _, s := range legend { + if strings.HasPrefix(s, param) { + return s[len(param):] + } + } + return def +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go new file mode 100644 index 0000000..718481b --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go @@ -0,0 +1,383 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package elfexec provides utility routines to examine ELF binaries. +package elfexec + +import ( + "bufio" + "debug/elf" + "encoding/binary" + "fmt" + "io" +) + +const ( + maxNoteSize = 1 << 20 // in bytes + noteTypeGNUBuildID = 3 +) + +// elfNote is the payload of a Note Section in an ELF file. +type elfNote struct { + Name string // Contents of the "name" field, omitting the trailing zero byte. + Desc []byte // Contents of the "desc" field. + Type uint32 // Contents of the "type" field. +} + +// parseNotes returns the notes from a SHT_NOTE section or PT_NOTE segment. +func parseNotes(reader io.Reader, alignment int, order binary.ByteOrder) ([]elfNote, error) { + r := bufio.NewReader(reader) + + // padding returns the number of bytes required to pad the given size to an + // alignment boundary. 
+ padding := func(size int) int { + return ((size + (alignment - 1)) &^ (alignment - 1)) - size + } + + var notes []elfNote + for { + noteHeader := make([]byte, 12) // 3 4-byte words + if _, err := io.ReadFull(r, noteHeader); err == io.EOF { + break + } else if err != nil { + return nil, err + } + namesz := order.Uint32(noteHeader[0:4]) + descsz := order.Uint32(noteHeader[4:8]) + typ := order.Uint32(noteHeader[8:12]) + + if uint64(namesz) > uint64(maxNoteSize) { + return nil, fmt.Errorf("note name too long (%d bytes)", namesz) + } + var name string + if namesz > 0 { + // Documentation differs as to whether namesz is meant to include the + // trailing zero, but everyone agrees that name is null-terminated. + // So we'll just determine the actual length after the fact. + var err error + name, err = r.ReadString('\x00') + if err == io.EOF { + return nil, fmt.Errorf("missing note name (want %d bytes)", namesz) + } else if err != nil { + return nil, err + } + namesz = uint32(len(name)) + name = name[:len(name)-1] + } + + // Drop padding bytes until the desc field. + for n := padding(len(noteHeader) + int(namesz)); n > 0; n-- { + if _, err := r.ReadByte(); err == io.EOF { + return nil, fmt.Errorf( + "missing %d bytes of padding after note name", n) + } else if err != nil { + return nil, err + } + } + + if uint64(descsz) > uint64(maxNoteSize) { + return nil, fmt.Errorf("note desc too long (%d bytes)", descsz) + } + desc := make([]byte, int(descsz)) + if _, err := io.ReadFull(r, desc); err == io.EOF { + return nil, fmt.Errorf("missing desc (want %d bytes)", len(desc)) + } else if err != nil { + return nil, err + } + + notes = append(notes, elfNote{Name: name, Desc: desc, Type: typ}) + + // Drop padding bytes until the next note or the end of the section, + // whichever comes first. + for n := padding(len(desc)); n > 0; n-- { + if _, err := r.ReadByte(); err == io.EOF { + // We hit the end of the section before an alignment boundary. 
+ // This can happen if this section is at the end of the file or the next + // section has a smaller alignment requirement. + break + } else if err != nil { + return nil, err + } + } + } + return notes, nil +} + +// GetBuildID returns the GNU build-ID for an ELF binary. +// +// If no build-ID was found but the binary was read without error, it returns +// (nil, nil). +func GetBuildID(binary io.ReaderAt) ([]byte, error) { + f, err := elf.NewFile(binary) + if err != nil { + return nil, err + } + + findBuildID := func(notes []elfNote) ([]byte, error) { + var buildID []byte + for _, note := range notes { + if note.Name == "GNU" && note.Type == noteTypeGNUBuildID { + if buildID == nil { + buildID = note.Desc + } else { + return nil, fmt.Errorf("multiple build ids found, don't know which to use") + } + } + } + return buildID, nil + } + + for _, p := range f.Progs { + if p.Type != elf.PT_NOTE { + continue + } + notes, err := parseNotes(p.Open(), int(p.Align), f.ByteOrder) + if err != nil { + return nil, err + } + if b, err := findBuildID(notes); b != nil || err != nil { + return b, err + } + } + for _, s := range f.Sections { + if s.Type != elf.SHT_NOTE { + continue + } + notes, err := parseNotes(s.Open(), int(s.Addralign), f.ByteOrder) + if err != nil { + return nil, err + } + if b, err := findBuildID(notes); b != nil || err != nil { + return b, err + } + } + return nil, nil +} + +// kernelBase calculates the base for kernel mappings, which usually require +// special handling. For kernel mappings, tools (like perf) use the address of +// the kernel relocation symbol (_text or _stext) as the mmap start. Additionally, +// for obfuscation, ChromeOS profiles have the kernel image remapped to the 0-th page. +func kernelBase(loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, bool) { + const ( + // PAGE_OFFSET for PowerPC64, see arch/powerpc/Kconfig in the kernel sources. 
+ pageOffsetPpc64 = 0xc000000000000000 + pageSize = 4096 + ) + + if loadSegment.Vaddr == start-offset { + return offset, true + } + if start == 0 && limit != 0 && stextOffset != nil { + // ChromeOS remaps its kernel to 0. Nothing else should come + // down this path. Empirical values: + // VADDR=0xffffffff80200000 + // stextOffset=0xffffffff80200198 + return start - *stextOffset, true + } + if start >= loadSegment.Vaddr && limit > start && (offset == 0 || offset == pageOffsetPpc64 || offset == start) { + // Some kernels look like: + // VADDR=0xffffffff80200000 + // stextOffset=0xffffffff80200198 + // Start=0xffffffff83200000 + // Limit=0xffffffff84200000 + // Offset=0 (0xc000000000000000 for PowerPC64) (== Start for ASLR kernel) + // So the base should be: + if stextOffset != nil && (start%pageSize) == (*stextOffset%pageSize) { + // perf uses the address of _stext as start. Some tools may + // adjust for this before calling GetBase, in which case the page + // alignment should be different from that of stextOffset. + return start - *stextOffset, true + } + + return start - loadSegment.Vaddr, true + } + if start%pageSize != 0 && stextOffset != nil && *stextOffset%pageSize == start%pageSize { + // ChromeOS remaps its kernel to 0 + start%pageSize. Nothing + // else should come down this path. Empirical values: + // start=0x198 limit=0x2f9fffff offset=0 + // VADDR=0xffffffff81000000 + // stextOffset=0xffffffff81000198 + return start - *stextOffset, true + } + return 0, false +} + +// GetBase determines the base address to subtract from virtual +// address to get symbol table address. For an executable, the base +// is 0. Otherwise, it's a shared library, and the base is the +// address where the mapping starts. The kernel needs special handling. 
func GetBase(fh *elf.FileHeader, loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, error) {

	if start == 0 && offset == 0 && (limit == ^uint64(0) || limit == 0) {
		// Some tools may introduce a fake mapping that spans the entire
		// address space. Assume that the address has already been
		// adjusted, so no additional base adjustment is necessary.
		return 0, nil
	}

	switch fh.Type {
	case elf.ET_EXEC:
		if loadSegment == nil {
			// Assume fixed-address executable and so no adjustment.
			return 0, nil
		}
		if stextOffset == nil && start > 0 && start < 0x8000000000000000 {
			// A regular user-mode executable. Compute the base offset using same
			// arithmetics as in ET_DYN case below, see the explanation there.
			// Ideally, the condition would just be "stextOffset == nil" as that
			// represents the address of _stext symbol in the vmlinux image. Alas,
			// the caller may skip reading it from the binary (it's expensive to scan
			// all the symbols) and so it may be nil even for the kernel executable.
			// So additionally check that the start is within the user-mode half of
			// the 64-bit address space.
			return start - offset + loadSegment.Off - loadSegment.Vaddr, nil
		}
		// Various kernel heuristics and cases are handled separately.
		if base, match := kernelBase(loadSegment, stextOffset, start, limit, offset); match {
			return base, nil
		}
		// ChromeOS can remap its kernel to 0, and the caller might have not found
		// the _stext symbol. Split this case from kernelBase() above, since we don't
		// want to apply it to an ET_DYN user-mode executable.
		if start == 0 && limit != 0 && stextOffset == nil {
			return start - loadSegment.Vaddr, nil
		}

		return 0, fmt.Errorf("don't know how to handle EXEC segment: %v start=0x%x limit=0x%x offset=0x%x", *loadSegment, start, limit, offset)
	case elf.ET_REL:
		if offset != 0 {
			return 0, fmt.Errorf("don't know how to handle mapping.Offset")
		}
		return start, nil
	case elf.ET_DYN:
		// The process mapping information, start = start of virtual address range,
		// and offset = offset in the executable file of the start address, tells us
		// that a runtime virtual address x maps to a file offset
		// fx = x - start + offset.
		if loadSegment == nil {
			return start - offset, nil
		}
		// Kernels compiled as PIE can be ET_DYN as well. Use heuristic, similar to
		// the ET_EXEC case above.
		if base, match := kernelBase(loadSegment, stextOffset, start, limit, offset); match {
			return base, nil
		}
		// The program header, if not nil, indicates the offset in the file where
		// the executable segment is located (loadSegment.Off), and the base virtual
		// address where the first byte of the segment is loaded
		// (loadSegment.Vaddr). A file offset fx maps to a virtual (symbol) address
		// sx = fx - loadSegment.Off + loadSegment.Vaddr.
		//
		// Thus, a runtime virtual address x maps to a symbol address
		// sx = x - start + offset - loadSegment.Off + loadSegment.Vaddr.
		return start - offset + loadSegment.Off - loadSegment.Vaddr, nil
	}
	return 0, fmt.Errorf("don't know how to handle FileHeader.Type %v", fh.Type)
}

// FindTextProgHeader finds the program segment header containing the .text
// section or nil if the segment cannot be found.
func FindTextProgHeader(f *elf.File) *elf.ProgHeader {
	for _, s := range f.Sections {
		if s.Name == ".text" {
			// Find the LOAD segment containing the .text section.
			for _, p := range f.Progs {
				if p.Type == elf.PT_LOAD && p.Flags&elf.PF_X != 0 && s.Addr >= p.Vaddr && s.Addr < p.Vaddr+p.Memsz {
					return &p.ProgHeader
				}
			}
		}
	}
	return nil
}

// ProgramHeadersForMapping returns the program segment headers that overlap
// the runtime mapping with file offset mapOff and memory size mapSz. We skip
// over segments with zero file size because their file offset values are
// unreliable. Even if overlapping, a segment is not selected if its aligned
// file offset is greater than the mapping file offset, or if the mapping
// includes the last page of the segment, but not the full segment and the
// mapping includes additional pages after the segment end.
// The function returns a slice of pointers to the headers in the input
// slice, which are valid only while phdrs is not modified or discarded.
func ProgramHeadersForMapping(phdrs []elf.ProgHeader, mapOff, mapSz uint64) []*elf.ProgHeader {
	const (
		// pageSize defines the virtual memory page size used by the loader. This
		// value is dependent on the memory management unit of the CPU. The page
		// size is 4KB virtually on all the architectures that we care about, so we
		// define this metric as a constant. If we encounter architectures where
		// page size is not 4KB, we must try to guess the page size on the system
		// where the profile was collected, possibly using the architecture
		// specified in the ELF file header.
		pageSize       = 4096
		pageOffsetMask = pageSize - 1
	)
	mapLimit := mapOff + mapSz
	var headers []*elf.ProgHeader
	for i := range phdrs {
		p := &phdrs[i]
		// Skip over segments with zero file size. Their file offsets can have
		// arbitrary values, see b/195427553.
		if p.Filesz == 0 {
			continue
		}
		segLimit := p.Off + p.Memsz
		// The segment must overlap the mapping.
		if p.Type == elf.PT_LOAD && mapOff < segLimit && p.Off < mapLimit {
			// If the mapping offset is strictly less than the page aligned segment
			// offset, then this mapping comes from a different segment, fixes
			// b/179920361.
			alignedSegOffset := uint64(0)
			if p.Off > (p.Vaddr & pageOffsetMask) {
				alignedSegOffset = p.Off - (p.Vaddr & pageOffsetMask)
			}
			if mapOff < alignedSegOffset {
				continue
			}
			// If the mapping starts in the middle of the segment, it covers less than
			// one page of the segment, and it extends at least one page past the
			// segment, then this mapping comes from a different segment.
			if mapOff > p.Off && (segLimit < mapOff+pageSize) && (mapLimit >= segLimit+pageSize) {
				continue
			}
			headers = append(headers, p)
		}
	}
	return headers
}

// HeaderForFileOffset attempts to identify a unique program header that
// includes the given file offset. It returns an error if it cannot identify a
// unique header.
func HeaderForFileOffset(headers []*elf.ProgHeader, fileOffset uint64) (*elf.ProgHeader, error) {
	var ph *elf.ProgHeader
	for _, h := range headers {
		if fileOffset >= h.Off && fileOffset < h.Off+h.Memsz {
			if ph != nil {
				// Assuming no other bugs, this can only happen if we have two or
				// more small program segments that fit on the same page, and a
				// segment other than the last one includes uninitialized data, or
				// if the debug binary used for symbolization is stripped of some
				// sections, so segment file sizes are smaller than memory sizes.
				return nil, fmt.Errorf("found second program header (%#v) that matches file offset %x, first program header is %#v. Is this a stripped binary, or does the first program segment contain uninitialized data?", *h, fileOffset, *ph)
			}
			ph = h
		}
	}
	if ph == nil {
		return nil, fmt.Errorf("no program header matches file offset %x", fileOffset)
	}
	return ph, nil
}

// ---- file: src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go ----

// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package graph

import (
	"fmt"
	"io"
	"math"
	"path/filepath"
	"strings"

	"github.com/google/pprof/internal/measurement"
)

// DotAttributes contains details about the graph itself, giving
// insight into how its elements should be rendered.
type DotAttributes struct {
	Nodes map[*Node]*DotNodeAttributes // A map allowing each Node to have its own visualization option
}

// DotNodeAttributes contains Node specific visualization options.
type DotNodeAttributes struct {
	Shape       string                 // The optional shape of the node when rendered visually
	Bold        bool                   // If the node should be bold or not
	Peripheries int                    // An optional number of borders to place around a node
	URL         string                 // An optional url link to add to a node
	Formatter   func(*NodeInfo) string // An optional formatter for the node's label
}

// DotConfig contains attributes about how a graph should be
// constructed and how it should look.
type DotConfig struct {
	Title     string   // The title of the DOT graph
	LegendURL string   // The URL to link to from the legend.
	Labels    []string // The labels for the DOT's legend

	FormatValue func(int64) string // A formatting function for values
	Total       int64              // The total weight of the graph, used to compute percentages
}

const maxNodelets = 4 // Number of nodelets for labels (both numeric and non)

// ComposeDot creates and writes a graph in the DOT format to the writer, using
// the configurations given.
func ComposeDot(w io.Writer, g *Graph, a *DotAttributes, c *DotConfig) {
	builder := &builder{w, a, c}

	// Begin constructing DOT by adding a title and legend.
	builder.start()
	defer builder.finish()
	builder.addLegend()

	if len(g.Nodes) == 0 {
		return
	}

	// Preprocess graph to get id map and find max flat.
	nodeIDMap := make(map[*Node]int)
	hasNodelets := make(map[*Node]bool)

	maxFlat := float64(abs64(g.Nodes[0].FlatValue()))
	for i, n := range g.Nodes {
		nodeIDMap[n] = i + 1
		if float64(abs64(n.FlatValue())) > maxFlat {
			maxFlat = float64(abs64(n.FlatValue()))
		}
	}

	edges := EdgeMap{}

	// Add nodes and nodelets to DOT builder.
	for _, n := range g.Nodes {
		builder.addNode(n, nodeIDMap[n], maxFlat)
		hasNodelets[n] = builder.addNodelets(n, nodeIDMap[n])

		// Collect all edges. Use a fake node to support multiple incoming edges.
		for _, e := range n.Out {
			edges[&Node{}] = e
		}
	}

	// Add edges to DOT builder. Sort edges by frequency as a hint to the graph layout engine.
	for _, e := range edges.Sort() {
		builder.addEdge(e, nodeIDMap[e.Src], nodeIDMap[e.Dest], hasNodelets[e.Src])
	}
}

// builder wraps an io.Writer and understands how to compose DOT formatted elements.
type builder struct {
	io.Writer
	attributes *DotAttributes
	config     *DotConfig
}

// start generates a title and initial node in DOT format.
func (b *builder) start() {
	graphname := "unnamed"
	if b.config.Title != "" {
		graphname = b.config.Title
	}
	fmt.Fprintln(b, `digraph "`+graphname+`" {`)
	fmt.Fprintln(b, `node [style=filled fillcolor="#f8f8f8"]`)
}

// finish closes the opening curly bracket in the constructed DOT buffer.
func (b *builder) finish() {
	fmt.Fprintln(b, "}")
}

// addLegend generates a legend in DOT format.
func (b *builder) addLegend() {
	labels := b.config.Labels
	if len(labels) == 0 {
		return
	}
	title := labels[0]
	fmt.Fprintf(b, `subgraph cluster_L { "%s" [shape=box fontsize=16`, escapeForDot(title))
	fmt.Fprintf(b, ` label="%s\l"`, strings.Join(escapeAllForDot(labels), `\l`))
	if b.config.LegendURL != "" {
		fmt.Fprintf(b, ` URL="%s" target="_blank"`, b.config.LegendURL)
	}
	if b.config.Title != "" {
		fmt.Fprintf(b, ` tooltip="%s"`, b.config.Title)
	}
	fmt.Fprintf(b, "] }\n")
}

// addNode generates a graph node in DOT format.
func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) {
	flat, cum := node.FlatValue(), node.CumValue()
	attrs := b.attributes.Nodes[node]

	// Populate label for node.
	var label string
	if attrs != nil && attrs.Formatter != nil {
		label = attrs.Formatter(&node.Info)
	} else {
		label = multilinePrintableName(&node.Info)
	}

	// Append the flat value (and a cumulative line when it differs).
	flatValue := b.config.FormatValue(flat)
	if flat != 0 {
		label = label + fmt.Sprintf(`%s (%s)`,
			flatValue,
			strings.TrimSpace(measurement.Percentage(flat, b.config.Total)))
	} else {
		label = label + "0"
	}
	cumValue := flatValue
	if cum != flat {
		if flat != 0 {
			label = label + `\n`
		} else {
			label = label + " "
		}
		cumValue = b.config.FormatValue(cum)
		label = label + fmt.Sprintf(`of %s (%s)`,
			cumValue,
			strings.TrimSpace(measurement.Percentage(cum, b.config.Total)))
	}

	// Scale font sizes from 8 to 24 based on percentage of flat frequency.
	// Use non linear growth to emphasize the size difference.
	baseFontSize, maxFontGrowth := 8, 16.0
	fontSize := baseFontSize
	if maxFlat != 0 && flat != 0 && float64(abs64(flat)) <= maxFlat {
		fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(abs64(flat))/maxFlat)))
	}

	// Determine node shape.
	shape := "box"
	if attrs != nil && attrs.Shape != "" {
		shape = attrs.Shape
	}

	// Create DOT attribute for node.
	attr := fmt.Sprintf(`label="%s" id="node%d" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`,
		label, nodeID, fontSize, shape, escapeForDot(node.Info.PrintableName()), cumValue,
		dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), false),
		dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), true))

	// Add on extra attributes if provided.
	if attrs != nil {
		// Make bold if specified.
		if attrs.Bold {
			attr += ` style="bold,filled"`
		}

		// Add peripheries if specified.
		if attrs.Peripheries != 0 {
			attr += fmt.Sprintf(` peripheries=%d`, attrs.Peripheries)
		}

		// Add URL if specified. target="_blank" forces the link to open in a new tab.
		if attrs.URL != "" {
			attr += fmt.Sprintf(` URL="%s" target="_blank"`, attrs.URL)
		}
	}

	fmt.Fprintf(b, "N%d [%s]\n", nodeID, attr)
}

// addNodelets generates the DOT boxes for the node tags if they exist.
func (b *builder) addNodelets(node *Node, nodeID int) bool {
	var nodelets string

	// Populate two Tag slices, one for LabelTags and one for NumericTags.
	var ts []*Tag
	lnts := make(map[string][]*Tag)
	for _, t := range node.LabelTags {
		ts = append(ts, t)
	}
	for l, tm := range node.NumericTags {
		for _, t := range tm {
			lnts[l] = append(lnts[l], t)
		}
	}

	// For leaf nodes, print cumulative tags (includes weight from
	// children that have been deleted).
	// For internal nodes, print only flat tags.
	flatTags := len(node.Out) > 0

	// Select the top maxNodelets alphanumeric labels by weight.
	SortTags(ts, flatTags)
	if len(ts) > maxNodelets {
		ts = ts[:maxNodelets]
	}
	for i, t := range ts {
		w := t.CumValue()
		if flatTags {
			w = t.FlatValue()
		}
		if w == 0 {
			continue
		}
		weight := b.config.FormatValue(w)
		nodelets += fmt.Sprintf(`N%d_%d [label = "%s" id="N%d_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, nodeID, i, weight)
		nodelets += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"]`+"\n", nodeID, nodeID, i, weight, weight, weight)
		if nts := lnts[t.Name]; nts != nil {
			nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i))
		}
	}

	if nts := lnts[""]; nts != nil {
		nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID))
	}

	fmt.Fprint(b, nodelets)
	return nodelets != ""
}

// numericNodelets generates DOT boxes for numeric tags, collapsed into ranges.
func (b *builder) numericNodelets(nts []*Tag, maxNumNodelets int, flatTags bool, source string) string {
	nodelets := ""

	// Collapse numeric labels into maxNumNodelets buckets, of the form:
	// 1MB..2MB, 3MB..5MB, ...
	for j, t := range b.collapsedTags(nts, maxNumNodelets, flatTags) {
		w, attr := t.CumValue(), ` style="dotted"`
		if flatTags || t.FlatValue() == t.CumValue() {
			w, attr = t.FlatValue(), ""
		}
		if w != 0 {
			weight := b.config.FormatValue(w)
			nodelets += fmt.Sprintf(`N%s_%d [label = "%s" id="N%s_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, source, j, weight)
			nodelets += fmt.Sprintf(`%s -> N%s_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"%s]`+"\n", source, source, j, weight, weight, weight, attr)
		}
	}
	return nodelets
}

// addEdge generates a graph edge in DOT format.
func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) {
	var inline string
	if edge.Inline {
		inline = `\n (inline)`
	}
	w := b.config.FormatValue(edge.WeightValue())
	attr := fmt.Sprintf(`label=" %s%s"`, w, inline)
	if b.config.Total != 0 {
		// Note: edge.weight > b.config.Total is possible for profile diffs.
		if weight := 1 + int(min64(abs64(edge.WeightValue()*100/b.config.Total), 100)); weight > 1 {
			attr = fmt.Sprintf(`%s weight=%d`, attr, weight)
		}
		if width := 1 + int(min64(abs64(edge.WeightValue()*5/b.config.Total), 5)); width > 1 {
			attr = fmt.Sprintf(`%s penwidth=%d`, attr, width)
		}
		attr = fmt.Sprintf(`%s color="%s"`, attr,
			dotColor(float64(edge.WeightValue())/float64(abs64(b.config.Total)), false))
	}
	arrow := "->"
	if edge.Residual {
		arrow = "..."
	}
	tooltip := fmt.Sprintf(`"%s %s %s (%s)"`,
		escapeForDot(edge.Src.Info.PrintableName()), arrow,
		escapeForDot(edge.Dest.Info.PrintableName()), w)
	attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`, attr, tooltip, tooltip)

	if edge.Residual {
		attr = attr + ` style="dotted"`
	}

	if hasNodelets {
		// Separate children further if source has tags.
		attr = attr + " minlen=2"
	}

	fmt.Fprintf(b, "N%d -> N%d [%s]\n", from, to, attr)
}

// dotColor returns a color for the given score (between -1.0 and
// 1.0), with -1.0 colored green, 0.0 colored grey, and 1.0 colored
// red. If isBackground is true, then a light (low-saturation)
// color is returned (suitable for use as a background color);
// otherwise, a darker color is returned (suitable for use as a
// foreground color).
func dotColor(score float64, isBackground bool) string {
	// A float between 0.0 and 1.0, indicating the extent to which
	// colors should be shifted away from grey (to make positive and
	// negative values easier to distinguish, and to make more use of
	// the color range.)
	const shift = 0.7

	// Saturation and value (in hsv colorspace) for background colors.
	const bgSaturation = 0.1
	const bgValue = 0.93

	// Saturation and value (in hsv colorspace) for foreground colors.
	const fgSaturation = 1.0
	const fgValue = 0.7

	// Choose saturation and value based on isBackground.
	var saturation float64
	var value float64
	if isBackground {
		saturation = bgSaturation
		value = bgValue
	} else {
		saturation = fgSaturation
		value = fgValue
	}

	// Limit the score values to the range [-1.0, 1.0].
	score = math.Max(-1.0, math.Min(1.0, score))

	// Reduce saturation near score=0 (so it is colored grey, rather than yellow).
	if math.Abs(score) < 0.2 {
		saturation *= math.Abs(score) / 0.2
	}

	// Apply 'shift' to move scores away from 0.0 (grey).
	if score > 0.0 {
		score = math.Pow(score, (1.0 - shift))
	}
	if score < 0.0 {
		score = -math.Pow(-score, (1.0 - shift))
	}

	var r, g, b float64 // red, green, blue
	if score < 0.0 {
		g = value
		r = value * (1 + saturation*score)
	} else {
		r = value
		g = value * (1 - saturation*score)
	}
	b = value * (1 - saturation)
	return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0))
}

// multilinePrintableName formats a node name for display, splitting
// long qualified names across lines with DOT's \n escape.
func multilinePrintableName(info *NodeInfo) string {
	infoCopy := *info
	infoCopy.Name = escapeForDot(ShortenFunctionName(infoCopy.Name))
	infoCopy.Name = strings.Replace(infoCopy.Name, "::", `\n`, -1)
	// Go type parameters are reported as "[...]" by Go pprof profiles.
	// Keep this ellipsis rather than replacing with newlines below.
	infoCopy.Name = strings.Replace(infoCopy.Name, "[...]", "[…]", -1)
	infoCopy.Name = strings.Replace(infoCopy.Name, ".", `\n`, -1)
	if infoCopy.File != "" {
		infoCopy.File = filepath.Base(infoCopy.File)
	}
	return strings.Join(infoCopy.NameComponents(), `\n`) + `\n`
}
+func (b *builder) collapsedTags(ts []*Tag, count int, flatTags bool) []*Tag { + ts = SortTags(ts, flatTags) + if len(ts) <= count { + return ts + } + + tagGroups := make([][]*Tag, count) + for i, t := range (ts)[:count] { + tagGroups[i] = []*Tag{t} + } + for _, t := range (ts)[count:] { + g, d := 0, tagDistance(t, tagGroups[0][0]) + for i := 1; i < count; i++ { + if nd := tagDistance(t, tagGroups[i][0]); nd < d { + g, d = i, nd + } + } + tagGroups[g] = append(tagGroups[g], t) + } + + var nts []*Tag + for _, g := range tagGroups { + l, w, c := b.tagGroupLabel(g) + nts = append(nts, &Tag{ + Name: l, + Flat: w, + Cum: c, + }) + } + return SortTags(nts, flatTags) +} + +func tagDistance(t, u *Tag) float64 { + v, _ := measurement.Scale(u.Value, u.Unit, t.Unit) + if v < float64(t.Value) { + return float64(t.Value) - v + } + return v - float64(t.Value) +} + +func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) { + if len(g) == 1 { + t := g[0] + return measurement.Label(t.Value, t.Unit), t.FlatValue(), t.CumValue() + } + min := g[0] + max := g[0] + df, f := min.FlatDiv, min.Flat + dc, c := min.CumDiv, min.Cum + for _, t := range g[1:] { + if v, _ := measurement.Scale(t.Value, t.Unit, min.Unit); int64(v) < min.Value { + min = t + } + if v, _ := measurement.Scale(t.Value, t.Unit, max.Unit); int64(v) > max.Value { + max = t + } + f += t.Flat + df += t.FlatDiv + c += t.Cum + dc += t.CumDiv + } + if df != 0 { + f = f / df + } + if dc != 0 { + c = c / dc + } + + // Tags are not scaled with the selected output unit because tags are often + // much smaller than other values which appear, so the range of tag sizes + // sometimes would appear to be "0..0" when scaled to the selected output unit. + return measurement.Label(min.Value, min.Unit) + ".." + measurement.Label(max.Value, max.Unit), f, c +} + +func min64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +// escapeAllForDot applies escapeForDot to all strings in the given slice. 
+func escapeAllForDot(in []string) []string { + var out = make([]string, len(in)) + for i := range in { + out[i] = escapeForDot(in[i]) + } + return out +} + +// escapeForDot escapes double quotes and backslashes, and replaces Graphviz's +// "center" character (\n) with a left-justified character. +// See https://graphviz.org/docs/attr-types/escString/ for more info. +func escapeForDot(str string) string { + return strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(str, `\`, `\\`), `"`, `\"`), "\n", `\l`) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go new file mode 100644 index 0000000..74b904c --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go @@ -0,0 +1,1170 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graph collects a set of samples into a directed graph. +package graph + +import ( + "fmt" + "math" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/google/pprof/profile" +) + +var ( + // Removes package name and method arguments for Java method names. + // See tests for examples. + javaRegExp = regexp.MustCompile(`^(?:[a-z]\w*\.)*([A-Z][\w\$]*\.(?:|[a-z][\w\$]*(?:\$\d+)?))(?:(?:\()|$)`) + // Removes package name and method arguments for Go function names. + // See tests for examples. 
	goRegExp = regexp.MustCompile(`^(?:[\w\-\.]+\/)+(.+)`)
	// Removes potential module versions in a package path.
	goVerRegExp = regexp.MustCompile(`^(.*?)/v(?:[2-9]|[1-9][0-9]+)([./].*)$`)
	// Strips C++ namespace prefix from a C++ function / method name.
	// NOTE: Make sure to keep the template parameters in the name. Normally,
	// template parameters are stripped from the C++ names but when
	// -symbolize=demangle=templates flag is used, they will not be.
	// See tests for examples.
	cppRegExp                = regexp.MustCompile(`^(?:[_a-zA-Z]\w*::)+(_*[A-Z]\w*::~?[_a-zA-Z]\w*(?:<.*>)?)`)
	cppAnonymousPrefixRegExp = regexp.MustCompile(`^\(anonymous namespace\)::`)
)

// Graph summarizes a performance profile into a format that is
// suitable for visualization.
type Graph struct {
	Nodes Nodes
}

// Options encodes the options for constructing a graph
type Options struct {
	SampleValue       func(s []int64) int64      // Function to compute the value of a sample
	SampleMeanDivisor func(s []int64) int64      // Function to compute the divisor for mean graphs, or nil
	FormatTag         func(int64, string) string // Function to format a sample tag value into a string
	ObjNames          bool                       // Always preserve obj filename
	OrigFnNames       bool                       // Preserve original (eg mangled) function names

	CallTree     bool // Build a tree instead of a graph
	DropNegative bool // Drop nodes with overall negative values

	KeptNodes NodeSet // If non-nil, only use nodes in this set
}

// Nodes is an ordered collection of graph nodes.
type Nodes []*Node

// Node is an entry on a profiling report. It represents a unique
// program location.
type Node struct {
	// Info describes the source location associated to this node.
	Info NodeInfo

	// Function represents the function that this node belongs to. On
	// graphs with sub-function resolution (eg line number or
	// addresses), two nodes in a NodeMap that are part of the same
	// function have the same value of Node.Function. If the Node
	// represents the whole function, it points back to itself.
	Function *Node

	// Values associated to this node. Flat is exclusive to this node,
	// Cum includes all descendents.
	Flat, FlatDiv, Cum, CumDiv int64

	// In and Out contain the nodes immediately reaching or reached by
	// this node.
	In, Out EdgeMap

	// LabelTags provide additional information about subsets of a sample.
	LabelTags TagMap

	// NumericTags provide additional values for subsets of a sample.
	// Numeric tags are optionally associated to a label tag. The key
	// for NumericTags is the name of the LabelTag they are associated
	// to, or "" for numeric tags not associated to a label tag.
	NumericTags map[string]TagMap
}

// FlatValue returns the exclusive value for this node, computing the
// mean if a divisor is available.
func (n *Node) FlatValue() int64 {
	if n.FlatDiv == 0 {
		return n.Flat
	}
	return n.Flat / n.FlatDiv
}

// CumValue returns the inclusive value for this node, computing the
// mean if a divisor is available.
func (n *Node) CumValue() int64 {
	if n.CumDiv == 0 {
		return n.Cum
	}
	return n.Cum / n.CumDiv
}

// AddToEdge increases the weight of an edge between two nodes. If
// there isn't such an edge one is created.
func (n *Node) AddToEdge(to *Node, v int64, residual, inline bool) {
	n.AddToEdgeDiv(to, 0, v, residual, inline)
}

// AddToEdgeDiv increases the weight of an edge between two nodes. If
// there isn't such an edge one is created.
+func (n *Node) AddToEdgeDiv(to *Node, dv, v int64, residual, inline bool) { + if n.Out[to] != to.In[n] { + panic(fmt.Errorf("asymmetric edges %v %v", *n, *to)) + } + + if e := n.Out[to]; e != nil { + e.WeightDiv += dv + e.Weight += v + if residual { + e.Residual = true + } + if !inline { + e.Inline = false + } + return + } + + info := &Edge{Src: n, Dest: to, WeightDiv: dv, Weight: v, Residual: residual, Inline: inline} + n.Out[to] = info + to.In[n] = info +} + +// NodeInfo contains the attributes for a node. +type NodeInfo struct { + Name string + OrigName string + Address uint64 + File string + StartLine, Lineno int + Objfile string +} + +// PrintableName calls the Node's Formatter function with a single space separator. +func (i *NodeInfo) PrintableName() string { + return strings.Join(i.NameComponents(), " ") +} + +// NameComponents returns the components of the printable name to be used for a node. +func (i *NodeInfo) NameComponents() []string { + var name []string + if i.Address != 0 { + name = append(name, fmt.Sprintf("%016x", i.Address)) + } + if fun := i.Name; fun != "" { + name = append(name, fun) + } + + switch { + case i.Lineno != 0: + // User requested line numbers, provide what we have. + name = append(name, fmt.Sprintf("%s:%d", i.File, i.Lineno)) + case i.File != "": + // User requested file name, provide it. + name = append(name, i.File) + case i.Name != "": + // User requested function name. It was already included. + case i.Objfile != "": + // Only binary name is available + name = append(name, "["+filepath.Base(i.Objfile)+"]") + default: + // Do not leave it empty if there is no information at all. + name = append(name, "") + } + return name +} + +// NodeMap maps from a node info struct to a node. It is used to merge +// report entries with the same info. +type NodeMap map[NodeInfo]*Node + +// NodeSet is a collection of node info structs. +type NodeSet map[NodeInfo]bool + +// NodePtrSet is a collection of nodes. 
Trimming a graph or tree requires a set +// of objects which uniquely identify the nodes to keep. In a graph, NodeInfo +// works as a unique identifier; however, in a tree multiple nodes may share +// identical NodeInfos. A *Node does uniquely identify a node so we can use that +// instead. Though a *Node also uniquely identifies a node in a graph, +// currently, during trimming, graphs are rebuilt from scratch using only the +// NodeSet, so there would not be the required context of the initial graph to +// allow for the use of *Node. +type NodePtrSet map[*Node]bool + +// FindOrInsertNode takes the info for a node and either returns a matching node +// from the node map if one exists, or adds one to the map if one does not. +// If kept is non-nil, nodes are only added if they can be located on it. +func (nm NodeMap) FindOrInsertNode(info NodeInfo, kept NodeSet) *Node { + if kept != nil { + if _, ok := kept[info]; !ok { + return nil + } + } + + if n, ok := nm[info]; ok { + return n + } + + n := &Node{ + Info: info, + In: make(EdgeMap), + Out: make(EdgeMap), + LabelTags: make(TagMap), + NumericTags: make(map[string]TagMap), + } + nm[info] = n + if info.Address == 0 && info.Lineno == 0 { + // This node represents the whole function, so point Function + // back to itself. + n.Function = n + return n + } + // Find a node that represents the whole function. + info.Address = 0 + info.Lineno = 0 + n.Function = nm.FindOrInsertNode(info, nil) + return n +} + +// EdgeMap is used to represent the incoming/outgoing edges from a node. +type EdgeMap map[*Node]*Edge + +// Edge contains any attributes to be represented about edges in a graph. +type Edge struct { + Src, Dest *Node + // The summary weight of the edge + Weight, WeightDiv int64 + + // residual edges connect nodes that were connected through a + // separate node, which has been removed from the report. + Residual bool + // An inline edge represents a call that was inlined into the caller. 
+ Inline bool +} + +// WeightValue returns the weight value for this edge, normalizing if a +// divisor is available. +func (e *Edge) WeightValue() int64 { + if e.WeightDiv == 0 { + return e.Weight + } + return e.Weight / e.WeightDiv +} + +// Tag represent sample annotations +type Tag struct { + Name string + Unit string // Describe the value, "" for non-numeric tags + Value int64 + Flat, FlatDiv int64 + Cum, CumDiv int64 +} + +// FlatValue returns the exclusive value for this tag, computing the +// mean if a divisor is available. +func (t *Tag) FlatValue() int64 { + if t.FlatDiv == 0 { + return t.Flat + } + return t.Flat / t.FlatDiv +} + +// CumValue returns the inclusive value for this tag, computing the +// mean if a divisor is available. +func (t *Tag) CumValue() int64 { + if t.CumDiv == 0 { + return t.Cum + } + return t.Cum / t.CumDiv +} + +// TagMap is a collection of tags, classified by their name. +type TagMap map[string]*Tag + +// SortTags sorts a slice of tags based on their weight. +func SortTags(t []*Tag, flat bool) []*Tag { + ts := tags{t, flat} + sort.Sort(ts) + return ts.t +} + +// New summarizes performance data from a profile into a graph. +func New(prof *profile.Profile, o *Options) *Graph { + if o.CallTree { + return newTree(prof, o) + } + g, _ := newGraph(prof, o) + return g +} + +// newGraph computes a graph from a profile. It returns the graph, and +// a map from the profile location indices to the corresponding graph +// nodes. 
+func newGraph(prof *profile.Profile, o *Options) (*Graph, map[uint64]Nodes) { + nodes, locationMap := CreateNodes(prof, o) + seenNode := make(map[*Node]bool) + seenEdge := make(map[nodePair]bool) + for _, sample := range prof.Sample { + var w, dw int64 + w = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + dw = o.SampleMeanDivisor(sample.Value) + } + if dw == 0 && w == 0 { + continue + } + for k := range seenNode { + delete(seenNode, k) + } + for k := range seenEdge { + delete(seenEdge, k) + } + var parent *Node + // A residual edge goes over one or more nodes that were not kept. + residual := false + + labels := joinLabels(sample) + // Group the sample frames, based on a global map. + for i := len(sample.Location) - 1; i >= 0; i-- { + l := sample.Location[i] + locNodes := locationMap[l.ID] + for ni := len(locNodes) - 1; ni >= 0; ni-- { + n := locNodes[ni] + if n == nil { + residual = true + continue + } + // Add cum weight to all nodes in stack, avoiding double counting. + if _, ok := seenNode[n]; !ok { + seenNode[n] = true + n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false) + } + // Update edge weights for all edges in stack, avoiding double counting. + if _, ok := seenEdge[nodePair{n, parent}]; !ok && parent != nil && n != parent { + seenEdge[nodePair{n, parent}] = true + parent.AddToEdgeDiv(n, dw, w, residual, ni != len(locNodes)-1) + } + parent = n + residual = false + } + } + if parent != nil && !residual { + // Add flat weight to leaf node. + parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true) + } + } + + return selectNodesForGraph(nodes, o.DropNegative), locationMap +} + +func selectNodesForGraph(nodes Nodes, dropNegative bool) *Graph { + // Collect nodes into a graph. 
+ gNodes := make(Nodes, 0, len(nodes)) + for _, n := range nodes { + if n == nil { + continue + } + if n.Cum == 0 && n.Flat == 0 { + continue + } + if dropNegative && isNegative(n) { + continue + } + gNodes = append(gNodes, n) + } + return &Graph{gNodes} +} + +type nodePair struct { + src, dest *Node +} + +func newTree(prof *profile.Profile, o *Options) (g *Graph) { + parentNodeMap := make(map[*Node]NodeMap, len(prof.Sample)) + for _, sample := range prof.Sample { + var w, dw int64 + w = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + dw = o.SampleMeanDivisor(sample.Value) + } + if dw == 0 && w == 0 { + continue + } + var parent *Node + labels := joinLabels(sample) + // Group the sample frames, based on a per-node map. + for i := len(sample.Location) - 1; i >= 0; i-- { + l := sample.Location[i] + lines := l.Line + if len(lines) == 0 { + lines = []profile.Line{{}} // Create empty line to include location info. + } + for lidx := len(lines) - 1; lidx >= 0; lidx-- { + nodeMap := parentNodeMap[parent] + if nodeMap == nil { + nodeMap = make(NodeMap) + parentNodeMap[parent] = nodeMap + } + n := nodeMap.findOrInsertLine(l, lines[lidx], o) + if n == nil { + continue + } + n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false) + if parent != nil { + parent.AddToEdgeDiv(n, dw, w, false, lidx != len(lines)-1) + } + parent = n + } + } + if parent != nil { + parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true) + } + } + + nodes := make(Nodes, len(prof.Location)) + for _, nm := range parentNodeMap { + nodes = append(nodes, nm.nodes()...) + } + return selectNodesForGraph(nodes, o.DropNegative) +} + +// ShortenFunctionName returns a shortened version of a function's name. 
+func ShortenFunctionName(f string) string { + f = cppAnonymousPrefixRegExp.ReplaceAllString(f, "") + f = goVerRegExp.ReplaceAllString(f, `${1}${2}`) + for _, re := range []*regexp.Regexp{goRegExp, javaRegExp, cppRegExp} { + if matches := re.FindStringSubmatch(f); len(matches) >= 2 { + return strings.Join(matches[1:], "") + } + } + return f +} + +// TrimTree trims a Graph in forest form, keeping only the nodes in kept. This +// will not work correctly if even a single node has multiple parents. +func (g *Graph) TrimTree(kept NodePtrSet) { + // Creates a new list of nodes + oldNodes := g.Nodes + g.Nodes = make(Nodes, 0, len(kept)) + + for _, cur := range oldNodes { + // A node may not have multiple parents + if len(cur.In) > 1 { + panic("TrimTree only works on trees") + } + + // If a node should be kept, add it to the new list of nodes + if _, ok := kept[cur]; ok { + g.Nodes = append(g.Nodes, cur) + continue + } + + // If a node has no parents, then delete all of the in edges of its + // children to make them each roots of their own trees. + if len(cur.In) == 0 { + for _, outEdge := range cur.Out { + delete(outEdge.Dest.In, cur) + } + continue + } + + // Get the parent. This works since at this point cur.In must contain only + // one element. + if len(cur.In) != 1 { + panic("Get parent assertion failed. cur.In expected to be of length 1.") + } + var parent *Node + for _, edge := range cur.In { + parent = edge.Src + } + + parentEdgeInline := parent.Out[cur].Inline + + // Remove the edge from the parent to this node + delete(parent.Out, cur) + + // Reconfigure every edge from the current node to now begin at the parent. 
+ for _, outEdge := range cur.Out { + child := outEdge.Dest + + delete(child.In, cur) + child.In[parent] = outEdge + parent.Out[child] = outEdge + + outEdge.Src = parent + outEdge.Residual = true + // If the edge from the parent to the current node and the edge from the + // current node to the child are both inline, then this resulting residual + // edge should also be inline + outEdge.Inline = parentEdgeInline && outEdge.Inline + } + } + g.RemoveRedundantEdges() +} + +func joinLabels(s *profile.Sample) string { + if len(s.Label) == 0 { + return "" + } + + var labels []string + for key, vals := range s.Label { + for _, v := range vals { + labels = append(labels, key+":"+v) + } + } + sort.Strings(labels) + return strings.Join(labels, `\n`) +} + +// isNegative returns true if the node is considered as "negative" for the +// purposes of drop_negative. +func isNegative(n *Node) bool { + switch { + case n.Flat < 0: + return true + case n.Flat == 0 && n.Cum < 0: + return true + default: + return false + } +} + +// CreateNodes creates graph nodes for all locations in a profile. It +// returns set of all nodes, plus a mapping of each location to the +// set of corresponding nodes (one per location.Line). +func CreateNodes(prof *profile.Profile, o *Options) (Nodes, map[uint64]Nodes) { + locations := make(map[uint64]Nodes, len(prof.Location)) + nm := make(NodeMap, len(prof.Location)) + for _, l := range prof.Location { + lines := l.Line + if len(lines) == 0 { + lines = []profile.Line{{}} // Create empty line to include location info. 
+ } + nodes := make(Nodes, len(lines)) + for ln := range lines { + nodes[ln] = nm.findOrInsertLine(l, lines[ln], o) + } + locations[l.ID] = nodes + } + return nm.nodes(), locations +} + +func (nm NodeMap) nodes() Nodes { + nodes := make(Nodes, 0, len(nm)) + for _, n := range nm { + nodes = append(nodes, n) + } + return nodes +} + +func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Options) *Node { + var objfile string + if m := l.Mapping; m != nil && m.File != "" { + objfile = m.File + } + + if ni := nodeInfo(l, li, objfile, o); ni != nil { + return nm.FindOrInsertNode(*ni, o.KeptNodes) + } + return nil +} + +func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options) *NodeInfo { + if line.Function == nil { + return &NodeInfo{Address: l.Address, Objfile: objfile} + } + ni := &NodeInfo{ + Address: l.Address, + Lineno: int(line.Line), + Name: line.Function.Name, + } + if fname := line.Function.Filename; fname != "" { + ni.File = filepath.Clean(fname) + } + if o.OrigFnNames { + ni.OrigName = line.Function.SystemName + } + if o.ObjNames || (ni.Name == "" && ni.OrigName == "") { + ni.Objfile = objfile + ni.StartLine = int(line.Function.StartLine) + } + return ni +} + +type tags struct { + t []*Tag + flat bool +} + +func (t tags) Len() int { return len(t.t) } +func (t tags) Swap(i, j int) { t.t[i], t.t[j] = t.t[j], t.t[i] } +func (t tags) Less(i, j int) bool { + if !t.flat { + if t.t[i].Cum != t.t[j].Cum { + return abs64(t.t[i].Cum) > abs64(t.t[j].Cum) + } + } + if t.t[i].Flat != t.t[j].Flat { + return abs64(t.t[i].Flat) > abs64(t.t[j].Flat) + } + return t.t[i].Name < t.t[j].Name +} + +// Sum adds the flat and cum values of a set of nodes. 
+func (ns Nodes) Sum() (flat int64, cum int64) { + for _, n := range ns { + flat += n.Flat + cum += n.Cum + } + return +} + +func (n *Node) addSample(dw, w int64, labels string, numLabel map[string][]int64, numUnit map[string][]string, format func(int64, string) string, flat bool) { + // Update sample value + if flat { + n.FlatDiv += dw + n.Flat += w + } else { + n.CumDiv += dw + n.Cum += w + } + + // Add string tags + if labels != "" { + t := n.LabelTags.findOrAddTag(labels, "", 0) + if flat { + t.FlatDiv += dw + t.Flat += w + } else { + t.CumDiv += dw + t.Cum += w + } + } + + numericTags := n.NumericTags[labels] + if numericTags == nil { + numericTags = TagMap{} + n.NumericTags[labels] = numericTags + } + // Add numeric tags + if format == nil { + format = defaultLabelFormat + } + for k, nvals := range numLabel { + units := numUnit[k] + for i, v := range nvals { + var t *Tag + if len(units) > 0 { + t = numericTags.findOrAddTag(format(v, units[i]), units[i], v) + } else { + t = numericTags.findOrAddTag(format(v, k), k, v) + } + if flat { + t.FlatDiv += dw + t.Flat += w + } else { + t.CumDiv += dw + t.Cum += w + } + } + } +} + +func defaultLabelFormat(v int64, key string) string { + return strconv.FormatInt(v, 10) +} + +func (m TagMap) findOrAddTag(label, unit string, value int64) *Tag { + l := m[label] + if l == nil { + l = &Tag{ + Name: label, + Unit: unit, + Value: value, + } + m[label] = l + } + return l +} + +// String returns a text representation of a graph, for debugging purposes. 
+func (g *Graph) String() string { + var s []string + + nodeIndex := make(map[*Node]int, len(g.Nodes)) + + for i, n := range g.Nodes { + nodeIndex[n] = i + 1 + } + + for i, n := range g.Nodes { + name := n.Info.PrintableName() + var in, out []int + + for _, from := range n.In { + in = append(in, nodeIndex[from.Src]) + } + for _, to := range n.Out { + out = append(out, nodeIndex[to.Dest]) + } + s = append(s, fmt.Sprintf("%d: %s[flat=%d cum=%d] %x -> %v ", i+1, name, n.Flat, n.Cum, in, out)) + } + return strings.Join(s, "\n") +} + +// DiscardLowFrequencyNodes returns a set of the nodes at or over a +// specific cum value cutoff. +func (g *Graph) DiscardLowFrequencyNodes(nodeCutoff int64) NodeSet { + return makeNodeSet(g.Nodes, nodeCutoff) +} + +// DiscardLowFrequencyNodePtrs returns a NodePtrSet of nodes at or over a +// specific cum value cutoff. +func (g *Graph) DiscardLowFrequencyNodePtrs(nodeCutoff int64) NodePtrSet { + cutNodes := getNodesAboveCumCutoff(g.Nodes, nodeCutoff) + kept := make(NodePtrSet, len(cutNodes)) + for _, n := range cutNodes { + kept[n] = true + } + return kept +} + +func makeNodeSet(nodes Nodes, nodeCutoff int64) NodeSet { + cutNodes := getNodesAboveCumCutoff(nodes, nodeCutoff) + kept := make(NodeSet, len(cutNodes)) + for _, n := range cutNodes { + kept[n.Info] = true + } + return kept +} + +// getNodesAboveCumCutoff returns all the nodes which have a Cum value greater +// than or equal to cutoff. +func getNodesAboveCumCutoff(nodes Nodes, nodeCutoff int64) Nodes { + cutoffNodes := make(Nodes, 0, len(nodes)) + for _, n := range nodes { + if abs64(n.Cum) < nodeCutoff { + continue + } + cutoffNodes = append(cutoffNodes, n) + } + return cutoffNodes +} + +// TrimLowFrequencyTags removes tags that have less than +// the specified weight. 
+func (g *Graph) TrimLowFrequencyTags(tagCutoff int64) { + // Remove nodes with value <= total*nodeFraction + for _, n := range g.Nodes { + n.LabelTags = trimLowFreqTags(n.LabelTags, tagCutoff) + for s, nt := range n.NumericTags { + n.NumericTags[s] = trimLowFreqTags(nt, tagCutoff) + } + } +} + +func trimLowFreqTags(tags TagMap, minValue int64) TagMap { + kept := TagMap{} + for s, t := range tags { + if abs64(t.Flat) >= minValue || abs64(t.Cum) >= minValue { + kept[s] = t + } + } + return kept +} + +// TrimLowFrequencyEdges removes edges that have less than +// the specified weight. Returns the number of edges removed +func (g *Graph) TrimLowFrequencyEdges(edgeCutoff int64) int { + var droppedEdges int + for _, n := range g.Nodes { + for src, e := range n.In { + if abs64(e.Weight) < edgeCutoff { + delete(n.In, src) + delete(src.Out, n) + droppedEdges++ + } + } + } + return droppedEdges +} + +// SortNodes sorts the nodes in a graph based on a specific heuristic. +func (g *Graph) SortNodes(cum bool, visualMode bool) { + // Sort nodes based on requested mode + switch { + case visualMode: + // Specialized sort to produce a more visually-interesting graph + g.Nodes.Sort(EntropyOrder) + case cum: + g.Nodes.Sort(CumNameOrder) + default: + g.Nodes.Sort(FlatNameOrder) + } +} + +// SelectTopNodePtrs returns a set of the top maxNodes *Node in a graph. +func (g *Graph) SelectTopNodePtrs(maxNodes int, visualMode bool) NodePtrSet { + set := make(NodePtrSet) + for _, node := range g.selectTopNodes(maxNodes, visualMode) { + set[node] = true + } + return set +} + +// SelectTopNodes returns a set of the top maxNodes nodes in a graph. +func (g *Graph) SelectTopNodes(maxNodes int, visualMode bool) NodeSet { + return makeNodeSet(g.selectTopNodes(maxNodes, visualMode), 0) +} + +// selectTopNodes returns a slice of the top maxNodes nodes in a graph. 
+func (g *Graph) selectTopNodes(maxNodes int, visualMode bool) Nodes { + if maxNodes > 0 { + if visualMode { + var count int + // If generating a visual graph, count tags as nodes. Update + // maxNodes to account for them. + for i, n := range g.Nodes { + tags := countTags(n) + if tags > maxNodelets { + tags = maxNodelets + } + if count += tags + 1; count >= maxNodes { + maxNodes = i + 1 + break + } + } + } + } + if maxNodes > len(g.Nodes) { + maxNodes = len(g.Nodes) + } + return g.Nodes[:maxNodes] +} + +// countTags counts the tags with flat count. This underestimates the +// number of tags being displayed, but in practice is close enough. +func countTags(n *Node) int { + count := 0 + for _, e := range n.LabelTags { + if e.Flat != 0 { + count++ + } + } + for _, t := range n.NumericTags { + for _, e := range t { + if e.Flat != 0 { + count++ + } + } + } + return count +} + +// RemoveRedundantEdges removes residual edges if the destination can +// be reached through another path. This is done to simplify the graph +// while preserving connectivity. +func (g *Graph) RemoveRedundantEdges() { + // Walk the nodes and outgoing edges in reverse order to prefer + // removing edges with the lowest weight. + for i := len(g.Nodes); i > 0; i-- { + n := g.Nodes[i-1] + in := n.In.Sort() + for j := len(in); j > 0; j-- { + e := in[j-1] + if !e.Residual { + // Do not remove edges heavier than a non-residual edge, to + // avoid potential confusion. + break + } + if isRedundantEdge(e) { + delete(e.Src.Out, e.Dest) + delete(e.Dest.In, e.Src) + } + } + } +} + +// isRedundantEdge determines if there is a path that allows e.Src +// to reach e.Dest after removing e. 
+func isRedundantEdge(e *Edge) bool { + src, n := e.Src, e.Dest + seen := map[*Node]bool{n: true} + queue := Nodes{n} + for len(queue) > 0 { + n := queue[0] + queue = queue[1:] + for _, ie := range n.In { + if e == ie || seen[ie.Src] { + continue + } + if ie.Src == src { + return true + } + seen[ie.Src] = true + queue = append(queue, ie.Src) + } + } + return false +} + +// nodeSorter is a mechanism used to allow a report to be sorted +// in different ways. +type nodeSorter struct { + rs Nodes + less func(l, r *Node) bool +} + +func (s nodeSorter) Len() int { return len(s.rs) } +func (s nodeSorter) Swap(i, j int) { s.rs[i], s.rs[j] = s.rs[j], s.rs[i] } +func (s nodeSorter) Less(i, j int) bool { return s.less(s.rs[i], s.rs[j]) } + +// Sort reorders a slice of nodes based on the specified ordering +// criteria. The result is sorted in decreasing order for (absolute) +// numeric quantities, alphabetically for text, and increasing for +// addresses. +func (ns Nodes) Sort(o NodeOrder) error { + var s nodeSorter + + switch o { + case FlatNameOrder: + s = nodeSorter{ns, + func(l, r *Node) bool { + if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv { + return iv > jv + } + if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv { + return iv < jv + } + if iv, jv := abs64(l.Cum), abs64(r.Cum); iv != jv { + return iv > jv + } + return compareNodes(l, r) + }, + } + case FlatCumNameOrder: + s = nodeSorter{ns, + func(l, r *Node) bool { + if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv { + return iv > jv + } + if iv, jv := abs64(l.Cum), abs64(r.Cum); iv != jv { + return iv > jv + } + if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv { + return iv < jv + } + return compareNodes(l, r) + }, + } + case NameOrder: + s = nodeSorter{ns, + func(l, r *Node) bool { + if iv, jv := l.Info.Name, r.Info.Name; iv != jv { + return iv < jv + } + return compareNodes(l, r) + }, + } + case FileOrder: + s = nodeSorter{ns, + func(l, r *Node) bool { + if iv, jv := 
l.Info.File, r.Info.File; iv != jv { + return iv < jv + } + if iv, jv := l.Info.StartLine, r.Info.StartLine; iv != jv { + return iv < jv + } + return compareNodes(l, r) + }, + } + case AddressOrder: + s = nodeSorter{ns, + func(l, r *Node) bool { + if iv, jv := l.Info.Address, r.Info.Address; iv != jv { + return iv < jv + } + return compareNodes(l, r) + }, + } + case CumNameOrder, EntropyOrder: + // Hold scoring for score-based ordering + var score map[*Node]int64 + scoreOrder := func(l, r *Node) bool { + if iv, jv := abs64(score[l]), abs64(score[r]); iv != jv { + return iv > jv + } + if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv { + return iv < jv + } + if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv { + return iv > jv + } + return compareNodes(l, r) + } + + switch o { + case CumNameOrder: + score = make(map[*Node]int64, len(ns)) + for _, n := range ns { + score[n] = n.Cum + } + s = nodeSorter{ns, scoreOrder} + case EntropyOrder: + score = make(map[*Node]int64, len(ns)) + for _, n := range ns { + score[n] = entropyScore(n) + } + s = nodeSorter{ns, scoreOrder} + } + default: + return fmt.Errorf("report: unrecognized sort ordering: %d", o) + } + sort.Sort(s) + return nil +} + +// compareNodes compares two nodes to provide a deterministic ordering +// between them. Two nodes cannot have the same Node.Info value. +func compareNodes(l, r *Node) bool { + return fmt.Sprint(l.Info) < fmt.Sprint(r.Info) +} + +// entropyScore computes a score for a node representing how important +// it is to include this node on a graph visualization. It is used to +// sort the nodes and select which ones to display if we have more +// nodes than desired in the graph. This number is computed by looking +// at the flat and cum weights of the node and the incoming/outgoing +// edges. The fundamental idea is to penalize nodes that have a simple +// fallthrough from their incoming to the outgoing edge. 
+func entropyScore(n *Node) int64 { + score := float64(0) + + if len(n.In) == 0 { + score++ // Favor entry nodes + } else { + score += edgeEntropyScore(n, n.In, 0) + } + + if len(n.Out) == 0 { + score++ // Favor leaf nodes + } else { + score += edgeEntropyScore(n, n.Out, n.Flat) + } + + return int64(score*float64(n.Cum)) + n.Flat +} + +// edgeEntropyScore computes the entropy value for a set of edges +// coming in or out of a node. Entropy (as defined in information +// theory) refers to the amount of information encoded by the set of +// edges. A set of edges that have a more interesting distribution of +// samples gets a higher score. +func edgeEntropyScore(n *Node, edges EdgeMap, self int64) float64 { + score := float64(0) + total := self + for _, e := range edges { + if e.Weight > 0 { + total += abs64(e.Weight) + } + } + if total != 0 { + for _, e := range edges { + frac := float64(abs64(e.Weight)) / float64(total) + score += -frac * math.Log2(frac) + } + if self > 0 { + frac := float64(abs64(self)) / float64(total) + score += -frac * math.Log2(frac) + } + } + return score +} + +// NodeOrder sets the ordering for a Sort operation +type NodeOrder int + +// Sorting options for node sort. +const ( + FlatNameOrder NodeOrder = iota + FlatCumNameOrder + CumNameOrder + NameOrder + FileOrder + AddressOrder + EntropyOrder +) + +// Sort returns a slice of the edges in the map, in a consistent +// order. The sort order is first based on the edge weight +// (higher-to-lower) and then by the node names to avoid flakiness. +func (e EdgeMap) Sort() []*Edge { + el := make(edgeList, 0, len(e)) + for _, w := range e { + el = append(el, w) + } + + sort.Sort(el) + return el +} + +// Sum returns the total weight for a set of nodes. 
+func (e EdgeMap) Sum() int64 { + var ret int64 + for _, edge := range e { + ret += edge.Weight + } + return ret +} + +type edgeList []*Edge + +func (el edgeList) Len() int { + return len(el) +} + +func (el edgeList) Less(i, j int) bool { + if el[i].Weight != el[j].Weight { + return abs64(el[i].Weight) > abs64(el[j].Weight) + } + + from1 := el[i].Src.Info.PrintableName() + from2 := el[j].Src.Info.PrintableName() + if from1 != from2 { + return from1 < from2 + } + + to1 := el[i].Dest.Info.PrintableName() + to2 := el[j].Dest.Info.PrintableName() + + return to1 < to2 +} + +func (el edgeList) Swap(i, j int) { + el[i], el[j] = el[j], el[i] +} + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go new file mode 100644 index 0000000..b5fcfbc --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go @@ -0,0 +1,293 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package measurement export utility functions to manipulate/format performance profile sample values. +package measurement + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/google/pprof/profile" +) + +// ScaleProfiles updates the units in a set of profiles to make them +// compatible. 
It scales the profiles to the smallest unit to preserve +// data. +func ScaleProfiles(profiles []*profile.Profile) error { + if len(profiles) == 0 { + return nil + } + periodTypes := make([]*profile.ValueType, 0, len(profiles)) + for _, p := range profiles { + if p.PeriodType != nil { + periodTypes = append(periodTypes, p.PeriodType) + } + } + periodType, err := CommonValueType(periodTypes) + if err != nil { + return fmt.Errorf("period type: %v", err) + } + + // Identify common sample types + numSampleTypes := len(profiles[0].SampleType) + for _, p := range profiles[1:] { + if numSampleTypes != len(p.SampleType) { + return fmt.Errorf("inconsistent samples type count: %d != %d", numSampleTypes, len(p.SampleType)) + } + } + sampleType := make([]*profile.ValueType, numSampleTypes) + for i := 0; i < numSampleTypes; i++ { + sampleTypes := make([]*profile.ValueType, len(profiles)) + for j, p := range profiles { + sampleTypes[j] = p.SampleType[i] + } + sampleType[i], err = CommonValueType(sampleTypes) + if err != nil { + return fmt.Errorf("sample types: %v", err) + } + } + + for _, p := range profiles { + if p.PeriodType != nil && periodType != nil { + period, _ := Scale(p.Period, p.PeriodType.Unit, periodType.Unit) + p.Period, p.PeriodType.Unit = int64(period), periodType.Unit + } + ratios := make([]float64, len(p.SampleType)) + for i, st := range p.SampleType { + if sampleType[i] == nil { + ratios[i] = 1 + continue + } + ratios[i], _ = Scale(1, st.Unit, sampleType[i].Unit) + p.SampleType[i].Unit = sampleType[i].Unit + } + if err := p.ScaleN(ratios); err != nil { + return fmt.Errorf("scale: %v", err) + } + } + return nil +} + +// CommonValueType returns the finest type from a set of compatible +// types. 
+func CommonValueType(ts []*profile.ValueType) (*profile.ValueType, error) { + if len(ts) <= 1 { + return nil, nil + } + minType := ts[0] + for _, t := range ts[1:] { + if !compatibleValueTypes(minType, t) { + return nil, fmt.Errorf("incompatible types: %v %v", *minType, *t) + } + if ratio, _ := Scale(1, t.Unit, minType.Unit); ratio < 1 { + minType = t + } + } + rcopy := *minType + return &rcopy, nil +} + +func compatibleValueTypes(v1, v2 *profile.ValueType) bool { + if v1 == nil || v2 == nil { + return true // No grounds to disqualify. + } + // Remove trailing 's' to permit minor mismatches. + if t1, t2 := strings.TrimSuffix(v1.Type, "s"), strings.TrimSuffix(v2.Type, "s"); t1 != t2 { + return false + } + + if v1.Unit == v2.Unit { + return true + } + for _, ut := range unitTypes { + if ut.sniffUnit(v1.Unit) != nil && ut.sniffUnit(v2.Unit) != nil { + return true + } + } + return false +} + +// Scale a measurement from an unit to a different unit and returns +// the scaled value and the target unit. The returned target unit +// will be empty if uninteresting (could be skipped). +func Scale(value int64, fromUnit, toUnit string) (float64, string) { + // Avoid infinite recursion on overflow. + if value < 0 && -value > 0 { + v, u := Scale(-value, fromUnit, toUnit) + return -v, u + } + for _, ut := range unitTypes { + if v, u, ok := ut.convertUnit(value, fromUnit, toUnit); ok { + return v, u + } + } + // Skip non-interesting units. + switch toUnit { + case "count", "sample", "unit", "minimum", "auto": + return float64(value), "" + default: + return float64(value), toUnit + } +} + +// Label returns the label used to describe a certain measurement. +func Label(value int64, unit string) string { + return ScaledLabel(value, unit, "auto") +} + +// ScaledLabel scales the passed-in measurement (if necessary) and +// returns the label used to describe a float measurement. 
+func ScaledLabel(value int64, fromUnit, toUnit string) string { + v, u := Scale(value, fromUnit, toUnit) + sv := strings.TrimSuffix(fmt.Sprintf("%.2f", v), ".00") + if sv == "0" || sv == "-0" { + return "0" + } + return sv + u +} + +// Percentage computes the percentage of total of a value, and encodes +// it as a string. At least two digits of precision are printed. +func Percentage(value, total int64) string { + var ratio float64 + if total != 0 { + ratio = math.Abs(float64(value)/float64(total)) * 100 + } + switch { + case math.Abs(ratio) >= 99.95 && math.Abs(ratio) <= 100.05: + return " 100%" + case math.Abs(ratio) >= 1.0: + return fmt.Sprintf("%5.2f%%", ratio) + default: + return fmt.Sprintf("%5.2g%%", ratio) + } +} + +// unit includes a list of aliases representing a specific unit and a factor +// which one can multiple a value in the specified unit by to get the value +// in terms of the base unit. +type unit struct { + canonicalName string + aliases []string + factor float64 +} + +// unitType includes a list of units that are within the same category (i.e. +// memory or time units) and a default unit to use for this type of unit. +type unitType struct { + defaultUnit unit + units []unit +} + +// findByAlias returns the unit associated with the specified alias. It returns +// nil if the unit with such alias is not found. +func (ut unitType) findByAlias(alias string) *unit { + for _, u := range ut.units { + for _, a := range u.aliases { + if alias == a { + return &u + } + } + } + return nil +} + +// sniffUnit simpifies the input alias and returns the unit associated with the +// specified alias. It returns nil if the unit with such alias is not found. 
+func (ut unitType) sniffUnit(unit string) *unit { + unit = strings.ToLower(unit) + if len(unit) > 2 { + unit = strings.TrimSuffix(unit, "s") + } + return ut.findByAlias(unit) +} + +// autoScale takes in the value with units of the base unit and returns +// that value scaled to a reasonable unit if a reasonable unit is +// found. +func (ut unitType) autoScale(value float64) (float64, string, bool) { + var f float64 + var unit string + for _, u := range ut.units { + if u.factor >= f && (value/u.factor) >= 1.0 { + f = u.factor + unit = u.canonicalName + } + } + if f == 0 { + return 0, "", false + } + return value / f, unit, true +} + +// convertUnit converts a value from the fromUnit to the toUnit, autoscaling +// the value if the toUnit is "minimum" or "auto". If the fromUnit is not +// included in the unitType, then a false boolean will be returned. If the +// toUnit is not in the unitType, the value will be returned in terms of the +// default unitType. +func (ut unitType) convertUnit(value int64, fromUnitStr, toUnitStr string) (float64, string, bool) { + fromUnit := ut.sniffUnit(fromUnitStr) + if fromUnit == nil { + return 0, "", false + } + v := float64(value) * fromUnit.factor + if toUnitStr == "minimum" || toUnitStr == "auto" { + if v, u, ok := ut.autoScale(v); ok { + return v, u, true + } + return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true + } + toUnit := ut.sniffUnit(toUnitStr) + if toUnit == nil { + return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true + } + return v / toUnit.factor, toUnit.canonicalName, true +} + +var unitTypes = []unitType{{ + units: []unit{ + {"B", []string{"b", "byte"}, 1}, + {"kB", []string{"kb", "kbyte", "kilobyte"}, float64(1 << 10)}, + {"MB", []string{"mb", "mbyte", "megabyte"}, float64(1 << 20)}, + {"GB", []string{"gb", "gbyte", "gigabyte"}, float64(1 << 30)}, + {"TB", []string{"tb", "tbyte", "terabyte"}, float64(1 << 40)}, + {"PB", []string{"pb", "pbyte", "petabyte"}, float64(1 << 50)}, + }, + 
defaultUnit: unit{"B", []string{"b", "byte"}, 1}, +}, { + units: []unit{ + {"ns", []string{"ns", "nanosecond"}, float64(time.Nanosecond)}, + {"us", []string{"μs", "us", "microsecond"}, float64(time.Microsecond)}, + {"ms", []string{"ms", "millisecond"}, float64(time.Millisecond)}, + {"s", []string{"s", "sec", "second"}, float64(time.Second)}, + {"hrs", []string{"hour", "hr"}, float64(time.Hour)}, + }, + defaultUnit: unit{"s", []string{}, float64(time.Second)}, +}, { + units: []unit{ + {"n*GCU", []string{"nanogcu"}, 1e-9}, + {"u*GCU", []string{"microgcu"}, 1e-6}, + {"m*GCU", []string{"milligcu"}, 1e-3}, + {"GCU", []string{"gcu"}, 1}, + {"k*GCU", []string{"kilogcu"}, 1e3}, + {"M*GCU", []string{"megagcu"}, 1e6}, + {"G*GCU", []string{"gigagcu"}, 1e9}, + {"T*GCU", []string{"teragcu"}, 1e12}, + {"P*GCU", []string{"petagcu"}, 1e15}, + }, + defaultUnit: unit{"GCU", []string{}, 1.0}, +}} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go new file mode 100644 index 0000000..98eb1dd --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go @@ -0,0 +1,216 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package plugin defines the plugin implementations that the main pprof driver requires. 
+package plugin + +import ( + "io" + "net/http" + "regexp" + "time" + + "github.com/google/pprof/profile" +) + +// Options groups all the optional plugins into pprof. +type Options struct { + Writer Writer + Flagset FlagSet + Fetch Fetcher + Sym Symbolizer + Obj ObjTool + UI UI + + // HTTPServer is a function that should block serving http requests, + // including the handlers specified in args. If non-nil, pprof will + // invoke this function if necessary to provide a web interface. + // + // If HTTPServer is nil, pprof will use its own internal HTTP server. + // + // A common use for a custom HTTPServer is to provide custom + // authentication checks. + HTTPServer func(args *HTTPServerArgs) error + HTTPTransport http.RoundTripper +} + +// Writer provides a mechanism to write data under a certain name, +// typically a filename. +type Writer interface { + Open(name string) (io.WriteCloser, error) +} + +// A FlagSet creates and parses command-line flags. +// It is similar to the standard flag.FlagSet. +type FlagSet interface { + // Bool, Int, Float64, and String define new flags, + // like the functions of the same name in package flag. + Bool(name string, def bool, usage string) *bool + Int(name string, def int, usage string) *int + Float64(name string, def float64, usage string) *float64 + String(name string, def string, usage string) *string + + // StringList is similar to String but allows multiple values for a + // single flag + StringList(name string, def string, usage string) *[]*string + + // ExtraUsage returns any additional text that should be printed after the + // standard usage message. The extra usage message returned includes all text + // added with AddExtraUsage(). + // The typical use of ExtraUsage is to show any custom flags defined by the + // specific pprof plugins being used. + ExtraUsage() string + + // AddExtraUsage appends additional text to the end of the extra usage message. 
+ AddExtraUsage(eu string) + + // Parse initializes the flags with their values for this run + // and returns the non-flag command line arguments. + // If an unknown flag is encountered or there are no arguments, + // Parse should call usage and return nil. + Parse(usage func()) []string +} + +// A Fetcher reads and returns the profile named by src. src can be a +// local file path or a URL. duration and timeout are units specified +// by the end user, or 0 by default. duration refers to the length of +// the profile collection, if applicable, and timeout is the amount of +// time to wait for a profile before returning an error. Returns the +// fetched profile, the URL of the actual source of the profile, or an +// error. +type Fetcher interface { + Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) +} + +// A Symbolizer introduces symbol information into a profile. +type Symbolizer interface { + Symbolize(mode string, srcs MappingSources, prof *profile.Profile) error +} + +// MappingSources map each profile.Mapping to the source of the profile. +// The key is either Mapping.File or Mapping.BuildId. +type MappingSources map[string][]struct { + Source string // URL of the source the mapping was collected from + Start uint64 // delta applied to addresses from this source (to represent Merge adjustments) +} + +// An ObjTool inspects shared libraries and executable files. +type ObjTool interface { + // Open opens the named object file. If the object is a shared + // library, start/limit/offset are the addresses where it is mapped + // into memory in the address space being inspected. If the object + // is a linux kernel, relocationSymbol is the name of the symbol + // corresponding to the start address. + Open(file string, start, limit, offset uint64, relocationSymbol string) (ObjFile, error) + + // Disasm disassembles the named object file, starting at + // the start address and stopping at (before) the end address. 
+ Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error) +} + +// An Inst is a single instruction in an assembly listing. +type Inst struct { + Addr uint64 // virtual address of instruction + Text string // instruction text + Function string // function name + File string // source file + Line int // source line +} + +// An ObjFile is a single object file: a shared library or executable. +type ObjFile interface { + // Name returns the underlyinf file name, if available + Name() string + + // ObjAddr returns the objdump (linker) address corresponding to a runtime + // address, and an error. + ObjAddr(addr uint64) (uint64, error) + + // BuildID returns the GNU build ID of the file, or an empty string. + BuildID() string + + // SourceLine reports the source line information for a given + // address in the file. Due to inlining, the source line information + // is in general a list of positions representing a call stack, + // with the leaf function first. + SourceLine(addr uint64) ([]Frame, error) + + // Symbols returns a list of symbols in the object file. + // If r is not nil, Symbols restricts the list to symbols + // with names matching the regular expression. + // If addr is not zero, Symbols restricts the list to symbols + // containing that address. + Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error) + + // Close closes the file, releasing associated resources. + Close() error +} + +// A Frame describes a single line in a source file. +type Frame struct { + Func string // name of function + File string // source file name + Line int // line in file +} + +// A Sym describes a single symbol in an object file. +type Sym struct { + Name []string // names of symbol (many if symbol was dedup'ed) + File string // object file containing symbol + Start uint64 // start virtual address + End uint64 // virtual address of last byte in sym (Start+size-1) +} + +// A UI manages user interactions. 
+type UI interface { + // Read returns a line of text (a command) read from the user. + // prompt is printed before reading the command. + ReadLine(prompt string) (string, error) + + // Print shows a message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, Print writes to standard error. + // (Standard output is reserved for report data.) + Print(...interface{}) + + // PrintErr shows an error message to the user. + // It formats the text as fmt.Print would and adds a final \n if not already present. + // For line-based UI, PrintErr writes to standard error. + PrintErr(...interface{}) + + // IsTerminal returns whether the UI is known to be tied to an + // interactive terminal (as opposed to being redirected to a file). + IsTerminal() bool + + // WantBrowser indicates whether a browser should be opened with the -http option. + WantBrowser() bool + + // SetAutoComplete instructs the UI to call complete(cmd) to obtain + // the auto-completion of cmd, if the UI supports auto-completion at all. + SetAutoComplete(complete func(string) string) +} + +// HTTPServerArgs contains arguments needed by an HTTP server that +// is exporting a pprof web interface. +type HTTPServerArgs struct { + // Hostport contains the http server address (derived from flags). + Hostport string + + Host string // Host portion of Hostport + Port int // Port portion of Hostport + + // Handlers maps from URL paths to the handler to invoke to + // serve that path. + Handlers map[string]http.Handler +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go new file mode 100644 index 0000000..36ddf2e --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go @@ -0,0 +1,1321 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package report summarizes a performance profile into a +// human-readable report. +package report + +import ( + "fmt" + "io" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// Output formats. +const ( + Callgrind = iota + Comments + Dis + Dot + List + Proto + Raw + Tags + Text + TopProto + Traces + Tree + WebList +) + +// Options are the formatting and filtering options used to generate a +// profile. +type Options struct { + OutputFormat int + + CumSort bool + CallTree bool + DropNegative bool + CompactLabels bool + Ratio float64 + Title string + ProfileLabels []string + ActiveFilters []string + NumLabelUnits map[string]string + + NodeCount int + NodeFraction float64 + EdgeFraction float64 + + SampleValue func(s []int64) int64 + SampleMeanDivisor func(s []int64) int64 + SampleType string + SampleUnit string // Unit for the sample data from the profile. + + OutputUnit string // Units for data formatting in report. + + Symbol *regexp.Regexp // Symbols to include on disassembly report. + SourcePath string // Search path for source files. + TrimPath string // Paths to trim from source file paths. + + IntelSyntax bool // Whether or not to print assembly in Intel syntax. 
+} + +// Generate generates a report as directed by the Report. +func Generate(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + o := rpt.options + + switch o.OutputFormat { + case Comments: + return printComments(w, rpt) + case Dot: + return printDOT(w, rpt) + case Tree: + return printTree(w, rpt) + case Text: + return printText(w, rpt) + case Traces: + return printTraces(w, rpt) + case Raw: + fmt.Fprint(w, rpt.prof.String()) + return nil + case Tags: + return printTags(w, rpt) + case Proto: + return printProto(w, rpt) + case TopProto: + return printTopProto(w, rpt) + case Dis: + return printAssembly(w, rpt, obj) + case List: + return printSource(w, rpt) + case WebList: + return printWebSource(w, rpt, obj) + case Callgrind: + return printCallgrind(w, rpt) + } + return fmt.Errorf("unexpected output format") +} + +// newTrimmedGraph creates a graph for this report, trimmed according +// to the report options. +func (rpt *Report) newTrimmedGraph() (g *graph.Graph, origCount, droppedNodes, droppedEdges int) { + o := rpt.options + + // Build a graph and refine it. On each refinement step we must rebuild the graph from the samples, + // as the graph itself doesn't contain enough information to preserve full precision. + visualMode := o.OutputFormat == Dot + cumSort := o.CumSort + + // The call_tree option is only honored when generating visual representations of the callgraph. + callTree := o.CallTree && (o.OutputFormat == Dot || o.OutputFormat == Callgrind) + + // First step: Build complete graph to identify low frequency nodes, based on their cum weight. + g = rpt.newGraph(nil) + totalValue, _ := g.Nodes.Sum() + nodeCutoff := abs64(int64(float64(totalValue) * o.NodeFraction)) + edgeCutoff := abs64(int64(float64(totalValue) * o.EdgeFraction)) + + // Filter out nodes with cum value below nodeCutoff. 
+ if nodeCutoff > 0 { + if callTree { + if nodesKept := g.DiscardLowFrequencyNodePtrs(nodeCutoff); len(g.Nodes) != len(nodesKept) { + droppedNodes = len(g.Nodes) - len(nodesKept) + g.TrimTree(nodesKept) + } + } else { + if nodesKept := g.DiscardLowFrequencyNodes(nodeCutoff); len(g.Nodes) != len(nodesKept) { + droppedNodes = len(g.Nodes) - len(nodesKept) + g = rpt.newGraph(nodesKept) + } + } + } + origCount = len(g.Nodes) + + // Second step: Limit the total number of nodes. Apply specialized heuristics to improve + // visualization when generating dot output. + g.SortNodes(cumSort, visualMode) + if nodeCount := o.NodeCount; nodeCount > 0 { + // Remove low frequency tags and edges as they affect selection. + g.TrimLowFrequencyTags(nodeCutoff) + g.TrimLowFrequencyEdges(edgeCutoff) + if callTree { + if nodesKept := g.SelectTopNodePtrs(nodeCount, visualMode); len(g.Nodes) != len(nodesKept) { + g.TrimTree(nodesKept) + g.SortNodes(cumSort, visualMode) + } + } else { + if nodesKept := g.SelectTopNodes(nodeCount, visualMode); len(g.Nodes) != len(nodesKept) { + g = rpt.newGraph(nodesKept) + g.SortNodes(cumSort, visualMode) + } + } + } + + // Final step: Filter out low frequency tags and edges, and remove redundant edges that clutter + // the graph. + g.TrimLowFrequencyTags(nodeCutoff) + droppedEdges = g.TrimLowFrequencyEdges(edgeCutoff) + if visualMode { + g.RemoveRedundantEdges() + } + return +} + +func (rpt *Report) selectOutputUnit(g *graph.Graph) { + o := rpt.options + + // Select best unit for profile output. 
+ // Find the appropriate units for the smallest non-zero sample + if o.OutputUnit != "minimum" || len(g.Nodes) == 0 { + return + } + var minValue int64 + + for _, n := range g.Nodes { + nodeMin := abs64(n.FlatValue()) + if nodeMin == 0 { + nodeMin = abs64(n.CumValue()) + } + if nodeMin > 0 && (minValue == 0 || nodeMin < minValue) { + minValue = nodeMin + } + } + maxValue := rpt.total + if minValue == 0 { + minValue = maxValue + } + + if r := o.Ratio; r > 0 && r != 1 { + minValue = int64(float64(minValue) * r) + maxValue = int64(float64(maxValue) * r) + } + + _, minUnit := measurement.Scale(minValue, o.SampleUnit, "minimum") + _, maxUnit := measurement.Scale(maxValue, o.SampleUnit, "minimum") + + unit := minUnit + if minUnit != maxUnit && minValue*100 < maxValue && o.OutputFormat != Callgrind { + // Minimum and maximum values have different units. Scale + // minimum by 100 to use larger units, allowing minimum value to + // be scaled down to 0.01, except for callgrind reports since + // they can only represent integer values. + _, unit = measurement.Scale(100*minValue, o.SampleUnit, "minimum") + } + + if unit != "" { + o.OutputUnit = unit + } else { + o.OutputUnit = o.SampleUnit + } +} + +// newGraph creates a new graph for this report. If nodes is non-nil, +// only nodes whose info matches are included. Otherwise, all nodes +// are included, without trimming. +func (rpt *Report) newGraph(nodes graph.NodeSet) *graph.Graph { + o := rpt.options + + // Clean up file paths using heuristics. + prof := rpt.prof + for _, f := range prof.Function { + f.Filename = trimPath(f.Filename, o.TrimPath, o.SourcePath) + } + // Removes all numeric tags except for the bytes tag prior + // to making graph. 
+ // TODO: modify to select first numeric tag if no bytes tag + for _, s := range prof.Sample { + numLabels := make(map[string][]int64, len(s.NumLabel)) + numUnits := make(map[string][]string, len(s.NumLabel)) + for k, vs := range s.NumLabel { + if k == "bytes" { + unit := o.NumLabelUnits[k] + numValues := make([]int64, len(vs)) + numUnit := make([]string, len(vs)) + for i, v := range vs { + numValues[i] = v + numUnit[i] = unit + } + numLabels[k] = append(numLabels[k], numValues...) + numUnits[k] = append(numUnits[k], numUnit...) + } + } + s.NumLabel = numLabels + s.NumUnit = numUnits + } + + // Remove label marking samples from the base profiles, so it does not appear + // as a nodelet in the graph view. + prof.RemoveLabel("pprof::base") + + formatTag := func(v int64, key string) string { + return measurement.ScaledLabel(v, key, o.OutputUnit) + } + + gopt := &graph.Options{ + SampleValue: o.SampleValue, + SampleMeanDivisor: o.SampleMeanDivisor, + FormatTag: formatTag, + CallTree: o.CallTree && (o.OutputFormat == Dot || o.OutputFormat == Callgrind), + DropNegative: o.DropNegative, + KeptNodes: nodes, + } + + // Only keep binary names for disassembly-based reports, otherwise + // remove it to allow merging of functions across binaries. + switch o.OutputFormat { + case Raw, List, WebList, Dis, Callgrind: + gopt.ObjNames = true + } + + return graph.New(rpt.prof, gopt) +} + +// printProto writes the incoming proto via thw writer w. +// If the divide_by option has been specified, samples are scaled appropriately. +func printProto(w io.Writer, rpt *Report) error { + p, o := rpt.prof, rpt.options + + // Apply the sample ratio to all samples before saving the profile. + if r := o.Ratio; r > 0 && r != 1 { + for _, sample := range p.Sample { + for i, v := range sample.Value { + sample.Value[i] = int64(float64(v) * r) + } + } + } + return p.Write(w) +} + +// printTopProto writes a list of the hottest routines in a profile as a profile.proto. 
+func printTopProto(w io.Writer, rpt *Report) error { + p := rpt.prof + o := rpt.options + g, _, _, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + out := profile.Profile{ + SampleType: []*profile.ValueType{ + {Type: "cum", Unit: o.OutputUnit}, + {Type: "flat", Unit: o.OutputUnit}, + }, + TimeNanos: p.TimeNanos, + DurationNanos: p.DurationNanos, + PeriodType: p.PeriodType, + Period: p.Period, + } + functionMap := make(functionMap) + for i, n := range g.Nodes { + f, added := functionMap.findOrAdd(n.Info) + if added { + out.Function = append(out.Function, f) + } + flat, cum := n.FlatValue(), n.CumValue() + l := &profile.Location{ + ID: uint64(i + 1), + Address: n.Info.Address, + Line: []profile.Line{ + { + Line: int64(n.Info.Lineno), + Function: f, + }, + }, + } + + fv, _ := measurement.Scale(flat, o.SampleUnit, o.OutputUnit) + cv, _ := measurement.Scale(cum, o.SampleUnit, o.OutputUnit) + s := &profile.Sample{ + Location: []*profile.Location{l}, + Value: []int64{int64(cv), int64(fv)}, + } + out.Location = append(out.Location, l) + out.Sample = append(out.Sample, s) + } + + return out.Write(w) +} + +type functionMap map[string]*profile.Function + +// findOrAdd takes a node representing a function, adds the function +// represented by the node to the map if the function is not already present, +// and returns the function the node represents. This also returns a boolean, +// which is true if the function was added and false otherwise. +func (fm functionMap) findOrAdd(ni graph.NodeInfo) (*profile.Function, bool) { + fName := fmt.Sprintf("%q%q%q%d", ni.Name, ni.OrigName, ni.File, ni.StartLine) + + if f := fm[fName]; f != nil { + return f, false + } + + f := &profile.Function{ + ID: uint64(len(fm) + 1), + Name: ni.Name, + SystemName: ni.OrigName, + Filename: ni.File, + StartLine: int64(ni.StartLine), + } + fm[fName] = f + return f, true +} + +// printAssembly prints an annotated assembly listing. 
+func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + return PrintAssembly(w, rpt, obj, -1) +} + +// PrintAssembly prints annotated disassembly of rpt to w. +func PrintAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFuncs int) error { + o := rpt.options + prof := rpt.prof + + g := rpt.newGraph(nil) + + // If the regexp source can be parsed as an address, also match + // functions that land on that address. + var address *uint64 + if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil { + address = &hex + } + + fmt.Fprintln(w, "Total:", rpt.formatValue(rpt.total)) + symbols := symbolsFromBinaries(prof, g, o.Symbol, address, obj) + symNodes := nodesPerSymbol(g.Nodes, symbols) + + // Sort for printing. + var syms []*objSymbol + for s := range symNodes { + syms = append(syms, s) + } + byName := func(a, b *objSymbol) bool { + if na, nb := a.sym.Name[0], b.sym.Name[0]; na != nb { + return na < nb + } + return a.sym.Start < b.sym.Start + } + if maxFuncs < 0 { + sort.Sort(orderSyms{syms, byName}) + } else { + byFlatSum := func(a, b *objSymbol) bool { + suma, _ := symNodes[a].Sum() + sumb, _ := symNodes[b].Sum() + if suma != sumb { + return suma > sumb + } + return byName(a, b) + } + sort.Sort(orderSyms{syms, byFlatSum}) + if len(syms) > maxFuncs { + syms = syms[:maxFuncs] + } + } + + if len(syms) == 0 { + return fmt.Errorf("no matches found for regexp: %s", o.Symbol) + } + + // Correlate the symbols from the binary with the profile samples. + for _, s := range syms { + sns := symNodes[s] + + // Gather samples for this symbol. + flatSum, cumSum := sns.Sum() + + // Get the function assembly. 
+ insts, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End, o.IntelSyntax) + if err != nil { + return err + } + + ns := annotateAssembly(insts, sns, s.file) + + fmt.Fprintf(w, "ROUTINE ======================== %s\n", s.sym.Name[0]) + for _, name := range s.sym.Name[1:] { + fmt.Fprintf(w, " AKA ======================== %s\n", name) + } + fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n", + rpt.formatValue(flatSum), rpt.formatValue(cumSum), + measurement.Percentage(cumSum, rpt.total)) + + function, file, line := "", "", 0 + for _, n := range ns { + locStr := "" + // Skip loc information if it hasn't changed from previous instruction. + if n.function != function || n.file != file || n.line != line { + function, file, line = n.function, n.file, n.line + if n.function != "" { + locStr = n.function + " " + } + if n.file != "" { + locStr += n.file + if n.line != 0 { + locStr += fmt.Sprintf(":%d", n.line) + } + } + } + switch { + case locStr == "": + // No location info, just print the instruction. + fmt.Fprintf(w, "%10s %10s %10x: %s\n", + valueOrDot(n.flatValue(), rpt), + valueOrDot(n.cumValue(), rpt), + n.address, n.instruction, + ) + case len(n.instruction) < 40: + // Short instruction, print loc on the same line. + fmt.Fprintf(w, "%10s %10s %10x: %-40s;%s\n", + valueOrDot(n.flatValue(), rpt), + valueOrDot(n.cumValue(), rpt), + n.address, n.instruction, + locStr, + ) + default: + // Long instruction, print loc on a separate line. + fmt.Fprintf(w, "%74s;%s\n", "", locStr) + fmt.Fprintf(w, "%10s %10s %10x: %s\n", + valueOrDot(n.flatValue(), rpt), + valueOrDot(n.cumValue(), rpt), + n.address, n.instruction, + ) + } + } + } + return nil +} + +// symbolsFromBinaries examines the binaries listed on the profile +// that have associated samples, and identifies symbols matching rx. 
+func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regexp, address *uint64, obj plugin.ObjTool) []*objSymbol { + hasSamples := make(map[string]bool) + // Only examine mappings that have samples that match the + // regexp. This is an optimization to speed up pprof. + for _, n := range g.Nodes { + if name := n.Info.PrintableName(); rx.MatchString(name) && n.Info.Objfile != "" { + hasSamples[n.Info.Objfile] = true + } + } + + // Walk all mappings looking for matching functions with samples. + var objSyms []*objSymbol + for _, m := range prof.Mapping { + if !hasSamples[m.File] { + if address == nil || !(m.Start <= *address && *address <= m.Limit) { + continue + } + } + + f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol) + if err != nil { + fmt.Printf("%v\n", err) + continue + } + + // Find symbols in this binary matching the user regexp. + var addr uint64 + if address != nil { + addr = *address + } + msyms, err := f.Symbols(rx, addr) + f.Close() + if err != nil { + continue + } + for _, ms := range msyms { + objSyms = append(objSyms, + &objSymbol{ + sym: ms, + file: f, + }, + ) + } + } + + return objSyms +} + +// objSym represents a symbol identified from a binary. It includes +// the SymbolInfo from the disasm package and the base that must be +// added to correspond to sample addresses +type objSymbol struct { + sym *plugin.Sym + file plugin.ObjFile +} + +// orderSyms is a wrapper type to sort []*objSymbol by a supplied comparator. +type orderSyms struct { + v []*objSymbol + less func(a, b *objSymbol) bool +} + +func (o orderSyms) Len() int { return len(o.v) } +func (o orderSyms) Less(i, j int) bool { return o.less(o.v[i], o.v[j]) } +func (o orderSyms) Swap(i, j int) { o.v[i], o.v[j] = o.v[j], o.v[i] } + +// nodesPerSymbol classifies nodes into a group of symbols. 
+func nodesPerSymbol(ns graph.Nodes, symbols []*objSymbol) map[*objSymbol]graph.Nodes { + symNodes := make(map[*objSymbol]graph.Nodes) + for _, s := range symbols { + // Gather samples for this symbol. + for _, n := range ns { + if address, err := s.file.ObjAddr(n.Info.Address); err == nil && address >= s.sym.Start && address < s.sym.End { + symNodes[s] = append(symNodes[s], n) + } + } + } + return symNodes +} + +type assemblyInstruction struct { + address uint64 + instruction string + function string + file string + line int + flat, cum int64 + flatDiv, cumDiv int64 + startsBlock bool + inlineCalls []callID +} + +type callID struct { + file string + line int +} + +func (a *assemblyInstruction) flatValue() int64 { + if a.flatDiv != 0 { + return a.flat / a.flatDiv + } + return a.flat +} + +func (a *assemblyInstruction) cumValue() int64 { + if a.cumDiv != 0 { + return a.cum / a.cumDiv + } + return a.cum +} + +// annotateAssembly annotates a set of assembly instructions with a +// set of samples. It returns a set of nodes to display. base is an +// offset to adjust the sample addresses. +func annotateAssembly(insts []plugin.Inst, samples graph.Nodes, file plugin.ObjFile) []assemblyInstruction { + // Add end marker to simplify printing loop. + insts = append(insts, plugin.Inst{ + Addr: ^uint64(0), + }) + + // Ensure samples are sorted by address. + samples.Sort(graph.AddressOrder) + + s := 0 + asm := make([]assemblyInstruction, 0, len(insts)) + for ix, in := range insts[:len(insts)-1] { + n := assemblyInstruction{ + address: in.Addr, + instruction: in.Text, + function: in.Function, + line: in.Line, + } + if in.File != "" { + n.file = filepath.Base(in.File) + } + + // Sum all the samples until the next instruction (to account + // for samples attributed to the middle of an instruction). 
+ for next := insts[ix+1].Addr; s < len(samples); s++ { + if addr, err := file.ObjAddr(samples[s].Info.Address); err != nil || addr >= next { + break + } + sample := samples[s] + n.flatDiv += sample.FlatDiv + n.flat += sample.Flat + n.cumDiv += sample.CumDiv + n.cum += sample.Cum + if f := sample.Info.File; f != "" && n.file == "" { + n.file = filepath.Base(f) + } + if ln := sample.Info.Lineno; ln != 0 && n.line == 0 { + n.line = ln + } + if f := sample.Info.Name; f != "" && n.function == "" { + n.function = f + } + } + asm = append(asm, n) + } + + return asm +} + +// valueOrDot formats a value according to a report, intercepting zero +// values. +func valueOrDot(value int64, rpt *Report) string { + if value == 0 { + return "." + } + return rpt.formatValue(value) +} + +// printTags collects all tags referenced in the profile and prints +// them in a sorted table. +func printTags(w io.Writer, rpt *Report) error { + p := rpt.prof + + o := rpt.options + formatTag := func(v int64, key string) string { + return measurement.ScaledLabel(v, key, o.OutputUnit) + } + + // Hashtable to keep accumulate tags as key,value,count. 
+ tagMap := make(map[string]map[string]int64) + for _, s := range p.Sample { + for key, vals := range s.Label { + for _, val := range vals { + valueMap, ok := tagMap[key] + if !ok { + valueMap = make(map[string]int64) + tagMap[key] = valueMap + } + valueMap[val] += o.SampleValue(s.Value) + } + } + for key, vals := range s.NumLabel { + unit := o.NumLabelUnits[key] + for _, nval := range vals { + val := formatTag(nval, unit) + valueMap, ok := tagMap[key] + if !ok { + valueMap = make(map[string]int64) + tagMap[key] = valueMap + } + valueMap[val] += o.SampleValue(s.Value) + } + } + } + + tagKeys := make([]*graph.Tag, 0, len(tagMap)) + for key := range tagMap { + tagKeys = append(tagKeys, &graph.Tag{Name: key}) + } + tabw := tabwriter.NewWriter(w, 0, 0, 1, ' ', tabwriter.AlignRight) + for _, tagKey := range graph.SortTags(tagKeys, true) { + var total int64 + key := tagKey.Name + tags := make([]*graph.Tag, 0, len(tagMap[key])) + for t, c := range tagMap[key] { + total += c + tags = append(tags, &graph.Tag{Name: t, Flat: c}) + } + + f, u := measurement.Scale(total, o.SampleUnit, o.OutputUnit) + fmt.Fprintf(tabw, "%s:\t Total %.1f%s\n", key, f, u) + for _, t := range graph.SortTags(tags, true) { + f, u := measurement.Scale(t.FlatValue(), o.SampleUnit, o.OutputUnit) + if total > 0 { + fmt.Fprintf(tabw, " \t%.1f%s (%s):\t %s\n", f, u, measurement.Percentage(t.FlatValue(), total), t.Name) + } else { + fmt.Fprintf(tabw, " \t%.1f%s:\t %s\n", f, u, t.Name) + } + } + fmt.Fprintln(tabw) + } + return tabw.Flush() +} + +// printComments prints all freeform comments in the profile. +func printComments(w io.Writer, rpt *Report) error { + p := rpt.prof + + for _, c := range p.Comments { + fmt.Fprintln(w, c) + } + return nil +} + +// TextItem holds a single text report entry. 
+type TextItem struct { + Name string + InlineLabel string // Not empty if inlined + Flat, Cum int64 // Raw values + FlatFormat, CumFormat string // Formatted values +} + +// TextItems returns a list of text items from the report and a list +// of labels that describe the report. +func TextItems(rpt *Report) ([]TextItem, []string) { + g, origCount, droppedNodes, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + labels := reportLabels(rpt, g, origCount, droppedNodes, 0, false) + + var items []TextItem + var flatSum int64 + for _, n := range g.Nodes { + name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue() + + var inline, noinline bool + for _, e := range n.In { + if e.Inline { + inline = true + } else { + noinline = true + } + } + + var inl string + if inline { + if noinline { + inl = "(partial-inline)" + } else { + inl = "(inline)" + } + } + + flatSum += flat + items = append(items, TextItem{ + Name: name, + InlineLabel: inl, + Flat: flat, + Cum: cum, + FlatFormat: rpt.formatValue(flat), + CumFormat: rpt.formatValue(cum), + }) + } + return items, labels +} + +// printText prints a flat text report for a profile. +func printText(w io.Writer, rpt *Report) error { + items, labels := TextItems(rpt) + fmt.Fprintln(w, strings.Join(labels, "\n")) + fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n", + "flat", "flat", "sum", "cum", "cum") + var flatSum int64 + for _, item := range items { + inl := item.InlineLabel + if inl != "" { + inl = " " + inl + } + flatSum += item.Flat + fmt.Fprintf(w, "%10s %s %s %10s %s %s%s\n", + item.FlatFormat, measurement.Percentage(item.Flat, rpt.total), + measurement.Percentage(flatSum, rpt.total), + item.CumFormat, measurement.Percentage(item.Cum, rpt.total), + item.Name, inl) + } + return nil +} + +// printTraces prints all traces from a profile. 
+func printTraces(w io.Writer, rpt *Report) error { + fmt.Fprintln(w, strings.Join(ProfileLabels(rpt), "\n")) + + prof := rpt.prof + o := rpt.options + + const separator = "-----------+-------------------------------------------------------" + + _, locations := graph.CreateNodes(prof, &graph.Options{}) + for _, sample := range prof.Sample { + type stk struct { + *graph.NodeInfo + inline bool + } + var stack []stk + for _, loc := range sample.Location { + nodes := locations[loc.ID] + for i, n := range nodes { + // The inline flag may be inaccurate if 'show' or 'hide' filter is + // used. See https://github.com/google/pprof/issues/511. + inline := i != len(nodes)-1 + stack = append(stack, stk{&n.Info, inline}) + } + } + + if len(stack) == 0 { + continue + } + + fmt.Fprintln(w, separator) + // Print any text labels for the sample. + var labels []string + for s, vs := range sample.Label { + labels = append(labels, fmt.Sprintf("%10s: %s\n", s, strings.Join(vs, " "))) + } + sort.Strings(labels) + fmt.Fprint(w, strings.Join(labels, "")) + + // Print any numeric labels for the sample + var numLabels []string + for key, vals := range sample.NumLabel { + unit := o.NumLabelUnits[key] + numValues := make([]string, len(vals)) + for i, vv := range vals { + numValues[i] = measurement.Label(vv, unit) + } + numLabels = append(numLabels, fmt.Sprintf("%10s: %s\n", key, strings.Join(numValues, " "))) + } + sort.Strings(numLabels) + fmt.Fprint(w, strings.Join(numLabels, "")) + + var d, v int64 + v = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + d = o.SampleMeanDivisor(sample.Value) + } + // Print call stack. + if d != 0 { + v = v / d + } + for i, s := range stack { + var vs, inline string + if i == 0 { + vs = rpt.formatValue(v) + } + if s.inline { + inline = " (inline)" + } + fmt.Fprintf(w, "%10s %s%s\n", vs, s.PrintableName(), inline) + } + } + fmt.Fprintln(w, separator) + return nil +} + +// printCallgrind prints a graph for a profile on callgrind format. 
+func printCallgrind(w io.Writer, rpt *Report) error { + o := rpt.options + rpt.options.NodeFraction = 0 + rpt.options.EdgeFraction = 0 + rpt.options.NodeCount = 0 + + g, _, _, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + nodeNames := getDisambiguatedNames(g) + + fmt.Fprintln(w, "positions: instr line") + fmt.Fprintln(w, "events:", o.SampleType+"("+o.OutputUnit+")") + + objfiles := make(map[string]int) + files := make(map[string]int) + names := make(map[string]int) + + // prevInfo points to the previous NodeInfo. + // It is used to group cost lines together as much as possible. + var prevInfo *graph.NodeInfo + for _, n := range g.Nodes { + if prevInfo == nil || n.Info.Objfile != prevInfo.Objfile || n.Info.File != prevInfo.File || n.Info.Name != prevInfo.Name { + fmt.Fprintln(w) + fmt.Fprintln(w, "ob="+callgrindName(objfiles, n.Info.Objfile)) + fmt.Fprintln(w, "fl="+callgrindName(files, n.Info.File)) + fmt.Fprintln(w, "fn="+callgrindName(names, n.Info.Name)) + } + + addr := callgrindAddress(prevInfo, n.Info.Address) + sv, _ := measurement.Scale(n.FlatValue(), o.SampleUnit, o.OutputUnit) + fmt.Fprintf(w, "%s %d %d\n", addr, n.Info.Lineno, int64(sv)) + + // Print outgoing edges. + for _, out := range n.Out.Sort() { + c, _ := measurement.Scale(out.Weight, o.SampleUnit, o.OutputUnit) + callee := out.Dest + fmt.Fprintln(w, "cfl="+callgrindName(files, callee.Info.File)) + fmt.Fprintln(w, "cfn="+callgrindName(names, nodeNames[callee])) + // pprof doesn't have a flat weight for a call, leave as 0. + fmt.Fprintf(w, "calls=0 %s %d\n", callgrindAddress(prevInfo, callee.Info.Address), callee.Info.Lineno) + // TODO: This address may be in the middle of a call + // instruction. It would be best to find the beginning + // of the instruction, but the tools seem to handle + // this OK. 
+ fmt.Fprintf(w, "* * %d\n", int64(c)) + } + + prevInfo = &n.Info + } + + return nil +} + +// getDisambiguatedNames returns a map from each node in the graph to +// the name to use in the callgrind output. Callgrind merges all +// functions with the same [file name, function name]. Add a [%d/n] +// suffix to disambiguate nodes with different values of +// node.Function, which we want to keep separate. In particular, this +// affects graphs created with --call_tree, where nodes from different +// contexts are associated to different Functions. +func getDisambiguatedNames(g *graph.Graph) map[*graph.Node]string { + nodeName := make(map[*graph.Node]string, len(g.Nodes)) + + type names struct { + file, function string + } + + // nameFunctionIndex maps the callgrind names (filename, function) + // to the node.Function values found for that name, and each + // node.Function value to a sequential index to be used on the + // disambiguated name. + nameFunctionIndex := make(map[names]map[*graph.Node]int) + for _, n := range g.Nodes { + nm := names{n.Info.File, n.Info.Name} + p, ok := nameFunctionIndex[nm] + if !ok { + p = make(map[*graph.Node]int) + nameFunctionIndex[nm] = p + } + if _, ok := p[n.Function]; !ok { + p[n.Function] = len(p) + } + } + + for _, n := range g.Nodes { + nm := names{n.Info.File, n.Info.Name} + nodeName[n] = n.Info.Name + if p := nameFunctionIndex[nm]; len(p) > 1 { + // If there is more than one function, add suffix to disambiguate. + nodeName[n] += fmt.Sprintf(" [%d/%d]", p[n.Function]+1, len(p)) + } + } + return nodeName +} + +// callgrindName implements the callgrind naming compression scheme. +// For names not previously seen returns "(N) name", where N is a +// unique index. For names previously seen returns "(N)" where N is +// the index returned the first time. 
+func callgrindName(names map[string]int, name string) string { + if name == "" { + return "" + } + if id, ok := names[name]; ok { + return fmt.Sprintf("(%d)", id) + } + id := len(names) + 1 + names[name] = id + return fmt.Sprintf("(%d) %s", id, name) +} + +// callgrindAddress implements the callgrind subposition compression scheme if +// possible. If prevInfo != nil, it contains the previous address. The current +// address can be given relative to the previous address, with an explicit +/- +// to indicate it is relative, or * for the same address. +func callgrindAddress(prevInfo *graph.NodeInfo, curr uint64) string { + abs := fmt.Sprintf("%#x", curr) + if prevInfo == nil { + return abs + } + + prev := prevInfo.Address + if prev == curr { + return "*" + } + + diff := int64(curr - prev) + relative := fmt.Sprintf("%+d", diff) + + // Only bother to use the relative address if it is actually shorter. + if len(relative) < len(abs) { + return relative + } + + return abs +} + +// printTree prints a tree-based report in text form. +func printTree(w io.Writer, rpt *Report) error { + const separator = "----------------------------------------------------------+-------------" + const legend = " flat flat% sum% cum cum% calls calls% + context " + + g, origCount, droppedNodes, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + fmt.Fprintln(w, strings.Join(reportLabels(rpt, g, origCount, droppedNodes, 0, false), "\n")) + + fmt.Fprintln(w, separator) + fmt.Fprintln(w, legend) + var flatSum int64 + + rx := rpt.options.Symbol + matched := 0 + for _, n := range g.Nodes { + name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue() + + // Skip any entries that do not match the regexp (for the "peek" command). + if rx != nil && !rx.MatchString(name) { + continue + } + matched++ + + fmt.Fprintln(w, separator) + // Print incoming edges. 
+ inEdges := n.In.Sort() + for _, in := range inEdges { + var inline string + if in.Inline { + inline = " (inline)" + } + fmt.Fprintf(w, "%50s %s | %s%s\n", rpt.formatValue(in.Weight), + measurement.Percentage(in.Weight, cum), in.Src.Info.PrintableName(), inline) + } + + // Print current node. + flatSum += flat + fmt.Fprintf(w, "%10s %s %s %10s %s | %s\n", + rpt.formatValue(flat), + measurement.Percentage(flat, rpt.total), + measurement.Percentage(flatSum, rpt.total), + rpt.formatValue(cum), + measurement.Percentage(cum, rpt.total), + name) + + // Print outgoing edges. + outEdges := n.Out.Sort() + for _, out := range outEdges { + var inline string + if out.Inline { + inline = " (inline)" + } + fmt.Fprintf(w, "%50s %s | %s%s\n", rpt.formatValue(out.Weight), + measurement.Percentage(out.Weight, cum), out.Dest.Info.PrintableName(), inline) + } + } + if len(g.Nodes) > 0 { + fmt.Fprintln(w, separator) + } + if rx != nil && matched == 0 { + return fmt.Errorf("no matches found for regexp: %s", rx) + } + return nil +} + +// GetDOT returns a graph suitable for dot processing along with some +// configuration information. +func GetDOT(rpt *Report) (*graph.Graph, *graph.DotConfig) { + g, origCount, droppedNodes, droppedEdges := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + labels := reportLabels(rpt, g, origCount, droppedNodes, droppedEdges, true) + + c := &graph.DotConfig{ + Title: rpt.options.Title, + Labels: labels, + FormatValue: rpt.formatValue, + Total: rpt.total, + } + return g, c +} + +// printDOT prints an annotated callgraph in DOT format. +func printDOT(w io.Writer, rpt *Report) error { + g, c := GetDOT(rpt) + graph.ComposeDot(w, g, &graph.DotAttributes{}, c) + return nil +} + +// ProfileLabels returns printable labels for a profile. 
+func ProfileLabels(rpt *Report) []string { + label := []string{} + prof := rpt.prof + o := rpt.options + if len(prof.Mapping) > 0 { + if prof.Mapping[0].File != "" { + label = append(label, "File: "+filepath.Base(prof.Mapping[0].File)) + } + if prof.Mapping[0].BuildID != "" { + label = append(label, "Build ID: "+prof.Mapping[0].BuildID) + } + } + // Only include comments that do not start with '#'. + for _, c := range prof.Comments { + if !strings.HasPrefix(c, "#") { + label = append(label, c) + } + } + if o.SampleType != "" { + label = append(label, "Type: "+o.SampleType) + } + if prof.TimeNanos != 0 { + const layout = "Jan 2, 2006 at 3:04pm (MST)" + label = append(label, "Time: "+time.Unix(0, prof.TimeNanos).Format(layout)) + } + if prof.DurationNanos != 0 { + duration := measurement.Label(prof.DurationNanos, "nanoseconds") + totalNanos, totalUnit := measurement.Scale(rpt.total, o.SampleUnit, "nanoseconds") + var ratio string + if totalUnit == "ns" && totalNanos != 0 { + ratio = "(" + measurement.Percentage(int64(totalNanos), prof.DurationNanos) + ")" + } + label = append(label, fmt.Sprintf("Duration: %s, Total samples = %s %s", duration, rpt.formatValue(rpt.total), ratio)) + } + return label +} + +// reportLabels returns printable labels for a report. Includes +// profileLabels. +func reportLabels(rpt *Report, g *graph.Graph, origCount, droppedNodes, droppedEdges int, fullHeaders bool) []string { + nodeFraction := rpt.options.NodeFraction + edgeFraction := rpt.options.EdgeFraction + nodeCount := len(g.Nodes) + + var label []string + if len(rpt.options.ProfileLabels) > 0 { + label = append(label, rpt.options.ProfileLabels...) + } else if fullHeaders || !rpt.options.CompactLabels { + label = ProfileLabels(rpt) + } + + var flatSum int64 + for _, n := range g.Nodes { + flatSum = flatSum + n.FlatValue() + } + + if len(rpt.options.ActiveFilters) > 0 { + activeFilters := legendActiveFilters(rpt.options.ActiveFilters) + label = append(label, activeFilters...) 
+ } + + label = append(label, fmt.Sprintf("Showing nodes accounting for %s, %s of %s total", rpt.formatValue(flatSum), strings.TrimSpace(measurement.Percentage(flatSum, rpt.total)), rpt.formatValue(rpt.total))) + + if rpt.total != 0 { + if droppedNodes > 0 { + label = append(label, genLabel(droppedNodes, "node", "cum", + rpt.formatValue(abs64(int64(float64(rpt.total)*nodeFraction))))) + } + if droppedEdges > 0 { + label = append(label, genLabel(droppedEdges, "edge", "freq", + rpt.formatValue(abs64(int64(float64(rpt.total)*edgeFraction))))) + } + if nodeCount > 0 && nodeCount < origCount { + label = append(label, fmt.Sprintf("Showing top %d nodes out of %d", + nodeCount, origCount)) + } + } + + // Help new users understand the graph. + // A new line is intentionally added here to better show this message. + if fullHeaders { + label = append(label, "\nSee https://git.io/JfYMW for how to read the graph") + } + + return label +} + +func legendActiveFilters(activeFilters []string) []string { + legendActiveFilters := make([]string, len(activeFilters)+1) + legendActiveFilters[0] = "Active filters:" + for i, s := range activeFilters { + if len(s) > 80 { + s = s[:80] + "…" + } + legendActiveFilters[i+1] = " " + s + } + return legendActiveFilters +} + +func genLabel(d int, n, l, f string) string { + if d > 1 { + n = n + "s" + } + return fmt.Sprintf("Dropped %d %s (%s <= %s)", d, n, l, f) +} + +// New builds a new report indexing the sample values interpreting the +// samples with the provided function. +func New(prof *profile.Profile, o *Options) *Report { + format := func(v int64) string { + if r := o.Ratio; r > 0 && r != 1 { + fv := float64(v) * r + v = int64(fv) + } + return measurement.ScaledLabel(v, o.SampleUnit, o.OutputUnit) + } + return &Report{prof, computeTotal(prof, o.SampleValue, o.SampleMeanDivisor), + o, format} +} + +// NewDefault builds a new report indexing the last sample value +// available. 
+func NewDefault(prof *profile.Profile, options Options) *Report { + index := len(prof.SampleType) - 1 + o := &options + if o.Title == "" && len(prof.Mapping) > 0 && prof.Mapping[0].File != "" { + o.Title = filepath.Base(prof.Mapping[0].File) + } + o.SampleType = prof.SampleType[index].Type + o.SampleUnit = strings.ToLower(prof.SampleType[index].Unit) + o.SampleValue = func(v []int64) int64 { + return v[index] + } + return New(prof, o) +} + +// computeTotal computes the sum of the absolute value of all sample values. +// If any samples have label indicating they belong to the diff base, then the +// total will only include samples with that label. +func computeTotal(prof *profile.Profile, value, meanDiv func(v []int64) int64) int64 { + var div, total, diffDiv, diffTotal int64 + for _, sample := range prof.Sample { + var d, v int64 + v = value(sample.Value) + if meanDiv != nil { + d = meanDiv(sample.Value) + } + if v < 0 { + v = -v + } + total += v + div += d + if sample.DiffBaseSample() { + diffTotal += v + diffDiv += d + } + } + if diffTotal > 0 { + total = diffTotal + div = diffDiv + } + if div != 0 { + return total / div + } + return total +} + +// Report contains the data and associated routines to extract a +// report from a profile. +type Report struct { + prof *profile.Profile + total int64 + options *Options + formatValue func(int64) string +} + +// Total returns the total number of samples in a report. +func (rpt *Report) Total() int64 { return rpt.total } + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go new file mode 100644 index 0000000..d8b4395 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go @@ -0,0 +1,1114 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +// This file contains routines related to the generation of annotated +// source listings. + +import ( + "bufio" + "fmt" + "html/template" + "io" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/measurement" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +// printSource prints an annotated source listing, including all +// functions with samples that match the regexp rpt.options.symbol. +// The sources are sorted by function name and then by filename to +// eliminate potential nondeterminism. +func printSource(w io.Writer, rpt *Report) error { + o := rpt.options + g := rpt.newGraph(nil) + + // Identify all the functions that match the regexp provided. + // Group nodes for each matching function. 
+ var functions graph.Nodes + functionNodes := make(map[string]graph.Nodes) + for _, n := range g.Nodes { + if !o.Symbol.MatchString(n.Info.Name) { + continue + } + if functionNodes[n.Info.Name] == nil { + functions = append(functions, n) + } + functionNodes[n.Info.Name] = append(functionNodes[n.Info.Name], n) + } + functions.Sort(graph.NameOrder) + + if len(functionNodes) == 0 { + return fmt.Errorf("no matches found for regexp: %s", o.Symbol) + } + + sourcePath := o.SourcePath + if sourcePath == "" { + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("could not stat current dir: %v", err) + } + sourcePath = wd + } + reader := newSourceReader(sourcePath, o.TrimPath) + + fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total)) + for _, fn := range functions { + name := fn.Info.Name + + // Identify all the source files associated to this function. + // Group nodes for each source file. + var sourceFiles graph.Nodes + fileNodes := make(map[string]graph.Nodes) + for _, n := range functionNodes[name] { + if n.Info.File == "" { + continue + } + if fileNodes[n.Info.File] == nil { + sourceFiles = append(sourceFiles, n) + } + fileNodes[n.Info.File] = append(fileNodes[n.Info.File], n) + } + + if len(sourceFiles) == 0 { + fmt.Fprintf(w, "No source information for %s\n", name) + continue + } + + sourceFiles.Sort(graph.FileOrder) + + // Print each file associated with this function. 
+ for _, fl := range sourceFiles { + filename := fl.Info.File + fns := fileNodes[filename] + flatSum, cumSum := fns.Sum() + + fnodes, _, err := getSourceFromFile(filename, reader, fns, 0, 0) + fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, filename) + fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n", + rpt.formatValue(flatSum), rpt.formatValue(cumSum), + measurement.Percentage(cumSum, rpt.total)) + + if err != nil { + fmt.Fprintf(w, " Error: %v\n", err) + continue + } + + for _, fn := range fnodes { + fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), fn.Info.Lineno, fn.Info.Name) + } + } + } + return nil +} + +// printWebSource prints an annotated source listing, including all +// functions with samples that match the regexp rpt.options.symbol. +func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + printHeader(w, rpt) + if err := PrintWebList(w, rpt, obj, -1); err != nil { + return err + } + printPageClosing(w) + return nil +} + +// sourcePrinter holds state needed for generating source+asm HTML listing. +type sourcePrinter struct { + reader *sourceReader + synth *synthCode + objectTool plugin.ObjTool + objects map[string]plugin.ObjFile // Opened object files + sym *regexp.Regexp // May be nil + files map[string]*sourceFile // Set of files to print. + insts map[uint64]instructionInfo // Instructions of interest (keyed by address). + + // Set of function names that we are interested in (because they had + // a sample and match sym). + interest map[string]bool + + // Mapping from system function names to printable names. + prettyNames map[string]string +} + +// addrInfo holds information for an address we are interested in. +type addrInfo struct { + loc *profile.Location // Always non-nil + obj plugin.ObjFile // May be nil +} + +// instructionInfo holds collected information for an instruction. 
+type instructionInfo struct { + objAddr uint64 // Address in object file (with base subtracted out) + length int // Instruction length in bytes + disasm string // Disassembly of instruction + file string // For top-level function in which instruction occurs + line int // For top-level function in which instruction occurs + flat, cum int64 // Samples to report (divisor already applied) +} + +// sourceFile contains collected information for files we will print. +type sourceFile struct { + fname string + cum int64 + flat int64 + lines map[int][]sourceInst // Instructions to show per line + funcName map[int]string // Function name per line +} + +// sourceInst holds information for an instruction to be displayed. +type sourceInst struct { + addr uint64 + stack []callID // Inlined call-stack +} + +// sourceFunction contains information for a contiguous range of lines per function we +// will print. +type sourceFunction struct { + name string + begin, end int // Line numbers (end is not included in the range) + flat, cum int64 +} + +// addressRange is a range of addresses plus the object file that contains it. +type addressRange struct { + begin, end uint64 + obj plugin.ObjFile + mapping *profile.Mapping + score int64 // Used to order ranges for processing +} + +// PrintWebList prints annotated source listing of rpt to w. +// rpt.prof should contain inlined call info. 
+func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) error { + sourcePath := rpt.options.SourcePath + if sourcePath == "" { + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("could not stat current dir: %v", err) + } + sourcePath = wd + } + sp := newSourcePrinter(rpt, obj, sourcePath) + if len(sp.interest) == 0 { + return fmt.Errorf("no matches found for regexp: %s", rpt.options.Symbol) + } + sp.print(w, maxFiles, rpt) + sp.close() + return nil +} + +func newSourcePrinter(rpt *Report, obj plugin.ObjTool, sourcePath string) *sourcePrinter { + sp := &sourcePrinter{ + reader: newSourceReader(sourcePath, rpt.options.TrimPath), + synth: newSynthCode(rpt.prof.Mapping), + objectTool: obj, + objects: map[string]plugin.ObjFile{}, + sym: rpt.options.Symbol, + files: map[string]*sourceFile{}, + insts: map[uint64]instructionInfo{}, + prettyNames: map[string]string{}, + interest: map[string]bool{}, + } + + // If the regexp source can be parsed as an address, also match + // functions that land on that address. + var address *uint64 + if sp.sym != nil { + if hex, err := strconv.ParseUint(sp.sym.String(), 0, 64); err == nil { + address = &hex + } + } + + addrs := map[uint64]addrInfo{} + flat := map[uint64]int64{} + cum := map[uint64]int64{} + + // Record an interest in the function corresponding to lines[index]. + markInterest := func(addr uint64, loc *profile.Location, index int) { + fn := loc.Line[index] + if fn.Function == nil { + return + } + sp.interest[fn.Function.Name] = true + sp.interest[fn.Function.SystemName] = true + if _, ok := addrs[addr]; !ok { + addrs[addr] = addrInfo{loc, sp.objectFile(loc.Mapping)} + } + } + + // See if sp.sym matches line. 
+ matches := func(line profile.Line) bool { + if line.Function == nil { + return false + } + return sp.sym.MatchString(line.Function.Name) || + sp.sym.MatchString(line.Function.SystemName) || + sp.sym.MatchString(line.Function.Filename) + } + + // Extract sample counts and compute set of interesting functions. + for _, sample := range rpt.prof.Sample { + value := rpt.options.SampleValue(sample.Value) + if rpt.options.SampleMeanDivisor != nil { + div := rpt.options.SampleMeanDivisor(sample.Value) + if div != 0 { + value /= div + } + } + + // Find call-sites matching sym. + for i := len(sample.Location) - 1; i >= 0; i-- { + loc := sample.Location[i] + for _, line := range loc.Line { + if line.Function == nil { + continue + } + sp.prettyNames[line.Function.SystemName] = line.Function.Name + } + + addr := loc.Address + if addr == 0 { + // Some profiles are missing valid addresses. + addr = sp.synth.address(loc) + } + + cum[addr] += value + if i == 0 { + flat[addr] += value + } + + if sp.sym == nil || (address != nil && addr == *address) { + // Interested in top-level entry of stack. + if len(loc.Line) > 0 { + markInterest(addr, loc, len(loc.Line)-1) + } + continue + } + + // Search in inlined stack for a match. + matchFile := (loc.Mapping != nil && sp.sym.MatchString(loc.Mapping.File)) + for j, line := range loc.Line { + if (j == 0 && matchFile) || matches(line) { + markInterest(addr, loc, j) + } + } + } + } + + sp.expandAddresses(rpt, addrs, flat) + sp.initSamples(flat, cum) + return sp +} + +func (sp *sourcePrinter) close() { + for _, objFile := range sp.objects { + if objFile != nil { + objFile.Close() + } + } +} + +func (sp *sourcePrinter) expandAddresses(rpt *Report, addrs map[uint64]addrInfo, flat map[uint64]int64) { + // We found interesting addresses (ones with non-zero samples) above. + // Get covering address ranges and disassemble the ranges. 
+ ranges, unprocessed := sp.splitIntoRanges(rpt.prof, addrs, flat) + sp.handleUnprocessed(addrs, unprocessed) + + // Trim ranges if there are too many. + const maxRanges = 25 + sort.Slice(ranges, func(i, j int) bool { + return ranges[i].score > ranges[j].score + }) + if len(ranges) > maxRanges { + ranges = ranges[:maxRanges] + } + + for _, r := range ranges { + objBegin, err := r.obj.ObjAddr(r.begin) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range start %x: %v\n", r.begin, err) + continue + } + objEnd, err := r.obj.ObjAddr(r.end) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range end %x: %v\n", r.end, err) + continue + } + base := r.begin - objBegin + insts, err := sp.objectTool.Disasm(r.mapping.File, objBegin, objEnd, rpt.options.IntelSyntax) + if err != nil { + // TODO(sanjay): Report that the covered addresses are missing. + continue + } + + var lastFrames []plugin.Frame + var lastAddr, maxAddr uint64 + for i, inst := range insts { + addr := inst.Addr + base + + // Guard against duplicate output from Disasm. + if addr <= maxAddr { + continue + } + maxAddr = addr + + length := 1 + if i+1 < len(insts) && insts[i+1].Addr > inst.Addr { + // Extend to next instruction. + length = int(insts[i+1].Addr - inst.Addr) + } + + // Get inlined-call-stack for address. + frames, err := r.obj.SourceLine(addr) + if err != nil { + // Construct a frame from disassembler output. + frames = []plugin.Frame{{Func: inst.Function, File: inst.File, Line: inst.Line}} + } + + x := instructionInfo{objAddr: inst.Addr, length: length, disasm: inst.Text} + if len(frames) > 0 { + // We could consider using the outer-most caller's source + // location so we give the some hint as to where the + // inlining happened that led to this instruction. 
So for + // example, suppose we have the following (inlined) call + // chains for this instruction: + // F1->G->H + // F2->G->H + // We could tag the instructions from the first call with + // F1 and instructions from the second call with F2. But + // that leads to a somewhat confusing display. So for now, + // we stick with just the inner-most location (i.e., H). + // In the future we will consider changing the display to + // make caller info more visible. + index := 0 // Inner-most frame + x.file = frames[index].File + x.line = frames[index].Line + } + sp.insts[addr] = x + + // We sometimes get instructions with a zero reported line number. + // Make such instructions have the same line info as the preceding + // instruction, if an earlier instruction is found close enough. + const neighborhood = 32 + if len(frames) > 0 && frames[0].Line != 0 { + lastFrames = frames + lastAddr = addr + } else if (addr-lastAddr <= neighborhood) && lastFrames != nil { + frames = lastFrames + } + + sp.addStack(addr, frames) + } + } +} + +func (sp *sourcePrinter) addStack(addr uint64, frames []plugin.Frame) { + // See if the stack contains a function we are interested in. + for i, f := range frames { + if !sp.interest[f.Func] { + continue + } + + // Record sub-stack under frame's file/line. + fname := canonicalizeFileName(f.File) + file := sp.files[fname] + if file == nil { + file = &sourceFile{ + fname: fname, + lines: map[int][]sourceInst{}, + funcName: map[int]string{}, + } + sp.files[fname] = file + } + callees := frames[:i] + stack := make([]callID, 0, len(callees)) + for j := len(callees) - 1; j >= 0; j-- { // Reverse so caller is first + stack = append(stack, callID{ + file: callees[j].File, + line: callees[j].Line, + }) + } + file.lines[f.Line] = append(file.lines[f.Line], sourceInst{addr, stack}) + + // Remember the first function name encountered per source line + // and assume that that line belongs to that function. 
+ if _, ok := file.funcName[f.Line]; !ok { + file.funcName[f.Line] = f.Func + } + } +} + +// synthAsm is the special disassembler value used for instructions without an object file. +const synthAsm = "" + +// handleUnprocessed handles addresses that were skipped by splitIntoRanges because they +// did not belong to a known object file. +func (sp *sourcePrinter) handleUnprocessed(addrs map[uint64]addrInfo, unprocessed []uint64) { + // makeFrames synthesizes a []plugin.Frame list for the specified address. + // The result will typically have length 1, but may be longer if address corresponds + // to inlined calls. + makeFrames := func(addr uint64) []plugin.Frame { + loc := addrs[addr].loc + stack := make([]plugin.Frame, 0, len(loc.Line)) + for _, line := range loc.Line { + fn := line.Function + if fn == nil { + continue + } + stack = append(stack, plugin.Frame{ + Func: fn.Name, + File: fn.Filename, + Line: int(line.Line), + }) + } + return stack + } + + for _, addr := range unprocessed { + frames := makeFrames(addr) + x := instructionInfo{ + objAddr: addr, + length: 1, + disasm: synthAsm, + } + if len(frames) > 0 { + x.file = frames[0].File + x.line = frames[0].Line + } + sp.insts[addr] = x + + sp.addStack(addr, frames) + } +} + +// splitIntoRanges converts the set of addresses we are interested in into a set of address +// ranges to disassemble. It also returns the set of addresses found that did not have an +// associated object file and were therefore not added to an address range. +func (sp *sourcePrinter) splitIntoRanges(prof *profile.Profile, addrMap map[uint64]addrInfo, flat map[uint64]int64) ([]addressRange, []uint64) { + // Partition addresses into two sets: ones with a known object file, and ones without. 
+ var addrs, unprocessed []uint64 + for addr, info := range addrMap { + if info.obj != nil { + addrs = append(addrs, addr) + } else { + unprocessed = append(unprocessed, addr) + } + } + sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + + const expand = 500 // How much to expand range to pick up nearby addresses. + var result []addressRange + for i, n := 0, len(addrs); i < n; { + begin, end := addrs[i], addrs[i] + sum := flat[begin] + i++ + + info := addrMap[begin] + m := info.loc.Mapping + obj := info.obj // Non-nil because of the partitioning done above. + + // Find following addresses that are close enough to addrs[i]. + for i < n && addrs[i] <= end+2*expand && addrs[i] < m.Limit { + // When we expand ranges by "expand" on either side, the ranges + // for addrs[i] and addrs[i-1] will merge. + end = addrs[i] + sum += flat[end] + i++ + } + if m.Start-begin >= expand { + begin -= expand + } else { + begin = m.Start + } + if m.Limit-end >= expand { + end += expand + } else { + end = m.Limit + } + + result = append(result, addressRange{begin, end, obj, m, sum}) + } + return result, unprocessed +} + +func (sp *sourcePrinter) initSamples(flat, cum map[uint64]int64) { + for addr, inst := range sp.insts { + // Move all samples that were assigned to the middle of an instruction to the + // beginning of that instruction. This takes care of samples that were recorded + // against pc+1. + instEnd := addr + uint64(inst.length) + for p := addr; p < instEnd; p++ { + inst.flat += flat[p] + inst.cum += cum[p] + } + sp.insts[addr] = inst + } +} + +func (sp *sourcePrinter) print(w io.Writer, maxFiles int, rpt *Report) { + // Finalize per-file counts. + for _, file := range sp.files { + seen := map[uint64]bool{} + for _, line := range file.lines { + for _, x := range line { + if seen[x.addr] { + // Same address can be displayed multiple times in a file + // (e.g., if we show multiple inlined functions). + // Avoid double-counting samples in this case. 
+ continue + } + seen[x.addr] = true + inst := sp.insts[x.addr] + file.cum += inst.cum + file.flat += inst.flat + } + } + } + + // Get sorted list of files to print. + var files []*sourceFile + for _, f := range sp.files { + files = append(files, f) + } + order := func(i, j int) bool { return files[i].flat > files[j].flat } + if maxFiles < 0 { + // Order by name for compatibility with old code. + order = func(i, j int) bool { return files[i].fname < files[j].fname } + maxFiles = len(files) + } + sort.Slice(files, order) + for i, f := range files { + if i < maxFiles { + sp.printFile(w, f, rpt) + } + } +} + +func (sp *sourcePrinter) printFile(w io.Writer, f *sourceFile, rpt *Report) { + for _, fn := range sp.functions(f) { + if fn.cum == 0 { + continue + } + printFunctionHeader(w, fn.name, f.fname, fn.flat, fn.cum, rpt) + var asm []assemblyInstruction + for l := fn.begin; l < fn.end; l++ { + lineContents, ok := sp.reader.line(f.fname, l) + if !ok { + if len(f.lines[l]) == 0 { + // Outside of range of valid lines and nothing to print. + continue + } + if l == 0 { + // Line number 0 shows up if line number is not known. + lineContents = "" + } else { + // Past end of file, but have data to print. + lineContents = "???" + } + } + + // Make list of assembly instructions. 
+ asm = asm[:0] + var flatSum, cumSum int64 + var lastAddr uint64 + for _, inst := range f.lines[l] { + addr := inst.addr + x := sp.insts[addr] + flatSum += x.flat + cumSum += x.cum + startsBlock := (addr != lastAddr+uint64(sp.insts[lastAddr].length)) + lastAddr = addr + + // divisors already applied, so leave flatDiv,cumDiv as 0 + asm = append(asm, assemblyInstruction{ + address: x.objAddr, + instruction: x.disasm, + function: fn.name, + file: x.file, + line: x.line, + flat: x.flat, + cum: x.cum, + startsBlock: startsBlock, + inlineCalls: inst.stack, + }) + } + + printFunctionSourceLine(w, l, flatSum, cumSum, lineContents, asm, sp.reader, rpt) + } + printFunctionClosing(w) + } +} + +// functions splits apart the lines to show in a file into a list of per-function ranges. +func (sp *sourcePrinter) functions(f *sourceFile) []sourceFunction { + var funcs []sourceFunction + + // Get interesting lines in sorted order. + lines := make([]int, 0, len(f.lines)) + for l := range f.lines { + lines = append(lines, l) + } + sort.Ints(lines) + + // Merge adjacent lines that are in same function and not too far apart. + const mergeLimit = 20 + for _, l := range lines { + name := f.funcName[l] + if pretty, ok := sp.prettyNames[name]; ok { + // Use demangled name if available. + name = pretty + } + + fn := sourceFunction{name: name, begin: l, end: l + 1} + for _, x := range f.lines[l] { + inst := sp.insts[x.addr] + fn.flat += inst.flat + fn.cum += inst.cum + } + + // See if we should merge into preceding function. + if len(funcs) > 0 { + last := funcs[len(funcs)-1] + if l-last.end < mergeLimit && last.name == name { + last.end = l + 1 + last.flat += fn.flat + last.cum += fn.cum + funcs[len(funcs)-1] = last + continue + } + } + + // Add new function. + funcs = append(funcs, fn) + } + + // Expand function boundaries to show neighborhood. 
+ const expand = 5 + for i, f := range funcs { + if i == 0 { + // Extend backwards, stopping at line number 1, but do not disturb 0 + // since that is a special line number that can show up when addr2line + // cannot determine the real line number. + if f.begin > expand { + f.begin -= expand + } else if f.begin > 1 { + f.begin = 1 + } + } else { + // Find gap from predecessor and divide between predecessor and f. + halfGap := (f.begin - funcs[i-1].end) / 2 + if halfGap > expand { + halfGap = expand + } + funcs[i-1].end += halfGap + f.begin -= halfGap + } + funcs[i] = f + } + + // Also extend the ending point of the last function. + if len(funcs) > 0 { + funcs[len(funcs)-1].end += expand + } + + return funcs +} + +// objectFile return the object for the specified mapping, opening it if necessary. +// It returns nil on error. +func (sp *sourcePrinter) objectFile(m *profile.Mapping) plugin.ObjFile { + if m == nil { + return nil + } + if object, ok := sp.objects[m.File]; ok { + return object // May be nil if we detected an error earlier. + } + object, err := sp.objectTool.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol) + if err != nil { + object = nil + } + sp.objects[m.File] = object // Cache even on error. + return object +} + +// printHeader prints the page header for a weblist report. +func printHeader(w io.Writer, rpt *Report) { + fmt.Fprintln(w, ` + + + + +Pprof listing`) + fmt.Fprintln(w, weblistPageCSS) + fmt.Fprintln(w, weblistPageScript) + fmt.Fprint(w, "\n\n\n") + + var labels []string + for _, l := range ProfileLabels(rpt) { + labels = append(labels, template.HTMLEscapeString(l)) + } + + fmt.Fprintf(w, `
%s
Total: %s
`, + strings.Join(labels, "
\n"), + rpt.formatValue(rpt.total), + ) +} + +// printFunctionHeader prints a function header for a weblist report. +func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, rpt *Report) { + fmt.Fprintf(w, `

%s

%s

+
+  Total:  %10s %10s (flat, cum) %s
+`,
+		template.HTMLEscapeString(name), template.HTMLEscapeString(path),
+		rpt.formatValue(flatSum), rpt.formatValue(cumSum),
+		measurement.Percentage(cumSum, rpt.total))
+}
+
+// printFunctionSourceLine prints a source line and the corresponding assembly.
+// flat/cum are the sample values aggregated over all instructions attributed to
+// this source line; lineContents is the raw source text (may be "" or "???"
+// when the file/line could not be read — see the caller's fallback logic).
+// NOTE(review): the format strings below appear to have had their HTML markup
+// stripped by whatever produced this patch (e.g. the second Fprintf passes six
+// arguments for five verbs; `cl` was presumably interpolated into a
+// `<span class=%s>` wrapper). Confirm against the upstream pprof source before
+// relying on the literal output format shown here.
+func printFunctionSourceLine(w io.Writer, lineNo int, flat, cum int64, lineContents string,
+	assembly []assemblyInstruction, reader *sourceReader, rpt *Report) {
+	if len(assembly) == 0 {
+		// No assembly attributed to this line: emit the plain source row
+		// (line number, flat, cum, escaped source text) and stop.
+		fmt.Fprintf(w,
+			" %6d   %10s %10s %8s  %s \n",
+			lineNo,
+			valueOrDot(flat, rpt), valueOrDot(cum, rpt),
+			"", template.HTMLEscapeString(lineContents))
+		return
+	}
+
+	// A line is "live" (and gets an expandable nested-assembly section) if any
+	// of its instructions has real disassembly or inlined-call context;
+	// otherwise it is styled as "deadsrc" with no nested block.
+	nestedInfo := false
+	cl := "deadsrc"
+	for _, an := range assembly {
+		if len(an.inlineCalls) > 0 || an.instruction != synthAsm {
+			nestedInfo = true
+			cl = "livesrc"
+		}
+	}
+
+	// Emit the source row itself, then (for live lines) the nested assembly
+	// indented relative to the source text's own indentation.
+	fmt.Fprintf(w,
+		" %6d   %10s %10s %8s  %s ",
+		lineNo, cl,
+		valueOrDot(flat, rpt), valueOrDot(cum, rpt),
+		"", template.HTMLEscapeString(lineContents))
+	if nestedInfo {
+		srcIndent := indentation(lineContents)
+		printNested(w, srcIndent, assembly, reader, rpt)
+	}
+	fmt.Fprintln(w)
+}
+
+// printNested prints the assembly instructions (and their inlined-call
+// context) that belong to one source line, indented srcIndent columns to line
+// up under the source text. curCalls tracks the inline-call stack of the
+// previous instruction so that an unchanged call context is not re-printed
+// for every instruction.
+// NOTE(review): the empty Fprint(w, "") calls at the top and bottom, and the
+// bare "" arguments in the Fprintf calls, look like HTML wrapper strings
+// (e.g. <span class=asm>…</span>) that were stripped from this patch —
+// confirm against the upstream pprof source.
+func printNested(w io.Writer, srcIndent int, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) {
+	fmt.Fprint(w, "")
+	var curCalls []callID
+	for i, an := range assembly {
+		if an.startsBlock && i != 0 {
+			// Insert a separator between discontiguous blocks.
+			fmt.Fprintf(w, " %8s %28s\n", "", "⋮")
+		}
+
+		// Pre-escape the file name; the ":line" suffix added here is digits
+		// only, which is why fileline is emitted unescaped below.
+		var fileline string
+		if an.file != "" {
+			fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(filepath.Base(an.file)), an.line)
+		}
+		flat, cum := an.flat, an.cum
+
+		// Print inlined call context.
+		for j, c := range an.inlineCalls {
+			if j < len(curCalls) && curCalls[j] == c {
+				// Skip if same as previous instruction.
+				continue
+			}
+			// Once one frame differs, re-print every deeper frame: clearing
+			// curCalls makes the equality check above fail for all j below.
+			curCalls = nil
+			fline, ok := reader.line(c.file, c.line)
+			if !ok {
+				fline = ""
+			}
+			// Indent 4 extra columns per inline depth level j.
+			text := strings.Repeat(" ", srcIndent+4+4*j) + strings.TrimSpace(fline)
+			fmt.Fprintf(w, " %8s %10s %10s %8s  %s %s:%d\n",
+				"", "", "", "",
+				template.HTMLEscapeString(rightPad(text, 80)),
+				template.HTMLEscapeString(filepath.Base(c.file)), c.line)
+		}
+		curCalls = an.inlineCalls
+		if an.instruction == synthAsm {
+			// Synthesized instruction with no real disassembly: context only.
+			continue
+		}
+		// The instruction itself, indented one level past its inline calls.
+		text := strings.Repeat(" ", srcIndent+4+4*len(curCalls)) + an.instruction
+		fmt.Fprintf(w, " %8s %10s %10s %8x: %s %s\n",
+			"", valueOrDot(flat, rpt), valueOrDot(cum, rpt), an.address,
+			template.HTMLEscapeString(rightPad(text, 80)),
+			// fileline should not be escaped since it was formed by appending
+			// line number (just digits) to an escaped file name. Escaping here
+			// would cause double-escaping of file name.
+			fileline)
+	}
+	fmt.Fprint(w, "")
+}
+
+// printFunctionClosing prints the end of a function in a weblist report.
+func printFunctionClosing(w io.Writer) {
+	fmt.Fprintln(w, "
") +} + +// printPageClosing prints the end of the page in a weblist report. +func printPageClosing(w io.Writer) { + fmt.Fprintln(w, weblistPageClosing) +} + +// getSourceFromFile collects the sources of a function from a source +// file and annotates it with the samples in fns. Returns the sources +// as nodes, using the info.name field to hold the source code. +func getSourceFromFile(file string, reader *sourceReader, fns graph.Nodes, start, end int) (graph.Nodes, string, error) { + lineNodes := make(map[int]graph.Nodes) + + // Collect source coordinates from profile. + const margin = 5 // Lines before first/after last sample. + if start == 0 { + if fns[0].Info.StartLine != 0 { + start = fns[0].Info.StartLine + } else { + start = fns[0].Info.Lineno - margin + } + } else { + start -= margin + } + if end == 0 { + end = fns[0].Info.Lineno + } + end += margin + for _, n := range fns { + lineno := n.Info.Lineno + nodeStart := n.Info.StartLine + if nodeStart == 0 { + nodeStart = lineno - margin + } + nodeEnd := lineno + margin + if nodeStart < start { + start = nodeStart + } else if nodeEnd > end { + end = nodeEnd + } + lineNodes[lineno] = append(lineNodes[lineno], n) + } + if start < 1 { + start = 1 + } + + var src graph.Nodes + for lineno := start; lineno <= end; lineno++ { + line, ok := reader.line(file, lineno) + if !ok { + break + } + flat, cum := lineNodes[lineno].Sum() + src = append(src, &graph.Node{ + Info: graph.NodeInfo{ + Name: strings.TrimRight(line, "\n"), + Lineno: lineno, + }, + Flat: flat, + Cum: cum, + }) + } + if err := reader.fileError(file); err != nil { + return nil, file, err + } + return src, file, nil +} + +// sourceReader provides access to source code with caching of file contents. +type sourceReader struct { + // searchPath is a filepath.ListSeparator-separated list of directories where + // source files should be searched. + searchPath string + + // trimPath is a filepath.ListSeparator-separated list of paths to trim. 
+ trimPath string + + // files maps from path name to a list of lines. + // files[*][0] is unused since line numbering starts at 1. + files map[string][]string + + // errors collects errors encountered per file. These errors are + // consulted before returning out of these module. + errors map[string]error +} + +func newSourceReader(searchPath, trimPath string) *sourceReader { + return &sourceReader{ + searchPath, + trimPath, + make(map[string][]string), + make(map[string]error), + } +} + +func (reader *sourceReader) fileError(path string) error { + return reader.errors[path] +} + +// line returns the line numbered "lineno" in path, or _,false if lineno is out of range. +func (reader *sourceReader) line(path string, lineno int) (string, bool) { + lines, ok := reader.files[path] + if !ok { + // Read and cache file contents. + lines = []string{""} // Skip 0th line + f, err := openSourceFile(path, reader.searchPath, reader.trimPath) + if err != nil { + reader.errors[path] = err + } else { + s := bufio.NewScanner(f) + for s.Scan() { + lines = append(lines, s.Text()) + } + f.Close() + if s.Err() != nil { + reader.errors[path] = err + } + } + reader.files[path] = lines + } + if lineno <= 0 || lineno >= len(lines) { + return "", false + } + return lines[lineno], true +} + +// openSourceFile opens a source file from a name encoded in a profile. File +// names in a profile after can be relative paths, so search them in each of +// the paths in searchPath and their parents. In case the profile contains +// absolute paths, additional paths may be configured to trim from the source +// paths in the profile. This effectively turns the path into a relative path +// searching it using searchPath as usual). +func openSourceFile(path, searchPath, trim string) (*os.File, error) { + path = trimPath(path, trim, searchPath) + // If file is still absolute, require file to exist. 
+ if filepath.IsAbs(path) { + f, err := os.Open(path) + return f, err + } + // Scan each component of the path. + for _, dir := range filepath.SplitList(searchPath) { + // Search up for every parent of each possible path. + for { + filename := filepath.Join(dir, path) + if f, err := os.Open(filename); err == nil { + return f, nil + } + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + } + + return nil, fmt.Errorf("could not find file %s on path %s", path, searchPath) +} + +// trimPath cleans up a path by removing prefixes that are commonly +// found on profiles plus configured prefixes. +// TODO(aalexand): Consider optimizing out the redundant work done in this +// function if it proves to matter. +func trimPath(path, trimPath, searchPath string) string { + // Keep path variable intact as it's used below to form the return value. + sPath, searchPath := filepath.ToSlash(path), filepath.ToSlash(searchPath) + if trimPath == "" { + // If the trim path is not configured, try to guess it heuristically: + // search for basename of each search path in the original path and, if + // found, strip everything up to and including the basename. So, for + // example, given original path "/some/remote/path/my-project/foo/bar.c" + // and search path "/my/local/path/my-project" the heuristic will return + // "/my/local/path/my-project/foo/bar.c". + for _, dir := range filepath.SplitList(searchPath) { + want := "/" + filepath.Base(dir) + "/" + if found := strings.Index(sPath, want); found != -1 { + return path[found+len(want):] + } + } + } + // Trim configured trim prefixes. 
+ trimPaths := append(filepath.SplitList(filepath.ToSlash(trimPath)), "/proc/self/cwd/./", "/proc/self/cwd/") + for _, trimPath := range trimPaths { + if !strings.HasSuffix(trimPath, "/") { + trimPath += "/" + } + if strings.HasPrefix(sPath, trimPath) { + return path[len(trimPath):] + } + } + return path +} + +func indentation(line string) int { + column := 0 + for _, c := range line { + if c == ' ' { + column++ + } else if c == '\t' { + column++ + for column%8 != 0 { + column++ + } + } else { + break + } + } + return column +} + +// rightPad pads the input with spaces on the right-hand-side to make it have +// at least width n. It treats tabs as enough spaces that lead to the next +// 8-aligned tab-stop. +func rightPad(s string, n int) string { + var str strings.Builder + + // Convert tabs to spaces as we go so padding works regardless of what prefix + // is placed before the result. + column := 0 + for _, c := range s { + column++ + if c == '\t' { + str.WriteRune(' ') + for column%8 != 0 { + column++ + str.WriteRune(' ') + } + } else { + str.WriteRune(c) + } + } + for column < n { + column++ + str.WriteRune(' ') + } + return str.String() +} + +func canonicalizeFileName(fname string) string { + fname = strings.TrimPrefix(fname, "/proc/self/cwd/") + fname = strings.TrimPrefix(fname, "./") + return filepath.Clean(fname) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go new file mode 100644 index 0000000..851693f --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "html/template" +) + +// AddSourceTemplates adds templates used by PrintWebList to t. +func AddSourceTemplates(t *template.Template) { + template.Must(t.Parse(`{{define "weblistcss"}}` + weblistPageCSS + `{{end}}`)) + template.Must(t.Parse(`{{define "weblistjs"}}` + weblistPageScript + `{{end}}`)) +} + +const weblistPageCSS = `` + +const weblistPageScript = `` + +const weblistPageClosing = ` + +` diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/synth.go b/src/cmd/vendor/github.com/google/pprof/internal/report/synth.go new file mode 100644 index 0000000..7a35bbc --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/synth.go @@ -0,0 +1,39 @@ +package report + +import ( + "github.com/google/pprof/profile" +) + +// synthCode assigns addresses to locations without an address. +type synthCode struct { + next uint64 + addr map[*profile.Location]uint64 // Synthesized address assigned to a location +} + +func newSynthCode(mappings []*profile.Mapping) *synthCode { + // Find a larger address than any mapping. + s := &synthCode{next: 1} + for _, m := range mappings { + if s.next < m.Limit { + s.next = m.Limit + } + } + return s +} + +// address returns the synthetic address for loc, creating one if needed. 
+func (s *synthCode) address(loc *profile.Location) uint64 { + if loc.Address != 0 { + panic("can only synthesize addresses for locations without an address") + } + if addr, ok := s.addr[loc]; ok { + return addr + } + if s.addr == nil { + s.addr = map[*profile.Location]uint64{} + } + addr := s.next + s.next++ + s.addr[loc] = addr + return addr +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go new file mode 100644 index 0000000..d243b80 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go @@ -0,0 +1,379 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package symbolizer provides a routine to populate a profile with +// symbol, file and line number information. It relies on the +// addr2liner and demangle packages to do the actual work. +package symbolizer + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/google/pprof/internal/binutils" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/symbolz" + "github.com/google/pprof/profile" + "github.com/ianlancetaylor/demangle" +) + +// Symbolizer implements the plugin.Symbolize interface. 
+type Symbolizer struct { + Obj plugin.ObjTool + UI plugin.UI + Transport http.RoundTripper +} + +// test taps for dependency injection +var symbolzSymbolize = symbolz.Symbolize +var localSymbolize = doLocalSymbolize +var demangleFunction = Demangle + +// Symbolize attempts to symbolize profile p. First uses binutils on +// local binaries; if the source is a URL it attempts to get any +// missed entries using symbolz. +func (s *Symbolizer) Symbolize(mode string, sources plugin.MappingSources, p *profile.Profile) error { + remote, local, fast, force, demanglerMode := true, true, false, false, "" + for _, o := range strings.Split(strings.ToLower(mode), ":") { + switch o { + case "": + continue + case "none", "no": + return nil + case "local": + remote, local = false, true + case "fastlocal": + remote, local, fast = false, true, true + case "remote": + remote, local = true, false + case "force": + force = true + default: + switch d := strings.TrimPrefix(o, "demangle="); d { + case "full", "none", "templates": + demanglerMode = d + force = true + continue + case "default": + continue + } + s.UI.PrintErr("ignoring unrecognized symbolization option: " + mode) + s.UI.PrintErr("expecting -symbolize=[local|fastlocal|remote|none][:force][:demangle=[none|full|templates|default]") + } + } + + var err error + if local { + // Symbolize locally using binutils. + if err = localSymbolize(p, fast, force, s.Obj, s.UI); err != nil { + s.UI.PrintErr("local symbolization: " + err.Error()) + } + } + if remote { + post := func(source, post string) ([]byte, error) { + return postURL(source, post, s.Transport) + } + if err = symbolzSymbolize(p, force, sources, post, s.UI); err != nil { + return err // Ran out of options. + } + } + + demangleFunction(p, force, demanglerMode) + return nil +} + +// postURL issues a POST to a URL over HTTP. 
+func postURL(source, post string, tr http.RoundTripper) ([]byte, error) { + client := &http.Client{ + Transport: tr, + } + resp, err := client.Post(source, "application/octet-stream", strings.NewReader(post)) + if err != nil { + return nil, fmt.Errorf("http post %s: %v", source, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("http post %s: %v", source, statusCodeError(resp)) + } + return ioutil.ReadAll(resp.Body) +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := ioutil.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// doLocalSymbolize adds symbol and line number information to all locations +// in a profile. mode enables some options to control +// symbolization. +func doLocalSymbolize(prof *profile.Profile, fast, force bool, obj plugin.ObjTool, ui plugin.UI) error { + if fast { + if bu, ok := obj.(*binutils.Binutils); ok { + bu.SetFastSymbolization(true) + } + } + + mt, err := newMapping(prof, obj, ui, force) + if err != nil { + return err + } + defer mt.close() + + functions := make(map[profile.Function]*profile.Function) + for _, l := range mt.prof.Location { + m := l.Mapping + segment := mt.segments[m] + if segment == nil { + // Nothing to do. + continue + } + + stack, err := segment.SourceLine(l.Address) + if err != nil || len(stack) == 0 { + // No answers from addr2line. 
+ continue + } + + l.Line = make([]profile.Line, len(stack)) + l.IsFolded = false + for i, frame := range stack { + if frame.Func != "" { + m.HasFunctions = true + } + if frame.File != "" { + m.HasFilenames = true + } + if frame.Line != 0 { + m.HasLineNumbers = true + } + f := &profile.Function{ + Name: frame.Func, + SystemName: frame.Func, + Filename: frame.File, + } + if fp := functions[*f]; fp != nil { + f = fp + } else { + functions[*f] = f + f.ID = uint64(len(mt.prof.Function)) + 1 + mt.prof.Function = append(mt.prof.Function, f) + } + l.Line[i] = profile.Line{ + Function: f, + Line: int64(frame.Line), + } + } + + if len(stack) > 0 { + m.HasInlineFrames = true + } + } + + return nil +} + +// Demangle updates the function names in a profile with demangled C++ +// names, simplified according to demanglerMode. If force is set, +// overwrite any names that appear already demangled. +func Demangle(prof *profile.Profile, force bool, demanglerMode string) { + if force { + // Remove the current demangled names to force demangling + for _, f := range prof.Function { + if f.Name != "" && f.SystemName != "" { + f.Name = f.SystemName + } + } + } + + options := demanglerModeToOptions(demanglerMode) + for _, fn := range prof.Function { + demangleSingleFunction(fn, options) + } +} + +func demanglerModeToOptions(demanglerMode string) []demangle.Option { + switch demanglerMode { + case "": // demangled, simplified: no parameters, no templates, no return type + return []demangle.Option{demangle.NoParams, demangle.NoTemplateParams} + case "templates": // demangled, simplified: no parameters, no return type + return []demangle.Option{demangle.NoParams} + case "full": + return []demangle.Option{demangle.NoClones} + case "none": // no demangling + return []demangle.Option{} + } + + panic(fmt.Sprintf("unknown demanglerMode %s", demanglerMode)) +} + +func demangleSingleFunction(fn *profile.Function, options []demangle.Option) { + if fn.Name != "" && fn.SystemName != fn.Name { + 
return // Already demangled. + } + // Copy the options because they may be updated by the call. + o := make([]demangle.Option, len(options)) + copy(o, options) + if demangled := demangle.Filter(fn.SystemName, o...); demangled != fn.SystemName { + fn.Name = demangled + return + } + // Could not demangle. Apply heuristics in case the name is + // already demangled. + name := fn.SystemName + if looksLikeDemangledCPlusPlus(name) { + for _, o := range options { + switch o { + case demangle.NoParams: + name = removeMatching(name, '(', ')') + case demangle.NoTemplateParams: + name = removeMatching(name, '<', '>') + } + } + } + fn.Name = name +} + +// looksLikeDemangledCPlusPlus is a heuristic to decide if a name is +// the result of demangling C++. If so, further heuristics will be +// applied to simplify the name. +func looksLikeDemangledCPlusPlus(demangled string) bool { + // Skip java names of the form "class.". + if strings.Contains(demangled, ".<") { + return false + } + // Skip Go names of the form "foo.(*Bar[...]).Method". + if strings.Contains(demangled, "]).") { + return false + } + return strings.ContainsAny(demangled, "<>[]") || strings.Contains(demangled, "::") +} + +// removeMatching removes nested instances of start..end from name. +func removeMatching(name string, start, end byte) string { + s := string(start) + string(end) + var nesting, first, current int + for index := strings.IndexAny(name[current:], s); index != -1; index = strings.IndexAny(name[current:], s) { + switch current += index; name[current] { + case start: + nesting++ + if nesting == 1 { + first = current + } + case end: + nesting-- + switch { + case nesting < 0: + return name // Mismatch, abort + case nesting == 0: + name = name[:first] + name[current+1:] + current = first - 1 + } + } + current++ + } + return name +} + +// newMapping creates a mappingTable for a profile. 
+func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) { + mt := &mappingTable{ + prof: prof, + segments: make(map[*profile.Mapping]plugin.ObjFile), + } + + // Identify used mappings + mappings := make(map[*profile.Mapping]bool) + for _, l := range prof.Location { + mappings[l.Mapping] = true + } + + missingBinaries := false + for midx, m := range prof.Mapping { + if !mappings[m] { + continue + } + + // Do not attempt to re-symbolize a mapping that has already been symbolized. + if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) { + continue + } + + if m.File == "" { + if midx == 0 { + ui.PrintErr("Main binary filename not available.") + continue + } + missingBinaries = true + continue + } + + // Skip well-known system mappings + if m.Unsymbolizable() { + continue + } + + // Skip mappings pointing to a source URL + if m.BuildID == "" { + if u, err := url.Parse(m.File); err == nil && u.IsAbs() && strings.Contains(strings.ToLower(u.Scheme), "http") { + continue + } + } + + name := filepath.Base(m.File) + if m.BuildID != "" { + name += fmt.Sprintf(" (build ID %s)", m.BuildID) + } + f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol) + if err != nil { + ui.PrintErr("Local symbolization failed for ", name, ": ", err) + missingBinaries = true + continue + } + if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID { + ui.PrintErr("Local symbolization failed for ", name, ": build ID mismatch") + f.Close() + continue + } + + mt.segments[m] = f + } + if missingBinaries { + ui.PrintErr("Some binary filenames not available. Symbolization may be incomplete.\n" + + "Try setting PPROF_BINARY_PATH to the search path for local binaries.") + } + return mt, nil +} + +// mappingTable contains the mechanisms for symbolization of a +// profile. 
+type mappingTable struct { + prof *profile.Profile + segments map[*profile.Mapping]plugin.ObjFile +} + +// Close releases any external processes being used for the mapping. +func (mt *mappingTable) close() { + for _, segment := range mt.segments { + segment.Close() + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go new file mode 100644 index 0000000..7be3048 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go @@ -0,0 +1,200 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package symbolz symbolizes a profile using the output from the symbolz +// service. +package symbolz + +import ( + "bytes" + "fmt" + "io" + "net/url" + "path" + "regexp" + "strconv" + "strings" + + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +var ( + symbolzRE = regexp.MustCompile(`(0x[[:xdigit:]]+)\s+(.*)`) +) + +// Symbolize symbolizes profile p by parsing data returned by a symbolz +// handler. syms receives the symbolz query (hex addresses separated by '+') +// and returns the symbolz output in a string. If force is false, it will only +// symbolize locations from mappings not already marked as HasFunctions. Never +// attempts symbolization of addresses from unsymbolizable system +// mappings as those may look negative - e.g. "[vsyscall]". 
+func Symbolize(p *profile.Profile, force bool, sources plugin.MappingSources, syms func(string, string) ([]byte, error), ui plugin.UI) error { + for _, m := range p.Mapping { + if !force && m.HasFunctions { + // Only check for HasFunctions as symbolz only populates function names. + continue + } + // Skip well-known system mappings. + if m.Unsymbolizable() { + continue + } + mappingSources := sources[m.File] + if m.BuildID != "" { + mappingSources = append(mappingSources, sources[m.BuildID]...) + } + for _, source := range mappingSources { + if symz := symbolz(source.Source); symz != "" { + if err := symbolizeMapping(symz, int64(source.Start)-int64(m.Start), syms, m, p); err != nil { + return err + } + m.HasFunctions = true + break + } + } + } + + return nil +} + +// hasGperftoolsSuffix checks whether path ends with one of the suffixes listed in +// pprof_remote_servers.html from the gperftools distribution +func hasGperftoolsSuffix(path string) bool { + suffixes := []string{ + "/pprof/heap", + "/pprof/growth", + "/pprof/profile", + "/pprof/pmuprofile", + "/pprof/contention", + } + for _, s := range suffixes { + if strings.HasSuffix(path, s) { + return true + } + } + return false +} + +// symbolz returns the corresponding symbolz source for a profile URL. +func symbolz(source string) string { + if url, err := url.Parse(source); err == nil && url.Host != "" { + // All paths in the net/http/pprof Go package contain /debug/pprof/ + if strings.Contains(url.Path, "/debug/pprof/") || hasGperftoolsSuffix(url.Path) { + url.Path = path.Clean(url.Path + "/../symbol") + } else { + url.Path = "/symbolz" + } + url.RawQuery = "" + return url.String() + } + + return "" +} + +// symbolizeMapping symbolizes locations belonging to a Mapping by querying +// a symbolz handler. An offset is applied to all addresses to take care of +// normalization occurred for merged Mappings. 
func symbolizeMapping(source string, offset int64, syms func(string, string) ([]byte, error), m *profile.Mapping, p *profile.Profile) error {
	// Construct query of addresses to symbolize.
	var a []string
	for _, l := range p.Location {
		// Only query addresses in this mapping that have no line info yet.
		if l.Mapping == m && l.Address != 0 && len(l.Line) == 0 {
			// Compensate for normalization.
			addr, overflow := adjust(l.Address, offset)
			if overflow {
				return fmt.Errorf("cannot adjust address %d by %d, it would overflow (mapping %v)", l.Address, offset, l.Mapping)
			}
			a = append(a, fmt.Sprintf("%#x", addr))
		}
	}

	if len(a) == 0 {
		// No addresses to symbolize.
		return nil
	}

	// lines maps a profile-relative address to the Line to attach to it;
	// functions dedupes Function records by symbol name.
	lines := make(map[uint64]profile.Line)
	functions := make(map[string]*profile.Function)

	b, err := syms(source, strings.Join(a, "+"))
	if err != nil {
		return err
	}

	// Parse the response line by line; each line is "0xADDR name".
	buf := bytes.NewBuffer(b)
	for {
		l, err := buf.ReadString('\n')

		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}

		if symbol := symbolzRE.FindStringSubmatch(l); len(symbol) == 3 {
			origAddr, err := strconv.ParseUint(symbol[1], 0, 64)
			if err != nil {
				return fmt.Errorf("unexpected parse failure %s: %v", symbol[1], err)
			}
			// Reapply offset expected by the profile.
			addr, overflow := adjust(origAddr, -offset)
			if overflow {
				return fmt.Errorf("cannot adjust symbolz address %d by %d, it would overflow", origAddr, -offset)
			}

			name := symbol[2]
			fn := functions[name]
			if fn == nil {
				// Function IDs are 1-based and assigned in order of first
				// appearance, matching the current length of p.Function.
				fn = &profile.Function{
					ID:         uint64(len(p.Function) + 1),
					Name:       name,
					SystemName: name,
				}
				functions[name] = fn
				p.Function = append(p.Function, fn)
			}

			lines[addr] = profile.Line{Function: fn}
		}
	}

	// Attach the symbolized lines to this mapping's locations.
	for _, l := range p.Location {
		if l.Mapping != m {
			continue
		}
		if line, ok := lines[l.Address]; ok {
			l.Line = []profile.Line{line}
		}
	}

	return nil
}

// adjust shifts the specified address by the signed offset. It returns the
// adjusted address.
// It signals that the address cannot be adjusted without an
// overflow by returning true in the second return value.
func adjust(addr uint64, offset int64) (uint64, bool) {
	shifted := uint64(int64(addr) + offset)
	// A negative offset must move the address down and a non-negative one
	// must not move it down; any other outcome means the unsigned
	// arithmetic wrapped around.
	switch {
	case offset < 0 && shifted >= addr:
		return 0, true
	case offset >= 0 && shifted < addr:
		return 0, true
	default:
		return shifted, false
	}
}

// ---- file: src/cmd/vendor/github.com/google/pprof/internal/transport/transport.go ----

// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package transport provides a mechanism to send requests with https cert,
// key, and CA.
+package transport + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "sync" + + "github.com/google/pprof/internal/plugin" +) + +type transport struct { + cert *string + key *string + ca *string + caCertPool *x509.CertPool + certs []tls.Certificate + initOnce sync.Once + initErr error +} + +const extraUsage = ` -tls_cert TLS client certificate file for fetching profile and symbols + -tls_key TLS private key file for fetching profile and symbols + -tls_ca TLS CA certs file for fetching profile and symbols` + +// New returns a round tripper for making requests with the +// specified cert, key, and ca. The flags tls_cert, tls_key, and tls_ca are +// added to the flagset to allow a user to specify the cert, key, and ca. If +// the flagset is nil, no flags will be added, and users will not be able to +// use these flags. +func New(flagset plugin.FlagSet) http.RoundTripper { + if flagset == nil { + return &transport{} + } + flagset.AddExtraUsage(extraUsage) + return &transport{ + cert: flagset.String("tls_cert", "", "TLS client certificate file for fetching profile and symbols"), + key: flagset.String("tls_key", "", "TLS private key file for fetching profile and symbols"), + ca: flagset.String("tls_ca", "", "TLS CA certs file for fetching profile and symbols"), + } +} + +// initialize uses the cert, key, and ca to initialize the certs +// to use these when making requests. 
+func (tr *transport) initialize() error { + var cert, key, ca string + if tr.cert != nil { + cert = *tr.cert + } + if tr.key != nil { + key = *tr.key + } + if tr.ca != nil { + ca = *tr.ca + } + + if cert != "" && key != "" { + tlsCert, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + return fmt.Errorf("could not load certificate/key pair specified by -tls_cert and -tls_key: %v", err) + } + tr.certs = []tls.Certificate{tlsCert} + } else if cert == "" && key != "" { + return fmt.Errorf("-tls_key is specified, so -tls_cert must also be specified") + } else if cert != "" && key == "" { + return fmt.Errorf("-tls_cert is specified, so -tls_key must also be specified") + } + + if ca != "" { + caCertPool := x509.NewCertPool() + caCert, err := ioutil.ReadFile(ca) + if err != nil { + return fmt.Errorf("could not load CA specified by -tls_ca: %v", err) + } + caCertPool.AppendCertsFromPEM(caCert) + tr.caCertPool = caCertPool + } + + return nil +} + +// RoundTrip executes a single HTTP transaction, returning +// a Response for the provided Request. +func (tr *transport) RoundTrip(req *http.Request) (*http.Response, error) { + tr.initOnce.Do(func() { + tr.initErr = tr.initialize() + }) + if tr.initErr != nil { + return nil, tr.initErr + } + + tlsConfig := &tls.Config{ + RootCAs: tr.caCertPool, + Certificates: tr.certs, + } + + if req.URL.Scheme == "https+insecure" { + // Make shallow copy of request, and req.URL, so the request's URL can be + // modified. 
+ r := *req + *r.URL = *req.URL + req = &r + tlsConfig.InsecureSkipVerify = true + req.URL.Scheme = "https" + } + + transport := http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + } + + return transport.RoundTrip(req) +} diff --git a/src/cmd/vendor/github.com/google/pprof/profile/encode.go b/src/cmd/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 0000000..96aa271 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,576 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. 
func (p *Profile) preEncode() {
	// String table under construction, mapping string -> index.
	// Index 0 is reserved for the empty string.
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		// Emit string-valued labels in sorted key order for deterministic output.
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		// Numeric labels, also in sorted key order; units (when present)
		// are matched to values by position.
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			keyX := addString(strings, k)
			vs := s.NumLabel[k]
			units := s.NumUnit[k]
			for i, v := range vs {
				var unitX int64
				if len(units) != 0 {
					unitX = addString(strings, units[i])
				}
				s.labelX = append(s.labelX,
					label{
						keyX:  keyX,
						numX:  v,
						unitX: unitX,
					},
				)
			}
		}
		// Samples reference their locations by ID on the wire.
		s.locationIDX = make([]uint64, len(s.Location))
		for i, loc := range s.Location {
			s.locationIDX[i] = loc.ID
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	p.commentX = nil
	for _, c := range p.Comments {
		p.commentX = append(p.commentX,
			addString(strings, c))
	}

	p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)

	// Materialize the string table in index order.
	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}

// encode serializes p into b; the field numbers follow profile.proto
// and must stay in sync with profileDecoder below.
func (p *Profile) encode(b *buffer) {
	for _, x := range p.SampleType {
		encodeMessage(b, 1, x)
	}
	for _, x := range p.Sample {
		encodeMessage(b, 2, x)
	}
	for _, x := range p.Mapping {
		encodeMessage(b, 3, x)
	}
	for _, x := range p.Location {
		encodeMessage(b, 4, x)
	}
	for _, x := range p.Function {
		encodeMessage(b, 5, x)
	}
	encodeStrings(b, 6, p.stringTable)
	encodeInt64Opt(b, 7, p.dropFramesX)
	encodeInt64Opt(b, 8, p.keepFramesX)
	encodeInt64Opt(b, 9, p.TimeNanos)
	encodeInt64Opt(b, 10, p.DurationNanos)
	if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
		encodeMessage(b, 11, p.PeriodType)
	}
	encodeInt64Opt(b, 12, p.Period)
	encodeInt64s(b, 13, p.commentX)
	encodeInt64(b, 14, p.defaultSampleTypeX)
}

// profileDecoder is indexed by proto field number; entry i decodes
// field i of a Profile message.
var profileDecoder = []decoder{
	nil, // 0
	// repeated ValueType sample_type = 1
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.SampleType = append(pp.SampleType, x)
		return decodeMessage(b, x)
	},
	// repeated Sample sample = 2
	func(b *buffer, m message) error {
		x := new(Sample)
		pp := m.(*Profile)
		pp.Sample = append(pp.Sample, x)
		return decodeMessage(b, x)
	},
	// repeated Mapping mapping = 3
	func(b *buffer, m message) error {
		x := new(Mapping)
		pp := m.(*Profile)
		pp.Mapping = append(pp.Mapping, x)
		return decodeMessage(b, x)
	},
	// repeated Location location = 4
	func(b *buffer, m message) error {
		x := new(Location)
		x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
		pp := m.(*Profile)
		pp.Location = append(pp.Location, x)
		err := decodeMessage(b, x)
		var tmp []Line
		x.Line = append(tmp, x.Line...) // Shrink to allocated size
		return err
	},
	// repeated Function function = 5
	func(b *buffer, m message) error {
		x := new(Function)
		pp := m.(*Profile)
		pp.Function = append(pp.Function, x)
		return decodeMessage(b, x)
	},
	// repeated string string_table = 6
	func(b *buffer, m message) error {
		err := decodeStrings(b, &m.(*Profile).stringTable)
		if err != nil {
			return err
		}
		if m.(*Profile).stringTable[0] != "" {
			return errors.New("string_table[0] must be ''")
		}
		return nil
	},
	// int64 drop_frames = 7
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
	// int64 keep_frames = 8
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
	// int64 time_nanos = 9
	func(b *buffer, m message) error {
		// A second time_nanos indicates concatenated (not merged) profiles.
		if m.(*Profile).TimeNanos != 0 {
			return errConcatProfile
		}
		return decodeInt64(b, &m.(*Profile).TimeNanos)
	},
	// int64 duration_nanos = 10
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
	// ValueType period_type = 11
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.PeriodType = x
		return decodeMessage(b, x)
	},
	// int64 period = 12
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
	// repeated int64 comment = 13
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
	// int64 defaultSampleType = 14
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
}

// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
func (p *Profile) postDecode() error {
	var err error
	// Mappings are resolved by ID: small IDs use a dense slice indexed by
	// ID; IDs beyond the slice fall back to the map.
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	mappingIds := make([]*Mapping, len(p.Mapping)+1)
	for _, m := range p.Mapping {
		m.File, err = getString(p.stringTable, &m.fileX, err)
		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
		if m.ID < uint64(len(mappingIds)) {
			mappingIds[m.ID] = m
		} else {
			mappings[m.ID] = m
		}

		// If this a main linux kernel mapping with a relocation symbol suffix
		// ("[kernel.kallsyms]_text"), extract said suffix.
		// It is fairly hacky to handle at this level, but the alternatives appear even worse.
		if strings.HasPrefix(m.File, "[kernel.kallsyms]") {
			m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "")
		}

	}

	// Functions use the same dense-slice / overflow-map scheme.
	functions := make(map[uint64]*Function, len(p.Function))
	functionIds := make([]*Function, len(p.Function)+1)
	for _, f := range p.Function {
		f.Name, err = getString(p.stringTable, &f.nameX, err)
		f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
		f.Filename, err = getString(p.stringTable, &f.filenameX, err)
		if f.ID < uint64(len(functionIds)) {
			functionIds[f.ID] = f
		} else {
			functions[f.ID] = f
		}
	}

	// Locations too; they also resolve their mapping and function refs here.
	locations := make(map[uint64]*Location, len(p.Location))
	locationIds := make([]*Location, len(p.Location)+1)
	for _, l := range p.Location {
		if id := l.mappingIDX; id < uint64(len(mappingIds)) {
			l.Mapping = mappingIds[id]
		} else {
			l.Mapping = mappings[id]
		}
		l.mappingIDX = 0
		for i, ln := range l.Line {
			if id := ln.functionIDX; id != 0 {
				l.Line[i].functionIDX = 0
				if id < uint64(len(functionIds)) {
					l.Line[i].Function = functionIds[id]
				} else {
					l.Line[i].Function = functions[id]
				}
			}
		}
		if l.ID < uint64(len(locationIds)) {
			locationIds[l.ID] = l
		} else {
			locations[l.ID] = l
		}
	}

	for _, st := range p.SampleType {
		st.Type, err = getString(p.stringTable, &st.typeX, err)
		st.Unit, err = getString(p.stringTable, &st.unitX, err)
	}

	for _, s := range p.Sample {
		// Rebuild the Label/NumLabel/NumUnit maps from the flat labelX list.
		labels := make(map[string][]string, len(s.labelX))
		numLabels := make(map[string][]int64, len(s.labelX))
		numUnits := make(map[string][]string, len(s.labelX))
		for _, l := range s.labelX {
			var key, value string
			key, err = getString(p.stringTable, &l.keyX, err)
			if l.strX != 0 {
				// String-valued label.
				value, err = getString(p.stringTable, &l.strX, err)
				labels[key] = append(labels[key], value)
			} else if l.numX != 0 || l.unitX != 0 {
				// Numeric label; keep units position-aligned with values.
				numValues := numLabels[key]
				units := numUnits[key]
				if l.unitX != 0 {
					var unit string
					unit, err = getString(p.stringTable, &l.unitX, err)
					units = padStringArray(units, len(numValues))
					numUnits[key] = append(units, unit)
				}
				numLabels[key] = append(numLabels[key], l.numX)
			}
		}
		if len(labels) > 0 {
			s.Label = labels
		}
		if len(numLabels) > 0 {
			s.NumLabel = numLabels
			for key, units := range numUnits {
				if len(units) > 0 {
					numUnits[key] = padStringArray(units, len(numLabels[key]))
				}
			}
			s.NumUnit = numUnits
		}
		// Resolve location IDs into pointers.
		s.Location = make([]*Location, len(s.locationIDX))
		for i, lid := range s.locationIDX {
			if lid < uint64(len(locationIds)) {
				s.Location[i] = locationIds[lid]
			} else {
				s.Location[i] = locations[lid]
			}
		}
		s.locationIDX = nil
	}

	p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
	p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)

	// Guarantee a non-nil PeriodType for downstream consumers.
	if pt := p.PeriodType; pt == nil {
		p.PeriodType = &ValueType{}
	}

	if pt := p.PeriodType; pt != nil {
		pt.Type, err = getString(p.stringTable, &pt.typeX, err)
		pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
	}

	for _, i := range p.commentX {
		var c string
		c, err = getString(p.stringTable, &i, err)
		p.Comments = append(p.Comments, c)
	}

	p.commentX = nil
	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
	p.stringTable = nil
	return err
}

// padStringArray pads arr with enough empty strings to make arr
// length l when arr's length is
// less than l.
func padStringArray(arr []string, l int) []string {
	if l <= len(arr) {
		return arr
	}
	return append(arr, make([]string, l-len(arr))...)
}

func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}

func (p *Sample) decoder() []decoder {
	return sampleDecoder
}

func (p *Sample) encode(b *buffer) {
	encodeUint64s(b, 1, p.locationIDX)
	encodeInt64s(b, 2, p.Value)
	for _, x := range p.labelX {
		encodeMessage(b, 3, x)
	}
}

var sampleDecoder = []decoder{
	nil, // 0
	// repeated uint64 location = 1
	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
	// repeated int64 value = 2
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
	// repeated Label label = 3
	func(b *buffer, m message) error {
		s := m.(*Sample)
		n := len(s.labelX)
		s.labelX = append(s.labelX, label{})
		return decodeMessage(b, &s.labelX[n])
	},
}

func (p label) decoder() []decoder {
	return labelDecoder
}

func (p label) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.keyX)
	encodeInt64Opt(b, 2, p.strX)
	encodeInt64Opt(b, 3, p.numX)
	encodeInt64Opt(b, 4, p.unitX)
}

var labelDecoder = []decoder{
	nil, // 0
	// optional int64 key = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
	// optional int64 str = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
	// optional int64 num = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
	// optional int64 unit = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
}

func (p *Mapping) decoder() []decoder {
	return mappingDecoder
}

func (p *Mapping) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.Start)
	encodeUint64Opt(b, 3, p.Limit)
	encodeUint64Opt(b, 4, p.Offset)
	encodeInt64Opt(b, 5, p.fileX)
	encodeInt64Opt(b, 6, p.buildIDX)
	encodeBoolOpt(b, 7, p.HasFunctions)
	encodeBoolOpt(b, 8, p.HasFilenames)
	encodeBoolOpt(b, 9, p.HasLineNumbers)
	encodeBoolOpt(b, 10, p.HasInlineFrames)
}

var mappingDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_start = 2
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}

func (p *Location) decoder() []decoder {
	return locationDecoder
}

func (p *Location) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.mappingIDX)
	encodeUint64Opt(b, 3, p.Address)
	for i := range p.Line {
		encodeMessage(b, 4, &p.Line[i])
	}
	encodeBoolOpt(b, 5, p.IsFolded)
}

var locationDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
	func(b *buffer, m message) error { // repeated Line line = 4
		pp := m.(*Location)
		n := len(pp.Line)
		pp.Line = append(pp.Line, Line{})
		return decodeMessage(b, &pp.Line[n])
	},
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
}

func (p *Line) decoder() []decoder {
	return lineDecoder
}

func (p *Line) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.functionIDX)
	encodeInt64Opt(b, 2, p.Line)
}

var lineDecoder = []decoder{
	nil, // 0
	// optional uint64 function_id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
	// optional int64 line = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}

func (p *Function) decoder() []decoder {
	return functionDecoder
}

func (p *Function) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeInt64Opt(b, 2, p.nameX)
	encodeInt64Opt(b, 3, p.systemNameX)
	encodeInt64Opt(b, 4, p.filenameX)
	encodeInt64Opt(b, 5, p.StartLine)
}

var functionDecoder = []decoder{
	nil, // 0
	// optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
	// optional int64 function_name = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
	// optional int64 function_system_name = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
	// optional int64 filename = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
	// optional int64 start_line = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
}

// addString interns s in the string table under construction, returning
// its (stable) index. The empty string is always index 0.
func addString(strings map[string]int, s string) int64 {
	i, ok := strings[s]
	if !ok {
		i = len(strings)
		strings[s] = i
	}
	return int64(i)
}

// getString resolves *strng as an index into strings, clearing the index
// afterwards. The err argument threads a prior error through so callers
// can chain lookups and check once at the end.
func getString(strings []string, strng *int64, err error) (string, error) {
	if err != nil {
		return "", err
	}
	s := int(*strng)
	if s < 0 || s >= len(strings) {
		return "", errMalformed
	}
	*strng = 0
	return strings[s], nil
}

// ---- file: src/cmd/vendor/github.com/google/pprof/profile/filter.go ----

// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

// Implements methods to filter samples from profiles.

import "regexp"

// FilterSamplesByName filters the samples in a profile and only keeps
// samples where at least one frame matches focus but none match ignore.
// Returns true if the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
	// focusOrIgnore maps location ID -> true (focused) / false (ignored).
	// hidden collects location IDs whose every line was stripped by hide/show.
	focusOrIgnore := make(map[uint64]bool)
	hidden := make(map[uint64]bool)
	for _, l := range p.Location {
		if ignore != nil && l.matchesName(ignore) {
			im = true
			focusOrIgnore[l.ID] = false
		} else if focus == nil || l.matchesName(focus) {
			fm = true
			focusOrIgnore[l.ID] = true
		}

		// Note: hide and show mutate the location's Line slice in place.
		if hide != nil && l.matchesName(hide) {
			hm = true
			l.Line = l.unmatchedLines(hide)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			}
		}
		if show != nil {
			l.Line = l.matchedLines(show)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			} else {
				hnm = true
			}
		}
	}

	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
			if len(hidden) > 0 {
				var locs []*Location
				for _, loc := range sample.Location {
					if !hidden[loc.ID] {
						locs = append(locs, loc)
					}
				}
				if len(locs) == 0 {
					// Remove sample with no locations (by not adding it to s).
					continue
				}
				sample.Location = locs
			}
			s = append(s, sample)
		}
	}
	p.Sample = s

	return
}

// ShowFrom drops all stack frames above the highest matching frame and returns
// whether a match was found. If showFrom is nil it returns false and does not
// modify the profile.
//
// Example: consider a sample with frames [A, B, C, B], where A is the root.
// ShowFrom(nil) returns false and has frames [A, B, C, B].
// ShowFrom(A) returns true and has frames [A, B, C, B].
// ShowFrom(B) returns true and has frames [B, C, B].
// ShowFrom(C) returns true and has frames [C, B].
// ShowFrom(D) returns false and drops the sample because no frames remain.
func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
	if showFrom == nil {
		return false
	}
	// showFromLocs stores location IDs that matched ShowFrom.
	showFromLocs := make(map[uint64]bool)
	// Apply to locations.
	for _, loc := range p.Location {
		if filterShowFromLocation(loc, showFrom) {
			showFromLocs[loc.ID] = true
			matched = true
		}
	}
	// For all samples, strip locations after the highest matching one.
	// Samples with no matching location at all are dropped.
	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		// Locations run leaf-to-root, so scan from the root end.
		for i := len(sample.Location) - 1; i >= 0; i-- {
			if showFromLocs[sample.Location[i].ID] {
				sample.Location = sample.Location[:i+1]
				s = append(s, sample)
				break
			}
		}
	}
	p.Sample = s
	return matched
}

// filterShowFromLocation tests a showFrom regex against a location, removes
// lines after the last match and returns whether a match was found. If the
// mapping is matched, then all lines are kept.
func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
	if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
		return true
	}
	if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
		loc.Line = loc.Line[:i+1]
		return true
	}
	return false
}

// lastMatchedLineIndex returns the index of the last line that matches a regex,
// or -1 if no match is found.
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
	for i := len(loc.Line) - 1; i >= 0; i-- {
		if fn := loc.Line[i].Function; fn != nil {
			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
				return i
			}
		}
	}
	return -1
}

// FilterTagsByName filters the tags in a profile and only keeps
// tags that match show and not hide.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. 
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/src/cmd/vendor/github.com/google/pprof/profile/index.go b/src/cmd/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 0000000..bef1d60 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". 
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 0000000..91f45e5 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. 
+// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. 
Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. + return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. 
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. + var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. 
+ b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as both the ID of each +// location. +func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". 
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go b/src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 0000000..9ba9a77 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1229 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... 
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. 
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. 
+func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. 
+ p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + 
return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. 
+ return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + line = "" + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). 
+ if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. +func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. 
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. +func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. 
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. + return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. 
+func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. 
memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. + `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/src/cmd/vendor/github.com/google/pprof/profile/merge.go b/src/cmd/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 0000000..6fcd11d --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,482 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. 
+func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID map[uint64]*Location + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +// key generates sampleKey to be used as a key for maps. +func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) + } + + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) + } + sort.Strings(labels) + + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + } + sort.Strings(numlabels) + + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), + } +} + +type sampleKey struct { + locations string + labels string + numlabels string +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l, ok := pm.locationsByID[src.ID]; ok { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID[src.ID] = ll + return ll + } + pm.locationsByID[src.ID] = l + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. 
+func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. 
+ const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. 
+func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. 
+func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 0000000..5a3807f --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,814 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+ encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. 
+ KernelRelocationSymbol string +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = ioutil.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + 
parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. +func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. 
+ p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. +func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. 
Checks include, but are
// not limited to:
//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
//   - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
	// Check that sample values are consistent
	sampleLen := len(p.SampleType)
	if sampleLen == 0 && len(p.Sample) != 0 {
		return fmt.Errorf("missing sample type information")
	}
	for _, s := range p.Sample {
		if s == nil {
			return fmt.Errorf("profile has nil sample")
		}
		if len(s.Value) != sampleLen {
			return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
		}
		for _, l := range s.Location {
			if l == nil {
				return fmt.Errorf("sample has nil location")
			}
		}
	}

	// Check that all mappings/locations/functions are in the tables
	// Check that there are no duplicate ids
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	for _, m := range p.Mapping {
		if m == nil {
			return fmt.Errorf("profile has nil mapping")
		}
		if m.ID == 0 {
			return fmt.Errorf("found mapping with reserved ID=0")
		}
		if mappings[m.ID] != nil {
			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
		}
		mappings[m.ID] = m
	}
	functions := make(map[uint64]*Function, len(p.Function))
	for _, f := range p.Function {
		if f == nil {
			return fmt.Errorf("profile has nil function")
		}
		if f.ID == 0 {
			return fmt.Errorf("found function with reserved ID=0")
		}
		if functions[f.ID] != nil {
			return fmt.Errorf("multiple functions with same id: %d", f.ID)
		}
		functions[f.ID] = f
	}
	locations := make(map[uint64]*Location, len(p.Location))
	for _, l := range p.Location {
		if l == nil {
			return fmt.Errorf("profile has nil location")
		}
		if l.ID == 0 {
			return fmt.Errorf("found location with reserved id=0")
		}
		if locations[l.ID] != nil {
			return fmt.Errorf("multiple locations with same id: %d", l.ID)
		}
		locations[l.ID] = l
		if m := l.Mapping; m != nil {
			// The referenced mapping must be the exact object registered
			// under its ID, not merely one with a matching ID.
			if m.ID == 0 || mappings[m.ID] != m {
				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
			}
		}
		for _, ln := range l.Line {
			f := ln.Function
			if f == nil {
				return fmt.Errorf("location id: %d has a line with nil function", l.ID)
			}
			// Same identity (pointer) check for functions referenced by lines.
			if f.ID == 0 || functions[f.ID] != f {
				return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
			}
		}
	}
	return nil
}

// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
	// Downgrade the mapping capability flags for any attribute being dropped.
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber {
		for _, l := range p.Location {
			if !inlineFrame && len(l.Line) > 1 {
				// Keep only the outermost (last) frame when dropping inlined frames.
				l.Line = l.Line[len(l.Line)-1:]
			}
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	// Re-validate: aggregation must not break profile invariants.
	return p.CheckValid()
}

// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// Unit for a given key is the first encountered unit for that key. If multiple
// units are encountered for values paired with a particular key, then the first
// unit encountered is used and all other units are returned in sorted order
// in map of ignored units.
// If no units are encountered for a particular key, the unit is then inferred
// based on the key.
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. 
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. 
func (l *Location) string() string {
	ss := []string{}
	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
	if m := l.Mapping; m != nil {
		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
	}
	if l.IsFolded {
		locStr = locStr + "[F] "
	}
	// A location with no line info still gets one line with just the header.
	if len(l.Line) == 0 {
		ss = append(ss, locStr)
	}
	for li := range l.Line {
		lnStr := "??"
		if fn := l.Line[li].Function; fn != nil {
			lnStr = fmt.Sprintf("%s %s:%d s=%d",
				fn.Name,
				fn.Filename,
				l.Line[li].Line,
				fn.StartLine)
			if fn.Name != fn.SystemName {
				lnStr = lnStr + "(" + fn.SystemName + ")"
			}
		}
		ss = append(ss, locStr+lnStr)
		// Do not print location details past the first line
		// NOTE(review): this padding literal appears whitespace-collapsed in
		// this view; upstream pprof uses a multi-space pad — confirm width.
		locStr = " "
	}
	return strings.Join(ss, "\n")
}

// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
	ss := []string{}
	var sv string
	// Values first, right-aligned, then the chain of location IDs.
	for _, v := range s.Value {
		sv = fmt.Sprintf("%s %10d", sv, v)
	}
	sv = sv + ": "
	for _, l := range s.Location {
		sv = sv + fmt.Sprintf("%d ", l.ID)
	}
	ss = append(ss, sv)
	// NOTE(review): labelHeader appears whitespace-collapsed in this view;
	// upstream indents label lines under the sample — confirm width.
	const labelHeader = " "
	if len(s.Label) > 0 {
		ss = append(ss, labelHeader+labelsToString(s.Label))
	}
	if len(s.NumLabel) > 0 {
		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
	}
	return strings.Join(ss, "\n")
}

// labelsToString returns a string representation of a
// map representing labels.
func labelsToString(labels map[string][]string) string {
	ls := []string{}
	for k, v := range labels {
		ls = append(ls, fmt.Sprintf("%s:%v", k, v))
	}
	// Sort for deterministic output regardless of map iteration order.
	sort.Strings(ls)
	return strings.Join(ls, " ")
}

// numLabelsToString returns a string representation of a map
// representing numeric labels.
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. 
func (p *Profile) Copy() *Profile {
	pp := &Profile{}
	// Round-trip the profile through its wire format to obtain a deep,
	// fully independent copy. A failure here means the receiver itself is
	// internally inconsistent, so panicking is intentional.
	if err := unmarshal(serialize(p), pp); err != nil {
		panic(err)
	}
	if err := pp.postDecode(); err != nil {
		panic(err)
	}

	return pp
}
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/proto.go b/src/cmd/vendor/github.com/google/pprof/profile/proto.go
new file mode 100644
index 0000000..539ad3a
--- /dev/null
+++ b/src/cmd/vendor/github.com/google/pprof/profile/proto.go
@@ -0,0 +1,370 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file is a simple protocol buffer encoder and decoder.
// The format is described at
// https://developers.google.com/protocol-buffers/docs/encoding
//
// A protocol message must implement the message interface:
//   decoder() []decoder
//   encode(*buffer)
//
// The decode method returns a slice indexed by field number that gives the
// function to decode that field.
// The encode method encodes its receiver into the given buffer.
//
// The two methods are simple enough to be implemented by hand rather than
// by using a protocol compiler.
//
// See profile.go for examples of messages implementing this interface.
//
// There is no support for groups, message sets, or "has" bits.

package profile

import (
	"errors"
	"fmt"
)

// buffer doubles as the encoder output (data accumulates encoded bytes)
// and the decoder cursor (field/typ/u64/data describe the field most
// recently pulled off the wire).
type buffer struct {
	field int // field tag
	typ   int // proto wire type code for field
	u64   uint64
	data  []byte
	tmp   [16]byte
}

type decoder func(*buffer, message) error

// message is implemented by every hand-written proto message in this package.
type message interface {
	decoder() []decoder
	encode(*buffer)
}

// marshal encodes m and returns the raw protobuf bytes.
func marshal(m message) []byte {
	var b buffer
	m.encode(&b)
	return b.data
}

// encodeVarint appends x to b.data in base-128 varint encoding.
func encodeVarint(b *buffer, x uint64) {
	for x >= 128 {
		b.data = append(b.data, byte(x)|0x80)
		x >>= 7
	}
	b.data = append(b.data, byte(x))
}

// encodeLength appends the header for a length-delimited (wire type 2) field.
func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}

// encodeUint64 appends a varint (wire type 0) field.
func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3)
	encodeVarint(b, x)
}

func encodeUint64s(b *buffer, tag int, x []uint64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, u)
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		// The header was written after the payload; rotate it in front.
		// tmp suffices because a tag+length header is at most 16 bytes.
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}

// encodeUint64Opt skips zero values (proto3 default elision).
func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

func encodeInt64s(b *buffer, tag int, x []int64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, uint64(u))
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		// Same header rotation as encodeUint64s.
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeInt64(b, tag, u)
	}
}

func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}

func encodeString(b *buffer, tag int, x string) {
	encodeLength(b, tag, len(x))
	b.data = append(b.data, x...)
}

func encodeStrings(b *buffer, tag int, x []string) {
	for _, s := range x {
		encodeString(b, tag, s)
	}
}

// encodeBool writes the bool as a 0/1 varint field.
func encodeBool(b *buffer, tag int, x bool) {
	if x {
		encodeUint64(b, tag, 1)
	} else {
		encodeUint64(b, tag, 0)
	}
}

// encodeBoolOpt only emits the field when true (false is the proto3 default).
func encodeBoolOpt(b *buffer, tag int, x bool) {
	if x {
		encodeBool(b, tag, x)
	}
}

// encodeMessage writes m as a length-delimited sub-message.
func encodeMessage(b *buffer, tag int, m message) {
	n1 := len(b.data)
	m.encode(b)
	n2 := len(b.data)
	encodeLength(b, tag, n2-n1)
	n3 := len(b.data)
	// Rotate the trailing header in front of the sub-message payload.
	copy(b.tmp[:], b.data[n2:n3])
	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
	copy(b.data[n1:], b.tmp[:n3-n2])
}

// unmarshal decodes data into m. typ is seeded with 2 so that
// decodeMessage's top-level wire-type check passes.
func unmarshal(data []byte, m message) (err error) {
	b := buffer{data: data, typ: 2}
	return decodeMessage(&b, m)
}

// le64 reads a little-endian fixed64 value.
func le64(p []byte) uint64 {
	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
}

// le32 reads a little-endian fixed32 value.
func le32(p []byte) uint32 {
	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}

// decodeVarint reads one varint, returning the value and the remaining data.
// Rejects varints longer than 10 bytes (the maximum for a uint64).
func decodeVarint(data []byte) (uint64, []byte, error) {
	var u uint64
	for i := 0; ; i++ {
		if i >= 10 || i >= len(data) {
			return 0, nil, errors.New("bad varint")
		}
		u |= uint64(data[i]&0x7F) << uint(7*i)
		if data[i]&0x80 == 0 {
			return u, data[i+1:], nil
		}
	}
}

// decodeField pulls one field (tag, wire type, and payload) off data into b
// and returns the remaining bytes.
func decodeField(b *buffer, data []byte) ([]byte, error) {
	x, data, err := decodeVarint(data)
	if err != nil {
		return nil, err
	}
	b.field = int(x >> 3)
	b.typ = int(x & 7)
	b.data = nil
	b.u64 = 0
	switch b.typ {
	case 0:
		// varint
		b.u64, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
	case 1:
		// fixed64
		if len(data) < 8 {
			return nil, errors.New("not enough data")
		}
		b.u64 = le64(data[:8])
		data = data[8:]
	case 2:
		// length-delimited
		var n uint64
		n, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
		if n > uint64(len(data)) {
			return nil, errors.New("too much data")
		}
		b.data = data[:n]
		data = data[n:]
	case 5:
		// fixed32
		if len(data) < 4 {
			return nil, errors.New("not enough data")
		}
		b.u64 = uint64(le32(data[:4]))
		data = data[4:]
	default:
		return nil, fmt.Errorf("unknown wire type: %d", b.typ)
	}

	return data, nil
}

func checkType(b *buffer, typ int) error {
	if b.typ != typ {
		return errors.New("type mismatch")
	}
	return nil
}

// decodeMessage walks all fields in b.data, dispatching each to m's
// per-field decoder table. Unknown fields are skipped.
func decodeMessage(b *buffer, m message) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	dec := m.decoder()
	data := b.data
	for len(data) > 0 {
		// pull varint field# + type
		var err error
		data, err = decodeField(b, data)
		if err != nil {
			return err
		}
		if b.field >= len(dec) || dec[b.field] == nil {
			continue
		}
		if err := dec[b.field](b, m); err != nil {
			return err
		}
	}
	return nil
}

func decodeInt64(b *buffer, x *int64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = int64(b.u64)
	return nil
}

// decodeInt64s handles both packed (wire type 2) and repeated scalar forms.
func decodeInt64s(b *buffer, x *[]int64) error {
	if b.typ == 2 {
		// Packed encoding
		data := b.data
		tmp := make([]int64, 0, len(data)) // Maximally sized
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			tmp = append(tmp, int64(u))
		}
		*x = append(*x, tmp...)
		return nil
	}
	var i int64
	if err := decodeInt64(b, &i); err != nil {
		return err
	}
	*x = append(*x, i)
	return nil
}

func decodeUint64(b *buffer, x *uint64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = b.u64
	return nil
}

// decodeUint64s handles both packed (wire type 2) and repeated scalar forms.
func decodeUint64s(b *buffer, x *[]uint64) error {
	if b.typ == 2 {
		data := b.data
		// Packed encoding
		tmp := make([]uint64, 0, len(data)) // Maximally sized
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			tmp = append(tmp, u)
		}
		*x = append(*x, tmp...)
		return nil
	}
	var u uint64
	if err := decodeUint64(b, &u); err != nil {
		return err
	}
	*x = append(*x, u)
	return nil
}

func decodeString(b *buffer, x *string) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	*x = string(b.data)
	return nil
}

func decodeStrings(b *buffer, x *[]string) error {
	var s string
	if err := decodeString(b, &s); err != nil {
		return err
	}
	*x = append(*x, s)
	return nil
}

// decodeBool treats any non-zero varint as true.
func decodeBool(b *buffer, x *bool) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	if int64(b.u64) == 0 {
		*x = false
	} else {
		*x = true
	}
	return nil
}
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/prune.go b/src/cmd/vendor/github.com/google/pprof/profile/prune.go
new file mode 100644
index 0000000..02d21a8
--- /dev/null
+++ b/src/cmd/vendor/github.com/google/pprof/profile/prune.go
@@ -0,0 +1,178 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Implements methods to remove frames from profiles.

package profile

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	// reservedNames are substrings that contain '(' but are part of a
	// function's name, not its argument list.
	reservedNames = []string{"(anonymous namespace)", "operator()"}
	// bracketRx matches either a reserved name or a bare '('.
	bracketRx = func() *regexp.Regexp {
		var quotedNames []string
		for _, name := range append(reservedNames, "(") {
			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
		}
		return regexp.MustCompile(strings.Join(quotedNames, "|"))
	}()
)

// simplifyFunc does some primitive simplification of function names.
func simplifyFunc(f string) string {
	// Account for leading '.' on the PPC ELF v1 ABI.
	funcName := strings.TrimPrefix(f, ".")
	// Account for unsimplified names -- try to remove the argument list by trimming
	// starting from the first '(', but skipping reserved names that have '('.
	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
		foundReserved := false
		for _, res := range reservedNames {
			if funcName[ind[0]:ind[1]] == res {
				foundReserved = true
				break
			}
		}
		if !foundReserved {
			funcName = funcName[:ind[0]]
			break
		}
	}
	return funcName
}

// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
	// prune marks locations to remove entirely; pruneBeneath marks
	// locations whose callees (frames beneath them) should be removed.
	prune := make(map[uint64]bool)
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		var i int
		// Walk inlined lines from the outermost frame inward, stopping at
		// the first one that matches dropRx and is not rescued by keepRx.
		for i = len(loc.Line) - 1; i >= 0; i-- {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := simplifyFunc(fn.Name)
				if dropRx.MatchString(funcName) {
					if keepRx == nil || !keepRx.MatchString(funcName) {
						break
					}
				}
			}
		}

		if i >= 0 {
			// Found matching entry to prune.
			pruneBeneath[loc.ID] = true

			// Remove the matching location.
			if i == len(loc.Line)-1 {
				// Matched the top entry: prune the whole location.
				prune[loc.ID] = true
			} else {
				loc.Line = loc.Line[i+1:]
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the root to the leaves to find the prune location.
		// Do not prune frames before the first user frame, to avoid
		// pruning everything.
		foundUser := false
		for i := len(sample.Location) - 1; i >= 0; i-- {
			id := sample.Location[i].ID
			if !prune[id] && !pruneBeneath[id] {
				foundUser = true
				continue
			}
			if !foundUser {
				continue
			}
			if prune[id] {
				sample.Location = sample.Location[i+1:]
				break
			}
			if pruneBeneath[id] {
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}

// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
func (p *Profile) RemoveUninteresting() error {
	var keep, drop *regexp.Regexp
	var err error

	// Note: KeepFrames is only consulted when DropFrames is set; this
	// mirrors the semantics of the profile proto's drop/keep fields.
	if p.DropFrames != "" {
		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
		}
		if p.KeepFrames != "" {
			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
			}
		}
		p.Prune(drop, keep)
	}
	return nil
}

// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
//
// Please see the example below to understand this method as well as
// the difference from Prune method.
//
// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
//
// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
//
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. + for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE new file mode 100644 index 0000000..b014515 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE @@ -0,0 +1,13 @@ +Copyright 2010-2021 Mike Bostock + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md new file mode 100644 index 0000000..eb84b68 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md @@ -0,0 +1,33 @@ +# Building a customized D3.js bundle + +The D3.js version distributed with pprof is customized to only include the +modules required by pprof. + +## Dependencies + +- Install [npm](https://www.npmjs.com). + +## Building + +- Run `update.sh` to: + - Download npm package dependencies (declared in `package.json` and `package-lock.json`) + - Create a d3.js bundle containing the JavaScript of d3 and d3-flame-graph (by running `webpack`) + +This will generate `d3_flame_graph.go`, the minified custom D3.js bundle as Go source code. + +# References / Appendix + +## D3 Custom Bundle + +A demonstration of building a custom D3 4.0 bundle using ES2015 modules and Rollup. + +[bl.ocks.org/mbostock/bb09af4c39c79cffcde4](https://bl.ocks.org/mbostock/bb09af4c39c79cffcde4) + +## Old version of d3-pprof + +A previous version of d3-flame-graph bundled for pprof used Rollup instead of +Webpack. This has now been migrated directly into this directory. + +The repository configuring Rollup was here: + +[github.com/spiermar/d3-pprof](https://github.com/spiermar/d3-pprof) diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go new file mode 100644 index 0000000..7e27941 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go @@ -0,0 +1,65 @@ +// D3.js is a JavaScript library for manipulating documents based on data. +// https://github.com/d3/d3 +// See D3_LICENSE file for license details + +// d3-flame-graph is a D3.js plugin that produces flame graphs from hierarchical data.
+// https://github.com/spiermar/d3-flame-graph +// See D3_FLAME_GRAPH_LICENSE file for license details + +package d3flamegraph + +// JSSource returns the d3 and d3-flame-graph JavaScript bundle +const JSSource = ` + +!function(t,n){if("object"==typeof exports&&"object"==typeof module)module.exports=n();else if("function"==typeof define&&define.amd)define([],n);else{var e=n();for(var r in e)("object"==typeof exports?exports:t)[r]=e[r]}}(self,(function(){return(()=>{"use strict";var t={d:(n,e)=>{for(var r in e)t.o(e,r)&&!t.o(n,r)&&Object.defineProperty(n,r,{enumerable:!0,get:e[r]})},o:(t,n)=>Object.prototype.hasOwnProperty.call(t,n),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},n={};function e(){}function r(t){return null==t?e:function(){return this.querySelector(t)}}function i(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function o(){return[]}function u(t){return function(n){return n.matches(t)}}t.r(n),t.d(n,{flamegraph:()=>ji,select:()=>pt});var a=Array.prototype.find;function l(){return this.firstElementChild}var s=Array.prototype.filter;function c(){return Array.from(this.children)}function f(t){return new Array(t.length)}function h(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}function p(t){return function(){return t}}function d(t,n,e,r,i,o){for(var u,a=0,l=n.length,s=o.length;an?1:t>=n?0:NaN}h.prototype={constructor:h,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var _="http://www.w3.org/1999/xhtml";const 
w={svg:"http://www.w3.org/2000/svg",xhtml:_,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function b(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),w.hasOwnProperty(n)?{space:w[n],local:t}:t}function x(t){return function(){this.removeAttribute(t)}}function M(t){return function(){this.removeAttributeNS(t.space,t.local)}}function A(t,n){return function(){this.setAttribute(t,n)}}function N(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function E(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function k(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function S(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function C(t){return function(){this.style.removeProperty(t)}}function P(t,n,e){return function(){this.style.setProperty(t,n,e)}}function j(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function q(t,n){return t.style.getPropertyValue(n)||S(t).getComputedStyle(t,null).getPropertyValue(n)}function O(t){return function(){delete this[t]}}function L(t,n){return function(){this[t]=n}}function T(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function B(t){return t.trim().split(/^|\s+/)}function D(t){return t.classList||new H(t)}function H(t){this._node=t,this._names=B(t.getAttribute("class")||"")}function R(t,n){for(var e=D(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function ut(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var 
ft=[null];function ht(t,n){this._groups=t,this._parents=n}function pt(t){return"string"==typeof t?new ht([[document.querySelector(t)]],[document.documentElement]):new ht([[t]],ft)}function dt(){}function gt(t){return null==t?dt:function(){return this.querySelector(t)}}function vt(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function yt(){return[]}function mt(t){return null==t?yt:function(){return this.querySelectorAll(t)}}function _t(t){return function(){return this.matches(t)}}function wt(t){return function(n){return n.matches(t)}}ht.prototype=function(){return new ht([[document.documentElement]],ft)}.prototype={constructor:ht,select:function(t){"function"!=typeof t&&(t=r(t));for(var n=this._groups,e=n.length,i=new Array(e),o=0;o=E&&(E=N+1);!(A=b[E])&&++E<_;);M._next=A||null}}return(u=new ht(u,r))._enter=a,u._exit=l,u},enter:function(){return new ht(this._enter||this._groups.map(f),this._parents)},exit:function(){return new ht(this._exit||this._groups.map(f),this._parents)},join:function(t,n,e){var r=this.enter(),i=this,o=this.exit();return"function"==typeof t?(r=t(r))&&(r=r.selection()):r=r.append(t+""),null!=n&&(i=n(i))&&(i=i.selection()),null==e?o.remove():e(o),r&&i?r.merge(i).order():i},merge:function(t){for(var n=t.selection?t.selection():t,e=this._groups,r=n._groups,i=e.length,o=r.length,u=Math.min(i,o),a=new Array(i),l=0;l=0;)(r=i[o])&&(u&&4^r.compareDocumentPosition(u)&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=m);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?C:"function"==typeof n?j:P)(t,n,null==e?"":e)):q(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?O:"function"==typeof n?T:L)(t,n)):this.node()[t]},classed:function(t,n){var e=B(t+"");if(arguments.length<2){for(var r=D(this.node()),i=-1,o=e.length;++in?1:t>=n?0:NaN}Et.prototype={constructor:Et,appendChild:function(t){return 
this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var Ot="http://www.w3.org/1999/xhtml";const Lt={svg:"http://www.w3.org/2000/svg",xhtml:Ot,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function Tt(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),Lt.hasOwnProperty(n)?{space:Lt[n],local:t}:t}function Bt(t){return function(){this.removeAttribute(t)}}function Dt(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Ht(t,n){return function(){this.setAttribute(t,n)}}function Rt(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function Vt(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function Xt(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function zt(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function It(t){return function(){this.style.removeProperty(t)}}function $t(t,n,e){return function(){this.style.setProperty(t,n,e)}}function Ut(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function Yt(t,n){return t.style.getPropertyValue(n)||zt(t).getComputedStyle(t,null).getPropertyValue(n)}function Ft(t){return function(){delete this[t]}}function Zt(t,n){return function(){this[t]=n}}function Gt(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function Jt(t){return t.trim().split(/^|\s+/)}function Kt(t){return t.classList||new Qt(t)}function Qt(t){this._node=t,this._names=Jt(t.getAttribute("class")||"")}function Wt(t,n){for(var 
e=Kt(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function bn(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var En=[null];function kn(t,n){this._groups=t,this._parents=n}function Sn(){return new kn([[document.documentElement]],En)}kn.prototype=Sn.prototype={constructor:kn,select:function(t){"function"!=typeof t&&(t=gt(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i=b&&(b=w+1);!(_=v[b])&&++b=0;)(r=i[o])&&(u&&4^r.compareDocumentPosition(u)&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=qt);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?It:"function"==typeof n?Ut:$t)(t,n,null==e?"":e)):Yt(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?Ft:"function"==typeof n?Gt:Zt)(t,n)):this.node()[t]},classed:function(t,n){var e=Jt(t+"");if(arguments.length<2){for(var r=Kt(this.node()),i=-1,o=e.length;++i1?r[0]+r.slice(2):r,+t.slice(e+1)]}function qn(t){return(t=jn(Math.abs(t)))?t[1]:NaN}var On,Ln=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Tn(t){if(!(n=Ln.exec(t)))throw new Error("invalid format: "+t);var n;return new Bn({fill:n[1],align:n[2],sign:n[3],symbol:n[4],zero:n[5],width:n[6],comma:n[7],precision:n[8]&&n[8].slice(1),trim:n[9],type:n[10]})}function Bn(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function Dn(t,n){var e=jn(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Tn.prototype=Bn.prototype,Bn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};const Hn={"%":(t,n)=>(100*t).toFixed(n),b:t=>Math.round(t).toString(2),c:t=>t+"",d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:(t,n)=>t.toExponential(n),f:(t,n)=>t.toFixed(n),g:(t,n)=>t.toPrecision(n),o:t=>Math.round(t).toString(8),p:(t,n)=>Dn(100*t,n),r:Dn,s:function(t,n){var e=jn(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(On=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,u=r.length;return o===u?r:o>u?r+new Array(o-u+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+jn(t,Math.max(0,n+o-1))[0]},X:t=>Math.round(t).toString(16).toUpperCase(),x:t=>Math.round(t).toString(16)};function Rn(t){return t}var Vn,Xn,zn,In=Array.prototype.map,$n=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function Un(t,n){return null==t||null==n?NaN:tn?1:t>=n?0:NaN}function Yn(t){t.x0=Math.round(t.x0),t.y0=Math.round(t.y0),t.x1=Math.round(t.x1),t.y1=Math.round(t.y1)}function Fn(t){var n=0,e=t.children,r=e&&e.length;if(r)for(;--r>=0;)n+=e[r].value;else n=1;t.value=n}function Zn(t,n){t instanceof Map?(t=[void 0,t],void 0===n&&(n=Jn)):void 0===n&&(n=Gn);for(var e,r,i,o,u,a=new 
Wn(t),l=[a];e=l.pop();)if((i=n(e.data))&&(u=(i=Array.from(i)).length))for(e.children=i,o=u-1;o>=0;--o)l.push(r=i[o]=new Wn(i[o])),r.parent=e,r.depth=e.depth+1;return a.eachBefore(Qn)}function Gn(t){return t.children}function Jn(t){return Array.isArray(t)?t[1]:null}function Kn(t){void 0!==t.data.value&&(t.value=t.data.value),t.data=t.data.data}function Qn(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function Wn(t){this.data=t,this.depth=this.height=0,this.parent=null}Vn=function(t){var n,e,r=void 0===t.grouping||void 0===t.thousands?Rn:(n=In.call(t.grouping,Number),e=t.thousands+"",function(t,r){for(var i=t.length,o=[],u=0,a=n[0],l=0;i>0&&a>0&&(l+a+1>r&&(a=Math.max(1,r-l)),o.push(t.substring(i-=a,i+a)),!((l+=a+1)>r));)a=n[u=(u+1)%n.length];return o.reverse().join(e)}),i=void 0===t.currency?"":t.currency[0]+"",o=void 0===t.currency?"":t.currency[1]+"",u=void 0===t.decimal?".":t.decimal+"",a=void 0===t.numerals?Rn:function(t){return function(n){return n.replace(/[0-9]/g,(function(n){return t[+n]}))}}(In.call(t.numerals,String)),l=void 0===t.percent?"%":t.percent+"",s=void 0===t.minus?"−":t.minus+"",c=void 0===t.nan?"NaN":t.nan+"";function f(t){var n=(t=Tn(t)).fill,e=t.align,f=t.sign,h=t.symbol,p=t.zero,d=t.width,g=t.comma,v=t.precision,y=t.trim,m=t.type;"n"===m?(g=!0,m="g"):Hn[m]||(void 0===v&&(v=12),y=!0,m="g"),(p||"0"===n&&"="===e)&&(p=!0,n="0",e="=");var _="$"===h?i:"#"===h&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",w="$"===h?o:/[%p]/.test(m)?l:"",b=Hn[m],x=/[defgprs%]/.test(m);function M(t){var i,o,l,h=_,M=w;if("c"===m)M=b(t)+M,t="";else{var A=(t=+t)<0||1/t<0;if(t=isNaN(t)?c:b(Math.abs(t),v),y&&(t=function(t){t:for(var n,e=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(n+1):t}(t)),A&&0==+t&&"+"!==f&&(A=!1),h=(A?"("===f?f:s:"-"===f||"("===f?"":f)+h,M=("s"===m?$n[8+On/3]:"")+M+(A&&"("===f?")":""),x)for(i=-1,o=t.length;++i(l=t.charCodeAt(i))||l>57){M=(46===l?u+t.slice(i+1):t.slice(i))+M,t=t.slice(0,i);break}}g&&!p&&(t=r(t,1/0));var 
N=h.length+t.length+M.length,E=N>1)+h+t+M+E.slice(N);break;default:t=E+h+t+M}return a(t)}return v=void 0===v?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,v)):Math.max(0,Math.min(20,v)),M.toString=function(){return t+""},M}return{format:f,formatPrefix:function(t,n){var e=f(((t=Tn(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(qn(n)/3))),i=Math.pow(10,-r),o=$n[8+r/3];return function(t){return e(i*t)+o}}}}({thousands:",",grouping:[3],currency:["$",""]}),Xn=Vn.format,zn=Vn.formatPrefix,Wn.prototype=Zn.prototype={constructor:Wn,count:function(){return this.eachAfter(Fn)},each:function(t,n){let e=-1;for(const r of this)t.call(n,r,++e,this);return this},eachAfter:function(t,n){for(var e,r,i,o=this,u=[o],a=[],l=-1;o=u.pop();)if(a.push(o),e=o.children)for(r=0,i=e.length;r=0;--r)o.push(e[r]);return this},find:function(t,n){let e=-1;for(const r of this)if(t.call(n,r,++e,this))return r},sum:function(t){return this.eachAfter((function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e}))},sort:function(t){return this.eachBefore((function(n){n.children&&n.children.sort(t)}))},path:function(t){for(var n=this,e=function(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return i}(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){return Array.from(this)},leaves:function(){var t=[];return this.eachBefore((function(n){n.children||t.push(n)})),t},links:function(){var t=this,n=[];return t.each((function(e){e!==t&&n.push({source:e.parent,target:e})})),n},copy:function(){return Zn(this).eachBefore(Kn)},[Symbol.iterator]:function*(){var t,n,e,r,i=this,o=[i];do{for(t=o.reverse(),o=[];i=t.pop();)if(yield 
i,n=i.children)for(e=0,r=n.length;e=0?(o>=te?10:o>=ne?5:o>=ee?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=te?10:o>=ne?5:o>=ee?2:1)}function ie(t){let n=t,e=t,r=t;function i(t,n,i=0,o=t.length){if(i>>1;r(t[e],n)<0?i=e+1:o=e}while(it(n)-e,e=Un,r=(n,e)=>Un(t(n),e)),{left:i,center:function(t,e,r=0,o=t.length){const u=i(t,e,r,o-1);return u>r&&n(t[u-1],e)>-n(t[u],e)?u-1:u},right:function(t,n,i=0,o=t.length){if(i>>1;r(t[e],n)<=0?i=e+1:o=e}while(i>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1):8===e?Se(n>>24&255,n>>16&255,n>>8&255,(255&n)/255):4===e?Se(n>>12&15|n>>8&240,n>>8&15|n>>4&240,n>>4&15|240&n,((15&n)<<4|15&n)/255):null):(n=ye.exec(t))?new je(n[1],n[2],n[3],1):(n=me.exec(t))?new je(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=_e.exec(t))?Se(n[1],n[2],n[3],n[4]):(n=we.exec(t))?Se(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=be.exec(t))?Te(n[1],n[2]/100,n[3]/100,1):(n=xe.exec(t))?Te(n[1],n[2]/100,n[3]/100,n[4]):Me.hasOwnProperty(t)?ke(Me[t]):"transparent"===t?new je(NaN,NaN,NaN,0):null}function ke(t){return new je(t>>16&255,t>>8&255,255&t,1)}function Se(t,n,e,r){return r<=0&&(t=n=e=NaN),new je(t,n,e,r)}function Ce(t){return t instanceof ce||(t=Ee(t)),t?new je((t=t.rgb()).r,t.g,t.b,t.opacity):new je}function Pe(t,n,e,r){return 1===arguments.length?Ce(t):new je(t,n,e,null==r?1:r)}function je(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function qe(){return"#"+Le(this.r)+Le(this.g)+Le(this.b)}function Oe(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Le(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Te(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new De(t,n,e,r)}function Be(t){if(t instanceof De)return new De(t.h,t.s,t.l,t.opacity);if(t instanceof ce||(t=Ee(t)),!t)return new 
De;if(t instanceof De)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),u=NaN,a=o-i,l=(o+i)/2;return a?(u=n===o?(e-r)/a+6*(e0&&l<1?0:u,new De(u,a,l,t.opacity)}function De(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function He(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}function Re(t,n,e,r,i){var o=t*t,u=o*t;return((1-3*t+3*o-u)*n+(4-6*o+3*u)*e+(1+3*t+3*o-3*u)*r+u*i)/6}function Ve(t){return function(){return t}}function Xe(t,n){var e=n-t;return e?function(t,n){return function(e){return t+e*n}}(t,e):Ve(isNaN(t)?n:t)}le(ce,Ee,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:Ae,formatHex:Ae,formatHsl:function(){return Be(this).formatHsl()},formatRgb:Ne,toString:Ne}),le(je,Pe,se(ce,{brighter:function(t){return t=null==t?he:Math.pow(he,t),new je(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?fe:Math.pow(fe,t),new je(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:qe,formatHex:qe,formatRgb:Oe,toString:Oe})),le(De,(function(t,n,e,r){return 1===arguments.length?Be(t):new De(t,n,e,null==r?1:r)}),se(ce,{brighter:function(t){return t=null==t?he:Math.pow(he,t),new De(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?fe:Math.pow(fe,t),new De(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new je(He(t>=240?t-240:t+120,i,r),He(t,i,r),He(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var 
t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const ze=function t(n){var e=function(t){return 1==(t=+t)?Xe:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):Ve(isNaN(n)?e:n)}}(n);function r(t,n){var r=e((t=Pe(t)).r,(n=Pe(n)).r),i=e(t.g,n.g),o=e(t.b,n.b),u=Xe(t.opacity,n.opacity);return function(n){return t.r=r(n),t.g=i(n),t.b=o(n),t.opacity=u(n),t+""}}return r.gamma=t,r}(1);function Ie(t){return function(n){var e,r,i=n.length,o=new Array(i),u=new Array(i),a=new Array(i);for(e=0;e=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],u=r>0?t[r-1]:2*i-o,a=ro&&(i=n.slice(o,i),a[u]?a[u]+=i:a[++u]=i),(e=e[0])===(r=r[0])?a[u]?a[u]+=r:a[++u]=r:(a[++u]=null,l.push({i:u,x:Ye(e,r)})),o=Ge.lastIndex;return on&&(e=t,t=n,n=e),s=function(e){return Math.max(t,Math.min(n,e))}),r=l>2?or:ir,i=o=null,f}function f(n){return null==n||isNaN(n=+n)?e:(i||(i=r(u.map(t),a,l)))(t(s(n)))}return f.invert=function(e){return s(n((o||(o=r(a,u.map(t),Ye)))(e)))},f.domain=function(t){return arguments.length?(u=Array.from(t,tr),c()):u.slice()},f.range=function(t){return arguments.length?(a=Array.from(t),c()):a.slice()},f.rangeRound=function(t){return a=Array.from(t),l=We,c()},f.clamp=function(t){return arguments.length?(s=!!t||er,c()):s!==er},f.interpolate=function(t){return arguments.length?(l=t,c()):l},f.unknown=function(t){return arguments.length?(e=t,f):e},function(e,r){return t=e,n=r,c()}}()(er,er)}function lr(t,n){switch(arguments.length){case 0:break;case 1:this.range(t);break;default:this.range(n).domain(t)}return this}function sr(t){var n=t.domain;return t.ticks=function(t){var e=n();return function(t,n,e){var r,i,o,u,a=-1;if(e=+e,(t=+t)==(n=+n)&&e>0)return[t];if((r=n0){let e=Math.round(t/u),r=Math.round(n/u);for(e*un&&--r,o=new Array(i=r-e+1);++an&&--r,o=new 
Array(i=r-e+1);++a=te?i*=10:o>=ne?i*=5:o>=ee&&(i*=2),n0;){if((i=re(l,s,e))===r)return o[u]=l,o[a]=s,n(o);if(i>0)l=Math.floor(l/i)*i,s=Math.ceil(s/i)*i;else{if(!(i<0))break;l=Math.ceil(l*i)/i,s=Math.floor(s*i)/i}r=i}return t},t}function cr(){var t=ar();return t.copy=function(){return ur(t,cr())},lr.apply(t,arguments),sr(t)}function fr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}var hr={value:()=>{}};function pr(){for(var t,n=0,e=arguments.length,r={};n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}}))}function vr(t,n){for(var e,r=0,i=t.length;r0)for(var e,r,i=new Array(e),o=0;o=0&&n._call.call(null,t),n=n._next;--br}()}finally{br=0,function(){for(var t,n,e=_r,r=1/0;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:_r=n);wr=t,Tr(r)}(),Nr=0}}function Lr(){var t=kr.now(),n=t-Ar;n>1e3&&(Er-=n,Ar=t)}function Tr(t){br||(xr&&(xr=clearTimeout(xr)),t-Nr>24?(t<1/0&&(xr=setTimeout(Or,t-kr.now()-Er)),Mr&&(Mr=clearInterval(Mr))):(Mr||(Ar=kr.now(),Mr=setInterval(Lr,1e3)),br=1,Sr(Or)))}function Br(t,n,e){var r=new jr;return n=null==n?0:+n,r.restart((function(e){r.stop(),t(e+n)}),n,e),r}jr.prototype=qr.prototype={constructor:jr,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a function");e=(null==e?Cr():+e)+(null==n?0:+n),this._next||wr===this||(wr?wr._next=this:_r=this,wr=this),this._call=t,this._time=e,Tr()},stop:function(){this._call&&(this._call=null,this._time=1/0,Tr())}};var Dr=mr("start","end","cancel","interrupt"),Hr=[];function Rr(t,n,e,r,i,o){var u=t.__transition;if(u){if(e in u)return}else t.__transition={};!function(t,n,e){var r,i=t.__transition;function o(l){var s,c,f,h;if(1!==e.state)return a();for(s in i)if((h=i[s]).name===e.name){if(3===h.state)return Br(o);4===h.state?(h.state=6,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete i[s]):+s0)throw new Error("too late; already scheduled");return e}function 
Xr(t,n){var e=zr(t,n);if(e.state>3)throw new Error("too late; already running");return e}function zr(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}var Ir,$r,Ur,Yr,Fr=180/Math.PI,Zr={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function Gr(t,n,e,r,i,o){var u,a,l;return(u=Math.sqrt(t*t+n*n))&&(t/=u,n/=u),(l=t*e+n*r)&&(e-=t*l,r-=n*l),(a=Math.sqrt(e*e+r*r))&&(e/=a,r/=a,l/=a),t*r180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:Ye(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,u.rotate,a,l),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:Ye(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,u.skewX,a,l),function(t,n,e,r,o,u){if(t!==e||n!==r){var a=o.push(i(o)+"scale(",null,",",null,")");u.push({i:a-4,x:Ye(t,e)},{i:a-2,x:Ye(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,u.scaleX,u.scaleY,a,l),o=u=null,function(t){for(var n,e=-1,r=l.length;++e=0&&(t=t.slice(0,n)),!t||"start"===t}))}(n)?Vr:Xr;return function(){var u=o(this,t),a=u.on;a!==r&&(i=(r=a).copy()).on(n,e),u.on=i}}var _i=Cn.prototype.constructor;function wi(t){return function(){this.style.removeProperty(t)}}function bi(t,n,e){return function(r){this.style.setProperty(t,n.call(this,r),e)}}function xi(t,n,e){var r,i;function o(){var o=n.apply(this,arguments);return o!==i&&(r=(i=o)&&bi(t,o,e)),r}return o._value=n,o}function Mi(t){return function(n){this.textContent=t.call(this,n)}}function Ai(t){var n,e;function r(){var r=t.apply(this,arguments);return r!==e&&(n=(e=r)&&Mi(r)),n}return r._value=t,r}var Ni=0;function Ei(t,n,e,r){this._groups=t,this._parents=n,this._name=e,this._id=r}function ki(){return++Ni}var Si=Cn.prototype;Ei.prototype=function(t){return Cn().transition(t)}.prototype={constructor:Ei,select:function(t){var n=this._name,e=this._id;"function"!=typeof t&&(t=gt(t));for(var r=this._groups,i=r.length,o=new Array(i),u=0;u{p&&(p.textContent="search: "+n+" of "+e+" total samples 
( "+Xn(".3f")(n/e*100,3)+"%)")},d()};const k=E;let S=(t,n,e=!1)=>{if(!n)return!1;let r=b(t);e&&(n=n.toLowerCase(),r=r.toLowerCase());const i=new RegExp(n);return void 0!==r&&r&&r.match(i)};const C=S;let P=function(t){p&&(t?p.textContent=t:"function"==typeof d?d():p.textContent="")};const j=P;let q=function(t){return b(t)+" ("+Xn(".3f")(100*(t.x1-t.x0),3)+"%, "+x(t)+" samples)"},O=function(t){return t.highlight?"#E600E6":function(t,n){let e=w||"warm";w||void 0===n||""===n||(e="red",void 0!==t&&t&&t.match(/::/)&&(e="yellow"),"kernel"===n?e="orange":"jit"===n?e="green":"inlined"===n&&(e="aqua"));const r=function(t){let n=0;if(t){const e=t.split("` + "`" + `");e.length>1&&(t=e[e.length-1]),n=function(t){let n=0,e=0,r=1;if(t){for(let i=0;i6);i++)n+=r*(t.charCodeAt(i)%10),e+=9*r,r*=.7;e>0&&(n/=e)}return n}(t=t.split("(")[0])}return n}(t);return function(t,n){let e,r,i;return"red"===t?(e=200+Math.round(55*n),r=50+Math.round(80*n),i=r):"orange"===t?(e=190+Math.round(65*n),r=90+Math.round(65*n),i=0):"yellow"===t?(e=175+Math.round(55*n),r=e,i=50+Math.round(20*n)):"green"===t?(e=50+Math.round(60*n),r=200+Math.round(55*n),i=e):"pastelgreen"===t?(e=163+Math.round(75*n),r=195+Math.round(49*n),i=72+Math.round(149*n)):"blue"===t?(e=91+Math.round(126*n),r=156+Math.round(76*n),i=221+Math.round(26*n)):"aqua"===t?(e=50+Math.round(60*n),r=165+Math.round(55*n),i=r):"cold"===t?(e=0+Math.round(55*(1-n)),r=0+Math.round(230*(1-n)),i=200+Math.round(55*n)):(e=200+Math.round(55*n),r=0+Math.round(230*(1-n)),i=0+Math.round(55*(1-n))),"rgb("+e+","+r+","+i+")"}(e,r)}(b(t),A(t))};const L=O;function T(t){t.data.fade=!1,t.data.hide=!1,t.children&&t.children.forEach(T)}function B(t){t.parent&&(t.parent.data.fade=!0,B(t.parent))}function D(t){if(i&&i.hide(),function(t){let n,e,r,i=t,o=i.parent;for(;o;){for(n=o.children,e=n.length;e--;)r=n[e],r!==i&&(r.data.hide=!0);i=o,o=i.parent}}(t),T(t),B(t),z(),y){const 
n=Pn(this).select("svg")._groups[0][0].parentNode.offsetTop,r=(window.innerHeight-n)/e,i=(t.height-r+10)*e;window.scrollTo({top:n+i,left:0,behavior:"smooth"})}"function"==typeof c&&c(t)}function H(t,n){if(t.id===n)return t;{const e=M(t);if(e)for(let t=0;t0){const r=t/(n.x1-n.x0);e=e.filter((function(t){return(t.x1-t.x0)*r>h}))}return e}(r),y=Pn(this).select("svg");y.attr("width",t);let _=y.selectAll("g").data(g,(function(t){return t.id}));if(!n||v){const t=Math.max.apply(null,g.map((function(t){return t.depth})));n=(t+3)*e,n{D(n)})),_.exit().remove(),_.on("mouseover",(function(t,n){i&&i.show(n,this),P(q(n)),"function"==typeof f&&f(n)})).on("mouseout",(function(){i&&i.hide(),P(null)}))}))}function I(t,n){n.forEach((function(n){const e=t.find((function(t){return t.name===n.name}));e?(e.value+=n.value,n.children&&(e.children||(e.children=[]),I(e.children,n.children))):t.push(n)}))}function $(t){let n,e,r,i,o,u,a,l;const s=[],c=[],f=[],h=!g;let p=t.data;for(p.hide?(t.value=0,e=t.children,e&&f.push(e)):(t.value=p.fade?0:x(p),s.push(t));n=s.pop();)if(e=n.children,e&&(o=e.length)){for(i=0;o--;)a=e[o],p=a.data,p.hide?(a.value=0,r=a.children,r&&f.push(r)):(p.fade?a.value=0:(l=x(p),a.value=l,i+=l),s.push(a));h&&n.value&&(n.value-=i),c.push(e)}for(o=c.length;o--;){for(e=c[o],i=0,u=e.length;u--;)i+=e[u].value;e[0].parent.value+=i}for(;f.length;)for(e=f.pop(),u=e.length;u--;)a=e[u],a.value=0,r=a.children,r&&f.push(r)}function U(){r.datum((t=>{if("Node"!==t.constructor.name){const n=Zn(t,M);return function(t){let n=0;!function(t,n){n(t);let e=t.children;if(e){const t=[e];let r,i,o;for(;t.length;)for(e=t.pop(),r=e.length;r--;)i=e[r],n(i),o=i.children,o&&t.push(o)}}(t,(function(t){t.id=n++}))}(n),$(n),n.originalValue=n.value,_&&n.eachAfter((t=>{let n=N(t);const e=t.children;let r=e&&e.length;for(;--r>=0;)n+=e[r].delta;t.delta=n})),n}}))}function Y(e){if(!arguments.length)return Y;r=e,U(),r.each((function(e){if(0===Pn(this).select("svg").size()){const 
e=Pn(this).append("svg:svg").attr("width",t).attr("class","partition d3-flame-graph");n&&(n(I([n.data],[t]),n.data))),U(),z(),Y):Y},Y.update=function(t){return r?(t&&(r.datum(t),U()),z(),Y):Y},Y.destroy=function(){return r?(i&&(i.hide(),"function"==typeof i.destroy&&i.destroy()),r.selectAll("svg").remove(),Y):Y},Y.setColorMapper=function(t){return arguments.length?(O=n=>{const e=L(n);return t(n,e)},Y):(O=L,Y)},Y.color=Y.setColorMapper,Y.setColorHue=function(t){return arguments.length?(w=t,Y):(w=null,Y)},Y.minFrameSize=function(t){return arguments.length?(h=t,Y):h},Y.setDetailsElement=function(t){return arguments.length?(p=t,Y):p},Y.details=Y.setDetailsElement,Y.selfValue=function(t){return arguments.length?(g=t,Y):g},Y.resetHeightOnZoom=function(t){return arguments.length?(v=t,Y):v},Y.scrollOnZoom=function(t){return arguments.length?(y=t,Y):y},Y.getName=function(t){return arguments.length?(b=t,Y):b},Y.getValue=function(t){return arguments.length?(x=t,Y):x},Y.getChildren=function(t){return arguments.length?(M=t,Y):M},Y.getLibtype=function(t){return arguments.length?(A=t,Y):A},Y.getDelta=function(t){return arguments.length?(N=t,Y):N},Y.setSearchHandler=function(t){return arguments.length?(E=t,Y):(E=k,Y)},Y.setDetailsHandler=function(t){return arguments.length?(P=t,Y):(P=j,Y)},Y.setSearchMatch=function(t){return arguments.length?(S=t,Y):(S=C,Y)},Y}return Cn.prototype.interrupt=function(t){return this.each((function(){!function(t,n){var e,r,i,o=t.__transition,u=!0;if(o){for(i in n=null==n?null:n+"",o)(e=o[i]).name===n?(r=e.state>2&&e.state<5,e.state=6,e.timer.stop(),e.on.call(r?"interrupt":"cancel",t,t.__data__,e.index,e.group),delete o[i]):u=!1;u&&delete t.__transition}}(this,t)}))},Cn.prototype.transition=function(t){var n,e;t instanceof Ei?(n=t._id,t=t._name):(n=ki(),(e=Ci).time=Cr(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,o=0;o d3_flame_graph.go +// D3.js is a JavaScript library for manipulating documents based on data. 
+// https://github.com/d3/d3 +// See D3_LICENSE file for license details + +// d3-flame-graph is a D3.js plugin that produces flame graphs from hierarchical data. +// https://github.com/spiermar/d3-flame-graph +// See D3_FLAME_GRAPH_LICENSE file for license details + +package d3flamegraph + +// JSSource returns the d3 and d3-flame-graph JavaScript bundle +const JSSource = \` + +$d3_js +\` + +// CSSSource returns the $D3FLAMEGRAPH_CSS file +const CSSSource = \` +$d3_css +\` + +EOF + gofmt -w d3_flame_graph.go +} + +get_licenses() { + cp node_modules/d3-selection/LICENSE D3_LICENSE + cp node_modules/d3-flame-graph/LICENSE D3_FLAME_GRAPH_LICENSE +} + +get_licenses +generate_d3_flame_graph_go diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js new file mode 100644 index 0000000..71239d9 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js @@ -0,0 +1,13 @@ +// Minimal webpack config to package a minified JS bundle (including +// dependencies) for execution in a