author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /arch/powerpc/tools
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/powerpc/tools')
-rwxr-xr-x  arch/powerpc/tools/checkpatch.sh                   22
-rwxr-xr-x  arch/powerpc/tools/gcc-check-mprofile-kernel.sh    27
-rw-r--r--  arch/powerpc/tools/head_check.sh                   80
-rwxr-xr-x  arch/powerpc/tools/relocs_check.sh                 57
-rwxr-xr-x  arch/powerpc/tools/unrel_branch_check.sh           79
5 files changed, 265 insertions, 0 deletions
diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh
new file mode 100755
index 000000000..91c04802e
--- /dev/null
+++ b/arch/powerpc/tools/checkpatch.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2018, Michael Ellerman, IBM Corporation.
+#
+# Wrapper around checkpatch that uses our preferred settings
+
+script_base=$(realpath $(dirname $0))
+
+exec $script_base/../../../scripts/checkpatch.pl \
+ --subjective \
+ --no-summary \
+ --show-types \
+ --ignore ARCH_INCLUDE_LINUX \
+ --ignore BIT_MACRO \
+ --ignore COMPARISON_TO_NULL \
+ --ignore EMAIL_SUBJECT \
+ --ignore FILE_PATH_CHANGES \
+ --ignore GLOBAL_INITIALISERS \
+ --ignore LINE_SPACING \
+ --ignore MULTIPLE_ASSIGNMENTS \
+ --ignore DT_SPLIT_BINDING_PATCH \
+	"$@"
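
The wrapper simply forwards its arguments to scripts/checkpatch.pl with the powerpc-preferred settings applied, so it is run from the top of the kernel tree exactly like checkpatch itself. A minimal usage sketch (the patch file name is purely illustrative):

    # check a patch file with the powerpc settings
    ./arch/powerpc/tools/checkpatch.sh 0001-powerpc-example.patch
    # or check the most recent commit via checkpatch's -g/--git option
    ./arch/powerpc/tools/checkpatch.sh -g HEAD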
diff --git a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
new file mode 100755
index 000000000..137f3376a
--- /dev/null
+++ b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+set -o pipefail
+
+# To debug, uncomment the following line
+# set -x
+
+# -mprofile-kernel is only supported on 64le, so this should not be invoked
+# for other targets. Therefore we can pass in -m64 and -mlittle-endian
+# explicitly, to take care of toolchains defaulting to other targets.
+
+# Test whether the compile option -mprofile-kernel exists and generates
+# profiling code (ie. a call to _mcount()).
+echo "int func() { return 0; }" | \
+ $* -m64 -mlittle-endian -S -x c -O2 -p -mprofile-kernel - -o - \
+ 2> /dev/null | grep -q "_mcount"
+
+# Test whether the notrace attribute correctly suppresses calls to _mcount().
+
+echo -e "#include <linux/compiler.h>\nnotrace int func() { return 0; }" | \
+ $* -m64 -mlittle-endian -S -x c -O2 -p -mprofile-kernel - -o - \
+ 2> /dev/null | grep -q "_mcount" && \
+ exit 1
+
+exit 0
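
The first test can be reproduced by hand to see what the script greps for: compiling a trivial function with -p -mprofile-kernel should emit a call to _mcount. A sketch, assuming a powerpc64le cross compiler installed under the (illustrative) name powerpc64le-linux-gnu-gcc:

    echo 'int func(void) { return 0; }' |
        powerpc64le-linux-gnu-gcc -m64 -mlittle-endian -S -x c -O2 \
            -p -mprofile-kernel - -o - | grep _mcount
    # expect a profiling call along the lines of:  bl _mcount

The second test additionally includes <linux/compiler.h>, so reproducing it by hand requires passing the kernel's include paths as extra compiler arguments.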
diff --git a/arch/powerpc/tools/head_check.sh b/arch/powerpc/tools/head_check.sh
new file mode 100644
index 000000000..689907cda
--- /dev/null
+++ b/arch/powerpc/tools/head_check.sh
@@ -0,0 +1,80 @@
+# Copyright © 2016 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the head of a vmlinux for linker stubs that
+# break our placement of fixed-location code for 64-bit.
+
+# based on relocs_check.pl
+# Copyright © 2009 IBM Corporation
+
+# NOTE!
+#
+# If the build dies here, it's likely that code in head_64.S/exception-64*.S,
+# or nearby, is branching to labels it can't reach directly, which results in the
+# linker inserting branch stubs. This can move code around in ways that break
+# the fixed section calculations (head-64.h). To debug this, disassemble the
+# vmlinux and look for branch stubs (long_branch, plt_branch, etc.) in the
+# fixed section region (0 - 0x8000ish). Check what code is calling those stubs,
+# and perhaps change the code so that a direct branch can reach.
+#
+# A ".linker_stub_catch" section is used to catch some stubs generated by
+# early .text code, which tend to get placed at the start of the section.
+# If there are too many such stubs, they can overflow this section. Expanding
+# it may help (or reducing the number of stub branches).
+#
+# Linker stubs use the TOC pointer, so even if fixed section code could
+# tolerate them being inserted into head code, they can't be allowed in low
+# level entry code (boot, interrupt vectors, etc) until r2 is set up. This
+# could cause the kernel to die in early boot.
+
+# Allow for verbose output
+if [ "$V" = "1" ]; then
+ set -x
+fi
+
+if [ $# -lt 2 ]; then
+ echo "$0 [path to nm] [path to vmlinux]" 1>&2
+ exit 1
+fi
+
+# Have Kbuild supply the path to nm so we handle cross compilation.
+nm="$1"
+vmlinux="$2"
+
+# gcc-4.6-era toolchains make _stext an A (absolute) symbol rather than T
+$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" > .tmp_symbols.txt
+
+
+vma=$(grep -e " [TA] _stext$" .tmp_symbols.txt | cut -d' ' -f1)
+
+expected_start_head_addr="$vma"
+
+start_head_addr=$(grep " t start_first_256B$" .tmp_symbols.txt | cut -d' ' -f1)
+
+if [ "$start_head_addr" != "$expected_start_head_addr" ]; then
+ echo "ERROR: head code starts at $start_head_addr, should be $expected_start_head_addr" 1>&2
+ echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option" 1>&2
+ echo "ERROR: see comments in arch/powerpc/tools/head_check.sh" 1>&2
+
+ exit 1
+fi
+
+top_vma=$(echo "$vma" | cut -d'0' -f1)
+
+expected_start_text_addr=$(grep " a text_start$" .tmp_symbols.txt | cut -d' ' -f1 | sed "s/^0/$top_vma/")
+
+start_text_addr=$(grep " t start_text$" .tmp_symbols.txt | cut -d' ' -f1)
+
+if [ "$start_text_addr" != "$expected_start_text_addr" ]; then
+ echo "ERROR: start_text address is $start_text_addr, should be $expected_start_text_addr" 1>&2
+ echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option" 1>&2
+ echo "ERROR: see comments in arch/powerpc/tools/head_check.sh" 1>&2
+
+ exit 1
+fi
+
+rm -f .tmp_symbols.txt
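
For reference, the four symbols the script extracts should line up like this in nm output when no stubs have crept in (addresses are illustrative only):

    c000000000000000 T _stext
    c000000000000000 t start_first_256B
    0000000000008000 a text_start
    c000000000008000 t start_text

start_first_256B must sit exactly at _stext, and start_text must land at the absolute text_start offset rebased to the kernel's virtual base; a linker stub placed ahead of either symbol shifts these addresses and triggers the errors above.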
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
new file mode 100755
index 000000000..63792af00
--- /dev/null
+++ b/arch/powerpc/tools/relocs_check.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Copyright © 2015 IBM Corporation
+
+
+# This script checks the relocations of a vmlinux for "suspicious"
+# relocations.
+
+# based on relocs_check.pl
+# Copyright © 2009 IBM Corporation
+
+if [ $# -lt 3 ]; then
+ echo "$0 [path to objdump] [path to nm] [path to vmlinux]" 1>&2
+ exit 1
+fi
+
+# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
+objdump="$1"
+nm="$2"
+vmlinux="$3"
+
+# Remove from the bad relocations any that match an undefined weak symbol,
+# which will result in an absolute relocation to 0.
+# Weak unresolved symbols appear in nm output in this form:
+# " w _binary__btf_vmlinux_bin_end"
+undef_weak_symbols=$($nm "$vmlinux" | awk '$1 ~ /w/ { print $2 }')
+
+bad_relocs=$(
+$objdump -R "$vmlinux" |
+ # Only look at relocation lines.
+ grep -E '\<R_' |
+ # These relocations are okay
+ # On PPC64:
+ # R_PPC64_RELATIVE, R_PPC64_NONE
+ # On PPC:
+ # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
+	#	R_PPC_ADDR16_HA, R_PPC_ADDR16_LO,
+ # R_PPC_NONE
+ grep -F -w -v 'R_PPC64_RELATIVE
+R_PPC64_NONE
+R_PPC64_UADDR64
+R_PPC_ADDR16_LO
+R_PPC_ADDR16_HI
+R_PPC_ADDR16_HA
+R_PPC_RELATIVE
+R_PPC_NONE' |
+ ([ "$undef_weak_symbols" ] && grep -F -w -v "$undef_weak_symbols" || cat)
+)
+
+if [ -z "$bad_relocs" ]; then
+ exit 0
+fi
+
+num_bad=$(echo "$bad_relocs" | wc -l)
+echo "WARNING: $num_bad bad relocations"
+echo "$bad_relocs"
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
new file mode 100755
index 000000000..8301efee1
--- /dev/null
+++ b/arch/powerpc/tools/unrel_branch_check.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright © 2016,2020 IBM Corporation
+#
+# This script checks the unrelocated code of a vmlinux for "suspicious"
+# branches to relocated code (head_64.S code).
+
+# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
+objdump="$1"
+nm="$2"
+vmlinux="$3"
+
+kstart=0xc000000000000000
+
+end_intr=0x$($nm -p "$vmlinux" |
+ sed -E -n '/\s+[[:alpha:]]\s+__end_interrupts\s*$/{s///p;q}')
+if [ "$end_intr" = "0x" ]; then
+ exit 0
+fi
+
+# We know that there is a correct branch to
+# __start_initialization_multiplatform, so find its address
+# and exclude it from the check.
+sim=0x$($nm -p "$vmlinux" |
+ sed -E -n '/\s+[[:alpha:]]\s+__start_initialization_multiplatform\s*$/{s///p;q}')
+
+$objdump -D --no-show-raw-insn --start-address="$kstart" --stop-address="$end_intr" "$vmlinux" |
+sed -E -n '
+# match lines that start with a kernel address
+/^c[0-9a-f]*:\s*b/ {
+ # drop branches via ctr or lr
+ /\<b.?.?(ct|l)r/d
+ # cope with some differences between Clang and GNU objdumps
+ s/\<bt.?\s*[[:digit:]]+,/beq/
+ s/\<bf.?\s*[[:digit:]]+,/bne/
+ # tidy up
+ s/\s0x/ /
+ s/://
+ # format for the loop below
+ s/^(\S+)\s+(\S+)\s+(\S+)\s*(\S*).*$/\1:\2:\3:\4/
+ # strip out condition registers
+ s/:cr[0-7],/:/
+ p
+}' | {
+
+all_good=true
+while IFS=: read -r from branch to sym; do
+ case "$to" in
+ c*) to="0x$to"
+ ;;
+ .+*)
+ to=${to#.+}
+ if [ "$branch" = 'b' ]; then
+ if (( to >= 0x2000000 )); then
+ to=$(( to - 0x4000000 ))
+ fi
+ elif (( to >= 0x8000 )); then
+ to=$(( to - 0x10000 ))
+ fi
+ printf -v to '0x%x' $(( "0x$from" + to ))
+ ;;
+	*) printf 'Unknown branch format\n'
+ ;;
+ esac
+ if [ "$to" = "$sim" ]; then
+ continue
+ fi
+ if (( to > end_intr )); then
+ if $all_good; then
+ printf '%s\n' 'WARNING: Unrelocated relative branches'
+ all_good=false
+ fi
+ printf '%s %s-> %s %s\n' "$from" "$branch" "$to" "$sym"
+ fi
+done
+
+$all_good
+
+}
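
The arithmetic in the loop sign-extends the displacements objdump prints for .+ style targets: an unconditional b has a 26-bit byte displacement, so values of 0x2000000 and above are negative (subtract 0x4000000), while conditional branches have a 16-bit byte displacement, so 0x8000 and above are negative (subtract 0x10000). For example, a conditional branch printed as .+0xfffc decodes to 0xfffc - 0x10000 = -4, i.e. one instruction back from the branch. The script takes the same three arguments as relocs_check.sh; the toolchain names here are illustrative:

    ./arch/powerpc/tools/unrel_branch_check.sh \
        powerpc64le-linux-gnu-objdump powerpc64le-linux-gnu-nm vmlinux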