#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
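#
# Driver for the integration tests: walks over the test directories matched by
# $SELECTED_TESTS and runs the requested make targets in each of them.
#
# Knobs read from the environment (all optional):
#   NO_BUILD=1             don't build the source tree before running the tests
#   BUILD_DIR=...          build directory override (presumably honored by find-build-dir.sh)
#   SELECTED_TESTS=...     glob limiting which test directories are picked up
#   DENY_LIST_MARKERS=...  marker file names that exclude a test from the run
#   SPLIT_TEST_LOGS=1      log each test to $ARTIFACT_DIRECTORY/<test>.log
#   ARTIFACT_DIRECTORY=... where split logs and merged coverage data end up
#
# Illustrative invocation (script path and test glob are examples only):
#   SELECTED_TESTS="TEST-01-*" ./run-integration-tests.sh setup run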
set -e
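
# Check whether $1 is one of the make targets the per-test Makefiles are
# expected to understand.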
is_valid_target() {
    local target="${1:?}"
    local t

    for t in all setup run clean clean-again; do
        [[ "$target" == "$t" ]] && return 0
    done

    return 1
}
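
# Check whether the given test ships one of the deny-list marker files and
# should therefore be skipped (BLACKLIST_MARKERS appears to be an older name
# kept for backwards compatibility).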
pass_deny_list() {
    local test="${1:?}"
    local marker

    for marker in $DENY_LIST_MARKERS $BLACKLIST_MARKERS; do
        if [[ -f "$test/$marker" ]]; then
            echo "========== DENY-LISTED: $test ($marker) =========="
            return 1
        fi
    done

    return 0
}
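
# Run the given command, tracing it with `set -x` in a subshell; when
# SPLIT_TEST_LOGS is enabled and ARTIFACT_DIRECTORY is set, all output is
# appended to a per-test log file instead of going to the console.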
test_run() {
    local test_name="${1:?}"
    shift

    if [[ $# -eq 0 ]]; then
        echo >&2 "test_run: missing arguments"
        exit 1
    fi

    # Note: let's be very explicit in reporting the return code of the test command here, i.e. don't
    # rely on `set -e` or the return code of the last statement in the function, since reporting a
    # false positive would be very bad in this case.
    if [[ "${SPLIT_TEST_LOGS:-0}" -ne 0 && -n "${ARTIFACT_DIRECTORY:-}" ]]; then
        (set -x; "$@") &>>"$ARTIFACT_DIRECTORY/$test_name.log" || return $?
    else
        (set -x; "$@") || return $?
    fi
}
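
# Run the full setup/run/clean-again cycle by default; explicit targets passed
# on the command line override this below.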
ARGS=(setup run clean-again)
CLEAN=0
CLEAN_AGAIN=0
COUNT=0
FAILURES=0
declare -A RESULTS
declare -A TIMES
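
# Unless NO_BUILD is enabled, locate the build directory and build the tree
# first, so the tests run against up-to-date binaries.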
if [[ "${NO_BUILD:-0}" =~ ^(1|yes|true)$ ]]; then
BUILD_DIR=""
elif BUILD_DIR="$("$(dirname "$0")/../tools/find-build-dir.sh")"; then
ninja -C "$BUILD_DIR"
else
echo >&2 "No build found, please set BUILD_DIR or NO_BUILD"
exit 1
fi
if [[ $# -gt 0 ]]; then
ARGS=("$@")
fi
# Reject invalid make targets
for arg in "${ARGS[@]}"; do
if ! is_valid_target "$arg"; then
echo >&2 "Invalid target: $arg"
exit 1
fi
done
# Separate 'clean' and 'clean-again' operations
args_filtered=()
for arg in "${ARGS[@]}"; do
if [[ "$arg" == "clean-again" ]]; then
CLEAN_AGAIN=1
elif [[ "$arg" == "clean" ]]; then
CLEAN=1
else
args_filtered+=("$arg")
fi
done
ARGS=("${args_filtered[@]}")
cd "$(dirname "$0")"
SELECTED_TESTS="${SELECTED_TESTS:-TEST-??-*}"
# Let's always do the cleaning operation first, because it destroys the image
# cache.
if [[ $CLEAN -eq 1 ]]; then
    for test in $SELECTED_TESTS; do
        test_run "$test" make -C "$test" clean
    done
fi

# Run actual tests (if requested)
if [[ ${#ARGS[@]} -ne 0 ]]; then
    for test in $SELECTED_TESTS; do
        COUNT=$((COUNT + 1))

        pass_deny_list "$test" || continue
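        # SECONDS is a bash builtin that ticks up once per second; resetting it
        # here lets us read this test's runtime back out of it below.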
        SECONDS=0

        echo -e "\n[$(date +%R:%S)] --x-- Running $test --x--"
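        # Lift errexit around the test invocation so a failing test doesn't
        # abort the whole loop.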
        set +e
        test_run "$test" make -C "$test" "${ARGS[@]}"
        result=$?
        set -e
        echo "[$(date +%R:%S)] --x-- Result of $test: $result --x--"

        RESULTS["$test"]="$result"
        TIMES["$test"]="$SECONDS"

        # Run clean-again here to free up space, if requested, and if the test succeeded
        if [[ "$result" -ne 0 ]]; then
            FAILURES=$((FAILURES + 1))
        elif [[ $CLEAN_AGAIN -eq 1 ]]; then
            test_run "$test" make -C "$test" clean-again
        fi
    done
fi

echo ""
for test in "${!RESULTS[@]}"; do
result="${RESULTS[$test]}"
time="${TIMES[$test]}"
[[ "$result" -eq 0 ]] && string="SUCCESS" || string="FAIL"
printf "%-35s %-8s (%3s s)\n" "$test:" "$string" "$time"
done | sort
if [[ "$FAILURES" -eq 0 ]]; then
echo -e "\nALL $COUNT TESTS PASSED"
else
echo -e "\nTOTAL FAILURES: $FAILURES OF $COUNT"
fi
# If we have coverage files, merge them into a single report for upload
if [[ -n "$ARTIFACT_DIRECTORY" ]]; then
lcov_args=()
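    # Feed the loop via process substitution so it runs in the current shell
    # and lcov_args survives it.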
    while read -r info_file; do
        lcov_args+=(--add-tracefile "$info_file")
    done < <(find "$ARTIFACT_DIRECTORY" -maxdepth 1 -name "*.coverage-info")

    if [[ ${#lcov_args[@]} -gt 1 ]]; then
        lcov "${lcov_args[@]}" --output-file "$ARTIFACT_DIRECTORY/merged.coverage-info"
    fi
fi
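
# Propagate the number of failed tests as the exit code (0 means all passed).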
exit "$FAILURES"