Port gdb-tests from bash to python (#1916)

* [WIP] Port gdb-tests from bash to python

* Use threads instead of processes

* Port gdb tests to python

* Linting

* Fix coverage "again"

* Remove bash tests

---------

Co-authored-by: intrigus <abc123zeus@live.de>
Authored by intrigus-lgtm on 2023-12-04 11:44:16 +01:00, committed by GitHub
parent 13f467b024
commit 427bf8c96e
3 changed files with 183 additions and 224 deletions

tests.sh (modified)

@@ -1,14 +1,7 @@
 #!/bin/bash
-# Check some basic test dependencies
-if ! command -v env_parallel &> /dev/null; then
-    echo 'Error: The `env_parallel` command could not be found. You should run `setup-dev.sh` to install development dependencies.'
-    echo '(Alternatively, run ./tests.sh with `--serial` to skip using parallel test running. However, if `env_parallel` is missing, it is likely that other dependencies like the `zig` compiler are also missing)'
-    exit
-fi
 # Run integration tests
-(cd tests/gdb-tests && ./tests.sh $@)
+(cd tests/gdb-tests && python3 tests.py $@)
 # Run unit tests
 # coverage run -m pytest tests/unit-tests
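For reference, the wrapper still forwards all of its arguments, now to the Python runner, so day-to-day usage is unchanged. A few illustrative invocations against the flags defined in tests.py below (the `heap` filter is just an example regex, not a specific test name):

    ./tests.sh --collect-only
    ./tests.sh --serial --verbose heap
    (cd tests/gdb-tests && python3 tests.py --cov)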

tests/gdb-tests/tests.py (new file, 182 lines)

@@ -0,0 +1,182 @@
from __future__ import annotations

import argparse
import concurrent.futures
import os
import re
import subprocess
import time
from subprocess import CompletedProcess
from typing import Tuple

ROOT_DIR = os.path.realpath("../../")
GDB_INIT_PATH = os.path.join(ROOT_DIR, "gdbinit.py")
COVERAGERC_PATH = os.path.join(ROOT_DIR, "pyproject.toml")


def ensureZigPath():
    if "ZIGPATH" not in os.environ:
        # If ZIGPATH is not set, set it to $pwd/.zig
        # In Docker environment this should by default be set to /opt/zig
        os.environ["ZIGPATH"] = os.path.join(ROOT_DIR, ".zig")
    print(f'ZIGPATH set to {os.environ["ZIGPATH"]}')


def makeBinaries():
    try:
        subprocess.check_call(["make", "all"], cwd="./tests/binaries")
    except subprocess.CalledProcessError:
        exit(1)


def run_gdb(gdb_args: list[str], env=None, capture_output=True) -> CompletedProcess[str]:
    env = os.environ if env is None else env
    return subprocess.run(
        ["gdb", "--silent", "--nx", "--nh"] + gdb_args + ["--eval-command", "quit"],
        env=env,
        capture_output=capture_output,
        text=True,
    )


def getTestsList(collect_only: bool, test_name_filter: str) -> list[str]:
    # NOTE: We run tests under GDB sessions and because of some cleanup/tests dependencies problems
    # we decided to run each test in a separate GDB session
    gdb_args = ["--init-command", GDB_INIT_PATH, "--command", "pytests_collect.py"]
    result = run_gdb(gdb_args)
    TESTS_COLLECT_OUTPUT = result.stdout

    if result.returncode == 1:
        print(TESTS_COLLECT_OUTPUT)
        exit(1)
    elif collect_only == 1:
        print(TESTS_COLLECT_OUTPUT)
        exit(0)

    # Extract the test names from the output using regex
    pattern = re.compile(r"tests/.*::.*")
    matches = pattern.findall(TESTS_COLLECT_OUTPUT)
    TESTS_LIST = [match for match in matches if re.search(test_name_filter, match)]
    return TESTS_LIST


def run_test(test_case: str, args: argparse.Namespace) -> Tuple[CompletedProcess[str], str]:
    gdb_args = ["--init-command", GDB_INIT_PATH, "--command", "pytests_launcher.py"]
    if args.cov:
        print("Running with coverage")
        gdb_args = [
            "-ex",
            "py import sys;print(sys.path);import coverage;coverage.process_startup();",
        ] + gdb_args

    env = os.environ.copy()
    env["LC_ALL"] = "C.UTF-8"
    env["LANG"] = "C.UTF-8"
    env["LC_CTYPE"] = "C.UTF-8"
    env["SRC_DIR"] = ROOT_DIR
    env["COVERAGE_FILE"] = os.path.join(ROOT_DIR, ".cov/coverage")
    env["COVERAGE_PROCESS_START"] = COVERAGERC_PATH
    if args.pdb:
        env["USE_PDB"] = "1"
    env["PWNDBG_LAUNCH_TEST"] = test_case
    env["PWNDBG_DISABLE_COLORS"] = "1"

    result = run_gdb(gdb_args, env=env, capture_output=not args.serial)
    return (result, test_case)


def run_tests_and_print_stats(tests_list: list[str], args: argparse.Namespace):
    start = time.time()
    test_results: list[Tuple[CompletedProcess[str], str]] = []

    def handle_parallel_test_result(test_result: Tuple[CompletedProcess[str], str]):
        test_results.append(test_result)
        (process, _) = test_result
        content = process.stdout

        # Extract the test name and result using regex
        testname = re.search(r"^(tests/[^ ]+)", content, re.MULTILINE)[0]
        result = re.search(
            r"(\x1b\[3.m(PASSED|FAILED|SKIPPED|XPASS|XFAIL)\x1b\[0m)", content, re.MULTILINE
        )[0]

        (_, testname) = testname.split("::")
        print(f"{testname:<70} {result}")

        # Only show the output of failed tests unless the verbose flag was used
        if args.verbose or "FAIL" in result:
            print("")
            print(content)

    if args.serial:
        test_results = [run_test(test, args) for test in tests_list]
    else:
        print("")
        print("Running tests in parallel")
        with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
            for test in tests_list:
                executor.submit(run_test, test, args).add_done_callback(
                    lambda future: handle_parallel_test_result(future.result())
                )

    end = time.time()
    seconds = int(end - start)
    print(f"Tests completed in {seconds} seconds")

    failed_tests = [(process, _) for (process, _) in test_results if process.returncode != 0]
    num_tests_failed = len(failed_tests)
    num_tests_passed_or_skipped = len(tests_list) - num_tests_failed

    print("")
    print("*********************************")
    print("********* TESTS SUMMARY *********")
    print("*********************************")
    print(f"Tests passed or skipped: {num_tests_passed_or_skipped}")
    print(f"Tests failed: {num_tests_failed}")

    if num_tests_failed != 0:
        print("")
        print(
            f"Failing tests: {' '.join([failed_test_name for _, failed_test_name in failed_tests])}"
        )
        exit(1)


def parse_args():
    parser = argparse.ArgumentParser(description="Run tests.")
    parser.add_argument(
        "-p",
        "--pdb",
        action="store_true",
        help="enable pdb (Python debugger) post mortem debugger on failed tests",
    )
    parser.add_argument("-c", "--cov", action="store_true", help="enable codecov")
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="display all test output instead of just failing test output",
    )
    parser.add_argument(
        "-s", "--serial", action="store_true", help="run tests one at a time instead of in parallel"
    )
    parser.add_argument(
        "--collect-only",
        action="store_true",
        help="only show the output of test collection, don't run any tests",
    )
    parser.add_argument(
        "test_name_filter", nargs="?", help="run only tests that match the regex", default=".*"
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    if args.cov:
        print("Will run codecov")
    if args.pdb:
        print("Will run tests in serial and with Python debugger")
        args.serial = True

    ensureZigPath()
    makeBinaries()
    tests: list[str] = getTestsList(args.collect_only, args.test_name_filter)
    run_tests_and_print_stats(tests, args)
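For context on the parallel path in run_tests_and_print_stats() above: rather than gathering futures and waiting on them, each submitted test gets an add_done_callback(), so its result is printed (typically by the worker thread that ran it) as soon as its GDB session exits, and leaving the ThreadPoolExecutor with-block implies shutdown(wait=True), so every callback has fired before the summary is computed. A minimal, self-contained sketch of the same pattern, with a hypothetical slow_task standing in for one GDB test session:

    import concurrent.futures
    import os
    import time

    def slow_task(n: int) -> int:
        time.sleep(0.1)  # stand-in for running one test under GDB
        return n * n

    results = []

    def on_done(future: concurrent.futures.Future) -> None:
        # Usually runs on the worker thread that completed the task; the
        # append is safe in CPython because it completes under the GIL.
        results.append(future.result())

    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        for n in range(8):
            executor.submit(slow_task, n).add_done_callback(on_done)

    # Leaving the with-block joins all workers, so results is complete here.
    print(sorted(results))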

tests/gdb-tests/tests.sh (deleted, 216 lines)

@@ -1,216 +0,0 @@
#!/bin/bash
#set -o errexit
set -o pipefail

# env_parallel will fail if there are too many environment variables, so we need
# to use `--session` or `--record-env`, and only `--record-env` is supported on
# the version of `parallel` on Ubuntu 20.04 and earlier. The directory also
# needs to be created for CI
mkdir -p ~/.parallel
. $(which env_parallel.bash)

# Workaround for Ubuntu 20.04 CI. If no aliases are defined
# `env_parallel --record-env` will have non-zero exit code for older versions of
# `parallel`, so we define a dummy alias here
alias __dummy=foo
env_parallel --record-env

ROOT_DIR="$(readlink -f ../../)"
GDB_INIT_PATH="$ROOT_DIR/gdbinit.py"
COVERAGERC_PATH="$ROOT_DIR/pyproject.toml"

help_and_exit() {
    echo "Usage: ./tests.sh [-p|--pdb] [-c|--cov] [<test-name-filter>]"
    echo " -p, --pdb enable pdb (Python debugger) post mortem debugger on failed tests"
    echo " -c, --cov enable codecov"
    echo " -v, --verbose display all test output instead of just failing test output"
    echo " -k, --keep don't delete the temporary files containing the command output"
    echo " -s, --serial run tests one at a time instead of in parallel"
    echo " --collect-only only show the output of test collection, don't run any tests"
    echo " <test-name-filter> run only tests that match the regex"
    exit 1
}

if [[ $# -gt 3 ]]; then
    help_and_exit
fi

USE_PDB=0
TEST_NAME_FILTER=""
RUN_CODECOV=0
KEEP=0
SERIAL=0
VERBOSE=0
COLLECT_ONLY=0

while [[ $# -gt 0 ]]; do
    case $1 in
        -p | --pdb)
            USE_PDB=1
            SERIAL=1
            echo "Will run tests in serial and with Python debugger"
            shift
            ;;
        -c | --cov)
            echo "Will run codecov"
            RUN_CODECOV=1
            shift
            ;;
        -v | --verbose)
            VERBOSE=1
            shift
            ;;
        -k | --keep)
            KEEP=1
            shift
            ;;
        -s | --serial)
            SERIAL=1
            shift
            ;;
        --collect-only)
            COLLECT_ONLY=1
            shift
            ;;
        -h | --help)
            help_and_exit
            ;;
        *)
            if [[ ! -z "${TEST_NAME_FILTER}" ]]; then
                help_and_exit
            fi
            TEST_NAME_FILTER="$1"
            shift
            ;;
    esac
done

if [[ -z "$ZIGPATH" ]]; then
    # If ZIGPATH is not set, set it to $pwd/.zig
    # In Docker environment this should by default be set to /opt/zig
    export ZIGPATH="$ROOT_DIR/.zig"
fi
echo "ZIGPATH set to $ZIGPATH"

(cd ./tests/binaries && make all) || exit 1

run_gdb() {
    gdb --silent --nx --nh "$@" --eval-command quit
}

# NOTE: We run tests under GDB sessions and because of some cleanup/tests dependencies problems
# we decided to run each test in a separate GDB session
gdb_args=(--init-command $GDB_INIT_PATH --command pytests_collect.py)
TESTS_COLLECT_OUTPUT=$(run_gdb "${gdb_args[@]}")

if [ $? -eq 1 ]; then
    echo -E "$TESTS_COLLECT_OUTPUT"
    exit 1
elif [ $COLLECT_ONLY -eq 1 ]; then
    echo "$TESTS_COLLECT_OUTPUT"
    exit 0
fi

TESTS_LIST=($(echo -E "$TESTS_COLLECT_OUTPUT" | grep -o "tests/.*::.*" | grep "${TEST_NAME_FILTER}"))

run_test() {
    test_case="$1"

    gdb_args=(--init-command $GDB_INIT_PATH --command pytests_launcher.py)
    if [ ${RUN_CODECOV} -ne 0 ]; then
        gdb_args=(-ex 'py import coverage;coverage.process_startup()' "${gdb_args[@]}")
    fi

    SRC_DIR=$ROOT_DIR \
        COVERAGE_FILE=$ROOT_DIR/.cov/coverage \
        COVERAGE_PROCESS_START=$COVERAGERC_PATH \
        USE_PDB="${USE_PDB}" \
        PWNDBG_LAUNCH_TEST="${test_case}" \
        PWNDBG_DISABLE_COLORS=1 \
        run_gdb "${gdb_args[@]}"
    retval=$?

    if [ "$SERIAL" -ne 1 ]; then
        exit $retval
    fi
}

parse_output_file() {
    output_file="$1"

    read -r testname result < <(
        grep -Po '(^tests/[^ ]+)|(\x1b\[3.m(PASSED|FAILED|SKIPPED|XPASS|XFAIL)\x1b\[0m)' "$output_file" \
            | tr '\n' ' ' \
            | cut -d ' ' -f 1,2
    )
    testfile=${testname%::*}
    testname=${testname#*::}

    printf '%-70s %s\n' $testname $result

    # Only show the output of failed tests unless the verbose flag was used
    if [[ $VERBOSE -eq 1 || "$result" =~ FAIL ]]; then
        echo ""
        cat "$output_file"
        echo ""
    fi

    if [[ $KEEP -ne 1 ]]; then
        # Delete the temporary file created by `parallel`
        rm "$output_file"
    else
        echo "$output_file"
    fi
}

start=$(date +%s)

if [ $SERIAL -eq 1 ]; then
    for t in "${TESTS_LIST[@]}"; do
        run_test "$t"
    done
else
    JOBLOG_PATH="$(mktemp)"

    echo ""
    echo -n "Running tests in parallel and using a joblog in $JOBLOG_PATH"
    if [[ $KEEP -ne 1 ]]; then
        echo " (use --keep it to persist it)"
    else
        echo ""
    fi

    # The `--env _` is required when using `--record-env`
    env_parallel --env _ --output-as-files --joblog $JOBLOG_PATH run_test ::: "${TESTS_LIST[@]}" | env_parallel --env _ parse_output_file {}
fi

end=$(date +%s)
seconds=$((end - start))
echo "Tests completed in ${seconds} seconds"

# TODO: This doesn't work with serial
# The seventh column in the joblog is the exit value and the tenth is the test name
FAILED_TESTS=($(awk '$7 == "1" { print $10 }' "${JOBLOG_PATH}"))
num_tests_failed=${#FAILED_TESTS[@]}
num_tests_passed_or_skipped=$((${#TESTS_LIST[@]} - $num_tests_failed))

echo ""
echo "*********************************"
echo "********* TESTS SUMMARY *********"
echo "*********************************"
echo "Tests passed or skipped: ${num_tests_passed_or_skipped}"
echo "Tests failed: ${num_tests_failed}"

if [ "${num_tests_failed}" -ne 0 ]; then
    echo ""
    echo "Failing tests: ${FAILED_TESTS[@]}"
    exit 1
fi

if [[ $KEEP -ne 1 ]]; then
    # Delete the temporary joblog file
    rm "${JOBLOG_PATH}"
else
    echo "Not removing the ${JOBLOG_PATH} since --keep was passed"
fi