author      Leah Rumancik <leah.rumancik@gmail.com>   2023-02-16 16:21:47 -0800
committer   Theodore Ts'o <tytso@mit.edu>             2023-07-03 12:47:27 -0400
commit      4478f691ee542c90c266e3e4429600994da5d249 (patch)
tree        8d9870da5071d65a148ed5263cdd602cb4b77baf
parent      3a5af25831c4b9dd9364ca9b418a58f8c33b9e52 (diff)
download    xfstests-bld-4478f691ee542c90c266e3e4429600994da5d249.tar.gz
selftests: add functions to help process test results
check_results.py - ensure an xml file has the expected results
util/results - retrieve results and feed input to check_results.py

Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
-rw-r--r--  selftests/util/check_results.py |  37
-rw-r--r--  selftests/util/results          |  83
2 files changed, 120 insertions(+), 0 deletions(-)
diff --git a/selftests/util/check_results.py b/selftests/util/check_results.py
new file mode 100644
index 00000000..da65aafc
--- /dev/null
+++ b/selftests/util/check_results.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python3
+
+import sys
+import argparse
+from junitparser import JUnitXml, TestSuite
+from get_stats import get_stats_from_dir
+
+parser = argparse.ArgumentParser()
+parser.add_argument('results', help='Results directory')
+parser.add_argument('config', help='Test config')
+parser.add_argument('test', help='Test name you are interested in')
+parser.add_argument('--fail', help='Expected number of failures', type=int, required=False)
+parser.add_argument('--skip', help='Expected number of skips', type=int, required=False)
+parser.add_argument('--error', help='Expected number of errors', type=int, required=False)
+parser.add_argument('--total', help='Expected number of total tests', type=int, required=False)
+args = parser.parse_args()
+
+results_stats = get_stats_from_dir(args.results)
+
+ret=0
+if args.fail is not None and args.fail != results_stats[args.config][args.test].failed:
+ print(f"Error ({args.test}): expected {args.fail} failures but selftest had {results_stats[args.config][args.test].failed} failures.")
+ ret=1
+
+if args.skip is not None and args.skip != results_stats[args.config][args.test].skipped:
+ print(f"Error ({args.test}): expected {args.skip} skips but selftest had {results_stats[args.config][args.test].skipped} skips.")
+ ret=1
+
+if args.error is not None and args.error != results_stats[args.config][args.test].error:
+ print(f"Error ({args.test}): expected {args.error} errors but selftest had {results_stats[args.config][args.test].error} errors.")
+ ret=1
+
+if args.total is not None and args.total != results_stats[args.config][args.test].total:
+ print(f"Error ({args.test}): expected {args.total} total tests but selftest had {results_stats[args.config][args.test].total} tests.")
+ ret=1
+
+sys.exit(ret)
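
As a usage sketch (the results directory, config name, and expected counts below are hypothetical, not part of this patch; get_stats and junitparser must already be importable, e.g. via the PYTHONPATH set up in util/results), check_results.py can be invoked directly against an unpacked results directory and exits non-zero when any supplied expectation is not met:

    # hypothetical invocation: expect selftest/002 to fail in both of two runs
    python3 util/check_results.py --fail 2 --total 2 \
        /tmp/results-selftest ext4/4k selftest/002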
diff --git a/selftests/util/results b/selftests/util/results
new file mode 100644
index 00000000..302165c1
--- /dev/null
+++ b/selftests/util/results
@@ -0,0 +1,83 @@
+#!/bin/bash
+#
+# Helper functions for getting/comparing test results
+PYTHONPATH=$PYTHONPATH:$(realpath ../test-appliance/files/usr/lib/python3/dist-packages):$(realpath ../test-appliance/files/usr/lib/python3/dist-packages/junitparser)
+
+. util/setup
+
+function check_debug_results ()
+{
+ local FAIL_LOOP_COUNT=0
+ local COUNT=1
+ local TESTS=""
+
+ local results_dir="$1"
+ shift
+ local config="$1"
+ shift
+
+ while [ "$1" != "" ]; do
+ case $1 in
+ --fail-loop-count) shift; FAIL_LOOP_COUNT=$1 ;;
+ -C) shift; COUNT=$1 ;;
+ *) TESTS="$TESTS $1" ;;
+ esac
+ shift
+ done
+
+ local FAIL_COUNT=$(( $COUNT + $COUNT * $FAIL_LOOP_COUNT ))
+
+ local ret=0
+ # selftest/001: pass
+ # selftest/002: fail
+ # selftest/003: fail
+ # selftest/004: skip
+ # selftest/005: crash
+ # selftest/006: timeout
+ for name in $TESTS; do
+ local fail=0
+ local skip=0
+ local error=0
+ local total=0
+ case "$name" in
+ selftest/001) total=$COUNT ;;
+ selftest/002) total=$FAIL_COUNT; fail=$FAIL_COUNT ;;
+ selftest/003) total=$FAIL_COUNT; fail=$FAIL_COUNT ;;
+ selftest/004) total=$COUNT; skip=$COUNT ;;
+ selftest/005) total=$COUNT; error=$COUNT ;;
+ selftest/006) total=$COUNT; error=$COUNT ;;
+ esac
+
+ python3 ./util/check_results.py --fail $fail --skip $skip \
+ --error $error --total $total $results_dir $config $name
+ if [ "$?" -ne 0 ]; then
+ ret=1
+ fi
+ done
+ return $ret
+}
+
+function unpack_results ()
+{
+ local id="$1"
+
+ log_debug "unpack_results"
+
+ # give LTM 10m to upload results
+ local cnt=0
+ local output=
+ while [ $cnt -lt 10 ] ; do
+ if output=$($GCE_XFSTESTS get-results --unpack "$id"); then
+ log_debug "$output"
+ echo "$output"
+ return 0
+ fi
+ cnt=$((cnt + 1))
+ sleep 1m
+ log_debug "$output"
+ log_debug "sleeping..."
+ done
+
+ echo "$output"
+ return 1
+}
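
A rough usage sketch of the two helpers above (the run id, results path, and config are hypothetical; log_debug and $GCE_XFSTESTS are assumed to be provided by the util/setup environment sourced in this file):

    # hypothetical caller, run from the selftests directory
    . util/results
    unpack_results "ltm-selftest-run" || exit 1
    # with -C 2 and --fail-loop-count 1, failing tests are expected to run 4 times
    check_debug_results /tmp/results-dir ext4/4k -C 2 --fail-loop-count 1 \
        selftest/001 selftest/002 selftest/004 || exit 1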