cockpit/test/common/run-tests

#!/usr/bin/python3 -cimport os, sys; os.execv(os.path.dirname(sys.argv[1]) + "/pywrap", sys.argv)
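# NOTE: the "-c" shebang above chain-loads the sibling "pywrap" helper via os.execv(),
# which is presumably what puts testlib, testvm, and lcov (imported below) onto the
# Python path before the rest of this script runs.
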
import argparse
import binascii
import errno
import glob
import importlib.machinery
import importlib.util
import logging
import os
import socket
import string
import subprocess
import sys
import tempfile
import time
import unittest
from typing import List, Optional, Tuple

import testlib
import testvm
from lcov import create_coverage_report, prepare_for_code_coverage

os.environ['PYTHONUNBUFFERED'] = '1'
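

# stdout may be switched to non-blocking mode by whatever consumes the TAP output
# (e.g. a CI harness), in which case writes and flushes can raise BlockingIOError;
# flush_stdout() and write_line() in Test._print_test() retry instead of dropping output.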
def flush_stdout():
    while True:
        try:
            sys.stdout.flush()
            break
        except BlockingIOError:
            time.sleep(0.1)
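

# A Test represents one TestCase.testMethod invocation of a check-* file: it owns
# the spawned subprocess, its captured output, the retry counter, and (for
# nondestructive tests) the id of the shared global machine it was assigned to.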
class Test:
    def __init__(self, test_id, command, timeout, nondestructive, retry_when_affected, todo, cost=1):
        self.process = None
        self.retries = 0
        self.test_id = test_id
        self.command = command
        self.timeout = timeout
        self.nondestructive = nondestructive
        self.machine_id = None
        self.retry_when_affected = retry_when_affected
        self.todo = todo
        self.cost = cost
        self.returncode = None

    def assign_machine(self, machine_id, ssh_address, web_address):
        assert self.nondestructive, "assigning a machine only works for nondestructive test"
        self.machine_id = machine_id
        self.command.insert(-2, "--machine")
        self.command.insert(-2, ssh_address)
        self.command.insert(-2, "--browser")
        self.command.insert(-2, web_address)
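
    # start() runs the test under coreutils `timeout -v`, so a hung test is killed
    # after self.timeout seconds and exits with status 124 (the scheduling loop in
    # run() uses that code to restart a wedged global machine).  Output is buffered
    # in a TemporaryFile and only read back into memory by poll() once the process
    # has exited.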
    def start(self):
        if self.nondestructive:
            assert self.machine_id is not None, f"need to assign nondestructive test {self} {self.command} to a machine"
        self.outfile = tempfile.TemporaryFile()
        self.process = subprocess.Popen(["timeout", "-v", str(self.timeout), *self.command],
                                        stdout=self.outfile, stderr=subprocess.STDOUT)

    def poll(self):
        poll_result = self.process.poll()
        if poll_result is not None:
            self.outfile.flush()
            self.outfile.seek(0)
            self.output = self.outfile.read()
            self.outfile.close()
            self.outfile = None
            self.returncode = self.process.returncode

        return poll_result

    def finish(self, affected_tests: List[str], opts: argparse.Namespace) -> Tuple[Optional[str], int]:
        """Decide whether a finished test should be retried.

        Calls test-failure-policy on the test's output and prints the TAP result when needed.

        Returns (retry_reason, exit_code); retry_reason is either None or a string.
        """
        print_tap = not opts.list
        affected = any(self.command[0].endswith(t) for t in affected_tests)
        retry_reason = ""

        # Try affected tests 3 times
        if self.returncode == 0 and affected and self.retry_when_affected and self.retries < 2:
            retry_reason = "test affected tests 3 times"
            self.retries += 1
            self._print_test(print_tap, "# RETRY {0} ({1})".format(self.retries, retry_reason))
            return retry_reason, 0

        # If the test is being skipped, pick up the reason
        if self.returncode == 77:
            lines = self.output.splitlines()
            skip_reason = lines[-1].strip().decode("utf-8")
            self.output = b"\n".join(lines[:-1])
            self._print_test(print_tap, skip_reason=skip_reason)
            return None, 0

        # If the test was marked with @todo then...
        if self.todo is not None:
            if self.returncode == 0:
                # The test passed, but it shouldn't have.
                self.returncode = 1  # that's a fail
                self._print_test(print_tap, todo_reason=f'# expected failure: {self.todo}')
                return None, 1
            else:
                # The test failed, as expected
                # Outputs 'not ok 1 test # TODO ...'
                self._print_test(print_tap, todo_reason=f'# TODO {self.todo}')
                return None, 0

        if self.returncode == 0:
            self._print_test(print_tap)
            return None, 0

        if not opts.thorough:
            cmd = ["test-failure-policy"]
            if not opts.track_naughties:
                cmd.append("--offline")
            cmd.append(testvm.DEFAULT_IMAGE)
            try:
                proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                reason = proc.communicate(self.output + ("not ok " + str(self)).encode())[0].strip()
                if proc.returncode == 77:
                    self.returncode = proc.returncode
                    self._print_test(skip_reason="# SKIP {0}".format(reason.decode("utf-8")))
                    return None, 0
                if proc.returncode == 1:
                    retry_reason = reason.decode("utf-8")
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    sys.stderr.write("\nCouldn't run test-failure-policy: {0}\n".format(str(ex)))

        # HACK: many tests are unstable; always retry them up to 3 times unless affected
        if not affected and not retry_reason and not opts.no_retry_fail:
            retry_reason = "be robust against unstable tests"

        has_unexpected_message = testlib.UNEXPECTED_MESSAGE.encode() in self.output
        has_pixel_test_message = testlib.PIXEL_TEST_MESSAGE.encode() in self.output
        if self.retries < 2 and not (has_unexpected_message or has_pixel_test_message) and retry_reason:
            self.retries += 1
            self._print_test(retry_reason="# RETRY {0} ({1})".format(self.retries, retry_reason))
            return retry_reason, 0

        self.output += b"\n"
        self._print_test()
        self.machine_id = None
        return None, 1

    # internal methods

    def __str__(self):
        cost = "" if self.cost == 1 else f" ${self.cost}"
        nd = f" [ND@{self.machine_id}]" if self.nondestructive else ""
        return f"{self.test_id} {self.command[0]} {self.command[-1]}{cost}{nd}"

    def _print_test(self, print_tap=True, retry_reason="", skip_reason="", todo_reason=""):
        def write_line(line):
            while line:
                try:
                    sys.stdout.buffer.write(line)
                    break
                except BlockingIOError as e:
                    line = line[e.characters_written:]
                    time.sleep(0.1)

        # be quiet in TAP mode for successful tests: only print warnings and the last line
        lines = self.output.strip().splitlines(keepends=True)
        if print_tap and self.returncode == 0 and len(lines) > 0:
            for line in lines[:-1]:
                if line.startswith(b"WARNING:"):
                    write_line(line)
            write_line(lines[-1])
        else:
            for line in lines:
                write_line(line)

        if retry_reason:
            retry_reason = " " + retry_reason
        if skip_reason:
            skip_reason = " " + skip_reason
        if todo_reason:
            todo_reason = " " + todo_reason

        if not print_tap:
            print(retry_reason + skip_reason + todo_reason)
            flush_stdout()
            return

        print()  # TAP output needs to start on a separate line
        status = 'ok' if self.returncode in [0, 77] else 'not ok'
        print(f"{status} {self}{retry_reason}{skip_reason}{todo_reason}")
        flush_stdout()
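

# A GlobalMachine is a long-lived VM (plus its VirtNetwork) shared by the
# @nondestructive ("serial") tests; its ssh and web addresses are handed to each
# assigned test via the --machine/--browser options added in Test.assign_machine().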
class GlobalMachine:
    def __init__(self, restrict=True, cpus=None, memory_mb=None, machine_class=testvm.VirtMachine):
        self.image = testvm.DEFAULT_IMAGE
        self.network = testvm.VirtNetwork(image=self.image)
        self.networking = self.network.host(restrict=restrict)
        self.machine = machine_class(verbose=True, networking=self.networking, image=self.image,
                                     cpus=cpus, memory_mb=memory_mb)
        self.machine_class = machine_class
        if not os.path.exists(self.machine.image_file):
            self.machine.pull(self.machine.image_file)
        self.machine.start()
        self.start_time = time.time()
        self.duration = None
        self.ssh_address = f"{self.machine.ssh_address}:{self.machine.ssh_port}"
        self.web_address = f"{self.machine.web_address}:{self.machine.web_port}"
        self.running_test = None

    def reset(self):
        # It is important to re-use self.networking here, so that the
        # machine keeps its browser and control port.
        self.machine.kill()
        self.machine = self.machine_class(verbose=True, networking=self.networking, image=self.image)
        self.machine.start()

    def kill(self):
        assert self.running_test is None, "can't kill global machine with running test"
        self.machine.kill()
        self.network.kill()
        self.duration = round(time.time() - self.start_time)
        self.machine = None
        self.ssh_address = None
        self.web_address = None

    def is_available(self):
        return self.machine and self.running_test is None
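

# check_valid() doubles as a module-name sanitiser: a test file named e.g. "check-apps"
# maps to the importable module name "check_apps", while any file name containing
# unexpected characters is rejected (detect_tests() then silently skips it).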
def check_valid(filename):
    name = os.path.basename(filename)
    allowed = string.ascii_letters + string.digits + '-_'
    if not all(c in allowed for c in name):
        return None
    return name.replace("-", "_")


def build_command(filename, test, opts):
    cmd = [filename]
    if opts.trace:
        cmd.append("-t")
    if opts.verbosity:
        cmd.append("-v")
    if not opts.fetch:
        cmd.append("--nonet")
    if opts.list:
        cmd.append("-l")
    if opts.coverage:
        cmd.append("--coverage")
    cmd.append(test)
    return cmd
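
# For illustration (file and test names are made up), a built command looks
# roughly like ["test/verify/check-apps", "-v", "--nonet", "TestApps.testBasic"];
# nondestructive tests later get "--machine"/"--browser" arguments spliced in by
# Test.assign_machine().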


def get_affected_tests(test_dir, base_branch, test_files):
    if not base_branch:
        return []

    changed_tests = []

    # Detect affected tests from changed test files
    diff_out = subprocess.check_output(["git", "diff", "--name-only", "origin/" + base_branch, test_dir])
    # Never consider 'test/verify/check-example' to be affected - our tests for tests count on that
    # This file provides only examples; there is no place for it being flaky, so no need to retry
    changed_tests = [test.decode("utf-8") for test in diff_out.strip().splitlines()
                     if not test.endswith(b"check-example")]

    # If more than 3 test files were changed, don't consider any of them as affected,
    # as it might be a PR that changes more unrelated things.
    if len(changed_tests) > 3:
        # If 'test/verify/check-testlib' is affected, keep just that one - our tests for tests count on that
        if "test/verify/check-testlib" in changed_tests:
            changed_tests = ["test/verify/check-testlib"]
        else:
            changed_tests = []

    # Detect affected tests from changed pkg/* subdirectories in cockpit.
    # If affected tests get detected from pkg/* changes, don't apply the
    # "only do this for max. 3 check-* changes" rule (even if the PR also changes ≥ 3 check-* files)
    # (this does not apply to other projects)
    diff_out = subprocess.check_output(["git", "diff", "--name-only", "origin/" + base_branch, "--", "pkg/"])
    # Drop changes in CSS files - they do not affect tests, so there is no reason to retry
    files = [f.decode("utf-8") for f in diff_out.strip().splitlines() if not f.endswith(b"css")]
    changed_pkgs = {"check-" + pkg.split('/')[1] for pkg in files}
    changed_tests.extend([test for test in test_files if any(pkg in test for pkg in changed_pkgs)])

    return changed_tests
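
# For example, a change under pkg/storaged/ yields the prefix "check-storaged", so
# every test file whose path contains that string counts as affected, and its
# passing tests are re-run (up to three runs in total) for extra confidence.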


def detect_tests(test_files, image, opts):
    """Build the list of tests we'll parallelize and the ones we'll run serially"""
    parallel_tests = []
    serial_tests = []
    seen_classes = {}
    machine_class = None
    test_id = 1

    for filename in test_files:
        name = check_valid(filename)
        if not name or not os.path.isfile(filename):
            continue
        loader = importlib.machinery.SourceFileLoader(name, filename)
        module = importlib.util.module_from_spec(importlib.util.spec_from_loader(loader.name, loader))
        loader.exec_module(module)
        for test_suite in unittest.TestLoader().loadTestsFromModule(module):
            for test in test_suite:
                if hasattr(test, "machine_class") and test.machine_class is not None:
                    if machine_class is not None and machine_class != test.machine_class:
                        raise ValueError(f"only one unique machine_class can be used per project, provided with {machine_class} and {test.machine_class}")
                    machine_class = test.machine_class

                # ensure that test classes are unique, so that they can be selected properly
                cls = test.__class__.__name__
                if seen_classes.get(cls) not in [None, filename]:
                    raise ValueError("test class %s in %s already defined in %s" % (cls, filename, seen_classes[cls]))
                seen_classes[cls] = filename

                test_method = getattr(test.__class__, test._testMethodName)
                test_str = "{0}.{1}".format(cls, test._testMethodName)
                if opts.tests and not any(t in test_str for t in opts.tests):
                    continue
                if test_str in opts.exclude:
                    continue

                # most tests should take much less than 10 mins, so default to that;
                # longer tests can be annotated with @timeout(seconds);
                # check the test function first, fall back to the class's timeout
                test_timeout = testlib.get_decorator(test_method, test, "timeout", 600)
                nd = testlib.get_decorator(test_method, test, "nondestructive")
                rwa = not testlib.get_decorator(test_method, test, "no_retry_when_changed")
                todo = testlib.get_decorator(test_method, test, "todo")
                if getattr(test.__class__, "provision", None):
                    # each additionally provisioned VM costs parallel test capacity
                    cost = len(test.__class__.provision)
                else:
                    cost = 1
                test = Test(test_id, build_command(filename, test_str, opts), test_timeout, nd, rwa, todo, cost=cost)
                if nd:
                    serial_tests.append(test)
                else:
                    if not opts.nondestructive:
                        parallel_tests.append(test)
                test_id += 1

    # sort serial tests by class/test name, to avoid spurious errors where failures depend on the order of
    # execution; but let's make sure we always test them both ways around: hash the image name, which is
    # robust, reproducible, and provides an even distribution of both directions
    serial_tests.sort(key=lambda t: t.command[-1], reverse=bool(binascii.crc32(image.encode()) & 1))

    return (serial_tests, parallel_tests, machine_class)


def list_tests(opts):
    test_files = glob.glob(os.path.join(opts.test_dir, opts.test_glob))
    serial_tests, parallel_tests, _ = detect_tests(test_files, "dummy", opts)
    names = {t.command[-1] for t in serial_tests + parallel_tests}
    for n in sorted(names):
        print(n)


def run(opts, image):
    fail_count = 0
    start_time = time.time()

    if opts.coverage:
        prepare_for_code_coverage()

    test_files = glob.glob(os.path.join(opts.test_dir, opts.test_glob))
    changed_tests = get_affected_tests(opts.test_dir, opts.base, test_files)
    serial_tests, parallel_tests, machine_class = detect_tests(test_files, image, opts)
    serial_tests_len = len(serial_tests)
    parallel_tests_len = len(parallel_tests)
    if opts.machine:
        assert not parallel_tests

    print(f"1..{serial_tests_len + parallel_tests_len}")
    flush_stdout()

    running_tests = []
    global_machines = []

    if not opts.machine:
        # Create appropriate number of serial machines; prioritize the nondestructive (serial) tests, to get
        # them out of the way as fast as possible, then let the destructive (parallel) ones start as soon as
        # a given serial runner is done.
        num_global = min(serial_tests_len, opts.jobs)
        for i in range(num_global):
            global_machines.append(GlobalMachine(restrict=not opts.enable_network, cpus=opts.nondestructive_cpus,
                                                 memory_mb=opts.nondestructive_memory_mb,
                                                 machine_class=machine_class or testvm.VirtMachine))
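
    # Each pass of the loop below: reap finished tests (re-queueing them when
    # finish() asks for a retry), hand each free global machine the next serial
    # test (or kill the machine once the serial queue is empty), top up the
    # remaining job slots with parallel tests, and sleep briefly if no progress
    # was made.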
    # test scheduling loop
    while True:
        made_progress = False

        # mop up finished tests
        logging.debug("test loop: %d running tests", len(running_tests))
        for test in running_tests.copy():
            poll_result = test.poll()
            if poll_result is not None:
                made_progress = True
                running_tests.remove(test)
                test_machine = test.machine_id  # test.finish() resets it
                retry_reason, test_result = test.finish(changed_tests, opts)
                fail_count += test_result
                logging.debug("test %s finished; result %s retry reason %s", test, test_result, retry_reason)
                if test_machine is not None and not opts.machine:
                    # unassign from global machine
                    global_machines[test_machine].running_test = None

                    # sometimes our global machine gets messed up; also, tests that time out don't run cleanup handlers
                    # restart it to avoid an unbounded number of test retries and follow-up errors
                    if not opts.machine and (poll_result == 124 or (retry_reason and "test harness" in retry_reason)):
                        # try hard to keep the test output consistent
                        sys.stderr.write("\nRestarting global machine %s\n" % test_machine)
                        sys.stderr.flush()
                        global_machines[test_machine].reset()

                # run again if needed
                if retry_reason:
                    if test.nondestructive:
                        serial_tests.insert(0, test)
                    else:
                        parallel_tests.insert(0, test)

        if opts.machine:
            if not running_tests and serial_tests:
                test = serial_tests.pop(0)
                logging.debug("Static machine is free, assigning next test %s", test)
                test.assign_machine(-1, opts.machine, opts.browser)
                test.start()
                running_tests.append(test)
                made_progress = True
        else:
            # find free global machines, and either assign a new serial test, or kill them to free resources
            for (idx, machine) in enumerate(global_machines):
                if machine.is_available():
                    if serial_tests:
                        test = serial_tests.pop(0)
                        logging.debug("Global machine %s is free, assigning next test %s", idx, test)
                        machine.running_test = test
                        test.assign_machine(idx, machine.ssh_address, machine.web_address)
                        test.start()
                        running_tests.append(test)
                    else:
                        logging.debug("Global machine %s is free, and no more serial tests; killing", idx)
                        machine.kill()
                    made_progress = True
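
        # Cost-based filling below: e.g. with --jobs 8, a test whose class provisions
        # three VMs (cost 3) only starts once the total cost of the running tests drops
        # to 5 or less; a test whose cost exceeds --jobs runs by itself.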

        def running_cost():
            return sum(test.cost for test in running_tests)

        # fill the remaining available job slots with parallel tests; run tests with a cost higher than #jobs by themselves
        while parallel_tests and (running_cost() + parallel_tests[0].cost <= opts.jobs or len(running_tests) == 0):
            test = parallel_tests.pop(0)
            logging.debug("%d running tests with total cost %d, starting next parallel test %s",
                          len(running_tests), running_cost(), test)
            test.start()
            running_tests.append(test)
            made_progress = True

        # are we done?
        if not running_tests:
            assert not serial_tests, f"serial_tests should be empty: {[str(t) for t in serial_tests]}"
            assert not parallel_tests, f"parallel_tests should be empty: {[str(t) for t in parallel_tests]}"
            break

        # Sleep if we didn't make progress
        if not made_progress:
            time.sleep(0.5)

    # Create coverage report
    if opts.coverage:
        create_coverage_report()

    # print summary
    duration = int(time.time() - start_time)
    hostname = socket.gethostname().split(".")[0]

    serial_details = []
    if not opts.machine:
        for (idx, machine) in enumerate(global_machines):
            serial_details.append(f"{idx}: {machine.duration}s")
    details = f"[{duration}s on {hostname}, {parallel_tests_len} parallel tests, {serial_tests_len} serial tests: {', '.join(serial_details)}]"

    print()
    if fail_count > 0:
        print(f"# {fail_count} TESTS FAILED {details}")
    else:
        print(f"# TESTS PASSED {details}")
    flush_stdout()

    return fail_count
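

# Typical invocations (illustrative; the test name and addresses are made up):
#   TEST_JOBS=8 test/common/run-tests --test-dir test/verify
#   test/common/run-tests --machine 127.0.0.2:22001 --browser 127.0.0.2:9091 TestApps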
def main():
    parser = testlib.arg_parser(enable_sit=False)
    parser.add_argument('-j', '--jobs', type=int,
                        default=int(os.environ.get("TEST_JOBS", 1)), help="Number of concurrent jobs")
    parser.add_argument('--thorough', action='store_true',
                        help='Thorough mode, no skipping known issues')
    parser.add_argument('-n', '--nondestructive', action='store_true',
                        help='Only consider @nondestructive tests')
    parser.add_argument('--machine', metavar="hostname[:port]",
                        default=None, help="Run tests against an already running machine; implies --nondestructive")
    parser.add_argument('--browser', metavar="hostname[:port]",
                        default=None, help="When using --machine, use this cockpit web address")
    parser.add_argument('--test-dir', default=os.environ.get("TEST_DIR", testvm.TEST_DIR),
                        help="Directory in which to glob for test files; default: %(default)s")
    parser.add_argument('--test-glob', default="check-*",
                        help="Pattern with which to glob in the test directory; default: %(default)s")
    parser.add_argument('--exclude', action="append", default=[], metavar="TestClass.testName",
                        help="Exclude test (exact match only); can be specified multiple times")
    parser.add_argument('--nondestructive-cpus', type=int, default=None,
                        help="Number of CPUs for nondestructive test global machines")
    parser.add_argument('--nondestructive-memory-mb', type=int, default=None,
                        help="RAM size for nondestructive test global machines")
    parser.add_argument('--base', default=os.environ.get("BASE_BRANCH"),
                        help="Retry affected tests compared to given base branch; default: %(default)s")
    parser.add_argument('--track-naughties', action='store_true',
                        help='Update the occurrence of naughties on cockpit-project/bots')
    parser.add_argument('--no-retry-fail', action='store_true',
                        help="Don't retry failed tests")
    opts = parser.parse_args()

    if opts.machine:
        if opts.jobs > 1:
            parser.error("--machine cannot be used with concurrent jobs")
        if not opts.browser:
            parser.error("--browser must be specified together with --machine")
        opts.nondestructive = True

    # Tell any subprocesses what we are testing
    if "TEST_REVISION" not in os.environ:
        r = subprocess.run(["git", "rev-parse", "HEAD"],
                           universal_newlines=True, check=False, stdout=subprocess.PIPE)
        if r.returncode == 0:
            os.environ["TEST_REVISION"] = r.stdout.strip()

    os.environ["TEST_BROWSER"] = os.environ.get("TEST_BROWSER", "chromium")

    image = testvm.DEFAULT_IMAGE
    testvm.DEFAULT_IMAGE = image
    os.environ["TEST_OS"] = image

    # Make sure tests can make relative imports
    sys.path.append(os.path.realpath(opts.test_dir))

    if opts.list:
        list_tests(opts)
        return 0

    return run(opts, image)


if __name__ == '__main__':
    # logging.basicConfig(level=logging.DEBUG)
    sys.exit(main())