Diffstat (limited to 'automated/utils')
-rwxr-xr-x  automated/utils/test-runner.py             | 147
-rwxr-xr-x  automated/utils/upload-to-artifactorial.sh |   4
2 files changed, 128 insertions(+), 23 deletions(-)
diff --git a/automated/utils/test-runner.py b/automated/utils/test-runner.py
index fd4591a..16f5e30 100755
--- a/automated/utils/test-runner.py
+++ b/automated/utils/test-runner.py
@@ -2,6 +2,7 @@
import argparse
import csv
import cmd
+import copy
import json
import logging
import os
@@ -13,6 +14,7 @@ import sys
import textwrap
import time
from uuid import uuid4
+from distutils.spawn import find_executable
try:
@@ -34,8 +36,11 @@ class StoreDictKeyPair(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values:
- k, v = kv.split("=")
- my_dict[k] = v
+ if "=" in kv:
+ k, v = kv.split("=", 1)
+ my_dict[k] = v
+ else:
+ print("Invalid parameter: %s" % kv)
setattr(namespace, self.dest, my_dict)
@@ -69,6 +74,59 @@ class TestPlan(object):
self.timeout = args.timeout
self.skip_install = args.skip_install
self.logger = logging.getLogger('RUNNER.TestPlan')
+ self.overlay = args.overlay
+
+ def apply_overlay(self, test_list):
+ fixed_test_list = copy.deepcopy(test_list)
+ logger = logging.getLogger('RUNNER.TestPlan.Overlay')
+ with open(self.overlay) as f:
+            data = yaml.safe_load(f)
+
+ if data.get('skip'):
+ skip_tests = data['skip']
+ for test in test_list:
+ for skip_test in skip_tests:
+ if test['path'] == skip_test['path'] and test['repository'] == skip_test['repository']:
+ fixed_test_list.remove(test)
+ logger.info("Skipped: {}".format(test))
+ else:
+ continue
+
+ if data.get('amend'):
+ amend_tests = data['amend']
+ for test in fixed_test_list:
+ for amend_test in amend_tests:
+                if test['path'] == amend_test['path'] and test['repository'] == amend_test['repository']:
+ if amend_test.get('parameters'):
+ if test.get('parameters'):
+ test['parameters'].update(amend_test['parameters'])
+ else:
+ test['parameters'] = amend_test['parameters']
+ logger.info('Updated: {}'.format(test))
+ else:
+ logger.warning("'parameters' not found in {}, nothing to amend.".format(amend_test))
+
+ if data.get('add'):
+ add_tests = data['add']
+ unique_add_tests = []
+ for test in add_tests:
+ if test not in unique_add_tests:
+ unique_add_tests.append(test)
+ else:
+ logger.warning("Skipping duplicate test {}".format(test))
+
+ for test in test_list:
+ del test['uuid']
+
+ for add_test in unique_add_tests:
+ if add_test in test_list:
+                logger.warning("{} already included in the test plan, skipping.".format(add_test))
+ else:
+ add_test['uuid'] = str(uuid4())
+ fixed_test_list.append(add_test)
+ logger.info("Added: {}".format(add_test))
+
+ return fixed_test_list
def test_list(self, kind="automated"):
if self.test_def:
@@ -88,22 +146,30 @@ class TestPlan(object):
with open(self.test_plan, 'r') as f:
test_plan = yaml.safe_load(f)
try:
+ plan_version = test_plan['metadata'].get('format')
+ self.logger.info('Test plan version: {}'.format(plan_version))
+ if plan_version == "Linaro Test Plan v2":
+ tests = test_plan['tests'][kind]
+ elif plan_version == "Linaro Test Plan v1" or plan_version is None:
+ tests = []
+ for requirement in test_plan['requirements']:
+ if 'tests' in requirement.keys():
+ if requirement['tests'] and \
+ kind in requirement['tests'].keys() and \
+ requirement['tests'][kind]:
+ for test in requirement['tests'][kind]:
+ tests.append(test)
+
test_list = []
unique_tests = [] # List of test hashes
- for requirement in test_plan['requirements']:
- if 'tests' in requirement.keys():
- if requirement['tests'] and \
- kind in requirement['tests'].keys() and \
- requirement['tests'][kind]:
- for test in requirement['tests'][kind]:
- test_hash = hash(json.dumps(test, sort_keys=True))
- if test_hash in unique_tests:
- # Test is already in the test_list; don't add it again.
- self.logger.warning(
- "Skipping duplicate test {}".format(test))
- continue
- unique_tests.append(test_hash)
- test_list.append(test)
+ for test in tests:
+ test_hash = hash(json.dumps(test, sort_keys=True))
+ if test_hash in unique_tests:
+ # Test is already in the test_list; don't add it again.
+ self.logger.warning("Skipping duplicate test {}".format(test))
+ continue
+ unique_tests.append(test_hash)
+ test_list.append(test)
for test in test_list:
test['uuid'] = str(uuid4())
except KeyError as e:
@@ -113,7 +179,10 @@ class TestPlan(object):
            self.logger.error('Please specify a test or test plan.')
sys.exit(1)
- return test_list
+ if self.overlay is None:
+ return test_list
+ else:
+ return self.apply_overlay(test_list)
class TestSetup(object):
@@ -600,6 +669,9 @@ class ResultParser(object):
self.results['name'] = self.testdef['metadata']['name']
if 'params' in self.testdef.keys():
self.results['params'] = self.testdef['params']
+ if self.args.test_def_params:
+ for param_name, param_value in self.args.test_def_params.items():
+ self.results['params'][param_name] = param_value
if 'parse' in self.testdef.keys() and 'pattern' in self.testdef['parse'].keys():
self.pattern = self.testdef['parse']['pattern']
self.logger.info("Enabling log parse pattern: %s" % self.pattern)
@@ -621,6 +693,10 @@ class ResultParser(object):
test_version = subprocess.check_output("git rev-parse HEAD", shell=True).decode('utf-8')
self.results['version'] = test_version.rstrip()
os.chdir(path)
+ self.lava_run = args.lava_run
+ if self.lava_run and not find_executable('lava-test-case'):
+            self.logger.info("lava-test-case not found, '-l' or '--lava_run' option ignored")
+ self.lava_run = False
def run(self):
self.parse_stdout()
@@ -667,21 +743,35 @@ class ResultParser(object):
self.metrics.append(data.copy())
+ if self.lava_run:
+ self.send_to_lava(data)
+
def parse_pattern(self):
with open('%s/stdout.log' % self.test['test_path'], 'r') as f:
+ rex_pattern = re.compile(r'%s' % self.pattern)
for line in f:
data = {}
- m = re.search(r'%s' % self.pattern, line)
+ m = rex_pattern.search(line)
if m:
data = m.groupdict()
for x in ['measurement', 'units']:
if x not in data:
data[x] = ''
- if self.fixup:
+ if self.fixup and data['result'] in self.fixup:
data['result'] = self.fixup[data['result']]
self.metrics.append(data.copy())
+ if self.lava_run:
+ self.send_to_lava(data)
+
+ def send_to_lava(self, data):
+ cmd = 'lava-test-case {} --result {}'.format(data['test_case_id'], data['result'])
+ if data['measurement']:
+ cmd = '{} --measurement {} --units {}'.format(cmd, data['measurement'], data['units'])
+ self.logger.debug('lava-run: cmd: {}'.format(cmd))
+ subprocess.call(shlex.split(cmd))
+
def dict_to_json(self):
# Save test results to output/test_id/result.json
with open('%s/result.json' % self.test['test_path'], 'w') as f:
@@ -774,21 +864,36 @@ def get_args():
parser.add_argument('-e', '--skip_environment', dest='skip_environment',
default=False, action='store_true',
help='skip environmental data collection (board name, distro, etc)')
+ parser.add_argument('-l', '--lava_run', dest='lava_run',
+ default=False, action='store_true',
+                        help='send test results to LAVA with lava-test-case.')
+ parser.add_argument('-O', '--overlay', default=None,
+ dest='overlay', help=textwrap.dedent('''\
+                        Specify a test plan overlay file to:
+ * skip tests
+ * amend test parameters
+ * add new tests
+ '''))
+ parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
+ default=False, help='Set log level to DEBUG.')
args = parser.parse_args()
return args
def main():
+ args = get_args()
+
# Setup logger.
logger = logging.getLogger('RUNNER')
- logger.setLevel(logging.DEBUG)
+ logger.setLevel(logging.INFO)
+ if args.verbose:
+ logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s: %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
- args = get_args()
logger.debug('Test job arguments: %s' % args)
if args.kind != "manual" and args.target is None:
if os.geteuid() != 0:
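
The file passed with the new '-O'/'--overlay' option is plain YAML with optional 'skip', 'amend' and 'add' sections; apply_overlay() matches entries against a test's 'repository' and 'path' keys and, for 'amend', merges the given 'parameters' into the matching test. A minimal sketch of such an overlay (the repository URL, paths and parameter values below are illustrative, not taken from this change):

    skip:
      - repository: https://github.com/Linaro/test-definitions.git
        path: automated/linux/ltp/ltp.yaml
    amend:
      - repository: https://github.com/Linaro/test-definitions.git
        path: automated/linux/smoke/smoke.yaml
        parameters:
          TESTS: "pwd uname"
    add:
      - repository: https://github.com/Linaro/test-definitions.git
        path: automated/linux/dd-wr-speed/dd-wr-speed.yaml
        parameters:
          ITERATION: "3"

With such a file, '-O overlay.yaml' applies the overlay to the selected test plan, '-l' additionally reports each parsed result through lava-test-case (ignored with a log message when the command is not on PATH), and '-v' raises the runner's log level to DEBUG.
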
diff --git a/automated/utils/upload-to-artifactorial.sh b/automated/utils/upload-to-artifactorial.sh
index 1cb7fcb..234c625 100755
--- a/automated/utils/upload-to-artifactorial.sh
+++ b/automated/utils/upload-to-artifactorial.sh
@@ -27,7 +27,7 @@ fi
if which lava-test-reference; then
    # If 'ARTIFACTORIAL_TOKEN' is defined in the 'secrets' dictionary in the job
    # definition file, it will be used.
- lava_test_dir="$(find /lava-* -maxdepth 0 -type d 2>/dev/null | sort | tail -1)"
+ lava_test_dir="$(find /lava-* -maxdepth 0 -type d -regex '/lava-[0-9]+' 2>/dev/null | sort | tail -1)"
if test -f "${lava_test_dir}/secrets" && grep -q "ARTIFACTORIAL_TOKEN" "${lava_test_dir}/secrets"; then
# shellcheck disable=SC1090
. "${lava_test_dir}/secrets"
@@ -43,7 +43,7 @@ if which lava-test-reference; then
fi
if echo "${return}" | grep "$(basename "${ATTACHMENT}")"; then
- lava-test-reference "test-attachment" --result "pass" --reference "https://archive.validation.linaro.org/artifacts/${return}"
+ lava-test-reference "test-attachment" --result "pass" --reference "${return}"
else
echo "test-attachment fail"
which lava-test-case && lava-test-case "test-attachment" --result "fail"
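
For reference, test_list() in test-runner.py now keys off metadata['format'] in the plan file: plans declaring 'Linaro Test Plan v2' list their tests directly under tests/automated and tests/manual, while v1 plans (or plans with no format field) keep the old per-requirement layout. A rough sketch of the v2 shape the new code expects (names and paths are illustrative):

    metadata:
      format: "Linaro Test Plan v2"
      name: example-plan
    tests:
      automated:
        - repository: https://github.com/Linaro/test-definitions.git
          path: automated/linux/smoke/smoke.yaml
      manual: []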