author    Yongqin Liu <yongqin.liu@linaro.org>    2018-02-26 20:29:28 +0800
committer Milosz Wasilewski <milosz.wasilewski@linaro.org>    2018-03-06 11:40:54 +0000
commit    1de7b939c5a950f74ca8e30c267927463c2df277 (patch)
tree      1288c3778306e82dbdfb537052cdb855c44a7b92 /automated/android
parent    bc5f52e3f9bf8fd55f56fd62ec5a1252ceb30ee0 (diff)
tradefed tests: add support for specifying the number of failed test cases to be printed
The LAVA side will report a timeout problem if we print too many failed test cases in the LAVA log. Here we add support for specifying the number of failed test cases to print for each job.

Change-Id: Ia4937ac1bdba6eea65cb5162e8e62d6048eb3b94
Signed-off-by: Yongqin Liu <yongqin.liu@linaro.org>
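To illustrate the intent only (this is not the commit's code), a minimal Python sketch of capping how many failure lines reach the job log could look as follows; the helper name log_failures, the logger name, and the limit handling are assumptions of this sketch:

import logging

logger = logging.getLogger('tradefed-runner')

def log_failures(failure_lines, limit):
    # Log at most `limit` failure descriptions; a limit of 0 logs nothing,
    # which keeps the job log small enough to avoid LAVA streaming timeouts.
    if limit <= 0:
        return
    for count, line in enumerate(failure_lines, start=1):
        logger.info(line)
        if count >= limit:
            logger.info('Reached the limit of %d printed failures; '
                        'skipping the rest.', limit)
            break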
Diffstat (limited to 'automated/android')
-rwxr-xr-x  automated/android/tradefed/tradefed-runner.py | 52
-rwxr-xr-x  automated/android/tradefed/tradefed.sh        |  9
-rw-r--r--  automated/android/tradefed/tradefed.yaml      |  4
3 files changed, 46 insertions(+), 19 deletions(-)
diff --git a/automated/android/tradefed/tradefed-runner.py b/automated/android/tradefed/tradefed-runner.py
index 2281d7d..814ee1e 100755
--- a/automated/android/tradefed/tradefed-runner.py
+++ b/automated/android/tradefed/tradefed-runner.py
@@ -60,6 +60,7 @@ def result_parser(xml_file, result_format):
sys.exit(1)
logger.info('Test modules in %s: %s'
% (xml_file, str(len(root.findall('Module')))))
+ failures_count = 0
for elem in root.findall('Module'):
# Naming: Module Name + Test Case Name + Test Name
if 'abi' in elem.attrib.keys():
@@ -93,21 +94,33 @@ def result_parser(xml_file, result_format):
result = '%s_done pass' % module_name
py_test_lib.add_result(RESULT_FILE, result)
- # print failed test cases for debug
- test_cases = elem.findall('.//TestCase')
- for test_case in test_cases:
- failed_tests = test_case.findall('.//Test[@result="fail"]')
- for failed_test in failed_tests:
- test_name = '%s/%s.%s' % (module_name,
- test_case.get("name"),
- failed_test.get("name"))
- failures = failed_test.findall('.//Failure')
- failure_msg = ''
- for failure in failures:
- failure_msg = '%s \n %s' % (failure_msg,
- failure.get('message'))
-
- logger.info('%s %s' % (test_name, failure_msg.strip()))
+ if args.FAILURES_PRINTED > 0 and failures_count < args.FAILURES_PRINTED:
+ # print failed test cases for debug
+ test_cases = elem.findall('.//TestCase')
+ for test_case in test_cases:
+ failed_tests = test_case.findall('.//Test[@result="fail"]')
+ for failed_test in failed_tests:
+ test_name = '%s/%s.%s' % (module_name,
+ test_case.get("name"),
+ failed_test.get("name"))
+ failures = failed_test.findall('.//Failure')
+ failure_msg = ''
+ for failure in failures:
+ failure_msg = '%s \n %s' % (failure_msg,
+ failure.get('message'))
+
+ logger.info('%s %s' % (test_name, failure_msg.strip()))
+ failures_count = failures_count + 1
+ if failures_count > args.FAILURES_PRINTED:
+ logger.info('More than %d test cases failed; '
+ 'output for the remaining failed test '
+ 'cases will be skipped.'
+ % (args.FAILURES_PRINTED))
+ # break out of the failed_tests loop
+ break
+ if failures_count > args.FAILURES_PRINTED:
+ # break out of the test_cases loop
+ break
if result_format == ATOMIC:
test_cases = elem.findall('.//TestCase')
@@ -133,6 +146,15 @@ parser.add_argument('-r', dest='RESULTS_FORMAT', required=False,
help="The format of the saved results. 'aggregated' means number of \
passed and failed tests are recorded for each module. 'atomic' means \
each test result is recorded separately")
+
+## The total number of failed test cases to be printed for this job.
+## Printing too many failures would cause the LAVA job to time out.
+## Defaults to not printing any failures.
+parser.add_argument('-f', dest='FAILURES_PRINTED', type=int,
+ required=False, default=0,
+ help="Specify the number of failed test cases to be \
+ printed, 0 means no failures will be printed.")
+
args = parser.parse_args()
# TEST_PARAMS = args.TEST_PARAMS
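For reference outside of the diff context, below is a self-contained sketch of the added logic, assuming the Module/TestCase/Test/Failure layout the runner parses above; it omits the abi prefix handling, replaces the nested break statements with an early return, and the function name and file path are illustrative only:

import xml.etree.ElementTree as ET

def print_failed_tests(xml_file, failures_printed):
    # Print at most `failures_printed` failed tests; 0 disables printing
    # entirely so the job log stays short.
    if failures_printed <= 0:
        return
    printed = 0
    root = ET.parse(xml_file).getroot()
    for module in root.findall('Module'):
        module_name = module.get('name')
        for test_case in module.findall('.//TestCase'):
            for failed_test in test_case.findall('.//Test[@result="fail"]'):
                test_name = '%s/%s.%s' % (module_name,
                                          test_case.get('name'),
                                          failed_test.get('name'))
                messages = [failure.get('message') or ''
                            for failure in failed_test.findall('.//Failure')]
                print('%s %s' % (test_name, ' '.join(messages).strip()))
                printed += 1
                if printed >= failures_printed:
                    print('Printed %d failed test cases; skipping the rest.'
                          % failures_printed)
                    return

Calling print_failed_tests('test_result.xml', 10), for example, would print at most ten failed tests from a result file and then stop.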
diff --git a/automated/android/tradefed/tradefed.sh b/automated/android/tradefed/tradefed.sh
index 31f64e7..ff696bf 100755
--- a/automated/android/tradefed/tradefed.sh
+++ b/automated/android/tradefed/tradefed.sh
@@ -13,13 +13,15 @@ TEST_PATH="android-cts"
RESULT_FORMAT="aggregated"
RESULT_FILE="$(pwd)/output/result.txt"
export RESULT_FILE
+# the default number of failed test cases to be printed
+FAILURES_PRINTED="0"
usage() {
- echo "Usage: $0 [-o timeout] [-n serialno] [-c cts_url] [-t test_params] [-p test_path] [-r <aggregated|atomic>]" 1>&2
+ echo "Usage: $0 [-o timeout] [-n serialno] [-c cts_url] [-t test_params] [-p test_path] [-r <aggregated|atomic>] [-f failures_printed]" 1>&2
exit 1
}
-while getopts ':o:n:c:t:p:r:' opt; do
+while getopts ':o:n:c:t:p:r:f:' opt; do
case "${opt}" in
o) TIMEOUT="${OPTARG}" ;;
n) export ANDROID_SERIAL="${OPTARG}" ;;
@@ -27,6 +29,7 @@ while getopts ':o:n:c:t:p:r:' opt; do
t) TEST_PARAMS="${OPTARG}" ;;
p) TEST_PATH="${OPTARG}" ;;
r) RESULT_FORMAT="${OPTARG}" ;;
+ f) FAILURES_PRINTED="${OPTARG}" ;;
*) usage ;;
esac
done
@@ -73,4 +76,4 @@ fi
# Run tradefed test.
info_msg "About to run tradefed shell on device ${ANDROID_SERIAL}"
-./tradefed-runner.py -t "${TEST_PARAMS}" -p "${TEST_PATH}" -r "${RESULT_FORMAT}"
+./tradefed-runner.py -t "${TEST_PARAMS}" -p "${TEST_PATH}" -r "${RESULT_FORMAT}" -f "${FAILURES_PRINTED}"
diff --git a/automated/android/tradefed/tradefed.yaml b/automated/android/tradefed/tradefed.yaml
index 4b35a8a..e30f074 100644
--- a/automated/android/tradefed/tradefed.yaml
+++ b/automated/android/tradefed/tradefed.yaml
@@ -31,6 +31,8 @@ params:
TOKEN: ""
AP_SSID: ""
AP_KEY: ""
+ # Specify the number of failures to be printed
+ FAILURES_PRINTED: "0"
run:
steps:
@@ -43,7 +45,7 @@ run:
# create test user to run the cts/vts tests
- useradd -m testuser && echo "testuser created successfully"
- chown testuser:testuser .
- - sudo -u testuser ./tradefed.sh -o "${TIMEOUT}" -c "${TEST_URL}" -t "${TEST_PARAMS}" -p "${TEST_PATH}" -r "${RESULTS_FORMAT}" -n "${ANDROID_SERIAL}"
+ - sudo -u testuser ./tradefed.sh -o "${TIMEOUT}" -c "${TEST_URL}" -t "${TEST_PARAMS}" -p "${TEST_PATH}" -r "${RESULTS_FORMAT}" -n "${ANDROID_SERIAL}" -f "${FAILURES_PRINTED}"
# Upload test log and result files to artifactorial.
- cp -r ./${TEST_PATH}/results ./output/ || true
- cp -r ./${TEST_PATH}/logs ./output/ || true
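End to end, the new FAILURES_PRINTED job parameter travels from tradefed.yaml through tradefed.sh -f into tradefed-runner.py -f. As a hedged sketch of that wiring, the Python call below mirrors the tradefed.sh invocation in the yaml step; every parameter value is an illustrative placeholder, not something taken from this repository:

import subprocess

params = {
    'TIMEOUT': '300',
    'TEST_PARAMS': 'run cts -m CtsBionicTestCases',  # hypothetical test plan
    'TEST_PATH': 'android-cts',
    'RESULTS_FORMAT': 'aggregated',
    'ANDROID_SERIAL': 'emulator-5554',
    'FAILURES_PRINTED': '10',  # print at most 10 failed test cases in the log
}

# Equivalent of the "sudo -u testuser ./tradefed.sh ... -f ${FAILURES_PRINTED}"
# step above, minus the user switch and the CTS download URL.
subprocess.run([
    './tradefed.sh',
    '-o', params['TIMEOUT'],
    '-t', params['TEST_PARAMS'],
    '-p', params['TEST_PATH'],
    '-r', params['RESULTS_FORMAT'],
    '-n', params['ANDROID_SERIAL'],
    '-f', params['FAILURES_PRINTED'],
], check=True)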