#!/usr/bin/env python

import datetime
import os
import re
import sys
import shlex
import shutil
import subprocess
import xml.etree.ElementTree as ET
import argparse
import logging
import time

sys.path.insert(0, '../../lib/')
import py_test_lib  # nopep8


OUTPUT = '%s/output' % os.getcwd()
RESULT_FILE = '%s/result.txt' % OUTPUT
TRADEFED_STDOUT = '%s/tradefed-stdout.txt' % OUTPUT
TRADEFED_LOGCAT = '%s/tradefed-logcat.txt' % OUTPUT
AGGREGATED = 'aggregated'
ATOMIC = 'atomic'


def result_parser(xml_file, result_format):
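    """Parse a Tradefed test_result.xml and record results to RESULT_FILE.

    Character references that are not valid in XML 1.0 are stripped
    before parsing. With the 'aggregated' format, per-module counts of
    executed/passed/failed tests are recorded; with 'atomic', each
    individual test result is recorded separately.
    """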
    with open(xml_file, 'rb') as etree_file:
        etree_content = etree_file.read()
    # The pattern must be bytes to match the binary file content.
    rx = re.compile(b"&#([0-9]+);|&#x([0-9a-fA-F]+);")
    endpos = len(etree_content)
    pos = 0
    while pos < endpos:
        # remove characters that don't conform to XML spec
        m = rx.search(etree_content, pos)
        if not m:
            break
        mstart, mend = m.span()
        target = m.group(1)
        if target:
            num = int(target)
        else:
            num = int(m.group(2), 16)
        # #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
        if not (num in (0x9, 0xA, 0xD) or
                0x20 <= num <= 0xD7FF or
                0xE000 <= num <= 0xFFFD or
                0x10000 <= num <= 0x10FFFF):
            etree_content = etree_content[:mstart] + etree_content[mend:]
            endpos = len(etree_content)
        pos = mend
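    # Example: a reference such as '&#27;' (ESC, 0x1B) is not valid in
    # XML 1.0 and is dropped above, while '&#65;' ('A') is kept.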

    try:
        root = ET.fromstring(etree_content)
    except ET.ParseError as e:
        logger.error('xml.etree.ElementTree.ParseError: %s' % e)
        logger.info('Please check %s manually' % xml_file)
        sys.exit(1)
    logger.info('Test modules in %s: %s'
                % (xml_file, str(len(root.findall('Module')))))
    failures_count = 0
    for elem in root.findall('Module'):
        # Naming: Module Name + Test Case Name + Test Name
        if 'abi' in elem.attrib.keys():
            module_name = '.'.join([elem.attrib['abi'], elem.attrib['name']])
        else:
            module_name = elem.attrib['name']

        if result_format == AGGREGATED:
            tests_executed = len(elem.findall('.//Test'))
            tests_passed = len(elem.findall('.//Test[@result="pass"]'))
            tests_failed = len(elem.findall('.//Test[@result="fail"]'))

            result = '%s_executed pass %s' % (module_name, str(tests_executed))
            py_test_lib.add_result(RESULT_FILE, result)

            result = '%s_passed pass %s' % (module_name, str(tests_passed))
            py_test_lib.add_result(RESULT_FILE, result)

            failed_result = 'pass'
            if tests_failed > 0:
                failed_result = 'fail'
            result = '%s_failed %s %s' % (module_name, failed_result,
                                          str(tests_failed))
            py_test_lib.add_result(RESULT_FILE, result)

            # Record a result showing whether the module run completed.
            tests_done = elem.get('done', 'false')
            if tests_done == 'false':
                result = '%s_done fail' % module_name
            else:
                result = '%s_done pass' % module_name
            py_test_lib.add_result(RESULT_FILE, result)

            if args.FAILURES_PRINTED > 0 and failures_count < args.FAILURES_PRINTED:
                # Print failed test cases for debugging.
                test_cases = elem.findall('.//TestCase')
                for test_case in test_cases:
                    failed_tests = test_case.findall('.//Test[@result="fail"]')
                    for failed_test in failed_tests:
                        test_name = '%s/%s.%s' % (module_name,
                                                  test_case.get("name"),
                                                  failed_test.get("name"))
                        failures = failed_test.findall('.//Failure')
                        failure_msg = ''
                        for failure in failures:
                            failure_msg = '%s \n %s' % (failure_msg,
                                                        failure.get('message'))

                        logger.info('%s %s' % (test_name, failure_msg.strip()))
                        failures_count += 1
                        if failures_count >= args.FAILURES_PRINTED:
                            logger.info('Reached the limit of %d printed '
                                        'failures; output for the remaining '
                                        'failed test cases will be skipped.'
                                        % args.FAILURES_PRINTED)
                            # Break out of the failed_tests loop.
                            break
                    if failures_count >= args.FAILURES_PRINTED:
                        # Break out of the test_cases loop as well.
                        break

        if result_format == ATOMIC:
            test_cases = elem.findall('.//TestCase')
            for test_case in test_cases:
                tests = test_case.findall('.//Test')
                for atomic_test in tests:
                    atomic_test_result = atomic_test.get("result")
                    atomic_test_name = "%s/%s.%s" % (module_name,
                                                     test_case.get("name"),
                                                     atomic_test.get("name"))
                    py_test_lib.add_result(
                        RESULT_FILE, "%s %s" % (atomic_test_name,
                                                atomic_test_result))

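# For reference, result_parser() expects test_result.xml to be shaped
# roughly as below. This is an illustrative sketch based on the queries
# above; the names are examples and real Tradefed output carries many
# more attributes:
#
#   <Result>
#     <Module name="CtsExampleTestCases" abi="arm64-v8a" done="true">
#       <TestCase name="ExampleTest">
#         <Test name="testFoo" result="pass"/>
#         <Test name="testBar" result="fail">
#           <Failure message="assertion failed"/>
#         </Test>
#       </TestCase>
#     </Module>
#   </Result>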

parser = argparse.ArgumentParser()
parser.add_argument('-t', dest='TEST_PARAMS', required=True,
                    help="tradefed shell test parameters")
parser.add_argument('-p', dest='TEST_PATH', required=True,
                    help="path to tradefed package top directory")
parser.add_argument('-r', dest='RESULTS_FORMAT', required=False,
                    default=AGGREGATED, choices=[AGGREGATED, ATOMIC],
                    help="The format of the saved results. 'aggregated' means the \
                    numbers of passed and failed tests are recorded per module; \
                    'atomic' means each test result is recorded separately")

# The total number of failed test cases to be printed for this job.
# Printing too many failures can cause the LAVA job to time out.
# Defaults to printing no failures.
parser.add_argument('-f', dest='FAILURES_PRINTED', type=int,
                    required=False, default=0,
                    help="Specify the number of failed test cases to be \
                    printed; 0 means do not print any failures.")

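# Illustrative invocation (the module name below is an example only):
#   ./tradefed-runner.py -t "cts --module CtsExampleTestCases" \
#       -p android-cts -r atomic -f 10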
args = parser.parse_args()

if os.path.exists(OUTPUT):
    suffix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    shutil.move(OUTPUT, '%s_%s' % (OUTPUT, suffix))
os.makedirs(OUTPUT)

# Set up the logger.
# There might be an issue in the LAVA/local dispatcher, most likely a
# problem with pexpect: it prints the messages from print() last, out of
# sequence. Use logging and subprocess to work around this.
logger = logging.getLogger('Tradefed')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s: %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

tradefed_stdout = open(TRADEFED_STDOUT, 'w')
tradefed_logcat_out = open(TRADEFED_LOGCAT, 'w')
tradefed_logcat = subprocess.Popen(['adb', 'logcat'], stdout=tradefed_logcat_out)

logger.info('Test params: %s' % args.TEST_PARAMS)
logger.info('Starting tradefed shell test...')

command = None
if args.TEST_PATH == "android-cts":
    command = "android-cts/tools/cts-tradefed run commandAndExit " + args.TEST_PARAMS
elif args.TEST_PATH == "android-vts":
    os.environ["VTS_ROOT"] = os.getcwd()
    command = "android-vts/tools/vts-tradefed run commandAndExit " + args.TEST_PARAMS

if command is None:
    logger.error("Unsupported TEST_PATH: %s" % args.TEST_PATH)
    sys.exit(1)
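# For example, with -p android-cts and -t "cts", 'command' is now:
#   android-cts/tools/cts-tradefed run commandAndExit cts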

child = subprocess.Popen(shlex.split(command))
fail_to_complete = child.wait()

if fail_to_complete:
    py_test_lib.add_result(RESULT_FILE, 'tradefed-test-run fail')
else:
    py_test_lib.add_result(RESULT_FILE, 'tradefed-test-run pass')

logger.info('Tradefed test finished')
tradefed_stdout.close()
tradefed_logcat.kill()
tradefed_logcat_out.close()

# Locate and parse test result.
result_dir = '%s/results' % args.TEST_PATH
test_result = 'test_result.xml'
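# Tradefed usually writes each run into a timestamped subdirectory,
# e.g. results/<timestamp>/test_result.xml, hence the os.walk() below.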
if os.path.isdir(result_dir):
    for root, dirs, files in os.walk(result_dir):
        for name in files:
            if name == test_result:
                result_parser(os.path.join(root, name), args.RESULTS_FORMAT)