path: root/lava_dispatcher/actions/test/shell.py
blob: 050cce5d105ef42dc984e3f2caf95079102a516e
# Copyright (C) 2014 Linaro Limited
#
# Author: Neil Williams <neil.williams@linaro.org>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses>.

import re
import sys
import time
import yaml
import decimal
import logging
import pexpect
from nose.tools import nottest
from collections import OrderedDict

from lava_dispatcher.actions.test import (
    TestAction,
    handle_testcase
)
from lava_dispatcher.action import (
    Pipeline,
    JobError,
    InfrastructureError,
    TestError,
    LAVABug,
)
from lava_dispatcher.logical import (
    LavaTest,
    RetryAction
)
from lava_dispatcher.connection import SignalMatch
from lava_dispatcher.protocols.lxc import LxcProtocol
from lava_dispatcher.utils.constants import (
    DEFAULT_V1_PATTERN,
    DEFAULT_V1_FIXUP,
)
from functools import reduce

# pylint: disable=too-many-branches,too-many-statements,too-many-instance-attributes,logging-not-lazy


class TestShell(LavaTest):
    """
    LavaTestShell Strategy object
    """
    def __init__(self, parent, parameters):
        super(TestShell, self).__init__(parent)
        self.action = TestShellRetry()
        self.action.job = self.job
        self.action.section = self.action_type
        parent.add_action(self.action, parameters)

    @classmethod
    def accepts(cls, device, parameters):  # pylint: disable=unused-argument
        if ('definition' in parameters) or ('definitions' in parameters):
            return True, 'accepted'
        return False, '"definition" or "definitions" not in parameters'

    @classmethod
    def needs_deployment_data(cls):
        return True

    @classmethod
    def needs_overlay(cls):
        return True

    @classmethod
    def has_shell(cls):
        return True


class TestShellRetry(RetryAction):

    name = "lava-test-retry"
    description = "Retry wrapper for lava-test-shell"
    summary = "Retry support for Lava Test Shell"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestShellAction())


# FIXME: move to utils and call inside the overlay
class PatternFixup(object):

    def __init__(self, testdef, count):
        """
        Like all good arrays, the count is expected to start at zero.
        Avoid calling from validate() or populate() - this needs the
        RepoAction to be running.
        """
        super(PatternFixup, self).__init__()
        self.pat = DEFAULT_V1_PATTERN
        self.fixup = DEFAULT_V1_FIXUP
        if isinstance(testdef, dict) and 'metadata' in testdef:
            self.testdef = testdef
            self.name = "%d_%s" % (count, reduce(dict.get, ['metadata', 'name'], testdef))
        else:
            self.testdef = {}
            self.name = None

    def valid(self):
        return self.fixupdict() and self.pattern() and self.name

    def update(self, pattern, fixupdict):
        if not isinstance(pattern, str):
            raise TestError("Unrecognised test parse pattern type: %s" % type(pattern))
        try:
            self.pat = re.compile(pattern, re.M)
        except re.error as exc:
            raise TestError("Error parsing regular expression %r: %s" % (self.pat, exc.message))
        self.fixup = fixupdict

    def fixupdict(self):
        if 'parse' in self.testdef and 'fixupdict' in self.testdef['parse']:
            self.fixup = self.testdef['parse']['fixupdict']
        return self.fixup

    def pattern(self):
        if 'parse' in self.testdef and 'pattern' in self.testdef['parse']:
            self.pat = self.testdef['parse']['pattern']
            if not isinstance(self.pat, str):
                raise TestError("Unrecognised test parse pattern type: %s" % type(self.pat))
            try:
                self.pat = re.compile(self.pat, re.M)
            except re.error as exc:
                raise TestError("Error parsing regular expression %r: %s" % (self.pat, exc.message))
        return self.pat
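
# Illustrative example only: the V1 'parse' block which pattern() and
# fixupdict() read from a test definition looks roughly like this
# (the regular expression and mappings below are made-up values):
#
#   parse:
#     pattern: '(?P<test_case_id>[\w-]+):\s+(?P<result>(pass|fail|skip|unknown))'
#     fixupdict:
#       PASS: pass
#       FAIL: fail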


class TestShellAction(TestAction):
    """
    Sets up and runs the LAVA Test Shell Definition scripts.
    Supports a pre-command-list of operations necessary on the
    booted image before the test shell can be started.
    """

    name = "lava-test-shell"
    description = "Executing lava-test-runner"
    summary = "Lava Test Shell"

    def __init__(self):
        super(TestShellAction, self).__init__()
        self.signal_director = self.SignalDirector(None)  # no default protocol
        self.patterns = {}
        self.signal_match = SignalMatch()
        self.definition = None
        self.testset_name = None
        self.report = {}
        self.start = None
        self.testdef_dict = {}
        # noinspection PyTypeChecker
        self.pattern = PatternFixup(testdef=None, count=0)
        self.current_run = None

    def _reset_patterns(self):
        # Extend the list of patterns when creating subclasses.
        self.patterns = {
            "exit": "<LAVA_TEST_RUNNER>: exiting",
            "error": "<LAVA_TEST_RUNNER>: ([^ ]+) installer failed, skipping",
            "eof": pexpect.EOF,
            "timeout": pexpect.TIMEOUT,
            "signal": r"<LAVA_SIGNAL_(\S+) ([^>]+)>",
        }
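        # For illustration only, the "signal" pattern above matches lines which
        # the overlay helper scripts emit on the serial console, for example:
        #   <LAVA_SIGNAL_STARTRUN 0_smoke-tests 1234-abcd-5678>
        #   <LAVA_SIGNAL_TESTCASE TEST_CASE_ID=linux-posix-pwd RESULT=pass>
        #   <LAVA_SIGNAL_ENDRUN 0_smoke-tests 1234-abcd-5678>
        # (the names and uuid are made up); the first group is the signal name
        # and the second group carries its space-separated parameters.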
        # noinspection PyTypeChecker
        self.pattern = PatternFixup(testdef=None, count=0)

    def validate(self):
        if "definitions" in self.parameters:
            for testdef in self.parameters["definitions"]:
                if "repository" not in testdef:
                    self.errors = "Repository missing from test definition"
        self._reset_patterns()
        super(TestShellAction, self).validate()

    def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-locals
        """
        Common run function for subclasses which define custom patterns
        """
        super(TestShellAction, self).run(connection, max_end_time, args)

        # Get the connection, specific to this namespace
        connection_namespace = self.parameters.get('connection-namespace', None)
        parameters = None
        if connection_namespace:
            self.logger.debug("Using connection namespace: %s", connection_namespace)
            parameters = {"namespace": connection_namespace}
        else:
            parameters = {'namespace': self.parameters.get('namespace', 'common')}
            self.logger.debug("Using namespace: %s", parameters['namespace'])
        connection = self.get_namespace_data(
            action='shared', label='shared', key='connection', deepcopy=False, parameters=parameters)

        if not connection:
            raise LAVABug("No connection retrieved from namespace data")

        self.signal_director.connection = connection

        pattern_dict = {self.pattern.name: self.pattern}
        # pattern dictionary is the lookup from the STARTRUN to the parse pattern.
        self.set_namespace_data(action=self.name, label=self.name, key='pattern_dictionary', value=pattern_dict)
        if self.character_delay > 0:
            self.logger.debug("Using a character delay of %i (ms)", self.character_delay)

        if not connection.prompt_str:
            connection.prompt_str = [self.job.device.get_constant(
                'default-shell-prompt')]
            # FIXME: This should be logged whenever prompt_str is changed, by the connection object.
            self.logger.debug("Setting default test shell prompt %s", connection.prompt_str)
        connection.timeout = self.connection_timeout
        # force an initial prompt - not all shells will print a prompt without a nudge.
        connection.sendline(connection.check_char)
        self.wait(connection)

        # use the string instead of self.name so that inheriting classes (like multinode)
        # still pick up the correct command.
        running = self.parameters['stage']
        pre_command_list = self.get_namespace_data(action='test', label="lava-test-shell", key='pre-command-list')
        lava_test_results_dir = self.get_namespace_data(
            action='test', label='results', key='lava_test_results_dir')
        lava_test_sh_cmd = self.get_namespace_data(action='test', label='shared', key='lava_test_sh_cmd')

        if pre_command_list and running == 0:
            for command in pre_command_list:
                connection.sendline(command, delay=self.character_delay)

        if lava_test_results_dir is None:
            raise JobError("Nothing to run. Maybe the 'deploy' stage is missing, "
                           "otherwise this is a bug which should be reported.")

        self.logger.debug("Using %s" % lava_test_results_dir)
        connection.sendline('ls -l %s/' % lava_test_results_dir, delay=self.character_delay)
        if lava_test_sh_cmd:
            connection.sendline('export SHELL=%s' % lava_test_sh_cmd, delay=self.character_delay)

        try:
            feedbacks = []
            for feedback_ns in self.data.keys():  # pylint: disable=no-member
                if feedback_ns == self.parameters.get('namespace'):
                    continue
                feedback_connection = self.get_namespace_data(
                    action='shared', label='shared', key='connection',
                    deepcopy=False, parameters={"namespace": feedback_ns})
                if feedback_connection:
                    self.logger.debug("Will listen to feedbacks from '%s' for 1 second",
                                      feedback_ns)
                    feedbacks.append((feedback_ns, feedback_connection))

            with connection.test_connection() as test_connection:
                # the structure of lava-test-runner means that there is just one TestAction and it must run all definitions
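                # As an illustration only (the directory name varies per job),
                # the command sent below ends up looking something like:
                #   /lava-1234/bin/lava-test-runner /lava-1234/0
                # where the trailing number is the 'stage' taken from the parameters.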
                test_connection.sendline(
                    "%s/bin/lava-test-runner %s/%s" % (
                        lava_test_results_dir,
                        lava_test_results_dir,
                        running),
                    delay=self.character_delay)

                test_connection.timeout = min(self.timeout.duration, self.connection_timeout.duration)
                self.logger.info("Test shell timeout: %ds (minimum of the action and connection timeout)",
                                 test_connection.timeout)

                # Because of the feedback connections, we use a small value
                # for the timeout. This lets us grab the feedback regularly.
                last_check = time.time()
                while self._keep_running(test_connection, test_connection.timeout, connection.check_char):
                    # Only grab the feedbacks every test_connection.timeout
                    if feedbacks and time.time() - last_check > test_connection.timeout:
                        for feedback in feedbacks:
                            # The timeout is really small because the goal is only
                            # to clean the buffer of the feedback connections:
                            # the characters are already in the buffer.
                            # With a higher timeout, this can have a big impact on
                            # the performance of the overall loop.
                            bytes_read = feedback[1].listen_feedback(timeout=1)
                            if bytes_read > 1:
                                self.logger.debug("Listened to connection for namespace '%s' done", feedback[0])
                        last_check = time.time()
        finally:
            if self.current_run is not None:
                self.logger.error("Marking unfinished test run as failed")
                self.current_run["duration"] = "%.02f" % (time.time() - self.start)
                self.logger.results(self.current_run)  # pylint: disable=no-member
                self.current_run = None

        # Only print if the report is not empty
        if self.report:
            self.logger.debug(yaml.dump(self.report, default_flow_style=False))
        if self.errors:
            raise TestError(self.errors)
        return connection

    def pattern_error(self, test_connection):
        (testrun, ) = test_connection.match.groups()
        self.logger.error("Unable to start testrun %s. "
                          "Read the log for more details.", testrun)
        self.errors = "Unable to start testrun %s" % testrun
        # This is not accurate but required when exiting.
        self.start = time.time()
        self.current_run = {
            "definition": "lava",
            "case": testrun,
            "result": "fail"
        }
        return True

    def signal_start_run(self, params):
        self.signal_director.test_uuid = params[1]
        self.definition = params[0]
        uuid = params[1]
        self.start = time.time()
        self.logger.info("Starting test lava.%s (%s)", self.definition, uuid)
        # set the pattern for this run from pattern_dict
        testdef_index = self.get_namespace_data(action='test-definition', label='test-definition',
                                                key='testdef_index')
        uuid_list = self.get_namespace_data(action='repo-action', label='repo-action', key='uuid-list')
        for (key, value) in enumerate(testdef_index):
            if self.definition == "%s_%s" % (key, value):
                pattern_dict = self.get_namespace_data(action='test', label=uuid_list[key], key='testdef_pattern')
                pattern = pattern_dict['testdef_pattern']['pattern']
                fixup = pattern_dict['testdef_pattern']['fixupdict']
                self.patterns.update({'test_case_result': re.compile(pattern, re.M)})
                self.pattern.update(pattern, fixup)
                self.logger.info("Enabling test definition pattern %r" % pattern)
                self.logger.info("Enabling test definition fixup %r" % self.pattern.fixup)
        self.current_run = {
            "definition": "lava",
            "case": self.definition,
            "uuid": uuid,
            "result": "fail"
        }
        testdef_commit = self.get_namespace_data(
            action='test', label=uuid, key='commit-id')
        if testdef_commit:
            self.current_run.update({
                'commit_id': testdef_commit
            })

    def signal_end_run(self, params):
        self.definition = params[0]
        uuid = params[1]
        # remove the pattern for this run from pattern_dict
        self._reset_patterns()
        # catch error in ENDRUN being handled without STARTRUN
        if not self.start:
            self.start = time.time()
        self.logger.info("Ending use of test pattern.")
        self.logger.info("Ending test lava.%s (%s), duration %.02f",
                         self.definition, uuid,
                         time.time() - self.start)
        self.current_run = None
        res = {
            "definition": "lava",
            "case": self.definition,
            "uuid": uuid,
            'repository': self.get_namespace_data(
                action='test', label=uuid, key='repository'),
            'path': self.get_namespace_data(
                action='test', label=uuid, key='path'),
            "duration": "%.02f" % (time.time() - self.start),
            "result": "pass"
        }
        revision = self.get_namespace_data(action='test', label=uuid, key='revision')
        res['revision'] = revision if revision else 'unspecified'
        res['namespace'] = self.parameters['namespace']
        connection_namespace = self.parameters.get('connection_namespace', None)
        if connection_namespace:
            res['connection-namespace'] = connection_namespace
        commit_id = self.get_namespace_data(action='test', label=uuid, key='commit-id')
        if commit_id:
            res['commit_id'] = commit_id

        self.logger.results(res)  # pylint: disable=no-member
        self.start = None

    @nottest
    def signal_test_case(self, params):
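        # Illustrative only: 'params' is the space-separated parameter list from
        # a signal such as
        #   <LAVA_SIGNAL_TESTCASE TEST_CASE_ID=network-up RESULT=pass MEASUREMENT=2.5 UNITS=seconds>
        # which handle_testcase() parses into key/value pairs (test_case_id,
        # result and, optionally, measurement and units) for SignalMatch.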
        # If the STARTRUN signal was not received correctly, we cannot continue
        # as the test_uuid is missing.
        # This only happens when the signal string is split by kernel messages.
        if self.signal_director.test_uuid is None:
            self.logger.error("Unknown test uuid. The STARTRUN signal for this test action was not received correctly.")
            raise TestError("Invalid TESTCASE signal")
        try:
            data = handle_testcase(params)
            # get the fixup from the pattern_dict
            res = self.signal_match.match(data, fixupdict=self.pattern.fixupdict())
        except (JobError, TestError) as exc:
            self.logger.error(str(exc))
            return True

        p_res = self.get_namespace_data(action='test', label=self.signal_director.test_uuid, key='results')
        if not p_res:
            p_res = OrderedDict()
            self.set_namespace_data(
                action='test', label=self.signal_director.test_uuid, key='results', value=p_res)

        # prevent losing data in the update
        # FIXME: support parameters and retries
        if res["test_case_id"] in p_res:
            raise JobError("Duplicate test_case_id in results: %s" % res["test_case_id"])
        # turn the result dict inside out to get the unique
        # test_case_id/testset_name as key and result as value
        res_data = {
            'definition': self.definition,
            'case': res["test_case_id"],
            'result': res["result"]
        }
        # check for measurements
        if 'measurement' in res:
            try:
                measurement = decimal.Decimal(res['measurement'])
            except decimal.InvalidOperation:
                raise TestError("Invalid measurement %s" % res['measurement'])
            res_data['measurement'] = measurement
            if 'units' in res:
                res_data['units'] = res['units']

        if self.testset_name:
            res_data['set'] = self.testset_name
            self.report[res['test_case_id']] = {
                'set': self.testset_name,
                'result': res['result']
            }
        else:
            self.report[res['test_case_id']] = res['result']
        # Send the results back
        self.logger.results(res_data)  # pylint: disable=no-member

    @nottest
    def signal_test_reference(self, params):
        if len(params) != 3:
            raise TestError("Invalid use of TESTREFERENCE")
        res_dict = {
            'case': params[0],
            'definition': self.definition,
            'result': params[1],
            'reference': params[2],
        }
        if self.testset_name:
            res_dict.update({'set': self.testset_name})
        self.logger.results(res_dict)  # pylint: disable=no-member

    @nottest
    def signal_test_feedback(self, params):
        feedback_ns = params[0]
        if feedback_ns not in self.data.keys():
            self.logger.error("%s is not a valid namespace")
            return
        self.logger.info("Requesting feedback from namespace: %s", feedback_ns)
        feedback_connection = self.get_namespace_data(
            action='shared', label='shared', key='connection',
            deepcopy=False, parameters={"namespace": feedback_ns})
        feedback_connection.listen_feedback(timeout=1)

    @nottest
    def signal_test_set(self, params):
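        # Illustrative only: TESTSET signals are expected to arrive as
        #   <LAVA_SIGNAL_TESTSET START some-set-name>
        #   <LAVA_SIGNAL_TESTSET STOP>
        # so params[0] carries the action and, for START, the next element
        # carries the test set name.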
        name = None
        action = params.pop(0)
        if action == "START":
            name = "testset_" + action.lower()
            try:
                self.testset_name = params[0]
            except IndexError:
                raise JobError("Test set declared without a name")
            self.logger.info("Starting test_set %s", self.testset_name)
        elif action == "STOP":
            self.logger.info("Closing test_set %s", self.testset_name)
            self.testset_name = None
            name = "testset_" + action.lower()
        return name

    @nottest
    def pattern_test_case(self, test_connection):
        match = test_connection.match
        if match is pexpect.TIMEOUT:
            self.logger.warning("err: lava_test_shell has timed out (test_case)")
            return False
        res = self.signal_match.match(match.groupdict(), fixupdict=self.pattern.fixupdict())
        self.logger.debug("outer_loop_result: %s" % res)
        return True

    @nottest
    def pattern_test_case_result(self, test_connection):
        res = test_connection.match.groupdict()
        fixupdict = self.pattern.fixupdict()
        if res['result'] in fixupdict:
            res['result'] = fixupdict[res['result']]
        if res:
            # disallow whitespace in test_case_id
            test_case_id = "%s" % res['test_case_id'].replace('/', '_')
            if ' ' in test_case_id.strip():
                self.logger.debug("Skipping invalid test_case_id '%s'", test_case_id.strip())
                return True
            res_data = {
                'definition': self.definition,
                'case': res["test_case_id"],
                'result': res["result"]
            }
            # check for measurements
            if 'measurement' in res:
                try:
                    measurement = decimal.Decimal(res['measurement'])
                except decimal.InvalidOperation:
                    raise TestError("Invalid measurement %s" % res['measurement'])
                res_data['measurement'] = measurement
                if 'units' in res:
                    res_data['units'] = res['units']

            self.logger.results(res_data)  # pylint: disable=no-member
            self.report[res["test_case_id"]] = res["result"]
        return True

    def check_patterns(self, event, test_connection, check_char):  # pylint: disable=unused-argument
        """
        Defines the base set of pattern responses.
        Stores the results of testcases inside the TestAction
        Call from subclasses before checking subclass-specific events.
        """
        ret_val = False
        if event == "exit":
            self.logger.info("ok: lava_test_shell seems to have completed")
            self.testset_name = None

        elif event == "error":
            # Parsing is not finished
            ret_val = self.pattern_error(test_connection)

        elif event == "eof":
            self.testset_name = None
            raise InfrastructureError("lava_test_shell connection dropped.")

        elif event == "timeout":
            # allow feedback in long runs
            ret_val = True

        elif event == "signal":
            name, params = test_connection.match.groups()
            self.logger.debug("Received signal: <%s> %s" % (name, params))
            params = params.split()
            if name == "STARTRUN":
                self.signal_start_run(params)
            elif name == "ENDRUN":
                self.signal_end_run(params)
            elif name == "TESTCASE":
                self.signal_test_case(params)
            elif name == "TESTFEEDBACK":
                self.signal_test_feedback(params)
            elif name == "TESTREFERENCE":
                self.signal_test_reference(params)
            elif name == "TESTSET":
                ret = self.signal_test_set(params)
                if ret:
                    name = ret
            elif name == "TESTRAISE":
                raise TestError(' '.join(params))

            self.signal_director.signal(name, params)
            ret_val = True

        elif event == "test_case":
            ret_val = self.pattern_test_case(test_connection)
        elif event == 'test_case_result':
            ret_val = self.pattern_test_case_result(test_connection)
        return ret_val
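
    # A rough sketch (hypothetical names) of how a subclass is expected to
    # extend check_patterns(), as described in its docstring:
    #
    #   class ExtendedTestShellAction(TestShellAction):
    #       def __init__(self):
    #           super(ExtendedTestShellAction, self).__init__()
    #           self.patterns.update({"send": r"<LAVA_SEND ([^>]+)>"})
    #
    #       def check_patterns(self, event, test_connection, check_char):
    #           ret_val = super(ExtendedTestShellAction, self).check_patterns(
    #               event, test_connection, check_char)
    #           if event == "send":
    #               ret_val = self.handle_send(test_connection)  # hypothetical
    #           return ret_val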

    def _keep_running(self, test_connection, timeout, check_char):
        if 'test_case_result' in self.patterns:
            self.logger.info("Test case result pattern: %r" % self.patterns['test_case_result'])
        retval = test_connection.expect(list(self.patterns.values()), timeout=timeout)
        return self.check_patterns(list(self.patterns.keys())[retval], test_connection, check_char)

    class SignalDirector(object):

        # FIXME: create proxy handlers
        def __init__(self, protocol=None):
            """
            Base SignalDirector for singlenode jobs.
            MultiNode and LMP jobs need to create a suitable derived class as both also require
            changes equivalent to the old _keep_running functionality.

            SignalDirector is the link between the Action and the Connection. The Action uses
            the SignalDirector to interact with the I/O over the Connection.
            """
            self.protocol = protocol  # communicate externally over the protocol API
            self.connection = None  # communicate with the device
            self.logger = logging.getLogger("dispatcher")
            self.test_uuid = None

        def setup(self, parameters):
            """
            Allows the parent Action to pass extra data to a customised SignalDirector
            """
            pass
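
        # For illustration only (the handler name below is hypothetical): a
        # derived SignalDirector reacting to "<LAVA_SIGNAL_SEND ...>" would
        # define
        #   def _on_send(self, *params): ...
        # and signal() dispatches to it through the getattr lookup below.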

        def signal(self, name, params):
            handler = getattr(self, "_on_" + name.lower(), None)
            if handler:
                try:
                    # The alternative here is to drop the getattr and have a long if:elif:elif:else.
                    # Without python support for switch, this gets harder to read than using
                    # a getattr lookup for the callable (codehelp). So disable checkers:
                    # noinspection PyCallingNonCallable
                    handler(*params)
                except TypeError as exc:
                    # handle serial corruption which can overlap kernel messages onto test output.
                    self.logger.exception(str(exc))
                    raise TestError("Unable to handle the test shell signal correctly: %s" % str(exc))
                except JobError as exc:
                    self.logger.error("job error: handling signal %s failed: %s", name, exc)
                    return False
                return True