diff options
-rw-r--r--  lava_dispatcher/actions/launch_control.py                           | 35
-rw-r--r--  lava_dispatcher/connection.py                                       |  2
-rw-r--r--  lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf |  2
-rwxr-xr-x  setup.py                                                            |  1
4 files changed, 31 insertions, 9 deletions
diff --git a/lava_dispatcher/actions/launch_control.py b/lava_dispatcher/actions/launch_control.py index b38990f53..3fec575e1 100644 --- a/lava_dispatcher/actions/launch_control.py +++ b/lava_dispatcher/actions/launch_control.py @@ -24,6 +24,9 @@ import os import shutil import tarfile import logging +import urlparse + +from lava_tool.authtoken import AuthenticatingServerProxy, MemoryAuthBackend from lava_dispatcher.actions import BaseAction from lava_dispatcher.client.base import OperationFailed @@ -44,8 +47,8 @@ class SubmitResultAction(BaseAction): test_runs += bundle['test_runs'] return main_bundle - def submit_combine_bundles(self, status='pass', err_msg='', server=None, stream=None): - dashboard = _get_dashboard(server) + def submit_combine_bundles(self, status='pass', err_msg='', server=None, stream=None, token=None): + dashboard = _get_dashboard(server, token) main_bundle = self.combine_bundles() self.context.test_data.add_seriallog( self.context.client.get_seriallog()) @@ -67,7 +70,7 @@ class SubmitResultAction(BaseAction): logging.warning("Fault string: %s" % err.faultString) class cmd_submit_results_on_host(SubmitResultAction): - def run(self, server, stream): + def run(self, server, stream, token=None): #Upload bundle files to dashboard logging.info("Executing submit_results_on_host command") bundlename_list = [] @@ -87,7 +90,7 @@ class cmd_submit_results_on_host(SubmitResultAction): status = 'fail' err_msg = err_msg + " Some test case result appending failed." 
- self.submit_combine_bundles(status, err_msg, server, stream) + self.submit_combine_bundles(status, err_msg, server, stream, token) for bundle in bundlename_list: os.remove(bundle) @@ -98,7 +101,7 @@ class cmd_submit_results_on_host(SubmitResultAction): class cmd_submit_results(SubmitResultAction): - def run(self, server, stream, result_disk="testrootfs"): + def run(self, server, stream, result_disk="testrootfs", token=None): """Submit test results to a lava-dashboard server :param server: URL of the lava-dashboard server RPC endpoint :param stream: Stream on the lava-dashboard server to save the result to @@ -126,12 +129,12 @@ class cmd_submit_results(SubmitResultAction): if err_msg is None: err_msg = '' - self.submit_combine_bundles(status, err_msg, server, stream) + self.submit_combine_bundles(status, err_msg, server, stream, token) if status == 'fail': raise OperationFailed(err_msg) #util function, see if it needs to be part of utils.py -def _get_dashboard(server): +def _get_dashboard(server, token): if not server.endswith("/"): server = ''.join([server, "/"]) @@ -141,7 +144,23 @@ def _get_dashboard(server): server = ''.join([server, "xml-rpc/"]) logging.warn("Please use whole endpoint URL not just end with 'dashboard/', 'xml-rpc/' is added automatically now!!!") - srv = xmlrpclib.ServerProxy(server, allow_none=True, use_datetime=True) + parsed_server = urlparse.urlparse(server) + auth_backend = MemoryAuthBackend([]) + if parsed_server.username: + if token: + userless_server = '%s://%s%s' % ( + parsed_server.scheme, parsed_server.hostname, parsed_server.path) + auth_backend = MemoryAuthBackend([(parsed_server.username, userless_server, token)]) + else: + logging.warn( + "specifying a user without a token is unlikely to work") + else: + if token: + logging.warn( + "specifying a token without a user is probably useless") + + srv = AuthenticatingServerProxy( + server, allow_none=True, use_datetime=True, auth_backend=auth_backend) if server.endswith("xml-rpc/"): 
logging.warn("Please use RPC2 endpoint instead, xml-rpc is deprecated!!!") dashboard = srv diff --git a/lava_dispatcher/connection.py b/lava_dispatcher/connection.py index e0ad10d20..2c264c30d 100644 --- a/lava_dispatcher/connection.py +++ b/lava_dispatcher/connection.py @@ -102,7 +102,7 @@ class LavaConnection(object): class LavaConmuxConnection(LavaConnection): def _make_connection(self, sio): - cmd = "conmux-console %s" % self.device_option("hostname") + cmd = self.device_option("connection_command") proc = pexpect.spawn(cmd, timeout=1200, logfile=sio) #serial can be slow, races do funny things if you don't increase delay proc.delaybeforesend=1 diff --git a/lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf b/lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf index 427fa95e4..35b80ba7c 100644 --- a/lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf +++ b/lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf @@ -12,6 +12,8 @@ # 'qemu' is the other possible value at this time. client_type = master +connection_command = conmux-console %(hostname)s + # The bootloader commands to boot the device into the test image (we # assume that the device boots into the master image without bootloader # intervention). @@ -22,6 +22,7 @@ setup( }, install_requires=[ "pexpect >= 2.3", + "lava-tool", "json-schema-validator", ], setup_requires=[ |