aboutsummaryrefslogtreecommitdiff
path: root/lava-v2-submit-jobs.py
diff options
context:
space:
mode:
authorMatt Hart <github@blacklabsystems.com>2016-11-14 10:55:47 +0000
committerMilo Casagrande <milo@ubuntu.com>2016-11-14 11:55:47 +0100
commitebedad46743d86d20b2ea84880a752f5d0a1d9f5 (patch)
treea4afd37900e1f884f67c876d7cfe129c38ba7b84 /lava-v2-submit-jobs.py
parent2be496000efb70737c7c578d958f223503988330 (diff)
LAVA V2 Initial Job Support
Job creation and submission is now working. There will need to be *lots* of templates added in future but this gets us started. Results are not being recorded yet but there is an example config/script to show how this can be done. lava-v2-jobs-from-api.py: generates LAVA v2 jobs by making requests to the KCI API. lava-v2-submit-jobs.py: Submits LAVA v2 jobs to a lab if a pipeline device exists that matches the requested device_type. reactobus-kernel-ci-config.yaml: Configuration for reactobus (https://github.com/ivoire/ReactOBus) which will be the daemon waiting for ZMQ notifications that the job has completed reactobus-job-parser.py: Basic beginnings of a script which is executed by reactobus, to parse the ZMQ response and eventually update the KCI API with the result. Add some initial templates for qemu/apq8016-sbc devices.
Diffstat (limited to 'lava-v2-submit-jobs.py')
-rwxr-xr-xlava-v2-submit-jobs.py141
1 files changed, 141 insertions, 0 deletions
diff --git a/lava-v2-submit-jobs.py b/lava-v2-submit-jobs.py
new file mode 100755
index 0000000..e97c3a2
--- /dev/null
+++ b/lava-v2-submit-jobs.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# Usage ./lava-v2-submit-jobs.py --username test --token xxxx --server http://server/RPC2 --jobs jobfolder
+
+import os
+import xmlrpclib
+import yaml
+import subprocess
+import fnmatch
+import time
+import re
+import argparse
+import httplib
+
+from lib import utils
+from lib import configuration
+
# Maps job-file path -> list of submitted LAVA job IDs (None until the job
# is submitted by submit_jobs; populated by load_jobs).
job_map = {}
+
+
def submit_jobs(connection, server, bundle_stream=None):
    """Submit every job file recorded in job_map to the LAVA scheduler.

    connection -- authenticated XML-RPC proxy to the LAVA server
    server -- server URL (currently unused here; kept for interface
              compatibility with callers)
    bundle_stream -- unused placeholder (V1-era parameter)

    Jobs whose 'device_type' matches an online device type are submitted
    and their returned job ID(s) stored back into job_map; jobs for
    offline/unknown device types, multinode ('device_group') and
    ('vm_group') jobs are skipped with a message.
    """
    online_devices, offline_devices = gather_devices(connection)
    online_device_types, offline_device_types = gather_device_types(connection)
    print("Submitting Jobs to Server...")
    for job in job_map:
        try:
            with open(job, 'rb') as stream:
                job_data = stream.read()
            # NOTE(review): yaml.load on data cloned from an external repo
            # can construct arbitrary Python objects; prefer yaml.safe_load
            # if the job definitions are not fully trusted.
            job_info = yaml.load(job_data)
            # Check if requested device(s) are available
            if 'device_type' in job_info:
                if job_info['device_type'] in offline_device_types:
                    print("All device types: %s are OFFLINE, skipping..." % job_info['device_type'])
                    print(os.path.basename(job) + ' : skip')
                elif job_info['device_type'] in online_device_types:
                    # Bug fix: removed a stray 'pass' that preceded the
                    # submit call (dead statement left over from editing).
                    jobs = connection.scheduler.submit_job(job_data)
                    # A single submission returns an int; normalise to a
                    # list of ID strings so job_map values are uniform.
                    if isinstance(jobs, int):
                        jobs = str(jobs).split()
                    job_map[job] = jobs
                else:
                    print("No device-type available on server, skipping...")
                    print(os.path.basename(job) + ' : skip')
            elif 'device_group' in job_info:
                print("Multinode Job Detected! Not supported yet :(")
            elif 'vm_group' in job_info:
                print("VMGroup Job Detected! Not supported yet :(")
            else:
                print("Should never get here - no idea what job type")
                print(os.path.basename(job) + ' : skip')
        except (xmlrpclib.ProtocolError, xmlrpclib.Fault, IOError, ValueError) as e:
            print("JSON VALIDATION ERROR!")
            print(job)
            print(e)
            continue
+
+
def load_jobs(top):
    """Walk 'top' recursively and register every *.yaml file in job_map.

    Each matching path is added as a key with value None; the value is
    filled in later when the job is submitted.
    """
    for dirpath, _dirs, filenames in os.walk(top):
        matches = fnmatch.filter(filenames, '*.yaml')
        for name in matches:
            job_map[os.path.join(dirpath, name)] = None
+
+
def retrieve_jobs(jobs):
    """Clone the git repository holding the LAVA job definitions.

    jobs -- git repository URL/path to clone into the current directory.
    Exits the process with status 1 if the clone fails.
    """
    try:
        print("Cloning LAVA Jobs...")
        # Security fix: pass argv as a list (shell=False) instead of an
        # interpolated 'git clone %s' shell string, so a crafted repo URL
        # cannot inject shell commands.
        subprocess.check_output(['git', 'clone', jobs])
        print("Clone Successful!")
        print("clone-jobs : pass")
    except subprocess.CalledProcessError:
        print("ERROR!")
        print("Unable to clone %s" % jobs)
        print("clone-jobs : fail")
        exit(1)
+
+
def gather_devices(connection):
    """Partition the server's pipeline devices into online/offline maps.

    Returns (online_devices, offline_devices), each mapping the device
    hostname (field 0) to 1. Only pipeline-capable devices (field 4
    truthy) are considered; a device with status (field 2) 'offline'
    goes into the offline map, anything else counts as online.
    """
    print("Gathering Devices...")
    online_devices = {}
    offline_devices = {}
    for device in connection.scheduler.all_devices():
        if not device[4]:
            # Skip non-pipeline (V1-only) devices entirely.
            continue
        bucket = offline_devices if device[2] == 'offline' else online_devices
        bucket[device[0]] = 1
    print("Gathered Devices Successfully!")
    return online_devices, offline_devices
+
+
def gather_device_types(connection):
    """Partition the server's device types into online/offline maps.

    Returns (online_device_types, offline_device_types), each mapping the
    type name to a device count:
      * idle == busy == offline == 0  -> retired: offline map, count 0
      * idle > 0 or busy > 0          -> online map, count idle + busy
      * otherwise                     -> offline map, count 'offline'
    Entries that are not dicts are ignored (some server versions mix in
    plain strings).
    """
    print("Gathering Device Types...")
    online_device_types = {}
    offline_device_types = {}
    for dt in connection.scheduler.all_device_types():
        if not isinstance(dt, dict):
            continue
        name = dt['name']
        idle = dt['idle']
        busy = dt['busy']
        if idle == 0 and busy == 0 and dt['offline'] == 0:
            # Retired type: no devices at all.
            offline_device_types[name] = 0
        elif idle > 0 or busy > 0:
            # At least one device is usable right now.
            online_device_types[name] = idle + busy
        else:
            # All devices of this type are offline.
            offline_device_types[name] = dt['offline']
    print("Gathered Device Types Successfully!")
    return online_device_types, offline_device_types
+
+
def main(args):
    """Entry point: build config, connect to LAVA, gather and submit jobs.

    args -- dict of parsed command-line options (see the argparse setup).
    Optionally clones a job repo first, then loads every *.yaml job under
    the requested folder (or the cwd) and submits them. Exits with 0.
    """
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"),
                               config.get("token"),
                               config.get("server"))
    connection = utils.connect(url)

    repo = config.get("repo")
    if repo:
        retrieve_jobs(repo)

    jobs_dir = config.get("jobs")
    if jobs_dir:
        load_jobs(jobs_dir)
        print("Loading jobs from top folder " + str(jobs_dir))
    else:
        # No folder given: scan everything below the current directory.
        load_jobs(os.getcwd())

    submit_jobs(connection, config.get("server"))
    exit(0)
+
if __name__ == '__main__':
    # Command-line options; values are merged with --config by main().
    cli_options = (
        ("--config", {"help": "configuration for the LAVA server"}),
        ("--jobs", {"help": "absolute path to top jobs folder (default scans the whole cwd)"}),
        ("--username", {"help": "username for the LAVA server"}),
        ("--token", {"help": "token for LAVA server api"}),
        ("--server", {"help": "server url for LAVA server"}),
        ("--repo", {"help": "git repo for LAVA jobs"}),
        ("--timeout", {"type": int, "default": -1,
                       "help": "polling timeout in seconds. default is -1."}),
    )
    parser = argparse.ArgumentParser()
    for flag, kwargs in cli_options:
        parser.add_argument(flag, **kwargs)
    main(vars(parser.parse_args()))