author     Rémi Duraffort <remi.duraffort@linaro.org>  2018-02-09 09:57:55 +0100
committer  Stevan Radakovic <stevan.radakovic@linaro.org>  2018-03-22 08:38:15 +0000
commit     2d37b3416d72048b786e756be7e5bafa550b6de2 (patch)
tree       4e49a619f611f622137e12821fe22df5e9b70450 /lava_scheduler_app/tests
parent     1ae6dda0b963a03f1abb329e82584edcde35fa7c (diff)
Remove is_pipeline
Change-Id: I6c753e93b6e3e51baf5c3602c499ef9699309cfc
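
The net effect on the test code is easiest to see as a before/after sketch (model names come from lava_scheduler_app itself; the surrounding Django setup is assumed): device creation and device queries simply drop the is_pipeline flag, since every remaining device is a pipeline device.

    from lava_scheduler_app.models import Device, DeviceType

    dt = DeviceType.objects.first()  # any existing device type, e.g. one created by the test factory

    # Before this commit the tests created and filtered devices with the flag:
    #   Device(device_type=dt, hostname="fakeqemu1", is_pipeline=True)
    #   Device.objects.filter(device_type=dt, is_pipeline=True)
    # After it, the flag is gone and the equivalent calls are simply:
    device = Device(device_type=dt, hostname="fakeqemu1")
    devices = Device.objects.filter(device_type=dt)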
Diffstat (limited to 'lava_scheduler_app/tests')
-rw-r--r--  lava_scheduler_app/tests/test_api.py             2
-rw-r--r--  lava_scheduler_app/tests/test_pipeline.py       94
-rw-r--r--  lava_scheduler_app/tests/test_scheduler.py      16
-rw-r--r--  lava_scheduler_app/tests/test_state_machine.py  25
-rw-r--r--  lava_scheduler_app/tests/test_submission.py      3
5 files changed, 29 insertions(+), 111 deletions(-)
diff --git a/lava_scheduler_app/tests/test_api.py b/lava_scheduler_app/tests/test_api.py
index 492f55be4..899029a48 100644
--- a/lava_scheduler_app/tests/test_api.py
+++ b/lava_scheduler_app/tests/test_api.py
@@ -95,7 +95,7 @@ class TestSchedulerAPI(TestCaseWithFactory): # pylint: disable=too-many-ancesto
server = self.server_proxy('test', 'test')
self.assertEqual(
{'status': 'idle', 'job': None, 'offline_since': None, 'hostname': 'black01',
- 'offline_by': None, 'is_pipeline': False},
+ 'offline_by': None, 'is_pipeline': True},
server.scheduler.get_device_status('black01'))
def test_type_aliases(self):
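
For context, the dictionary asserted in the hunk above is what the scheduler XML-RPC API returns for a device. A hedged sketch of the same call made against a running instance (the URL is a placeholder; the /RPC2 endpoint and credentials depend on the deployment):

    import xmlrpc.client

    # Placeholder URL; real instances usually embed a username and API token.
    server = xmlrpc.client.ServerProxy("http://localhost/RPC2")
    status = server.scheduler.get_device_status("black01")
    # With is_pipeline removed from the Device model, the response still
    # carries the key, but it is now always True, e.g.:
    # {'status': 'idle', 'job': None, 'offline_since': None, 'hostname': 'black01',
    #  'offline_by': None, 'is_pipeline': True}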
diff --git a/lava_scheduler_app/tests/test_pipeline.py b/lava_scheduler_app/tests/test_pipeline.py
index 70f4fd0f2..ac9c574ad 100644
--- a/lava_scheduler_app/tests/test_pipeline.py
+++ b/lava_scheduler_app/tests/test_pipeline.py
@@ -74,7 +74,7 @@ class YamlFactory(ModelFactory):
if tags and not isinstance(tags, list):
tags = []
# a hidden device type will override is_public
- device = Device(device_type=device_type, is_public=is_public, hostname=hostname, is_pipeline=True, **kw)
+ device = Device(device_type=device_type, is_public=is_public, hostname=hostname, **kw)
if tags:
device.tags = tags
if DEBUG:
@@ -253,10 +253,8 @@ class TestPipelineSubmit(TestCaseWithFactory):
def test_exclusivity(self):
device = Device.objects.get(hostname="fakeqemu1")
- self.assertTrue(device.is_pipeline)
self.assertFalse(device.is_exclusive)
device = Device.objects.get(hostname="fakeqemu3")
- self.assertTrue(device.is_pipeline)
self.assertTrue(device.is_exclusive)
def test_context(self):
@@ -293,7 +291,6 @@ class TestPipelineSubmit(TestCaseWithFactory):
self.factory.make_device(device_type=mustang_type, hostname=hostname)
device = Device.objects.get(hostname="fakemustang1")
self.assertEqual('mustang-uefi', device.device_type.name)
- self.assertTrue(device.is_pipeline)
job_ctx = {
'tftp_mac': 'FF:01:00:69:AA:CC',
'extra_nfsroot_args': ',nolock',
@@ -943,37 +940,6 @@ class TestYamlMultinode(TestCaseWithFactory):
job = TestJob.objects.get(id=job.id)
self.assertNotEqual(job.sub_id, '')
- def test_mixed_multinode(self):
- user = self.factory.make_user()
- device_type = self.factory.make_device_type()
- self.factory.make_device(device_type, 'fakeqemu1')
- self.factory.make_device(device_type, 'fakeqemu2')
- self.factory.make_device(device_type, 'fakeqemu3')
- self.factory.make_device(device_type, 'fakeqemu4')
- submission = yaml.load(open(
- os.path.join(os.path.dirname(__file__), 'sample_jobs', 'kvm-multinode.yaml'), 'r'))
- role_list = submission['protocols'][MultinodeProtocol.name]['roles']
- for role in role_list:
- if 'tags' in role_list[role]:
- del role_list[role]['tags']
- job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
- self.assertEqual(len(job_list), 2)
- # make the list mixed
- fakeqemu1 = Device.objects.get(hostname='fakeqemu1')
- fakeqemu1.is_pipeline = False
- fakeqemu1.save(update_fields=['is_pipeline'])
- fakeqemu3 = Device.objects.get(hostname='fakeqemu3')
- fakeqemu3.is_pipeline = False
- fakeqemu3.save(update_fields=['is_pipeline'])
- device_list = Device.objects.filter(device_type=device_type, is_pipeline=True)
- self.assertEqual(len(device_list), 2)
- self.assertIsInstance(device_list, RestrictedResourceQuerySet)
- self.assertIsInstance(list(device_list), list)
- job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
- self.assertEqual(len(job_list), 2)
- for job in job_list:
- self.assertEqual(job.requested_device_type, device_type)
-
def test_multinode_with_retired(self): # pylint: disable=too-many-statements
"""
check handling with retired devices in device_list
@@ -992,64 +958,6 @@ class TestYamlMultinode(TestCaseWithFactory):
del role_list[role]['tags']
job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
self.assertEqual(len(job_list), 2)
- # make the list mixed
- fakeqemu1 = Device.objects.get(hostname='fakeqemu1')
- fakeqemu1.is_pipeline = False
- fakeqemu1.save(update_fields=['is_pipeline'])
- fakeqemu2 = Device.objects.get(hostname='fakeqemu2')
- fakeqemu3 = Device.objects.get(hostname='fakeqemu3')
- fakeqemu4 = Device.objects.get(hostname='fakeqemu4')
- device_list = Device.objects.filter(device_type=device_type, is_pipeline=True)
- self.assertEqual(len(device_list), 3)
- self.assertIsInstance(device_list, RestrictedResourceQuerySet)
- self.assertIsInstance(list(device_list), list)
- allowed_devices = []
- for device in list(device_list):
- if _check_submit_to_device([device], user):
- allowed_devices.append(device)
- self.assertEqual(len(allowed_devices), 3)
- self.assertIn(fakeqemu3, allowed_devices)
- self.assertIn(fakeqemu4, allowed_devices)
- self.assertIn(fakeqemu2, allowed_devices)
- self.assertNotIn(fakeqemu1, allowed_devices)
-
- # set one candidate device to RETIRED to force the bug
- fakeqemu4.health = Device.HEALTH_RETIRED
- fakeqemu4.save(update_fields=['health'])
- # refresh the device_list
- device_list = Device.objects.filter(device_type=device_type, is_pipeline=True).order_by('hostname')
- allowed_devices = []
- # test the old code to force the exception
- try:
- # by looping through in the test *and* in _check_submit_to_device
- # the retired device in device_list triggers the exception.
- for device in list(device_list):
- if _check_submit_to_device([device], user):
- allowed_devices.append(device)
- except DevicesUnavailableException:
- self.assertEqual(len(allowed_devices), 2)
- self.assertIn(fakeqemu4, device_list)
- self.assertEqual(fakeqemu4.health, Device.HEALTH_RETIRED)
- else:
- self.fail("Missed DevicesUnavailableException")
- allowed_devices = []
- allowed_devices.extend(_check_submit_to_device(list(device_list), user))
- self.assertEqual(len(allowed_devices), 2)
- self.assertIn(fakeqemu3, allowed_devices)
- self.assertIn(fakeqemu2, allowed_devices)
- self.assertNotIn(fakeqemu4, allowed_devices)
- self.assertNotIn(fakeqemu1, allowed_devices)
- allowed_devices = []
-
- # test improvement as there is no point wasting memory with a Query containing Retired.
- device_list = Device.objects.filter(
- Q(device_type=device_type), Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED))
- allowed_devices.extend(_check_submit_to_device(list(device_list), user))
- self.assertEqual(len(allowed_devices), 2)
- self.assertIn(fakeqemu3, allowed_devices)
- self.assertIn(fakeqemu2, allowed_devices)
- self.assertNotIn(fakeqemu4, allowed_devices)
- self.assertNotIn(fakeqemu1, allowed_devices)
def test_multinode_v2_metadata(self):
device_type = self.factory.make_device_type()
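
The deleted block above ends with the "no point wasting memory with a Query containing Retired" query; with is_pipeline gone, that same up-front exclusion of retired devices reads as follows (a sketch using the same Q-object form as the deleted lines; the device type lookup is illustrative):

    from django.db.models import Q
    from lava_scheduler_app.models import Device, DeviceType

    device_type = DeviceType.objects.get(name="qemu")  # hypothetical name; the tests use a factory-made type
    # Exclude retired devices before checking submit permissions, as the
    # deleted test did, minus the now-removed is_pipeline filter.
    device_list = Device.objects.filter(
        Q(device_type=device_type), ~Q(health=Device.HEALTH_RETIRED))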
diff --git a/lava_scheduler_app/tests/test_scheduler.py b/lava_scheduler_app/tests/test_scheduler.py
index 16b6cca40..d569871ca 100644
--- a/lava_scheduler_app/tests/test_scheduler.py
+++ b/lava_scheduler_app/tests/test_scheduler.py
@@ -73,7 +73,7 @@ class TestHealthCheckScheduling(TestCase):
self.user = User.objects.create(username="user-01")
self.last_hc03 = TestJob.objects.create(health_check=True, actual_device=self.device03,
user=self.user, submitter=self.user,
- start_time=timezone.now(),
+ start_time=timezone.now(), is_public=True,
state=TestJob.STATE_FINISHED, health=TestJob.HEALTH_COMPLETE)
self.device03.last_health_report_job = self.last_hc03
self.device03.save()
@@ -209,7 +209,7 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled now
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
schedule(DummyLogger())
self.device01.refresh_from_db()
@@ -221,7 +221,7 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled after the health check
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
self.device03.refresh_from_db()
self.last_hc03.submit_time = timezone.now() - timedelta(hours=25)
@@ -253,13 +253,13 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled now
j01 = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
j02 = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
j03 = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
schedule(DummyLogger())
@@ -314,7 +314,7 @@ class TestPriorities(TestCase):
jobs = []
for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM, TestJob.LOW]:
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None), priority=p)
jobs.append(j)
@@ -391,7 +391,7 @@ class TestPriorities(TestCase):
jobs = []
for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM, TestJob.LOW]:
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None), priority=p)
jobs.append(j)
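
Every TestJob.objects.create() in these scheduler tests now passes is_public=True together with a valid definition; the repeated pattern, condensed into one sketch (device_type01, user, _minimal_valid_job, DummyLogger and schedule are names from test_scheduler.py itself, assumed to be in scope):

    from lava_scheduler_app.models import TestJob

    job = TestJob.objects.create(
        requested_device_type=device_type01,   # DeviceType created in setUp()
        user=user, submitter=user,             # the test's User
        is_public=True,                        # now set explicitly on every job
        definition=_minimal_valid_job(None),   # helper defined in test_scheduler.py
        priority=TestJob.MEDIUM,               # optional; the priority tests vary this
    )
    schedule(DummyLogger())                    # run one scheduling pass, as the tests do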
diff --git a/lava_scheduler_app/tests/test_state_machine.py b/lava_scheduler_app/tests/test_state_machine.py
index 8bea4e1d8..0c88f298a 100644
--- a/lava_scheduler_app/tests/test_state_machine.py
+++ b/lava_scheduler_app/tests/test_state_machine.py
@@ -32,6 +32,18 @@ from lava_scheduler_app.models import (
)
+minimal_valid_job = yaml.dump("""
+job_name: minimal valid job
+visibility: public
+timeouts:
+ job:
+ minutes: 10
+ action:
+ minutes: 5
+actions: []
+""")
+
+
class TestTestJobStateMachine(TestCase):
def setUp(self):
@@ -41,7 +53,8 @@ class TestTestJobStateMachine(TestCase):
worker_host=self.worker)
self.user = User.objects.create(username="user-01")
self.job = TestJob.objects.create(requested_device_type=self.device_type,
- submitter=self.user, user=self.user)
+ submitter=self.user, user=self.user, is_public=True,
+ definition=minimal_valid_job)
def check_device(self, state, health):
self.device.refresh_from_db()
@@ -224,13 +237,13 @@ class TestTestJobStateMachine(TestCase):
self.job.save()
self.sub_job1 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job1.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job1.actual_device = self.device2
self.sub_job1.save()
self.sub_job2 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job2.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job2.actual_device = self.device3
self.sub_job2.save()
@@ -456,13 +469,13 @@ class TestTestJobStateMachine(TestCase):
self.job.save()
self.sub_job1 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job1.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job1.actual_device = self.device2
self.sub_job1.save()
self.sub_job2 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job2.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job2.actual_device = self.device3
self.sub_job2.save()
@@ -616,7 +629,7 @@ class TestWorkerStateMachine(TestCase):
worker_host=self.worker)
self.user = User.objects.create(username="user-01")
self.job = TestJob.objects.create(requested_device_type=self.device_type,
- submitter=self.user, user=self.user)
+ submitter=self.user, user=self.user, is_public=True)
def check_device(self, state, health):
self.device.refresh_from_db()
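
The minimal_valid_job added above is just enough of a job definition to satisfy validation (the module stores it via yaml.dump as the definition field). A standalone look at what that YAML contains, using plain PyYAML with no Django setup:

    import textwrap
    import yaml

    minimal = yaml.safe_load(textwrap.dedent("""\
        job_name: minimal valid job
        visibility: public
        timeouts:
          job:
            minutes: 10
          action:
            minutes: 5
        actions: []
        """))
    assert minimal["visibility"] == "public"
    assert minimal["timeouts"]["job"]["minutes"] == 10
    assert minimal["actions"] == []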
diff --git a/lava_scheduler_app/tests/test_submission.py b/lava_scheduler_app/tests/test_submission.py
index 7b4cdf457..3c89c4e29 100644
--- a/lava_scheduler_app/tests/test_submission.py
+++ b/lava_scheduler_app/tests/test_submission.py
@@ -179,15 +179,12 @@ class TestTestJob(TestCaseWithFactory): # pylint: disable=too-many-ancestors,to
self.factory.cleanup()
dt = self.factory.make_device_type(name='name')
device = self.factory.make_device(device_type=dt, hostname='name-1')
- device.is_pipeline = True
device.save()
definition = self.factory.make_job_data()
definition['visibility'] = {'group': ['newgroup']}
definition['job_name'] = 'unittest_visibility'
self.assertIsNotNone(yaml.dump(definition))
self.assertIsNotNone(list(Device.objects.filter(device_type=dt)))
- self.assertIsNotNone(list(Device.objects.filter(
- device_type=dt, is_pipeline=True)))
user = self.factory.make_user()
user.user_permissions.add(
Permission.objects.get(codename='add_testjob'))
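
The visibility change above builds a group-restricted definition as a Python dict before dumping it back to YAML; the fragment on its own (names taken from the test, plain PyYAML):

    import yaml

    definition = {
        "job_name": "unittest_visibility",
        "visibility": {"group": ["newgroup"]},  # restrict the job to members of 'newgroup'
    }
    print(yaml.dump(definition))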