-rw-r--r--  doc/v2/user-notifications.rst                               2
-rw-r--r--  lava_results_app/models.py                                  2
-rw-r--r--  lava_results_app/tests/test_names.py                        2
-rw-r--r--  lava_scheduler_app/admin.py                                 4
-rw-r--r--  lava_scheduler_app/api/__init__.py                         34
-rw-r--r--  lava_scheduler_app/api/devices.py                           7
-rw-r--r--  lava_scheduler_app/api/jobs.py                              1
-rw-r--r--  lava_scheduler_app/checks.py                                4
-rw-r--r--  lava_scheduler_app/managers.py                              5
-rw-r--r--  lava_scheduler_app/migrations/0036_remove_is_pipeline.py  23
-rw-r--r--  lava_scheduler_app/models.py                               63
-rw-r--r--  lava_scheduler_app/signals.py                               2
-rw-r--r--  lava_scheduler_app/tables.py                                2
-rw-r--r--  lava_scheduler_app/tests/test_api.py                        2
-rw-r--r--  lava_scheduler_app/tests/test_pipeline.py                  94
-rw-r--r--  lava_scheduler_app/tests/test_scheduler.py                 16
-rw-r--r--  lava_scheduler_app/tests/test_state_machine.py             25
-rw-r--r--  lava_scheduler_app/tests/test_submission.py                 3
-rw-r--r--  lava_scheduler_app/views.py                                17
-rw-r--r--  lava_server/api.py                                          4
-rw-r--r--  lava_server/management/commands/devices.py                  3
-rw-r--r--  lava_server/management/commands/jobs.py                    11
-rw-r--r--  lava_server/management/commands/lava-master.py              1
23 files changed, 95 insertions, 232 deletions
diff --git a/doc/v2/user-notifications.rst b/doc/v2/user-notifications.rst
index dab064cc6..63fe54fda 100644
--- a/doc/v2/user-notifications.rst
+++ b/doc/v2/user-notifications.rst
@@ -66,7 +66,7 @@ The **callback** section supports following options:
of the data and possible options are as following:
* **minimal** This will provide basic job info such as job id, status,
- submit_time, start_time, end_time, submitter_username, is_pipeline,
+ submit_time, start_time, end_time, submitter_username,
failure_comment, priority, description, actual_device_id, definition and
metadata.
* **logs** In addition to minimal data this will also attach the job log
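An illustrative sketch of the "minimal" callback payload after this change (field names follow the documentation above and the Notification payload later in this patch; the exact key spelling and all values are invented for the example):

    # Hypothetical "minimal" callback data -- note that is_pipeline is gone.
    minimal_callback_data = {
        "id": 1234,
        "status": "Complete",
        "submit_time": "2018-02-08 17:08:00",
        "start_time": "2018-02-08 17:10:00",
        "end_time": "2018-02-08 17:25:00",
        "submitter_username": "lava-user",
        "failure_comment": None,
        "priority": 50,
        "description": "qemu health check",
        "actual_device_id": "qemu01",
        "definition": "...",
        "metadata": {},
    }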
diff --git a/lava_results_app/models.py b/lava_results_app/models.py
index 5bcc80301..7b3e16a42 100644
--- a/lava_results_app/models.py
+++ b/lava_results_app/models.py
@@ -1181,7 +1181,7 @@ class QueryCondition(models.Model):
TestJob: [
"submitter", "start_time", "end_time", "state", "health", "actual_device",
"requested_device_type", "health_check", "user", "group",
- "priority", "is_pipeline", "description"],
+ "priority", "description"],
TestSuite: ["name"],
TestCase: ["name", "result", "measurement"],
NamedTestAttribute: []
diff --git a/lava_results_app/tests/test_names.py b/lava_results_app/tests/test_names.py
index a68d69403..88f3c5f93 100644
--- a/lava_results_app/tests/test_names.py
+++ b/lava_results_app/tests/test_names.py
@@ -70,7 +70,7 @@ class ModelFactory(object):
hostname = self.getUniqueString()
if tags and type(tags) != list:
tags = []
- device = Device(device_type=device_type, is_public=is_public, hostname=hostname, is_pipeline=True, **kw)
+ device = Device(device_type=device_type, is_public=is_public, hostname=hostname, **kw)
if tags:
device.tags = tags
logging.debug("making a device of type %s %s %s with tags '%s'"
diff --git a/lava_scheduler_app/admin.py b/lava_scheduler_app/admin.py
index 762e6b8fc..9c2632072 100644
--- a/lava_scheduler_app/admin.py
+++ b/lava_scheduler_app/admin.py
@@ -206,7 +206,7 @@ class DeviceAdmin(admin.ModelAdmin):
('Properties', {
'fields': (('device_type', 'hostname'), 'worker_host', 'device_version')}),
('Device owner', {
- 'fields': (('user', 'group'), ('physical_owner', 'physical_group'), 'is_public', 'is_pipeline')}),
+ 'fields': (('user', 'group'), ('physical_owner', 'physical_group'), 'is_public')}),
('Status', {
'fields': (('state', 'health'), ('last_health_report_job', 'current_job'))}),
('Advanced properties', {
@@ -217,7 +217,7 @@ class DeviceAdmin(admin.ModelAdmin):
readonly_fields = ('device_dictionary_jinja', 'state', 'current_job')
list_display = ('hostname', 'device_type', 'current_job', 'worker_host',
'state', 'health', 'has_health_check',
- 'health_check_enabled', 'is_public', 'is_pipeline',
+ 'health_check_enabled', 'is_public',
'valid_device', 'exclusive_device')
search_fields = ('hostname', 'device_type__name')
ordering = ['hostname']
diff --git a/lava_scheduler_app/api/__init__.py b/lava_scheduler_app/api/__init__.py
index 2ca4ef5e7..5b1307cde 100644
--- a/lava_scheduler_app/api/__init__.py
+++ b/lava_scheduler_app/api/__init__.py
@@ -310,7 +310,7 @@ class SchedulerAPI(ExposedAPI):
continue
devices_list.append(dev)
- return [[dev.hostname, dev.device_type.name, build_device_status_display(dev.state, dev.health), dev.current_job().pk if dev.current_job() else None, dev.is_pipeline]
+ return [[dev.hostname, dev.device_type.name, build_device_status_display(dev.state, dev.health), dev.current_job().pk if dev.current_job() else None, True]
for dev in devices_list]
def all_device_types(self):
@@ -605,7 +605,7 @@ class SchedulerAPI(ExposedAPI):
device_dict["job"] = None
device_dict["offline_since"] = None
device_dict["offline_by"] = None
- device_dict["is_pipeline"] = device.is_pipeline
+ device_dict["is_pipeline"] = True
current_job = device.current_job()
if current_job is not None:
@@ -801,6 +801,7 @@ class SchedulerAPI(ExposedAPI):
job.health = job.get_health_display()
job.submitter_username = job.submitter.username
job.absolute_url = job.get_absolute_url()
+ job.is_pipeline = True
except PermissionDenied:
raise xmlrpclib.Fault(
401, "Permission denied for user to job %s" % job_id)
@@ -858,26 +859,10 @@ class SchedulerAPI(ExposedAPI):
'sub_id': job.sub_id
})
- if job.is_pipeline:
- job_status.update({
- 'job_status': job.get_legacy_status_display(),
- 'bundle_sha1': ""
- })
- return job_status
-
- # DEPRECATED
- bundle_sha1 = ""
- if job.results_link:
- try:
- bundle_sha1 = job.results_link.split('/')[-2]
- except IndexError:
- pass
-
job_status.update({
'job_status': job.get_legacy_status_display(),
- 'bundle_sha1': bundle_sha1
+ 'bundle_sha1': ""
})
-
return job_status
def job_list_status(self, job_id_list):
@@ -1213,11 +1198,6 @@ class SchedulerAPI(ExposedAPI):
raise xmlrpclib.Fault(
404, "Device '%s' was not found." % hostname
)
- if not device.is_pipeline:
- raise xmlrpclib.Fault(
- 400, "Device '%s' is not a pipeline device" % hostname
- )
-
device_dict = device.load_configuration(output_format="raw")
if not device_dict:
raise xmlrpclib.Fault(
@@ -1263,11 +1243,11 @@ class SchedulerAPI(ExposedAPI):
"""
if not name:
- devices = Device.objects.filter(is_pipeline=True).exclude(health=Device.HEALTH_RETIRED)
+ devices = Device.objects.exclude(health=Device.HEALTH_RETIRED)
else:
- devices = Device.objects.filter(is_pipeline=True).exclude(health=Device.HEALTH_RETIRED).filter(device_type__name=name)
+ devices = Device.objects.exclude(health=Device.HEALTH_RETIRED).filter(device_type__name=name)
if not devices:
- devices = Device.objects.filter(is_pipeline=True).exclude(health=Device.HEALTH_RETIRED).filter(hostname=name)
+ devices = Device.objects.exclude(health=Device.HEALTH_RETIRED).filter(hostname=name)
if not devices and name:
raise xmlrpclib.Fault(
404,
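The XML-RPC changes above keep the is_pipeline key only for backwards compatibility and hard-code it to True. A minimal client-side sketch, assuming a local instance at a placeholder URL (a real deployment may require token authentication):

    # Hedged sketch of an XML-RPC client call; server URL and hostname are placeholders.
    import xmlrpclib  # xmlrpc.client on Python 3

    server = xmlrpclib.ServerProxy("http://localhost/RPC2")
    status = server.scheduler.get_device_status("black01")
    assert status["is_pipeline"] is True  # always True after this patch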
diff --git a/lava_scheduler_app/api/devices.py b/lava_scheduler_app/api/devices.py
index 5e12b1d6c..fa3201869 100644
--- a/lava_scheduler_app/api/devices.py
+++ b/lava_scheduler_app/api/devices.py
@@ -117,9 +117,8 @@ class SchedulerDevicesAPI(ExposedV2API):
try:
Device.objects.create(hostname=hostname, device_type=device_type,
user=user, group=group, is_public=public,
- worker_host=worker, is_pipeline=True,
state=Device.STATE_IDLE, health=health_val,
- description=description)
+ worker_host=worker, description=description)
except (IntegrityError, ValidationError) as exc:
raise xmlrpclib.Fault(
@@ -247,8 +246,7 @@ class SchedulerDevicesAPI(ExposedV2API):
"type": device.device_type.name,
"health": device.get_health_display(),
"state": device.get_state_display(),
- "current_job": current_job.pk if current_job else None,
- "pipeline": device.is_pipeline}
+ "current_job": current_job.pk if current_job else None}
ret.append(device_dict)
return ret
@@ -293,7 +291,6 @@ class SchedulerDevicesAPI(ExposedV2API):
"health_job": bool(device.get_health_check()),
"description": device.description,
"public": device.is_public,
- "pipeline": device.is_pipeline,
"has_device_dict": bool(device.load_configuration(output_format="raw")),
"worker": None,
"user": device.user.username if device.user else None,
diff --git a/lava_scheduler_app/api/jobs.py b/lava_scheduler_app/api/jobs.py
index 1702db48d..0ab2a5aa1 100644
--- a/lava_scheduler_app/api/jobs.py
+++ b/lava_scheduler_app/api/jobs.py
@@ -235,7 +235,6 @@ class SchedulerJobsAPI(ExposedV2API):
"device": device_hostname,
"device_type": device_type,
"health_check": job.health_check,
- "pipeline": job.is_pipeline,
"health": job.get_health_display(),
"state": job.get_state_display(),
"submitter": job.submitter.username,
diff --git a/lava_scheduler_app/checks.py b/lava_scheduler_app/checks.py
index 1601049ef..5c4adb576 100644
--- a/lava_scheduler_app/checks.py
+++ b/lava_scheduler_app/checks.py
@@ -32,7 +32,7 @@ from lava_scheduler_app.schema import SubmissionException
def check_health_checks(app_configs, **kwargs):
errors = []
- for device in Device.objects.filter(is_pipeline=True):
+ for device in Device.objects.all():
ht = device.get_health_check()
ht_disabled = device.device_type.disable_health_check
@@ -65,7 +65,7 @@ def check_health_checks(app_configs, **kwargs):
def check_device_configuration(app_configs, **kwargs):
errors = []
- for device in Device.objects.filter(Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED)):
+ for device in Device.objects.exclude(health=Device.HEALTH_RETIRED):
if not device.is_valid():
errors.append(Error('Invalid configuration', obj=device.hostname))
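Both checks now walk every device instead of filtering on is_pipeline. A rough sketch of exercising them from a Django shell on a configured instance (assuming each function returns its errors list, as the system-check API expects):

    # Sketch only: run the scheduler checks directly and print any problems.
    from lava_scheduler_app.checks import (
        check_device_configuration,
        check_health_checks,
    )

    problems = check_health_checks(None) + check_device_configuration(None)
    for error in problems:
        print(error.obj, error.msg)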
diff --git a/lava_scheduler_app/managers.py b/lava_scheduler_app/managers.py
index 58fdb807b..1c75cbbc4 100644
--- a/lava_scheduler_app/managers.py
+++ b/lava_scheduler_app/managers.py
@@ -30,13 +30,12 @@ class RestrictedTestJobQuerySet(RestrictedResourceQuerySet):
from lava_scheduler_app.models import TestJob
# Pipeline jobs.
- conditions = Q(is_pipeline=True)
if not user or user.is_anonymous():
- conditions &= Q(is_public=True)
+ conditions = Q(is_public=True)
elif not user.is_superuser and not user.has_perm('lava_scheduler_app.cancel_resubmit_testjob') and not user.has_perm('lava_scheduler_app.change_device'):
# continue adding conditions only if user is not superuser and
# does not have admin permission for jobs or devices.
- conditions &= (
+ conditions = (
Q(is_public=True) |
Q(submitter=user) |
(~Q(actual_device=None) & Q(actual_device__user=user)) |
diff --git a/lava_scheduler_app/migrations/0036_remove_is_pipeline.py b/lava_scheduler_app/migrations/0036_remove_is_pipeline.py
new file mode 100644
index 000000000..e7bde918f
--- /dev/null
+++ b/lava_scheduler_app/migrations/0036_remove_is_pipeline.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.10 on 2018-02-08 17:08
+from __future__ import unicode_literals
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('lava_scheduler_app', '0035_remove_testjob__results_link'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='device',
+ name='is_pipeline',
+ ),
+ migrations.RemoveField(
+ model_name='testjob',
+ name='is_pipeline',
+ ),
+ ]
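A hedged example of applying and verifying this schema migration on a development instance (stock Django management API; packaged LAVA installs typically reach the same commands through lava-server manage):

    # Sketch only: requires DJANGO_SETTINGS_MODULE to point at the instance settings.
    import django
    from django.core.management import call_command

    django.setup()
    call_command("migrate", "lava_scheduler_app", "0036_remove_is_pipeline")
    call_command("showmigrations", "lava_scheduler_app")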
diff --git a/lava_scheduler_app/models.py b/lava_scheduler_app/models.py
index 3e66fd4e0..37d46b84c 100644
--- a/lava_scheduler_app/models.py
+++ b/lava_scheduler_app/models.py
@@ -649,12 +649,6 @@ class Device(RestrictedResource):
on_delete=models.SET_NULL,
)
- is_pipeline = models.BooleanField(
- verbose_name="Pipeline device?",
- default=False,
- editable=True
- )
-
def clean(self):
"""
Complies with the RestrictedResource constraints
@@ -750,8 +744,6 @@ class Device(RestrictedResource):
return self.is_owned_by(user)
def is_valid(self, system=True):
- if not self.is_pipeline:
- return False # V1 config cannot be checked
rendered = self.load_configuration()
try:
validate_device(rendered)
@@ -938,10 +930,6 @@ class Device(RestrictedResource):
return False
def get_health_check(self):
- # Do not submit any new v1 job
- if not self.is_pipeline:
- return None
-
# Get the device dictionary
extends = self.get_extends()
if not extends:
@@ -1158,8 +1146,7 @@ def _create_pipeline_job(job_data, user, taglist, device=None,
health_check=False,
user=user, is_public=public_state,
visibility=visibility,
- priority=priority,
- is_pipeline=True)
+ priority=priority)
job.save()
# need a valid job before the tags can be assigned, then it needs to be saved again.
for tag in Tag.objects.filter(name__in=taglist):
@@ -1238,7 +1225,7 @@ def _pipeline_protocols(job_data, user, yaml_data=None): # pylint: disable=too-
allowed_devices = []
device_list = Device.objects.filter(
- Q(device_type=device_type), Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED))
+ Q(device_type=device_type), ~Q(health=Device.HEALTH_RETIRED))
allowed_devices.extend(_check_submit_to_device(list(device_list), user))
if len(allowed_devices) < params['count']:
@@ -1381,7 +1368,7 @@ class TestJob(RestrictedResource):
(Enhanced version of vmgroups.)
A Primary connection needs a real device (persistence).
"""
- if not self.is_pipeline or not self.is_multinode or not self.definition:
+ if not self.is_multinode or not self.definition:
return False
job_data = yaml.load(self.definition)
return 'connection' in job_data
@@ -1626,12 +1613,6 @@ class TestJob(RestrictedResource):
blank=True
)
- is_pipeline = models.BooleanField(
- verbose_name="Pipeline job?",
- default=False,
- editable=False
- )
-
# calculated by the master validation process.
pipeline_compatibility = models.IntegerField(
default=0,
@@ -1663,8 +1644,7 @@ class TestJob(RestrictedResource):
str(self.id))
def output_file(self):
- filename = 'output.yaml' if self.is_pipeline else 'output.txt'
- output_path = os.path.join(self.output_dir, filename)
+ output_path = os.path.join(self.output_dir, "output.yaml")
if os.path.exists(output_path):
return open(output_path, encoding='utf-8', errors='replace')
else:
@@ -1690,10 +1670,7 @@ class TestJob(RestrictedResource):
@property
def results_link(self):
- if self.is_pipeline:
- return reverse("lava.results.testjob", args=[self.id])
- else:
- return None
+ return reverse("lava.results.testjob", args=[self.id])
@property
def essential_role(self): # pylint: disable=too-many-return-statements
@@ -1770,14 +1747,14 @@ class TestJob(RestrictedResource):
# singlenode only
device_type = _get_device_type(user, job_data['device_type'])
allow = _check_submit_to_device(list(Device.objects.filter(
- device_type=device_type, is_pipeline=True)), user)
+ device_type=device_type)), user)
if not allow:
raise DevicesUnavailableException("No devices of type %s have pipeline support." % device_type)
taglist = _get_tag_list(job_data.get('tags', []), True)
if taglist:
supported = _check_tags(taglist, device_type=device_type)
_check_tags_support(supported, allow)
- if original_job and original_job.is_pipeline:
+ if original_job:
# Add old job absolute url to metadata for pipeline jobs.
job_url = str(original_job.get_absolute_url())
try:
@@ -1796,16 +1773,11 @@ class TestJob(RestrictedResource):
Implement the schema constraints for visibility for pipeline jobs so that
admins cannot set a job into a logically inconsistent state.
"""
- if self.is_pipeline:
- # public settings must match
- if self.is_public and self.visibility != TestJob.VISIBLE_PUBLIC:
- raise ValidationError("is_public is set but visibility is not public.")
- elif not self.is_public and self.visibility == TestJob.VISIBLE_PUBLIC:
- raise ValidationError("is_public is not set but visibility is public.")
- else:
- if self.visibility != TestJob.VISIBLE_PUBLIC:
- raise ValidationError("Only pipeline jobs support any value of visibility except the default "
- "PUBLIC, even if the job and bundle are private.")
+ # public settings must match
+ if self.is_public and self.visibility != TestJob.VISIBLE_PUBLIC:
+ raise ValidationError("is_public is set but visibility is not public.")
+ elif not self.is_public and self.visibility == TestJob.VISIBLE_PUBLIC:
+ raise ValidationError("is_public is not set but visibility is public.")
return super(TestJob, self).clean()
def can_view(self, user):
@@ -1826,9 +1798,6 @@ class TestJob(RestrictedResource):
return False
if self.is_public:
return True
- if not self.is_pipeline:
- # old jobs will be private, only pipeline extends beyond this level
- return self.is_accessible_by(user)
logger = logging.getLogger('lava_scheduler_app')
if self.visibility == self.VISIBLE_PUBLIC:
# logical error
@@ -1883,9 +1852,8 @@ class TestJob(RestrictedResource):
return self._can_admin(user) and self.state in states
def can_resubmit(self, user):
- return self.is_pipeline and \
- (user.is_superuser or
- user.has_perm('lava_scheduler_app.cancel_resubmit_testjob'))
+ return (user.is_superuser or
+ user.has_perm('lava_scheduler_app.cancel_resubmit_testjob'))
def _generate_summary_mail(self):
domain = '???'
@@ -2718,7 +2686,6 @@ class Notification(models.Model):
"start_time": str(self.test_job.start_time),
"end_time": str(self.test_job.end_time),
"submitter_username": self.test_job.submitter.username,
- "is_pipeline": self.test_job.is_pipeline,
"failure_comment": self.test_job.failure_comment,
"priority": self.test_job.priority,
"description": self.test_job.description,
@@ -2872,7 +2839,7 @@ def process_notifications(sender, **kwargs):
notification_state = [TestJob.STATE_RUNNING, TestJob.STATE_FINISHED]
# Send only for pipeline jobs.
# If it's a new TestJob, no need to send notifications.
- if new_job.is_pipeline and new_job.id:
+ if new_job.id:
old_job = TestJob.objects.get(pk=new_job.id)
if new_job.state in notification_state and \
old_job.state != new_job.state:
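With the V1 branch removed from TestJob.clean(), the is_public/visibility consistency check now applies to every job. A hypothetical illustration (not part of the patch; VISIBLE_PERSONAL is assumed from upstream LAVA's visibility choices):

    from django.core.exceptions import ValidationError
    from lava_scheduler_app.models import TestJob

    # Inconsistent flags: public flag set, but visibility is not public.
    job = TestJob(is_public=True, visibility=TestJob.VISIBLE_PERSONAL)  # VISIBLE_PERSONAL assumed
    try:
        job.clean()
    except ValidationError as exc:
        print(exc)  # "is_public is set but visibility is not public."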
diff --git a/lava_scheduler_app/signals.py b/lava_scheduler_app/signals.py
index 136d78e16..acc697c2a 100644
--- a/lava_scheduler_app/signals.py
+++ b/lava_scheduler_app/signals.py
@@ -70,7 +70,6 @@ def device_post_handler(sender, **kwargs):
"state": instance.get_state_display(),
"device": instance.hostname,
"device_type": instance.device_type.name,
- "pipeline": instance.is_pipeline,
}
current_job = instance.current_job()
if current_job is not None:
@@ -110,7 +109,6 @@ def testjob_post_handler(sender, **kwargs):
"submit_time": instance.submit_time.isoformat(),
"submitter": str(instance.submitter),
"visibility": instance.get_visibility_display(),
- "pipeline": instance.is_pipeline,
}
if instance.is_multinode:
data['sub_id'] = instance.sub_id
diff --git a/lava_scheduler_app/tables.py b/lava_scheduler_app/tables.py
index c65759ab3..54f33efb3 100644
--- a/lava_scheduler_app/tables.py
+++ b/lava_scheduler_app/tables.py
@@ -552,7 +552,7 @@ class DeviceTable(LavaTable):
exclude = [
'user', 'group', 'is_public', 'device_version',
'physical_owner', 'physical_group', 'description',
- 'current_job', 'last_health_report_job', 'is_pipeline'
+ 'current_job', 'last_health_report_job'
]
sequence = [
'hostname', 'worker_host', 'device_type', 'state',
diff --git a/lava_scheduler_app/tests/test_api.py b/lava_scheduler_app/tests/test_api.py
index 492f55be4..899029a48 100644
--- a/lava_scheduler_app/tests/test_api.py
+++ b/lava_scheduler_app/tests/test_api.py
@@ -95,7 +95,7 @@ class TestSchedulerAPI(TestCaseWithFactory): # pylint: disable=too-many-ancesto
server = self.server_proxy('test', 'test')
self.assertEqual(
{'status': 'idle', 'job': None, 'offline_since': None, 'hostname': 'black01',
- 'offline_by': None, 'is_pipeline': False},
+ 'offline_by': None, 'is_pipeline': True},
server.scheduler.get_device_status('black01'))
def test_type_aliases(self):
diff --git a/lava_scheduler_app/tests/test_pipeline.py b/lava_scheduler_app/tests/test_pipeline.py
index 70f4fd0f2..ac9c574ad 100644
--- a/lava_scheduler_app/tests/test_pipeline.py
+++ b/lava_scheduler_app/tests/test_pipeline.py
@@ -74,7 +74,7 @@ class YamlFactory(ModelFactory):
if tags and not isinstance(tags, list):
tags = []
# a hidden device type will override is_public
- device = Device(device_type=device_type, is_public=is_public, hostname=hostname, is_pipeline=True, **kw)
+ device = Device(device_type=device_type, is_public=is_public, hostname=hostname, **kw)
if tags:
device.tags = tags
if DEBUG:
@@ -253,10 +253,8 @@ class TestPipelineSubmit(TestCaseWithFactory):
def test_exclusivity(self):
device = Device.objects.get(hostname="fakeqemu1")
- self.assertTrue(device.is_pipeline)
self.assertFalse(device.is_exclusive)
device = Device.objects.get(hostname="fakeqemu3")
- self.assertTrue(device.is_pipeline)
self.assertTrue(device.is_exclusive)
def test_context(self):
@@ -293,7 +291,6 @@ class TestPipelineSubmit(TestCaseWithFactory):
self.factory.make_device(device_type=mustang_type, hostname=hostname)
device = Device.objects.get(hostname="fakemustang1")
self.assertEqual('mustang-uefi', device.device_type.name)
- self.assertTrue(device.is_pipeline)
job_ctx = {
'tftp_mac': 'FF:01:00:69:AA:CC',
'extra_nfsroot_args': ',nolock',
@@ -943,37 +940,6 @@ class TestYamlMultinode(TestCaseWithFactory):
job = TestJob.objects.get(id=job.id)
self.assertNotEqual(job.sub_id, '')
- def test_mixed_multinode(self):
- user = self.factory.make_user()
- device_type = self.factory.make_device_type()
- self.factory.make_device(device_type, 'fakeqemu1')
- self.factory.make_device(device_type, 'fakeqemu2')
- self.factory.make_device(device_type, 'fakeqemu3')
- self.factory.make_device(device_type, 'fakeqemu4')
- submission = yaml.load(open(
- os.path.join(os.path.dirname(__file__), 'sample_jobs', 'kvm-multinode.yaml'), 'r'))
- role_list = submission['protocols'][MultinodeProtocol.name]['roles']
- for role in role_list:
- if 'tags' in role_list[role]:
- del role_list[role]['tags']
- job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
- self.assertEqual(len(job_list), 2)
- # make the list mixed
- fakeqemu1 = Device.objects.get(hostname='fakeqemu1')
- fakeqemu1.is_pipeline = False
- fakeqemu1.save(update_fields=['is_pipeline'])
- fakeqemu3 = Device.objects.get(hostname='fakeqemu3')
- fakeqemu3.is_pipeline = False
- fakeqemu3.save(update_fields=['is_pipeline'])
- device_list = Device.objects.filter(device_type=device_type, is_pipeline=True)
- self.assertEqual(len(device_list), 2)
- self.assertIsInstance(device_list, RestrictedResourceQuerySet)
- self.assertIsInstance(list(device_list), list)
- job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
- self.assertEqual(len(job_list), 2)
- for job in job_list:
- self.assertEqual(job.requested_device_type, device_type)
-
def test_multinode_with_retired(self): # pylint: disable=too-many-statements
"""
check handling with retired devices in device_list
@@ -992,64 +958,6 @@ class TestYamlMultinode(TestCaseWithFactory):
del role_list[role]['tags']
job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
self.assertEqual(len(job_list), 2)
- # make the list mixed
- fakeqemu1 = Device.objects.get(hostname='fakeqemu1')
- fakeqemu1.is_pipeline = False
- fakeqemu1.save(update_fields=['is_pipeline'])
- fakeqemu2 = Device.objects.get(hostname='fakeqemu2')
- fakeqemu3 = Device.objects.get(hostname='fakeqemu3')
- fakeqemu4 = Device.objects.get(hostname='fakeqemu4')
- device_list = Device.objects.filter(device_type=device_type, is_pipeline=True)
- self.assertEqual(len(device_list), 3)
- self.assertIsInstance(device_list, RestrictedResourceQuerySet)
- self.assertIsInstance(list(device_list), list)
- allowed_devices = []
- for device in list(device_list):
- if _check_submit_to_device([device], user):
- allowed_devices.append(device)
- self.assertEqual(len(allowed_devices), 3)
- self.assertIn(fakeqemu3, allowed_devices)
- self.assertIn(fakeqemu4, allowed_devices)
- self.assertIn(fakeqemu2, allowed_devices)
- self.assertNotIn(fakeqemu1, allowed_devices)
-
- # set one candidate device to RETIRED to force the bug
- fakeqemu4.health = Device.HEALTH_RETIRED
- fakeqemu4.save(update_fields=['health'])
- # refresh the device_list
- device_list = Device.objects.filter(device_type=device_type, is_pipeline=True).order_by('hostname')
- allowed_devices = []
- # test the old code to force the exception
- try:
- # by looping through in the test *and* in _check_submit_to_device
- # the retired device in device_list triggers the exception.
- for device in list(device_list):
- if _check_submit_to_device([device], user):
- allowed_devices.append(device)
- except DevicesUnavailableException:
- self.assertEqual(len(allowed_devices), 2)
- self.assertIn(fakeqemu4, device_list)
- self.assertEqual(fakeqemu4.health, Device.HEALTH_RETIRED)
- else:
- self.fail("Missed DevicesUnavailableException")
- allowed_devices = []
- allowed_devices.extend(_check_submit_to_device(list(device_list), user))
- self.assertEqual(len(allowed_devices), 2)
- self.assertIn(fakeqemu3, allowed_devices)
- self.assertIn(fakeqemu2, allowed_devices)
- self.assertNotIn(fakeqemu4, allowed_devices)
- self.assertNotIn(fakeqemu1, allowed_devices)
- allowed_devices = []
-
- # test improvement as there is no point wasting memory with a Query containing Retired.
- device_list = Device.objects.filter(
- Q(device_type=device_type), Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED))
- allowed_devices.extend(_check_submit_to_device(list(device_list), user))
- self.assertEqual(len(allowed_devices), 2)
- self.assertIn(fakeqemu3, allowed_devices)
- self.assertIn(fakeqemu2, allowed_devices)
- self.assertNotIn(fakeqemu4, allowed_devices)
- self.assertNotIn(fakeqemu1, allowed_devices)
def test_multinode_v2_metadata(self):
device_type = self.factory.make_device_type()
diff --git a/lava_scheduler_app/tests/test_scheduler.py b/lava_scheduler_app/tests/test_scheduler.py
index 16b6cca40..d569871ca 100644
--- a/lava_scheduler_app/tests/test_scheduler.py
+++ b/lava_scheduler_app/tests/test_scheduler.py
@@ -73,7 +73,7 @@ class TestHealthCheckScheduling(TestCase):
self.user = User.objects.create(username="user-01")
self.last_hc03 = TestJob.objects.create(health_check=True, actual_device=self.device03,
user=self.user, submitter=self.user,
- start_time=timezone.now(),
+ start_time=timezone.now(), is_public=True,
state=TestJob.STATE_FINISHED, health=TestJob.HEALTH_COMPLETE)
self.device03.last_health_report_job = self.last_hc03
self.device03.save()
@@ -209,7 +209,7 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled now
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
schedule(DummyLogger())
self.device01.refresh_from_db()
@@ -221,7 +221,7 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled after the health check
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
self.device03.refresh_from_db()
self.last_hc03.submit_time = timezone.now() - timedelta(hours=25)
@@ -253,13 +253,13 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled now
j01 = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
j02 = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
j03 = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
schedule(DummyLogger())
@@ -314,7 +314,7 @@ class TestPriorities(TestCase):
jobs = []
for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM, TestJob.LOW]:
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None), priority=p)
jobs.append(j)
@@ -391,7 +391,7 @@ class TestPriorities(TestCase):
jobs = []
for p in [TestJob.LOW, TestJob.MEDIUM, TestJob.HIGH, TestJob.MEDIUM, TestJob.LOW]:
j = TestJob.objects.create(requested_device_type=self.device_type01,
- user=self.user, submitter=self.user,
+ user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None), priority=p)
jobs.append(j)
diff --git a/lava_scheduler_app/tests/test_state_machine.py b/lava_scheduler_app/tests/test_state_machine.py
index 8bea4e1d8..0c88f298a 100644
--- a/lava_scheduler_app/tests/test_state_machine.py
+++ b/lava_scheduler_app/tests/test_state_machine.py
@@ -32,6 +32,18 @@ from lava_scheduler_app.models import (
)
+minimal_valid_job = yaml.dump("""
+job_name: minimal valid job
+visibility: public
+timeouts:
+ job:
+ minutes: 10
+ action:
+ minutes: 5
+actions: []
+""")
+
+
class TestTestJobStateMachine(TestCase):
def setUp(self):
@@ -41,7 +53,8 @@ class TestTestJobStateMachine(TestCase):
worker_host=self.worker)
self.user = User.objects.create(username="user-01")
self.job = TestJob.objects.create(requested_device_type=self.device_type,
- submitter=self.user, user=self.user)
+ submitter=self.user, user=self.user, is_public=True,
+ definition=minimal_valid_job)
def check_device(self, state, health):
self.device.refresh_from_db()
@@ -224,13 +237,13 @@ class TestTestJobStateMachine(TestCase):
self.job.save()
self.sub_job1 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job1.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job1.actual_device = self.device2
self.sub_job1.save()
self.sub_job2 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job2.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job2.actual_device = self.device3
self.sub_job2.save()
@@ -456,13 +469,13 @@ class TestTestJobStateMachine(TestCase):
self.job.save()
self.sub_job1 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job1.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job1.actual_device = self.device2
self.sub_job1.save()
self.sub_job2 = TestJob.objects.create(requested_device_type=self.device_type,
submitter=self.user, user=self.user,
- target_group="target_group")
+ target_group="target_group", is_public=True)
self.sub_job2.definition = yaml.dump({"protocols": {"lava-multinode": {"role": "worker", "essential": False}}})
self.sub_job2.actual_device = self.device3
self.sub_job2.save()
@@ -616,7 +629,7 @@ class TestWorkerStateMachine(TestCase):
worker_host=self.worker)
self.user = User.objects.create(username="user-01")
self.job = TestJob.objects.create(requested_device_type=self.device_type,
- submitter=self.user, user=self.user)
+ submitter=self.user, user=self.user, is_public=True)
def check_device(self, state, health):
self.device.refresh_from_db()
diff --git a/lava_scheduler_app/tests/test_submission.py b/lava_scheduler_app/tests/test_submission.py
index 7b4cdf457..3c89c4e29 100644
--- a/lava_scheduler_app/tests/test_submission.py
+++ b/lava_scheduler_app/tests/test_submission.py
@@ -179,15 +179,12 @@ class TestTestJob(TestCaseWithFactory): # pylint: disable=too-many-ancestors,to
self.factory.cleanup()
dt = self.factory.make_device_type(name='name')
device = self.factory.make_device(device_type=dt, hostname='name-1')
- device.is_pipeline = True
device.save()
definition = self.factory.make_job_data()
definition['visibility'] = {'group': ['newgroup']}
definition['job_name'] = 'unittest_visibility'
self.assertIsNotNone(yaml.dump(definition))
self.assertIsNotNone(list(Device.objects.filter(device_type=dt)))
- self.assertIsNotNone(list(Device.objects.filter(
- device_type=dt, is_pipeline=True)))
user = self.factory.make_user()
user.user_permissions.add(
Permission.objects.get(codename='add_testjob'))
diff --git a/lava_scheduler_app/views.py b/lava_scheduler_app/views.py
index 4ec405c48..16b168068 100644
--- a/lava_scheduler_app/views.py
+++ b/lava_scheduler_app/views.py
@@ -313,8 +313,7 @@ class DeviceTableView(JobTableView):
return Device.objects.select_related("device_type", "worker_host",
"user", "group") \
.prefetch_related("tags") \
- .filter(device_type__in=visible,
- is_pipeline=True) \
+ .filter(device_type__in=visible) \
.order_by("hostname")
@@ -919,7 +918,7 @@ def health_job_list(request, pk):
class MyJobsView(JobTableView):
def get_queryset(self):
- query = all_jobs_with_custom_sort().filter(is_pipeline=True)
+ query = all_jobs_with_custom_sort()
return query.filter(submitter=self.request.user)
@@ -940,14 +939,14 @@ class FavoriteJobsView(JobTableView):
def get_queryset(self):
user = self.user if self.user else self.request.user
- query = all_jobs_with_custom_sort().filter(is_pipeline=True)
+ query = all_jobs_with_custom_sort()
return query.filter(testjobuser__user=user, testjobuser__is_favorite=True)
class AllJobsView(JobTableView):
def get_queryset(self):
- return all_jobs_with_custom_sort().filter(is_pipeline=True)
+ return all_jobs_with_custom_sort()
@BreadCrumb("Jobs", parent=index)
@@ -1054,10 +1053,6 @@ def job_submit(request):
def job_detail(request, pk):
job = get_restricted_job(request.user, pk, request=request)
- # Refuse non-pipeline jobs
- if not job.is_pipeline:
- raise Http404()
-
# Is the job favorite?
is_favorite = False
if request.user.is_authenticated():
@@ -1792,7 +1787,6 @@ def device_detail(request, pk):
"device_log_table": device_log_ptable,
'can_admin': device_can_admin,
'exclusive': device.is_exclusive,
- 'pipeline': device.is_pipeline,
'edit_description': device_can_admin,
'bread_crumb_trail': BreadCrumbTrail.leading_to(device_detail, pk=pk),
'context_help': BreadCrumbTrail.show_help(device_detail, pk="help"),
@@ -1818,9 +1812,6 @@ def device_dictionary(request, pk):
if not device.device_type.some_devices_visible_to(request.user):
raise Http404('No device matches the given query.')
- if not device.is_pipeline:
- raise Http404
-
raw_device_dict = device.load_configuration(output_format="raw")
if not raw_device_dict:
raise Http404
diff --git a/lava_server/api.py b/lava_server/api.py
index 764105867..21291a1d3 100644
--- a/lava_server/api.py
+++ b/lava_server/api.py
@@ -309,7 +309,7 @@ class LavaSystemAPI(SystemAPI):
if visible:
retval[device_type.name].append({
hostname: {
- 'is_pipeline': device.is_pipeline,
+ 'is_pipeline': True,
'visible': visible,
'exclusive': device.is_exclusive
}
@@ -429,7 +429,7 @@ class LavaSystemAPI(SystemAPI):
self._authenticate()
# get all device dictionaries, build the entire map.
dictionaries = [
- (device.hostname, device.load_configuration()) for device in Device.objects.filter(is_pipeline=True)
+ (device.hostname, device.load_configuration()) for device in Device.objects.all()
]
network_map = {'switches': {}}
for (hostname, params) in dictionaries:
diff --git a/lava_server/management/commands/devices.py b/lava_server/management/commands/devices.py
index be9769ee0..552439bbc 100644
--- a/lava_server/management/commands/devices.py
+++ b/lava_server/management/commands/devices.py
@@ -216,9 +216,8 @@ class Command(BaseCommand):
health = Device.HEALTH_GOOD if online else Device.HEALTH_MAINTENANCE
device = Device.objects.create(hostname=hostname, device_type=dt,
description=description,
- worker_host=worker, is_pipeline=True,
state=Device.STATE_IDLE, health=health,
- is_public=public)
+ worker_host=worker, is_public=public)
if tags is not None:
for tag in tags:
diff --git a/lava_server/management/commands/jobs.py b/lava_server/management/commands/jobs.py
index 0f21cd030..e3f1d0613 100644
--- a/lava_server/management/commands/jobs.py
+++ b/lava_server/management/commands/jobs.py
@@ -81,9 +81,6 @@ class Command(BaseCommand):
help="Filter jobs by submitter")
rm.add_argument("--dry-run", default=False, action="store_true",
help="Do not remove any data, simulate the output")
- rm.add_argument("--v1", default=False, action="store_true",
- help="Remove only v1 jobs. "
- "If this is the only filtering option, all v1 jobs will be removed.")
rm.add_argument("--slow", default=False, action="store_true",
help="Be nice with the system by sleeping regularly")
@@ -91,8 +88,7 @@ class Command(BaseCommand):
""" forward to the right sub-handler """
if options["sub_command"] == "rm":
self.handle_rm(options["older_than"], options["submitter"],
- options["state"], options["v1"],
- options["dry_run"], options["slow"])
+ options["state"], options["dry_run"], options["slow"])
elif options["sub_command"] == "fail":
self.handle_fail(options["job_id"])
@@ -105,7 +101,7 @@ class Command(BaseCommand):
except TestJob.DoesNotExist:
raise CommandError("TestJob '%d' does not exists" % job_id)
- def handle_rm(self, older_than, submitter, state, v1_only, simulate, slow):
+ def handle_rm(self, older_than, submitter, state, simulate, slow):
if not older_than and not submitter and not state and not v1_only:
raise CommandError("You should specify at least one filtering option")
@@ -135,9 +131,6 @@ class Command(BaseCommand):
if state is not None:
jobs = jobs.filter(state=self.job_state[state])
- if v1_only:
- jobs = jobs.filter(is_pipeline=False)
-
self.stdout.write("Removing %d jobs:" % jobs.count())
while True:
diff --git a/lava_server/management/commands/lava-master.py b/lava_server/management/commands/lava-master.py
index cae26a8a6..9abc2785b 100644
--- a/lava_server/management/commands/lava-master.py
+++ b/lava_server/management/commands/lava-master.py
@@ -211,7 +211,6 @@ class Command(LAVADaemonCommand):
The master crashed, send a STATUS message to get the current state of jobs
"""
jobs = TestJob.objects.filter(actual_device__worker_host__hostname=hostname,
- is_pipeline=True,
state=TestJob.STATE_RUNNING)
for job in jobs:
self.logger.info("[%d] STATUS => %s (%s)", job.id, hostname,