[Data_processing] fix job_executions table

The table no longer crashes after the cluster is deleted.
Both the cluster object and the job are queried before extending a job execution.
When the cluster or the job is deleted, 'Not available' is assigned as the name.

Closes-Bug: #1376738
Closes-Bug: #1391356
Closes-Bug: #1391507
Change-Id: Iee60572f3a6e02447a6cd8508735bbb91ec020e9
This commit is contained in:
Nikita Konovalov 2014-10-03 14:48:27 +04:00
parent 4fdc42cf63
commit 4f947f8cb2
7 changed files with 96 additions and 33 deletions

View File

@ -19,12 +19,12 @@ from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from saharaclient.api.base import APIException
from saharaclient import client as api_client
LOG = logging.getLogger(__name__)
# "type" of Sahara service registered in keystone
SAHARA_SERVICE = 'data-processing'
# Sahara service_type registered in Juno
@ -43,6 +43,23 @@ VERSIONS.load_supported_version(1.1, {"client": api_client,
"version": 1.1})
def safe_call(func, *args, **kwargs):
    """Invoke *func* and swallow only the Not Found (404) API error.

    Intended solely for safely retrieving Sahara objects that may have
    been deleted: a 404 yields ``None``; every other :class:`APIException`
    is re-raised to the caller.
    """
    try:
        result = func(*args, **kwargs)
    except APIException as exc:
        if exc.error_code != 404:
            raise  # unexpected API failure — propagate it
        result = None  # object no longer exists; report it as absent
    return result
@memoized
def client(request):
try:
@ -319,18 +336,42 @@ def job_execution_create(request, job_id, cluster_id,
configs)
def _resolve_job_execution_names(job_execution, cluster=None,
job=None):
job_execution.cluster_name = None
if cluster:
job_execution.cluster_name = cluster.name
job_execution.job_name = None
if job:
job_execution.job_name = job.name
return job_execution
def job_execution_list(request, search_opts=None):
    """List job executions with cluster/job names resolved.

    The extracted text kept both the pre- and post-change bodies of this
    function (a duplicate list assignment and a leftover loop that calls
    ``.name`` on ``dict.get(...)`` and so crashes once a cluster or job
    has been deleted). This is the reconstructed fixed version: missing
    clusters/jobs simply resolve to ``None`` names.
    """
    job_execution_list = client(request).job_executions.list(search_opts)
    # Index jobs and clusters by id for O(1) lookups while resolving names.
    job_dict = dict((j.id, j) for j in job_list(request))
    cluster_dict = dict((c.id, c) for c in cluster_list(request))
    resolved_job_execution_list = [
        _resolve_job_execution_names(
            job_execution,
            # .get() returns None for deleted objects; the resolver
            # then leaves the corresponding *_name attribute as None.
            cluster_dict.get(job_execution.cluster_id),
            job_dict.get(job_execution.job_id))
        for job_execution in job_execution_list
    ]
    return resolved_job_execution_list
def job_execution_get(request, jex_id):
    """Fetch one job execution with its cluster/job names resolved.

    The extracted text kept the pre-change one-line ``return`` above the
    new body, which made the new lines unreachable; this is the
    reconstructed fixed version. ``safe_call`` turns a 404 on the cluster
    or job lookup into ``None`` so deleted objects do not crash the view.
    """
    jex = client(request).job_executions.get(jex_id)
    cluster = safe_call(client(request).clusters.get, jex.cluster_id)
    job = safe_call(client(request).jobs.get, jex.job_id)
    return _resolve_job_execution_names(jex, cluster, job)
def job_execution_delete(request, jex_id):

View File

@ -59,7 +59,7 @@ class NodeGroupsTab(tabs.Tab):
continue
ng["flavor_name"] = (
nova.flavor_get(request, ng["flavor_id"]).name)
ng["node_group_template"] = helpers.safe_call(
ng["node_group_template"] = saharaclient.safe_call(
saharaclient.nodegroup_template_get,
request, ng.get("node_group_template_id", None))
ng["security_groups_full"] = helpers.get_security_groups(

View File

@ -27,7 +27,6 @@ from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
@ -54,7 +53,7 @@ class GeneralTab(tabs.Tab):
cluster.default_image_id)
if getattr(cluster, 'cluster_template_id', None):
cluster_template = helpers.safe_call(
cluster_template = saharaclient.safe_call(
sahara.cluster_templates.get,
cluster.cluster_template_id)
else:
@ -102,7 +101,7 @@ class NodeGroupsTab(tabs.Tab):
request, ng["floating_ip_pool"]))
if ng.get("node_group_template_id", None):
ng["node_group_template"] = helpers.safe_call(
ng["node_group_template"] = saharaclient.safe_call(
sahara.node_group_templates.get,
ng["node_group_template_id"])

View File

@ -132,21 +132,39 @@ class UpdateRow(tables.Row):
messages.error(request, _("Unable to update row"))
def get_job_link(job_execution):
    """Build the details-page URL for this execution's job template."""
    quoted_id = http.urlquote(job_execution.job_id)
    return reverse("horizon:project:data_processing.jobs:details",
                   args=(quoted_id,))
def get_cluster_link(job_execution):
    """Build the details-page URL for this execution's cluster."""
    quoted_id = http.urlquote(job_execution.cluster_id)
    return reverse("horizon:project:data_processing.clusters:details",
                   args=(quoted_id,))
class JobExecutionsTable(tables.DataTable):
class StatusColumn(tables.Column):
    def get_raw_data(self, datum):
        # The live status string lives inside the execution's info dict.
        status = datum.info['status']
        return status
class JobNameColumn(tables.Column):
    @staticmethod
    def link(job_execution):
        """Details URL for the job template, or None if it was deleted."""
        if not job_execution.job_name:
            # No link should be generated for a deleted Job.
            return None
        return reverse("horizon:project:data_processing.jobs:details",
                       args=(http.urlquote(job_execution.job_id),))

    def get_data(self, job_execution):
        # Show a translated placeholder once the job template is gone.
        return job_execution.job_name or _("Not available")
class ClusterNameColumn(tables.Column):
    @staticmethod
    def link(job_execution):
        """Details URL for the cluster, or None if it was deleted."""
        if not job_execution.cluster_name:
            # No link should be generated for a deleted Cluster.
            return None
        return reverse(
            "horizon:project:data_processing.clusters:details",
            args=(http.urlquote(job_execution.cluster_id),))

    def get_data(self, job_execution):
        # Show a translated placeholder once the cluster is gone.
        return job_execution.cluster_name or _("Not available")
STATUS_CHOICES = (
("DONEWITHERROR", False),
("FAILED", False),
@ -170,14 +188,16 @@ class JobExecutionsTable(tables.DataTable):
("name", pgettext_lazy("Name")),),
link=("horizon:project:data_processing."
"job_executions:details"))
job_name = tables.Column(
job_name = JobNameColumn(
"job_name",
verbose_name=_("Job Template"),
link=get_job_link)
cluster_name = tables.Column(
link=JobNameColumn.link)
cluster_name = ClusterNameColumn(
"cluster_name",
verbose_name=_("Cluster"),
link=get_cluster_link)
link=ClusterNameColumn.link)
status = StatusColumn("info",
status=True,
status_choices=STATUS_CHOICES,

View File

@ -31,6 +31,12 @@ class DataProcessingJobExecutionTests(test.TestCase):
.AndReturn(self.job_executions.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertEqual(
"cluster-1",
res.context_data["job_executions_table"].data[0].cluster_name)
self.assertEqual(
"job-1",
res.context_data["job_executions_table"].data[0].job_name)
self.assertTemplateUsed(
res, 'project/data_processing.job_executions/job_executions.html')
self.assertContains(res, 'Jobs')

View File

@ -141,13 +141,6 @@ def parse_configs_from_context(context, defaults):
return configs_dict
def safe_call(func, *args, **kwargs):
    """Best-effort call: return ``func(*args, **kwargs)``, or None on any error.

    NOTE(review): this deliberately swallows every exception — callers
    treat any failure as "object not available".
    """
    try:
        result = func(*args, **kwargs)
    except Exception:
        result = None
    return result
def get_security_groups(request, security_group_ids):
security_groups = []
for group in security_group_ids or []:

View File

@ -482,7 +482,11 @@ def data(TEST):
"return_code": None,
"start_time": "2014-06-05T16:03:32",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": "2014-06-05 20:03:46.438248"
"updated_at": "2014-06-05 20:03:46.438248",
"cluster_name_set": True,
"job_name_set": True,
"cluster_name": "cluster-1",
"job_name": "job-1"
}
jobex1 = job_executions.JobExecution(