Update Formatting with YAPF

Change-Id: I351f55b5ffbbae8c15d529777bfac058054c43c9
Mark Burnett 2018-06-07 16:15:54 -05:00
parent 6dad448ca6
commit 714528a549
51 changed files with 332 additions and 320 deletions
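
The diffs below are the output of running YAPF across the Python sources. As a rough sketch of an equivalent pass (assuming yapf is installed; the project's actual .style.yapf settings are not part of this commit, so the pep8 base style below is only a placeholder):

# Sketch only: rewrite every .py file under the current directory with YAPF.
# style_config='pep8' is an assumption; substitute the project's real
# .style.yapf if it differs.
import pathlib

from yapf.yapflib.yapf_api import FormatCode

for path in pathlib.Path('.').rglob('*.py'):
    source = path.read_text()
    formatted, changed = FormatCode(source, style_config='pep8')
    if changed:
        path.write_text(formatted)
        print("reformatted %s" % path)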


@ -62,6 +62,7 @@ class DesignShow(CliAction): # pylint: disable=too-few-public-methods
return self.api_client.get_design(
design_id=self.design_id, source=self.source)
class DesignValidate(CliAction): # pylint: disable=too-few-public-methods
"""Action to validate a design.


@ -59,11 +59,10 @@ def design_show(ctx, design_id):
click.echo(json.dumps(DesignShow(ctx.obj['CLIENT'], design_id).invoke()))
@design.command(name='validate')
@click.option(
'--design-href',
'-h',
help='The design href key to the design ref')
'--design-href', '-h', help='The design href key to the design ref')
@click.pass_context
def design_validate(ctx, design_href=None):
"""Validate a design."""


@ -80,9 +80,9 @@ class PartShow(PartBase): # pylint: disable=too-few-public-methods
self.kind = kind
self.key = key
self.source = source
self.logger.debug('DesignShow action initialized for design_id=%s,'
' kind=%s, key=%s, source=%s', design_id, kind, key,
source)
self.logger.debug(
'DesignShow action initialized for design_id=%s,'
' kind=%s, key=%s, source=%s', design_id, kind, key, source)
def invoke(self):
return self.api_client.get_part(


@ -88,8 +88,8 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
NodeBuildDataResource(state_manager=state_manager)),
# API to list current node names based
('/nodefilter',
NodeFilterResource(state_manager=state_manager,
orchestrator=orchestrator)),
NodeFilterResource(
state_manager=state_manager, orchestrator=orchestrator)),
# API for nodes to discover their boot actions during curtin install
('/bootactions/nodes/{hostname}/units',
BootactionUnitsResource(


@ -57,8 +57,8 @@ class BaseResource(object):
self.error(
req.context,
"Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
raise errors.InvalidFormat("%s: Invalid JSON in body: %s" %
(req.path, jex))
raise errors.InvalidFormat(
"%s: Invalid JSON in body: %s" % (req.path, jex))
else:
raise errors.InvalidFormat("Requires application/json payload")


@ -186,9 +186,9 @@ class BootactionAssetsResource(StatefulResource):
tarball = BootactionUtils.tarbuilder(asset_list=assets)
resp.set_header('Content-Type', 'application/gzip')
resp.set_header('Content-Disposition',
"attachment; filename=\"%s-%s.tar.gz\"" %
(hostname, asset_type))
resp.set_header(
'Content-Disposition', "attachment; filename=\"%s-%s.tar.gz\""
% (hostname, asset_type))
resp.data = tarball
resp.status = falcon.HTTP_200
return


@ -104,7 +104,8 @@ class NodeFilterResource(StatefulResource):
node_filter = json_data.get('node_filter', None)
design_ref = json_data.get('design_ref', None)
if design_ref is None:
self.info(req.context, 'Missing required input value: design_ref')
self.info(req.context,
'Missing required input value: design_ref')
self.return_error(
resp,
falcon.HTTP_400,
@ -112,8 +113,8 @@ class NodeFilterResource(StatefulResource):
retry=False)
return
_, site_design = self.orchestrator.get_effective_site(design_ref)
nodes = self.orchestrator.process_node_filter(node_filter=node_filter,
site_design=site_design)
nodes = self.orchestrator.process_node_filter(
node_filter=node_filter, site_design=site_design)
resp_list = [n.name for n in nodes if nodes]
resp.body = json.dumps(resp_list)


@ -45,9 +45,9 @@ class TasksResource(StatefulResource):
resp.body = json.dumps(task_list)
resp.status = falcon.HTTP_200
except Exception as ex:
self.error(req.context,
"Unknown error: %s\n%s" % (str(ex),
traceback.format_exc()))
self.error(
req.context,
"Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
@ -79,9 +79,9 @@ class TasksResource(StatefulResource):
else:
supported_actions.get(action)(self, req, resp, json_data)
except Exception as ex:
self.error(req.context,
"Unknown error: %s\n%s" % (str(ex),
traceback.format_exc()))
self.error(
req.context,
"Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
@ -318,8 +318,9 @@ class TaskResource(StatefulResource):
else:
# If layers is passed in then it returns a dict of tasks instead of the task dict.
if layers:
resp_data, errors = self.handle_layers(req, resp, task_id, builddata, subtask_errors, layers,
first_task)
resp_data, errors = self.handle_layers(
req, resp, task_id, builddata, subtask_errors, layers,
first_task)
# Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.
if (subtask_errors):
resp_data['subtask_errors'] = errors
@ -327,8 +328,9 @@ class TaskResource(StatefulResource):
resp_data = first_task
# Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.
if (subtask_errors):
_, errors = self.handle_layers(req, resp, task_id, False, subtask_errors, 1,
first_task)
_, errors = self.handle_layers(req, resp, task_id,
False, subtask_errors,
1, first_task)
resp_data['subtask_errors'] = errors
resp.body = json.dumps(resp_data)
@ -357,7 +359,8 @@ class TaskResource(StatefulResource):
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
def handle_layers(self, req, resp, task_id, builddata, subtask_errors, layers, first_task):
def handle_layers(self, req, resp, task_id, builddata, subtask_errors,
layers, first_task):
resp_data = {}
errors = {}
resp_data['init_task_id'] = task_id
@ -366,7 +369,8 @@ class TaskResource(StatefulResource):
# first_task is layer 1
current_layer = 1
# The while loop handles each layer.
while queued_ids and (current_layer < layers or layers == -1 or subtask_errors):
while queued_ids and (current_layer < layers or layers == -1
or subtask_errors):
# Copies the current list (a layer) then clears the queue for the next layer.
processing_ids = list(queued_ids)
queued_ids = []
@ -378,7 +382,8 @@ class TaskResource(StatefulResource):
resp_data[id] = task
if task:
queued_ids.extend(task.get('subtask_id_list', []))
if task.get('result', {}).get('details', {}).get('errorCount', 0) > 0 and subtask_errors:
if task.get('result', {}).get('details', {}).get(
'errorCount', 0) > 0 and subtask_errors:
result = task.get('result', {})
result['task_id'] = id
errors[id] = task.get('result', {})


@ -916,8 +916,8 @@ class ConfigureHardware(BaseMaasAction):
# TODO(sh8121att): Better way of representing the node statuses than static strings
for n in nodes:
try:
self.logger.debug("Locating node %s for commissioning" %
(n.name))
self.logger.debug(
"Locating node %s for commissioning" % (n.name))
machine = machine_list.identify_baremetal_node(
n, update_name=False)
if machine is not None:
@ -932,8 +932,8 @@ class ConfigureHardware(BaseMaasAction):
# Poll machine status
attempts = 0
max_attempts = config.config_mgr.conf.timeouts.configure_hardware * (
60 // config.config_mgr.conf.maasdriver.
poll_interval)
60 //
config.config_mgr.conf.maasdriver.poll_interval)
while (attempts < max_attempts and
(machine.status_name != 'Ready' and
@ -1349,10 +1349,9 @@ class ApplyNodeNetworking(BaseMaasAction):
ctx=n.name,
ctx_type='node')
elif machine.status_name == 'Broken':
msg = (
"Located node %s in MaaS, status broken. Run "
"ConfigureHardware before configurating network" %
(n.name))
msg = ("Located node %s in MaaS, status broken. Run "
"ConfigureHardware before configurating network"
% (n.name))
self.logger.info(msg)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
@ -1520,8 +1519,8 @@ class ApplyNodePlatform(BaseMaasAction):
try:
if n.tags is not None and len(n.tags) > 0:
self.logger.info("Configuring static tags for node %s" %
(n.name))
self.logger.info(
"Configuring static tags for node %s" % (n.name))
for t in n.tags:
tag_list.refresh()
@ -1549,8 +1548,8 @@ class ApplyNodePlatform(BaseMaasAction):
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
tag.apply_to_node(machine.resource_id)
self.logger.info("Applied static tags to node %s" %
(n.name))
self.logger.info(
"Applied static tags to node %s" % (n.name))
self.task.success(focus=n.get_id())
else:
msg = "No node tags to apply for %s." % n.name
@ -1728,9 +1727,9 @@ class ApplyNodeStorage(BaseMaasAction):
maas_dev.resource_id)
continue
self.logger.debug("Partitioning dev %s (%s) on node %s" %
(d.name, n.get_logicalname(d.name),
n.name))
self.logger.debug(
"Partitioning dev %s (%s) on node %s" %
(d.name, n.get_logicalname(d.name), n.name))
for p in d.partitions:
if p.is_sys():
self.logger.debug(
@ -2022,8 +2021,8 @@ class DeployNode(BaseMaasAction):
continue
# Saving boot action context for a node
self.logger.info("Saving Boot Action context for node %s." %
(n.name))
self.logger.info(
"Saving Boot Action context for node %s." % (n.name))
try:
ba_key = self.orchestrator.create_bootaction_context(
n.name, self.task)


@ -177,6 +177,6 @@ class MaasRequestFactory(object):
"Received error response - URL: %s %s - RESPONSE: %s" %
(prepared_req.method, prepared_req.url, resp.status_code))
self.logger.debug("Response content: %s" % resp.text)
raise errors.DriverError("MAAS Error: %s - %s" % (resp.status_code,
resp.text))
raise errors.DriverError(
"MAAS Error: %s - %s" % (resp.status_code, resp.text))
return resp


@ -129,9 +129,8 @@ class MaasNodeDriver(NodeDriver):
raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions:
raise errors.DriverError(
"Driver %s doesn't support task action %s" % (self.driver_desc,
task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
@ -207,9 +206,8 @@ class MaasNodeDriver(NodeDriver):
maas_client=maas_client)
action.start()
except Exception as e:
msg = (
"Subtask for action %s raised unexpected exception: %s" %
(task.action, str(e)))
msg = ("Subtask for action %s raised unexpected exception: %s"
% (task.action, str(e)))
self.logger.error(msg, exc_info=e)
task.add_status_msg(
msg=msg,


@ -216,9 +216,8 @@ class ResourceCollectionBase(object):
res.set_resource_id(resp_json.get('id'))
return res
raise errors.DriverError(
"Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
% (url, resp.status_code))
"""
Append a resource instance to the list locally only


@ -93,8 +93,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='format', files=data)
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -126,8 +126,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='unformat')
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: unformat of device %s on node %s failed: %s" \
@ -156,8 +156,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='mount', files=data)
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -183,8 +183,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='unmount')
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -202,8 +202,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='set_boot_disk')
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:


@ -85,8 +85,8 @@ class Interface(model_base.ResourceBase):
fabric_vlan = fabric.vlans.singleton({'vid': 0})
if fabric_vlan is None:
self.logger.warning("Cannot locate untagged VLAN on fabric %s" %
(fabric_id))
self.logger.warning(
"Cannot locate untagged VLAN on fabric %s" % (fabric_id))
raise errors.DriverError(
"Cannot locate untagged VLAN on fabric %s" % (fabric_id))
@ -112,8 +112,8 @@ class Interface(model_base.ResourceBase):
"""Disconnect this interface from subnets and VLANs."""
url = self.interpolate_url()
self.logger.debug("Disconnecting interface %s from networks." %
(self.name))
self.logger.debug(
"Disconnecting interface %s from networks." % (self.name))
resp = self.api_client.post(url, op='disconnect')
if not resp.ok:
@ -299,10 +299,10 @@ class Interfaces(model_base.ResourceCollectionBase):
parent_iface = self.singleton({'name': parent_name})
if parent_iface is None:
self.logger.error("Cannot locate parent interface %s" %
(parent_name))
raise errors.DriverError("Cannot locate parent interface %s" %
(parent_name))
self.logger.error(
"Cannot locate parent interface %s" % (parent_name))
raise errors.DriverError(
"Cannot locate parent interface %s" % (parent_name))
if parent_iface.vlan is None:
self.logger.error(


@ -72,6 +72,5 @@ class IpRanges(model_base.ResourceCollectionBase):
res.set_resource_id(resp_json.get('id'))
return res
raise errors.DriverError(
"Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
% (url, resp.status_code))


@ -68,8 +68,8 @@ class Machine(model_base.ResourceBase):
api_client, system_id=self.resource_id)
self.volume_groups.refresh()
except Exception:
self.logger.warning("Failed load node %s volume groups." %
(self.resource_id))
self.logger.warning(
"Failed load node %s volume groups." % (self.resource_id))
else:
self.interfaces = None
self.block_devices = None
@ -117,15 +117,15 @@ class Machine(model_base.ResourceBase):
Removes all the volume groups/logical volumes and all the physical
device partitions on this machine.
"""
self.logger.info("Resetting storage configuration on node %s" %
(self.resource_id))
self.logger.info(
"Resetting storage configuration on node %s" % (self.resource_id))
if self.volume_groups is not None and self.volume_groups.len() > 0:
for vg in self.volume_groups:
self.logger.debug("Removing VG %s" % vg.name)
vg.delete()
else:
self.logger.debug("No VGs configured on node %s" %
(self.resource_id))
self.logger.debug(
"No VGs configured on node %s" % (self.resource_id))
if self.block_devices is not None:
for d in self.block_devices:
@ -137,8 +137,8 @@ class Machine(model_base.ResourceBase):
self.logger.debug(
"No partitions found on device %s" % d.name)
else:
self.logger.debug("No block devices found on node %s" %
(self.resource_id))
self.logger.debug(
"No block devices found on node %s" % (self.resource_id))
def set_storage_layout(self,
layout_type='flat',
@ -197,8 +197,8 @@ class Machine(model_base.ResourceBase):
url, op='set_storage_layout', files=data)
if not resp.ok:
raise Exception("MAAS Error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS Error: %s - %s" % (resp.status_code, resp.text))
except Exception as ex:
msg = "Error: failed configuring node %s storage layout: %s" % (
self.resource_id, str(ex))
@ -550,6 +550,5 @@ class Machines(model_base.ResourceCollectionBase):
res.set_resource_id(resp_json.get('system_id'))
return res
raise errors.DriverError(
"Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
% (url, resp.status_code))


@ -77,8 +77,8 @@ class Partition(model_base.ResourceBase):
resp = self.api_client.post(url, op='format', files=data)
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -109,8 +109,8 @@ class Partition(model_base.ResourceBase):
(self.name, self.system_id))
resp = self.api_client.post(url, op='unformat')
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: unformat of device %s on node %s failed: %s" \
@ -138,8 +138,8 @@ class Partition(model_base.ResourceBase):
(self.resource_id, self.system_id, mount_point))
resp = self.api_client.post(url, op='mount', files=data)
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: mount of device %s on node %s failed: %s" \
@ -163,8 +163,8 @@ class Partition(model_base.ResourceBase):
(self.name, self.system_id))
resp = self.api_client.post(url, op='unmount')
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: unmount of device %s on node %s failed: %s" \
@ -180,8 +180,8 @@ class Partition(model_base.ResourceBase):
(self.resource_id, self.system_id))
resp = self.api_client.post(url, op='set_boot_disk')
if not resp.ok:
raise Exception("MAAS error: %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: setting device %s on node %s to boot failed: %s" \


@ -64,8 +64,8 @@ class Tag(model_base.ResourceBase):
"""
if system_id in self.get_applied_nodes():
self.logger.debug("Tag %s already applied to node %s" %
(self.name, system_id))
self.logger.debug(
"Tag %s already applied to node %s" % (self.name, system_id))
else:
url = self.interpolate_url()


@ -60,9 +60,8 @@ class VolumeGroup(model_base.ResourceBase):
data = {'name': name, 'uuid': uuid_str, 'size': size}
self.logger.debug(
"Creating logical volume %s in VG %s on node %s" %
(name, self.name, self.system_id))
self.logger.debug("Creating logical volume %s in VG %s on node %s"
% (name, self.name, self.system_id))
url = self.interpolate_url()
@ -70,8 +69,8 @@ class VolumeGroup(model_base.ResourceBase):
url, op='create_logical_volume', files=data)
if not resp.ok:
raise Exception("MAAS error - %s - %s" % (resp.status_code,
resp.txt))
raise Exception(
"MAAS error - %s - %s" % (resp.status_code, resp.txt))
res = resp.json()
if 'id' in res:
@ -106,8 +105,8 @@ class VolumeGroup(model_base.ResourceBase):
url, op='delete_logical_volume', files={'id': target_lv})
if not resp.ok:
raise Exception("MAAS error - %s - %s" % (resp.status_code,
resp.text))
raise Exception(
"MAAS error - %s - %s" % (resp.status_code, resp.text))
else:
raise Exception("VG %s has no logical volumes" % self.name)
except Exception as ex:


@ -41,8 +41,8 @@ class LibvirtBaseAction(BaseAction):
virsh_url = node.oob_parameters.get('libvirt_uri', None)
if not virsh_url:
raise errors.DriverError("Node %s has no 'libvirt_url' defined" %
(node.name))
raise errors.DriverError(
"Node %s has no 'libvirt_url' defined" % (node.name))
url_parts = urlparse(virsh_url)
@ -51,8 +51,8 @@ class LibvirtBaseAction(BaseAction):
"Node %s has invalid libvirt URL scheme %s. "
"Only 'qemu+ssh' supported." % (node.name, url_parts.scheme))
self.logger.debug("Starting libvirt session to hypervisor %s " %
(virsh_url))
self.logger.debug(
"Starting libvirt session to hypervisor %s " % (virsh_url))
virsh_ses = libvirt.open(virsh_url)
if not virsh_ses:
@ -213,8 +213,8 @@ class SetNodeBoot(LibvirtBaseAction):
ctx=n.name,
ctx_type='node')
self.task.failure(focus=n.name)
self.logger.warning("Unable to set node %s to PXE boot." %
(n.name))
self.logger.warning(
"Unable to set node %s to PXE boot." % (n.name))
else:
self.task.add_status_msg(
msg="Set bootdev to PXE.",


@ -82,9 +82,8 @@ class LibvirtDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" % (self.driver_desc,
task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()


@ -48,15 +48,14 @@ class ManualDriver(oob.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" % (self.driver_desc,
task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
design_ref = task.design_ref
if design_ref is None:
raise errors.DriverError("No design ID specified in task %s" %
(task_id))
raise errors.DriverError(
"No design ID specified in task %s" % (task_id))
self.orchestrator.task_field_update(
task.get_id(), status=hd_fields.TaskStatus.Running)


@ -44,8 +44,8 @@ class PyghmiBaseAction(BaseAction):
ipmi_address = node.get_network_address(ipmi_network)
if ipmi_address is None:
raise errors.DriverError("Node %s has no IPMI address" %
(node.name))
raise errors.DriverError(
"Node %s has no IPMI address" % (node.name))
ipmi_account = node.oob_parameters['account']
ipmi_credential = node.oob_parameters['credential']
@ -176,8 +176,8 @@ class SetNodeBoot(PyghmiBaseAction):
ctx=n.name,
ctx_type='node')
self.task.failure(focus=n.name)
self.logger.warning("Unable to set node %s to PXE boot." %
(n.name))
self.logger.warning(
"Unable to set node %s to PXE boot." % (n.name))
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()


@ -86,9 +86,8 @@ class PyghmiDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" % (self.driver_desc,
task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()


@ -47,10 +47,7 @@ class DrydockClient(object):
:return: A list of node names based on the node_filter and design_ref.
"""
endpoint = 'v1.0/nodefilter'
body = {
'node_filter': node_filter,
'design_ref': design_ref
}
body = {'node_filter': node_filter, 'design_ref': design_ref}
resp = self.session.post(endpoint, data=body)
self._check_response(resp)
@ -72,7 +69,11 @@ class DrydockClient(object):
return resp.json()
def get_task(self, task_id, builddata=None, subtaskerrors=None, layers=None):
def get_task(self,
task_id,
builddata=None,
subtaskerrors=None,
layers=None):
"""
Get the current description of a Drydock task
@ -138,9 +139,7 @@ class DrydockClient(object):
:return: A dict containing the validation.
"""
endpoint = 'v1.0/validatedesign'
body = {
'href': href
}
body = {'href': href}
resp = self.session.post(endpoint, data=body)
self._check_response(resp)


@ -171,9 +171,10 @@ class DrydockSession(object):
elif timeout is not None:
raise ValueError("Non integer timeout value")
except ValueError:
self.logger.warn("Timeout value must be a tuple of integers or a "
"single integer. Proceeding with values of "
"(%s, %s)", connect_timeout, read_timeout)
self.logger.warn(
"Timeout value must be a tuple of integers or a "
"single integer. Proceeding with values of "
"(%s, %s)", connect_timeout, read_timeout)
return (connect_timeout, read_timeout)


@ -50,8 +50,8 @@ class Ingester(object):
klass = getattr(mod, classname)
self.registered_plugin = klass()
except Exception as ex:
self.logger.error("Could not enable plugin %s - %s" % (plugin,
str(ex)))
self.logger.error(
"Could not enable plugin %s - %s" % (plugin, str(ex)))
if self.registered_plugin is None:
self.logger.error("Could not enable at least one plugin")


@ -208,9 +208,9 @@ class DeckhandIngester(IngesterPlugin):
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError('Unknown definition_type in '
'tag_definition instance: %s' %
(t.definition_type))
raise errors.IngesterError(
'Unknown definition_type in '
'tag_definition instance: %s' % (t.definition_type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])


@ -193,9 +193,9 @@ class YamlIngester(IngesterPlugin):
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError('Unknown definition_type in '
'tag_definition instance: %s' %
(t.definition_type))
raise errors.IngesterError(
'Unknown definition_type in '
'tag_definition instance: %s' % (t.definition_type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])


@ -114,9 +114,10 @@ class HostProfile(base.DrydockPersistentObject, base.DrydockObject):
# applied values
for f in inheritable_field_list:
setattr(self, f,
objects.Utils.apply_field_inheritance(
getattr(self, f, None), getattr(parent, f, None)))
setattr(
self, f,
objects.Utils.apply_field_inheritance(
getattr(self, f, None), getattr(parent, f, None)))
# Now compute inheritance for complex types
self.oob_parameters = objects.Utils.merge_dicts(
@ -373,10 +374,10 @@ class HostVolumeGroup(base.DrydockObject):
inheritable_field_list = ['vg_uuid']
for f in inheritable_field_list:
setattr(p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(
i, f, None)))
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
p.partitions = HostPartitionList.from_basic_list(
HostPartition.merge_lists(
@ -487,10 +488,10 @@ class HostStorageDevice(base.DrydockObject):
inherit_field_list = ['volume_group']
for f in inherit_field_list:
setattr(p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(
i, f, None)))
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
p.labels = objects.Utils.merge_dicts(
getattr(j, 'labels', None),
@ -634,10 +635,10 @@ class HostPartition(base.DrydockObject):
p.name = j.get_name()
for f in inherit_field_list:
setattr(p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(
i, f, None)))
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
add = False
p.source = hd_fields.ModelSource.Compiled
effective_list.append(p)
@ -761,10 +762,10 @@ class HostVolume(base.DrydockObject):
p.name = j.get_name()
for f in inherit_field_list:
setattr(p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(
i, f, None)))
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
add = False
p.source = hd_fields.ModelSource.Compiled
effective_list.append(p)


@ -196,8 +196,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
:param address: String value that is used to find the logicalname.
:return: String value of the logicalname or the alias_name if logicalname is not found.
"""
nodes = xml_root.findall(
".//node[businfo='" + bus_type + "@" + address + "'].logicalname")
nodes = xml_root.findall(".//node[businfo='" + bus_type + "@" +
address + "'].logicalname")
if len(nodes) >= 1 and nodes[0].text:
if (len(nodes) > 1):
self.logger.info("Multiple nodes found for businfo=%s@%s" %
@ -205,9 +205,9 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
for logicalname in reversed(nodes[0].text.split("/")):
self.logger.debug(
"Logicalname build dict: node_name = %s, alias_name = %s, "
"bus_type = %s, address = %s, to logicalname = %s" % (
self.get_name(), alias_name, bus_type, address,
logicalname))
"bus_type = %s, address = %s, to logicalname = %s" %
(self.get_name(), alias_name, bus_type, address,
logicalname))
return logicalname
self.logger.debug(
"Logicalname build dict: alias_name = %s, bus_type = %s, address = %s, not found"
@ -234,20 +234,21 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
if xml_data:
xml_root = fromstring(xml_data)
try:
hardware_profile = site_design.get_hardware_profile(self.hardware_profile)
hardware_profile = site_design.get_hardware_profile(
self.hardware_profile)
for device in hardware_profile.devices:
logicalname = self._apply_logicalname(
xml_root, device.alias, device.bus_type,
device.address)
logicalnames[device.alias] = logicalname
except errors.DesignError:
self.logger.exception("Failed to load hardware profile while "
"resolving logical names for node %s",
self.get_name())
self.logger.exception(
"Failed to load hardware profile while "
"resolving logical names for node %s", self.get_name())
raise
else:
self.logger.info("No Build Data found for node_name %s" %
(self.get_name()))
self.logger.info(
"No Build Data found for node_name %s" % (self.get_name()))
self.logicalnames = logicalnames


@ -184,8 +184,8 @@ class Task(object):
" marked for termination")
if self.statemgr.add_subtask(self.task_id, subtask.task_id):
self.add_status_msg(
msg="Started subtask %s for action %s" %
(str(subtask.get_id()), subtask.action),
msg="Started subtask %s for action %s" % (str(
subtask.get_id()), subtask.action),
error=False,
ctx=str(self.get_id()),
ctx_type='task')


@ -357,8 +357,8 @@ class PrepareSite(BaseAction):
error=False,
ctx=str(site_network_task.get_id()),
ctx_type='task')
self.logger.info("Node driver task %s complete" %
(site_network_task.get_id()))
self.logger.info(
"Node driver task %s complete" % (site_network_task.get_id()))
def step_usercredentials(self, driver):
"""Run the ConfigureUserCredentials step of this action.
@ -381,8 +381,8 @@ class PrepareSite(BaseAction):
error=False,
ctx=str(user_creds_task.get_id()),
ctx_type='task')
self.logger.info("Node driver task %s complete" %
(user_creds_task.get_id()))
self.logger.info(
"Node driver task %s complete" % (user_creds_task.get_id()))
class VerifyNodes(BaseAction):
@ -640,9 +640,8 @@ class PrepareNodes(BaseAction):
create_nodefilter_from_nodelist(node_list))
self.task.register_subtask(node_identify_task)
self.logger.info(
"Starting node driver task %s to identify nodes." %
(node_identify_task.get_id()))
self.logger.info("Starting node driver task %s to identify nodes."
% (node_identify_task.get_id()))
node_driver.execute_task(node_identify_task.get_id())
@ -892,8 +891,8 @@ class DeployNodes(BaseAction):
if (node_storage_task is not None
and len(node_storage_task.result.successes) > 0):
self.logger.info(
"Configured storage on %s nodes, configuring platform." %
(len(node_storage_task.result.successes)))
"Configured storage on %s nodes, configuring platform." % (len(
node_storage_task.result.successes)))
node_platform_task = self.orchestrator.create_task(
design_ref=self.task.design_ref,
@ -919,8 +918,8 @@ class DeployNodes(BaseAction):
if node_platform_task is not None and len(
node_platform_task.result.successes) > 0:
self.logger.info(
"Configured platform on %s nodes, starting deployment." %
(len(node_platform_task.result.successes)))
"Configured platform on %s nodes, starting deployment." % (len(
node_platform_task.result.successes)))
while True:
if node_deploy_task is None:


@ -391,8 +391,7 @@ class Orchestrator(object):
raise AttributeError()
except AttributeError:
self.logger.debug(
"Invalid site design, no baremetal nodes in site_design."
)
"Invalid site design, no baremetal nodes in site_design.")
return []
if node_filter is None:


@ -55,10 +55,10 @@ class BootStorageRational(Validators):
msg = (
'Root volume has an invalid size format on BaremetalNode'
'%s.' % baremetal_node.name)
self.report_error(msg, [
baremetal_node.doc_ref
], "Use a valid root volume storage specification."
)
self.report_error(
msg, [baremetal_node.doc_ref],
"Use a valid root volume storage specification."
)
# check make sure root has been defined and boot volume > 1GB
if root_set and host_partition.name == 'boot':
@ -78,18 +78,18 @@ class BootStorageRational(Validators):
msg = (
'Boot volume has an invalid size format on BaremetalNode '
'%s.' % baremetal_node.name)
self.report_error(msg, [
baremetal_node.doc_ref
], "Use a valid boot volume storage specification."
)
self.report_error(
msg, [baremetal_node.doc_ref],
"Use a valid boot volume storage specification."
)
# This must be set
if not root_set:
msg = (
'Root volume has to be set and must be > 20GB on BaremetalNode '
'%s' % baremetal_node.name)
self.report_error(msg, [
baremetal_node.doc_ref
], "All nodes require a defined root volume at least 20GB in size."
)
self.report_error(
msg, [baremetal_node.doc_ref],
"All nodes require a defined root volume at least 20GB in size."
)
return


@ -83,9 +83,10 @@ class IpLocalityCheck(Validators):
if ip_address_network_name not in network_dict:
msg = '%s is not a valid network.' \
% (ip_address_network_name)
self.report_error(msg, [
node.doc_ref
], "Define network or correct address definition.")
self.report_error(
msg, [node.doc_ref],
"Define network or correct address definition."
)
else:
if IPAddress(address) not in IPNetwork(
network_dict[ip_address_network_name]):


@ -37,9 +37,8 @@ class MtuRational(Validators):
mtu = network_link.mtu
if mtu and (mtu < MtuRational.MIN_MTU_SIZE
or mtu > MtuRational.MAX_MTU_SIZE):
msg = ("MTU must be between %d and %d, value is %d" %
(MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE,
mtu))
msg = ("MTU must be between %d and %d, value is %d" % (
MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
self.report_error(
msg, [network_link.doc_ref],
"Define a valid MTU. Standard is 1500, Jumbo is 9100.")
@ -53,9 +52,8 @@ class MtuRational(Validators):
if network_mtu and (network_mtu < MtuRational.MIN_MTU_SIZE
or network_mtu > MtuRational.MAX_MTU_SIZE):
msg = ("MTU must be between %d and %d, value is %d" %
(MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE,
mtu))
msg = ("MTU must be between %d and %d, value is %d" % (
MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
self.report_error(
msg, [network.doc_ref],
"Define a valid MTU. Standard is 1500, Jumbo is 9100.")
@ -67,9 +65,9 @@ class MtuRational(Validators):
if network_mtu > parent_mtu:
msg = 'MTU must be <= the parent Network Link; for Network %s' % (
network.name)
self.report_error(msg, [
network.doc_ref
], "Define a MTU less than or equal to that of the carrying network link."
)
self.report_error(
msg, [network.doc_ref],
"Define a MTU less than or equal to that of the carrying network link."
)
return


@ -34,10 +34,10 @@ class NetworkTrunkingRational(Validators):
hd_fields.NetworkLinkTrunkingMode.Disabled):
msg = ('If there is more than 1 allowed network,'
'trunking mode must be enabled')
self.report_error(msg, [
network_link.doc_ref
], "Reduce the allowed network list to 1 or enable trunking on the link."
)
self.report_error(
msg, [network_link.doc_ref],
"Reduce the allowed network list to 1 or enable trunking on the link."
)
# trunking mode is disabled, default_network must be defined
if (network_link.trunk_mode ==
@ -54,9 +54,9 @@ class NetworkTrunkingRational(Validators):
network = site_design.get_network(network_link.native_network)
if network and network.vlan_id:
msg = "Network link native network has a defined VLAN tag."
self.report_error(msg, [
network.doc_ref, network_link.doc_ref
], "Tagged network not allowed on non-trunked network links."
)
self.report_error(
msg, [network.doc_ref, network_link.doc_ref],
"Tagged network not allowed on non-trunked network links."
)
return


@ -46,6 +46,7 @@ class IpmiValidity(Validators):
if not oob_addr:
msg = ('OOB address missing for IPMI node %s.' %
baremetal_node.name)
self.report_error(msg, [baremetal_node.doc_ref],
"Provide address to node OOB interface.")
self.report_error(
msg, [baremetal_node.doc_ref],
"Provide address to node OOB interface.")
return


@ -44,8 +44,8 @@ class LibvirtValidity(Validators):
"Only scheme 'qemu+ssh' is supported.")
if not baremetal_node.boot_mac:
msg = 'libvirt-based node requries defined boot MAC address.'
self.report_error(msg, [
baremetal_node.doc_ref
], "Specify the node's PXE MAC address in metadata.boot_mac"
)
self.report_error(
msg, [baremetal_node.doc_ref],
"Specify the node's PXE MAC address in metadata.boot_mac"
)
return


@ -50,9 +50,9 @@ class PlatformSelection(Validators):
if n.kernel in valid_kernels[n.image]:
continue
msg = "Platform Validation: invalid kernel %s" % (n.kernel)
self.report_error(msg, [n.doc_ref],
"Select a valid kernel from: %s" % ",".join(
valid_kernels[n.image]))
self.report_error(
msg, [n.doc_ref], "Select a valid kernel from: %s" %
",".join(valid_kernels[n.image]))
continue
msg = "Platform Validation: invalid image %s" % (n.image)
self.report_error(


@ -52,18 +52,18 @@ class RationalNetworkBond(Validators):
if network_link.bonding_up_delay < mon_rate:
msg = ('Up delay %d is less than mon rate %d' %
(network_link.bonding_up_delay, mon_rate))
self.report_error(msg, [
network_link.doc_ref
], "Link up delay must be equal or greater than the mon_rate"
)
self.report_error(
msg, [network_link.doc_ref],
"Link up delay must be equal or greater than the mon_rate"
)
if network_link.bonding_down_delay < mon_rate:
msg = ('Down delay %d is less than mon rate %d' %
(network_link.bonding_down_delay, mon_rate))
self.report_error(msg, [
network_link.doc_ref
], "Link down delay must be equal or greater than the mon_rate"
)
self.report_error(
msg, [network_link.doc_ref],
"Link down delay must be equal or greater than the mon_rate"
)
elif bonding_mode in ['active-backup', 'balanced-rr']:
# make sure hash and peer_rate are NOT defined


@ -70,12 +70,11 @@ class StoragePartitioning(Validators):
all_volume_groups = baremetal_node.volume_groups or []
for volume_group in all_volume_groups:
if volume_group.name not in volume_group_check_list:
msg = (
'Volume group %s not assigned any physical volumes' %
(volume_group.name))
self.report_error(msg, [
baremetal_node.doc_ref
], "Each volume group should be assigned at least one storage device "
"or partition as a physical volume.")
msg = ('Volume group %s not assigned any physical volumes'
% (volume_group.name))
self.report_error(
msg, [baremetal_node.doc_ref],
"Each volume group should be assigned at least one storage device "
"or partition as a physical volume.")
return


@ -50,10 +50,10 @@ class StorageSizing(Validators):
msg = (
'Cumulative partition sizes on device %s is greater than 99%%.'
% (storage_device.name))
self.report_error(msg, [
baremetal_node.doc_ref
], "Percentage-based sizes must sum to less than 100%."
)
self.report_error(
msg, [baremetal_node.doc_ref],
"Percentage-based sizes must sum to less than 100%."
)
volume_groups = baremetal_node.volume_groups or []
volume_sum = 0
@ -74,9 +74,9 @@ class StorageSizing(Validators):
msg = ('Cumulative logical volume size is greater '
'than 99% in volume group %s' %
(volume_group.name))
self.report_error(msg, [
baremetal_node.doc_ref
], "Percentage-based sizes must sum to less than 100%."
)
self.report_error(
msg, [baremetal_node.doc_ref],
"Percentage-based sizes must sum to less than 100%."
)
return


@ -72,16 +72,16 @@ class UniqueNetworkCheck(Validators):
msg = (
"Interface %s attached to network %s not allowed on interface link"
% (i.get_name(), nw))
self.report_error(msg, [
n.doc_ref
], "Interfaces can only be attached to networks allowed on the network link "
"connected to the interface.")
self.report_error(
msg, [n.doc_ref],
"Interfaces can only be attached to networks allowed on the network link "
"connected to the interface.")
except KeyError:
msg = (
"Interface %s connected to undefined network link %s."
% (i.get_name(), nic_link))
self.report_error(msg, [
n.doc_ref
], "Define the network link attached to this interface."
)
self.report_error(
msg, [n.doc_ref],
"Define the network link attached to this interface."
)
return


@ -241,9 +241,8 @@ class DrydockState(object):
conn.close()
return True
except Exception as ex:
self.logger.error(
"Error inserting result message for task %s: %s" %
(str(task_id), str(ex)))
self.logger.error("Error inserting result message for task %s: %s"
% (str(task_id), str(ex)))
return False
def _assemble_tasks(self, task_list=None):
@ -282,14 +281,14 @@ class DrydockState(object):
"""
try:
conn = self.db_engine.connect()
query = self.tasks_tbl.insert().values(
**(task.to_db(include_id=True)))
query = self.tasks_tbl.insert().values(**(
task.to_db(include_id=True)))
conn.execute(query)
conn.close()
return True
except Exception as ex:
self.logger.error("Error inserting task %s: %s" %
(str(task.task_id), str(ex)))
self.logger.error(
"Error inserting task %s: %s" % (str(task.task_id), str(ex)))
return False
def put_task(self, task):
@ -300,8 +299,8 @@ class DrydockState(object):
try:
conn = self.db_engine.connect()
query = self.tasks_tbl.update().where(
self.tasks_tbl.c.task_id == task.task_id.bytes).values(
**(task.to_db(include_id=False)))
self.tasks_tbl.c.task_id == task.task_id.bytes).values(**(
task.to_db(include_id=False)))
rs = conn.execute(query)
if rs.rowcount == 1:
conn.close()
@ -310,8 +309,8 @@ class DrydockState(object):
conn.close()
return False
except Exception as ex:
self.logger.error("Error updating task %s: %s" %
(str(task.task_id), str(ex)))
self.logger.error(
"Error updating task %s: %s" % (str(task.task_id), str(ex)))
return False
def add_subtask(self, task_id, subtask_id):
@ -382,8 +381,8 @@ class DrydockState(object):
"ON CONFLICT (dummy_key) DO UPDATE SET "
"identity = :instance_id, last_ping = timezone('UTC', now()) "
"WHERE active_instance.last_ping < (now() - interval '%d seconds')"
% (config.config_mgr.conf.leader_grace_period
)).execution_options(autocommit=True)
% (config.config_mgr.conf.leader_grace_period)).execution_options(
autocommit=True)
try:
conn = self.db_engine.connect()


@ -26,8 +26,7 @@ from drydock_provisioner.control.api import start_api
class TestClass(object):
def test_bootaction_context(self, falcontest, seed_bootaction):
"""Test that the API will return a boot action context"""
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
'nodename']
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename']
auth_hdr = {'X-Bootaction-Key': "%s" % seed_bootaction['identity_key']}
result = falcontest.simulate_get(url, headers=auth_hdr)
@ -48,8 +47,7 @@ class TestClass(object):
def test_bootaction_context_noauth(self, falcontest, seed_bootaction):
"""Test that the API will return a boot action context"""
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
'nodename']
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename']
result = falcontest.simulate_get(url)
@ -57,8 +55,7 @@ class TestClass(object):
def test_bootaction_context_badauth(self, falcontest, seed_bootaction):
"""Test that the API will return a boot action context"""
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction[
'nodename']
url = "/api/v1.0/bootactions/nodes/%s/units" % seed_bootaction['nodename']
auth_hdr = {'X-Bootaction-Key': 'deadbeef'}
result = falcontest.simulate_get(url, headers=auth_hdr)


@ -29,7 +29,8 @@ LOG = logging.getLogger(__name__)
class TestNodesApiUnit(object):
def test_post_nodes_resp(self, input_files, falcontest, mock_process_node_filter):
def test_post_nodes_resp(self, input_files, falcontest,
mock_process_node_filter):
input_file = input_files.join("deckhand_fullsite.yaml")
design_ref = "file://%s" % str(input_file)
@ -82,6 +83,7 @@ class TestNodesApiUnit(object):
}
return hdr
@pytest.fixture()
def mock_process_node_filter(deckhand_orchestrator):
def side_effect(**kwargs):
@ -97,5 +99,6 @@ def mock_process_node_filter(deckhand_orchestrator):
deckhand_orchestrator.process_node_filter = Mock(side_effect=side_effect)
yield
deckhand_orchestrator.process_node_filter = Mock(wraps=None, side_effect=None)
deckhand_orchestrator.process_node_filter = Mock(
wraps=None, side_effect=None)
deckhand_orchestrator.process_node_filter = deckhand_orchestrator.real_process_node_filter


@ -38,7 +38,8 @@ class TestTasksApiUnit(object):
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json['task_id'] == '11111111-1111-1111-1111-111111111111'
assert response_json[
'task_id'] == '11111111-1111-1111-1111-111111111111'
try:
response_json['build_data']
key_error = False
@ -56,29 +57,36 @@ class TestTasksApiUnit(object):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='subtaskerrors=true')
result = falcontest.simulate_get(
url, headers=hdr, query_string='subtaskerrors=true')
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json['task_id'] == '11111111-1111-1111-1111-111111111111'
assert response_json[
'task_id'] == '11111111-1111-1111-1111-111111111111'
assert response_json['subtask_errors'] == {}
def test_get_tasks_id_subtaskerror_errors_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='subtaskerrors=true')
result = falcontest.simulate_get(
url, headers=hdr, query_string='subtaskerrors=true')
assert result.status == falcon.HTTP_200
response_json = json.loads(result.text)
assert response_json['task_id'] == '11111111-1111-1111-1111-111111111113'
assert response_json['subtask_errors']['11111111-1111-1111-1111-111111111116']['details']['errorCount'] == 1
assert response_json[
'task_id'] == '11111111-1111-1111-1111-111111111113'
assert response_json['subtask_errors'][
'11111111-1111-1111-1111-111111111116']['details'][
'errorCount'] == 1
def test_get_tasks_id_builddata_resp(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='builddata=true')
result = falcontest.simulate_get(
url, headers=hdr, query_string='builddata=true')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
@ -95,7 +103,8 @@ class TestTasksApiUnit(object):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111111'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='builddata=true&subtaskerrors=true')
result = falcontest.simulate_get(
url, headers=hdr, query_string='builddata=true&subtaskerrors=true')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
@ -107,7 +116,8 @@ class TestTasksApiUnit(object):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='layers=2')
result = falcontest.simulate_get(
url, headers=hdr, query_string='layers=2')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
@ -130,7 +140,8 @@ class TestTasksApiUnit(object):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='layers=-1')
result = falcontest.simulate_get(
url, headers=hdr, query_string='layers=-1')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
@ -159,7 +170,8 @@ class TestTasksApiUnit(object):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111113'
hdr = self.get_standard_header()
result = falcontest.simulate_get(url, headers=hdr, query_string='layers=-1&subtaskerrors=true')
result = falcontest.simulate_get(
url, headers=hdr, query_string='layers=-1&subtaskerrors=true')
LOG.debug(result.text)
assert result.status == falcon.HTTP_200
@ -177,7 +189,9 @@ class TestTasksApiUnit(object):
except KeyError as ex:
key_error = True
assert key_error is False
assert response_json['subtask_errors']['11111111-1111-1111-1111-111111111116']['details']['errorCount'] == 1
assert response_json['subtask_errors'][
'11111111-1111-1111-1111-111111111116']['details'][
'errorCount'] == 1
def test_input_not_found(self, falcontest):
url = '/api/v1.0/tasks/11111111-1111-1111-1111-111111111112'
@ -210,6 +224,7 @@ class TestTasksApiUnit(object):
}
return hdr
@pytest.fixture()
def mock_get_task(drydock_state):
def side_effect(*args):
@ -221,7 +236,8 @@ def mock_get_task(drydock_state):
new_task.task_id = '11111111-1111-1111-1111-111111111111'
new_task.result = objects.TaskStatus()
new_task.result.set_status(hd_fields.ActionResult.Failure)
new_task.result.add_status_msg(msg='Test', error=True, ctx_type='N/A', ctx='N/A')
new_task.result.add_status_msg(
msg='Test', error=True, ctx_type='N/A', ctx='N/A')
return new_task
# Task not found
if task_id == '11111111-1111-1111-1111-111111111112':
@ -230,8 +246,10 @@ def mock_get_task(drydock_state):
if task_id == '11111111-1111-1111-1111-111111111113':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111113'
new_task.subtask_id_list = ['11111111-1111-1111-1111-111111111114',
'11111111-1111-1111-1111-111111111115']
new_task.subtask_id_list = [
'11111111-1111-1111-1111-111111111114',
'11111111-1111-1111-1111-111111111115'
]
return new_task
if task_id == '11111111-1111-1111-1111-111111111114':
new_task = objects.Task()
@ -240,15 +258,18 @@ def mock_get_task(drydock_state):
if task_id == '11111111-1111-1111-1111-111111111115':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111115'
new_task.subtask_id_list = ['11111111-1111-1111-1111-111111111116',
'11111111-1111-1111-1111-111111111117']
new_task.subtask_id_list = [
'11111111-1111-1111-1111-111111111116',
'11111111-1111-1111-1111-111111111117'
]
return new_task
if task_id == '11111111-1111-1111-1111-111111111116':
new_task = objects.Task()
new_task.task_id = '11111111-1111-1111-1111-111111111116'
new_task.result = objects.TaskStatus()
new_task.result.set_status(hd_fields.ActionResult.Failure)
new_task.result.add_status_msg(msg='Test', error=True, ctx_type='N/A', ctx='N/A')
new_task.result.add_status_msg(
msg='Test', error=True, ctx_type='N/A', ctx='N/A')
LOG.debug('error_count')
LOG.debug(new_task.result.error_count)
return new_task


@ -125,6 +125,7 @@ def test_client_task_get():
assert task_resp['status'] == task['status']
@responses.activate
def test_client_get_nodes_for_filter_post():
node_list = ['node1', 'node2']
@ -133,34 +134,29 @@ def test_client_get_nodes_for_filter_post():
responses.add(
responses.POST,
"http://%s/api/v1.0/nodefilter" %
(host),
"http://%s/api/v1.0/nodefilter" % (host),
json=node_list,
status=200)
dd_ses = dc_session.DrydockSession(host)
dd_client = dc_client.DrydockClient(dd_ses)
design_ref = {
'ref': 'hello'
}
design_ref = {'ref': 'hello'}
validation_resp = dd_client.get_nodes_for_filter(design_ref)
assert 'node1' in validation_resp
assert 'node2' in validation_resp
@responses.activate
def test_client_validate_design_post():
validation = {
'status': 'success'
}
validation = {'status': 'success'}
host = 'foo.bar.baz'
responses.add(
responses.POST,
"http://%s/api/v1.0/validatedesign" %
(host),
"http://%s/api/v1.0/validatedesign" % (host),
json=validation,
status=200)


@ -63,8 +63,8 @@ class TestClass(object):
assert len(node_list) == 1
def test_no_baremetal_nodes(self, input_files, setup, deckhand_orchestrator,
deckhand_ingester):
def test_no_baremetal_nodes(self, input_files, setup,
deckhand_orchestrator, deckhand_ingester):
input_file = input_files.join("deckhand_fullsite_no_nodes.yaml")
design_state = DrydockState()
@ -73,6 +73,7 @@ class TestClass(object):
design_status, design_data = deckhand_ingester.ingest_data(
design_state=design_state, design_ref=design_ref)
node_list = deckhand_orchestrator.process_node_filter(None, design_data)
node_list = deckhand_orchestrator.process_node_filter(
None, design_data)
assert node_list == []