From cb8712281d711d7e5b6ab5322796e4c2a714a243 Mon Sep 17 00:00:00 2001
From: Pavlo Shchelokovskyy
Date: Thu, 18 Dec 2014 14:03:37 +0200
Subject: [PATCH] Enable H904 style checking rule

Use parentheses instead of backslashes for line continuation.
In addition, escaped quotes in long strings are avoided where it
makes sense.

Change-Id: If2e78012b85a4430c6f03f65784cac2d032cf116
---
 .../extraroute/tests/test_extraroute.py | 30 +-
 .../rackspace/resources/cloud_loadbalancer.py | 4 +-
 .../rackspace/resources/cloud_server.py | 8 +-
 .../tests/test_cloud_loadbalancer.py | 30 +-
 .../tests/test_rackspace_cloud_server.py | 12 +-
 heat/common/auth_password.py | 2 +-
 heat/common/config.py | 8 +-
 heat/common/environment_format.py | 7 +-
 heat/common/exception.py | 8 +-
 heat/common/heat_keystoneclient.py | 2 +-
 heat/common/wsgi.py | 10 +-
 heat/db/sqlalchemy/api.py | 123 ++---
 .../versions/022_stack_event_soft_delete.py | 13 +-
 .../versions/035_event_uuid_to_id.py | 4 +-
 .../migrate_repo/versions/045_stack_backup.py | 4 +-
 heat/engine/cfn/template.py | 12 +-
 heat/engine/environment.py | 4 +-
 heat/engine/event.py | 8 +-
 heat/engine/hot/template.py | 11 +-
 heat/engine/parameters.py | 4 +-
 .../engine/resources/aws/autoscaling_group.py | 4 +-
 heat/engine/resources/ceilometer/alarm.py | 6 +-
 heat/engine/resources/eip.py | 7 +-
 heat/engine/resources/instance.py | 10 +-
 heat/engine/resources/loadbalancer.py | 4 +-
 heat/engine/resources/neutron/port.py | 8 +-
 heat/engine/resources/nova_floatingip.py | 4 +-
 heat/engine/resources/openstack/volume.py | 4 +-
 .../openstack/wait_condition_handle.py | 10 +-
 heat/engine/resources/resource_group.py | 24 +-
 heat/engine/resources/server.py | 8 +-
 .../software_config/software_component.py | 4 +-
 .../software_config/software_deployment.py | 4 +-
 heat/engine/resources/swiftsignal.py | 2 +-
 heat/engine/rsrc_defn.py | 4 +-
 heat/engine/stack.py | 4 +-
 heat/engine/stack_lock.py | 4 +-
 heat/engine/stack_resource.py | 4 +-
 heat/tests/aws/test_waitcondition.py | 4 +-
 heat/tests/common.py | 4 +-
 heat/tests/fakes.py | 20 +-
 heat/tests/generic_resource.py | 4 +-
 heat/tests/openstack/test_waitcondition.py | 4 +-
 heat/tests/test_api_openstack_v1.py | 10 +-
 heat/tests/test_autoscaling_update_policy.py | 12 +-
 heat/tests/test_ceilometer_alarm.py | 4 +-
 heat/tests/test_cinder_client.py | 8 +-
 heat/tests/test_eip.py | 10 +-
 heat/tests/test_engine_service.py | 41 +-
 heat/tests/test_fault_middleware.py | 8 +-
 heat/tests/test_glance_client.py | 12 +-
 heat/tests/test_heatclient.py | 10 +-
 heat/tests/test_hot.py | 4 +-
 heat/tests/test_identifier.py | 4 +-
 heat/tests/test_instance.py | 14 +-
 .../test_instance_group_update_policy.py | 12 +-
 heat/tests/test_lifecycle_plugin_utils.py | 4 +-
 heat/tests/test_loadbalancer.py | 4 +-
 heat/tests/test_nested_stack.py | 106 ++--
 heat/tests/test_neutron_autoscaling.py | 56 +-
 heat/tests/test_nokey.py | 4 +-
 heat/tests/test_notifications.py | 8 +-
 heat/tests/test_parameters.py | 4 +-
 heat/tests/test_parser.py | 10 +-
 heat/tests/test_properties.py | 29 +-
 heat/tests/test_provider_template.py | 25 +-
 heat/tests/test_remote_stack.py | 8 +-
 heat/tests/test_resource_group.py | 2 +-
 heat/tests/test_server.py | 112 ++--
 heat/tests/test_software_component.py | 4 +-
 heat/tests/test_sqlalchemy_api.py | 4 +-
 heat/tests/test_stack_resource.py | 8 +-
 heat/tests/test_swiftsignal.py | 8 +-
 heat/tests/test_validate.py | 488 ++++++++----------
 .../test_version_negotiation_middleware.py | 9 +-
 heat/tests/test_watch.py | 4 +-
 heat/tests/test_wsgi.py | 6 +-
 tox.ini | 3 +-
 78 files
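As context for the hunks that follow: H904 flags trailing-backslash line
continuations and prefers implicit continuation inside parentheses. Below is
a minimal, standalone Python sketch of the two patterns this patch applies;
the variable names are invented for illustration, and only the quoted error
message mirrors the heat_keystoneclient.py hunk further down.

    name, count = 'teststack', 3

    # Before (flagged by H904): a trailing backslash continues the statement.
    #     message = 'stack %s has %d events' \
    #         % (name, count)
    # After: implicit continuation inside parentheses.
    message = ('stack %s has %d events'
               % (name, count))

    # Related cleanup: switch the outer quote style rather than escaping
    # quotes inside the string.
    #     error = 'Can\'t find domain id for %s!' % name   # before
    error = "Can't find domain id for %s!" % name           # after

    print(message)
    print(error)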
changed, 750 insertions(+), 760 deletions(-) diff --git a/contrib/extraroute/extraroute/tests/test_extraroute.py b/contrib/extraroute/extraroute/tests/test_extraroute.py index cd1de60507..800057afe9 100644 --- a/contrib/extraroute/extraroute/tests/test_extraroute.py +++ b/contrib/extraroute/extraroute/tests/test_extraroute.py @@ -76,8 +76,8 @@ class NeutronExtraRouteTest(common.HeatTestCase): def test_extraroute(self): # add first route neutronclient.Client.show_router( - '3e46229d-8fce-4733-819a-b5fe630550f8')\ - .AndReturn({'router': {'routes': []}}) + '3e46229d-8fce-4733-819a-b5fe630550f8' + ).AndReturn({'router': {'routes': []}}) neutronclient.Client.update_router( '3e46229d-8fce-4733-819a-b5fe630550f8', {"router": { @@ -87,9 +87,9 @@ class NeutronExtraRouteTest(common.HeatTestCase): }}).AndReturn(None) # add second route neutronclient.Client.show_router( - '3e46229d-8fce-4733-819a-b5fe630550f8')\ - .AndReturn({'router': {'routes': [{"destination": "192.168.0.0/24", - "nexthop": "1.1.1.1"}]}}) + '3e46229d-8fce-4733-819a-b5fe630550f8' + ).AndReturn({'router': {'routes': [{"destination": "192.168.0.0/24", + "nexthop": "1.1.1.1"}]}}) neutronclient.Client.update_router( '3e46229d-8fce-4733-819a-b5fe630550f8', {"router": { @@ -100,12 +100,12 @@ class NeutronExtraRouteTest(common.HeatTestCase): }}).AndReturn(None) # first delete neutronclient.Client.show_router( - '3e46229d-8fce-4733-819a-b5fe630550f8')\ - .AndReturn({'router': - {'routes': [{"destination": "192.168.0.0/24", - "nexthop": "1.1.1.1"}, - {"destination": "192.168.255.0/24", - "nexthop": "1.1.1.1"}]}}) + '3e46229d-8fce-4733-819a-b5fe630550f8' + ).AndReturn({'router': + {'routes': [{"destination": "192.168.0.0/24", + "nexthop": "1.1.1.1"}, + {"destination": "192.168.255.0/24", + "nexthop": "1.1.1.1"}]}}) neutronclient.Client.update_router( '3e46229d-8fce-4733-819a-b5fe630550f8', {"router": { @@ -115,10 +115,10 @@ class NeutronExtraRouteTest(common.HeatTestCase): }}).AndReturn(None) # second delete neutronclient.Client.show_router( - '3e46229d-8fce-4733-819a-b5fe630550f8')\ - .AndReturn({'router': - {'routes': [{"destination": "192.168.255.0/24", - "nexthop": "1.1.1.1"}]}}) + '3e46229d-8fce-4733-819a-b5fe630550f8' + ).AndReturn({'router': + {'routes': [{"destination": "192.168.255.0/24", + "nexthop": "1.1.1.1"}]}}) self.m.ReplayAll() t = template_format.parse(neutron_template) stack = utils.parse_stack(t) diff --git a/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py b/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py index 8037e4458e..4a7ea56508 100644 --- a/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py +++ b/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py @@ -541,8 +541,8 @@ class CloudLoadBalancer(resource.Resource): virtual_ips = self._setup_properties(vips, self.clb.VirtualIP) - (session_persistence, connection_logging, metadata) = \ - self._alter_properties_for_api() + (session_persistence, connection_logging, metadata + ) = self._alter_properties_for_api() lb_body = { 'port': self.properties[self.PORT], diff --git a/contrib/rackspace/rackspace/resources/cloud_server.py b/contrib/rackspace/rackspace/resources/cloud_server.py index ade42b1288..f2b004b374 100644 --- a/contrib/rackspace/rackspace/resources/cloud_server.py +++ b/contrib/rackspace/rackspace/resources/cloud_server.py @@ -208,12 +208,12 @@ class CloudServer(server.Server): self.client_plugin().refresh_server(server) - if 'rack_connect' in self.context.roles and not \ - self._check_rack_connect_complete(server): + if 
('rack_connect' in self.context.roles and not + self._check_rack_connect_complete(server)): return False - if 'rax_managed' in self.context.roles and not \ - self._check_managed_cloud_complete(server): + if ('rax_managed' in self.context.roles and not + self._check_managed_cloud_complete(server)): return False return True diff --git a/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py b/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py index a2f8657593..a812b70860 100644 --- a/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py +++ b/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py @@ -32,8 +32,7 @@ from ..resources import cloud_loadbalancer as lb # noqa # The following fakes are for pyrax -cert = """\ ------BEGIN CERTIFICATE----- +cert = """-----BEGIN CERTIFICATE----- MIIFBjCCAu4CCQDWdcR5LY/+/jANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 cyBQdHkgTHRkMB4XDTE0MTAxNjE3MDYxNVoXDTE1MTAxNjE3MDYxNVowRTELMAkG @@ -63,8 +62,7 @@ eF5whPl36/GK8HUixCibkCyqEOBBuNqhOz7nVLM0eg5L+TE5coizEBagxVCovYSj fQ9zkIgaC5oeH6L0C1FFG1vRNSWokheBk14ztVoJCJyFr6p0/6pD7SeR -----END CERTIFICATE-----""" -private_key = """\ ------BEGIN PRIVATE KEY----- +private_key = """-----BEGIN PRIVATE KEY----- MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDJuTXD9LTCh25U +lHdZPE8Wff/Ljh8FDT27xbL0sgrqY9CdLxgk427gtiOU/wl0bZyxCLfxGq5TQKn I2wwlrUshCrN8w5ppK3qCAxGvKcgENsnLAlxjMQzfexd/8JS2WoFDTNBcBhy2VgY @@ -1032,8 +1030,8 @@ class LoadBalancerTest(common.HeatTestCase): def test_update_session_persistence_delete(self): template = copy.deepcopy(self.lb_template) lb_name = template['Resources'].keys()[0] - template['Resources'][lb_name]['Properties']['sessionPersistence'] = \ - "SOURCE_IP" + template['Resources'][lb_name]['Properties'][ + 'sessionPersistence'] = "SOURCE_IP" expected_body = copy.deepcopy(self.expected_body) expected_body['sessionPersistence'] = {'persistenceType': "SOURCE_IP"} rsrc, fake_loadbalancer = self._mock_loadbalancer(template, @@ -1240,8 +1238,8 @@ class LoadBalancerTest(common.HeatTestCase): def test_update_connection_logging_delete(self): template = copy.deepcopy(self.lb_template) lb_name = template['Resources'].keys()[0] - template['Resources'][lb_name]['Properties']['connectionLogging'] = \ - True + template['Resources'][lb_name]['Properties'][ + 'connectionLogging'] = True expected_body = copy.deepcopy(self.expected_body) expected_body['connectionLogging'] = {'enabled': True} rsrc, fake_loadbalancer = self._mock_loadbalancer(template, @@ -1267,8 +1265,8 @@ class LoadBalancerTest(common.HeatTestCase): def test_update_connection_logging_disable(self): template = copy.deepcopy(self.lb_template) lb_name = template['Resources'].keys()[0] - template['Resources'][lb_name]['Properties']['connectionLogging'] = \ - True + template['Resources'][lb_name]['Properties'][ + 'connectionLogging'] = True expected_body = copy.deepcopy(self.expected_body) expected_body['connectionLogging'] = {'enabled': True} rsrc, fake_loadbalancer = self._mock_loadbalancer(template, @@ -1317,8 +1315,8 @@ class LoadBalancerTest(common.HeatTestCase): def test_update_connection_throttle_delete(self): template = copy.deepcopy(self.lb_template) lb_name = template['Resources'].keys()[0] - template['Resources'][lb_name]['Properties']['connectionThrottle'] = \ - {'maxConnections': 1000} + template['Resources'][lb_name]['Properties'][ + 'connectionThrottle'] = {'maxConnections': 1000} expected_body = copy.deepcopy(self.expected_body) 
expected_body['connectionThrottle'] = { 'maxConnections': 1000, 'maxConnectionRate': None, @@ -1368,8 +1366,8 @@ class LoadBalancerTest(common.HeatTestCase): def test_update_content_caching_deleted(self): template = copy.deepcopy(self.lb_template) lb_name = template['Resources'].keys()[0] - template['Resources'][lb_name]['Properties']['contentCaching'] = \ - 'ENABLED' + template['Resources'][lb_name]['Properties'][ + 'contentCaching'] = 'ENABLED' # Enabling the content cache is done post-creation, so no need # to modify self.expected_body rsrc, fake_loadbalancer = self._mock_loadbalancer(template, @@ -1396,8 +1394,8 @@ class LoadBalancerTest(common.HeatTestCase): def test_update_content_caching_disable(self): template = copy.deepcopy(self.lb_template) lb_name = template['Resources'].keys()[0] - template['Resources'][lb_name]['Properties']['contentCaching'] = \ - 'ENABLED' + template['Resources'][lb_name]['Properties'][ + 'contentCaching'] = 'ENABLED' # Enabling the content cache is done post-creation, so no need # to modify self.expected_body rsrc, fake_loadbalancer = self._mock_loadbalancer(template, diff --git a/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py b/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py index ae5d71e782..20a500e489 100644 --- a/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py +++ b/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py @@ -101,15 +101,15 @@ class CloudServersTest(common.HeatTestCase): stack_name = '%s_s' % name (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['image'] = \ - image_id or 'CentOS 5.2' - tmpl.t['Resources']['WebServer']['Properties']['flavor'] = \ - '256 MB Server' + tmpl.t['Resources']['WebServer']['Properties'][ + 'image'] = image_id or 'CentOS 5.2' + tmpl.t['Resources']['WebServer']['Properties'][ + 'flavor'] = '256 MB Server' server_name = '%s' % name if override_name: - tmpl.t['Resources']['WebServer']['Properties']['name'] = \ - server_name + tmpl.t['Resources']['WebServer']['Properties'][ + 'name'] = server_name resource_defns = tmpl.resource_definitions(stack) server = cloud_server.CloudServer(server_name, diff --git a/heat/common/auth_password.py b/heat/common/auth_password.py index 88b41b9ea8..afacd8ec77 100644 --- a/heat/common/auth_password.py +++ b/heat/common/auth_password.py @@ -65,7 +65,7 @@ class KeystonePasswordAuthProtocol(object): def _reject_request(self, env, start_response, auth_url): """Redirect client to auth server.""" - headers = [('WWW-Authenticate', 'Keystone uri=\'%s\'' % auth_url)] + headers = [('WWW-Authenticate', "Keystone uri='%s'" % auth_url)] resp = exc.HTTPUnauthorized('Authentication required', headers) return resp(env, start_response) diff --git a/heat/common/config.py b/heat/common/config.py index 67ba062a0a..0856ea625e 100644 --- a/heat/common/config.py +++ b/heat/common/config.py @@ -124,10 +124,10 @@ engine_opts = [ 'retries.')), cfg.IntOpt('event_purge_batch_size', default=10, - help=_('Controls how many events will be pruned whenever a ' - ' stack\'s events exceed max_events_per_stack. Set this' - ' lower to keep more events at the expense of more' - ' frequent purges.')), + help=_("Controls how many events will be pruned whenever a " + "stack's events exceed max_events_per_stack. Set this " + "lower to keep more events at the expense of more " + "frequent purges.")), cfg.IntOpt('max_events_per_stack', default=1000, help=_('Maximum events that will be available per stack. 
Older' diff --git a/heat/common/environment_format.py b/heat/common/environment_format.py index faa91476df..e5bfe48443 100644 --- a/heat/common/environment_format.py +++ b/heat/common/environment_format.py @@ -15,8 +15,11 @@ from heat.common.i18n import _ from heat.common import template_format -SECTIONS = (PARAMETERS, RESOURCE_REGISTRY, PARAMETER_DEFAULTS) = \ - ('parameters', 'resource_registry', 'parameter_defaults') +SECTIONS = ( + PARAMETERS, RESOURCE_REGISTRY, PARAMETER_DEFAULTS +) = ( + 'parameters', 'resource_registry', 'parameter_defaults' +) def parse(env_str): diff --git a/heat/common/exception.py b/heat/common/exception.py index 98c82107bc..46e51a832d 100644 --- a/heat/common/exception.py +++ b/heat/common/exception.py @@ -135,8 +135,8 @@ class MissingCredentialError(HeatException): class BadAuthStrategy(HeatException): - msg_fmt = _("Incorrect auth strategy, expected \"%(expected)s\" but " - "received \"%(received)s\"") + msg_fmt = _('Incorrect auth strategy, expected "%(expected)s" but ' + 'received "%(received)s"') class AuthBadRequest(HeatException): @@ -216,8 +216,8 @@ class InvalidTemplateAttribute(HeatException): class InvalidTemplateReference(HeatException): - msg_fmt = _("The specified reference \"%(resource)s\" (in %(key)s)" - " is incorrect.") + msg_fmt = _('The specified reference "%(resource)s" (in %(key)s)' + ' is incorrect.') class UserKeyPairMissing(HeatException): diff --git a/heat/common/heat_keystoneclient.py b/heat/common/heat_keystoneclient.py index 0ef7205dc4..4de855152b 100644 --- a/heat/common/heat_keystoneclient.py +++ b/heat/common/heat_keystoneclient.py @@ -326,7 +326,7 @@ class KeystoneClientV3(object): if len(domains) == 1: return domains[0].id elif len(domains) == 0: - msg = _('Can\'t find domain id for %s!') + msg = _("Can't find domain id for %s!") LOG.error(msg, domain_name) raise exception.Error(msg % domain_name) else: diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py index 948fcef939..57c0fb1d08 100644 --- a/heat/common/wsgi.py +++ b/heat/common/wsgi.py @@ -543,8 +543,8 @@ def is_json_content_type(request): # for back compatible for null or plain content type if not content_type or content_type.startswith('text/plain'): content_type = 'application/json' - if content_type in ('JSON', 'application/json')\ - and request.body.startswith('{'): + if (content_type in ('JSON', 'application/json') + and request.body.startswith('{')): return True return False @@ -565,9 +565,9 @@ class JSONRequestDeserializer(object): try: if len(datastring) > cfg.CONF.max_json_body_size: msg = _('JSON body size (%(len)s bytes) exceeds maximum ' - 'allowed size (%(limit)s bytes).') % \ - {'len': len(datastring), - 'limit': cfg.CONF.max_json_body_size} + 'allowed size (%(limit)s bytes).' 
+ ) % {'len': len(datastring), + 'limit': cfg.CONF.max_json_body_size} raise exception.RequestLimitExceeded(message=msg) return json.loads(datastring) except ValueError as ex: diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py index 9be5a0b87d..8cf34f28f8 100644 --- a/heat/db/sqlalchemy/api.py +++ b/heat/db/sqlalchemy/api.py @@ -123,10 +123,13 @@ def resource_get(context, resource_id): def resource_get_by_name_and_stack(context, resource_name, stack_id): - result = model_query(context, models.Resource).\ - filter_by(name=resource_name).\ - filter_by(stack_id=stack_id).\ - options(orm.joinedload("data")).first() + result = model_query( + context, models.Resource + ).filter_by( + name=resource_name + ).filter_by( + stack_id=stack_id + ).options(orm.joinedload("data")).first() return result @@ -258,9 +261,11 @@ def resource_create(context, values): def resource_get_all_by_stack(context, stack_id): - results = model_query(context, models.Resource).\ - filter_by(stack_id=stack_id).\ - options(orm.joinedload("data")).all() + results = model_query( + context, models.Resource + ).filter_by( + stack_id=stack_id + ).options(orm.joinedload("data")).all() if not results: raise exception.NotFound(_("no resources for stack_id %s were found") @@ -269,23 +274,22 @@ def resource_get_all_by_stack(context, stack_id): def stack_get_by_name_and_owner_id(context, stack_name, owner_id): - query = soft_delete_aware_query(context, models.Stack).\ - filter(sqlalchemy.or_( - models.Stack.tenant == context.tenant_id, - models.Stack.stack_user_project_id == context.tenant_id - )).\ - filter_by(name=stack_name).\ - filter_by(owner_id=owner_id) + query = soft_delete_aware_query( + context, models.Stack + ).filter(sqlalchemy.or_( + models.Stack.tenant == context.tenant_id, + models.Stack.stack_user_project_id == context.tenant_id) + ).filter_by(name=stack_name).filter_by(owner_id=owner_id) return query.first() def stack_get_by_name(context, stack_name): - query = soft_delete_aware_query(context, models.Stack).\ - filter(sqlalchemy.or_( - models.Stack.tenant == context.tenant_id, - models.Stack.stack_user_project_id == context.tenant_id - )).\ - filter_by(name=stack_name) + query = soft_delete_aware_query( + context, models.Stack + ).filter(sqlalchemy.or_( + models.Stack.tenant == context.tenant_id, + models.Stack.stack_user_project_id == context.tenant_id) + ).filter_by(name=stack_name) return query.first() @@ -310,8 +314,8 @@ def stack_get(context, stack_id, show_deleted=False, tenant_safe=True, def stack_get_all_by_owner_id(context, owner_id): - results = soft_delete_aware_query(context, models.Stack).\ - filter_by(owner_id=owner_id).all() + results = soft_delete_aware_query( + context, models.Stack).filter_by(owner_id=owner_id).all() return results @@ -353,13 +357,13 @@ def _paginate_query(context, query, model, limit=None, sort_keys=None, def _query_stack_get_all(context, tenant_safe=True, show_deleted=False, show_nested=False): if show_nested: - query = soft_delete_aware_query(context, models.Stack, - show_deleted=show_deleted).\ - filter_by(backup=False) + query = soft_delete_aware_query( + context, models.Stack, show_deleted=show_deleted + ).filter_by(backup=False) else: - query = soft_delete_aware_query(context, models.Stack, - show_deleted=show_deleted).\ - filter_by(owner_id=None) + query = soft_delete_aware_query( + context, models.Stack, show_deleted=show_deleted + ).filter_by(owner_id=None) if tenant_safe: query = query.filter_by(tenant=context.tenant_id) @@ -450,9 +454,10 @@ def 
stack_lock_steal(stack_id, old_engine_id, new_engine_id): session = get_session() with session.begin(): lock = session.query(models.StackLock).get(stack_id) - rows_affected = session.query(models.StackLock).\ - filter_by(stack_id=stack_id, engine_id=old_engine_id).\ - update({"engine_id": new_engine_id}) + rows_affected = session.query( + models.StackLock + ).filter_by(stack_id=stack_id, engine_id=old_engine_id + ).update({"engine_id": new_engine_id}) if not rows_affected: return lock.engine_id if lock is not None else True @@ -460,9 +465,9 @@ def stack_lock_steal(stack_id, old_engine_id, new_engine_id): def stack_lock_release(stack_id, engine_id): session = get_session() with session.begin(): - rows_affected = session.query(models.StackLock).\ - filter_by(stack_id=stack_id, engine_id=engine_id).\ - delete() + rows_affected = session.query( + models.StackLock + ).filter_by(stack_id=stack_id, engine_id=engine_id).delete() if not rows_affected: return True @@ -523,8 +528,9 @@ def event_get(context, event_id): def event_get_all(context): stacks = soft_delete_aware_query(context, models.Stack) stack_ids = [stack.id for stack in stacks] - results = model_query(context, models.Event).\ - filter(models.Event.stack_id.in_(stack_ids)).all() + results = model_query( + context, models.Event + ).filter(models.Event.stack_id.in_(stack_ids)).all() return results @@ -532,16 +538,16 @@ def event_get_all_by_tenant(context, limit=None, marker=None, sort_keys=None, sort_dir=None, filters=None): query = model_query(context, models.Event) query = db_filters.exact_filter(query, models.Event, filters) - query = query.join(models.Event.stack).\ - filter_by(tenant=context.tenant_id).filter_by(deleted_at=None) + query = query.join( + models.Event.stack + ).filter_by(tenant=context.tenant_id).filter_by(deleted_at=None) filters = None return _events_filter_and_page_query(context, query, limit, marker, sort_keys, sort_dir, filters).all() def _query_all_by_stack(context, stack_id): - query = model_query(context, models.Event).\ - filter_by(stack_id=stack_id) + query = model_query(context, models.Event).filter_by(stack_id=stack_id) return query @@ -568,8 +574,8 @@ def _events_paginate_query(context, query, model, limit=None, sort_keys=None, if marker: # not to use model_query(context, model).get(marker), because # user can only see the ID(column 'uuid') and the ID as the marker - model_marker = model_query(context, model).filter_by(uuid=marker).\ - first() + model_marker = model_query( + context, model).filter_by(uuid=marker).first() try: query = utils.paginate_query(query, model, limit, sort_keys, model_marker, sort_dir) @@ -634,8 +640,8 @@ def watch_rule_get(context, watch_rule_id): def watch_rule_get_by_name(context, watch_rule_name): - result = model_query(context, models.WatchRule).\ - filter_by(name=watch_rule_name).first() + result = model_query( + context, models.WatchRule).filter_by(name=watch_rule_name).first() return result @@ -645,8 +651,8 @@ def watch_rule_get_all(context): def watch_rule_get_all_by_stack(context, stack_id): - results = model_query(context, models.WatchRule).\ - filter_by(stack_id=stack_id).all() + results = model_query( + context, models.WatchRule).filter_by(stack_id=stack_id).all() return results @@ -745,12 +751,12 @@ def software_deployment_get(context, deployment_id): def software_deployment_get_all(context, server_id=None): sd = models.SoftwareDeployment - query = model_query(context, sd).\ - filter(sqlalchemy.or_( - sd.tenant == context.tenant_id, - sd.stack_user_project_id == 
context.tenant_id - )).\ - order_by(sd.created_at) + query = model_query( + context, sd + ).filter(sqlalchemy.or_( + sd.tenant == context.tenant_id, + sd.stack_user_project_id == context.tenant_id) + ).order_by(sd.created_at) if server_id: query = query.filter_by(server_id=server_id) return query.all() @@ -837,10 +843,11 @@ def purge_deleted(age, granularity='days'): raw_template = sqlalchemy.Table('raw_template', meta, autoload=True) user_creds = sqlalchemy.Table('user_creds', meta, autoload=True) - stmt = sqlalchemy.select([stack.c.id, - stack.c.raw_template_id, - stack.c.user_creds_id]).\ - where(stack.c.deleted_at < time_line) + stmt = sqlalchemy.select( + [stack.c.id, + stack.c.raw_template_id, + stack.c.user_creds_id] + ).where(stack.c.deleted_at < time_line) deleted_stacks = engine.execute(stmt) for s in deleted_stacks: @@ -848,8 +855,8 @@ def purge_deleted(age, granularity='days'): engine.execute(event_del) stack_del = stack.delete().where(stack.c.id == s[0]) engine.execute(stack_del) - raw_template_del = raw_template.delete().\ - where(raw_template.c.id == s[1]) + raw_template_del = raw_template.delete().where( + raw_template.c.id == s[1]) engine.execute(raw_template_del) user_creds_del = user_creds.delete().where(user_creds.c.id == s[2]) engine.execute(user_creds_del) diff --git a/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py b/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py index f779a77f96..92f11de26c 100644 --- a/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py +++ b/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py @@ -32,18 +32,19 @@ def downgrade(migrate_engine): # Remove soft deleted data not_deleted = None - stmt = sqlalchemy.select([stack.c.id, - stack.c.raw_template_id, - stack.c.user_creds_id]).\ - where(stack.c.deleted_at != not_deleted) + stmt = sqlalchemy.select( + [stack.c.id, + stack.c.raw_template_id, + stack.c.user_creds_id] + ).where(stack.c.deleted_at != not_deleted) deleted_stacks = migrate_engine.execute(stmt) for s in deleted_stacks: event_del = event.delete().where(event.c.stack_id == s[0]) migrate_engine.execute(event_del) stack_del = stack.delete().where(stack.c.id == s[0]) migrate_engine.execute(stack_del) - raw_template_del = raw_template.delete().\ - where(raw_template.c.id == s[1]) + raw_template_del = raw_template.delete( + ).where(raw_template.c.id == s[1]) migrate_engine.execute(raw_template_del) user_creds_del = user_creds.delete().where(user_creds.c.id == s[2]) migrate_engine.execute(user_creds_del) diff --git a/heat/db/sqlalchemy/migrate_repo/versions/035_event_uuid_to_id.py b/heat/db/sqlalchemy/migrate_repo/versions/035_event_uuid_to_id.py index 9df1b42ed5..715a235961 100644 --- a/heat/db/sqlalchemy/migrate_repo/versions/035_event_uuid_to_id.py +++ b/heat/db/sqlalchemy/migrate_repo/versions/035_event_uuid_to_id.py @@ -88,8 +88,8 @@ def upgrade(migrate_engine): # NOTE(chenxiao): For DB2, setting "ID" column "autoincrement=True" # can't make sense after above "tmp_id=>id" transformation, # so should work around it. 
- sql = "ALTER TABLE EVENT ALTER COLUMN ID SET GENERATED BY " \ - "DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)" + sql = ("ALTER TABLE EVENT ALTER COLUMN ID SET GENERATED BY " + "DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)") migrate_engine.execute(sql) else: event_table.c.tmp_id.alter(sqlalchemy.Integer, autoincrement=True) diff --git a/heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py b/heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py index b92c3068bc..cd893066d2 100644 --- a/heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py +++ b/heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py @@ -24,8 +24,8 @@ def upgrade(migrate_engine): # Set backup flag for backup stacks, which are the only ones named "foo*" not_deleted = None stmt = sqlalchemy.select([stack.c.id, - stack.c.name]).\ - where(stack.c.deleted_at == not_deleted) + stack.c.name] + ).where(stack.c.deleted_at == not_deleted) stacks = migrate_engine.execute(stmt) for s in stacks: if s.name.endswith('*'): diff --git a/heat/engine/cfn/template.py b/heat/engine/cfn/template.py index dfb314732d..7e3354f4f0 100644 --- a/heat/engine/cfn/template.py +++ b/heat/engine/cfn/template.py @@ -35,11 +35,13 @@ _RESOURCE_KEYS = ( class CfnTemplate(template.Template): '''A stack template.''' - SECTIONS = (VERSION, ALTERNATE_VERSION, DESCRIPTION, MAPPINGS, - PARAMETERS, RESOURCES, OUTPUTS) = \ - ('AWSTemplateFormatVersion', 'HeatTemplateFormatVersion', - 'Description', 'Mappings', 'Parameters', 'Resources', 'Outputs' - ) + SECTIONS = ( + VERSION, ALTERNATE_VERSION, + DESCRIPTION, MAPPINGS, PARAMETERS, RESOURCES, OUTPUTS + ) = ( + 'AWSTemplateFormatVersion', 'HeatTemplateFormatVersion', + 'Description', 'Mappings', 'Parameters', 'Resources', 'Outputs' + ) SECTIONS_NO_DIRECT_ACCESS = set([PARAMETERS, VERSION, ALTERNATE_VERSION]) diff --git a/heat/engine/environment.py b/heat/engine/environment.py index f8d7511a6a..76a09b399e 100644 --- a/heat/engine/environment.py +++ b/heat/engine/environment.py @@ -197,8 +197,8 @@ class ResourceRegistry(object): if name.endswith('*'): # delete all matching entries. for res_name in registry.keys(): - if isinstance(registry[res_name], ResourceInfo) and \ - res_name.startswith(name[:-1]): + if (isinstance(registry[res_name], ResourceInfo) and + res_name.startswith(name[:-1])): LOG.warn(_LW('Removing %(item)s from %(path)s'), { 'item': res_name, 'path': descriptive_path}) diff --git a/heat/engine/event.py b/heat/engine/event.py index ff03dad8b2..4d01c5ce6e 100644 --- a/heat/engine/event.py +++ b/heat/engine/event.py @@ -51,14 +51,14 @@ class Event(object): '''Retrieve an Event from the database.''' from heat.engine import stack as parser - ev = event if event is not None else\ - db_api.event_get(context, event_id) + ev = (event if event is not None else + db_api.event_get(context, event_id)) if ev is None: message = _('No event exists with id "%s"') % str(event_id) raise exception.NotFound(message) - st = stack if stack is not None else\ - parser.Stack.load(context, ev.stack_id) + st = (stack if stack is not None else + parser.Stack.load(context, ev.stack_id)) return cls(context, st, ev.resource_action, ev.resource_status, ev.resource_status_reason, ev.physical_resource_id, diff --git a/heat/engine/hot/template.py b/heat/engine/hot/template.py index 32ebcbd953..a1ba06f003 100644 --- a/heat/engine/hot/template.py +++ b/heat/engine/hot/template.py @@ -39,10 +39,13 @@ class HOTemplate20130523(template.Template): A Heat Orchestration Template format stack template. 
""" - SECTIONS = (VERSION, DESCRIPTION, PARAMETER_GROUPS, PARAMETERS, - RESOURCES, OUTPUTS, MAPPINGS) = \ - ('heat_template_version', 'description', 'parameter_groups', - 'parameters', 'resources', 'outputs', '__undefined__') + SECTIONS = ( + VERSION, DESCRIPTION, PARAMETER_GROUPS, + PARAMETERS, RESOURCES, OUTPUTS, MAPPINGS + ) = ( + 'heat_template_version', 'description', 'parameter_groups', + 'parameters', 'resources', 'outputs', '__undefined__' + ) SECTIONS_NO_DIRECT_ACCESS = set([PARAMETERS, VERSION]) diff --git a/heat/engine/parameters.py b/heat/engine/parameters.py index 053a2fc232..4aa2a37faf 100644 --- a/heat/engine/parameters.py +++ b/heat/engine/parameters.py @@ -522,8 +522,8 @@ class Parameters(collections.Mapping): raise exception.InvalidTemplateParameter(key=name) def _pseudo_parameters(self, stack_identifier): - stack_id = stack_identifier.arn() \ - if stack_identifier is not None else 'None' + stack_id = (stack_identifier.arn() + if stack_identifier is not None else 'None') stack_name = stack_identifier and stack_identifier.stack_name yield Parameter(self.PARAM_STACK_ID, diff --git a/heat/engine/resources/aws/autoscaling_group.py b/heat/engine/resources/aws/autoscaling_group.py index d1d54d2264..538ec31539 100644 --- a/heat/engine/resources/aws/autoscaling_group.py +++ b/heat/engine/resources/aws/autoscaling_group.py @@ -339,8 +339,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin): # availability zones, it will be possible to specify multiple subnets. # For now, only one subnet can be specified. The bug #1096017 tracks # this issue. - if self.properties.get(self.VPCZONE_IDENTIFIER) and \ - len(self.properties[self.VPCZONE_IDENTIFIER]) != 1: + if (self.properties.get(self.VPCZONE_IDENTIFIER) and + len(self.properties[self.VPCZONE_IDENTIFIER]) != 1): raise exception.NotSupported(feature=_("Anything other than one " "VPCZoneIdentifier")) diff --git a/heat/engine/resources/ceilometer/alarm.py b/heat/engine/resources/ceilometer/alarm.py index ea2218dd09..d3086eb130 100644 --- a/heat/engine/resources/ceilometer/alarm.py +++ b/heat/engine/resources/ceilometer/alarm.py @@ -63,9 +63,9 @@ common_properties_schema = { ), REPEAT_ACTIONS: properties.Schema( properties.Schema.BOOLEAN, - _('False to trigger actions when the threshold is reached AND ' - 'the alarm\'s state has changed. By default, actions are called ' - 'each time the threshold is reached.'), + _("False to trigger actions when the threshold is reached AND " + "the alarm's state has changed. 
By default, actions are called " + "each time the threshold is reached."), default='true', update_allowed=True ) diff --git a/heat/engine/resources/eip.py b/heat/engine/resources/eip.py index 2ff9ca696a..6ceca9bfd3 100644 --- a/heat/engine/resources/eip.py +++ b/heat/engine/resources/eip.py @@ -140,8 +140,8 @@ class ElasticIp(resource.Resource): server.remove_floating_ip(self._ipaddress()) except Exception as e: is_not_found = self.client_plugin('nova').is_not_found(e) - is_unprocessable_entity = self.client_plugin('nova').\ - is_unprocessable_entity(e) + is_unprocessable_entity = self.client_plugin( + 'nova').is_unprocessable_entity(e) if (not is_not_found and not is_unprocessable_entity): raise @@ -259,8 +259,7 @@ class ElasticIpAssociation(resource.Resource): router = vpc.VPC.router_for_vpc(self.neutron(), network_id) if router is not None: floatingip = self.neutron().show_floatingip(float_id) - floating_net_id = \ - floatingip['floatingip']['floating_network_id'] + floating_net_id = floatingip['floatingip']['floating_network_id'] self.neutron().add_gateway_router( router['id'], {'network_id': floating_net_id}) diff --git a/heat/engine/resources/instance.py b/heat/engine/resources/instance.py index fd268e9b55..046a5f53d2 100644 --- a/heat/engine/resources/instance.py +++ b/heat/engine/resources/instance.py @@ -510,9 +510,8 @@ class Instance(resource.Resource): # if SubnetId property in Instance, ensure subnet exists if subnet_id: neutronclient = self.neutron() - network_id = \ - self.client_plugin('neutron').network_id_from_subnet_id( - subnet_id) + network_id = self.client_plugin( + 'neutron').network_id_from_subnet_id(subnet_id) # if subnet verified, create a port to use this subnet # if port is not created explicitly, nova will choose # the first subnet in the given network. @@ -525,9 +524,8 @@ class Instance(resource.Resource): } if security_groups: - props['security_groups'] = \ - self.client_plugin('neutron').get_secgroup_uuids( - security_groups) + props['security_groups'] = self.client_plugin( + 'neutron').get_secgroup_uuids(security_groups) port = neutronclient.create_port({'port': props})['port'] diff --git a/heat/engine/resources/loadbalancer.py b/heat/engine/resources/loadbalancer.py index 16481af211..255a2b74be 100644 --- a/heat/engine/resources/loadbalancer.py +++ b/heat/engine/resources/loadbalancer.py @@ -526,8 +526,8 @@ backend servers if res: return res - if cfg.CONF.loadbalancer_template and \ - not os.access(cfg.CONF.loadbalancer_template, os.R_OK): + if (cfg.CONF.loadbalancer_template and + not os.access(cfg.CONF.loadbalancer_template, os.R_OK)): msg = _('Custom LoadBalancer template can not be found') raise exception.StackValidationFailed(message=msg) diff --git a/heat/engine/resources/neutron/port.py b/heat/engine/resources/neutron/port.py index 1e11f44961..de346f7fb5 100644 --- a/heat/engine/resources/neutron/port.py +++ b/heat/engine/resources/neutron/port.py @@ -282,13 +282,13 @@ class Port(neutron.NeutronResource): # 'default' securityGroup. If has the 'security_groups' and the # value is [], which means to create the port without securityGroup. if props.get(self.SECURITY_GROUPS) is not None: - props[self.SECURITY_GROUPS] = self.client_plugin().\ - get_secgroup_uuids(props.get(self.SECURITY_GROUPS)) + props[self.SECURITY_GROUPS] = self.client_plugin( + ).get_secgroup_uuids(props.get(self.SECURITY_GROUPS)) else: # And the update should has the same behavior. 
if prepare_for_update: - props[self.SECURITY_GROUPS] = self.client_plugin().\ - get_secgroup_uuids(['default']) + props[self.SECURITY_GROUPS] = self.client_plugin( + ).get_secgroup_uuids(['default']) if not props[self.FIXED_IPS]: del(props[self.FIXED_IPS]) diff --git a/heat/engine/resources/nova_floatingip.py b/heat/engine/resources/nova_floatingip.py index b4563dc078..55b5499f1f 100644 --- a/heat/engine/resources/nova_floatingip.py +++ b/heat/engine/resources/nova_floatingip.py @@ -143,8 +143,8 @@ class NovaFloatingIpAssociation(resource.Resource): try: server = self.nova().servers.get(self.properties[self.SERVER]) if server: - fl_ip = self.nova().floating_ips.\ - get(self.properties[self.FLOATING_IP]) + fl_ip = self.nova().floating_ips.get( + self.properties[self.FLOATING_IP]) self.nova().servers.remove_floating_ip(server, fl_ip.ip) except Exception as e: self.client_plugin().ignore_not_found(e) diff --git a/heat/engine/resources/openstack/volume.py b/heat/engine/resources/openstack/volume.py index d891df3077..3a14ce0bb5 100644 --- a/heat/engine/resources/openstack/volume.py +++ b/heat/engine/resources/openstack/volume.py @@ -340,8 +340,8 @@ class CinderVolume(aws_vol.Volume): return res # Scheduler hints are only supported from Cinder API v2 - if self.properties.get(self.CINDER_SCHEDULER_HINTS) \ - and self.cinder().volume_api_version == 1: + if (self.properties.get(self.CINDER_SCHEDULER_HINTS) + and self.cinder().volume_api_version == 1): raise exception.StackValidationFailed( message=_('Scheduler hints are not supported by the current ' 'volume API.')) diff --git a/heat/engine/resources/openstack/wait_condition_handle.py b/heat/engine/resources/openstack/wait_condition_handle.py index 77d51f1b4f..343db5da83 100644 --- a/heat/engine/resources/openstack/wait_condition_handle.py +++ b/heat/engine/resources/openstack/wait_condition_handle.py @@ -101,11 +101,11 @@ class HeatWaitConditionHandle(wc_base.BaseWaitConditionHandle): return self.data().get('endpoint') elif key == self.CURL_CLI: # Construct curl command for template-author convenience - return ('curl -i -X POST ' - '-H \'X-Auth-Token: %(token)s\' ' - '-H \'Content-Type: application/json\' ' - '-H \'Accept: application/json\' ' - '%(endpoint)s' % + return ("curl -i -X POST " + "-H 'X-Auth-Token: %(token)s' " + "-H 'Content-Type: application/json' " + "-H 'Accept: application/json' " + "%(endpoint)s" % dict(token=self.data().get('token'), endpoint=self.data().get('endpoint'))) diff --git a/heat/engine/resources/resource_group.py b/heat/engine/resources/resource_group.py index b7cd711d15..3dedf840fb 100644 --- a/heat/engine/resources/resource_group.py +++ b/heat/engine/resources/resource_group.py @@ -146,18 +146,18 @@ class ResourceGroup(stack_resource.StackResource): schema={ REMOVAL_RSRC_LIST: properties.Schema( properties.Schema.LIST, - _('List of resources to be removed ' - 'when doing an update which requires removal of ' - 'specific resources. ' - 'The resource may be specified several ways: ' - '(1) The resource name, as in the nested stack, ' - '(2) The resource reference returned from ' - 'get_resource in a template, as available via ' - 'the \'refs\' attribute ' - 'Note this is destructive on update when specified; ' - 'even if the count is not being reduced, and once ' - 'a resource name is removed, it\'s name is never ' - 'reused in subsequent updates' + _("List of resources to be removed " + "when doing an update which requires removal of " + "specific resources. 
" + "The resource may be specified several ways: " + "(1) The resource name, as in the nested stack, " + "(2) The resource reference returned from " + "get_resource in a template, as available via " + "the 'refs' attribute " + "Note this is destructive on update when specified; " + "even if the count is not being reduced, and once " + "a resource name is removed, it's name is never " + "reused in subsequent updates" ), default=[] ), diff --git a/heat/engine/resources/server.py b/heat/engine/resources/server.py index 051ec162a6..2d07563e5a 100644 --- a/heat/engine/resources/server.py +++ b/heat/engine/resources/server.py @@ -841,8 +841,8 @@ class Server(stack_user.StackUser): else: # remove not updated networks from old and new networks lists, # also get list these networks - not_updated_networks = \ - self._get_network_matches(old_networks, new_networks) + not_updated_networks = self._get_network_matches( + old_networks, new_networks) self.update_networks_matching_iface_port( old_networks + not_updated_networks, interfaces) @@ -986,8 +986,8 @@ class Server(stack_user.StackUser): # record if any networks include explicit ports networks_with_port = False for network in networks: - networks_with_port = networks_with_port or \ - network.get(self.NETWORK_PORT) + networks_with_port = (networks_with_port or + network.get(self.NETWORK_PORT)) if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID): msg = _('Properties "%(uuid)s" and "%(id)s" are both set ' 'to the network "%(network)s" for the server ' diff --git a/heat/engine/resources/software_config/software_component.py b/heat/engine/resources/software_config/software_component.py index 7725fd24b7..96b2b24836 100644 --- a/heat/engine/resources/software_config/software_component.py +++ b/heat/engine/resources/software_config/software_component.py @@ -150,8 +150,8 @@ class SoftwareComponent(sc.SoftwareConfig): actions = config.get(self.CONFIG_ACTIONS) if any(action in config_actions for action in actions): msg = _('Defining more than one configuration for the same ' - 'action in SoftwareComponent "%s" is not allowed.')\ - % self.name + 'action in SoftwareComponent "%s" is not allowed.' 
+ ) % self.name raise exception.StackValidationFailed(message=msg) config_actions.update(actions) diff --git a/heat/engine/resources/software_config/software_deployment.py b/heat/engine/resources/software_config/software_deployment.py index 2e735da980..9f9405a24d 100644 --- a/heat/engine/resources/software_config/software_deployment.py +++ b/heat/engine/resources/software_config/software_deployment.py @@ -215,8 +215,8 @@ class SoftwareDeployment(signal_responder.SignalResponder): config = self.rpc_client().show_software_config( self.context, config_id) - if action not in self.properties[self.DEPLOY_ACTIONS]\ - and not config[rpc_api.SOFTWARE_CONFIG_GROUP] == 'component': + if (action not in self.properties[self.DEPLOY_ACTIONS] + and not config[rpc_api.SOFTWARE_CONFIG_GROUP] == 'component'): return props = self._build_properties( diff --git a/heat/engine/resources/swiftsignal.py b/heat/engine/resources/swiftsignal.py index 45aca0601e..3d45a655ad 100644 --- a/heat/engine/resources/swiftsignal.py +++ b/heat/engine/resources/swiftsignal.py @@ -106,7 +106,7 @@ class SwiftSignalHandle(resource.Resource): elif key == self.ENDPOINT: return self.data().get(self.ENDPOINT) elif key == self.CURL_CLI: - return ('curl -i -X PUT \'%s\'' % + return ("curl -i -X PUT '%s'" % self.data().get(self.ENDPOINT)) def handle_delete(self): diff --git a/heat/engine/rsrc_defn.py b/heat/engine/rsrc_defn.py index 6ce36ed386..9cc989053c 100644 --- a/heat/engine/rsrc_defn.py +++ b/heat/engine/rsrc_defn.py @@ -127,8 +127,8 @@ class ResourceDefinitionCore(object): This returns a new resource definition, with all of the functions parsed in the context of the specified stack and template. """ - assert not getattr(self, '_frozen', False), \ - "Cannot re-parse a frozen definition" + assert not getattr(self, '_frozen', False + ), "Cannot re-parse a frozen definition" def reparse_snippet(snippet): return template.parse(stack, copy.deepcopy(snippet)) diff --git a/heat/engine/stack.py b/heat/engine/stack.py index d5e798e047..a675b947ce 100644 --- a/heat/engine/stack.py +++ b/heat/engine/stack.py @@ -901,8 +901,8 @@ class Stack(collections.Mapping): # rights to delete the trust unless an admin trustor_id = user_creds.get('trustor_user_id') if self.context.user_id != trustor_id: - LOG.debug('Context user_id doesn\'t match ' - 'trustor, using stored context') + LOG.debug("Context user_id doesn't match " + "trustor, using stored context") sc = self.stored_context() sc.clients.client('keystone').delete_trust( trust_id) diff --git a/heat/engine/stack_lock.py b/heat/engine/stack_lock.py index c4f7707f37..9d5fb78370 100644 --- a/heat/engine/stack_lock.py +++ b/heat/engine/stack_lock.py @@ -73,8 +73,8 @@ class StackLock(object): 'stack': self.stack.id}) return - if lock_engine_id == self.engine_id or \ - self.engine_alive(self.context, lock_engine_id): + if (lock_engine_id == self.engine_id or + self.engine_alive(self.context, lock_engine_id)): LOG.debug("Lock on stack %(stack)s is owned by engine " "%(engine)s" % {'stack': self.stack.id, 'engine': lock_engine_id}) diff --git a/heat/engine/stack_resource.py b/heat/engine/stack_resource.py index 579366b194..a9847e6b76 100644 --- a/heat/engine/stack_resource.py +++ b/heat/engine/stack_resource.py @@ -154,8 +154,8 @@ class StackResource(resource.Resource): def _parse_nested_stack(self, stack_name, child_template, child_params, timeout_mins=None, adopt_data=None): if self.stack.nested_depth >= cfg.CONF.max_nested_stack_depth: - msg = _("Recursion depth exceeds %d.") % \ - 
cfg.CONF.max_nested_stack_depth + msg = _("Recursion depth exceeds %d." + ) % cfg.CONF.max_nested_stack_depth raise exception.RequestLimitExceeded(message=msg) parsed_template = self._parse_child_template(child_template) diff --git a/heat/tests/aws/test_waitcondition.py b/heat/tests/aws/test_waitcondition.py index 79942bb98b..7b2d25f63d 100644 --- a/heat/tests/aws/test_waitcondition.py +++ b/heat/tests/aws/test_waitcondition.py @@ -106,8 +106,8 @@ class WaitConditionTest(common.HeatTestCase): id = identifier.ResourceIdentifier('test_tenant', stack.name, stack.id, '', 'WaitHandle') self.m.StubOutWithMock(aws_wch.WaitConditionHandle, 'identifier') - aws_wch.WaitConditionHandle.identifier().\ - MultipleTimes().AndReturn(id) + aws_wch.WaitConditionHandle.identifier( + ).MultipleTimes().AndReturn(id) if stub_status: self.m.StubOutWithMock(aws_wch.WaitConditionHandle, diff --git a/heat/tests/common.py b/heat/tests/common.py index 53389db3b4..a44e1816e2 100644 --- a/heat/tests/common.py +++ b/heat/tests/common.py @@ -152,8 +152,8 @@ class HeatTestCase(testscenarios.WithScenarios, self.m.StubOutWithMock(glance.ImageConstraint, 'validate') if num is None: glance.ImageConstraint.validate( - mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().\ - AndReturn(True) + mox.IgnoreArg(), mox.IgnoreArg() + ).MultipleTimes().AndReturn(True) else: for x in range(num): glance.ImageConstraint.validate( diff --git a/heat/tests/fakes.py b/heat/tests/fakes.py index defc46d0a4..aae0cb64c3 100644 --- a/heat/tests/fakes.py +++ b/heat/tests/fakes.py @@ -33,11 +33,11 @@ class FakeClient(object): expected = (method, url) called = self.client.callstack[pos][0:2] - assert self.client.callstack, \ - "Expected %s %s but no calls were made." % expected + assert self.client.callstack, ("Expected %s %s " + "but no calls were made." % expected) - assert expected == called, 'Expected %s %s; got %s %s' % \ - (expected + called) + assert expected == called, 'Expected %s %s; got %s %s' % ( + expected + called) if body is not None: assert self.client.callstack[pos][2] == body @@ -48,8 +48,8 @@ class FakeClient(object): """ expected = (method, url) - assert self.client.callstack, \ - "Expected %s %s but no calls were made." % expected + assert self.client.callstack, ("Expected %s %s but no calls " + "were made." % expected) found = False for entry in self.client.callstack: @@ -57,8 +57,8 @@ class FakeClient(object): found = True break - assert found, 'Expected %s %s; got %s' % \ - (expected, self.client.callstack) + assert found, 'Expected %s %s; got %s' % (expected, + self.client.callstack) if body is not None: try: assert entry[2] == body @@ -133,8 +133,8 @@ class FakeKeystoneClient(object): def url_for(self, **kwargs): if self.only_services is not None: - if 'service_type' in kwargs and \ - kwargs['service_type'] not in self.only_services: + if ('service_type' in kwargs and + kwargs['service_type'] not in self.only_services): # keystone client throws keystone exceptions, not cinder # exceptions. 
raise exceptions.EndpointNotFound() diff --git a/heat/tests/generic_resource.py b/heat/tests/generic_resource.py index 5ef67bdd82..19305f4a57 100644 --- a/heat/tests/generic_resource.py +++ b/heat/tests/generic_resource.py @@ -160,7 +160,7 @@ class StackUserResource(stack_user.StackUser): class ResourceWithCustomConstraint(GenericResource): - properties_schema = \ - {'Foo': properties.Schema( + properties_schema = { + 'Foo': properties.Schema( properties.Schema.STRING, constraints=[constraints.CustomConstraint('neutron.network')])} diff --git a/heat/tests/openstack/test_waitcondition.py b/heat/tests/openstack/test_waitcondition.py index fb69ee505e..70cbf931a8 100644 --- a/heat/tests/openstack/test_waitcondition.py +++ b/heat/tests/openstack/test_waitcondition.py @@ -99,8 +99,8 @@ class HeatWaitConditionTest(common.HeatTestCase): stack.id, '', 'wait_handle') self.m.StubOutWithMock(heat_wch.HeatWaitConditionHandle, 'identifier') - heat_wch.HeatWaitConditionHandle.\ - identifier().MultipleTimes().AndReturn(id) + heat_wch.HeatWaitConditionHandle.identifier( + ).MultipleTimes().AndReturn(id) if stub_status: self.m.StubOutWithMock(heat_wch.HeatWaitConditionHandle, diff --git a/heat/tests/test_api_openstack_v1.py b/heat/tests/test_api_openstack_v1.py index 2894912be3..52d57b617d 100644 --- a/heat/tests/test_api_openstack_v1.py +++ b/heat/tests/test_api_openstack_v1.py @@ -65,9 +65,9 @@ class InstantiationDataTest(common.HeatTestCase): data = {"AWSTemplateFormatVersion": "2010-09-09", "key1": ["val1[0]", "val1[1]"], "key2": "val2"} - json_repr = '{"AWSTemplateFormatVersion" : "2010-09-09",' \ - '"key1": [ "val1[0]", "val1[1]" ], ' \ - '"key2": "val2" }' + json_repr = ('{"AWSTemplateFormatVersion" : "2010-09-09",' + '"key1": [ "val1[0]", "val1[1]" ], ' + '"key2": "val2" }') parsed = stacks.InstantiationData.format_parse(json_repr, 'foo') self.assertEqual(data, parsed) @@ -108,8 +108,8 @@ parameters: self.assertEqual(template, data.template()) def test_template_string_json(self): - template = '{"heat_template_version": "2013-05-23",' \ - '"foo": "bar", "blarg": "wibble"}' + template = ('{"heat_template_version": "2013-05-23",' + '"foo": "bar", "blarg": "wibble"}') body = {'template': template} data = stacks.InstantiationData(body) self.assertEqual(json.loads(template), data.template()) diff --git a/heat/tests/test_autoscaling_update_policy.py b/heat/tests/test_autoscaling_update_policy.py index 8ca86fc171..c75396c6fe 100644 --- a/heat/tests/test_autoscaling_update_policy.py +++ b/heat/tests/test_autoscaling_update_policy.py @@ -299,14 +299,14 @@ class AutoScalingGroupTest(common.HeatTestCase): self.m.StubOutWithMock(self.fc.servers, 'get') self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action') - self.fc.servers.get(mox.IgnoreArg()).\ - MultipleTimes().AndReturn(return_server) + self.fc.servers.get( + mox.IgnoreArg()).MultipleTimes().AndReturn(return_server) self.fc.client.post_servers_1234_action( - body={'resize': {'flavorRef': 3}}).\ - MultipleTimes().AndReturn((202, None)) + body={'resize': {'flavorRef': 3}} + ).MultipleTimes().AndReturn((202, None)) self.fc.client.post_servers_1234_action( - body={'confirmResize': None}).\ - MultipleTimes().AndReturn((202, None)) + body={'confirmResize': None} + ).MultipleTimes().AndReturn((202, None)) self._stub_grp_replace(num_creates_expected_on_updt, num_deletes_expected_on_updt, diff --git a/heat/tests/test_ceilometer_alarm.py b/heat/tests/test_ceilometer_alarm.py index 082fa9668f..3d9b4cea63 100644 --- a/heat/tests/test_ceilometer_alarm.py +++ 
b/heat/tests/test_ceilometer_alarm.py @@ -326,8 +326,8 @@ class CeilometerAlarmTest(common.HeatTestCase): properties = t['Resources']['MEMAlarmHigh']['Properties'] # Test for bug/1383521, where meter_name is in NOVA_METERS properties[alarm.CeilometerAlarm.METER_NAME] = 'memory.usage' - properties['matching_metadata'] =\ - {'metadata.user_metadata.groupname': 'foo'} + properties['matching_metadata'] = {'metadata.user_metadata.groupname': + 'foo'} self.stack = self.create_stack(template=json.dumps(t)) diff --git a/heat/tests/test_cinder_client.py b/heat/tests/test_cinder_client.py index 91de76ca32..1ac74b754a 100644 --- a/heat/tests/test_cinder_client.py +++ b/heat/tests/test_cinder_client.py @@ -40,8 +40,8 @@ class CinderClientPluginTests(common.HeatTestCase): volume_id = str(uuid.uuid4()) my_volume = self.m.CreateMockAnything() self.cinder_client.volumes = self.m.CreateMockAnything() - self.cinder_client.volumes.get(volume_id).MultipleTimes().\ - AndReturn(my_volume) + self.cinder_client.volumes.get( + volume_id).MultipleTimes().AndReturn(my_volume) self.m.ReplayAll() self.assertEqual(my_volume, self.cinder_plugin.get_volume(volume_id)) @@ -53,8 +53,8 @@ class CinderClientPluginTests(common.HeatTestCase): snapshot_id = str(uuid.uuid4()) my_snapshot = self.m.CreateMockAnything() self.cinder_client.volume_snapshots = self.m.CreateMockAnything() - self.cinder_client.volume_snapshots.get(snapshot_id).MultipleTimes().\ - AndReturn(my_snapshot) + self.cinder_client.volume_snapshots.get( + snapshot_id).MultipleTimes().AndReturn(my_snapshot) self.m.ReplayAll() self.assertEqual(my_snapshot, diff --git a/heat/tests/test_eip.py b/heat/tests/test_eip.py index 7d3791a82f..2fb775be65 100644 --- a/heat/tests/test_eip.py +++ b/heat/tests/test_eip.py @@ -332,14 +332,16 @@ class EIPTest(common.HeatTestCase): self._mock_server_get(mock_server=server, multiple=True) self.m.StubOutWithMock(self.fc.servers, 'add_floating_ip') - self.fc.servers.add_floating_ip(server, floating_ip.ip, None).\ - AndRaise(nova_exceptions.BadRequest(400)) + self.fc.servers.add_floating_ip( + server, floating_ip.ip, None + ).AndRaise(nova_exceptions.BadRequest(400)) self.m.StubOutWithMock(self.fc.servers, 'remove_floating_ip') msg = ("ClientException: Floating ip 172.24.4.13 is not associated " "with instance 1234.") - self.fc.servers.remove_floating_ip(server, floating_ip.ip).\ - AndRaise(nova_exceptions.ClientException(422, msg)) + self.fc.servers.remove_floating_ip( + server, floating_ip.ip + ).AndRaise(nova_exceptions.ClientException(422, msg)) self.m.StubOutWithMock(self.fc.floating_ips, 'delete') self.fc.floating_ips.delete(mox.IsA(object)) diff --git a/heat/tests/test_engine_service.py b/heat/tests/test_engine_service.py index 9772c174c8..efbd5baee8 100644 --- a/heat/tests/test_engine_service.py +++ b/heat/tests/test_engine_service.py @@ -200,8 +200,8 @@ def setup_keystone_mocks(mocks, stack): def setup_mock_for_image_constraint(mocks, imageId_input, imageId_output=744): mocks.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id') - glance.GlanceClientPlugin.get_image_id(imageId_input).\ - MultipleTimes().AndReturn(imageId_output) + glance.GlanceClientPlugin.get_image_id( + imageId_input).MultipleTimes().AndReturn(imageId_output) def setup_mocks(mocks, stack, mock_image_constraint=True): @@ -856,8 +856,8 @@ class StackServiceCreateUpdateDeleteTest(common.HeatTestCase): stack_lock.StackLock.try_acquire().AndReturn("other-engine-fake-uuid") self.m.StubOutWithMock(stack_lock.StackLock, 'engine_alive') - 
stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\ - .AndReturn(True) + stack_lock.StackLock.engine_alive( + self.ctx, "other-engine-fake-uuid").AndReturn(True) self.m.StubOutWithMock(rpc_client._CallContext, 'call') rpc_client._CallContext.call( @@ -889,8 +889,8 @@ class StackServiceCreateUpdateDeleteTest(common.HeatTestCase): stack_lock.StackLock.try_acquire().AndReturn("other-engine-fake-uuid") self.m.StubOutWithMock(stack_lock.StackLock, 'engine_alive') - stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\ - .AndReturn(True) + stack_lock.StackLock.engine_alive( + self.ctx, "other-engine-fake-uuid").AndReturn(True) self.m.StubOutWithMock(rpc_client._CallContext, 'call') rpc_client._CallContext.call( @@ -921,8 +921,8 @@ class StackServiceCreateUpdateDeleteTest(common.HeatTestCase): stack_lock.StackLock.try_acquire().AndReturn("other-engine-fake-uuid") self.m.StubOutWithMock(stack_lock.StackLock, 'engine_alive') - stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\ - .AndReturn(False) + stack_lock.StackLock.engine_alive( + self.ctx, "other-engine-fake-uuid").AndReturn(False) self.m.StubOutWithMock(stack_lock.StackLock, 'acquire') stack_lock.StackLock.acquire().AndReturn(None) @@ -1878,8 +1878,10 @@ class StackServiceTest(common.HeatTestCase): @stack_context('service_list_all_test_stack') def test_stack_list_all(self): self.m.StubOutWithMock(parser.Stack, '_from_db') - parser.Stack._from_db(self.ctx, mox.IgnoreArg(), resolve_data=False)\ - .AndReturn(self.stack) + parser.Stack._from_db( + self.ctx, mox.IgnoreArg(), + resolve_data=False + ).AndReturn(self.stack) self.m.ReplayAll() sl = self.eng.list_stacks(self.ctx) @@ -2458,9 +2460,9 @@ class StackServiceTest(common.HeatTestCase): stack_not_found_exc = exception.StackNotFound(stack_name='test') self.m.StubOutWithMock(service.EngineService, '_get_stack') - service.EngineService \ - ._get_stack(self.ctx, non_exist_identifier, show_deleted=True) \ - .AndRaise(stack_not_found_exc) + service.EngineService._get_stack( + self.ctx, non_exist_identifier, show_deleted=True + ).AndRaise(stack_not_found_exc) self.m.ReplayAll() ex = self.assertRaises(dispatcher.ExpectedException, @@ -2860,8 +2862,8 @@ class StackServiceTest(common.HeatTestCase): self.m.StubOutWithMock(watchrule.WatchRule, 'set_watch_state') for state in ["HGJHGJHG", "1234", "!\*(&%"]: - watchrule.WatchRule.set_watch_state(state)\ - .InAnyOrder().AndRaise(ValueError) + watchrule.WatchRule.set_watch_state( + state).InAnyOrder().AndRaise(ValueError) self.m.ReplayAll() for state in ["HGJHGJHG", "1234", "!\*(&%"]: @@ -2876,8 +2878,9 @@ class StackServiceTest(common.HeatTestCase): state = watchrule.WatchRule.ALARM # State valid self.m.StubOutWithMock(watchrule.WatchRule, 'load') - watchrule.WatchRule.load(self.ctx, "nonexistent")\ - .AndRaise(exception.WatchRuleNotFound(watch_name='test')) + watchrule.WatchRule.load( + self.ctx, "nonexistent" + ).AndRaise(exception.WatchRuleNotFound(watch_name='test')) self.m.ReplayAll() ex = self.assertRaises(dispatcher.ExpectedException, @@ -3018,8 +3021,8 @@ class StackServiceTest(common.HeatTestCase): self.eng._validate_new_stack, self.ctx, 'test_existing_stack', parsed_template) - msg = \ - u'u\'"Type" is not a valid keyword inside a resource definition\'' + msg = (u'u\'"Type" is not a valid keyword ' + 'inside a resource definition\'') self.assertEqual(msg, six.text_type(ex)) def test_validate_new_stack_checks_incorrect_sections(self): diff --git a/heat/tests/test_fault_middleware.py 
b/heat/tests/test_fault_middleware.py index 16be4c8641..2ceeb2cc72 100644 --- a/heat/tests/test_fault_middleware.py +++ b/heat/tests/test_fault_middleware.py @@ -104,8 +104,8 @@ class FaultMiddlewareTest(common.HeatTestCase): serialized, ["heat.common.exception"]) wrapper = fault.FaultWrapper(None) msg = wrapper._error(remote_error) - expected_message, expected_traceback = six.text_type(remote_error).\ - split('\n', 1) + expected_message, expected_traceback = six.text_type( + remote_error).split('\n', 1) expected = {'code': 404, 'error': {'message': expected_message, 'traceback': expected_traceback, @@ -211,8 +211,8 @@ class FaultMiddlewareTest(common.HeatTestCase): wrapper = fault.FaultWrapper(None) msg = wrapper._error(remote_error) - expected_message, expected_traceback = six.text_type(remote_error).\ - split('\n', 1) + expected_message, expected_traceback = six.text_type( + remote_error).split('\n', 1) expected = {'code': 404, 'error': {'message': expected_message, 'traceback': expected_traceback, diff --git a/heat/tests/test_glance_client.py b/heat/tests/test_glance_client.py index a4c34074c9..74f887a6e5 100644 --- a/heat/tests/test_glance_client.py +++ b/heat/tests/test_glance_client.py @@ -68,8 +68,8 @@ class GlanceUtilsTests(common.HeatTestCase): self.glance_client.images.get(img_name).AndRaise( glance_exceptions.HTTPNotFound()) filters = {'name': img_name} - self.glance_client.images.list(filters=filters).MultipleTimes().\ - AndReturn([my_image]) + self.glance_client.images.list( + filters=filters).MultipleTimes().AndReturn([my_image]) self.m.ReplayAll() self.assertEqual(img_id, self.glance_plugin.get_image_id(img_name)) @@ -101,8 +101,8 @@ class GlanceUtilsTests(common.HeatTestCase): self.glance_client.images.get(img_name).AndRaise( glance_exceptions.HTTPNotFound()) filters = {'name': img_name} - self.glance_client.images.list(filters=filters).MultipleTimes().\ - AndReturn([]) + self.glance_client.images.list( + filters=filters).MultipleTimes().AndReturn([]) self.m.ReplayAll() self.assertRaises(exception.ImageNotFound, @@ -118,8 +118,8 @@ class GlanceUtilsTests(common.HeatTestCase): self.glance_client.images = self.m.CreateMockAnything() filters = {'name': img_name} - self.glance_client.images.list(filters=filters).MultipleTimes().\ - AndReturn(image_list) + self.glance_client.images.list( + filters=filters).MultipleTimes().AndReturn(image_list) self.m.ReplayAll() self.assertRaises(exception.PhysicalResourceNameAmbiguity, self.glance_plugin.get_image_id, img_name) diff --git a/heat/tests/test_heatclient.py b/heat/tests/test_heatclient.py index 5976fb36f5..44d90ac308 100644 --- a/heat/tests/test_heatclient.py +++ b/heat/tests/test_heatclient.py @@ -203,7 +203,7 @@ class KeystoneClientTest(common.HeatTestCase): err = self.assertRaises(exception.Error, heat_ks_client.create_stack_user, 'auser', password='password') - self.assertIn('Can\'t find role heat_stack_user', six.text_type(err)) + self.assertIn("Can't find role heat_stack_user", six.text_type(err)) def _mock_roles_list(self, heat_stack_user='heat_stack_user'): mock_roles_list = [] @@ -287,7 +287,7 @@ class KeystoneClientTest(common.HeatTestCase): err = self.assertRaises(exception.Error, heat_ks_client.create_stack_domain_user, username='duser', project_id='aproject') - self.assertIn('Can\'t find role heat_stack_user', six.text_type(err)) + self.assertIn("Can't find role heat_stack_user", six.text_type(err)) def test_delete_stack_domain_user(self): """Test deleting a stack domain user.""" @@ -570,7 +570,7 @@ class 
KeystoneClientTest(common.HeatTestCase): heat_ks_client = heat_keystoneclient.KeystoneClient(ctx) exc = self.assertRaises(exception.MissingCredentialError, heat_ks_client.create_trust_context) - expected = 'Missing required credential: roles [\'heat_stack_owner\']' + expected = "Missing required credential: roles ['heat_stack_owner']" self.assertIn(expected, six.text_type(exc)) def test_init_domain_cfg_not_set_fallback(self): @@ -1423,8 +1423,8 @@ class KeystoneClientTest(common.HeatTestCase): """ self._stubs_v3() self.mock_ks_v3_client.service_catalog = self.m.CreateMockAnything() - self.mock_ks_v3_client.service_catalog.url_for(**expected_kwargs)\ - .AndReturn(service_url) + self.mock_ks_v3_client.service_catalog.url_for( + **expected_kwargs).AndReturn(service_url) self.m.ReplayAll() ctx = ctx or utils.dummy_context() diff --git a/heat/tests/test_hot.py b/heat/tests/test_hot.py index e7534ff0dd..4a34a5b9b8 100644 --- a/heat/tests/test_hot.py +++ b/heat/tests/test_hot.py @@ -928,8 +928,8 @@ class StackTest(test_parser.StackTest): {'Type': 'ResourceWithPropsType', 'Properties': {'Foo': 'xyz'}}, {'Type': 'ResourceWithPropsType', - 'Properties': {'Foo': 'abc'}}).WithSideEffects(check_props) \ - .AndRaise(resource.UpdateReplace) + 'Properties': {'Foo': 'abc'}} + ).WithSideEffects(check_props).AndRaise(resource.UpdateReplace) self.m.ReplayAll() self.stack.update(updated_stack) diff --git a/heat/tests/test_identifier.py b/heat/tests/test_identifier.py index af0251d414..ac20d01dde 100644 --- a/heat/tests/test_identifier.py +++ b/heat/tests/test_identifier.py @@ -113,8 +113,8 @@ class IdentifierTest(testtools.TestCase): self.assertEqual('/p', hi.path) def test_arn_url_parse_qs(self): - url = self.url_prefix +\ - 'arn%3Aopenstack%3Aheat%3A%3At%3Astacks/s/i/p?foo=bar' + url = (self.url_prefix + + 'arn%3Aopenstack%3Aheat%3A%3At%3Astacks/s/i/p?foo=bar') hi = identifier.HeatIdentifier.from_arn_url(url) self.assertEqual('t', hi.tenant) self.assertEqual('s', hi.stack_name) diff --git a/heat/tests/test_instance.py b/heat/tests/test_instance.py index 0a0bc822c9..6fc4bde7e2 100644 --- a/heat/tests/test_instance.py +++ b/heat/tests/test_instance.py @@ -97,10 +97,10 @@ class InstancesTest(common.HeatTestCase): def _get_test_template(self, stack_name, image_id=None): (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['ImageId'] = \ - image_id or 'CentOS 5.2' - tmpl.t['Resources']['WebServer']['Properties']['InstanceType'] = \ - '256 MB Server' + tmpl.t['Resources']['WebServer']['Properties'][ + 'ImageId'] = image_id or 'CentOS 5.2' + tmpl.t['Resources']['WebServer']['Properties'][ + 'InstanceType'] = '256 MB Server' return tmpl, stack @@ -381,9 +381,9 @@ class InstancesTest(common.HeatTestCase): create = scheduler.TaskRunner(instance.create) error = self.assertRaises(exception.ResourceFailure, create) self.assertEqual( - 'StackValidationFailed: Property error : WebServer: ' - 'ImageId Error validating value \'Slackware\': ' - 'The Image (Slackware) could not be found.', + "StackValidationFailed: Property error : WebServer: " + "ImageId Error validating value 'Slackware': " + "The Image (Slackware) could not be found.", six.text_type(error)) self.m.VerifyAll() diff --git a/heat/tests/test_instance_group_update_policy.py b/heat/tests/test_instance_group_update_policy.py index 101f7ec566..f74804ff5a 100644 --- a/heat/tests/test_instance_group_update_policy.py +++ b/heat/tests/test_instance_group_update_policy.py @@ -221,14 +221,14 @@ class 
InstanceGroupTest(common.HeatTestCase): self.m.StubOutWithMock(self.fc.servers, 'get') self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action') - self.fc.servers.get(mox.IgnoreArg()).\ - MultipleTimes().AndReturn(return_server) + self.fc.servers.get( + mox.IgnoreArg()).MultipleTimes().AndReturn(return_server) self.fc.client.post_servers_1234_action( - body={'resize': {'flavorRef': 3}}).\ - MultipleTimes().AndReturn((202, None)) + body={'resize': {'flavorRef': 3}} + ).MultipleTimes().AndReturn((202, None)) self.fc.client.post_servers_1234_action( - body={'confirmResize': None}).\ - MultipleTimes().AndReturn((202, None)) + body={'confirmResize': None} + ).MultipleTimes().AndReturn((202, None)) self._stub_grp_replace(num_creates_expected_on_updt, num_deletes_expected_on_updt) diff --git a/heat/tests/test_lifecycle_plugin_utils.py b/heat/tests/test_lifecycle_plugin_utils.py index feed8c9dc3..288c426494 100644 --- a/heat/tests/test_lifecycle_plugin_utils.py +++ b/heat/tests/test_lifecycle_plugin_utils.py @@ -43,8 +43,8 @@ class LifecyclePluginUtilsTests(common.HeatTestCase): self.m.UnsetStubs() self.m.StubOutWithMock(resources.global_env(), 'get_stack_lifecycle_plugins') - resources.global_env().get_stack_lifecycle_plugins().\ - MultipleTimes().AndReturn(lcp_mappings) + resources.global_env().get_stack_lifecycle_plugins( + ).MultipleTimes().AndReturn(lcp_mappings) self.m.ReplayAll() # reset cache lifecycle_plugin_utils.pp_class_instances = None diff --git a/heat/tests/test_loadbalancer.py b/heat/tests/test_loadbalancer.py index b0727ea31c..1dd6953f5f 100644 --- a/heat/tests/test_loadbalancer.py +++ b/heat/tests/test_loadbalancer.py @@ -123,8 +123,8 @@ class LoadBalancerTest(common.HeatTestCase): def _mock_get_image_id_success(self, imageId_input, imageId): self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id') - glance.GlanceClientPlugin.get_image_id(imageId_input).\ - MultipleTimes().AndReturn(imageId) + glance.GlanceClientPlugin.get_image_id( + imageId_input).MultipleTimes().AndReturn(imageId) def _create_stubs(self, key_name='test', stub_meta=True): server_name = utils.PhysName( diff --git a/heat/tests/test_nested_stack.py b/heat/tests/test_nested_stack.py index 0dd3ec0c19..1a4df13c7b 100644 --- a/heat/tests/test_nested_stack.py +++ b/heat/tests/test_nested_stack.py @@ -96,8 +96,8 @@ Outputs: return stack def test_nested_stack_create(self): - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(self.nested_template) + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) self.m.ReplayAll() stack = self.create_stack(self.test_template) @@ -124,19 +124,20 @@ Outputs: def test_nested_stack_adopt(self): resource._register_class('GenericResource', generic_rsrc.GenericResource) - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(''' - HeatTemplateFormatVersion: '2012-12-12' - Parameters: - KeyName: - Type: String - Resources: - NestedResource: - Type: GenericResource - Outputs: - Foo: - Value: bar - ''') + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn( + ''' +HeatTemplateFormatVersion: '2012-12-12' +Parameters: + KeyName: + Type: String +Resources: + NestedResource: + Type: GenericResource +Outputs: + Foo: + Value: bar +''') self.m.ReplayAll() adopt_data = { @@ -166,19 +167,20 @@ Outputs: def test_nested_stack_adopt_fail(self): resource._register_class('GenericResource', generic_rsrc.GenericResource) - 
urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(''' - HeatTemplateFormatVersion: '2012-12-12' - Parameters: - KeyName: - Type: String - Resources: - NestedResource: - Type: GenericResource - Outputs: - Foo: - Value: bar - ''') + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn( + ''' +HeatTemplateFormatVersion: '2012-12-12' +Parameters: + KeyName: + Type: String +Resources: + NestedResource: + Type: GenericResource +Outputs: + Foo: + Value: bar +''') self.m.ReplayAll() adopt_data = { @@ -200,8 +202,8 @@ Outputs: self.m.VerifyAll() def test_nested_stack_create_with_timeout(self): - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(self.nested_template) + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) self.m.ReplayAll() timeout_template = template_format.parse( @@ -217,8 +219,9 @@ Outputs: cfg.CONF.set_override('max_resources_per_stack', 1) resource._register_class('GenericResource', generic_rsrc.GenericResource) - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(''' + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn( + ''' HeatTemplateFormatVersion: '2012-12-12' Parameters: KeyName: @@ -245,8 +248,9 @@ Outputs: cfg.CONF.set_override('max_resources_per_stack', 2) resource._register_class('GenericResource', generic_rsrc.GenericResource) - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(''' + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn( + ''' HeatTemplateFormatVersion: '2012-12-12' Parameters: KeyName: @@ -270,10 +274,10 @@ Outputs: self.m.VerifyAll() def test_nested_stack_update(self): - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(self.nested_template) - urlfetch.get('https://server.test/new.template').MultipleTimes().\ - AndReturn(self.update_template) + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) + urlfetch.get('https://server.test/new.template' + ).MultipleTimes().AndReturn(self.update_template) self.m.ReplayAll() @@ -312,10 +316,11 @@ Outputs: def test_nested_stack_update_equals_resource_limit(self): resource._register_class('GenericResource', generic_rsrc.GenericResource) - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(self.nested_template) - urlfetch.get('https://server.test/new.template').MultipleTimes().\ - AndReturn(''' + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) + urlfetch.get('https://server.test/new.template' + ).MultipleTimes().AndReturn( + ''' HeatTemplateFormatVersion: '2012-12-12' Parameters: KeyName: @@ -352,10 +357,11 @@ Outputs: def test_nested_stack_update_exceeds_limit(self): resource._register_class('GenericResource', generic_rsrc.GenericResource) - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(self.nested_template) - urlfetch.get('https://server.test/new.template').MultipleTimes().\ - AndReturn(''' + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) + urlfetch.get('https://server.test/new.template' + ).MultipleTimes().AndReturn( + ''' HeatTemplateFormatVersion: '2012-12-12' Parameters: KeyName: @@ -567,8 +573,8 @@ Resources: self.m.VerifyAll() def test_nested_stack_delete(self): - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - 
AndReturn(self.nested_template) + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) self.m.ReplayAll() stack = self.create_stack(self.test_template) @@ -583,8 +589,8 @@ Resources: self.m.VerifyAll() def test_nested_stack_delete_then_delete_parent_stack(self): - urlfetch.get('https://server.test/the.template').MultipleTimes().\ - AndReturn(self.nested_template) + urlfetch.get('https://server.test/the.template' + ).MultipleTimes().AndReturn(self.nested_template) self.m.ReplayAll() stack = self.create_stack(self.test_template) diff --git a/heat/tests/test_neutron_autoscaling.py b/heat/tests/test_neutron_autoscaling.py index e2c83ba11e..11040d7a37 100644 --- a/heat/tests/test_neutron_autoscaling.py +++ b/heat/tests/test_neutron_autoscaling.py @@ -225,11 +225,11 @@ class AutoScalingTest(common.HeatTestCase): memberc_ret_block = copy.deepcopy(memberc_block) memberc_ret_block['member']['id'] = str(uuid.uuid4()) - neutronclient.Client.create_health_monitor(mon_block).\ - AndReturn(mon_ret_block) + neutronclient.Client.create_health_monitor( + mon_block).AndReturn(mon_ret_block) - neutronclient.Client.create_pool(pool_block).\ - AndReturn(pool_ret_block) + neutronclient.Client.create_pool( + pool_block).AndReturn(pool_ret_block) neutronclient.Client.associate_health_monitor( pool_ret_block['pool']['id'], @@ -237,57 +237,57 @@ class AutoScalingTest(common.HeatTestCase): 'id': mon_ret_block['health_monitor']['id'] }}).AndReturn(None) - neutronclient.Client.create_vip(vip_block).\ - AndReturn(vip_ret_block) + neutronclient.Client.create_vip( + vip_block).AndReturn(vip_ret_block) - neutronclient.Client.show_pool(pool_ret_block['pool']['id']).\ - AndReturn(pool_ret_block) + neutronclient.Client.show_pool( + pool_ret_block['pool']['id']).AndReturn(pool_ret_block) - neutronclient.Client.show_vip(vip_ret_block['vip']['id']).\ - AndReturn(vip_ret_block) + neutronclient.Client.show_vip( + vip_ret_block['vip']['id']).AndReturn(vip_ret_block) parser.Stack.validate() instid = str(uuid.uuid4()) instance.Instance.handle_create().AndReturn(instid) - instance.Instance.check_create_complete(mox.IgnoreArg())\ - .AndReturn(False) - instance.Instance.check_create_complete(mox.IgnoreArg())\ - .AndReturn(True) + instance.Instance.check_create_complete( + mox.IgnoreArg()).AndReturn(False) + instance.Instance.check_create_complete( + mox.IgnoreArg()).AndReturn(True) self.stub_ImageConstraint_validate() self.stub_FlavorConstraint_validate() nova.NovaClientPlugin.server_to_ipaddress( mox.IgnoreArg()).AndReturn('1.2.3.4') - neutronclient.Client.create_member(membera_block).\ - AndReturn(membera_ret_block) + neutronclient.Client.create_member( + membera_block).AndReturn(membera_ret_block) # Start of update parser.Stack.validate() instid = str(uuid.uuid4()) instance.Instance.handle_create().AndReturn(instid) - instance.Instance.check_create_complete(mox.IgnoreArg())\ - .AndReturn(False) - instance.Instance.check_create_complete(mox.IgnoreArg())\ - .AndReturn(True) + instance.Instance.check_create_complete( + mox.IgnoreArg()).AndReturn(False) + instance.Instance.check_create_complete( + mox.IgnoreArg()).AndReturn(True) instid = str(uuid.uuid4()) instance.Instance.handle_create().AndReturn(instid) - instance.Instance.check_create_complete(mox.IgnoreArg())\ - .AndReturn(False) - instance.Instance.check_create_complete(mox.IgnoreArg())\ - .AndReturn(True) + instance.Instance.check_create_complete( + mox.IgnoreArg()).AndReturn(False) + instance.Instance.check_create_complete( + 
mox.IgnoreArg()).AndReturn(True) nova.NovaClientPlugin.server_to_ipaddress( mox.IgnoreArg()).AndReturn('1.2.3.5') - neutronclient.Client.create_member(memberb_block).\ - AndReturn(memberb_ret_block) + neutronclient.Client.create_member( + memberb_block).AndReturn(memberb_ret_block) nova.NovaClientPlugin.server_to_ipaddress( mox.IgnoreArg()).AndReturn('1.2.3.6') - neutronclient.Client.create_member(memberc_block).\ - AndReturn(memberc_ret_block) + neutronclient.Client.create_member( + memberc_block).AndReturn(memberc_ret_block) self.m.ReplayAll() diff --git a/heat/tests/test_nokey.py b/heat/tests/test_nokey.py index 0dd393de54..f09a1e1e39 100644 --- a/heat/tests/test_nokey.py +++ b/heat/tests/test_nokey.py @@ -52,8 +52,8 @@ class nokeyTest(common.HeatTestCase): stack = utils.parse_stack(t, stack_name=stack_name) t['Resources']['WebServer']['Properties']['ImageId'] = 'CentOS 5.2' - t['Resources']['WebServer']['Properties']['InstanceType'] = \ - '256 MB Server' + t['Resources']['WebServer']['Properties'][ + 'InstanceType'] = '256 MB Server' resource_defns = stack.t.resource_definitions(stack) instance = instances.Instance('create_instance_name', resource_defns['WebServer'], stack) diff --git a/heat/tests/test_notifications.py b/heat/tests/test_notifications.py index 8ccac03c07..124f2635ba 100644 --- a/heat/tests/test_notifications.py +++ b/heat/tests/test_notifications.py @@ -173,10 +173,10 @@ class ScaleNotificationTest(common.HeatTestCase): self.patchobject(nova.KeypairConstraint, 'validate') self.patchobject(glance.ImageConstraint, 'validate') self.patchobject(nova.FlavorConstraint, 'validate') - self.patchobject(instance.Instance, 'handle_create')\ - .return_value = True - self.patchobject(instance.Instance, 'check_create_complete')\ - .return_value = True + self.patchobject(instance.Instance, 'handle_create' + ).return_value = True + self.patchobject(instance.Instance, 'check_create_complete' + ).return_value = True self.patchobject(stack_resource.StackResource, 'check_update_complete').return_value = True diff --git a/heat/tests/test_parameters.py b/heat/tests/test_parameters.py index 1fae69dce3..561ff6fd36 100644 --- a/heat/tests/test_parameters.py +++ b/heat/tests/test_parameters.py @@ -374,8 +374,8 @@ class ParameterTest(testtools.TestCase): 'AllowedPattern': '[a-z]*'} err = self.assertRaises(exception.StackValidationFailed, self.new_parameter, 'testparam', schema, '234') - expected = 'Parameter \'testparam\' is invalid: '\ - '"234" does not match pattern "[a-z]*"' + expected = ("Parameter 'testparam' is invalid: " + '"234" does not match pattern "[a-z]*"') self.assertEqual(expected, six.text_type(err)) diff --git a/heat/tests/test_parser.py b/heat/tests/test_parser.py index ccdbe82332..25cb58cf9c 100644 --- a/heat/tests/test_parser.py +++ b/heat/tests/test_parser.py @@ -2310,8 +2310,8 @@ class StackTest(common.HeatTestCase): {'Type': 'ResourceWithPropsType', 'Properties': {'Foo': 'xyz'}}, {'Type': 'ResourceWithPropsType', - 'Properties': {'Foo': 'abc'}}).WithSideEffects(check_props) \ - .AndRaise(resource.UpdateReplace) + 'Properties': {'Foo': 'abc'}} + ).WithSideEffects(check_props).AndRaise(resource.UpdateReplace) self.m.ReplayAll() self.stack.update(updated_stack) @@ -3082,8 +3082,8 @@ class StackTest(common.HeatTestCase): self.m.StubOutWithMock(generic_rsrc.ResourceWithProps, 'handle_create') - generic_rsrc.ResourceWithProps.handle_create().MultipleTimes().\ - AndReturn(None) + generic_rsrc.ResourceWithProps.handle_create().MultipleTimes( + ).AndReturn(None) self.m.ReplayAll() 
@@ -3696,7 +3696,7 @@ class StackTest(common.HeatTestCase): def test_stack_name_invalid(self): stack_names = ['_foo', '1bad', '.kcats', 'test stack', ' teststack', - '^-^', '\"stack\"', '1234', 'cat|dog', '$(foo)', + '^-^', '"stack"', '1234', 'cat|dog', '$(foo)', 'test/stack', 'test\stack', 'test::stack', 'test;stack', 'test~stack', '#test'] for stack_name in stack_names: diff --git a/heat/tests/test_properties.py b/heat/tests/test_properties.py index 688f58d622..8bd385772d 100644 --- a/heat/tests/test_properties.py +++ b/heat/tests/test_properties.py @@ -917,8 +917,8 @@ class PropertyTest(testtools.TestCase): p = properties.Property({'Type': 'List', 'Schema': list_schema}) ex = self.assertRaises(exception.StackValidationFailed, p.get_value, [42, 'fish'], True) - self.assertEqual('Property error : 1 Value \'fish\' is not ' - 'an integer', six.text_type(ex)) + self.assertEqual("Property error : 1 Value 'fish' is not " + "an integer", six.text_type(ex)) class PropertiesTest(testtools.TestCase): @@ -1613,8 +1613,9 @@ class PropertiesValidationTest(testtools.TestCase): prop_expected = {'foo': {'Ref': 'foo'}} param_expected = {'foo': {'Type': 'Json'}} - (parameters, props) = \ - properties.Properties.schema_to_parameters_and_properties(schema) + (parameters, + props) = properties.Properties.schema_to_parameters_and_properties( + schema) self.assertEqual(param_expected, parameters) self.assertEqual(prop_expected, props) @@ -1628,8 +1629,9 @@ class PropertiesValidationTest(testtools.TestCase): prop_expected = {'foo': {'Fn::Split': [",", {'Ref': 'foo'}]}} param_expected = {'foo': {'Type': 'CommaDelimitedList'}} - (parameters, props) = \ - properties.Properties.schema_to_parameters_and_properties(schema) + (parameters, + props) = properties.Properties.schema_to_parameters_and_properties( + schema) self.assertEqual(param_expected, parameters) self.assertEqual(prop_expected, props) @@ -1648,8 +1650,9 @@ class PropertiesValidationTest(testtools.TestCase): prop_expected = {'foo': {'Fn::Split': [",", {'Ref': 'foo'}]}} param_expected = {'foo': {'Type': 'CommaDelimitedList'}} - (parameters, props) = \ - properties.Properties.schema_to_parameters_and_properties(schema) + (parameters, + props) = properties.Properties.schema_to_parameters_and_properties( + schema) self.assertEqual(param_expected, parameters) self.assertEqual(prop_expected, props) @@ -1661,8 +1664,9 @@ class PropertiesValidationTest(testtools.TestCase): prop_expected = {'foo': {'Ref': 'foo'}} param_expected = {'foo': {'Type': 'String'}} - (parameters, props) = \ - properties.Properties.schema_to_parameters_and_properties(schema) + (parameters, + props) = properties.Properties.schema_to_parameters_and_properties( + schema) self.assertEqual(param_expected, parameters) self.assertEqual(prop_expected, props) @@ -1769,8 +1773,9 @@ class PropertiesValidationTest(testtools.TestCase): def test_schema_to_template_empty_schema(self): schema = {} - (parameters, props) = \ - properties.Properties.schema_to_parameters_and_properties(schema) + (parameters, + props) = properties.Properties.schema_to_parameters_and_properties( + schema) self.assertEqual({}, parameters) self.assertEqual({}, props) diff --git a/heat/tests/test_provider_template.py b/heat/tests/test_provider_template.py index 5910c20399..32b989afb3 100644 --- a/heat/tests/test_provider_template.py +++ b/heat/tests/test_provider_template.py @@ -163,8 +163,10 @@ class ProviderTemplateTest(common.HeatTestCase): # verify List conversion self.assertEqual("one,two,three", 
converted_params.get("AList")) # verify Member List conversion - mem_exp = '.member.0.key=name,.member.0.value=three,' \ - '.member.1.key=name,.member.1.value=four' + mem_exp = ('.member.0.key=name,' + '.member.0.value=three,' + '.member.1.key=name,' + '.member.1.value=four') self.assertEqual(mem_exp, converted_params.get("MemList")) # verify Number conversion self.assertEqual(5, converted_params.get("ANum")) @@ -492,8 +494,9 @@ class ProviderTemplateTest(common.HeatTestCase): self.assertTrue(test_templ, "Empty test template") self.m.StubOutWithMock(urlfetch, "get") urlfetch.get(test_templ_name, - allowed_schemes=('file',))\ - .AndRaise(urlfetch.URLFetchError(_('Failed to retrieve template'))) + allowed_schemes=('file',) + ).AndRaise(urlfetch.URLFetchError( + _('Failed to retrieve template'))) urlfetch.get(test_templ_name, allowed_schemes=('http', 'https')).AndReturn(test_templ) parsed_test_templ = template_format.parse(test_templ) @@ -607,8 +610,9 @@ class ProviderTemplateTest(common.HeatTestCase): self.m.StubOutWithMock(urlfetch, "get") urlfetch.get(test_templ_name, allowed_schemes=('http', 'https', - 'file'))\ - .AndRaise(urlfetch.URLFetchError(_('Failed to retrieve template'))) + 'file') + ).AndRaise(urlfetch.URLFetchError( + _('Failed to retrieve template'))) self.m.ReplayAll() definition = rsrc_defn.ResourceDefinition('test_t_res', @@ -635,8 +639,9 @@ class ProviderTemplateTest(common.HeatTestCase): self.m.StubOutWithMock(urlfetch, "get") urlfetch.get(test_templ_name, - allowed_schemes=('http', 'https'))\ - .AndRaise(urlfetch.URLFetchError(_('Failed to retrieve template'))) + allowed_schemes=('http', 'https') + ).AndRaise(urlfetch.URLFetchError( + _('Failed to retrieve template'))) self.m.ReplayAll() definition = rsrc_defn.ResourceDefinition('test_t_res', @@ -686,8 +691,8 @@ class ProviderTemplateTest(common.HeatTestCase): self.m.StubOutWithMock(urlfetch, "get") urlfetch.get(test_templ_name, - allowed_schemes=('http', 'https'))\ - .AndReturn(wrong_template) + allowed_schemes=('http', 'https') + ).AndReturn(wrong_template) self.m.ReplayAll() definition = rsrc_defn.ResourceDefinition('test_t_res', diff --git a/heat/tests/test_remote_stack.py b/heat/tests/test_remote_stack.py index 5746f0bde9..3bd75c0a17 100644 --- a/heat/tests/test_remote_stack.py +++ b/heat/tests/test_remote_stack.py @@ -254,8 +254,8 @@ class RemoteStackTest(tests_common.HeatTestCase): ex = self.assertRaises(exception.StackValidationFailed, rsrc.validate) - msg = 'Cannot establish connection to Heat endpoint at region "%s"'\ - % self.bad_region + msg = ('Cannot establish connection to Heat endpoint ' + 'at region "%s"' % self.bad_region) self.assertIn(msg, six.text_type(ex)) def test_remote_validation_failed(self): @@ -373,8 +373,8 @@ class RemoteStackTest(tests_common.HeatTestCase): remote_stack_id = rsrc.resource_id error = self.assertRaises(exception.ResourceFailure, scheduler.TaskRunner(rsrc.delete)) - error_msg = 'ResourceInError: Went to status DELETE_FAILED due to '\ - '"Remote stack deletion failed"' + error_msg = ('ResourceInError: Went to status DELETE_FAILED due to ' + '"Remote stack deletion failed"') self.assertIn(error_msg, six.text_type(error)) self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state) self.heat.stacks.delete.assert_called_with(stack_id=remote_stack_id) diff --git a/heat/tests/test_resource_group.py b/heat/tests/test_resource_group.py index 6b329670a3..885302fbc6 100644 --- a/heat/tests/test_resource_group.py +++ b/heat/tests/test_resource_group.py @@ -631,7 +631,7 @@ class 
ResourceGroupTest(common.HeatTestCase): resg = resource_group.ResourceGroup('test', snip, stack) exc = self.assertRaises(exception.StackValidationFailed, resg.validate) - errstr = 'removal_policies "\'notallowed\'" is not a list' + errstr = "removal_policies \"'notallowed'\" is not a list" self.assertIn(errstr, six.text_type(exc)) def test_invalid_removal_policies_nomap(self): diff --git a/heat/tests/test_server.py b/heat/tests/test_server.py index 148be18e34..e904d24005 100644 --- a/heat/tests/test_server.py +++ b/heat/tests/test_server.py @@ -167,14 +167,14 @@ class ServersTest(common.HeatTestCase): image_id=None): (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['image'] = \ - image_id or 'CentOS 5.2' - tmpl.t['Resources']['WebServer']['Properties']['flavor'] = \ - '256 MB Server' + tmpl.t['Resources']['WebServer']['Properties'][ + 'image'] = image_id or 'CentOS 5.2' + tmpl.t['Resources']['WebServer']['Properties'][ + 'flavor'] = '256 MB Server' if server_name is not None: - tmpl.t['Resources']['WebServer']['Properties']['name'] = \ - server_name + tmpl.t['Resources']['WebServer']['Properties'][ + 'name'] = server_name return tmpl, stack @@ -235,8 +235,8 @@ class ServersTest(common.HeatTestCase): imageId_input).MultipleTimes().AndReturn(imageId) if server_rebuild: - glance.GlanceClientPlugin.get_image_id('F17-x86_64-gold').\ - MultipleTimes().AndReturn(744) + glance.GlanceClientPlugin.get_image_id( + 'F17-x86_64-gold').MultipleTimes().AndReturn(744) def _mock_get_image_id_fail(self, image_id, exp): self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id') @@ -244,8 +244,8 @@ class ServersTest(common.HeatTestCase): def _mock_get_keypair_success(self, keypair_input, keypair): self.m.StubOutWithMock(nova.NovaClientPlugin, 'get_keypair') - nova.NovaClientPlugin.get_keypair(keypair_input).MultipleTimes().\ - AndReturn(keypair) + nova.NovaClientPlugin.get_keypair( + keypair_input).MultipleTimes().AndReturn(keypair) def _server_validate_mock(self, server): self.m.StubOutWithMock(nova.NovaClientPlugin, '_create') @@ -331,8 +331,8 @@ class ServersTest(common.HeatTestCase): stack_name = 'create_metadata_test_stack' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl['Resources']['WebServer']['Properties']['metadata'] = \ - {'a': 1} + tmpl['Resources']['WebServer']['Properties'][ + 'metadata'] = {'a': 1} resource_defns = tmpl.resource_definitions(stack) server = servers.Server('create_metadata_test_server', resource_defns['WebServer'], stack) @@ -426,9 +426,9 @@ class ServersTest(common.HeatTestCase): create = scheduler.TaskRunner(server.create) error = self.assertRaises(exception.ResourceFailure, create) self.assertEqual( - 'StackValidationFailed: Property error : WebServer: ' - 'image Error validating value \'Slackware\': ' - 'The Image (Slackware) could not be found.', + "StackValidationFailed: Property error : WebServer: " + "image Error validating value 'Slackware': " + "The Image (Slackware) could not be found.", six.text_type(error)) self.m.VerifyAll() @@ -477,9 +477,9 @@ class ServersTest(common.HeatTestCase): create = scheduler.TaskRunner(server.create) error = self.assertRaises(exception.ResourceFailure, create) self.assertEqual( - 'StackValidationFailed: Property error : WebServer: ' - 'image Error validating value \'1\': ' - 'The Image (1) could not be found.', + "StackValidationFailed: Property error : WebServer: " + "image Error validating value '1': " + "The Image (1) could not be found.", six.text_type(error)) 
self.m.VerifyAll() @@ -524,8 +524,8 @@ class ServersTest(common.HeatTestCase): stack_name = 'raw_userdata_s' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl['Resources']['WebServer']['Properties']['user_data_format'] = \ - 'RAW' + tmpl['Resources']['WebServer']['Properties'][ + 'user_data_format'] = 'RAW' resource_defns = tmpl.resource_definitions(stack) server = servers.Server('WebServer', @@ -556,10 +556,10 @@ class ServersTest(common.HeatTestCase): stack_name = 'raw_userdata_s' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl['Resources']['WebServer']['Properties']['user_data_format'] = \ - 'RAW' - tmpl['Resources']['WebServer']['Properties']['user_data'] = \ - '8c813873-f6ee-4809-8eec-959ef39acb55' + tmpl['Resources']['WebServer']['Properties'][ + 'user_data_format'] = 'RAW' + tmpl['Resources']['WebServer']['Properties'][ + 'user_data'] = '8c813873-f6ee-4809-8eec-959ef39acb55' resource_defns = tmpl.resource_definitions(stack) server = servers.Server('WebServer', @@ -597,8 +597,8 @@ class ServersTest(common.HeatTestCase): (tmpl, stack) = self._setup_test_stack(stack_name) sc_id = '8c813873-f6ee-4809-8eec-959ef39acb55' - tmpl['Resources']['WebServer']['Properties']['user_data_format'] = \ - 'RAW' + tmpl['Resources']['WebServer']['Properties'][ + 'user_data_format'] = 'RAW' tmpl['Resources']['WebServer']['Properties']['user_data'] = sc_id resource_defns = tmpl.resource_definitions(stack) @@ -635,8 +635,8 @@ class ServersTest(common.HeatTestCase): stack_name = 'software_config_s' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl['Resources']['WebServer']['Properties']['user_data_format'] = \ - 'SOFTWARE_CONFIG' + tmpl['Resources']['WebServer']['Properties'][ + 'user_data_format'] = 'SOFTWARE_CONFIG' stack.stack_user_project_id = '8888' resource_defns = tmpl.resource_definitions(stack) @@ -1006,8 +1006,8 @@ class ServersTest(common.HeatTestCase): error = self.assertRaises(exception.StackValidationFailed, server.validate) self.assertEqual( - 'Property error : WebServer: key_name Error validating ' - 'value \'test2\': The Key (test2) could not be found.', + "Property error : WebServer: key_name Error validating " + "value 'test2': The Key (test2) could not be found.", six.text_type(error)) self.m.VerifyAll() @@ -1049,8 +1049,8 @@ class ServersTest(common.HeatTestCase): tmpl['Resources']['WebServer']['Properties']['networks'] = [ {'port': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}] - tmpl['Resources']['WebServer']['Properties']['security_groups'] = \ - ['my_security_group'] + tmpl['Resources']['WebServer']['Properties'][ + 'security_groups'] = ['my_security_group'] resource_defns = tmpl.resource_definitions(stack) server = servers.Server('server_validate_net_security_groups', @@ -1988,8 +1988,8 @@ class ServersTest(common.HeatTestCase): ex = self.assertRaises(exception.StackValidationFailed, server.validate) - msg = 'Either volume_id or snapshot_id must be specified for device' +\ - ' mapping vdb' + msg = ("Either volume_id or snapshot_id must be specified " + "for device mapping vdb") self.assertEqual(msg, six.text_type(ex)) self.m.VerifyAll() @@ -2012,8 +2012,8 @@ class ServersTest(common.HeatTestCase): ex = self.assertRaises(exception.StackValidationFailed, server.validate) - msg = 'Neither image nor bootable volume is specified for instance %s'\ - % server.name + msg = ('Neither image nor bootable volume is specified ' + 'for instance %s' % server.name) self.assertEqual(msg, six.text_type(ex)) self.m.VerifyAll() @@ -2069,13 +2069,13 @@ class 
ServersTest(common.HeatTestCase): stack_name = 'srv_val' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['personality'] = \ - {"/fake/path1": "fake contents1", - "/fake/path2": "fake_contents2", - "/fake/path3": "fake_contents3", - "/fake/path4": "fake_contents4", - "/fake/path5": "fake_contents5", - "/fake/path6": "fake_contents6"} + tmpl.t['Resources']['WebServer']['Properties'][ + 'personality'] = {"/fake/path1": "fake contents1", + "/fake/path2": "fake_contents2", + "/fake/path3": "fake_contents3", + "/fake/path4": "fake_contents4", + "/fake/path5": "fake_contents5", + "/fake/path6": "fake_contents6"} resource_defns = tmpl.resource_definitions(stack) server = servers.Server('server_create_image_err', resource_defns['WebServer'], stack) @@ -2098,12 +2098,12 @@ class ServersTest(common.HeatTestCase): stack_name = 'srv_val' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['personality'] = \ - {"/fake/path1": "fake contents1", - "/fake/path2": "fake_contents2", - "/fake/path3": "fake_contents3", - "/fake/path4": "fake_contents4", - "/fake/path5": "fake_contents5"} + tmpl.t['Resources']['WebServer']['Properties'][ + 'personality'] = {"/fake/path1": "fake contents1", + "/fake/path2": "fake_contents2", + "/fake/path3": "fake_contents3", + "/fake/path4": "fake_contents4", + "/fake/path5": "fake_contents5"} resource_defns = tmpl.resource_definitions(stack) server = servers.Server('server_create_image_err', resource_defns['WebServer'], stack) @@ -2123,8 +2123,8 @@ class ServersTest(common.HeatTestCase): stack_name = 'srv_val' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['personality'] = \ - {"/fake/path1": "a" * 10240} + tmpl.t['Resources']['WebServer']['Properties'][ + 'personality'] = {"/fake/path1": "a" * 10240} resource_defns = tmpl.resource_definitions(stack) server = servers.Server('server_create_image_err', resource_defns['WebServer'], stack) @@ -2144,8 +2144,8 @@ class ServersTest(common.HeatTestCase): stack_name = 'srv_val' (tmpl, stack) = self._setup_test_stack(stack_name) - tmpl.t['Resources']['WebServer']['Properties']['personality'] = \ - {"/fake/path1": "a" * 10241} + tmpl.t['Resources']['WebServer']['Properties'][ + 'personality'] = {"/fake/path1": "a" * 10241} resource_defns = tmpl.resource_definitions(stack) server = servers.Server('server_create_image_err', resource_defns['WebServer'], stack) @@ -2160,9 +2160,9 @@ class ServersTest(common.HeatTestCase): exc = self.assertRaises(exception.StackValidationFailed, server.validate) - self.assertEqual("The contents of personality file \"/fake/path1\" " - "is larger than the maximum allowed personality " - "file size (10240 bytes).", six.text_type(exc)) + self.assertEqual('The contents of personality file "/fake/path1" ' + 'is larger than the maximum allowed personality ' + 'file size (10240 bytes).', six.text_type(exc)) self.m.VerifyAll() def test_resolve_attribute_server_not_found(self): diff --git a/heat/tests/test_software_component.py b/heat/tests/test_software_component.py index e48a4990b6..7c8889ebff 100644 --- a/heat/tests/test_software_component.py +++ b/heat/tests/test_software_component.py @@ -89,8 +89,8 @@ class SoftwareComponentTest(common.HeatTestCase): self.component.resource_id = None self.assertIsNone(self.component._resolve_attribute('configs')) self.component.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c' - configs = self.\ - 
template['resources']['mysql_component']['properties']['configs'] + configs = self.template['resources']['mysql_component' + ]['properties']['configs'] # configs list is stored in 'config' property of SoftwareConfig value = {'config': {'configs': configs}} self.rpc_client.show_software_config.return_value = value diff --git a/heat/tests/test_sqlalchemy_api.py b/heat/tests/test_sqlalchemy_api.py index b42c846fc4..e9feab15bd 100644 --- a/heat/tests/test_sqlalchemy_api.py +++ b/heat/tests/test_sqlalchemy_api.py @@ -125,8 +125,8 @@ class SqlAlchemyTest(common.HeatTestCase): userdata=mox.IgnoreArg(), scheduler_hints=None, meta=None, nics=None, availability_zone=None, - block_device_mapping=None).MultipleTimes().\ - AndReturn(fc.servers.list()[4]) + block_device_mapping=None + ).MultipleTimes().AndReturn(fc.servers.list()[4]) return fc def _mock_delete(self, mocks): diff --git a/heat/tests/test_stack_resource.py b/heat/tests/test_stack_resource.py index a1a04677c1..c8cd2908f4 100644 --- a/heat/tests/test_stack_resource.py +++ b/heat/tests/test_stack_resource.py @@ -303,8 +303,8 @@ class StackResourceTest(common.HeatTestCase): 'test', resource_defns[self.ws_resname], self.parent_stack) - stk_resource.child_template = \ - mock.Mock(return_value=templatem.Template(self.simple_template)) + stk_resource.child_template = mock.Mock( + return_value=templatem.Template(self.simple_template)) stk_resource.child_params = mock.Mock() exc = exception.RequestLimitExceeded(message='Validation Failed') validation_mock = mock.Mock(side_effect=exc) @@ -320,8 +320,8 @@ class StackResourceTest(common.HeatTestCase): 'test', resource_defns[self.ws_resname], self.parent_stack) - stk_resource.child_template = \ - mock.Mock(return_value=self.simple_template) + stk_resource.child_template = mock.Mock( + return_value=self.simple_template) stk_resource.child_params = mock.Mock() exc = exception.RequestLimitExceeded(message='Validation Failed') validation_mock = mock.Mock(side_effect=exc) diff --git a/heat/tests/test_swiftsignal.py b/heat/tests/test_swiftsignal.py index 492c871515..9380f37769 100644 --- a/heat/tests/test_swiftsignal.py +++ b/heat/tests/test_swiftsignal.py @@ -735,10 +735,10 @@ class SwiftSignalTest(common.HeatTestCase): st.create() self.assertEqual(('CREATE', 'COMPLETE'), st.state) - expected = ('curl -i -X PUT \'http://fake-host.com:8080/v1/' - 'AUTH_test_tenant/%s/test_st-test_wait_condition_' - 'handle-abcdefghijkl\?temp_url_sig=[0-9a-f]{40}&' - 'temp_url_expires=[0-9]{10}\'') % st.id + expected = ("curl -i -X PUT 'http://fake-host.com:8080/v1/" + "AUTH_test_tenant/%s/test_st-test_wait_condition_" + "handle-abcdefghijkl\?temp_url_sig=[0-9a-f]{40}&" + "temp_url_expires=[0-9]{10}'") % st.id self.assertThat(handle.FnGetAtt('curl_cli'), matchers.MatchesRegex(expected)) diff --git a/heat/tests/test_validate.py b/heat/tests/test_validate.py index 3deb47c760..35de1bfdbd 100644 --- a/heat/tests/test_validate.py +++ b/heat/tests/test_validate.py @@ -68,83 +68,77 @@ test_template_ref = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "WikiDatabase": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" } - } - }, - "DataVolume" : { - "Type" : "AWS::EC2::Volume", - "Properties" : { - "Size" : "6", 
- "AvailabilityZone" : "nova" - } - }, - "MountPoint" : { - "Type" : "AWS::EC2::VolumeAttachment", - "Properties" : { - "InstanceId" : { "Ref" : "%s" }, - "VolumeId" : { "Ref" : "DataVolume" }, - "Device" : "/dev/vdb" - } - } + "Resources" : { + "WikiDatabase": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" } + } + }, + "DataVolume" : { + "Type" : "AWS::EC2::Volume", + "Properties" : { + "Size" : "6", + "AvailabilityZone" : "nova" + } + }, + "MountPoint" : { + "Type" : "AWS::EC2::VolumeAttachment", + "Properties" : { + "InstanceId" : { "Ref" : "%s" }, + "VolumeId" : { "Ref" : "DataVolume" }, + "Device" : "/dev/vdb" } } - ''' + } +} +''' test_template_findinmap_valid = ''' { "AWSTemplateFormatVersion" : "2010-09-09", "Description" : "test.", "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2 KeyPair to' + \ - 'enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "WikiDatabase": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" } - } - }, - "DataVolume" : { - "Type" : "AWS::EC2::Volume", - "Properties" : { - "Size" : "6", - "AvailabilityZone" : "nova" - } - }, + "Resources" : { + "WikiDatabase": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" } + } + }, + "DataVolume" : { + "Type" : "AWS::EC2::Volume", + "Properties" : { + "Size" : "6", + "AvailabilityZone" : "nova" + } + }, - "MountPoint" : { - "Type" : "AWS::EC2::VolumeAttachment", - "Properties" : { - "InstanceId" : { "Ref" : "WikiDatabase" }, - "VolumeId" : { "Ref" : "DataVolume" }, - "Device" : "/dev/vdb" - } - } + "MountPoint" : { + "Type" : "AWS::EC2::VolumeAttachment", + "Properties" : { + "InstanceId" : { "Ref" : "WikiDatabase" }, + "VolumeId" : { "Ref" : "DataVolume" }, + "Device" : "/dev/vdb" } } - ''' + } +} +''' test_template_findinmap_invalid = ''' { "AWSTemplateFormatVersion" : "2010-09-09", @@ -152,40 +146,39 @@ test_template_findinmap_invalid = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2 KeyPair to enable SSH ' + \ - 'access to the instances",' + \ - ''' "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Mappings" : { - "AWSInstanceType2Arch" : { - "t1.micro" : { "Arch" : "64" }, - "m1.small" : { "Arch" : "64" }, - "m1.medium" : { "Arch" : "64" }, - "m1.large" : { "Arch" : "64" }, - "m1.xlarge" : { "Arch" : "64" }, - "m2.xlarge" : { "Arch" : "64" }, - "m2.2xlarge" : { "Arch" : "64" }, - "m2.4xlarge" : { "Arch" : "64" }, - "c1.medium" : { "Arch" : "64" }, - "c1.xlarge" : { "Arch" : "64" }, - "cc1.4xlarge" : { "Arch" : "64HVM" }, - "cc2.8xlarge" : { "Arch" : "64HVM" }, - "cg1.4xlarge" : { "Arch" : "64HVM" } - } - }, - "Resources" : { - "WikiDatabase": { - "Type": "AWS::EC2::Instance", - "Properties": { - ''' + \ - '"ImageId" : { "Fn::FindInMap" : [ "DistroArch2AMI", { "Ref" : ' + \ - '"LinuxDistribution" },' + \ - '{ "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : ' + \ - '"InstanceType" }, "Arch" ] } ] },' + \ - ''' + "Mappings" : { + "AWSInstanceType2Arch" : { + "t1.micro" : { "Arch" : "64" }, + "m1.small" : { "Arch" : "64" }, + "m1.medium" : { "Arch" : "64" }, + "m1.large" : { "Arch" : 
"64" }, + "m1.xlarge" : { "Arch" : "64" }, + "m2.xlarge" : { "Arch" : "64" }, + "m2.2xlarge" : { "Arch" : "64" }, + "m2.4xlarge" : { "Arch" : "64" }, + "c1.medium" : { "Arch" : "64" }, + "c1.xlarge" : { "Arch" : "64" }, + "cc1.4xlarge" : { "Arch" : "64HVM" }, + "cc2.8xlarge" : { "Arch" : "64HVM" }, + "cg1.4xlarge" : { "Arch" : "64HVM" } + } + }, + "Resources" : { + "WikiDatabase": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId" : { + "Fn::FindInMap" : [ + "DistroArch2AMI", { "Ref" : "LinuxDistribution" }, + { "Fn::FindInMap" : [ + "AWSInstanceType2Arch", + { "Ref" : "InstanceType" }, "Arch" ] } ] + }, "InstanceType": "m1.large", "KeyName": { "Ref" : "KeyName"} } @@ -242,13 +235,10 @@ test_template_invalid_property = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2 KeyPai", + "Type" : "String" + } + }, "Resources" : { "WikiDatabase": { @@ -271,27 +261,24 @@ test_template_unimplemented_property = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "WikiDatabase": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" }, - "SourceDestCheck": "false" - } - } + "Resources" : { + "WikiDatabase": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" }, + "SourceDestCheck": "false" } } - ''' + } +} +''' test_template_invalid_deletion_policy = ''' { @@ -300,27 +287,24 @@ test_template_invalid_deletion_policy = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "WikiDatabase": { - "Type": "AWS::EC2::Instance", - "DeletionPolicy": "Destroy", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" } - } - } + "Resources" : { + "WikiDatabase": { + "Type": "AWS::EC2::Instance", + "DeletionPolicy": "Destroy", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" } } } - ''' + } +} +''' test_template_snapshot_deletion_policy = ''' { @@ -329,27 +313,24 @@ test_template_snapshot_deletion_policy = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "WikiDatabase": { - "Type": "AWS::EC2::Instance", - "DeletionPolicy": "Snapshot", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" } - } - } + "Resources" : { + "WikiDatabase": { + "Type": "AWS::EC2::Instance", + "DeletionPolicy": "Snapshot", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" } } } - ''' + } +} +''' test_template_volume_snapshot = ''' { @@ -375,26 +356,23 @@ test_unregistered_key = 
''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" } - } - } + "Resources" : { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" } } } - ''' + } +} +''' test_template_image = ''' { @@ -403,26 +381,23 @@ test_template_image = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" } - } - } + "Resources" : { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" } } } - ''' + } +} +''' test_template_invalid_secgroups = ''' { @@ -431,28 +406,25 @@ test_template_invalid_secgroups = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" }, - "SecurityGroups": [ "default" ], - "NetworkInterfaces": [ "mgmt", "data" ] - } - } + "Resources" : { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" }, + "SecurityGroups": [ "default" ], + "NetworkInterfaces": [ "mgmt", "data" ] } } - ''' + } +} +''' test_template_invalid_secgroupids = ''' { @@ -461,28 +433,25 @@ test_template_invalid_secgroupids = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + } + }, - "Resources" : { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" }, - "SecurityGroupIds": [ "default" ], - "NetworkInterfaces": [ "mgmt", "data" ] - } - } + "Resources" : { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" }, + "SecurityGroupIds": [ "default" ], + "NetworkInterfaces": [ "mgmt", "data" ] } } - ''' + } +} +''' test_template_glance_client_exception = ''' { @@ -508,34 +477,28 @@ test_template_unique_logical_name = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String" + }, "AName" : { -''' + \ 
- '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String" - } - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String", + } + }, - "Resources" : { - "AName": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "image_name", - "InstanceType": "m1.large", - "KeyName": { "Ref" : "KeyName" }, - "NetworkInterfaces": [ "mgmt", "data" ] - } - } + "Resources" : { + "AName": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "image_name", + "InstanceType": "m1.large", + "KeyName": { "Ref" : "KeyName" }, + "NetworkInterfaces": [ "mgmt", "data" ] } } - ''' + } +} +''' test_template_cfn_parameter_label = ''' { @@ -544,13 +507,10 @@ test_template_cfn_parameter_label = ''' "Parameters" : { "KeyName" : { -''' + \ - '"Description" : "Name of an existing EC2' + \ - 'KeyPair to enable SSH access to the instances",' + \ - ''' - "Type" : "String", - "Label" : "Nova KeyPair Name" - }, + "Description" : "Name of an existing EC2KeyPair", + "Type" : "String", + "Label" : "Nova KeyPair Name" + } }, "Resources" : { @@ -930,8 +890,7 @@ class validateTest(common.HeatTestCase): # API layer in heat.engine.api.format_validate_parameter. expected = {'KeyName': { 'Type': 'String', - 'Description': 'Name of an existing EC2KeyPair to enable SSH ' - 'access to the instances', + 'Description': 'Name of an existing EC2KeyPair', 'NoEcho': 'false', 'Label': 'KeyName'}} self.assertEqual(expected, res['Parameters']) @@ -1002,8 +961,7 @@ class validateTest(common.HeatTestCase): expected = {'KeyName': { 'Type': 'String', - 'Description': 'Name of an existing EC2KeyPair to enable SSH ' - 'access to the instances', + 'Description': 'Name of an existing EC2KeyPair', 'NoEcho': 'false', 'Label': 'Nova KeyPair Name'}} self.assertEqual(expected, parameters) diff --git a/heat/tests/test_version_negotiation_middleware.py b/heat/tests/test_version_negotiation_middleware.py index 6182d024b4..b720d9601e 100644 --- a/heat/tests/test_version_negotiation_middleware.py +++ b/heat/tests/test_version_negotiation_middleware.py @@ -106,8 +106,9 @@ class VersionNegotiationMiddlewareTest(common.HeatTestCase): major_version = 1 minor_version = 0 request = webob.Request({'PATH_INFO': 'resource'}) - request.headers['Accept'] = 'application/vnd.openstack.' \ - 'orchestration-v{0}.{1}'.format(major_version, minor_version) + request.headers['Accept'] = ( + 'application/vnd.openstack.orchestration-v{0}.{1}'.format( + major_version, minor_version)) response = version_negotiation.process_request(request) @@ -119,8 +120,8 @@ class VersionNegotiationMiddlewareTest(common.HeatTestCase): version_negotiation = vn.VersionNegotiationFilter( self._version_controller_factory, None, None) request = webob.Request({'PATH_INFO': 'resource'}) - request.headers['Accept'] = 'application/vnd.openstack.' 
\ - 'orchestration-v2.0' + request.headers['Accept'] = ( + 'application/vnd.openstack.orchestration-v2.0') response = version_negotiation.process_request(request) diff --git a/heat/tests/test_watch.py b/heat/tests/test_watch.py index 3a30853b49..7d110ca65f 100644 --- a/heat/tests/test_watch.py +++ b/heat/tests/test_watch.py @@ -75,8 +75,8 @@ class WatchRuleTest(common.HeatTestCase): if action_expected: dummy_action = DummyAction() self.m.StubOutWithMock(parser.Stack, 'resource_by_refid') - parser.Stack.resource_by_refid(mox.IgnoreArg()).\ - MultipleTimes().AndReturn(dummy_action) + parser.Stack.resource_by_refid( + mox.IgnoreArg()).MultipleTimes().AndReturn(dummy_action) self.m.ReplayAll() diff --git a/heat/tests/test_wsgi.py b/heat/tests/test_wsgi.py index 44c4f865b8..55e24356df 100644 --- a/heat/tests/test_wsgi.py +++ b/heat/tests/test_wsgi.py @@ -394,7 +394,7 @@ class JSONRequestDeserializerTest(common.HeatTestCase): error = self.assertRaises(exception.RequestLimitExceeded, wsgi.JSONRequestDeserializer().from_json, body) - msg = 'Request limit exceeded: JSON body size ' + \ - '(%s bytes) exceeds maximum allowed size (%s bytes).' % \ - (len(body), cfg.CONF.max_json_body_size) + msg = ('Request limit exceeded: JSON body size ' + '(%s bytes) exceeds maximum allowed size (%s bytes).' % ( + len(body), cfg.CONF.max_json_body_size)) self.assertEqual(msg, six.text_type(error)) diff --git a/tox.ini b/tox.ini index 08687b6be8..57759aaecb 100644 --- a/tox.ini +++ b/tox.ini @@ -55,8 +55,7 @@ commands = oslo-config-generator --config-file=config-generator.conf # H404 multi line docstring should start with a summary # H405 multi line docstring summary not separated with an empty line # H803 no full stop at the end of the commit message -# H904 Wrap long lines in parentheses instead of a backslash -ignore = H404,H405,H803,H904 +ignore = H404,H405,H803 show-source = true exclude=.venv,.git,.tox,dist,*openstack/common*,*lib/python*,*egg,tools,build max-complexity=20
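
For reference, a minimal standalone sketch of the two patterns this patch applies throughout the tree: parenthesized line continuation in place of a trailing backslash (the H904 check, which tox.ini now enforces by dropping it from the ignore list), and choosing the outer quote character so that escaped quotes are not needed. This is an illustrative example only, not part of the patch; the function and variable names below are invented for the sketch.

    # Illustrative sketch only -- hypothetical names, not Heat code.

    def fetch_template(url):
        """Stand-in for a long call chain that has to span lines."""
        return "HeatTemplateFormatVersion: '2012-12-12'  # from %s" % url


    # Before (flagged by H904): a trailing backslash carries the
    # continuation, and the apostrophe forces an escaped quote.
    template = fetch_template('https://server.test/the.template') \
        .upper()
    message = 'Can\'t find role heat_stack_user'

    # After: parentheses carry the continuation, and the outer quote
    # character is picked so no escaping is required.
    template = (
        fetch_template('https://server.test/the.template')
    ).upper()
    message = "Can't find role heat_stack_user"

Parenthesized continuations survive reflowing and trailing-whitespace edits, which is why the hacking check prefers them over backslashes; the quote-character change is the "escaped quotes ... avoided where it makes sense" part of the same cleanup.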