Merge "[codespell] fix final typos and enable ci"

This commit is contained in:
Zuul 2023-12-18 21:20:29 +00:00 committed by Gerrit Code Review
commit 258e6f7b91
51 changed files with 106 additions and 90 deletions

View File

@ -34,3 +34,8 @@ repos:
hooks: hooks:
- id: autopep8 - id: autopep8
files: '^.*\.py$' files: '^.*\.py$'
- repo: https://github.com/codespell-project/codespell
rev: v2.2.4
hooks:
- id: codespell
args: ['--ignore-words=doc/dictionary.txt']

View File

@ -9,4 +9,5 @@ wile
usera usera
dettach dettach
excpt excpt
imigration imigration
childs

View File

@ -91,7 +91,7 @@ steps:
needs to track how many slots are available and used in order to needs to track how many slots are available and used in order to
avoid attempting to exceed that limit in the hardware. avoid attempting to exceed that limit in the hardware.
Since version 8.0.0, libvirt exposes maximun mumber of SEV guests Since version 8.0.0, libvirt exposes maximum number of SEV guests
which can run concurrently in its host, so the limit is automatically which can run concurrently in its host, so the limit is automatically
detected using this feature. detected using this feature.

View File

@ -380,7 +380,7 @@ class _CyborgClient(object):
once, the 2nd and later calls will throw errors. once, the 2nd and later calls will throw errors.
Cyborg deletes the ARQs without error, or returns 404 if there is ARQ Cyborg deletes the ARQs without error, or returns 404 if there is ARQ
which already deleted. In either way, existed ARQs in arq_uuids wil be which already deleted. In either way, existed ARQs in arq_uuids will be
deleted. Such 404 error can be ignored safely. deleted. Such 404 error can be ignored safely.
If this fails, an error is logged but no exception is raised If this fails, an error is logged but no exception is raised

View File

@ -165,7 +165,7 @@ class ProjectMapper(APIMapper):
def _get_project_id_token(self): def _get_project_id_token(self):
# NOTE(sdague): project_id parameter is only valid if its hex # NOTE(sdague): project_id parameter is only valid if its hex
# or hex + dashes (note, integers are a subset of this). This # or hex + dashes (note, integers are a subset of this). This
# is required to hand our overlaping routes issues. # is required to hand our overlapping routes issues.
return '{project_id:[0-9a-f-]+}' return '{project_id:[0-9a-f-]+}'
def resource(self, member_name, collection_name, **kwargs): def resource(self, member_name, collection_name, **kwargs):

View File

@ -1244,6 +1244,6 @@ Name (FQDN).
--------------------------------------------------- ---------------------------------------------------
Any evacuated instances will be now stopped at destination. This Any evacuated instances will be now stopped at destination. This
requires minimun nova release 27.0.0, OpenStack release 2023.1 requires minimum nova release 27.0.0, OpenStack release 2023.1
Antelope. Operators can still use previous microversion for older Antelope. Operators can still use previous microversion for older
behavior. behavior.

View File

@ -685,7 +685,7 @@ query_params_v273['properties'].update({
query_params_v275 = copy.deepcopy(query_params_v273) query_params_v275 = copy.deepcopy(query_params_v273)
# 1. Update sort_keys to allow only valid sort keys: # 1. Update sort_keys to allow only valid sort keys:
# NOTE(gmann): Remove the ignored sort keys now because 'additionalProperties' # NOTE(gmann): Remove the ignored sort keys now because 'additionalProperties'
# is Flase for query schema. Starting from miceoversion 2.75, API will # is False for query schema. Starting from microversion 2.75, API will
# raise 400 for any not-allowed sort keys instead of ignoring them. # raise 400 for any not-allowed sort keys instead of ignoring them.
VALID_SORT_KEYS_V275 = copy.deepcopy(VALID_SORT_KEYS_V273) VALID_SORT_KEYS_V275 = copy.deepcopy(VALID_SORT_KEYS_V273)
VALID_SORT_KEYS_V275['enum'] = list( VALID_SORT_KEYS_V275['enum'] = list(

View File

@ -165,7 +165,7 @@ class ServerGroupController(wsgi.Controller):
# In existing behavior, if non-admin users requesting # In existing behavior, if non-admin users requesting
# all projects server groups they do not get error instead # all projects server groups they do not get error instead
# get their own server groups. Once we switch to policy # get their own server groups. Once we switch to policy
# new defaults completly then we can remove the above check. # new defaults completely then we can remove the above check.
# Until then, let's keep the old behaviour. # Until then, let's keep the old behaviour.
context.can(sg_policies.POLICY_ROOT % 'index:all_projects', context.can(sg_policies.POLICY_ROOT % 'index:all_projects',
target={'project_id': project_id}) target={'project_id': project_id})

View File

@ -45,7 +45,7 @@ def verify_project_id(context, project_id):
msg = _("Nova was unable to find Keystone service endpoint.") msg = _("Nova was unable to find Keystone service endpoint.")
# TODO(astupnik). It may be reasonable to switch to HTTP 503 # TODO(astupnik). It may be reasonable to switch to HTTP 503
# (HTTP Service Unavailable) instead of HTTP Bad Request here. # (HTTP Service Unavailable) instead of HTTP Bad Request here.
# If proper Keystone servie is inaccessible, then technially # If proper Keystone service is inaccessible, then technically
# this is a server side error and not an error in Nova. # this is a server side error and not an error in Nova.
raise webob.exc.HTTPBadRequest(explanation=msg) raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException: except kse.ClientException:

View File

@ -142,7 +142,7 @@ class Request(wsgi.Request):
# no match. This is also little tricky that 'default' value cannot be # no match. This is also little tricky that 'default' value cannot be
# None. At least one of default_tag or default must be supplied as # None. At least one of default_tag or default must be supplied as
# an argument to the method, to define the defaulting behavior. # an argument to the method, to define the defaulting behavior.
# So passing a sentinal value to return None from this function. # So passing a sentinel value to return None from this function.
best_match = self.accept_language.lookup( best_match = self.accept_language.lookup(
i18n.get_available_languages(), default='fake_LANG') i18n.get_available_languages(), default='fake_LANG')

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Validators for (preferrably) ``aggregate_instance_extra_specs`` namespaced """Validators for (preferably) ``aggregate_instance_extra_specs`` namespaced
extra specs. extra specs.
These are used by the ``AggregateInstanceExtraSpecsFilter`` scheduler filter. These are used by the ``AggregateInstanceExtraSpecsFilter`` scheduler filter.
@ -60,7 +60,7 @@ EXTRA_SPEC_VALIDATORS = [
}, },
], ],
value={ value={
# this is totally arbitary, since we need to support specific # this is totally arbitrary, since we need to support specific
# values # values
'type': str, 'type': str,
}, },

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Validators for (preferrably) ``capabilities`` namespaced extra specs. """Validators for (preferably) ``capabilities`` namespaced extra specs.
These are used by the ``ComputeCapabilitiesFilter`` scheduler filter. Note that These are used by the ``ComputeCapabilitiesFilter`` scheduler filter. Note that
we explicitly do not allow the unnamespaced variant of extra specs since this we explicitly do not allow the unnamespaced variant of extra specs since this
@ -72,7 +72,7 @@ for capability in (
name=f'capabilities:{capability}', name=f'capabilities:{capability}',
description=DESCRIPTION.format(capability=capability), description=DESCRIPTION.format(capability=capability),
value={ value={
# this is totally arbitary, since we need to support specific # this is totally arbitrary, since we need to support specific
# values # values
'type': str, 'type': str,
}, },

View File

@ -760,7 +760,7 @@ class CellV2Commands(object):
return 0 return 0
def _map_cell0(self, database_connection=None): def _map_cell0(self, database_connection=None):
"""Faciliate creation of a cell mapping for cell0. """Facilitate creation of a cell mapping for cell0.
See map_cell0 for more. See map_cell0 for more.
""" """
def cell0_default_connection(): def cell0_default_connection():
@ -855,7 +855,7 @@ class CellV2Commands(object):
# iteration, we search for the special name and unmunge the UUID to # iteration, we search for the special name and unmunge the UUID to
# pick up where we left off. This is done until all mappings are # pick up where we left off. This is done until all mappings are
# processed. The munging is necessary as there's a unique constraint on # processed. The munging is necessary as there's a unique constraint on
# the UUID field and we need something reversable. For more # the UUID field and we need something reversible. For more
# information, see commit 9038738d0. # information, see commit 9038738d0.
if max_count is not None: if max_count is not None:
@ -3013,7 +3013,7 @@ class VolumeAttachmentCommands(object):
We can do that here as the command requires that the instance is We can do that here as the command requires that the instance is
stopped, something that isn't always the case with the current driver stopped, something that isn't always the case with the current driver
BDM approach and thus the two are kept seperate for the time being. BDM approach and thus the two are kept separate for the time being.
:param instance_uuid: UUID of instance :param instance_uuid: UUID of instance
:param volume_id: ID of volume attached to the instance :param volume_id: ID of volume attached to the instance

View File

@ -3005,7 +3005,7 @@ class API:
The results will be sorted based on the list of sort keys in the The results will be sorted based on the list of sort keys in the
'sort_keys' parameter (first value is primary sort key, second value is 'sort_keys' parameter (first value is primary sort key, second value is
secondary sort ket, etc.). For each sort key, the associated sort secondary sort key, etc.). For each sort key, the associated sort
direction is based on the list of sort directions in the 'sort_dirs' direction is based on the list of sort directions in the 'sort_dirs'
parameter. parameter.
@ -4125,7 +4125,7 @@ class API:
desired destination of the instance during the cold migration desired destination of the instance during the cold migration
:param allow_cross_cell_resize: If True, cross-cell resize is allowed :param allow_cross_cell_resize: If True, cross-cell resize is allowed
for this operation and the host could be in a different cell from for this operation and the host could be in a different cell from
the one that the instance is currently in. If False, the speciifed the one that the instance is currently in. If False, the specified
host must be in the same cell as the instance. host must be in the same cell as the instance.
:returns: ComputeNode object of the requested host :returns: ComputeNode object of the requested host
:raises: CannotMigrateToSameHost if the host is the same as the :raises: CannotMigrateToSameHost if the host is the same as the

View File

@ -2966,7 +2966,7 @@ class ComputeManager(manager.Manager):
return arqs return arqs
def _split_network_arqs(self, arqs, requested_networks): def _split_network_arqs(self, arqs, requested_networks):
"""splif arq request by exra spec from ARQ requested by port. """split arq request by extra spec from ARQ requested by port.
Return ARQ groups tuple:(spec_arqs, port_arqs) Return ARQ groups tuple:(spec_arqs, port_arqs)
Each item in the tuple is a dict like: Each item in the tuple is a dict like:
@ -3682,7 +3682,7 @@ class ComputeManager(manager.Manager):
# manually as we want to maintain a 'reserved' state # manually as we want to maintain a 'reserved' state
# throughout the reimage process from the cinder side so # throughout the reimage process from the cinder side so
# we are excluding the root BDM from certain operations # we are excluding the root BDM from certain operations
# here i.e. deleteing it's mapping before the destroy call. # here i.e. deleting its mapping before the destroy call.
block_device_info_copy = copy.deepcopy(block_device_info) block_device_info_copy = copy.deepcopy(block_device_info)
root_bdm = compute_utils.get_root_bdm(context, instance, bdms) root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
mapping = block_device_info_copy["block_device_mapping"] mapping = block_device_info_copy["block_device_mapping"]
@ -8169,7 +8169,7 @@ class ComputeManager(manager.Manager):
compute_node_uuid = objects.ComputeNode.get_by_nodename( compute_node_uuid = objects.ComputeNode.get_by_nodename(
context, instance.node).uuid context, instance.node).uuid
# we can have multiple request groups, it would be enough to restrict # we can have multiple request groups, it would be enough to restrict
# only one of them to the compute tree but for symetry we restrict # only one of them to the compute tree but for symmetry we restrict
# all of them # all of them
for request_group in request_groups: for request_group in request_groups:
request_group.in_tree = compute_node_uuid request_group.in_tree = compute_node_uuid
@ -8228,7 +8228,7 @@ class ComputeManager(manager.Manager):
exception.AmbiguousResourceProviderForPCIRequest, exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest exception.UnexpectedResourceProviderNameForPCIRequest
): ):
# These are programing errors. So we clean up an re-raise to let # These are programming errors. So we clean up and re-raise to let
# the request fail # the request fail
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
self.reportclient.remove_resources_from_instance_allocation( self.reportclient.remove_resources_from_instance_allocation(
@ -10158,7 +10158,7 @@ class ComputeManager(manager.Manager):
if instance.flavor.extra_specs.get('accel:device_profile'): if instance.flavor.extra_specs.get('accel:device_profile'):
# TODO(brinzhang): After cyborg support batch query ARQs # TODO(brinzhang): After cyborg support batch query ARQs
# for more than one instances, we will improve efficiency # for more than one instances, we will improve efficiency
# with this implemention. # with this implementation.
accel_uuids = cyclient.get_arq_uuids_for_instance(instance) accel_uuids = cyclient.get_arq_uuids_for_instance(instance)
self.shelve_offload_instance( self.shelve_offload_instance(
context, instance, clean_shutdown=False, context, instance, clean_shutdown=False,

View File

@ -205,7 +205,7 @@ class CrossCellLister(metaclass=abc.ABCMeta):
This is the standard filtered/sorted list method for the data type This is the standard filtered/sorted list method for the data type
we are trying to list out of the database. Additional kwargs are we are trying to list out of the database. Additional kwargs are
passsed through. passed through.
:param ctx: A RequestContext :param ctx: A RequestContext
:param filters: A dict of column=filter items :param filters: A dict of column=filter items

View File

@ -33,7 +33,7 @@ SUPPORTED_SCHEMA_VERSIONS = {
# Supported provider config file schema # Supported provider config file schema
SCHEMA_V1 = { SCHEMA_V1 = {
# This defintion uses JSON Schema Draft 7. # This definition uses JSON Schema Draft 7.
# https://json-schema.org/draft-07/json-schema-release-notes.html # https://json-schema.org/draft-07/json-schema-release-notes.html
'type': 'object', 'type': 'object',
'properties': { 'properties': {
@ -137,7 +137,7 @@ SCHEMA_V1 = {
'patternProperties': { 'patternProperties': {
# Allows any key name matching the resource class # Allows any key name matching the resource class
# pattern, check to prevent conflicts with virt # pattern, check to prevent conflicts with virt
# driver owned resouces classes will be done after # driver owned resources classes will be done after
# schema validation. # schema validation.
'^[A-Z0-9_]{1,255}$': { '^[A-Z0-9_]{1,255}$': {
'type': 'object', 'type': 'object',

View File

@ -159,7 +159,7 @@ def get_device_name_for_instance(instance, bdms, device):
This method is a wrapper for get_next_device_name that gets the list This method is a wrapper for get_next_device_name that gets the list
of used devices and the root device from a block device mapping. of used devices and the root device from a block device mapping.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach to a :raises TooManyDiskDevices: if the maximum allowed devices to attach to a
single instance is exceeded. single instance is exceeded.
""" """
mappings = block_device.instance_block_mapping(instance, bdms) mappings = block_device.instance_block_mapping(instance, bdms)
@ -172,7 +172,7 @@ def default_device_names_for_instance(instance, root_device_name,
"""Generate missing device names for an instance. """Generate missing device names for an instance.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach to a :raises TooManyDiskDevices: if the maximum allowed devices to attach to a
single instance is exceeded. single instance is exceeded.
""" """
@ -212,7 +212,7 @@ def get_next_device_name(instance, device_name_list,
/dev/vdc is specified but the backend uses /dev/xvdc), the device /dev/vdc is specified but the backend uses /dev/xvdc), the device
name will be converted to the appropriate format. name will be converted to the appropriate format.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach to a :raises TooManyDiskDevices: if the maximum allowed devices to attach to a
single instance is exceeded. single instance is exceeded.
""" """

View File

@ -45,7 +45,7 @@ def supports_vif_related_pci_allocations(context, host):
def supports_vpmem_live_migration(context): def supports_vpmem_live_migration(context):
"""Checks if the commpute host service is new enough to support """Checks if the compute host service is new enough to support
instance live migration with virtual persistent memory. instance live migration with virtual persistent memory.
:param context: The user request context. :param context: The user request context.

View File

@ -161,7 +161,7 @@ Related options:
The fully qualified path to a PEM file containing the x509 certificate which The fully qualified path to a PEM file containing the x509 certificate which
the VNC proxy server presents to the compute node during VNC authentication. the VNC proxy server presents to the compute node during VNC authentication.
Realted options: Related options:
* ``vnc.auth_schemes``: must include ``vencrypt`` * ``vnc.auth_schemes``: must include ``vencrypt``
* ``vnc.vencrypt_client_key``: must also be set * ``vnc.vencrypt_client_key``: must also be set

View File

@ -457,7 +457,7 @@ the Ironic node (by deleting the nova instance) it takes a while
for Nova to un-reserve that Ironic node in placement. Usually this for Nova to un-reserve that Ironic node in placement. Usually this
is a good idea, because it avoids placement providing an Ironic is a good idea, because it avoids placement providing an Ironic
as a valid candidate when it is still being cleaned. as a valid candidate when it is still being cleaned.
Howerver, if you don't use automatic cleaning, it can cause an However, if you don't use automatic cleaning, it can cause an
extra delay before and Ironic node is available for building a extra delay before an Ironic node is available for building a
new Nova instance. new Nova instance.
"""), """),

View File

@ -1590,7 +1590,7 @@ def instance_get_all_by_filters(
def _get_query_nova_resource_by_changes_time(query, filters, model_object): def _get_query_nova_resource_by_changes_time(query, filters, model_object):
"""Filter resources by changes-since or changes-before. """Filter resources by changes-since or changes-before.
Special keys are used to tweek the query further:: Special keys are used to tweak the query further::
| 'changes-since' - only return resources updated after | 'changes-since' - only return resources updated after
| 'changes-before' - only return resources updated before | 'changes-before' - only return resources updated before
@ -1646,7 +1646,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
| ] | ]
| } | }
Special keys are used to tweek the query further:: Special keys are used to tweak the query further::
| 'changes-since' - only return instances updated after | 'changes-since' - only return instances updated after
| 'changes-before' - only return instances updated before | 'changes-before' - only return instances updated before

View File

@ -178,7 +178,7 @@ def _create_shadow_tables(connection):
'shadow_instance_extra', 'shadow_instance_extra',
['instance_uuid']) ['instance_uuid'])
# 373_migration_uuid; we should't create indexes for shadow tables # 373_migration_uuid; we shouldn't create indexes for shadow tables
# (fixed in migration 16f1fbcab42b) # (fixed in migration 16f1fbcab42b)
op.create_index( op.create_index(

View File

@ -52,7 +52,7 @@ def _find_alembic_conf(database='main'):
def _upgrade_alembic(engine, config, version): def _upgrade_alembic(engine, config, version):
# re-use the connection rather than creating a new one # reuse the connection rather than creating a new one
with engine.begin() as connection: with engine.begin() as connection:
config.attributes['connection'] = connection config.attributes['connection'] = connection
alembic_api.upgrade(config, version or 'head') alembic_api.upgrade(config, version or 'head')

View File

@ -161,7 +161,7 @@ class Forbidden(NovaException):
class NotSupported(NovaException): class NotSupported(NovaException):
# This exception use return code as 400 and can be used # This exception use return code as 400 and can be used
# directly or as base exception for operations whihc are not # directly or as base exception for operations which are not
# supported in Nova. Any feature that is not yet implemented # supported in Nova. Any feature that is not yet implemented
# but plan to implement in future (example: Cyborg # but plan to implement in future (example: Cyborg
# integration operations), should use this exception as base # integration operations), should use this exception as base

View File

@ -198,7 +198,7 @@ def _convert_keys_to_legacy_name(
) -> ty.Dict[str, int]: ) -> ty.Dict[str, int]:
legacy = {} legacy = {}
for new_name, old_name in LEGACY_LIMITS.items(): for new_name, old_name in LEGACY_LIMITS.items():
# defensive incase oslo or keystone doesn't give us an answer # defensive in case oslo or keystone doesn't give us an answer
legacy[old_name] = new_dict.get(new_name) or 0 legacy[old_name] = new_dict.get(new_name) or 0
return legacy return legacy

View File

@ -197,7 +197,7 @@ def enforce_num_instances_and_flavor(
def _convert_keys_to_legacy_name(new_dict): def _convert_keys_to_legacy_name(new_dict):
legacy = {} legacy = {}
for new_name, old_name in LEGACY_LIMITS.items(): for new_name, old_name in LEGACY_LIMITS.items():
# defensive incase oslo or keystone doesn't give us an answer # defensive in case oslo or keystone doesn't give us an answer
legacy[old_name] = new_dict.get(new_name) or 0 legacy[old_name] = new_dict.get(new_name) or 0
return legacy return legacy

View File

@ -1626,7 +1626,7 @@ class API:
self._get_vf_pci_device_profile(pci_dev)) self._get_vf_pci_device_profile(pci_dev))
if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF: if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
# In general the MAC address information flows fom the neutron # In general the MAC address information flows from the neutron
# port to the device in the backend. Except for direct-physical # port to the device in the backend. Except for direct-physical
# ports. In that case the MAC address flows from the physical # ports. In that case the MAC address flows from the physical
# device, the PF, to the neutron port. So when such a port is # device, the PF, to the neutron port. So when such a port is
@ -2226,7 +2226,7 @@ class API:
:param port_id: The id of port to be queried :param port_id: The id of port to be queried
:return: A tuple of vNIC type, trusted status, network ID, resource :return: A tuple of vNIC type, trusted status, network ID, resource
request of the port if any and port numa affintiy policy, request of the port if any and port numa affinity policy,
and device_profile. and device_profile.
Trusted status only affects SR-IOV ports and will always be Trusted status only affects SR-IOV ports and will always be
None for other port types. If no port numa policy is None for other port types. If no port numa policy is
@ -3783,7 +3783,7 @@ class API:
# the migration object... # the migration object...
if migration is not None: if migration is not None:
# NOTE(artom) ... except for live migrations, because the # NOTE(artom) ... except for live migrations, because the
# conductor has already done that whe calling # conductor has already done that when calling
# bind_ports_to_host(). # bind_ports_to_host().
if not migration.is_live_migration: if not migration.is_live_migration:
pci_mapping = self._get_pci_mapping_for_migration( pci_mapping = self._get_pci_mapping_for_migration(

View File

@ -42,7 +42,7 @@ class NetworkRequest(obj_base.NovaObject):
# arq_uuid save cyborg managed port device, pass # arq_uuid save cyborg managed port device, pass
# arq info from conductor to compute # arq info from conductor to compute
'arq_uuid': fields.UUIDField(nullable=True), 'arq_uuid': fields.UUIDField(nullable=True),
# tranfer port's device_profile info from api to conductor # transfer port's device_profile info from api to conductor
'device_profile': fields.StringField(nullable=True) 'device_profile': fields.StringField(nullable=True)
} }

View File

@ -283,7 +283,7 @@ def fill_virtual_interface_list(context, max_count):
return count_all, count_hit return count_all, count_hit
# NOTE(mjozefcz): This is similiar to marker mechanism made for # NOTE(mjozefcz): This is similar to marker mechanism made for
# RequestSpecs object creation. # RequestSpecs object creation.
# Since we have a lot of instances to be check this # Since we have a lot of instances to be check this
# will add a FAKE row that points to last instance # will add a FAKE row that points to last instance
@ -305,7 +305,7 @@ def _set_or_delete_marker_for_migrate_instances(context, marker=None):
instance.project_id = FAKE_UUID instance.project_id = FAKE_UUID
instance.user_id = FAKE_UUID instance.user_id = FAKE_UUID
instance.create() instance.create()
# Thats fake instance, lets destroy it. # That's fake instance, lets destroy it.
# We need only its row to solve constraint issue. # We need only its row to solve constraint issue.
instance.destroy() instance.destroy()

View File

@ -630,7 +630,7 @@ class PciDeviceStats(object):
corresponds to the ``id`` of host NUMACell objects. corresponds to the ``id`` of host NUMACell objects.
:param rp_uuids: A list of PR uuids this request fulfilled from in :param rp_uuids: A list of PR uuids this request fulfilled from in
placement. So here we have to consider only the pools matching with placement. So here we have to consider only the pools matching with
thes RP uuids these RP uuids
:returns: A list of pools that can be used to support the request if :returns: A list of pools that can be used to support the request if
this is possible, else None. this is possible, else None.
""" """

View File

@ -25,7 +25,7 @@ quota_class_sets_policies = [
policy.DocumentedRuleDefault( policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show', name=POLICY_ROOT % 'show',
check_str=base.ADMIN, check_str=base.ADMIN,
description="List quotas for specific quota classs", description="List quotas for specific quota classes",
operations=[ operations=[
{ {
'method': 'GET', 'method': 'GET',

View File

@ -83,7 +83,7 @@ def unprivileged_convert_image(source, dest, in_format, out_format,
@nova.privsep.sys_admin_pctxt.entrypoint @nova.privsep.sys_admin_pctxt.entrypoint
def privileged_qemu_img_info(path, format=None): def privileged_qemu_img_info(path, format=None):
"""Return an oject containing the parsed output from qemu-img info """Return an object containing the parsed output from qemu-img info
This is a privileged call to qemu-img info using the sys_admin_pctxt This is a privileged call to qemu-img info using the sys_admin_pctxt
entrypoint allowing host block devices etc to be accessed. entrypoint allowing host block devices etc to be accessed.

View File

@ -11,7 +11,7 @@
# under the License. # under the License.
""" """
Hypervisor Version Weigher. Weigh hosts by their relative hypervior version. Hypervisor Version Weigher. Weigh hosts by their relative hypervisor version.
The default is to select newer hosts. If you prefer The default is to select newer hosts. If you prefer
to invert the behavior set the 'hypervisor_version_weight_multiplier' option to invert the behavior set the 'hypervisor_version_weight_multiplier' option

View File

@ -35,7 +35,7 @@ class NumInstancesWeigher(weights.BaseHostWeigher):
CONF.filter_scheduler.num_instances_weight_multiplier) CONF.filter_scheduler.num_instances_weight_multiplier)
def _weigh_object(self, host_state, weight_properties): def _weigh_object(self, host_state, weight_properties):
"""Higher weights win. We want to chooose hosts with fewer instances """Higher weights win. We want to choose hosts with fewer instances
as the default, hence the negative value of the multiplier. as the default, hence the negative value of the multiplier.
""" """
return host_state.num_instances return host_state.num_instances

View File

@ -128,7 +128,7 @@ class RBDDriver(object):
connect_timeout=None): connect_timeout=None):
# NOTE(lyarwood): Ensure the rbd and rados modules have been imported # NOTE(lyarwood): Ensure the rbd and rados modules have been imported
# correctly before continuing, this is done in a seperate private # correctly before continuing, this is done in a separate private
# method to allow us to skip this check in unit tests etc. # method to allow us to skip this check in unit tests etc.
self._check_for_import_failure() self._check_for_import_failure()

View File

@ -735,7 +735,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
# This is a wrapped function. The signature we're going to # This is a wrapped function. The signature we're going to
# see here is that of the wrapper, which is almost certainly # see here is that of the wrapper, which is almost certainly
# going to involve varargs and kwargs, and therefore is # going to involve varargs and kwargs, and therefore is
# unlikely to be what we want. If the wrapper manupulates the # unlikely to be what we want. If the wrapper manipulates the
# arguments taken by the wrapped function, the wrapped function # arguments taken by the wrapped function, the wrapped function
# isn't what we want either. In that case we're just stumped: # isn't what we want either. In that case we're just stumped:
# if it ever comes up, add more knobs here to work round it (or # if it ever comes up, add more knobs here to work round it (or

View File

@ -229,7 +229,7 @@ class PowerManagementTestsGovernorNotSupported(PowerManagementTestsBase):
self.useFixture(nova_fixtures.SysFileSystemFixture( self.useFixture(nova_fixtures.SysFileSystemFixture(
cpufreq_enabled=False)) cpufreq_enabled=False))
# Definining the CPUs to be pinned. # Defining the CPUs to be pinned.
self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None, self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
group='compute') group='compute')
self.flags(vcpu_pin_set=None) self.flags(vcpu_pin_set=None)

View File

@ -618,7 +618,7 @@ class ComputeDriver(object):
disk_bus=None, device_type=None, encryption=None): disk_bus=None, device_type=None, encryption=None):
"""Attach the disk to the instance at mountpoint using info. """Attach the disk to the instance at mountpoint using info.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach :raises TooManyDiskDevices: if the maximum allowed devices to attach
to a single instance is exceeded. to a single instance is exceeded.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -1053,7 +1053,7 @@ class ComputeDriver(object):
node, as well as the inventory, aggregates, and traits associated with node, as well as the inventory, aggregates, and traits associated with
those resource providers. those resource providers.
Implementors of this interface are expected to set ``allocation_ratio`` Implementers of this interface are expected to set ``allocation_ratio``
and ``reserved`` values for inventory records, which may be based on and ``reserved`` values for inventory records, which may be based on
configuration options, e.g. ``[DEFAULT]/cpu_allocation_ratio``, configuration options, e.g. ``[DEFAULT]/cpu_allocation_ratio``,
depending on the driver and resource class. If not provided, allocation depending on the driver and resource class. If not provided, allocation
@ -1171,7 +1171,7 @@ class ComputeDriver(object):
:param disk_info: instance disk information :param disk_info: instance disk information
:param migrate_data: a LiveMigrateData object :param migrate_data: a LiveMigrateData object
:returns: migrate_data modified by the driver :returns: migrate_data modified by the driver
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach :raises TooManyDiskDevices: if the maximum allowed devices to attach
to a single instance is exceeded. to a single instance is exceeded.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -1725,7 +1725,7 @@ class ComputeDriver(object):
The metadata of the image of the instance. The metadata of the image of the instance.
:param nova.objects.BlockDeviceMapping root_bdm: :param nova.objects.BlockDeviceMapping root_bdm:
The description of the root device. The description of the root device.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach :raises TooManyDiskDevices: if the maximum allowed devices to attach
to a single instance is exceeded. to a single instance is exceeded.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -1734,7 +1734,7 @@ class ComputeDriver(object):
*block_device_lists): *block_device_lists):
"""Default the missing device names in the block device mapping. """Default the missing device names in the block device mapping.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach :raises TooManyDiskDevices: if the maximum allowed devices to attach
to a single instance is exceeded. to a single instance is exceeded.
""" """
raise NotImplementedError() raise NotImplementedError()
@ -1753,7 +1753,7 @@ class ComputeDriver(object):
implementation if not set. implementation if not set.
:returns: The chosen device name. :returns: The chosen device name.
:raises TooManyDiskDevices: if the maxmimum allowed devices to attach :raises TooManyDiskDevices: if the maximum allowed devices to attach
to a single instance is exceeded. to a single instance is exceeded.
""" """
raise NotImplementedError() raise NotImplementedError()

View File

@ -602,7 +602,7 @@ class FakeDriver(driver.ComputeDriver):
allocations, block_device_info=None, power_on=True): allocations, block_device_info=None, power_on=True):
injected_files = admin_password = None injected_files = admin_password = None
# Finish migration is just like spawning the guest on a destination # Finish migration is just like spawning the guest on a destination
# host during resize/cold migrate, so re-use the spawn() fake to # host during resize/cold migrate, so reuse the spawn() fake to
# claim resources and track the instance on this "hypervisor". # claim resources and track the instance on this "hypervisor".
self.spawn(context, instance, image_meta, injected_files, self.spawn(context, instance, image_meta, injected_files,
admin_password, allocations, admin_password, allocations,

View File

@ -2746,7 +2746,7 @@ def get_ephemeral_encryption_constraint(
flavor: 'objects.Flavor', flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta', image_meta: 'objects.ImageMeta',
) -> bool: ) -> bool:
"""Get the ephemeral encryption constrants based on the flavor and image. """Get the ephemeral encryption constraints based on the flavor and image.
:param flavor: an objects.Flavor object :param flavor: an objects.Flavor object
:param image_meta: an objects.ImageMeta object :param image_meta: an objects.ImageMeta object

View File

@ -606,7 +606,7 @@ class IronicDriver(virt_driver.ComputeDriver):
try: try:
# NOTE(dustinc): The generator returned by the SDK can only be # NOTE(dustinc): The generator returned by the SDK can only be
# interated once. Since there are cases where it needs to be # iterated once. Since there are cases where it needs to be
# iterated more than once, we should return it as a list. In the # iterated more than once, we should return it as a list. In the
# future it may be worth refactoring these other usages so it can # future it may be worth refactoring these other usages so it can
# be returned as a generator. # be returned as a generator.
@ -892,7 +892,7 @@ class IronicDriver(virt_driver.ComputeDriver):
reserved = False reserved = False
if self._node_resources_unavailable(node): if self._node_resources_unavailable(node):
# Operators might mark a node as in maintainance, # Operators might mark a node as in maintenance,
# even when an instance is on the node, # even when an instance is on the node,
# either way lets mark this as reserved # either way lets mark this as reserved
reserved = True reserved = True
@ -1585,7 +1585,7 @@ class IronicDriver(virt_driver.ComputeDriver):
def plug_vifs(self, instance, network_info): def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks. """Plug VIFs into networks.
This method is present for compatability. Any call will result This method is present for compatibility. Any call will result
in a DEBUG log entry being generated, and will otherwise be in a DEBUG log entry being generated, and will otherwise be
ignored, as Ironic manages VIF attachments through a node ignored, as Ironic manages VIF attachments through a node
lifecycle. Please see ``attach_interface``, which is the lifecycle. Please see ``attach_interface``, which is the

View File

@ -512,7 +512,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.provider_tree: provider_tree.ProviderTree = None self.provider_tree: provider_tree.ProviderTree = None
# driver traits will not change during the runtime of the agent # driver traits will not change during the runtime of the agent
# so calcuate them once and save them # so calculate them once and save them
self._static_traits = None self._static_traits = None
# The CPU models in the configuration are case-insensitive, but the CPU # The CPU models in the configuration are case-insensitive, but the CPU
@ -675,7 +675,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(acewit): If the [libvirt]disk_cachemodes is set as # NOTE(acewit): If the [libvirt]disk_cachemodes is set as
# `block=writeback` or `block=writethrough` or `block=unsafe`, # `block=writeback` or `block=writethrough` or `block=unsafe`,
# whose correponding Linux's IO semantic is not O_DIRECT in # whose corresponding Linux's IO semantic is not O_DIRECT in
# file nova.conf, then it will result in an attachment failure # file nova.conf, then it will result in an attachment failure
# because of the libvirt bug # because of the libvirt bug
# (https://bugzilla.redhat.com/show_bug.cgi?id=1086704) # (https://bugzilla.redhat.com/show_bug.cgi?id=1086704)
@ -4314,7 +4314,7 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.InstanceNotRescuable( raise exception.InstanceNotRescuable(
instance_id=instance.uuid, reason=reason % virt_type) instance_id=instance.uuid, reason=reason % virt_type)
# NOTE(lyarwood): Stable device rescue provides the original disk # NOTE(lyarwood): Stable device rescue provides the original disk
# mapping of the instance with the rescue device appened to the # mapping of the instance with the rescue device appended to the
# end. As a result we need to provide the original image_meta, the # end. As a result we need to provide the original image_meta, the
# new rescue_image_meta and block_device_info when calling # new rescue_image_meta and block_device_info when calling
# get_disk_info. # get_disk_info.
@ -5010,7 +5010,7 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.ImageNotFound: except exception.ImageNotFound:
# We must flatten here in order to remove dependency with an orphan # We must flatten here in order to remove dependency with an orphan
# backing file (as snapshot image will be dropped once # backing file (as snapshot image will be dropped once
# unshelve/cross_cell_resize is successfull). # unshelve/cross_cell_resize is successful).
LOG.warning('Current disk image is created on top of a snapshot ' LOG.warning('Current disk image is created on top of a snapshot '
'image and cannot be rebased to original image ' 'image and cannot be rebased to original image '
'because it is no longer available in the image ' 'because it is no longer available in the image '
@ -5156,7 +5156,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance) instance=instance)
guest.attach_device(cfg) guest.attach_device(cfg)
# TODO(sean-k-mooney): we should try and converge this fuction with # TODO(sean-k-mooney): we should try and converge this function with
# _detach_direct_passthrough_vifs which does the same operation correctly # _detach_direct_passthrough_vifs which does the same operation correctly
# for live migration # for live migration
def _detach_direct_passthrough_ports(self, context, instance, guest): def _detach_direct_passthrough_ports(self, context, instance, guest):
@ -5209,7 +5209,7 @@ class LibvirtDriver(driver.ComputeDriver):
# interface element. # interface element.
# So using it for all devices would break vnic-type direct when # So using it for all devices would break vnic-type direct when
# using the sriov_nic_agent ml2 driver or vif of vnic_type vdpa. # using the sriov_nic_agent ml2 driver or vif of vnic_type vdpa.
# Since PF ports cant have the same MAC that means that this # Since PF ports can't have the same MAC that means that this
# use case was for hardware offloaded OVS? many NICs do not allow # use case was for hardware offloaded OVS? many NICs do not allow
# two VFs to have the same MAC on different VLANs due to the # two VFs to have the same MAC on different VLANs due to the
# ordering of the VLAN and MAC filters in there static packet # ordering of the VLAN and MAC filters in there static packet
@ -5217,8 +5217,8 @@ class LibvirtDriver(driver.ComputeDriver):
# non ovs offload case. We should look into this more closely # non ovs offload case. We should look into this more closely
# as from my testing in this patch we appear to use the interface # as from my testing in this patch we appear to use the interface
# element for hardware offloaded ovs too. Infiniband and vnic_type # element for hardware offloaded ovs too. Infiniband and vnic_type
# direct-physical port type do need this code path, both those cant # direct-physical port type do need this code path, but those
# have duplicate MACs... # can't have duplicate MACs...
self._detach_pci_devices(guest, direct_passthrough_pci_addresses) self._detach_pci_devices(guest, direct_passthrough_pci_addresses)
# for ports that are attached with interface elements we cannot use # for ports that are attached with interface elements we cannot use
@ -5426,7 +5426,7 @@ class LibvirtDriver(driver.ComputeDriver):
cpu.mode = mode cpu.mode = mode
cpu.model = models[0] if models else None cpu.model = models[0] if models else None
# compare flavor trait and cpu models, select the first mathched model # compare flavor trait and cpu models, select the first matched model
if flavor and mode == "custom": if flavor and mode == "custom":
flags = libvirt_utils.get_flags_by_flavor_specs(flavor) flags = libvirt_utils.get_flags_by_flavor_specs(flavor)
if flags: if flags:
@ -5502,9 +5502,9 @@ class LibvirtDriver(driver.ComputeDriver):
elif arch == fields.Architecture.PPC64LE: elif arch == fields.Architecture.PPC64LE:
cpu.model = "POWER8" cpu.model = "POWER8"
# TODO(chateaulav): re-evaluate when libvirtd adds overall # TODO(chateaulav): re-evaluate when libvirtd adds overall
# RISCV suuport as a supported architecture, as there is no # RISCV support as a supported architecture, as there is no
# cpu models associated, this simply associates X vcpus to the # cpu models associated, this simply associates X vcpus to the
# guest according to the flavor. Thes same issue should be # guest according to the flavor. These same issue should be
# present with mipsel due to same limitation, but has not been # present with mipsel due to same limitation, but has not been
# tested. # tested.
elif arch == fields.Architecture.MIPSEL: elif arch == fields.Architecture.MIPSEL:
@ -6353,7 +6353,7 @@ class LibvirtDriver(driver.ComputeDriver):
# the guest has the native kernel driver (called "virtio-gpu" in # the guest has the native kernel driver (called "virtio-gpu" in
# Linux) -- i.e. if the guest has the VirtIO GPU driver, it'll # Linux) -- i.e. if the guest has the VirtIO GPU driver, it'll
# be used; otherwise, the 'virtio' model will gracefully # be used; otherwise, the 'virtio' model will gracefully
# fallback to VGA compatibiliy mode. # fallback to VGA compatibility mode.
if ( if (
guestarch in ( guestarch in (
fields.Architecture.I686, fields.Architecture.I686,
@ -8749,7 +8749,7 @@ class LibvirtDriver(driver.ComputeDriver):
for cell in topology.cells: for cell in topology.cells:
cpus = set(cpu.id for cpu in cell.cpus) cpus = set(cpu.id for cpu in cell.cpus)
# NOTE(artom) We assume we'll never see hardware with multipe # NOTE(artom) We assume we'll never see hardware with multiple
# sockets in a single NUMA node - IOW, the socket_id for all CPUs # sockets in a single NUMA node - IOW, the socket_id for all CPUs
# in a single cell will be the same. To make that assumption # in a single cell will be the same. To make that assumption
# explicit, we leave the cell's socket_id as None if that's the # explicit, we leave the cell's socket_id as None if that's the
@ -10692,7 +10692,7 @@ class LibvirtDriver(driver.ComputeDriver):
# cancel migration job. # cancel migration job.
self.live_migration_abort(instance) self.live_migration_abort(instance)
except libvirt.libvirtError: except libvirt.libvirtError:
LOG.warning("Error occured when trying to abort live ", LOG.warning("Error occurred when trying to abort live ",
"migration job, ignoring it.", instance=instance) "migration job, ignoring it.", instance=instance)
raise raise
finally: finally:
@ -11694,7 +11694,7 @@ class LibvirtDriver(driver.ComputeDriver):
shutil.rmtree(swtpm_dir) shutil.rmtree(swtpm_dir)
# apparently shutil.rmtree() isn't reliable on NFS so don't rely # apparently shutil.rmtree() isn't reliable on NFS so don't rely
# only on path existance here. # only on path existence here.
if copy_swtpm_dir and os.path.exists(swtpm_dir): if copy_swtpm_dir and os.path.exists(swtpm_dir):
libvirt_utils.restore_vtpm_dir(swtpm_dir) libvirt_utils.restore_vtpm_dir(swtpm_dir)
elif new_vtpm_config: elif new_vtpm_config:
@ -12396,15 +12396,15 @@ class LibvirtDriver(driver.ComputeDriver):
""" """
dom_caps = self._host.get_domain_capabilities() dom_caps = self._host.get_domain_capabilities()
supported_models: ty.Set[str] = {fields.VIOMMUModel.AUTO} supported_models: ty.Set[str] = {fields.VIOMMUModel.AUTO}
# our min version of qemu/libvirt supprot q35 and virt machine types. # our min version of qemu/libvirt support q35 and virt machine types.
# They also support the smmuv3 and intel iommu modeles so if the qemu # They also support the smmuv3 and intel iommu modeles so if the qemu
# binary is avaiable we can report the trait. # binary is available we can report the trait.
if fields.Architecture.AARCH64 in dom_caps: if fields.Architecture.AARCH64 in dom_caps:
supported_models.add(fields.VIOMMUModel.SMMUV3) supported_models.add(fields.VIOMMUModel.SMMUV3)
if fields.Architecture.X86_64 in dom_caps: if fields.Architecture.X86_64 in dom_caps:
supported_models.add(fields.VIOMMUModel.INTEL) supported_models.add(fields.VIOMMUModel.INTEL)
# the virtio iommu model requires a newer libvirt then our min # the virtio iommu model requires a newer libvirt then our min
# libvirt so we need to check the version explcitly. # libvirt so we need to check the version explicitly.
if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL): if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
supported_models.add(fields.VIOMMUModel.VIRTIO) supported_models.add(fields.VIOMMUModel.VIRTIO)
return { return {

View File

@ -261,7 +261,7 @@ class Guest(object):
LOG.debug(f'No interface of type: {type(cfg)} found in domain') LOG.debug(f'No interface of type: {type(cfg)} found in domain')
return None return None
# FIXME(sean-k-mooney): we should be able to print the list of # FIXME(sean-k-mooney): we should be able to print the list of
# interfaces however some tests use incomplete objects that cant # interfaces however some tests use incomplete objects that can't
# be printed due to incomplete mocks or defects in the libvirt # be printed due to incomplete mocks or defects in the libvirt
# fixture. Lets address this later. # fixture. Lets address this later.
# LOG.debug(f'within interfaces: {list(interfaces)}') # LOG.debug(f'within interfaces: {list(interfaces)}')

View File

@ -1774,7 +1774,7 @@ class Host(object):
return self._supports_secure_boot return self._supports_secure_boot
# we only check the host architecture since the libvirt driver doesn't # we only check the host architecture since the libvirt driver doesn't
# truely support non-host architectures currently # truly support non-host architectures currently
arch = self.get_capabilities().host.cpu.arch arch = self.get_capabilities().host.cpu.arch
domain_caps = self.get_domain_capabilities() domain_caps = self.get_domain_capabilities()
for machine_type in domain_caps[arch]: for machine_type in domain_caps[arch]:

View File

@ -253,7 +253,7 @@ def _update_volume_xml(xml_doc, migrate_data, instance, get_volume_config):
item_dst.tail = None item_dst.tail = None
disk_dev.insert(cnt, item_dst) disk_dev.insert(cnt, item_dst)
# If destination has additional items, thses items should be # If destination has additional items, these items should be
# added here. # added here.
for item_dst in list(xml_doc2): for item_dst in list(xml_doc2):
if item_dst.tag != 'address': if item_dst.tag != 'address':

View File

@ -98,7 +98,7 @@ class _HostMountStateManager(object):
self.cond.notify_all() self.cond.notify_all()
def host_up(self, host): def host_up(self, host):
"""Inialise a new _HostMountState when the libvirt connection comes """Initialise a new _HostMountState when the libvirt connection comes
up. up.
host_up will destroy and re-initialise the current state if one host_up will destroy and re-initialise the current state if one

View File

@ -1513,7 +1513,7 @@ def find_rescue_device(hardware_devices, instance):
"""Returns the rescue device. """Returns the rescue device.
The method will raise an exception if the rescue device does not The method will raise an exception if the rescue device does not
exist. The resuce device has suffix '-rescue.vmdk'. exist. The rescue device has suffix '-rescue.vmdk'.
:param hardware_devices: the hardware devices for the instance :param hardware_devices: the hardware devices for the instance
:param instance: nova.objects.instance.Instance object :param instance: nova.objects.instance.Instance object
:return: the rescue disk device object :return: the rescue disk device object

View File

@ -770,7 +770,7 @@ class API(object):
"""Create a volume attachment. This requires microversion >= 3.44. """Create a volume attachment. This requires microversion >= 3.44.
The attachment_create call was introduced in microversion 3.27. We The attachment_create call was introduced in microversion 3.27. We
need 3.44 as minmum here as we need attachment_complete to finish the need 3.44 as minimum here as we need attachment_complete to finish the
attaching process and it which was introduced in version 3.44. attaching process and it which was introduced in version 3.44.
:param context: The nova request context. :param context: The nova request context.
@ -840,7 +840,7 @@ class API(object):
'code': getattr(ex, 'code', None)}) 'code': getattr(ex, 'code', None)})
def attachment_get_all(self, context, instance_id=None, volume_id=None): def attachment_get_all(self, context, instance_id=None, volume_id=None):
"""Get all attchments by instance id or volume id """Get all attachments by instance id or volume id
:param context: The nova request context. :param context: The nova request context.
:param instance_id: UUID of the instance attachment to get. :param instance_id: UUID of the instance attachment to get.

View File

@ -103,6 +103,6 @@ check_untyped_defs = true
warn_unused_ignores = true warn_unused_ignores = true
[codespell] [codespell]
skip = *.po,*.js,*.css,*.html,*.svg,HACKING.py,*hacking*,*build*,*_static*,doc/dictionary.txt,*.pyc,*.inv,*.gz,*.jpg,*.png,*.vsd,*.graffle skip = *.po,*.js,*.css,*.html,*.svg,HACKING.py,*hacking*,*build*,*_static*,doc/dictionary.txt,*.pyc,*.inv,*.gz,*.jpg,*.png,*.vsd,*.graffle,*.json
count = count =
quiet-level = 4 quiet-level = 4

10
tox.ini
View File

@ -107,15 +107,25 @@ extras =
commands = commands =
bash tools/mypywrap.sh {posargs} bash tools/mypywrap.sh {posargs}
[testenv:codespell]
description =
Run codespell to check spelling.
deps =
pre-commit
commands =
pre-commit run codespell --all-files --show-diff-on-failure
[testenv:pep8] [testenv:pep8]
description = description =
Run style checks. Run style checks.
deps = deps =
{[testenv]deps} {[testenv]deps}
{[testenv:codespell]deps}
autopep8 autopep8
extras = extras =
commands = commands =
{[testenv:mypy]commands} {[testenv:mypy]commands}
{[testenv:codespell]commands}
# check if autopep8 would alter the formatting but don't actually change it # check if autopep8 would alter the formatting but don't actually change it
# so we can gate on this in the ci # so we can gate on this in the ci
autopep8 --exit-code --max-line-length=79 --diff -r nova doc setup.py autopep8 --exit-code --max-line-length=79 --diff -r nova doc setup.py