Merge "[codespell] fix typos in tests"

commit 17b7aa3926
Authored by Zuul on 2023-12-13 23:46:08 +00:00; committed by Gerrit Code Review
34 changed files with 68 additions and 65 deletions

View File

@ -6,4 +6,7 @@ deques
affinitized
noes
wile
usera
+dettach
+excpt
+imigration

View File

@ -1033,7 +1033,7 @@ class API:
context,
['nova-compute']))
if min_version < SUPPORT_VNIC_TYPE_ACCELERATOR:
-msg = ("Port with cyborg profile is not avaliable"
+msg = ("Port with cyborg profile is not available"
" until upgrade finished.")
raise exception.ForbiddenPortsWithAccelerator(msg)
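The hunk above exercises a minimum-service-version gate. A minimal sketch of that pattern, assuming a hypothetical constant value and a stand-in exception class (neither taken from Nova's real code):

    # Hypothetical minimum service version that supports the feature.
    SUPPORT_VNIC_TYPE_ACCELERATOR = 57

    class ForbiddenPortsWithAccelerator(Exception):
        pass

    def check_support_vnic_accelerator(service_versions):
        # Block the request until every nova-compute reports a new
        # enough version, i.e. the rolling upgrade has finished.
        if min(service_versions) < SUPPORT_VNIC_TYPE_ACCELERATOR:
            raise ForbiddenPortsWithAccelerator(
                "Port with cyborg profile is not available"
                " until upgrade finished.")

    check_support_vnic_accelerator([57, 58])       # all upgraded: passes
    try:
        check_support_vnic_accelerator([56, 58])   # mixed-version cloud
    except ForbiddenPortsWithAccelerator:
        pass                                       # rejected until upgrade completes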

View File

@ -254,7 +254,7 @@ class CinderFixture(fixtures.Fixture):
'attach_status': 'attached',
'attachments': attachments,
})
-# Otherwise mark the volume as avilable and detached
+# Otherwise mark the volume as available and detached
else:
volume.update({
'status': 'available',

View File

@ -925,7 +925,7 @@ def disable_event_thread(self):
problematic because it means we've got a floating thread calling
sleep(1) over the life of the unit test. Seems harmless? It's not,
because we sometimes want to test things like retry loops that
-should have specific sleep paterns. An unlucky firing of the
+should have specific sleep patterns. An unlucky firing of the
libvirt thread will cause a test failure.
"""

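The docstring in the hunk above explains why a stray background thread calling sleep(1) is harmful. A small illustration, with a made-up retry loop, of how such a thread could corrupt a test that asserts a specific sleep pattern:

    import time
    from unittest import mock

    def retry_with_backoff(func, attempts=3):
        # Made-up retry loop that sleeps 1s, 2s, 4s between attempts.
        for i in range(attempts):
            try:
                return func()
            except ValueError:
                time.sleep(2 ** i)
        raise ValueError('all attempts failed')

    def test_sleep_pattern():
        sleeps = []
        with mock.patch.object(time, 'sleep', side_effect=sleeps.append):
            try:
                retry_with_backoff(mock.Mock(side_effect=ValueError))
            except ValueError:
                pass
        # A floating libvirt event thread also calling time.sleep(1)
        # during this window would make the recorded pattern
        # nondeterministic and fail this assertion.
        assert sleeps == [1, 2, 4]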
View File

@ -53,7 +53,7 @@ class _Sub(object):
# notifications = pprint.pformat(
# {event: sub._notifications
# for event, sub in VERSIONED_SUBS.items()})
-# FIXME: tranform this to get access to all the
+# FIXME: transform this to get access to all the
# versioned notifications
notifications = []
raise AssertionError(

View File

@ -990,7 +990,7 @@ class OSAPIFixture(fixtures.Fixture):
loader = wsgi.Loader().load_app(service_name)
app = lambda: loader
-# re-use service setup code from wsgi_app to register
+# reuse service setup code from wsgi_app to register
# service, which is looked for in some tests
wsgi_app._setup_service(CONF.host, service_name)
intercept = interceptor.RequestsInterceptor(app, url=endpoint)
@ -1523,7 +1523,7 @@ class DownCellFixture(fixtures.Fixture):
def wrap(cell_uuid, thing):
# We should embed the cell_uuid into the context before
-# wrapping since its used to calcualte the cells_timed_out and
+# wrapping since its used to calculate the cells_timed_out and
# cells_failed properties in the object.
ctxt.cell_uuid = cell_uuid
return multi_cell_list.RecordWrapper(ctxt, sort_ctx, thing)

View File

@ -36,7 +36,7 @@ def get_connector_properties(
class ISCSIConnector(object):
-    """Mimick the iSCSI connector."""
+    """Mimic the iSCSI connector."""
def __init__(
self, root_helper, driver=None, execute=None, use_multipath=False,

View File

@ -162,7 +162,7 @@ class InstanceListTestCase(test.TestCase):
This requests $pages of $pagesize, followed by a final page with
no limit, and a final-final page which should be empty. It validates
-that we got a consistent set of results no patter where the page
+that we got a consistent set of results no matter where the page
boundary is, that we got all the results after the unlimited query,
and that the final page comes back empty when we use the last
instance as a marker.
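The docstring above describes a marker-based paging consistency check. A generic sketch of the same idea, with a toy list standing in for the instance table:

    def list_page(items, limit=None, marker=None):
        # Toy marker pagination: start after the marker, honor the limit.
        start = 0 if marker is None else items.index(marker) + 1
        end = len(items) if limit is None else start + limit
        return items[start:end]

    records = ['a', 'b', 'c', 'd', 'e']
    seen, marker = [], None
    for _ in range(2):                           # $pages pages of $pagesize
        page = list_page(records, limit=2, marker=marker)
        seen += page
        marker = page[-1]
    seen += list_page(records, marker=marker)    # final unlimited page
    assert seen == records                       # no gaps or duplicates
    assert list_page(records, marker=seen[-1]) == []   # final-final page empty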

View File

@ -190,7 +190,7 @@ class TestDatabaseArchive(integrated_helpers._IntegratedTestBase):
And when the operator has e.g. 500,000 deleted instances rows (and
millions of deleted rows total) they are trying to archive, being
-forced to use a max_rows value serveral orders of magnitude lower than
+forced to use a max_rows value several orders of magnitude lower than
the number of rows they need to archive was a poor user experience.
This tests that we are archiving each parent plus their child rows one

View File

@ -128,9 +128,9 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
core, and 16GB of RAM.
:param connection: A fake libvirt connection. You should not provide it
directly. However it is used by restart_compute_service to
-implement restart without loosing the hypervisor state.
+implement restart without losing the hypervisor state.
:returns: The hostname of the created service, which can be used to
-lookup the created service and UUID of the assocaited resource
+lookup the created service and UUID of the associated resource
provider.
"""
if connection and (
@ -264,7 +264,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
class LibvirtMigrationMixin(object):
"""A simple mixin to facilliate successful libvirt live migrations
-Requires that the test class set self.server for the specific test instnace
+Requires that the test class set self.server for the specific test instance
and self.{src,dest} to indicate the direction of the migration. For any
scenarios more complex than this they should override _migrate_stub with
their own implementation.

View File

@ -141,7 +141,7 @@ class NUMALiveMigrationPositiveBase(NUMALiveMigrationBase):
class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
"""Tests that expect the live migration to succeed. Stubs out fakelibvirt's
-migrateToURI3() with a stub that "suceeds" the migration.
+migrateToURI3() with a stub that "succeeds" the migration.
"""
def _migrate_stub(self, domain, destination, params, flags):

View File

@ -849,7 +849,7 @@ class NUMAServersTest(NUMAServersTestBase):
self.assertEqual(2, len(src_numa_topology.cells[0].pinned_cpus))
self.assertEqual(2, len(dst_numa_topology.cells[0].pinned_cpus))
-# before continuing with the actualy confirm process
+# before continuing with the actually confirm process
return orig_confirm(*args, **kwargs)
self.stub_out(

View File

@ -1419,7 +1419,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
# * one type-PFs (slot 2) with 1 type-VFs
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=1, num_vfs=1)
-# the config matches the PCI devs and hte PF but not the VFs
+# the config matches the PCI devs and the PF but not the VFs
compute2_device_spec = self._to_list_of_json_str(
[
{

View File

@ -1132,7 +1132,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
pci_info_no_sriov = copy.deepcopy(pci_info)
-# Disable SRIOV capabilties in PF and delete the VFs
+# Disable SRIOV capabilities in PF and delete the VFs
self._disable_sriov_in_pf(pci_info_no_sriov)
self.start_compute('test_compute0', pci_info=pci_info_no_sriov)
@ -1178,7 +1178,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
As the vnic_type is changed on the port but the vif_type is hwveb
instead of macvtap the vif plug logic will try to look up the netdev
-of the parent VF. Howvere that VF consumed by the instance so the
+of the parent VF. However that VF consumed by the instance so the
netdev does not exists. This causes that the compute service will fail
with an exception during startup
"""
@ -1750,7 +1750,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
# 1 PF and 2 VFs .
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 3)
server = self._confirm_resize(server)
-# but once we confrim it should be reduced back to 1 PF and 1 VF
+# but once we confirm it should be reduced back to 1 PF and 1 VF
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
# assert the hostname has not have changed as part
# of the resize.
@ -1849,7 +1849,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
# enable the dest we do not need to disable the source since cold
-# migrate wont happen to the same host in the libvirt driver
+# migrate won't happen to the same host in the libvirt driver
self.api.put_service(
self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
with mock.patch(
@ -1918,7 +1918,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
# enable the dest we do not need to disable the source since cold
-# migrate wont happen to the same host in the libvirt driver
+# migrate won't happen to the same host in the libvirt driver
self.api.put_service(
self.computes['dest'].service_ref.uuid, {'status': 'enabled'})

View File

@ -92,7 +92,7 @@ class PowerManagementTests(PowerManagementTestsBase):
self.useFixture(nova_fixtures.SysFileSystemFixture())
-# Definining the CPUs to be pinned.
+# Defining the CPUs to be pinned.
self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
group='compute')
self.flags(vcpu_pin_set=None)
@ -183,7 +183,7 @@ class PowerManagementTestsGovernor(PowerManagementTestsBase):
self.useFixture(nova_fixtures.SysFileSystemFixture())
-# Definining the CPUs to be pinned.
+# Defining the CPUs to be pinned.
self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
group='compute')
self.flags(vcpu_pin_set=None)
@ -263,7 +263,7 @@ class PowerManagementMixedInstances(PowerManagementTestsBase):
self.useFixture(nova_fixtures.SysFileSystemFixture())
-# Definining 6 CPUs to be dedicated, not all of them in a series.
+# Defining 6 CPUs to be dedicated, not all of them in a series.
self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
group='compute')
self.flags(vcpu_pin_set=None)

View File

@ -92,7 +92,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""Regression test for bug 1922053 using microversion 2.52.
Required as the PUT /os-services/force-down API used by this test via
-self.api.force_down_service is superseeded by PUT /os-services/{service_id}
+self.api.force_down_service is superseded by PUT /os-services/{service_id}
API used by our parent ForceUpWithDoneEvacuations class from >=2.53.
This test also uses the 'availability_zone' parameter to force the server

View File

@ -130,7 +130,7 @@ class HostStatusPolicyTestCase(test.TestCase,
# Verify non-admin do not receive the host_status field.
self.assertNotIn('host_status', server)
self._set_server_state_active(server)
-# Verify that admin will not receive ths host_status field if the
+# Verify that admin will not receive the host_status field if the
# API microversion < 2.16.
with utils.temporary_mutation(self.admin_api, microversion='2.15'):
server = self._get_server(admin_func(*args))

View File

@ -58,7 +58,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
# Start compute without mocking update_provider_tree. The fake driver
# doesn't implement the method, so this will cause us to start with the
# legacy get_available_resource()-based inventory discovery and
-# boostrapping of placement data.
+# bootstrapping of placement data.
self.compute = self._start_compute(host='host1')
# Mock out update_provider_tree *after* starting compute with the

View File

@ -11,9 +11,9 @@
# under the License.
# This file contains ddt test data consumed by test_provider_config.py to test
-# bad provider configrations through _validate_provider_config().
+# bad provider configurations through _validate_provider_config().
# Sample is required for each test and passed to _validate_provider_config().
-# If excpetion is raised with expected message, the test passes.
+# If exception is raised with expected message, the test passes.
one_invalid_additional_inventory:
sample:
providers:

View File

@ -7165,7 +7165,7 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid')
def test_update_future_instance_with_buildreq(self, mock_buildreq_get):
-# This test checks that a new instance which is not yet peristed in
+# This test checks that a new instance which is not yet persisted in
# DB can be found by looking up the BuildRequest object so we can
# update it.
@ -7547,7 +7547,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
new_callable=mock.PropertyMock(return_value=old_image_meta)):
# The old image has a NUMA topology defined but the new image
# used to rebuild does not. This would alter the NUMA constrains
-# and therefor should raise.
+# and therefore should raise.
self.assertRaises(
exception.ImageNUMATopologyRebuildConflict,
self.compute_api._validate_numa_rebuild, instance,
@ -7605,7 +7605,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(device_profile='smartnic1')])
self.assertRaisesRegex(exception.ForbiddenPortsWithAccelerator,
-'Port with cyborg profile is not avaliable until upgrade'
+'Port with cyborg profile is not available until upgrade'
' finished.',
self.compute_api._check_support_vnic_accelerator,
self.context,

View File

@ -1667,10 +1667,10 @@ class ComputeTestCase(BaseTestCase,
did_it_add_fault)
@compute_manager.wrap_instance_fault
-def failer(self2, context, instance):
+def failure(self2, context, instance):
raise NotImplementedError()
-self.assertRaises(NotImplementedError, failer,
+self.assertRaises(NotImplementedError, failure,
self.compute, self.context, instance=inst)
self.assertTrue(called['fault_added'])
@ -1687,10 +1687,10 @@ class ComputeTestCase(BaseTestCase,
did_it_add_fault)
@compute_manager.wrap_instance_fault
-def failer(self2, context, instance):
+def failure(self2, context, instance):
raise NotImplementedError()
-self.assertRaises(NotImplementedError, failer,
+self.assertRaises(NotImplementedError, failure,
self.compute, self.context, inst)
self.assertTrue(called['fault_added'])
@ -1707,10 +1707,10 @@ class ComputeTestCase(BaseTestCase,
did_it_add_fault)
@compute_manager.wrap_instance_fault
-def failer(self2, context, instance):
+def failure(self2, context, instance):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
-self.assertRaises(exception.InstanceNotFound, failer,
+self.assertRaises(exception.InstanceNotFound, failure,
self.compute, self.context, inst)
self.assertFalse(called['fault_added'])
@ -5904,7 +5904,7 @@ class ComputeTestCase(BaseTestCase,
updated_topology = objects.NUMATopology.obj_from_primitive(
jsonutils.loads(cn.numa_topology))
-# after confirming resize all cpus on currect host must be free
+# after confirming resize all cpus on current host must be free
self.assertEqual(2, len(updated_topology.cells))
for cell in updated_topology.cells:
self.assertEqual(set(), cell.pinned_cpus)

View File

@ -4635,7 +4635,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_notify_attach_detach,
notify_inst_usage,
expected=None):
-    """Re-usable code for detach volume evacuate test cases.
+    """Reusable code for detach volume evacuate test cases.
:param conn_info_str: String form of the stashed connector.
:param expected: Dict of the connector that is expected in the
@ -7908,7 +7908,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def test_build_and_run_no_more_fixedips_exception(self):
self._test_build_and_run_spawn_exceptions(
-exception.NoMoreFixedIps("error messge"))
+exception.NoMoreFixedIps("error message"))
def test_build_and_run_flavor_disk_smaller_image_exception(self):
self._test_build_and_run_spawn_exceptions(
@ -8423,7 +8423,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
mock_prepspawn.assert_called_once_with(self.instance)
-# Complete should have occured with _shutdown_instance
+# Complete should have occurred with _shutdown_instance
# so calling after the fact is not necessary.
mock_failedspawn.assert_not_called()
@ -8553,7 +8553,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
mock_prepspawn.assert_called_once_with(self.instance)
-# Complete should have occured with _shutdown_instance
+# Complete should have occurred with _shutdown_instance
# so calling after the fact is not necessary.
mock_failedspawn.assert_not_called()

View File

@ -858,7 +858,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
supp_binding_ext_retval
self.task.instance.pci_requests = instance_pci_reqs
self.task._check_can_migrate_pci("Src", "Dst")
-# in case we managed to get away without rasing, check mocks
+# in case we managed to get away without raising, check mocks
if instance_pci_reqs:
mock_supp_port_binding_ext.assert_called_once_with(
self.context)

View File

@ -2312,12 +2312,12 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
])
def test_instance_update_and_get_original_conflict_race_fallthrough(self):
-# Ensure that is update_match continuously fails for no discernable
+# Ensure that is update_match continuously fails for no discernible
# reason, we evantually raise UnknownInstanceUpdateConflict
instance = self.create_instance_with_args()
# Reproduce the conditions of a race between fetching and updating the
-# instance by making update_on_match fail for no discernable reason.
+# instance by making update_on_match fail for no discernible reason.
with mock.patch.object(update_match, 'update_on_match',
side_effect=update_match.NoRowsMatched):
self.assertRaises(exception.UnknownInstanceUpdateConflict,
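The hunk above pins update_on_match to always raise, so the retry loop falls through to the conflict exception. A compact sketch of that fallthrough shape, with all names invented for illustration rather than copied from Nova's db layer:

    from unittest import mock

    class NoRowsMatched(Exception):
        pass

    class UnknownInstanceUpdateConflict(Exception):
        pass

    def update_with_retries(update_on_match, max_retries=3):
        # Give up after a bounded number of optimistic-update attempts.
        for _ in range(max_retries):
            try:
                return update_on_match()
            except NoRowsMatched:
                continue
        raise UnknownInstanceUpdateConflict()

    failing = mock.Mock(side_effect=NoRowsMatched)
    try:
        update_with_retries(failing)
    except UnknownInstanceUpdateConflict:
        assert failing.call_count == 3   # retried, then gave up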
@ -2669,7 +2669,7 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
block_dev = block_device.BlockDeviceDict(bdm_values)
db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
-# Crate a second BDM that is soft-deleted to simulate that the
+# Create a second BDM that is soft-deleted to simulate that the
# volume was detached and the BDM was deleted before the instance
# was hard destroyed.
bdm2_values = {

View File

@ -206,7 +206,7 @@ class FakeLibvirtTests(test.NoDBTestCase):
self.assertFalse(dom.isActive(), 'Defined domain was running.')
dom.createWithFlags(0)
self.assertTrue(dom.isActive(),
-'Domain wasn\'t running after createWithFlags')
+"Domain wasn't running after createWithFlags")
def test_managedSave(self):
uuid = uuidutils.generate_uuid()

View File

@ -788,7 +788,7 @@ class TestDownloadNoDirectUri(test.NoDBTestCase):
show_mock.return_value = {
'locations': [
{
-'url': 'rbd://cluser/pool/image/snapshot',
+'url': 'rbd://cluster/pool/image/snapshot',
'metadata': mock.sentinel.loc_meta
}
]
@ -843,7 +843,7 @@ class TestDownloadNoDirectUri(test.NoDBTestCase):
show_mock.return_value = {
'locations': [
{
-'url': 'funky://cluser/pool/image/snapshot',
+'url': 'funky://cluster/pool/image/snapshot',
'metadata': mock.sentinel.loc_meta
}
]

View File

@ -144,7 +144,7 @@ class BasePolicyTest(test.TestCase):
self.system_admin_context, self.system_foo_context,
self.system_member_context, self.system_reader_context,
])
-# A few commmon set of contexts to be used in tests
+# A few common set of contexts to be used in tests
#
# With scope disable and no legacy rule, any admin,
# project members have access. No other role in that project

View File

@ -171,7 +171,7 @@ class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
self.reader_req = fakes.HTTPRequest.blank('')
self.reader_req.environ['nova.context'] = self.project_reader_context
self.deprecated_policy = "os_compute_api:os-attach-interfaces"
-# Overridde rule with different checks than defaults so that we can
+# Override rule with different checks than defaults so that we can
# verify the rule overridden case.
override_rules = {self.deprecated_policy: base_policy.RULE_ADMIN_API}
# NOTE(gmann): Only override the deprecated rule in policy file so

View File

@ -171,7 +171,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
self.reader_req = fakes.HTTPRequest.blank('')
self.reader_req.environ['nova.context'] = self.project_reader_context
self.deprecated_policy = ia_policies.ROOT_POLICY
-# Overridde rule with different checks than defaults so that we can
+# Override rule with different checks than defaults so that we can
# verify the rule overridden case.
override_rules = {
self.deprecated_policy: base_policy.RULE_ADMIN_OR_OWNER,

View File

@ -52,7 +52,7 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
# Check that system reader or and server owner is able to get
# the server topology.
# With legacy rule and no scope checks, all admin is able to get
-# server topology wth host info.
+# server topology with host info.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]

View File

@ -22879,7 +22879,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_disk_info):
# Check error on resize root disk down for rbd.
# The difference is that get_instance_disk_info always returns
-# an emply list for rbd.
+# an empty list for rbd.
# Ephemeral size is not changed in this case (otherwise other check
# will raise the same error).
self.flags(images_type='rbd', group='libvirt')
@ -24129,7 +24129,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
]
self._test_inject_data(instance, injection_info, "/path", disk_params)
-# Test with the configuration setted to false.
+# Test with the configuration set to false.
self.flags(inject_password=False, group='libvirt')
self._test_inject_data(instance, injection_info, "/path", disk_params,
called=False)
@ -24149,7 +24149,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self._test_inject_data(instance, injection_info, "/path",
disk_params)
-# Test with the configuration setted to false.
+# Test with the configuration set to false.
self.flags(inject_key=False, group='libvirt')
self._test_inject_data(instance, injection_info, "/path", disk_params,
called=False)
@ -24980,8 +24980,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
)
# By mocking threading.Event.wait we prevent the test to wait until the
# timeout happens, and by returning False first we simulate to the
-# caller that the wait returned not becasuse the event is set but
-# because timeout happend. Then during the retry we return True
+# caller that the wait returned not because the event is set but
+# because timeout happened. Then during the retry we return True
# signalling that the event is set, i.e. the libvirt event the caller
# is waiting for has been received
mock_event_wait.side_effect = [False, True]
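The comment in the hunk above describes simulating a timeout followed by a successful wait through Event.wait side effects. A self-contained sketch of that mock pattern, with the waiting helper invented for the example rather than taken from the libvirt driver:

    import threading
    from unittest import mock

    def wait_for_libvirt_event(event, timeout=5):
        # Invented caller: retry the wait once if the first attempt
        # times out, mirroring the retry described above.
        for _ in range(2):
            if event.wait(timeout):
                return True
        return False

    with mock.patch.object(threading.Event, 'wait',
                           side_effect=[False, True]) as mock_wait:
        # The first wait() "times out" (False); the retry "receives
        # the event" (True), so the helper succeeds after two calls.
        assert wait_for_libvirt_event(threading.Event()) is True
        assert mock_wait.call_count == 2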
@ -25108,8 +25108,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# By mocking threading.Event.wait we prevent the test to wait until the
# timeout happens, and by returning False we simulate to the
-# caller that the wait returned not becasuse the event is set but
-# because timeout happend.
+# caller that the wait returned not because the event is set but
+# because timeout happened.
mock_event_wait.return_value = False
self.assertRaises(

View File

@ -1049,7 +1049,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
-# Make sure theses are not called
+# Make sure these are not called
self.volume_api.get_snapshot.assert_not_called()
self.volume_api.create.assert_not_called()
@ -1182,7 +1182,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
-# Make sure theses are not called
+# Make sure these are not called
self.volume_api.get_snapshot.assert_not_called()
self.volume_api.create.assert_not_called()

View File

@ -337,7 +337,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
def test_image_defaults(self):
image = images.VMwareImage(image_id='fake-image-id')
-# N.B. We intentially don't use the defined constants here. Amongst
+# N.B. We intentionally don't use the defined constants here. Amongst
# other potential failures, we're interested in changes to their
# values, which would not otherwise be picked up.
self.assertEqual('otherGuest', image.os_type)

View File

@ -1025,7 +1025,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
"""Ensure we warn when create_vm() fails after we passed an
unrecognised guestId
"""
-# avoid real sleeps during test due to te retry decorator on create_vm
+# avoid real sleeps during test due to the retry decorator on create_vm
self.useFixture(oslo_svc_fixture.SleepFixture())
found = [False]