diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index e048fd07755f..a5e20d83b72b 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -111,7 +111,7 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
 * |DifferentHostFilter| - allows to put the instance on a different host from a
   set of instances.
 * |SameHostFilter| - puts the instance on the same host as another instance in
-  a set of of instances.
+  a set of instances.
 * |RetryFilter| - filters hosts that have been attempted for scheduling. Only
   passes hosts that have not been previously attempted.
 * |TrustedFilter| - filters hosts based on their trust. Only passes hosts
@@ -310,7 +310,7 @@ easily. Therefore the final weight for the object will be::
 
 A weigher should be a subclass of ``weights.BaseHostWeigher`` and they must
 implement the ``weight_multiplier`` and ``weight_object`` methods. If the
-``weight_objects`` method is overriden it just return a list of weights, and not
+``weight_objects`` method is overridden it should just return a list of weights, and not
 modify the weight of the object directly, since final weights are normalized
 and computed by ``weight.BaseWeightHandler``.
 
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index a22a7505b6aa..142b9fc28e7c 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -180,7 +180,7 @@ class InstanceMetadata():
              'content_path': "/%s/%s" % (CONTENT_DIR, key)}
 
         # 'content' is passed in from the configdrive code in
-        # nova/virt/libvirt/driver.py. Thats how we get the injected files
+        # nova/virt/libvirt/driver.py. That's how we get the injected files
         # (personalities) in. AFAIK they're not stored in the db at all,
         # so are not available later (web service metadata time).
         for (path, contents) in content:
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 9f95351af8f8..56dd0dc987f5 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -397,7 +397,7 @@ class ComputeCellsAPI(compute_api.API):
 
     @check_instance_cell
     def get_console_output(self, context, instance, *args, **kwargs):
-        """Get console output for an an instance."""
+        """Get console output for an instance."""
         # NOTE(comstud): Calling super() just to get policy check
         super(ComputeCellsAPI, self).get_console_output(context, instance,
                 *args, **kwargs)
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index be5b29d72627..a2a75abb08d6 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -90,7 +90,7 @@ class Claim(NopClaim):
 
         self.overhead = overhead
 
-        # Check claim at constuctor to avoid mess code
+        # Check claim at constructor to avoid mess code
         # Raise exception ComputeResourcesUnavailable if claim failed
         self._claim_test(resources, limits)
 
diff --git a/nova/compute/monitors/__init__.py b/nova/compute/monitors/__init__.py
index 344747b4d160..46e92971e27b 100644
--- a/nova/compute/monitors/__init__.py
+++ b/nova/compute/monitors/__init__.py
@@ -196,6 +196,6 @@ def all_monitors():
     """Return a list of monitor classes found in this directory.
 
     This method is used as the default for available monitors
-    and should return a list of all monitor classes avaiable.
+    and should return a list of all monitor classes available.
     """
     return ResourceMonitorHandler().get_all_classes()
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 81418e494ab4..7fd250301343 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -182,7 +182,7 @@ class XVPConsoleProxy(object):
         - password: the password to encode, max 8 char for vm passwords,
           and 16 chars for pool passwords. passwords will
           be trimmed to max len before encoding.
-        - is_pool_password: True if this this is the XenServer api password
+        - is_pool_password: True if this is the XenServer api password
           False if it's a VM console password
           (xvp uses different keys and max lengths for pool passwords)
 
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 837e44e18679..e6641a0ca74d 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -609,7 +609,7 @@ class IptablesManager(object):
             return True
 
         # We filter duplicates, letting the *last* occurrence take
-        # precendence. We also filter out anything in the "remove"
+        # precedence. We also filter out anything in the "remove"
         # lists.
         new_filter.reverse()
         new_filter = filter(_weed_out_duplicates, new_filter)
@@ -1388,7 +1388,7 @@ def get_dev(network):
 
 class LinuxNetInterfaceDriver(object):
     """Abstract class that defines generic network host API
-    for for all Linux interface drivers.
+    for all Linux interface drivers.
     """
 
     def plug(self, network, mac_address):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 73286d3dfeb9..f26fd19c67bb 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -386,7 +386,7 @@ class NetworkManager(manager.Manager):
         try:
             # NOTE(vish): We need to make sure the instance info cache has been
             #             updated with new ip info before we trigger the
-            #             security group refresh. This is somewhat ineffecient
+            #             security group refresh. This is somewhat inefficient
             #             but avoids doing some dangerous refactoring for a
             #             bug fix.
             nw_info = self.get_instance_nw_info(admin_context, instance_id,
@@ -1484,7 +1484,7 @@ class FlatManager(NetworkManager):
 
     The idea is to create a single network for the host with a command like:
     nova-manage network create 192.168.0.0/24 1 256. Creating multiple
-    networks for for one manager is currently not supported, but could be
+    networks for one manager is currently not supported, but could be
     added by modifying allocate_fixed_ip and get_network to get the network
     with new logic. Arbitrary lists of addresses in a single network can be
     accomplished with manual db editing.
diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py
index a0f8974352d2..69ddf55d8a06 100644
--- a/nova/network/security_group/neutron_driver.py
+++ b/nova/network/security_group/neutron_driver.py
@@ -190,7 +190,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
     def add_rules(self, context, id, name, vals):
         """Add security group rule(s) to security group.
 
-        Note: the Nova security group API doesn't support adding muliple
+        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both. Multiple rules are
        installed to a security group in neutron using bulk support.
@@ -504,7 +504,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
             self.raise_not_found(msg)
 
     def populate_security_groups(self, instance, security_groups):
-        # Setting to emply list since we do not want to populate this field
+        # Setting to empty list since we do not want to populate this field
         # in the nova database if using the neutron driver
         instance['security_groups'] = security_group.SecurityGroupList()
         instance['security_groups'].objects = []
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index 1caf508978d2..a1c0ae2445c5 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -54,7 +54,7 @@ class DifferentHostFilter(AffinityFilter):
 
 class SameHostFilter(AffinityFilter):
     '''Schedule the instance on the same host as another instance in a set of
-    of instances.
+    instances.
     '''
 
     # The hosts the instances are running on doesn't change within a request
diff --git a/nova/scheduler/filters/type_filter.py b/nova/scheduler/filters/type_filter.py
index e9b69e7e410d..13e7756350c5 100644
--- a/nova/scheduler/filters/type_filter.py
+++ b/nova/scheduler/filters/type_filter.py
@@ -19,7 +19,7 @@ from nova.scheduler import filters
 
 
 class TypeAffinityFilter(filters.BaseHostFilter):
-    """TypeAffinityFilter doesn't allow more then one VM type per host.
+    """TypeAffinityFilter doesn't allow more than one VM type per host.
 
     Note: this works best with ram_weight_multiplier
     (spread) set to 1 (default).
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index e4b570ca7336..c9a72285d6de 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -142,7 +142,7 @@ class ServiceGroupDriver(object):
     """Base class for ServiceGroup drivers."""
 
     def join(self, member_id, group_id, service=None):
-        """Join the given service with it's group."""
+        """Join the given service with its group."""
         raise NotImplementedError()
 
     def is_up(self, member):
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index c39c1fc130b2..75479d86db5c 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -38,7 +38,7 @@ class DbDriver(api.ServiceGroupDriver):
         self.service_down_time = CONF.service_down_time
 
     def join(self, member_id, group_id, service=None):
-        """Join the given service with it's group."""
+        """Join the given service with its group."""
 
         msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to '
                 'the %(group_id)s group, service = %(service)s')
diff --git a/nova/tests/README.rst b/nova/tests/README.rst
index 9dd7e7e8b308..8ac999c7406f 100644
--- a/nova/tests/README.rst
+++ b/nova/tests/README.rst
@@ -34,7 +34,7 @@ verify and clean up during the tearDown step.
 
 If using test.TestCase, calling the super class setUp is required and
 calling the super class tearDown is required to be last if tearDown
-is overriden.
+is overridden.
 
 Writing Functional Tests
 ------------------------
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 3fdb32262151..9834305908e1 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -438,7 +438,7 @@ class TestCollectionLinks(test.NoDBTestCase):
             {"uuid": "123"}
         ]
         req = mock.MagicMock()
-        # Given limit is greater then default max, only return default max
+        # Given limit is greater than default max, only return default max
         params = mock.PropertyMock(return_value=dict(limit=2))
         type(req).params = params
         self.flags(osapi_max_limit=1)
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
index e4214cfb7f10..0d0ad96c8ca5 100644
--- a/nova/tests/compute/test_host_api.py
+++ b/nova/tests/compute/test_host_api.py
@@ -464,7 +464,7 @@ class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
         self.assertEqual('fake-response', result)
 
     def test_get_host_uptime_service_down(self):
-        # The corresponing Compute test case depends on the
+        # The corresponding Compute test case depends on the
         # _assert_host_exists which is a no-op in the cells api
         pass
 
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index 036486be61c4..4916db36eec9 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -3798,7 +3798,7 @@ class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
         def create_ips(i, j):
             return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
 
-        # NOTE(boris-42): Create more then 256 ip to check that
+        # NOTE(boris-42): Create more than 256 ip to check that
         #                 _ip_range_splitter works properly.
         for i in range(1, 3):
             ips_for_delete.extend(create_ips(i, 255))
@@ -4315,7 +4315,7 @@ class VolumeUsageDBApiTestCase(test.TestCase):
                             user_id='fake-user-uuid1')
 
         # Instance rebooted or crashed. block device stats were reset and are
-        # less then the previous values
+        # less than the previous values
         db.vol_usage_update(ctxt, u'1',
                             rd_req=100, rd_bytes=200,
                             wr_req=300, wr_bytes=400,
@@ -4369,7 +4369,7 @@ class VolumeUsageDBApiTestCase(test.TestCase):
                             user_id='fake-user-uuid1')
 
         # Instance rebooted or crashed. block device stats were reset and are
-        # less then the previous values
+        # less than the previous values
         db.vol_usage_update(ctxt, u'1',
                             rd_req=100, rd_bytes=200,
                             wr_req=300, wr_bytes=400,
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index b34090dbbea8..fee7cd86d40f 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -56,7 +56,7 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
             # processes running the same tests (and possibly forking more
             # processes that end up in the same situation). So we need
             # to catch all exceptions and make sure nothing leaks out, in
-            # particlar SystemExit, which is raised by sys.exit(). We use
+            # particular SystemExit, which is raised by sys.exit(). We use
             # os._exit() which doesn't have this problem.
             status = 0
             try:
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index 4432054dbe78..12bf9c1f3ea0 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-"""Tests for common notifcations."""
+"""Tests for common notifications."""
 
 import copy
 
@@ -110,7 +110,7 @@ class NotificationsTestCase(test.TestCase):
 
     def test_notif_disabled(self):
 
-        # test config disable of the notifcations
+        # test config disable of the notifications
         self.flags(notify_on_state_change=None)
 
         old = copy.copy(self.instance)
@@ -148,7 +148,7 @@ class NotificationsTestCase(test.TestCase):
 
         self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
 
-        # ok now enable task state notifcations and re-try
+        # ok now enable task state notifications and re-try
         self.flags(notify_on_state_change="vm_and_task_state")
 
         notifications.send_update(self.context, old, self.instance)
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index 52a40794080f..3f37fa6050ba 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -435,7 +435,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 
         console = conn.get_vnc_console(self.context, instance)
 
-        # Note(sulo): We dont care about session id in test
+        # Note(sulo): We don't care about session id in test
         # they will always differ so strip that out
         actual_path = console['internal_access_path'].split('&')[0]
         expected_path = "/console?ref=%s" % str(vm_ref)
@@ -453,7 +453,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 
         console = conn.get_vnc_console(self.context, instance)
 
-        # Note(sulo): We dont care about session id in test
+        # Note(sulo): We don't care about session id in test
         # they will always differ so strip that out
         actual_path = console['internal_access_path'].split('&')[0]
         expected_path = "/console?ref=%s" % str(rescue_vm)
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index 53918ec1705e..35e70a4be8b1 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -188,7 +188,7 @@ def bm_node_update(context, bm_node_id, values):
 def bm_node_associate_and_update(context, node_uuid, values):
     """Associate an instance to a node safely
 
-    Associate an instance to a node only if that node is not yet assocated.
+    Associate an instance to a node only if that node is not yet associated.
     Allow the caller to set any other fields they require in the same
     operation. For example, this is used to set the node's task_state to
     BUILDING at the beginning of driver.spawn().
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 2d414350ce5b..00af6952da8a 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -310,7 +310,7 @@ class PXE(base.NodeDriver):
         if injected_files is None:
             injected_files = []
         else:
-            # NOTE(deva): copy so we dont modify the original
+            # NOTE(deva): copy so we don't modify the original
             injected_files = list(injected_files)
 
         net_config = build_network_config(network_info)
diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py
index f474dbc3ea9d..486dfbda9690 100644
--- a/nova/virt/baremetal/virtual_power_driver.py
+++ b/nova/virt/baremetal/virtual_power_driver.py
@@ -196,7 +196,7 @@ class VirtualPowerManager(base.PowerManager):
             err_msg = _('Node "%(name)s" with MAC address %(mac)s not found.')
             LOG.error(err_msg, {'name': self._node_name,
                                 'mac': self._mac_addresses})
-            # in our case the _node_name is the the node_id
+            # in our case the _node_name is the node_id
             raise exception.NodeNotFound(node_id=self._node_name)
 
         cmd = self._vp_cmd.list_running_cmd
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 319a3682e68b..1d577f4c6d72 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -125,7 +125,7 @@ class DriverBlockDevice(dict):
     def attach(self, **kwargs):
         """Make the device available to be used by VMs.
 
-        To be overriden in subclasses with the connecting logic for
+        To be overridden in subclasses with the connecting logic for
         the type of device the subclass represents.
         """
         raise NotImplementedError()
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
index b8f51f409c75..aa1b0f0862b6 100644
--- a/nova/virt/disk/vfs/guestfs.py
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -109,7 +109,7 @@ class VFSGuestFS(vfs.VFS):
         except TypeError as e:
             if 'close_on_exit' in str(e):
                 # NOTE(russellb) In case we're not using a version of
-                # libguestfs new enough to support the close_on_exit paramater,
+                # libguestfs new enough to support the close_on_exit parameter,
                 # which was added in libguestfs 1.20.
                 self.handle = tpool.Proxy(guestfs.GuestFS())
             else:
diff --git a/nova/virt/imagecache.py b/nova/virt/imagecache.py
index b63013f6b04c..1e75fe48dc3e 100644
--- a/nova/virt/imagecache.py
+++ b/nova/virt/imagecache.py
@@ -118,7 +118,7 @@ class ImageCacheManager(object):
                 'originals': []}
 
     def _age_and_verify_cached_images(self, context, all_instances, base_dir):
-        """Ages and verfies cached images."""
+        """Ages and verifies cached images."""
         raise NotImplementedError()
 
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index 1daf7abf8f4b..720849c3a027 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -544,7 +544,7 @@ def get_disk_mapping(virt_type, instance,
     # NOTE (ndipanov): This implicitly relies on image->local BDMs not
     #                  being considered in the driver layer - so missing
     #                  bdm with boot_index 0 means - use image, unless it was
-    #                  overriden. This can happen when using legacy syntax and
+    #                  overridden. This can happen when using legacy syntax and
     #                  no root_device_name is set on the instance.
     if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
                                                block_device_info):
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 5320bad7b37e..772c491b8a34 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -332,7 +332,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
         Yields the name of the base file, a boolean which is True if the image
         is "small", and a boolean which indicates if this is a resized image.
 
-        Note that is is possible for more than one yield to result from this
+        Note that it is possible for more than one yield to result from this
         check.
 
         If no base file is found, then nothing is yielded.
diff --git a/nova/virt/vmwareapi/imagecache.py b/nova/virt/vmwareapi/imagecache.py
index 1dc180e16b29..dd9972de7752 100644
--- a/nova/virt/vmwareapi/imagecache.py
+++ b/nova/virt/vmwareapi/imagecache.py
@@ -26,7 +26,7 @@ At each aging iteration we check if the image can be aged.
 This is done by comparing the current nova compute time to the time embedded
 in the timestamp. If the time exceeds the configured aging time then
 the parent folder, that is the image ID folder, will be deleted.
-That effectivly ages the cached image.
+That effectively ages the cached image.
 If an image is used then the timestamps will be deleted.
 
 When accessing a timestamp we make use of locking. This ensure that aging
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index c8ba26370b1f..1a09b69c6612 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1758,7 +1758,7 @@ def preconfigure_instance(session, instance, vdi_ref, network_info):
     """Makes alterations to the image before launching as part of spawn.
     """
 
-    # As mounting the image VDI is expensive, we only want do do it once,
+    # As mounting the image VDI is expensive, we only want to do it once,
     # if at all, so determine whether it's required first, and then do
     # everything
     mount_required = False
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py
index 180feb78194f..58c33cc0e995 100755
--- a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py
+++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py
@@ -193,7 +193,7 @@ def apply_ovs_ipv6_flows(ovs, bridge, params):
     ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
     ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")
 
-    # do not allow sending specifc ICMPv6 types
+    # do not allow sending specific ICMPv6 types
     # Router Advertisement
     ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
     # Redirect Gateway
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py
index 349201f8c21d..4e21ed1c5066 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/utils.py
@@ -221,7 +221,7 @@ def _validate_vhd(vdi_path):
     may pick up other errors also.
 
     This check ensures that the timestamps listed in the VHD footer aren't in
-    the future. This can occur during a migration if the clocks on the the two
+    the future. This can occur during a migration if the clocks on the two
     Dom0's are out-of-sync. This would corrupt the SR if it were imported, so
     generate an exception to bail.
     """