Clean up numeric expressions with byte-unit constants

Replace numeric expressions with constants in nova/virt
to make code more readable.

Implements: blueprint byte-unit-clean

Change-Id: I71aba00ac3c33931082cf0402e4b68764caa0212
This commit is contained in:
Chang Bo Guo 2013-10-22 19:29:25 -07:00
parent b2ade54e82
commit b823db7378
20 changed files with 84 additions and 64 deletions

View File

@ -27,6 +27,7 @@ from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova import utils
from nova import version
@ -55,7 +56,7 @@ CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
CONFIGDRIVESIZE_BYTES = 64 * unit.Mi
class ConfigDriveBuilder(object):

View File

@ -33,6 +33,7 @@ from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log
from nova import unit
from nova import utils
import nova.virt.docker.client
from nova.virt.docker import hostinfo
@ -152,11 +153,11 @@ class DockerDriver(driver.ComputeDriver):
stats = {
'vcpus': 1,
'vcpus_used': 0,
'memory_mb': memory['total'] / (1024 ** 2),
'memory_mb_used': memory['used'] / (1024 ** 2),
'local_gb': disk['total'] / (1024 ** 3),
'local_gb_used': disk['used'] / (1024 ** 3),
'disk_available_least': disk['available'] / (1024 ** 3),
'memory_mb': memory['total'] / unit.Mi,
'memory_mb_used': memory['used'] / unit.Mi,
'local_gb': disk['total'] / unit.Gi,
'local_gb_used': disk['used'] / unit.Gi,
'disk_available_least': disk['available'] / unit.Gi,
'hypervisor_type': 'docker',
'hypervisor_version': '1.0',
'hypervisor_hostname': self._nodename,
@ -260,7 +261,7 @@ class DockerDriver(driver.ComputeDriver):
if metadata['deleted']:
continue
if metadata['key'] == 'instance_type_memory_mb':
return int(metadata['value']) * 1024 * 1024
return int(metadata['value']) * unit.Mi
return 0
def _get_image_name(self, context, instance, image):

View File

@ -26,6 +26,7 @@ from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import unit
from nova.virt.hyperv import constants
from nova.virt.hyperv import utilsfactory
@ -80,8 +81,8 @@ class HostOps(object):
drive = os.path.splitdrive(self._pathutils.get_instances_dir())[0]
(size, free_space) = self._hostutils.get_volume_info(drive)
total_gb = size / (1024 ** 3)
free_gb = free_space / (1024 ** 3)
total_gb = size / unit.Gi
free_gb = free_space / unit.Gi
used_gb = total_gb - free_gb
return (total_gb, free_gb, used_gb)

View File

@ -25,6 +25,7 @@ from nova.compute import flavors
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova import utils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vhdutilsv2
@ -64,7 +65,7 @@ class ImageCache(object):
vhd_size = vhd_info['MaxInternalSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * 1024 ** 3
root_vhd_size = root_vhd_size_gb * unit.Gi
# NOTE(lpetrut): Checking the namespace is needed as the following
# method is not yet implemented in the vhdutilsv2 module.

View File

@ -23,6 +23,7 @@ import os
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
@ -252,12 +253,12 @@ class MigrationOps(object):
src_base_disk_path)
if resize_instance:
new_size = instance['root_gb'] * 1024 ** 3
new_size = instance['root_gb'] * unit.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * 1024 ** 3
new_size = instance.get('ephemeral_gb', 0) * unit.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)

View File

@ -31,6 +31,7 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import constants
@ -159,7 +160,7 @@ class VMOps(object):
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['MaxInternalSize']
root_vhd_size = instance['root_gb'] * 1024 ** 3
root_vhd_size = instance['root_gb'] * unit.Gi
# NOTE(lpetrut): Checking the namespace is needed as the
# following method is not yet implemented in vhdutilsv2.
@ -187,7 +188,7 @@ class VMOps(object):
return root_vhd_path
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * 1024 ** 3
eph_vhd_size = instance.get('ephemeral_gb', 0) * unit.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()

View File

@ -28,6 +28,7 @@ helpers for populating up config object instances.
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from lxml import etree
@ -1038,7 +1039,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.virt_type = None
self.uuid = None
self.name = None
self.memory = 1024 * 1024 * 500
self.memory = 500 * unit.Mi
self.vcpus = 1
self.cpuset = None
self.cpu = None

View File

@ -85,6 +85,7 @@ from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import unit
from nova import utils
from nova import version
from nova.virt import configdrive
@ -233,7 +234,7 @@ DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 102400
MAX_CONSOLE_BYTES = 100 * unit.Ki
def patch_tpool_proxy():
@ -2360,7 +2361,7 @@ class LibvirtDriver(driver.ComputeDriver):
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
size = instance['root_gb'] * unit.Gi
if size == 0 or suffix == '.rescue':
size = None
@ -2386,7 +2387,7 @@ class LibvirtDriver(driver.ComputeDriver):
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
size = ephemeral_gb * unit.Gi
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
@ -2399,7 +2400,7 @@ class LibvirtDriver(driver.ComputeDriver):
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * 1024 * 1024 * 1024
size = eph['size'] * unit.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
@ -2420,7 +2421,7 @@ class LibvirtDriver(driver.ComputeDriver):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
size = swap_mb * unit.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
@ -3431,7 +3432,7 @@ class LibvirtDriver(driver.ComputeDriver):
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / (1024 ** 3)
info[k] = v / unit.Gi
return info
@ -3905,7 +3906,7 @@ class LibvirtDriver(driver.ComputeDriver):
available = 0
if available_mb:
available = available_mb * (1024 ** 2)
available = available_mb * unit.Mi
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
@ -4555,7 +4556,7 @@ class LibvirtDriver(driver.ComputeDriver):
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
size *= unit.Gi
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
@ -4832,8 +4833,8 @@ class HostState(object):
disk_over_committed = (self.driver.
get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
return (available_least / (1024 ** 3))
available_least = disk_free_gb * unit.Gi - disk_over_committed
return (available_least / unit.Gi)
LOG.debug(_("Updating host stats"))
disk_info_dict = self.driver.get_local_gb_info()

View File

@ -29,6 +29,7 @@ from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import unit
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
@ -294,7 +295,7 @@ class Qcow2(Image):
backing_parts[-1].isdigit():
legacy_backing_size = int(backing_parts[-1])
legacy_base += '_%d' % legacy_backing_size
legacy_backing_size *= 1024 * 1024 * 1024
legacy_backing_size *= unit.Gi
# Create the legacy backing file if necessary.
if legacy_backing_size:

View File

@ -30,6 +30,7 @@ from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova import utils
from nova.virt import images
@ -237,7 +238,7 @@ def create_lvm_image(vg, lv, size, sparse=False):
'lv': lv})
if sparse:
preallocated_space = 64 * 1024 * 1024
preallocated_space = 64 * unit.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_('Volume group %(vg)s will not be able'
@ -357,7 +358,7 @@ def clear_logical_volume(path):
# for more or less security conscious setups.
vol_size = logical_volume_size(path)
bs = 1024 * 1024
bs = unit.Mi
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = vol_size

View File

@ -28,6 +28,7 @@ from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import unit
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
@ -181,7 +182,7 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
# we respect the minimum root device size in constants
instance_type = flavors.extract_flavor(instance)
size_gb = max(instance_type['root_gb'], constants.POWERVM_MIN_ROOT_GB)
size = size_gb * 1024 * 1024 * 1024
size = size_gb * unit.Gi
disk_name = None
try:
@ -317,7 +318,7 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
# If it's not a multiple of 1MB we get the next
# multiple and use it as the megabyte_size.
megabyte = 1024 * 1024
megabyte = unit.Mi
if (size % megabyte) != 0:
megabyte_size = int(size / megabyte) + 1
else:

View File

@ -24,6 +24,7 @@ from nova.compute import utils as compute_utils
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.virt import driver
from nova.virt.powervm import exception
from nova.virt.powervm import operator
@ -295,7 +296,7 @@ class PowerVMDriver(driver.ComputeDriver):
old_lv_size = disk_info['old_lv_size']
if 'root_disk_file' in disk_info:
disk_size = max(int(new_lv_size), int(old_lv_size))
disk_size_bytes = disk_size * 1024 * 1024 * 1024
disk_size_bytes = disk_size * unit.Gi
self._powervm.deploy_from_migrated_file(
lpar_obj, disk_info['root_disk_file'], disk_size_bytes,
power_on)

View File

@ -29,6 +29,7 @@ from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import unit
from nova.virt.vmwareapi import error_util
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
@ -382,8 +383,8 @@ class ResourcePool(ManagedObject):
memoryAllocation = DataObject()
cpuAllocation = DataObject()
memory.maxUsage = 1000 * 1024 * 1024
memory.overallUsage = 500 * 1024 * 1024
memory.maxUsage = 1000 * unit.Mi
memory.overallUsage = 500 * unit.Mi
cpu.maxUsage = 10000
cpu.overallUsage = 1000
runtime.cpu = cpu
@ -480,7 +481,7 @@ class ClusterComputeResource(ManagedObject):
summary.numCpuCores += host_summary.hardware.numCpuCores
summary.numCpuThreads += host_summary.hardware.numCpuThreads
summary.totalMemory += host_summary.hardware.memorySize
free_memory = (host_summary.hardware.memorySize / (1024 * 1024)
free_memory = (host_summary.hardware.memorySize / unit.Mi
- host_summary.quickStats.overallMemoryUsage)
summary.effectiveMemory += free_memory if connected else 0
summary.numEffectiveHosts += 1 if connected else 0
@ -494,8 +495,8 @@ class Datastore(ManagedObject):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", 1024 * 1024 * 1024 * 1024)
self.set("summary.freeSpace", 500 * 1024 * 1024 * 1024)
self.set("summary.capacity", unit.Ti)
self.set("summary.freeSpace", 500 * unit.Gi)
self.set("summary.accessible", True)
@ -546,7 +547,7 @@ class HostSystem(ManagedObject):
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.uuid = "host-uuid"
hardware.memorySize = 1024 * 1024 * 1024
hardware.memorySize = unit.Gi
summary.hardware = hardware
quickstats = DataObject()

View File

@ -21,6 +21,7 @@ Management class for host-related functions (start, reboot, etc).
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@ -126,10 +127,10 @@ class HostState(object):
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
data["disk_total"] = ds[2] / unit.Gi
data["disk_available"] = ds[3] / unit.Gi
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
data["host_memory_total"] = summary.hardware.memorySize / unit.Mi
data["host_memory_free"] = data["host_memory_total"] - \
summary.quickStats.overallMemoryUsage
data["hypervisor_type"] = summary.config.product.name
@ -180,8 +181,8 @@ class VCState(object):
"model": stats['cpu']['model'],
"topology": {"cores": stats['cpu']['cores'],
"threads": stats['cpu']['vcpus']}}
data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
data["disk_total"] = ds[2] / unit.Gi
data["disk_available"] = ds[3] / unit.Gi
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = stats['mem']['total']
data["host_memory_free"] = stats['mem']['free']

View File

@ -26,6 +26,7 @@ import copy
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import unit
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
@ -801,9 +802,9 @@ def get_stats_from_cluster(session, cluster):
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VM's
mem_info['total'] = int(res_usage.maxUsage / (1024 * 1024))
mem_info['total'] = int(res_usage.maxUsage / unit.Mi)
# overallUsage is the hypervisor's view of memory usage by VM's
consumed = int(res_usage.overallUsage / (1024 * 1024))
consumed = int(res_usage.overallUsage / unit.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats

View File

@ -41,6 +41,7 @@ from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
@ -218,7 +219,7 @@ class VMwareVMOps(object):
vif_model, image_linked_clone)
root_gb = instance['root_gb']
root_gb_in_kb = root_gb * 1024 * 1024
root_gb_in_kb = root_gb * unit.Mi
(vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model,
image_linked_clone) = _get_image_properties(root_gb_in_kb)

View File

@ -54,6 +54,7 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
from nova import unit
from nova import utils
from nova.virt import driver
from nova.virt.xenapi import host
@ -434,12 +435,12 @@ class XenAPIDriver(driver.ComputeDriver):
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
total_ram_mb = host_stats['host_memory_total'] / unit.Mi
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
free_ram_mb = host_stats['host_memory_free_computed'] / unit.Mi
total_disk_gb = host_stats['disk_total'] / unit.Gi
used_disk_gb = host_stats['disk_used'] / unit.Gi
hyper_ver = utils.convert_version_to_int(self._session.product_version)
dic = {'vcpus': 0,
'memory_mb': total_ram_mb,

View File

@ -64,6 +64,7 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import unit
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
@ -232,8 +233,8 @@ def after_VBD_create(vbd_ref, vbd_rec):
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('memory_static_max', str(8 * 1024 * 1024 * 1024))
vm_rec.setdefault('memory_dynamic_max', str(8 * 1024 * 1024 * 1024))
vm_rec.setdefault('memory_static_max', str(8 * unit.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * unit.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('resident_on', '')
@ -602,7 +603,7 @@ class SessionBase(object):
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
return 12 * unit.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
@ -685,7 +686,7 @@ class SessionBase(object):
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * 1024 * 1024 * 1024
return 1 * unit.Gi
def VDI_resize_online(self, *args):
return 'derp'

View File

@ -48,6 +48,7 @@ from nova.openstack.common import processutils
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import xmlutils
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
@ -77,7 +78,7 @@ xenapi_vm_utils_opts = [
default=10,
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * 1024 * 1024,
default=16 * unit.Mi,
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='default-sr:true',
@ -205,7 +206,7 @@ def create_vm(session, instance, name_label, kernel, ramdisk,
3. Using hardware virtualization
"""
instance_type = flavors.extract_flavor(instance)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
mem = str(long(instance_type['memory_mb']) * unit.Mi)
vcpus = str(instance_type['vcpus'])
vcpu_weight = instance_type['vcpu_weight']
@ -325,7 +326,7 @@ def is_vm_shutdown(session, vm_ref):
def is_enough_free_mem(session, instance):
instance_type = flavors.extract_flavor(instance)
mem = long(instance_type['memory_mb']) * 1024 * 1024
mem = long(instance_type['memory_mb']) * unit.Mi
host = session.get_xenapi_host()
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
host))
@ -836,7 +837,7 @@ def resize_disk(session, instance, vdi_ref, instance_type):
_auto_configure_disk(session, clone_ref, size_gb)
# Create new VDI
vdi_size = size_gb * 1024 * 1024 * 1024
vdi_size = size_gb * unit.Gi
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
@ -845,7 +846,7 @@ def resize_disk(session, instance, vdi_ref, instance_type):
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = size_gb * 1024 * 1024 * 1024
virtual_size = size_gb * unit.Gi
_copy_partition(session, clone_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
@ -882,7 +883,7 @@ def _auto_configure_disk(session, vdi_ref, new_gb):
_num, start, old_sectors, ptype = partitions[0]
if ptype in ('ext3', 'ext4'):
new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
new_sectors = new_gb * unit.Gi / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors)
else:
reason = _('Disk contains a filesystem '
@ -945,7 +946,7 @@ def _generate_disk(session, instance, vm_ref, userdevice, name_label,
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = 1024 * 1024
ONE_MEG = unit.Mi
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
@ -1363,7 +1364,7 @@ def _get_vdi_chain_size(session, vdi_uuid):
def _check_vdi_size(context, session, instance, vdi_uuid):
instance_type = flavors.extract_flavor(instance)
allowed_size = (instance_type['root_gb'] +
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * (1024 ** 3)
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * unit.Gi
if not instance_type['root_gb']:
# root_gb=0 indicates that we're disabling size checks

View File

@ -45,6 +45,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import unit
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
@ -1052,7 +1053,7 @@ class VMOps(object):
def _resize_up_root_vdi(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
new_disk_size = instance['root_gb'] * unit.Gi
if not new_disk_size:
return
@ -1061,7 +1062,7 @@ class VMOps(object):
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
old_gb = virtual_size / unit.Gi
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
@ -2036,7 +2037,7 @@ class VMOps(object):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
memory_mb = int(vm_rec['memory_static_max']) / unit.Mi
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage