Refactor the flavor <-> profile work

There is now a single interface, `flavor.to_profile`, that takes an
instance and returns a matching profile for it. It uses a map of
functions to generate the config and devices from the instance and
network/block device info.

All of the unit tests added here were written *before* the refactor
began, so this refactor should not break any functionality. It also
increases our test coverage by about 15%.

Change-Id: I575054422386462df170a0a75fac74fbec6f6086
Paul Hummer 2016-12-16 13:47:39 -07:00
parent 9b899e71c0
commit b664cc294d
4 changed files with 691 additions and 214 deletions
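As a reading aid for the diff below, here is a condensed sketch of the map-of-functions pattern that `flavor.to_profile` introduces. It is simplified from the new nova/virt/lxd/flavor.py added in this commit and includes only a small subset of the real filter maps (devices are omitted), so treat it as an illustration rather than the actual module.

# Condensed illustration only; see nova/virt/lxd/flavor.py below for
# the full filter maps (_CONFIG_FILTER_MAP and _DEVICE_FILTER_MAP).
def _memory(instance, _client):
    if instance.memory_mb >= 0:
        return {'limits.memory': '{}MB'.format(instance.memory_mb)}

def _cpu(instance, _client):
    if instance.flavor.vcpus >= 0:
        return {'limits.cpu': str(instance.flavor.vcpus)}

_CONFIG_FILTER_MAP = [_memory, _cpu]

def to_profile(client, instance, network_info, block_info):
    # Each filter returns a dict of config keys (or None); merge them all.
    config = {}
    for f in _CONFIG_FILTER_MAP:
        new = f(instance, client)
        if new:
            config.update(new)
    return client.profiles.create(instance.name, config, {})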


@@ -530,11 +530,12 @@ class LXDDriverTest(test.NoDBTestCase):
lxd_driver.cleanup.assert_called_once_with(
ctx, instance, network_info, None)
@mock.patch('nova.virt.lxd.driver.network')
@mock.patch('os.path.exists', mock.Mock(return_value=True))
@mock.patch('pwd.getpwuid')
@mock.patch('shutil.rmtree')
@mock.patch.object(driver.utils, 'execute')
def test_cleanup(self, execute, rmtree, getpwuid):
def test_cleanup(self, execute, rmtree, getpwuid, _):
mock_profile = mock.Mock()
self.client.profiles.get.return_value = mock_profile
pwuid = mock.Mock()
@@ -575,12 +576,13 @@ class LXDDriverTest(test.NoDBTestCase):
self.client.containers.get.assert_called_once_with(instance.name)
@mock.patch('nova.virt.lxd.driver.network')
@mock.patch('pwd.getpwuid', mock.Mock(return_value=mock.Mock(pw_uid=1234)))
@mock.patch('os.getuid', mock.Mock())
@mock.patch('os.path.exists', mock.Mock(return_value=True))
@mock.patch('six.moves.builtins.open')
@mock.patch.object(driver.utils, 'execute')
def test_get_console_output(self, execute, _open):
def test_get_console_output(self, execute, _open, _):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
@@ -734,11 +736,12 @@ class LXDDriverTest(test.NoDBTestCase):
self.assertEqual(0, self.client.profiles.get.call_count)
container.stop.assert_called_once_with(wait=True)
@mock.patch('nova.virt.lxd.driver.network')
@mock.patch('os.major')
@mock.patch('os.minor')
@mock.patch('os.stat')
@mock.patch('os.path.realpath')
def test_attach_volume(self, realpath, stat, minor, major):
def test_attach_volume(self, realpath, stat, minor, major, _):
profile = mock.Mock()
self.client.profiles.get.return_value = profile
realpath.return_value = '/dev/sdc'
@@ -1287,40 +1290,3 @@ class LXDDriverTest(test.NoDBTestCase):
profile.delete.assert_called_once_with()
lxd_driver.cleanup.assert_called_once_with(ctx, instance, network_info)
class LXDDriverPrivateMethodsTest(LXDDriverTest):
"""Tests for private methods of nova.virt.lxd.driver.LXDDriver."""
def test_generate_profile_data(self):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_device_info = None
lxd_driver = driver.LXDDriver(None)
lxd_driver.init_host(None)
name, config, devices = lxd_driver._generate_profile_data(
instance, network_info, block_device_info)
expected_config = {
'boot.autostart': 'True',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
self.assertEqual(instance.name, name)
self.assertEqual(expected_config, config)
self.assertEqual(expected_devices, devices)


@@ -0,0 +1,456 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import exception
from nova import test
from nova.network import model as network_model
from nova.tests.unit import fake_instance
from nova.virt.lxd import flavor
class ToProfileTest(test.NoDBTestCase):
"""Tests for nova.virt.lxd.flavor.to_profile."""
def setUp(self):
super(ToProfileTest, self).setUp()
self.client = mock.Mock()
self.client.host_info = {
'api_extensions': [],
'environment': {
'storage': 'zfs'
}
}
self.patchers = []
CONF_patcher = mock.patch('nova.virt.lxd.driver.nova.conf.CONF')
self.patchers.append(CONF_patcher)
self.CONF = CONF_patcher.start()
self.CONF.instances_path = '/i'
def tearDown(self):
super(ToProfileTest, self).tearDown()
for patcher in self.patchers:
patcher.stop()
def test_to_profile(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_lvm(self):
"""A profile configuration is requested of the LXD client."""
self.client.host_info['environment']['storage'] = 'lvm'
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_security(self):
self.client.host_info['api_extensions'].append('id_map')
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'lxd:nested_allowed': True,
'lxd:privileged_allowed': True,
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
'security.nesting': 'True',
'security.privileged': 'True',
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_idmap(self):
self.client.host_info['api_extensions'].append('id_map')
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'lxd:isolated': True,
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'security.idmap.isolated': 'True',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_idmap_unsupported(self):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'lxd:isolated': True,
}
network_info = []
block_info = []
self.assertRaises(
exception.NovaException,
flavor.to_profile, self.client, instance, network_info, block_info)
def test_to_profile_quota_extra_specs_bytes(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_read_bytes_sec': '3000000',
'quota:disk_write_bytes_sec': '4000000',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.read': '2MB',
'limits.write': '3MB',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_quota_extra_specs_iops(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_read_iops_sec': '300',
'quota:disk_write_iops_sec': '400',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.read': '300iops',
'limits.write': '400iops',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_quota_extra_specs_max_bytes(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_total_bytes_sec': '6000000',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.max': '5MB',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_quota_extra_specs_max_iops(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_total_iops_sec': '500',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.max': '500iops',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_network_config_average(self):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:vif_inbound_average': '1000000',
'quota:vif_outbound_average': '2000000',
}
network_info = [{
'id': '0123456789abcdef',
'type': network_model.VIF_TYPE_OVS,
'address': '00:11:22:33:44:55',
'network': {
'bridge': 'fakebr'}}]
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'qbr0123456789a': {
'host_name': 'nic0123456789a',
'hwaddr': '00:11:22:33:44:55',
'nictype': 'bridged',
'parent': 'qbr0123456789a',
'type': 'nic',
'limits.egress': '16000Mbit',
'limits.ingress': '8000Mbit',
},
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_network_config_peak(self):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:vif_inbound_peak': '3000000',
'quota:vif_outbound_peak': '4000000',
}
network_info = [{
'id': '0123456789abcdef',
'type': network_model.VIF_TYPE_OVS,
'address': '00:11:22:33:44:55',
'network': {
'bridge': 'fakebr'}}]
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'qbr0123456789a': {
'host_name': 'nic0123456789a',
'hwaddr': '00:11:22:33:44:55',
'nictype': 'bridged',
'parent': 'qbr0123456789a',
'type': 'nic',
'limits.egress': '32000Mbit',
'limits.ingress': '24000Mbit',
},
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
@mock.patch('nova.virt.lxd.flavor.driver.block_device_info_get_ephemerals')
def test_to_profile_ephemeral_storage(self, get_ephemerals):
"""A profile configuration is requested of the LXD client."""
get_ephemerals.return_value = [
{'virtual_name': 'ephemeral1'},
]
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
'ephemeral1': {
'type': 'disk',
'path': '/mnt',
'source': '/i/{}/storage/ephemeral1'.format(instance.name),
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)


@@ -45,6 +45,7 @@ from pylxd import exceptions as lxd_exceptions
from nova.virt.lxd import vif as lxd_vif
from nova.virt.lxd import common
from nova.virt.lxd import flavor
from nova.virt.lxd import session
from nova.virt.lxd import storage
@@ -177,115 +178,6 @@ def _get_power_state(lxd_state):
raise ValueError('Unknown LXD power state: {}'.format(lxd_state))
def _make_disk_quota_config(instance):
md = instance.flavor.extra_specs
disk_config = {}
md_namespace = 'quota:'
params = ['disk_read_iops_sec', 'disk_read_bytes_sec',
'disk_write_iops_sec', 'disk_write_bytes_sec',
'disk_total_iops_sec', 'disk_total_bytes_sec']
# Get disk quotas from flavor metadata and cast the values to int
q = {}
for param in params:
q[param] = int(md.get(md_namespace + param, 0))
# Bytes and iops are not separate config options in a container
# profile - we let Bytes take priority over iops if both are set.
# Align all limits to MiB/s, which should be a sensible middle road.
if q.get('disk_read_iops_sec'):
disk_config['limits.read'] = \
('%s' + 'iops') % q['disk_read_iops_sec']
if q.get('disk_read_bytes_sec'):
disk_config['limits.read'] = \
('%s' + 'MB') % (q['disk_read_bytes_sec'] / units.Mi)
if q.get('disk_write_iops_sec'):
disk_config['limits.write'] = \
('%s' + 'iops') % q['disk_write_iops_sec']
if q.get('disk_write_bytes_sec'):
disk_config['limits.write'] = \
('%s' + 'MB') % (q['disk_write_bytes_sec'] / units.Mi)
# If at least one of the above limits has been defined, do not set
# the "max" quota (which would apply to both read and write)
minor_quota_defined = any(
q.get(param) for param in
['disk_read_iops_sec', 'disk_write_iops_sec',
'disk_read_bytes_sec', 'disk_write_bytes_sec'])
if q.get('disk_total_iops_sec') and not minor_quota_defined:
disk_config['limits.max'] = \
('%s' + 'iops') % q['disk_total_iops_sec']
if q.get('disk_total_bytes_sec') and not minor_quota_defined:
disk_config['limits.max'] = \
('%s' + 'MB') % (q['disk_total_bytes_sec'] / units.Mi)
return disk_config
def _make_network_config(instance, network_info):
network_devices = {}
if not network_info:
return
for vifaddr in network_info:
cfg = lxd_vif.get_config(vifaddr)
if 'bridge' in cfg:
key = str(cfg['bridge'])
network_devices[key] = {
'nictype': 'bridged',
'hwaddr': str(cfg['mac_address']),
'parent': str(cfg['bridge']),
'type': 'nic'
}
else:
key = 'unbridged'
network_devices[key] = {
'nictype': 'p2p',
'hwaddr': str(cfg['mac_address']),
'type': 'nic'
}
host_device = lxd_vif.get_vif_devname(vifaddr)
if host_device:
network_devices[key]['host_name'] = host_device
# Set network device quotas
md = instance.flavor.extra_specs
network_config = {}
md_namespace = 'quota:'
params = ['vif_inbound_average', 'vif_inbound_peak',
'vif_outbound_average', 'vif_outbound_peak']
# Get network quotas from flavor metadata and cast the values to int
q = {}
for param in params:
q[param] = int(md.get(md_namespace + param, 0))
# Since LXD does not implement average NIC IO and number of burst
# bytes, we take the max(vif_*_average, vif_*_peak) to set the peak
# network IO and simply ignore the burst bytes.
# Align values to MBit/s (8 * powers of 1000 in this case), having
# in mind that the values are received in Kilobytes/s.
vif_inbound_limit = max(
q.get('vif_inbound_average'),
q.get('vif_inbound_peak')
)
if vif_inbound_limit:
network_config['limits.ingress'] = \
('%s' + 'Mbit') % (vif_inbound_limit * units.k * 8 / units.M)
vif_outbound_limit = max(
q.get('vif_outbound_average'),
q.get('vif_outbound_peak')
)
if vif_outbound_limit:
network_config['limits.egress'] = \
('%s' + 'Mbit') % (vif_outbound_limit * units.k * 8 / units.M)
network_devices[key].update(network_config)
return network_devices
def _sync_glance_image_to_lxd(client, context, image_ref):
"""Sync an image from glance to LXD image store.
@@ -490,9 +382,8 @@ class LXDDriver(driver.ComputeDriver):
# Create the profile
try:
profile_data = self._generate_profile_data(
instance, network_info, block_device_info)
profile = self.client.profiles.create(*profile_data)
profile = flavor.to_profile(
self.client, instance, network_info, block_device_info)
except lxd_exceptions.LXDAPIException as e:
with excutils.save_and_reraise_exception():
self.cleanup(
@@ -748,19 +639,15 @@ class LXDDriver(driver.ComputeDriver):
profile.save(wait=True)
def migrate_disk_and_power_off(
self, context, instance, dest, flavor, network_info,
self, context, instance, dest, _flavor, network_info,
block_device_info=None, timeout=0, retry_interval=0):
if CONF.my_ip == dest:
# Make sure that the profile for the container is up-to-date to
# the actual state of the container.
name, config, devices = self._generate_profile_data(
instance, network_info, block_device_info)
profile = self.client.profiles.get(name)
profile.devices = devices
profile.config = config
profile.save()
flavor.to_profile(
self.client, instance, network_info, block_device_info,
update=True)
container = self.client.containers.get(instance.name)
container.stop(wait=True)
return ''
@@ -1037,8 +924,7 @@ class LXDDriver(driver.ComputeDriver):
fileutils.ensure_tree(instance_dir)
# Step 1 - Setup the profile on the dest host
profile_data = self._generate_profile_data(instance, network_info)
self.client.profiles.create(*profile_data)
flavor.to_profile(
self.client, instance, network_info, block_device_info)
# Step 2 - Open a websocket on the source host and
# generate the container config
@@ -1069,8 +955,7 @@ class LXDDriver(driver.ComputeDriver):
self.firewall_driver.apply_instance_filter(
instance, network_info)
profile_data = self._generate_profile_data(instance, network_info)
self.client.profiles.create(*profile_data)
flavor.to_profile(
self.client, instance, network_info, block_device_info)
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
@@ -1112,56 +997,6 @@ class LXDDriver(driver.ComputeDriver):
#
# LXDDriver "private" implementation methods
#
def _generate_profile_data(
self, instance, network_info, block_device_info=None):
"""Generate a LXD profile configuration.
Every container created via nova-lxd has a profile assigned to it
by the same name. The profile is sync'd with the configuration of
the container. When the container is deleted, so is the profile.
"""
instance_attributes = common.InstanceAttributes(instance)
config = {
'boot.autostart': 'True', # Start when host reboots
}
if instance.flavor.extra_specs.get('lxd:nested_allowed', False):
config['security.nesting'] = 'True'
if instance.flavor.extra_specs.get('lxd:privileged_allowed', False):
config['security.privileged'] = 'True'
mem = instance.memory_mb
if mem >= 0:
config['limits.memory'] = '%sMB' % mem
vcpus = instance.flavor.vcpus
if vcpus >= 0:
config['limits.cpu'] = str(vcpus)
config['raw.lxc'] = 'lxc.console.logfile={}\n'.format(
instance_attributes.console_path)
devices = {}
lxd_config = self.client.host_info['environment']
devices.setdefault('root', {'type': 'disk', 'path': '/'})
if str(lxd_config['storage']) in ['btrfs', 'zfs']:
devices['root'].update({'size': '%sGB' % str(instance.root_gb)})
devices['root'].update(_make_disk_quota_config(instance))
ephemeral_storage = driver.block_device_info_get_ephemerals(
block_device_info)
if ephemeral_storage:
for ephemeral in ephemeral_storage:
ephemeral_src = os.path.join(
instance_attributes.storage_path,
ephemeral['virtual_name'])
devices[ephemeral['virtual_name']] = {
'path': '/mnt',
'source': ephemeral_src,
'type': 'disk',
}
if network_info:
devices.update(_make_network_config(instance, network_info))
return instance.name, config, devices
# XXX: rockstar (21 Nov 2016) - The methods and code below this line
# have not been through the cleanup process. We know the cleanup process
# is complete when there is no more code below this comment, and the

nova/virt/lxd/flavor.py (new file, 220 lines)

@@ -0,0 +1,220 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import exception
from nova import i18n
from nova.virt import driver
from oslo_utils import units
from nova.virt.lxd import common
from nova.virt.lxd import vif
_ = i18n._
def _base_config(instance, _):
instance_attributes = common.InstanceAttributes(instance)
return {
'environment.product_name': 'OpenStack Nova',
'raw.lxc': 'lxc.console.logfile={}\n'.format(
instance_attributes.console_path),
}
def _nesting(instance, _):
if instance.flavor.extra_specs.get('lxd:nested_allowed'):
return {'security.nesting': 'True'}
def _security(instance, _):
if instance.flavor.extra_specs.get('lxd:privileged_allowed'):
return {'security.privileged': 'True'}
def _memory(instance, _):
mem = instance.memory_mb
if mem >= 0:
return {'limits.memory': '{}MB'.format(mem)}
def _cpu(instance, _):
vcpus = instance.flavor.vcpus
if vcpus >= 0:
return {'limits.cpu': str(vcpus)}
def _isolated(instance, client):
lxd_isolated = instance.flavor.extra_specs.get('lxd:isolated')
if lxd_isolated:
extensions = client.host_info.get('api_extensions', [])
if 'id_map' in extensions:
return {'security.idmap.isolated': 'True'}
else:
msg = _('Host does not support isolated instances')
raise exception.NovaException(msg)
_CONFIG_FILTER_MAP = [
_base_config,
_nesting,
_security,
_memory,
_cpu,
_isolated,
]
def _root(instance, client, *_):
"""Configure the root disk."""
device = {'type': 'disk', 'path': '/'}
environment = client.host_info['environment']
if environment['storage'] in ['btrfs', 'zfs']:
device['size'] = '{}GB'.format(instance.root_gb)
specs = instance.flavor.extra_specs
# Bytes and iops are not separate config options in a container
# profile - we let Bytes take priority over iops if both are set.
# Align all limits to MiB/s, which should be a sensible middle road.
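# For example, quota:disk_read_bytes_sec=3000000 yields
# 3000000 / units.Mi -> 2 (integer division under Python 2), so
# limits.read becomes '2MB' (see test_to_profile_quota_extra_specs_bytes).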
if specs.get('quota:disk_read_iops_sec'):
device['limits.read'] = '{}iops'.format(
specs['quota:disk_read_iops_sec'])
if specs.get('quota:disk_write_iops_sec'):
device['limits.write'] = '{}iops'.format(
specs['quota:disk_write_iops_sec'])
if specs.get('quota:disk_read_bytes_sec'):
device['limits.read'] = '{}MB'.format(
int(specs['quota:disk_read_bytes_sec']) / units.Mi)
if specs.get('quota:disk_write_bytes_sec'):
device['limits.write'] = '{}MB'.format(
int(specs['quota:disk_write_bytes_sec']) / units.Mi)
minor_quota_defined = 'limits.write' in device or 'limits.read' in device
if specs.get('quota:disk_total_iops_sec') and not minor_quota_defined:
device['limits.max'] = '{}iops'.format(
specs['quota:disk_total_iops_sec'])
if specs.get('quota:disk_total_bytes_sec') and not minor_quota_defined:
device['limits.max'] = '{}MB'.format(
int(specs['quota:disk_total_bytes_sec']) / units.Mi)
return {'root': device}
def _ephemeral_storage(instance, _, __, block_info):
instance_attributes = common.InstanceAttributes(instance)
ephemeral_storage = driver.block_device_info_get_ephemerals(block_info)
if ephemeral_storage:
devices = {}
for ephemeral in ephemeral_storage:
ephemeral_src = os.path.join(
instance_attributes.storage_path,
ephemeral['virtual_name'])
devices[ephemeral['virtual_name']] = {
'path': '/mnt',
'source': ephemeral_src,
'type': 'disk',
}
return devices
def _network(instance, _, network_info, __):
if not network_info:
return
devices = {}
for vifaddr in network_info:
cfg = vif.get_config(vifaddr)
if 'bridge' in cfg:
key = str(cfg['bridge'])
devices[key] = {
'nictype': 'bridged',
'hwaddr': str(cfg['mac_address']),
'parent': str(cfg['bridge']),
'type': 'nic'
}
else:
key = 'unbridged'
devices[key] = {
'nictype': 'p2p',
'hwaddr': str(cfg['mac_address']),
'type': 'nic'
}
host_device = vif.get_vif_devname(vifaddr)
if host_device:
devices[key]['host_name'] = host_device
specs = instance.flavor.extra_specs
# Since LXD does not implement average NIC IO and number of burst
# bytes, we take the max(vif_*_average, vif_*_peak) to set the peak
# network IO and simply ignore the burst bytes.
# Align values to MBit/s (8 * powers of 1000 in this case), having
# in mind that the values are received in Kilobytes/s.
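# For example, quota:vif_inbound_average=1000000 (kB/s) yields
# 1000000 * units.k * 8 / units.M = 8000, so limits.ingress becomes
# '8000Mbit' (see test_to_profile_network_config_average).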
vif_inbound_limit = max(
int(specs.get('quota:vif_inbound_average', 0)),
int(specs.get('quota:vif_inbound_peak', 0)),
)
if vif_inbound_limit:
devices[key]['limits.ingress'] = '{}Mbit'.format(
vif_inbound_limit * units.k * 8 / units.M)
vif_outbound_limit = max(
int(specs.get('quota:vif_outbound_average', 0)),
int(specs.get('quota:vif_outbound_peak', 0)),
)
if vif_outbound_limit:
devices[key]['limits.egress'] = '{}Mbit'.format(
vif_outbound_limit * units.k * 8 / units.M)
return devices
_DEVICE_FILTER_MAP = [
_root,
_ephemeral_storage,
_network,
]
def to_profile(client, instance, network_info, block_info, update=False):
"""Convert a nova flavor to a lxd profile.
Every instance container created via nova-lxd has a profiled by the
same name. The profile is sync'd with the configuration of the container.
When the instance container is deleted, so is the profile.
"""
name = instance.name
config = {}
for f in _CONFIG_FILTER_MAP:
new = f(instance, client)
if new:
config.update(new)
devices = {}
for f in _DEVICE_FILTER_MAP:
new = f(instance, client, network_info, block_info)
if new:
devices.update(new)
if update is True:
profile = client.profiles.get(name)
profile.devices = devices
profile.config = config
profile.save()
return profile
else:
return client.profiles.create(name, config, devices)
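
For reference, these are the two call patterns used by driver.py in the hunks above: spawn creates a fresh profile, while migrate_disk_and_power_off re-syncs an existing one in place (shown here with a generic `client` rather than the driver's `self.client`).

# Create a new profile for an instance (spawn path):
profile = flavor.to_profile(
    client, instance, network_info, block_device_info)
# Sync an existing profile to the current instance state (migration path):
flavor.to_profile(
    client, instance, network_info, block_device_info, update=True)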