separate management port and use cloud-init

This change cuts the ties between the ports and the service VM and
enables future VRRP support by creating VM-specific ports.

This updates unit tests to pass with recent Kilo refactoring and
enhancements.  A few irrelevant tests were removed, specifically
those that tested a specific state where routers have no management
port, which is no longer a possible state.

Implements blueprint: ci-updates
Co-Authored-By: Adam Gandelman <adamg@ubuntu.com>
Change-Id: I8a78b487df0d49fffd7a924f170d26147864994b
This commit is contained in:
Mark McClain 2015-02-27 03:40:46 -05:00
parent 34f781c228
commit dc37af313b
10 changed files with 699 additions and 698 deletions

View File

@ -43,10 +43,15 @@ SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
def build_config(client, router, management_port, interfaces):
provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
networks = generate_network_config(client, router, interfaces)
networks = generate_network_config(
client,
router,
management_port,
interfaces
)
gateway = get_default_v4_gateway(client, router, networks)
return {
@ -110,26 +115,26 @@ def load_provider_rules(path):
LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
iface_map = dict((i['lladdr'], i['ifname']) for i in interfaces)
def generate_network_config(client, router, management_port, iface_map):
retval = [
_network_config(
client,
router.external_port,
iface_map[router.external_port.mac_address],
iface_map[router.external_port.network_id],
EXTERNAL_NET),
_management_network_config(
router.management_port,
iface_map[router.management_port.mac_address],
interfaces,
)]
_network_config(
client,
management_port,
iface_map[management_port.network_id],
MANAGEMENT_NET
)
]
retval.extend(
_network_config(
client,
p,
iface_map[p.mac_address],
iface_map[p.network_id],
INTERNAL_NET,
client.get_network_ports(p.network_id))
for p in router.internal_ports)
@ -193,6 +198,9 @@ def _allocation_config(ports, subnets_dict):
allocations = []
for port in ports:
if port.name.startswith('AKANDA:VRRP:'):
continue
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips

View File

@ -71,15 +71,13 @@ class MissingIPAllocation(Exception):
class Router(object):
def __init__(self, id_, tenant_id, name, admin_state_up, status,
external_port=None, internal_ports=None,
management_port=None, floating_ips=None):
external_port=None, internal_ports=None, floating_ips=None):
self.id = id_
self.tenant_id = tenant_id
self.name = name
self.admin_state_up = admin_state_up
self.status = status
self.external_port = external_port
self.management_port = management_port
self.internal_ports = internal_ports or []
self.floating_ips = floating_ips or []
@ -97,16 +95,14 @@ class Router(object):
@classmethod
def from_dict(cls, d):
external_port = None
management_port = None
internal_ports = []
for port_dict in d.get('ports', []):
if d.get('gw_port'):
external_port = Port.from_dict(d.get('gw_port'))
for port_dict in d.get('_interfaces', []):
port = Port.from_dict(port_dict)
if port.device_owner == DEVICE_OWNER_ROUTER_GW:
external_port = port
elif port.device_owner == DEVICE_OWNER_ROUTER_MGT:
management_port = port
elif port.device_owner == DEVICE_OWNER_ROUTER_INT:
if port.device_owner == DEVICE_OWNER_ROUTER_INT:
internal_ports.append(port)
fips = [FloatingIP.from_dict(fip) for fip in d.get('_floatingips', [])]
@ -119,14 +115,13 @@ class Router(object):
d['status'],
external_port,
internal_ports,
management_port,
floating_ips=fips
)
@property
def ports(self):
return itertools.chain(
[self.management_port, self.external_port],
[self.external_port],
self.internal_ports
)
@ -177,13 +172,14 @@ class Subnet(object):
class Port(object):
def __init__(self, id_, device_id='', fixed_ips=None, mac_address='',
network_id='', device_owner=''):
network_id='', device_owner='', name=''):
self.id = id_
self.device_id = device_id
self.fixed_ips = fixed_ips or []
self.mac_address = mac_address
self.network_id = network_id
self.device_owner = device_owner
self.name = name
def __eq__(self, other):
return type(self) == type(other) and vars(self) == vars(other)
@ -204,7 +200,8 @@ class Port(object):
fixed_ips=[FixedIp.from_dict(fip) for fip in d['fixed_ips']],
mac_address=d['mac_address'],
network_id=d['network_id'],
device_owner=d['device_owner'])
device_owner=d['device_owner'],
name=d['name'])
class FixedIp(object):
@ -326,26 +323,48 @@ class Neutron(object):
network_id, e)
return response
def create_router_management_port(self, router_id):
port_dict = dict(admin_state_up=True,
network_id=self.conf.management_network_id,
device_owner=DEVICE_OWNER_ROUTER_MGT
)
def get_ports_for_instance(self, instance_id):
    """Return (management_port, other_ports) for a Nova instance.

    Looks up every Neutron port bound to *instance_id* and splits the
    result on whether the port lives on the configured management
    network.  Returns ``(None, [...])`` when no management port exists.
    """
    raw_ports = self.api_client.list_ports(device_id=instance_id)['ports']
    mgt_port = None
    intf_ports = []
    for port_dict in raw_ports:
        port = Port.from_dict(port_dict)
        if port.network_id == self.conf.management_network_id:
            mgt_port = port
        else:
            intf_ports.append(port)
    return mgt_port, intf_ports
def create_management_port(self, object_id):
    """Create a port for *object_id* on the management network.

    Thin wrapper around :meth:`create_vrrp_port` with the 'MGT' label,
    which names the port ``AKANDA:MGT:<object_id>``.
    """
    mgt_network = self.conf.management_network_id
    return self.create_vrrp_port(object_id, mgt_network, 'MGT')
def create_vrrp_port(self, object_id, network_id, label='VRRP'):
port_dict = dict(
admin_state_up=True,
network_id=network_id,
name='AKANDA:%s:%s' % (label, object_id),
security_groups=[]
)
if label == 'VRRP':
port_dict['fixed_ips'] = []
response = self.api_client.create_port(dict(port=port_dict))
port_data = response.get('port')
if not port_data:
raise ValueError('No port data found for router %s network %s' %
(router_id, self.conf.management_network_id))
raise ValueError(
'Unable to create %s port for %s on network %s' %
(label, object_id, network_id)
)
port = Port.from_dict(port_data)
args = dict(port_id=port.id, owner=DEVICE_OWNER_ROUTER_MGT)
self.api_client.add_interface_router(router_id, args)
return port
def delete_router_management_port(self, router_id, port_id):
args = dict(port_id=port_id, owner=DEVICE_OWNER_ROUTER_MGT)
self.api_client.remove_interface_router(router_id, args)
def create_router_external_port(self, router):
# FIXME: Need to make this smarter in case the switch is full.
network_args = {'network_id': self.conf.external_network_id}
@ -389,12 +408,13 @@ class Neutron(object):
i,
cfg.CONF.max_retries,
)
ports = [
p for p in self.api_client.show_router(
router.id
)['router']['ports']
if p['network_id'] == self.conf.external_network_id
]
query_dict = {
'device_owner': DEVICE_OWNER_ROUTER_GW,
'device_id': router.id,
'network_id': self.conf.external_network_id
}
ports = self.api_client.list_ports(**query_dict)['ports']
if len(ports):
port = Port.from_dict(ports[0])
LOG.debug('Found router external port: %s' % port.id)
@ -409,8 +429,11 @@ class Neutron(object):
host_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname()))
name = 'AKANDA:RUG:%s' % network_type.upper()
query_dict = dict(device_owner=DEVICE_OWNER_RUG,
device_id=host_id,
name=name,
network_id=network_id)
ports = self.api_client.list_ports(**query_dict)['ports']
@ -425,6 +448,7 @@ class Neutron(object):
'admin_state_up': True,
'network_id': network_id,
'device_owner': DEVICE_OWNER_ROUTER_INT, # lying here for IP
'name': name,
'device_id': host_id,
'fixed_ips': [{
'ip_address': ip_address.split('/')[0],
@ -480,7 +504,11 @@ class Neutron(object):
self.conf
)
host_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname()))
query_dict = dict(device_owner=DEVICE_OWNER_RUG, device_id=host_id)
query_dict = dict(
device_owner=DEVICE_OWNER_RUG,
name='AKANDA:RUG:MANAGEMENT',
device_id=host_id
)
ports = self.api_client.list_ports(**query_dict)['ports']
if ports:

View File

@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import logging
from novaclient.v1_1 import client
@ -22,6 +23,46 @@ from novaclient.v1_1 import client
LOG = logging.getLogger(__name__)
class InstanceInfo(object):
    """Book-keeping record for a booted appliance VM.

    Tracks the Nova server id and name, the image it was booted from,
    its management port, its other ports, and boot-timing state.
    """

    def __init__(self, instance_id, name, management_port=None, ports=(),
                 image_uuid=None, booting=False, last_boot=None):
        self.id_ = instance_id
        self.name = name
        self.image_uuid = image_uuid
        self.booting = booting
        # A freshly-booting instance stamps "now"; otherwise trust caller.
        if booting:
            self.last_boot = datetime.utcnow()
        else:
            self.last_boot = last_boot
        self.instance_up = True
        self.boot_duration = None
        self.nova_status = None
        self.management_port = management_port
        self._ports = ports

    @property
    def management_address(self):
        # First fixed IP on the management port -- assumes at least one
        # IP is allocated; TODO confirm callers guarantee this.
        return str(self.management_port.fixed_ips[0].ip_address)

    @property
    def time_since_boot(self):
        """Elapsed time since boot, or None if no boot is recorded."""
        if self.last_boot:
            return datetime.utcnow() - self.last_boot

    @property
    def ports(self):
        return self._ports

    @ports.setter
    def ports(self, port_list):
        # The management port is tracked separately; keep it out of _ports.
        retained = []
        for port in port_list:
            if port != self.management_port:
                retained.append(port)
        self._ports = retained

    def confirm_up(self):
        """Mark the instance as up and record how long the boot took."""
        if self.booting:
            self.booting = False
            if self.last_boot:
                self.boot_duration = (datetime.utcnow() - self.last_boot)
class Nova(object):
def __init__(self, conf):
self.conf = conf
@ -33,55 +74,127 @@ class Nova(object):
auth_system=conf.auth_strategy,
region_name=conf.auth_region)
def create_router_instance(self, router, router_image_uuid):
nics = [{'net-id': p.network_id, 'v4-fixed-ip': '', 'port-id': p.id}
for p in router.ports]
def create_instance(self, router_id, image_uuid, make_ports_callback):
mgt_port, instance_ports = make_ports_callback()
nics = [{'net-id': p.network_id, 'v4-fixed-ip': '', 'port-id': p.id}
for p in ([mgt_port] + instance_ports)]
# Sometimes a timing problem makes Nova try to create an akanda
# instance using some ports that haven't been cleaned up yet from
# Neutron. This problem makes the novaclient return an Internal Server
# Error to the rug.
# We can safely ignore this exception because the failed task is going
# to be requeued and executed again later when the ports should be
# finally cleaned up.
LOG.debug('creating vm for router %s with image %s',
router.id, router_image_uuid)
router_id, image_uuid)
name = 'ak-' + router_id
server = self.client.servers.create(
'ak-' + router.id,
image=router_image_uuid,
name,
image=image_uuid,
flavor=self.conf.router_instance_flavor,
nics=nics)
nics=nics,
config_drive=True,
userdata=_format_userdata(mgt_port)
)
instance_info = InstanceInfo(
server.id,
name,
mgt_port,
instance_ports,
image_uuid,
True
)
assert server and server.created
def get_instance(self, router):
instance_info.nova_status = server.status
return instance_info
def get_instance_info_for_obj(self, router_id):
instance = self.get_instance_for_obj(router_id)
if instance:
return InstanceInfo(
instance.id,
instance.name,
image_uuid=instance.image['id']
)
def get_instance_for_obj(self, router_id):
instances = self.client.servers.list(
search_opts=dict(name='ak-' + router.id))
search_opts=dict(name='ak-' + router_id)
)
if instances:
return instances[0]
else:
return None
def get_router_instance_status(self, router):
instance = self.get_instance(router)
if instance:
return instance.status
def get_instance_by_id(self, instance_id):
return self.client.servers.get(instance_id)
def destroy_instance(self, instance_info):
if instance_info:
LOG.debug('deleting vm for router %s', instance_info.name)
self.client.servers.delete(instance_info.id_)
def boot_instance(self, prev_instance_info, router_id, router_image_uuid,
make_ports_callback):
instance_info = None
if not prev_instance_info:
instance = self.get_instance_for_obj(router_id)
else:
return None
instance = self.get_instance_by_id(prev_instance_info.id_)
def destroy_router_instance(self, router):
instance = self.get_instance(router)
if instance:
LOG.debug('deleting vm for router %s', router.id)
self.client.servers.delete(instance.id)
def reboot_router_instance(self, router, router_image_uuid):
instance = self.get_instance(router)
# check to make sure this instance isn't pre-existing
if instance:
if 'BUILD' in instance.status:
return True
# return the same instance with updated status
prev_instance_info.nova_status = instance.status
return prev_instance_info
self.client.servers.delete(instance.id)
return False
self.create_router_instance(router, router_image_uuid)
return True
self.client.servers.delete(instance_info.id_)
return None
# it is now safe to attempt boot
instance_info = self.create_instance(
router_id,
router_image_uuid,
make_ports_callback
)
return instance_info
# TODO(mark): Convert this to dynamic yaml, proper network prefix and ssh-keys
# cloud-init cloud-config payload injected into each appliance VM via nova
# userdata.  The two %s slots are filled by _format_userdata below: the
# management port's MAC address and its first fixed IP (a /64 prefix length
# is appended), consumed at boot by akanda-configure-management.
# NOTE(review): leading indentation of this YAML appears to have been lost
# in rendering -- verify the list/mapping nesting against the original file.
TEMPLATE = """#cloud-config
cloud_config_modules:
- emit_upstart
- set_hostname
- locale
- set-passwords
- timezone
- disable-ec2-metadata
- runcmd
output: {all: '| tee -a /var/log/cloud-init-output.log'}
debug:
- verbose: true
bootcmd:
- /usr/local/bin/akanda-configure-management %s %s/64
users:
- name: akanda
gecos: Akanda
groups: users
shell: /bin/bash
sudo: ALL=(ALL) NOPASSWD:ALL
passwd: $6$rounds=4096$zxaBh6omTayBSA$rI1.FNliuUl7R2SMdkj7zWv.FBhqGVd1lLYDatJd6MiE9WqEQx0M.o7bLyp5nA0CxV6ahoDb0m8Y5OQMDHx1V/
lock-passwd: false
final_message: "Akanda appliance is running"
""" # noqa
# Render TEMPLATE for the given management port.  Assumes fixed_ips is
# non-empty -- TODO confirm callers always allocate a management IP first.
def _format_userdata(mgt_port):
return TEMPLATE % (mgt_port.mac_address, mgt_port.fixed_ips[0].ip_address)

View File

@ -40,12 +40,14 @@ fake_ext_port = FakeModel(
fake_mgt_port = FakeModel(
'2',
name='AKANDA:MGT:foo',
mac_address='aa:bb:cc:cc:bb:aa',
network_id='mgt-net',
device_id='m-m-m-m')
fake_int_port = FakeModel(
'3',
name='AKANDA:RUG:foo',
mac_address='aa:aa:aa:aa:aa:aa',
network_id='int-net',
fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s1')],
@ -53,6 +55,25 @@ fake_int_port = FakeModel(
fake_vm_port = FakeModel(
'4',
name='foo',
mac_address='aa:aa:aa:aa:aa:bb',
network_id='int-net',
fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
first_v4='192.168.1.2',
device_id='v-v-v-v')
fake_vm_mgt_port = FakeModel(
'4',
name='AKANDA:MGT:foo',
mac_address='aa:aa:aa:aa:aa:bb',
network_id='int-net',
fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
first_v4='192.168.1.2',
device_id='v-v-v-v')
fake_vm_vrrp_port = FakeModel(
'4',
name='AKANDA:VRRP:foo',
mac_address='aa:aa:aa:aa:aa:bb',
network_id='int-net',
fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
@ -137,7 +158,8 @@ class TestAkandaClient(unittest.TestCase):
mocks['generate_floating_config'].return_value = 'floating_config'
mocks['get_default_v4_gateway'].return_value = 'default_gw'
config = conf_mod.build_config(mock_client, fake_router, ifaces)
config = conf_mod.build_config(mock_client, fake_router,
fake_mgt_port, ifaces)
expected = {
'default_v4_gateway': 'default_gw',
@ -154,7 +176,7 @@ class TestAkandaClient(unittest.TestCase):
mocks['load_provider_rules'].assert_called_once_with('/the/path')
mocks['generate_network_config'].assert_called_once_with(
mock_client, fake_router, ifaces)
mock_client, fake_router, fake_mgt_port, ifaces)
def test_load_provider_rules(self):
rules_dict = {'labels': {}, 'preanchors': [], 'postanchors': []}
@ -175,42 +197,38 @@ class TestAkandaClient(unittest.TestCase):
mock_client = mock.Mock()
ifaces = [
{'ifname': 'ge0', 'lladdr': fake_mgt_port.mac_address},
{'ifname': 'ge1', 'lladdr': fake_ext_port.mac_address},
{'ifname': 'ge2', 'lladdr': fake_int_port.mac_address}
]
iface_map = {
fake_mgt_port.network_id: 'ge0',
fake_ext_port.network_id: 'ge1',
fake_int_port.network_id: 'ge2'
}
with mock.patch.multiple(conf_mod, **methods) as mocks:
mocks['_network_config'].return_value = 'configured_network'
mocks['_management_network_config'].return_value = 'mgt_net'
result = conf_mod.generate_network_config(
mock_client, fake_router, ifaces)
mock_client, fake_router, fake_mgt_port, iface_map)
expected = [
'configured_network',
'mgt_net',
'configured_network',
'configured_network'
]
self.assertEqual(result, expected)
mocks['_network_config'].assert_has_calls([
expected_calls = [
mock.call(
mock_client,
fake_router.external_port,
'ge1',
'external'),
mock_client, fake_router.external_port,
'ge1', 'external'),
mock.call(
mock_client,
fake_int_port,
'ge2',
'internal',
mock.ANY)])
mocks['_management_network_config'].assert_called_once_with(
fake_router.management_port, 'ge0', ifaces)
mock_client, fake_router.management_port,
'ge0', 'management'),
mock.call(
mock_client, fake_int_port,
'ge2', 'internal', mock.ANY)]
mocks['_network_config'].assert_has_calls(expected_calls)
def test_managment_network_config(self):
with mock.patch.object(conf_mod, '_make_network_config_dict') as nc:
@ -348,7 +366,14 @@ class TestAkandaClient(unittest.TestCase):
host_routes={})
self.assertEqual(conf_mod._subnet_config(sn), expected)
def test_allocation_config(self):
def test_allocation_config_vrrp(self):
subnets_dict = {fake_subnet.id: fake_subnet}
self.assertEqual(
conf_mod._allocation_config([fake_vm_vrrp_port], subnets_dict),
[]
)
def test_allocation_config_mgt(self):
subnets_dict = {fake_subnet.id: fake_subnet}
expected = [
{'mac_address': 'aa:aa:aa:aa:aa:bb',
@ -356,9 +381,8 @@ class TestAkandaClient(unittest.TestCase):
'hostname': '192-168-1-2.local',
'device_id': 'v-v-v-v'}
]
self.assertEqual(
conf_mod._allocation_config([fake_vm_port], subnets_dict),
conf_mod._allocation_config([fake_vm_mgt_port], subnets_dict),
expected
)

View File

@ -27,16 +27,16 @@ from akanda.rug.api import neutron
class TestuNeutronModels(unittest.TestCase):
def test_router(self):
r = neutron.Router(
'1', 'tenant_id', 'name', True, 'ACTIVE', 'ext', ['int'], 'mgt')
'1', 'tenant_id', 'name', True, 'ACTIVE', 'ext', ['int'], ['fip'])
self.assertEqual(r.id, '1')
self.assertEqual(r.tenant_id, 'tenant_id')
self.assertEqual(r.name, 'name')
self.assertTrue(r.admin_state_up)
self.assertEqual(r.status, 'ACTIVE')
self.assertEqual(r.external_port, 'ext')
self.assertEqual(r.management_port, 'mgt')
self.assertEqual(r.floating_ips, ['fip'])
self.assertEqual(r.internal_ports, ['int'])
self.assertEqual(set(['ext', 'mgt', 'int']), set(r.ports))
self.assertEqual(set(['ext', 'int']), set(r.ports))
def test_router_from_dict(self):
p = {
@ -193,6 +193,7 @@ class TestuNeutronModels(unittest.TestCase):
def test_port_model(self):
d = {
'id': '1',
'name': 'name',
'device_id': 'device_id',
'fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'sub1'}],
'mac_address': 'aa:bb:cc:dd:ee:ff',
@ -371,6 +372,9 @@ class TestExternalPort(unittest.TestCase):
def test_create(self, client_wrapper):
mock_client = mock.Mock()
mock_client.show_router.return_value = {'router': self.ROUTER}
mock_client.list_ports.return_value = {
'ports': [self.ROUTER['ports'][0]]
}
client_wrapper.return_value = mock_client
neutron_wrapper = neutron.Neutron(self.conf)
with mock.patch.object(neutron_wrapper, 'get_network_subnets') as gns:
@ -385,6 +389,8 @@ class TestExternalPort(unittest.TestCase):
router = copy.deepcopy(self.ROUTER)
router['ports'] = []
mock_client.show_router.return_value = {'router': router}
mock_client.list_ports.return_value = {'ports': []}
client_wrapper.return_value = mock_client
neutron_wrapper = neutron.Neutron(self.conf)
with mock.patch.object(neutron_wrapper, 'get_network_subnets') as gns:
@ -402,6 +408,10 @@ class TestExternalPort(unittest.TestCase):
router = copy.deepcopy(self.ROUTER)
del router['ports'][0]['fixed_ips'][0]
mock_client.list_ports.return_value = {
'ports': [router['ports'][0]]
}
mock_client.show_router.return_value = {'router': router}
client_wrapper.return_value = mock_client
neutron_wrapper = neutron.Neutron(self.conf)
@ -421,6 +431,10 @@ class TestExternalPort(unittest.TestCase):
router = copy.deepcopy(self.ROUTER)
del router['ports'][0]['fixed_ips'][1]
mock_client.list_ports.return_value = {
'ports': [router['ports'][0]]
}
mock_client.show_router.return_value = {'router': router}
client_wrapper.return_value = mock_client
neutron_wrapper = neutron.Neutron(self.conf)
@ -441,7 +455,12 @@ class TestExternalPort(unittest.TestCase):
router['ports'][0]['fixed_ips'] = []
mock_client.show_router.return_value = {'router': router}
mock_client.list_ports.return_value = {
'ports': [router['ports'][0]]
}
client_wrapper.return_value = mock_client
neutron_wrapper = neutron.Neutron(self.conf)
with mock.patch.object(neutron_wrapper, 'get_network_subnets') as gns:
gns.return_value = self.SUBNETS

View File

@ -15,6 +15,7 @@
# under the License.
import datetime
import mock
import unittest2 as unittest
@ -64,6 +65,10 @@ class FakeConf:
router_instance_flavor = 1
def fake_make_ports_callback():
    """Return a canned (management_port, instance_ports) pair for tests."""
    instance_ports = [fake_ext_port, fake_int_port]
    return fake_mgt_port, instance_ports
class TestNovaWrapper(unittest.TestCase):
def setUp(self):
self.addCleanup(mock.patch.stopall)
@ -73,7 +78,19 @@ class TestNovaWrapper(unittest.TestCase):
self.client_cls.return_value = self.client
self.nova = nova.Nova(FakeConf)
def test_create_router_instance(self):
self.INSTANCE_INFO = nova.InstanceInfo(
instance_id='fake_instance_id',
name='fake_name',
image_uuid='fake_image_id',
booting=False,
last_boot=datetime.datetime.utcnow(),
ports=(fake_ext_port, fake_int_port),
management_port=fake_mgt_port,
)
@mock.patch.object(nova, '_format_userdata')
def test_create_instance(self, mock_userdata):
mock_userdata.return_value = 'fake_userdata'
expected = [
mock.call.servers.create(
'ak-router_id',
@ -87,14 +104,17 @@ class TestNovaWrapper(unittest.TestCase):
'net-id': 'int-net',
'v4-fixed-ip': ''}],
flavor=1,
image='GLANCE-IMAGE-123'
image='GLANCE-IMAGE-123',
config_drive=True,
userdata='fake_userdata',
)
]
self.nova.create_router_instance(fake_router, 'GLANCE-IMAGE-123')
self.nova.create_instance(
'router_id', 'GLANCE-IMAGE-123', fake_make_ports_callback)
self.client.assert_has_calls(expected)
def test_get_instance(self):
def test_get_instance_for_obj(self):
instance = mock.Mock()
self.client.servers.list.return_value = [instance]
@ -102,87 +122,30 @@ class TestNovaWrapper(unittest.TestCase):
mock.call.servers.list(search_opts={'name': 'ak-router_id'})
]
result = self.nova.get_instance(fake_router)
result = self.nova.get_instance_for_obj('router_id')
self.client.assert_has_calls(expected)
self.assertEqual(result, instance)
def test_get_instance_not_found(self):
def test_get_instance_for_obj_not_found(self):
self.client.servers.list.return_value = []
expected = [
mock.call.servers.list(search_opts={'name': 'ak-router_id'})
]
result = self.nova.get_instance(fake_router)
result = self.nova.get_instance_for_obj('router_id')
self.client.assert_has_calls(expected)
self.assertIsNone(result)
def test_get_router_instance_status(self):
instance = mock.Mock()
instance.status = 'ACTIVE'
self.client.servers.list.return_value = [instance]
def test_get_instance_by_id(self):
self.client.servers.get.return_value = 'fake_instance'
expected = [
mock.call.servers.list(search_opts={'name': 'ak-router_id'})
mock.call.servers.get('instance_id')
]
result = self.nova.get_router_instance_status(fake_router)
self.client.assert_has_calls(expected)
self.assertEqual(result, 'ACTIVE')
def test_get_router_instance_status_not_found(self):
self.client.servers.list.return_value = []
expected = [
mock.call.servers.list(search_opts={'name': 'ak-router_id'})
]
result = self.nova.get_router_instance_status(fake_router)
self.client.assert_has_calls(expected)
self.assertIsNone(result)
result = self.nova.get_instance_by_id('instance_id')
self.client.servers.get.assert_has_calls(expected)
self.assertEquals(result, 'fake_instance')
def test_destroy_router_instance(self):
with mock.patch.object(self.nova, 'get_instance') as get_instance:
get_instance.return_value.id = 'instance_id'
expected = [
mock.call.servers.delete('instance_id')
]
self.nova.destroy_router_instance(fake_router)
self.client.assert_has_calls(expected)
def test_reboot_router_instance_exists(self):
with mock.patch.object(self.nova, 'get_instance') as get_instance:
get_instance.return_value.id = 'instance_id'
get_instance.return_value.status = 'ACTIVE'
expected = [
mock.call.servers.delete('instance_id'),
]
self.assertFalse(self.nova.reboot_router_instance(
fake_router,
'GLANCE-IMAGE-123'
))
self.client.assert_has_calls(expected)
def test_reboot_router_instance_rebooting(self):
with mock.patch.object(self.nova, 'get_instance') as get_instance:
get_instance.return_value.id = 'instance_id'
get_instance.return_value.status = 'BUILD'
self.nova.reboot_router_instance(fake_router, 'GLANCE-IMAGE-123')
self.assertEqual(self.client.mock_calls, [])
def test_reboot_router_instance_missing(self):
with mock.patch.object(self.nova, 'get_instance') as get_instance:
with mock.patch.object(self.nova, 'create_router_instance') as cr:
get_instance.return_value = None
self.nova.reboot_router_instance(
fake_router,
'GLANCE-IMAGE-123'
)
self.assertEqual(self.client.mock_calls, [])
cr.assert_called_once_with(fake_router, 'GLANCE-IMAGE-123')
self.nova.destroy_instance(self.INSTANCE_INFO)
self.client.servers.delete.assert_called_with(self.INSTANCE_INFO.id_)

View File

@ -21,7 +21,7 @@ import unittest2 as unittest
from datetime import datetime, timedelta
from akanda.rug import vm_manager
from akanda.rug.api import neutron
from akanda.rug.api import neutron, nova
vm_manager.RETRY_DELAY = 0.4
vm_manager.BOOT_WAIT = 1
@ -29,6 +29,37 @@ vm_manager.BOOT_WAIT = 1
LOG = logging.getLogger(__name__)
class FakeModel(object):
    """Minimal stand-in object: stores an id plus arbitrary attributes."""

    def __init__(self, id_, **attrs):
        self.id = id_
        for key, value in attrs.items():
            setattr(self, key, value)
fake_mgt_port = FakeModel(
'1',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='mgt-net',
fixed_ips=[FakeModel('', ip_address='9.9.9.9', subnet_id='s2')])
fake_int_port = FakeModel(
'2',
mac_address='bb:cc:cc:dd:ee:ff',
network_id='int-net',
fixed_ips=[FakeModel('', ip_address='10.10.10.10', subnet_id='s3')])
fake_ext_port = FakeModel(
'3',
mac_address='cc:cc:cc:dd:ee:ff',
network_id='ext-net',
fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s4')])
fake_add_port = FakeModel(
'4',
mac_address='aa:bb:cc:dd:ff:ff',
network_id='additional-net',
fixed_ips=[FakeModel('', ip_address='8.8.8.8', subnet_id='s3')])
class TestVmManager(unittest.TestCase):
def setUp(self):
@ -46,9 +77,20 @@ class TestVmManager(unittest.TestCase):
'update_state'
)
self.INSTANCE_INFO = nova.InstanceInfo(
instance_id='fake_instance_id',
name='fake_name',
image_uuid='fake_image_id',
booting=False,
last_boot=(datetime.utcnow() - timedelta(minutes=15)),
ports=[fake_int_port, fake_ext_port, fake_mgt_port],
management_port=fake_mgt_port,
)
self.mock_update_state = self.update_state_p.start()
self.vm_mgr = vm_manager.VmManager('the_id', 'tenant_id',
self.log, self.ctx)
self.vm_mgr.instance_info = self.INSTANCE_INFO
mock.patch.object(self.vm_mgr, '_ensure_cache', mock.Mock)
self.next_state = None
@ -60,20 +102,19 @@ class TestVmManager(unittest.TestCase):
self.mock_update_state.side_effect = next_state
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_update_state_is_alive(self, get_mgt_addr, router_api):
def test_update_state_is_alive(self, router_api):
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.return_value = True
self.assertEqual(self.vm_mgr.update_state(self.ctx), vm_manager.UP)
router_api.is_alive.assert_called_once_with('fe80::beef', 5000)
router_api.is_alive.assert_called_once_with(
self.INSTANCE_INFO.management_address,
self.conf.akanda_mgt_service_port)
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
@mock.patch('akanda.rug.api.configuration.build_config')
def test_router_status_sync(self, config, get_mgt_addr, router_api):
def test_router_status_sync(self, config, router_api):
self.update_state_p.stop()
router_api.is_alive.return_value = False
rtr = mock.sentinel.router
@ -103,17 +144,10 @@ class TestVmManager(unittest.TestCase):
n.update_router_status.assert_called_once_with('R1', 'ACTIVE')
n.update_router_status.reset_mock()
# Removing the management port will trigger a reboot
rtr.management_port = None
self.vm_mgr.update_state(self.ctx)
n.update_router_status.assert_called_once_with('R1', 'DOWN')
n.update_router_status.reset_mock()
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
@mock.patch('akanda.rug.api.configuration.build_config')
def test_router_status_caching(self, config, get_mgt_addr, router_api):
def test_router_status_caching(self, config, router_api):
self.update_state_p.stop()
router_api.is_alive.return_value = False
rtr = mock.sentinel.router
@ -134,11 +168,11 @@ class TestVmManager(unittest.TestCase):
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_boot_timeout_still_booting(self, get_mgt_addr, router_api, sleep):
self.vm_mgr.last_boot = datetime.utcnow()
def test_boot_timeout_still_booting(self, router_api, sleep):
now = datetime.utcnow()
self.INSTANCE_INFO.last_boot = now
self.vm_mgr.last_boot = now
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.return_value = False
self.assertEqual(
@ -146,19 +180,17 @@ class TestVmManager(unittest.TestCase):
vm_manager.BOOTING
)
router_api.is_alive.assert_has_calls([
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000)
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
])
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_boot_timeout_error(self, get_mgt_addr, router_api, sleep):
def test_boot_timeout_error(self, router_api, sleep):
self.vm_mgr.state = vm_manager.ERROR
self.vm_mgr.last_boot = datetime.utcnow()
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.return_value = False
self.assertEqual(
@ -166,20 +198,17 @@ class TestVmManager(unittest.TestCase):
vm_manager.ERROR,
)
router_api.is_alive.assert_has_calls([
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000)
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
])
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_boot_timeout_error_no_last_boot(self, get_mgt_addr, router_api,
sleep):
def test_boot_timeout_error_no_last_boot(self, router_api, sleep):
self.vm_mgr.state = vm_manager.ERROR
self.vm_mgr.last_boot = None
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.return_value = False
self.assertEqual(
@ -187,25 +216,23 @@ class TestVmManager(unittest.TestCase):
vm_manager.ERROR,
)
router_api.is_alive.assert_has_calls([
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000)
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
])
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_boot_timeout(self, get_mgt_addr, router_api, sleep):
def test_boot_timeout(self, router_api, sleep):
self.vm_mgr.last_boot = datetime.utcnow() - timedelta(minutes=5)
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.return_value = False
self.assertEqual(self.vm_mgr.update_state(self.ctx), vm_manager.DOWN)
router_api.is_alive.assert_has_calls([
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000)
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
])
self.vm_mgr.log.info.assert_called_once_with(
mock.ANY,
@ -214,25 +241,21 @@ class TestVmManager(unittest.TestCase):
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_update_state_is_down(self, get_mgt_addr, router_api, sleep):
def test_update_state_is_down(self, router_api, sleep):
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.return_value = False
self.assertEqual(self.vm_mgr.update_state(self.ctx), vm_manager.DOWN)
router_api.is_alive.assert_has_calls([
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000),
mock.call('fe80::beef', 5000)
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
mock.call(self.INSTANCE_INFO.management_address, 5000),
])
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_update_state_retry_delay(self, get_mgt_addr, router_api, sleep):
def test_update_state_retry_delay(self, router_api, sleep):
self.update_state_p.stop()
get_mgt_addr.return_value = 'fe80::beef'
router_api.is_alive.side_effect = [False, False, True]
max_retries = 5
self.conf.max_retries = max_retries
@ -243,17 +266,6 @@ class TestVmManager(unittest.TestCase):
mock.call('Alive check failed. Attempt %d of %d', 1, max_retries)
])
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_update_state_no_mgt_port(self, get_mgt_addr):
with mock.patch.object(self.ctx.neutron, 'get_router_detail') as grd:
r = mock.Mock()
r.management_port = None
grd.return_value = r
get_mgt_addr.side_effect = AssertionError('Should never be called')
self.update_state_p.stop()
self.assertEqual(self.vm_mgr.update_state(self.ctx),
vm_manager.DOWN)
@mock.patch('time.sleep')
def test_boot_success(self, sleep):
self.next_state = vm_manager.UP
@ -266,10 +278,8 @@ class TestVmManager(unittest.TestCase):
rtr.ports.__iter__.return_value = []
self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123')
self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) # async
self.ctx.nova_client.reboot_router_instance.assert_called_once_with(
self.vm_mgr.router_obj,
'GLANCE-IMAGE-123'
)
self.ctx.nova_client.boot_instance.assert_called_once_with(
self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY)
self.assertEqual(1, self.vm_mgr.attempts)
@mock.patch('time.sleep')
@ -284,10 +294,8 @@ class TestVmManager(unittest.TestCase):
rtr.ports.__iter__.return_value = []
self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123')
self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING)
self.ctx.nova_client.reboot_router_instance.assert_called_once_with(
self.vm_mgr.router_obj,
'GLANCE-IMAGE-123'
)
self.ctx.nova_client.boot_instance.assert_called_once_with(
self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY)
self.assertEqual(1, self.vm_mgr.attempts)
@mock.patch('time.sleep')
@ -300,13 +308,11 @@ class TestVmManager(unittest.TestCase):
rtr.ports = mock.MagicMock()
rtr.ports.__iter__.return_value = []
self.ctx.nova_client.reboot_router_instance.side_effect = RuntimeError
self.ctx.nova_client.boot_instance.side_effect = RuntimeError
self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123')
self.assertEqual(self.vm_mgr.state, vm_manager.DOWN)
self.ctx.nova_client.reboot_router_instance.assert_called_once_with(
self.vm_mgr.router_obj,
'GLANCE-IMAGE-123'
)
self.ctx.nova_client.boot_instance.assert_called_once_with(
self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY)
self.assertEqual(1, self.vm_mgr.attempts)
@mock.patch('time.sleep')
@ -330,16 +336,12 @@ class TestVmManager(unittest.TestCase):
internal_port]
self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123')
self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) # async
self.ctx.nova_client.reboot_router_instance.assert_called_once_with(
self.vm_mgr.router_obj,
'GLANCE-IMAGE-123'
self.ctx.nova_client.boot_instance.assert_called_once_with(
self.INSTANCE_INFO,
rtr.id,
'GLANCE-IMAGE-123',
mock.ANY, # TODO(adam_g): actually test make_vrrp_ports()
)
assert self.ctx.neutron.clear_device_id.call_count == 3
self.ctx.neutron.clear_device_id.assert_has_calls([
mock.call(management_port),
mock.call(external_port),
mock.call(internal_port)
], any_order=True)
def test_boot_check_up(self):
with mock.patch.object(
@ -422,10 +424,10 @@ class TestVmManager(unittest.TestCase):
@mock.patch('time.sleep')
def test_stop_success(self, sleep):
self.vm_mgr.state = vm_manager.UP
self.ctx.nova_client.get_router_instance_status.return_value = None
self.ctx.nova_client.get_instance_by_id.return_value = None
self.vm_mgr.stop(self.ctx)
self.ctx.nova_client.destroy_router_instance.assert_called_once_with(
self.vm_mgr.router_obj
self.ctx.nova_client.destroy_instance.assert_called_once_with(
self.INSTANCE_INFO
)
self.assertEqual(self.vm_mgr.state, vm_manager.DOWN)
@ -435,8 +437,8 @@ class TestVmManager(unittest.TestCase):
self.ctx.nova_client.get_router_instance_status.return_value = 'UP'
self.vm_mgr.stop(self.ctx)
self.assertEqual(self.vm_mgr.state, vm_manager.UP)
self.ctx.nova_client.destroy_router_instance.assert_called_once_with(
self.vm_mgr.router_obj
self.ctx.nova_client.destroy_instance.assert_called_once_with(
self.INSTANCE_INFO
)
self.log.error.assert_called_once_with(mock.ANY, 1)
@ -444,43 +446,33 @@ class TestVmManager(unittest.TestCase):
def test_stop_router_already_deleted_from_neutron(self, sleep):
self.vm_mgr.state = vm_manager.GONE
self.vm_mgr.stop(self.ctx)
# Because the Router object is actually deleted from Neutron at this
# point, an anonymous "fake" router (with an ID and tenant ID of the
# deleted router) is created. This allows us to pass an expected
# object to the Nova API code to cleans up the orphaned router VM.
args = self.ctx.nova_client.destroy_router_instance.call_args
assert args[0][0].name == 'unnamed'
self.ctx.nova_client.destroy_instance.assert_called_once_with(
self.INSTANCE_INFO)
self.assertEqual(self.vm_mgr.state, vm_manager.GONE)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
@mock.patch('akanda.rug.api.configuration.build_config')
def test_configure_success(self, config, get_mgt_addr, router_api):
get_mgt_addr.return_value = 'fe80::beef'
def test_configure_success(self, config, router_api):
rtr = mock.sentinel.router
self.ctx.neutron.get_router_detail.return_value = rtr
config.return_value = 'fake_config'
router_api.get_interfaces.return_value = []
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = True
self.vm_mgr.configure(self.ctx)
interfaces = router_api.get_interfaces.return_value
verify.assert_called_once_with(rtr, interfaces)
config.assert_called_once_with(self.ctx.neutron, rtr, interfaces)
verify.assert_called_once_with(rtr, [])
config.assert_called_once_with(
self.ctx.neutron, rtr, fake_mgt_port, {})
router_api.update_config.assert_called_once_with(
'fe80::beef',
5000,
config.return_value
self.INSTANCE_INFO.management_address, 5000, 'fake_config',
)
self.assertEqual(self.vm_mgr.state, vm_manager.CONFIGURED)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_configure_mismatched_interfaces(self, get_mgt_addr, router_api):
get_mgt_addr.return_value = 'fe80::beef'
def test_configure_mismatched_interfaces(self, router_api):
rtr = mock.sentinel.router
self.neutron.get_router_detail.return_value = rtr
@ -498,267 +490,180 @@ class TestVmManager(unittest.TestCase):
@mock.patch('time.sleep')
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
@mock.patch('akanda.rug.api.configuration.build_config')
def test_configure_failure(self, config, get_mgt_addr, router_api, sleep):
get_mgt_addr.return_value = 'fe80::beef'
def test_configure_failure(self, config, router_api, sleep):
rtr = {'id': 'the_id'}
self.neutron.get_router_detail.return_value = rtr
router_api.update_config.side_effect = Exception
config.return_value = 'fake_config'
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = True
self.vm_mgr.configure(self.ctx)
interfaces = router_api.get_interfaces.return_value
verify.assert_called_once_with(rtr, interfaces)
config.assert_called_once_with(self.neutron, rtr, interfaces)
router_api.update_config.assert_has_calls([
mock.call('fe80::beef', 5000, config.return_value),
mock.call('fe80::beef', 5000, config.return_value),
mock.call('fe80::beef', 5000, config.return_value),
])
config.assert_called_once_with(
self.neutron, rtr, fake_mgt_port, {})
expected_calls = [
mock.call(self.INSTANCE_INFO.management_address, 5000,
'fake_config')
for i in range(0, 2)]
router_api.update_config.assert_has_calls(expected_calls)
self.assertEqual(self.vm_mgr.state, vm_manager.RESTART)
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_replug_add_new_port_success(self, get_mgt_addr, router_api):
def test_replug_add_new_port_success(self, router_api):
self.vm_mgr.state = vm_manager.REPLUG
get_mgt_addr.return_value = 'fe80::beef'
rtr = mock.sentinel.router
rtr.management_port = mock.Mock()
rtr.external_port = mock.Mock()
rtr.management_port.mac_address = 'a:b:c:d'
rtr.external_port.mac_address = 'd:c:b:a'
p = mock.Mock()
p.id = 'ABC'
p.mac_address = 'a:a:a:a'
p2 = mock.Mock()
p2.id = 'DEF'
p2.mac_address = 'b:b:b:b'
rtr.internal_ports = [p, p2]
self.neutron.get_router_detail.return_value = rtr
self.vm_mgr.router_obj = rtr
fake_router = mock.Mock()
fake_router.id = 'fake_router_id'
fake_router.ports = [fake_ext_port, fake_int_port, fake_add_port]
self.neutron.get_router_detail.return_value = fake_router
self.vm_mgr.router_obj = fake_router
router_api.get_interfaces.return_value = [
{'lladdr': rtr.management_port.mac_address},
{'lladdr': rtr.external_port.mac_address},
{'lladdr': p.mac_address},
{'lladdr': fake_mgt_port.mac_address},
{'lladdr': fake_ext_port.mac_address},
{'lladdr': fake_int_port.mac_address}
]
self.conf.hotplug_timeout = 5
get_instance = self.ctx.nova_client.get_instance
get_instance.return_value = mock.Mock()
fake_instance = mock.MagicMock()
self.ctx.nova_client.get_instance_by_id = mock.Mock(
return_value=fake_instance)
fake_new_port = mock.Mock(id='fake_new_port_id')
self.ctx.neutron.create_vrrp_port.return_value = fake_new_port
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = True # the hotplug worked!
self.vm_mgr.replug(self.ctx)
assert self.vm_mgr.state == vm_manager.REPLUG
get_instance.return_value.interface_attach.assert_called_once_with(
p2.id, None, None
self.ctx.neutron.create_vrrp_port.assert_called_with(
fake_router.id, 'additional-net'
)
self.assertEqual(self.vm_mgr.state, vm_manager.REPLUG)
fake_instance.interface_attach.assert_called_once_with(
fake_new_port.id, None, None
)
self.assertIn(fake_new_port, self.INSTANCE_INFO.ports)
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_replug_add_new_port_failure(self, get_mgt_addr, router_api):
def test_replug_add_new_port_failure(self, router_api):
self.vm_mgr.state = vm_manager.REPLUG
get_mgt_addr.return_value = 'fe80::beef'
rtr = mock.sentinel.router
rtr.management_port = mock.Mock()
rtr.external_port = mock.Mock()
rtr.management_port.mac_address = 'a:b:c:d'
rtr.external_port.mac_address = 'd:c:b:a'
p = mock.Mock()
p.id = 'ABC'
p.mac_address = 'a:a:a:a'
p2 = mock.Mock()
p2.id = 'DEF'
p2.mac_address = 'b:b:b:b'
rtr.internal_ports = [p, p2]
self.neutron.get_router_detail.return_value = rtr
self.vm_mgr.router_obj = rtr
fake_router = mock.Mock()
fake_router.id = 'fake_router_id'
fake_router.ports = [fake_ext_port, fake_int_port, fake_add_port]
self.neutron.get_router_detail.return_value = fake_router
self.vm_mgr.router_obj = fake_router
router_api.get_interfaces.return_value = [
{'lladdr': rtr.management_port.mac_address},
{'lladdr': rtr.external_port.mac_address},
{'lladdr': p.mac_address},
{'lladdr': fake_mgt_port.mac_address},
{'lladdr': fake_ext_port.mac_address},
{'lladdr': fake_int_port.mac_address}
]
self.conf.hotplug_timeout = 5
get_instance = self.ctx.nova_client.get_instance
get_instance.return_value = mock.Mock()
fake_instance = mock.MagicMock()
self.ctx.nova_client.get_instance_by_id = mock.Mock(
return_value=fake_instance)
fake_new_port = mock.Mock(id='fake_new_port_id')
self.ctx.neutron.create_vrrp_port.return_value = fake_new_port
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = False # The hotplug didn't work!
self.vm_mgr.replug(self.ctx)
assert self.vm_mgr.state == vm_manager.RESTART
self.assertEqual(self.vm_mgr.state, vm_manager.RESTART)
get_instance.return_value.interface_attach.assert_called_once_with(
p2.id, None, None
fake_instance.interface_attach.assert_called_once_with(
fake_new_port.id, None, None
)
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_replug_with_missing_external_port(self, get_mgt_addr, router_api):
"""
If the router doesn't have a management or external port, we should
attempt to create (and plug) them.
"""
def test_replug_remove_port_success(self, router_api):
self.vm_mgr.state = vm_manager.REPLUG
get_mgt_addr.return_value = 'fe80::beef'
rtr = mock.sentinel.router
rtr.id = 'SOME-ROUTER-ID'
rtr.management_port = None
rtr.external_port = None
self.ctx.neutron.create_router_management_port.return_value = \
mock.Mock(mac_address='a:b:c:d')
self.ctx.neutron.create_router_external_port.return_value = mock.Mock(
mac_address='d:c:b:a'
)
p = mock.Mock()
p.id = 'ABC'
p.mac_address = 'a:a:a:a'
p2 = mock.Mock()
p2.id = 'DEF'
p2.mac_address = 'b:b:b:b'
rtr.internal_ports = [p, p2]
self.neutron.get_router_detail.return_value = rtr
self.vm_mgr.router_obj = rtr
fake_router = mock.Mock()
fake_router.id = 'fake_router_id'
# Router lacks the fake_ext_port, it will be unplugged
fake_router.ports = [fake_mgt_port, fake_int_port]
self.neutron.get_router_detail.return_value = fake_router
self.vm_mgr.router_obj = fake_router
router_api.get_interfaces.return_value = [
{'lladdr': 'd:c:b:a'},
{'lladdr': 'a:b:c:d'},
{'lladdr': p.mac_address},
{'lladdr': fake_mgt_port.mac_address},
{'lladdr': fake_ext_port.mac_address},
{'lladdr': fake_int_port.mac_address}
]
self.conf.hotplug_timeout = 5
get_instance = self.ctx.nova_client.get_instance
get_instance.return_value = mock.Mock()
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = True # the hotplug worked!
self.vm_mgr.replug(self.ctx)
assert self.vm_mgr.state == vm_manager.REPLUG
fake_instance = mock.MagicMock()
self.ctx.nova_client.get_instance_by_id = mock.Mock(
return_value=fake_instance)
self.ctx.neutron.create_router_management_port.assert_called_with(
'SOME-ROUTER-ID'
)
self.ctx.neutron.create_router_external_port.assert_called_with(
rtr
)
get_instance.return_value.interface_attach.assert_called_once_with(
p2.id, None, None
)
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_replug_remove_port_success(self, get_mgt_addr, router_api):
self.vm_mgr.state = vm_manager.REPLUG
get_mgt_addr.return_value = 'fe80::beef'
rtr = mock.sentinel.router
rtr.management_port = mock.Mock()
rtr.external_port = mock.Mock()
rtr.management_port.mac_address = 'a:b:c:d'
rtr.external_port.mac_address = 'd:c:b:a'
p = mock.Mock()
p.id = 'ABC'
p.mac_address = 'a:a:a:a'
rtr.internal_ports = []
self.neutron.get_router_detail.return_value = rtr
self.vm_mgr.router_obj = rtr
router_api.get_interfaces.return_value = [
{'lladdr': rtr.management_port.mac_address},
{'lladdr': rtr.external_port.mac_address},
{'lladdr': p.mac_address}
]
self.conf.hotplug_timeout = 5
get_instance = self.ctx.nova_client.get_instance
get_instance.return_value = mock.Mock()
self.ctx.neutron.api_client.list_ports.return_value = {
'ports': [{
'id': p.id,
'device_id': 'INSTANCE123',
'fixed_ips': [],
'mac_address': p.mac_address,
'network_id': 'NETWORK123',
'device_owner': 'network:router_interface'
}]
}
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = True # the unplug worked!
self.vm_mgr.replug(self.ctx)
assert self.vm_mgr.state == vm_manager.REPLUG
get_instance.return_value.interface_detach.assert_called_once_with(
p.id
self.assertEqual(self.vm_mgr.state, vm_manager.REPLUG)
fake_instance.interface_detach.assert_called_once_with(
fake_ext_port.id
)
self.assertNotIn(fake_ext_port, self.INSTANCE_INFO.ports)
@mock.patch('time.sleep', lambda *a: None)
@mock.patch('akanda.rug.vm_manager.router_api')
@mock.patch('akanda.rug.vm_manager._get_management_address')
def test_replug_remove_port_failure(self, get_mgt_addr, router_api):
def test_replug_remove_port_failure(self, router_api):
self.vm_mgr.state = vm_manager.REPLUG
get_mgt_addr.return_value = 'fe80::beef'
rtr = mock.sentinel.router
rtr.management_port = mock.Mock()
rtr.external_port = mock.Mock()
rtr.management_port.mac_address = 'a:b:c:d'
rtr.external_port.mac_address = 'd:c:b:a'
p = mock.Mock()
p.id = 'ABC'
p.mac_address = 'a:a:a:a'
rtr.internal_ports = []
self.neutron.get_router_detail.return_value = rtr
self.vm_mgr.router_obj = rtr
fake_router = mock.Mock()
fake_router.id = 'fake_router_id'
# Router lacks the fake_ext_port, it will be unplugged
fake_router.ports = [fake_mgt_port, fake_int_port]
self.neutron.get_router_detail.return_value = fake_router
self.vm_mgr.router_obj = fake_router
router_api.get_interfaces.return_value = [
{'lladdr': rtr.management_port.mac_address},
{'lladdr': rtr.external_port.mac_address},
{'lladdr': p.mac_address}
{'lladdr': fake_mgt_port.mac_address},
{'lladdr': fake_ext_port.mac_address},
{'lladdr': fake_int_port.mac_address}
]
self.conf.hotplug_timeout = 5
get_instance = self.ctx.nova_client.get_instance
get_instance.return_value = mock.Mock()
self.ctx.neutron.api_client.list_ports.return_value = {
'ports': [{
'id': p.id,
'device_id': 'INSTANCE123',
'fixed_ips': [],
'mac_address': p.mac_address,
'network_id': 'NETWORK123',
'device_owner': 'network:router_interface'
}]
}
fake_instance = mock.MagicMock()
self.ctx.nova_client.get_instance_by_id = mock.Mock(
return_value=fake_instance)
with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify:
verify.return_value = False # the unplug failed!
self.vm_mgr.replug(self.ctx)
assert self.vm_mgr.state == vm_manager.RESTART
get_instance.return_value.interface_detach.assert_called_once_with(
p.id
self.assertEquals(self.vm_mgr.state, vm_manager.RESTART)
fake_instance.interface_detach.assert_called_once_with(
fake_ext_port.id
)
def test_verify_interfaces(self):
rtr = mock.Mock()
rtr.management_port.mac_address = 'a:b:c:d'
rtr.external_port.mac_address = 'd:c:b:a'
rtr.management_port.mac_address = fake_mgt_port.mac_address
rtr.external_port.mac_address = fake_ext_port.mac_address
p = mock.Mock()
p.mac_address = 'a:a:a:a'
p.mac_address = fake_int_port.mac_address
rtr.internal_ports = [p]
rtr.ports = [p, rtr.management_port, rtr.external_port]
interfaces = [
{'lladdr': 'a:b:c:d'},
{'lladdr': 'd:c:b:a'},
{'lladdr': 'a:a:a:a'}
{'lladdr': fake_mgt_port.mac_address},
{'lladdr': fake_ext_port.mac_address},
{'lladdr': fake_int_port.mac_address}
]
self.assertTrue(self.vm_mgr._verify_interfaces(rtr, interfaces))
@ -782,15 +687,7 @@ class TestVmManager(unittest.TestCase):
def test_ensure_provider_ports(self):
rtr = mock.Mock()
rtr.id = 'id'
rtr.management_port = None
rtr.external_port = None
self.vm_mgr._ensure_provider_ports(rtr, self.ctx)
self.neutron.create_router_management_port.assert_called_once_with(
'id'
)
self.assertEqual(self.vm_mgr._ensure_provider_ports(rtr, self.ctx),
rtr)
self.neutron.create_router_external_port.assert_called_once_with(rtr)
@ -848,10 +745,8 @@ class TestVmManager(unittest.TestCase):
self.vm_mgr.set_error(self.ctx)
self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123')
self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) # async
self.ctx.nova_client.reboot_router_instance.assert_called_once_with(
self.vm_mgr.router_obj,
'GLANCE-IMAGE-123'
)
self.ctx.nova_client.boot_instance.assert_called_once_with(
self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY)
def test_error_cooldown(self):
self.conf.error_state_cooldown = 30

View File

@ -29,12 +29,12 @@ from akanda.rug import notifications
from akanda.rug import vm_manager
from akanda.rug import worker
from akanda.rug.api import neutron
class TestCreatingRouter(unittest.TestCase):
class WorkerTestBase(unittest.TestCase):
def setUp(self):
super(TestCreatingRouter, self).setUp()
super(WorkerTestBase, self).setUp()
self.conf = mock.patch.object(vm_manager.cfg, 'CONF').start()
self.conf.boot_timeout = 1
self.conf.akanda_mgt_service_port = 5000
@ -42,10 +42,21 @@ class TestCreatingRouter(unittest.TestCase):
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
fake_neutron_obj = mock.patch.object(
neutron, 'Neutron', autospec=True).start()
fake_neutron_obj.get_ports_for_instance.return_value = (
'mgt_port', ['ext_port', 'int_port'])
mock.patch.object(neutron, 'Neutron',
return_value=fake_neutron_obj).start()
self.w = worker.Worker(0, mock.Mock())
self.addCleanup(mock.patch.stopall)
class TestCreatingRouter(WorkerTestBase):
def setUp(self):
super(TestCreatingRouter, self).setUp()
self.w = worker.Worker(0, mock.Mock())
self.tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3'
self.router_id = 'ac194fc5-f317-412e-8611-fb290629f624'
@ -73,22 +84,11 @@ class TestCreatingRouter(unittest.TestCase):
self.assertEqual(1, len(sm._queue))
class TestWildcardMessages(unittest.TestCase):
class TestWildcardMessages(WorkerTestBase):
def setUp(self):
super(TestWildcardMessages, self).setUp()
self.conf = mock.patch.object(vm_manager.cfg, 'CONF').start()
self.conf.boot_timeout = 1
self.conf.akanda_mgt_service_port = 5000
self.conf.max_retries = 3
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.addCleanup(mock.patch.stopall)
self.w = worker.Worker(0, mock.Mock())
# Create some tenants
for msg in [
@ -125,14 +125,7 @@ class TestWildcardMessages(unittest.TestCase):
ids)
class TestShutdown(unittest.TestCase):
def setUp(self):
super(TestShutdown, self).setUp()
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.addCleanup(mock.patch.stopall)
class TestShutdown(WorkerTestBase):
def test_shutdown_on_null_message(self):
self.w = worker.Worker(0, mock.Mock())
with mock.patch.object(self.w, '_shutdown') as meth:
@ -159,24 +152,11 @@ class TestShutdown(unittest.TestCase):
self.assertFalse(self.w.notifier._t)
class TestUpdateStateMachine(unittest.TestCase):
class TestUpdateStateMachine(WorkerTestBase):
def setUp(self):
super(TestUpdateStateMachine, self).setUp()
self.conf = mock.patch.object(vm_manager.cfg, 'CONF').start()
self.conf.boot_timeout = 1
self.conf.akanda_mgt_service_port = 5000
self.conf.max_retries = 3
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.worker_context = worker.WorkerContext()
self.addCleanup(mock.patch.stopall)
def test(self):
w = worker.Worker(0, mock.Mock())
tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3'
@ -205,14 +185,7 @@ class TestUpdateStateMachine(unittest.TestCase):
meth.assert_called_once_with(used_context)
class TestReportStatus(unittest.TestCase):
def setUp(self):
super(TestReportStatus, self).setUp()
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.addCleanup(mock.patch.stopall)
class TestReportStatus(WorkerTestBase):
def test_report_status_dispatched(self):
self.w = worker.Worker(0, mock.Mock())
with mock.patch.object(self.w, 'report_status') as meth:
@ -234,7 +207,7 @@ class TestReportStatus(unittest.TestCase):
self.assertTrue(conf.log_opt_values.called)
class TestDebugRouters(unittest.TestCase):
class TestDebugRouters(WorkerTestBase):
def setUp(self):
super(TestDebugRouters, self).setUp()
@ -245,9 +218,6 @@ class TestDebugRouters(unittest.TestCase):
self.conf.max_retries = 3
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.w = worker.Worker(0, mock.Mock())
self.addCleanup(mock.patch.stopall)
@ -321,7 +291,7 @@ class TestDebugRouters(unittest.TestCase):
self.w.handle_message(tenant_id, msg)
class TestIgnoreRouters(unittest.TestCase):
class TestIgnoreRouters(WorkerTestBase):
def setUp(self):
super(TestIgnoreRouters, self).setUp()
@ -332,11 +302,6 @@ class TestIgnoreRouters(unittest.TestCase):
self.conf.max_retries = 3
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.addCleanup(mock.patch.stopall)
def testNoIgnorePath(self):
w = worker.Worker(0, mock.Mock(), ignore_directory=None)
ignored = w._get_routers_to_ignore()
@ -397,7 +362,7 @@ class TestIgnoreRouters(unittest.TestCase):
w.handle_message(tenant_id, msg)
class TestDebugTenants(unittest.TestCase):
class TestDebugTenants(WorkerTestBase):
def setUp(self):
super(TestDebugTenants, self).setUp()
@ -408,13 +373,8 @@ class TestDebugTenants(unittest.TestCase):
self.conf.max_retries = 3
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.w = worker.Worker(0, mock.Mock())
self.addCleanup(mock.patch.stopall)
def testNoDebugs(self):
self.assertEqual(set(), self.w._debug_tenants)
@ -459,24 +419,7 @@ class TestDebugTenants(unittest.TestCase):
self.w.handle_message(tenant_id, msg)
class TestConfigReload(unittest.TestCase):
def setUp(self):
super(TestConfigReload, self).setUp()
self.conf = mock.patch.object(worker.cfg, 'CONF').start()
self.conf.boot_timeout = 1
self.conf.akanda_mgt_service_port = 5000
self.conf.max_retries = 3
self.conf.management_prefix = 'fdca:3ba5:a17a:acda::/64'
mock.patch('akanda.rug.worker.nova').start()
mock.patch('akanda.rug.worker.neutron').start()
self.w = worker.Worker(0, mock.Mock())
self.addCleanup(mock.patch.stopall)
class TestConfigReload(WorkerTestBase):
def test(self):
tenant_id = '*'
router_id = '*'

View File

@ -17,7 +17,6 @@
from datetime import datetime
from functools import wraps
import netaddr
import time
from oslo.config import cfg
@ -85,10 +84,9 @@ class VmManager(object):
self.log = log
self.state = DOWN
self.router_obj = None
self.last_boot = None
self.instance_info = None
self.last_error = None
self._boot_counter = BootAttemptCounter()
self._currently_booting = False
self._last_synced_status = None
self.update_state(worker_context, silent=True)
@ -106,12 +104,12 @@ class VmManager(object):
self.log.debug('not updating state of deleted router')
return self.state
if self.router_obj.management_port is None:
self.log.debug('no management port, marking router as down')
if self.instance_info is None:
self.log.debug('no backing instance, marking router as down')
self.state = DOWN
return self.state
addr = _get_management_address(self.router_obj)
addr = self.instance_info.management_address
for i in xrange(cfg.CONF.max_retries):
if router_api.is_alive(addr, cfg.CONF.akanda_mgt_service_port):
if self.state != CONFIGURED:
@ -129,10 +127,13 @@ class VmManager(object):
self._check_boot_timeout()
# If the router isn't responding, make sure Nova knows about it
instance = worker_context.nova_client.get_instance(self.router_obj)
instance = worker_context.nova_client.get_instance_for_obj(
self.router_id
)
if instance is None and self.state != ERROR:
self.log.info('No router VM was found; rebooting')
self.state = DOWN
self.instance_info = None
# update_state() is called from Alive() to check the
# status of the router. If we can't talk to the API at
@ -147,22 +148,19 @@ class VmManager(object):
# After the router is all the way up, record how long it took
# to boot and accept a configuration.
if self._currently_booting and self.state == CONFIGURED:
if self.instance_info.booting and self.state == CONFIGURED:
# If we didn't boot the server (because we were restarted
# while it remained running, for example), we won't have a
# last_boot time to log.
if self.last_boot:
boot_duration = (datetime.utcnow() - self.last_boot)
# duration to log.
self.instance_info.confirm_up()
if self.instance_info.boot_duration:
self.log.info('Router booted in %s seconds after %s attempts',
boot_duration.total_seconds(),
self.instance_info.boot_duration.total_seconds(),
self._boot_counter.count)
# Always reset the boot counter, even if we didn't boot
# the server ourself, so we don't accidentally think we
# have an erroring router.
self._boot_counter.reset()
# We've reported how long it took to boot and reset the
# counter, so we are no longer "currently" booting.
self._currently_booting = False
return self.state
def boot(self, worker_context, router_image_uuid):
@ -175,36 +173,43 @@ class VmManager(object):
self.state = DOWN
self._boot_counter.start()
def make_vrrp_ports():
mgt_port = worker_context.neutron.create_management_port(
self.router_obj.id
)
# FIXME(mark): ideally this should be ordered and de-duped
instance_ports = [
worker_context.neutron.create_vrrp_port(self.router_obj.id, n)
for n in (p.network_id for p in self.router_obj.ports)
]
return mgt_port, instance_ports
try:
# TODO(mark): make this pluggable
self._ensure_provider_ports(self.router_obj, worker_context)
# In the event that the current akanda instance isn't deleted
# cleanly (which we've seen in certain circumstances, like
# hypervisor failures), or the vm has alredy been deleted but
# device_id is still set incorrectly, be proactive and attempt to
# clean up the router ports manually. This helps avoid a situation
# where the rug repeatedly attempts to plug stale router ports into
# the newly created akanda instance (and fails).
router = self.router_obj
for p in router.ports:
if p.device_id:
worker_context.neutron.clear_device_id(p)
created = worker_context.nova_client.reboot_router_instance(
router,
router_image_uuid
# TODO(mark): make this handle errors more gracefully on cb fail
# TODO(mark): checkout from a pool - boot on demand for now
instance_info = worker_context.nova_client.boot_instance(
self.instance_info,
self.router_obj.id,
router_image_uuid,
make_vrrp_ports
)
if not created:
if not instance_info:
self.log.info('Previous router is deleting')
return
except:
self.log.exception('Router failed to start boot')
# TODO(mark): attempt clean-up of failed ports
return
else:
# We have successfully started a (re)boot attempt so
# record the timestamp so we can report how long it takes.
self.state = BOOTING
self.last_boot = datetime.utcnow()
self._currently_booting = True
self.instance_info = instance_info
def check_boot(self, worker_context):
ready_states = (UP, CONFIGURED)
@ -267,27 +272,19 @@ class VmManager(object):
def stop(self, worker_context):
self._ensure_cache(worker_context)
if self.state == GONE:
# We are being told to delete a router that neutron has
# already removed. Make a fake router object to use in
# this method.
router_obj = neutron.Router(
id_=self.router_id,
tenant_id=self.tenant_id,
name='unnamed',
admin_state_up=False,
status=neutron.STATUS_DOWN
)
self.log.info('Destroying router neutron has deleted')
else:
router_obj = self.router_obj
self.log.info('Destroying router')
nova_client = worker_context.nova_client
nova_client.destroy_router_instance(router_obj)
try:
nova_client = worker_context.nova_client
nova_client.destroy_instance(self.instance_info)
except Exception:
self.log.exception('Error deleting router instance')
start = time.time()
while time.time() - start < cfg.CONF.boot_timeout:
if not nova_client.get_router_instance_status(router_obj):
if not nova_client.get_instance_by_id(self.instance_info.id_):
if self.state != GONE:
self.state = DOWN
return
@ -310,13 +307,12 @@ class VmManager(object):
if self.state == GONE:
return
addr = _get_management_address(self.router_obj)
# FIXME: This should raise an explicit exception so the caller
# knows that we could not talk to the router (versus the issue
# above).
interfaces = router_api.get_interfaces(
addr,
self.instance_info.management_address,
cfg.CONF.akanda_mgt_service_port
)
@ -327,18 +323,36 @@ class VmManager(object):
self.state = REPLUG
return
# TODO(mark): We're in the first phase of VRRP, so we need
# map the interface to the network ID.
# Eventually we'll send VRRP data and real interface data
port_mac_to_net = {
p.mac_address: p.network_id
for p in self.instance_info.ports
}
# Add in the management port
mgt_port = self.instance_info.management_port
port_mac_to_net[mgt_port.mac_address] = mgt_port.network_id
# this is a network to logical interface id
iface_map = {
port_mac_to_net[i['lladdr']]: i['ifname']
for i in interfaces if i['lladdr'] in port_mac_to_net
}
# FIXME: Need to catch errors talking to neutron here.
config = configuration.build_config(
worker_context.neutron,
self.router_obj,
interfaces
mgt_port,
iface_map
)
self.log.debug('preparing to update config to %r', config)
for i in xrange(attempts):
try:
router_api.update_config(
addr,
self.instance_info.management_address,
cfg.CONF.akanda_mgt_service_port,
config
)
@ -365,55 +379,58 @@ class VmManager(object):
self.log.debug('Attempting to replug...')
self._ensure_provider_ports(self.router_obj, worker_context)
addr = _get_management_address(self.router_obj)
interfaces = router_api.get_interfaces(
addr,
self.instance_info.management_address,
cfg.CONF.akanda_mgt_service_port
)
actual_macs = set((iface['lladdr'] for iface in interfaces))
instance_macs = set(p.mac_address for p in self.instance_info.ports)
instance_macs.add(self.instance_info.management_port.mac_address)
expected_ports = dict(
(p.mac_address, p) for p in self.router_obj.internal_ports
)
expected_macs = set(expected_ports.keys())
expected_macs.add(self.router_obj.management_port.mac_address)
expected_macs.add(self.router_obj.external_port.mac_address)
if instance_macs != actual_macs:
# our cached copy of the ports is wrong reboot and clean up
self.log.warning(
('Instance macs(%s) do not match actual macs (%s). Instance '
'cache appears out-of-sync'),
instance_macs, actual_macs
)
self.state = RESTART
return
ports_to_delete = []
if expected_macs != actual_macs:
instance = worker_context.nova_client.get_instance(self.router_obj)
instance_ports = {p.network_id: p for p in self.instance_info.ports}
instance_networks = set(instance_ports.keys())
logical_networks = set(p.network_id for p in self.router_obj.ports)
if logical_networks != instance_networks:
instance = worker_context.nova_client.get_instance_by_id(
self.instance_info.id_
)
# For each port that doesn't have a mac address on the VM...
for mac in expected_macs - actual_macs:
port = expected_ports.get(mac)
if port:
self.log.debug(
'New port %s, %s found, plugging...' % (port.id, mac)
)
instance.interface_attach(port.id, None, None)
# For each *extra* mac address on the VM...
for mac in actual_macs - expected_macs:
interface_ports = map(
neutron.Port.from_dict,
worker_context.neutron.api_client.list_ports(
device_id=instance.id,
device_owner=neutron.DEVICE_OWNER_ROUTER_INT
)['ports']
for network_id in logical_networks - instance_networks:
port = worker_context.neutron.create_vrrp_port(
self.router_obj.id,
network_id
)
for port in interface_ports:
if port.mac_address == mac:
# If we find a router-interface port attached to the
# device (meaning the interface has been removed
# from the neutron router, but not the VM), detach the
# port from the Nova instance and mark the orphaned
# port for deletion
self.log.debug(''.join([
'Port %s, %s is detached from ' % (port.id, mac),
'the neutron router, unplugging...'
]))
instance.interface_detach(port.id)
ports_to_delete.append(port)
self.log.debug(
'Net %s is missing from the router, plugging: %s',
network_id, port.id
)
instance.interface_attach(port.id, None, None)
self.instance_info.ports.append(port)
for network_id in instance_networks - logical_networks:
port = instance_ports[network_id]
self.log.debug(
'Net %s is detached from the router, unplugging: %s',
network_id, port.id
)
instance.interface_detach(port.id)
self.instance_info.ports.remove(port)
# The action of attaching/detaching interfaces in Nova happens via the
# message bus and is *not* blocking. We need to wait a few seconds to
@ -425,19 +442,12 @@ class VmManager(object):
"Waiting for interface attachments to take effect..."
)
interfaces = router_api.get_interfaces(
addr,
self.instance_info.management_address,
cfg.CONF.akanda_mgt_service_port
)
if self._verify_interfaces(self.router_obj, interfaces):
# If the interfaces now match (hotplugging was successful), go
# ahead and clean up any orphaned neutron ports that may have
# been detached
for port in ports_to_delete:
self.log.debug('Deleting orphaned port %s' % port.id)
worker_context.neutron.api_client.update_port(
port.id, {'port': {'device_owner': ''}}
)
worker_context.neutron.api_client.delete_port(port.id)
# replugging was successful
# TODO(mark) update port states
return
time.sleep(1)
replug_seconds -= 1
@ -456,12 +466,26 @@ class VmManager(object):
self.state = GONE
self.router_obj = None
if not self.instance_info:
self.instance_info = (
worker_context.nova_client.get_instance_info_for_obj(
self.router_id
)
)
if self.instance_info:
(
self.instance_info.management_port,
self.instance_info.ports
) = worker_context.neutron.get_ports_for_instance(
self.instance_info.id_
)
def _check_boot_timeout(self):
if self.last_boot:
seconds_since_boot = (
datetime.utcnow() - self.last_boot
).total_seconds()
if seconds_since_boot < cfg.CONF.boot_timeout:
time_since_boot = self.instance_info.time_since_boot
if time_since_boot:
if time_since_boot.seconds < cfg.CONF.boot_timeout:
# Do not reset the state if we have an error
# condition already. The state will be reset when
# the router starts responding again, or when the
@ -471,8 +495,6 @@ class VmManager(object):
else:
# If the VM was created more than `boot_timeout` seconds
# ago, log an error and set the state set to DOWN
self.last_boot = None
self._currently_booting = False
self.log.info(
'Router is DOWN. Created over %d secs ago.',
cfg.CONF.boot_timeout)
@ -492,22 +514,19 @@ class VmManager(object):
):
return False
num_logical_ports = len(list(logical_config.ports))
num_instance_ports = len(list(self.instance_info.ports))
if num_logical_ports != num_instance_ports:
return False
expected_macs = set(p.mac_address
for p in logical_config.internal_ports)
expected_macs.add(logical_config.management_port.mac_address)
expected_macs.add(logical_config.external_port.mac_address)
for p in self.instance_info.ports)
expected_macs.add(self.instance_info.management_port.mac_address)
self.log.debug('MACs expected: %s', ', '.join(sorted(expected_macs)))
return router_macs == expected_macs
def _ensure_provider_ports(self, router, worker_context):
if router.management_port is None:
self.log.debug('Adding management port to router')
mgt_port = worker_context.neutron.create_router_management_port(
router.id
)
router.management_port = mgt_port
if router.external_port is None:
# FIXME: Need to do some work to pick the right external
# network for a tenant.
@ -517,14 +536,3 @@ class VmManager(object):
)
router.external_port = ext_port
return router
def _get_management_address(router):
network = netaddr.IPNetwork(cfg.CONF.management_prefix)
tokens = ['%02x' % int(t, 16)
for t in router.management_port.mac_address.split(':')]
eui64 = int(''.join(tokens[0:3] + ['ff', 'fe'] + tokens[3:6]), 16)
# the bit inversion is required by the RFC
return str(netaddr.IPAddress(network.value + (eui64 ^ 0x0200000000000000)))

View File

@ -1,11 +1,11 @@
# -*- mode: shell-script -*-
# Set up default directories
AKANDA_NEUTRON_DIR=$DEST/akanda-quantum
AKANDA_NEUTRON_REPO=${AKANDA_NEUTRON_REPO:-git@github.com:neutron/akanda-neutron.git}
AKANDA_NEUTRON_BRANCH=${AKANDA_NEUTRON_BRANCH:-master}
AKANDA_NEUTRON_DIR=$DEST/akanda-neutron
AKANDA_NEUTRON_REPO=${AKANDA_NEUTRON_REPO:-git@github.com:markmcclain/akanda-neutron.git}
AKANDA_NEUTRON_BRANCH=${AKANDA_NEUTRON_BRANCH:-v2}
AKANDA_DEV_APPLIANCE=${AKANDA_DEV_APPLIANCE:-http://markmcclain.objects.dreamhost.com/akanda.qcow2}
AKANDA_DEV_APPLIANCE=${AKANDA_DEV_APPLIANCE:-http://akandaio.objects.dreamhost.com/akanda_cloud.qcow2}
AKANDA_CONF_DIR=/etc/akanda-rug
AKANDA_RUG_CONF=$AKANDA_CONF_DIR/rug.ini
@ -116,7 +116,7 @@ function pre_start_akanda() {
upload_image $AKANDA_DEV_APPLIANCE $TOKEN
typeset image_id=$(glance $auth_args image-show akanda | grep ' id ' | awk '{print $4}')
typeset image_id=$(glance $auth_args image-show akanda_cloud | grep ' id ' | awk '{print $4}')
die_if_not_set $LINENO image_id "Failed to find akanda image"
iniset $AKANDA_RUG_CONF DEFAULT router_image_uuid $image_id