metadata-ipv6: DHCP namespace

Send IPv6 metadata traffic (dst=fe80::a9fe:a9fe) to the metadata-agent.

When running on an IPv6-enabled system, bind haproxy (i.e. the
metadata-proxy) to fe80::a9fe:a9fe in addition to 169.254.169.254.
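
For illustration, a rough sketch of how the extra v6 bind line of the
haproxy config could be composed (the constant names and the example
interface name below are placeholders, not the exact code of this
change):

    METADATA_V4_IP = '169.254.169.254'
    METADATA_V6_IP = 'fe80::a9fe:a9fe'
    METADATA_PORT = 80

    def render_bind_lines(bind_interface=None):
        # The v4 bind stays as before; a v6 bind is only added when we
        # know which interface carries the link-local address, since
        # fe80::/64 addresses are ambiguous without an interface.
        lines = ['bind %s:%s' % (METADATA_V4_IP, METADATA_PORT)]
        if bind_interface:
            lines.append('bind %s:%s interface %s' % (
                METADATA_V6_IP, METADATA_PORT, bind_interface))
        return lines

    print('\n'.join(render_bind_lines('tap0123abcd-ef')))
    # bind 169.254.169.254:80
    # bind fe80::a9fe:a9fe:80 interface tap0123abcd-ef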

We do not introduce new config options. The usual config options
(enable_isolated_metadata, force_metadata, enable_metadata_proxy)
now control the metadata service over both IPv4 and IPv6.

This change series only affects the guests' access to the metadata
service (over tenant networks). It changes nothing about how the
metadata-agent talks to Nova's metadata service.

Metadata access over IPv6 is expected to work on both dual-stack and
v6-only networks.
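
From the guest side, a probe of both endpoints might look roughly like
this (a sketch only; the interface name 'eth0' and the metadata path
are assumptions about the guest image, not part of this change):

    import http.client

    def metadata_status(host, path='/openstack/latest/meta_data.json'):
        # Plain HTTP GET against the metadata endpoint; return the
        # response status code.
        conn = http.client.HTTPConnection(host, 80, timeout=5)
        conn.request('GET', path)
        return conn.getresponse().status

    # Dual-stack guests may use either endpoint; v6-only guests rely on
    # the link-local address, which needs an interface (zone) qualifier.
    print(metadata_status('169.254.169.254'))
    print(metadata_status('fe80::a9fe:a9fe%eth0'))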

In order to enable the metadata service on pre-existing isolated
networks during an upgrade, this change makes each dhcp-agent restart
trigger a quick restart of the dhcp-agent-controlled metadata-proxies,
so they pick up the newly generated config and also bind to
fe80::a9fe:a9fe.
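
In simplified form, the restart-once bookkeeping looks roughly like
this (a self-contained sketch; enable_proxy/disable_proxy stand in for
the real DhcpAgent helpers):

    restarted_metadata_proxy_set = set()

    def enable_proxy(network_id):
        print('spawning metadata-proxy for %s' % network_id)

    def disable_proxy(network_id):
        print('stopping metadata-proxy for %s' % network_id)

    def update_isolated_metadata_proxy(network_id, should_enable_metadata):
        if should_enable_metadata:
            if network_id not in restarted_metadata_proxy_set:
                # Stop the proxy once per agent start so it comes back
                # with the freshly generated config that also binds
                # fe80::a9fe:a9fe.
                disable_proxy(network_id)
                restarted_metadata_proxy_set.add(network_id)
            enable_proxy(network_id)
        else:
            disable_proxy(network_id)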

Change-Id: If35f00d1fc9e4ab7e232660362410ce7320c45ba
Partial-Bug: #1460177
Bence Romsics 2020-03-27 16:37:34 +01:00
parent d189d83bd7
commit a0b18d553d
8 changed files with 208 additions and 31 deletions


@@ -32,6 +32,7 @@ import oslo_messaging
from oslo_service import loopingcall
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import timeutils
from neutron._i18n import _
@@ -117,6 +118,19 @@ class DhcpAgent(manager.Manager):
self._pool = eventlet.GreenPool(size=self._pool_size)
self._queue = queue.ResourceProcessingQueue()
self._network_bulk_allocations = {}
# Each dhcp-agent restart should trigger a restart of all
# metadata-proxies too. This way we can ensure that changes in
# the metadata-proxy config we generate will be applied soon
# after a new version of dhcp-agent is started. This makes
# the metadata service transiently offline. However similar
# metadata-proxy restarts were always done by l3-agent so people
# can apparently live with short metadata outages. We only stop
# the process here and let the process monitor restart it,
# first because it knows everything about how to restart it,
# second because (unless we temporarily disable the monitor too)
# we could race with the monitor restarting the process. See also
# method update_isolated_metadata_proxy().
self.restarted_metadata_proxy_set = set()
def init_host(self):
self.sync_state()
@@ -175,8 +189,11 @@ class DhcpAgent(manager.Manager):
self._process_monitor,
self.dhcp_version,
self.plugin_rpc)
getattr(driver, action)(**action_kwargs)
return True
rv = getattr(driver, action)(**action_kwargs)
if action == 'get_metadata_bind_interface':
return rv
else:
return True
except exceptions.Conflict:
# No need to resync here, the agent will receive the event related
# to a status update for the network
@@ -678,11 +695,15 @@ class DhcpAgent(manager.Manager):
According to return from driver class, spawn or kill the metadata
proxy process. Spawn an existing metadata proxy or kill a nonexistent
metadata proxy will just silently return.
metadata proxy will just silently return. Spawning an existing
metadata proxy restarts it once after each dhcp-agent start.
"""
should_enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
self.conf, network)
if should_enable_metadata:
if network.id not in self.restarted_metadata_proxy_set:
self.disable_isolated_metadata_proxy(network)
self.restarted_metadata_proxy_set.add(network.id)
self.enable_isolated_metadata_proxy(network)
else:
self.disable_isolated_metadata_proxy(network)
@@ -715,6 +736,30 @@ class DhcpAgent(manager.Manager):
self._metadata_routers[network.id] = (
router_ports[0].device_id)
if netutils.is_ipv6_enabled():
try:
dhcp_ifaces = [
self.call_driver(
'get_metadata_bind_interface', network, port=p)
for p in network.ports
if (p.device_owner == constants.DEVICE_OWNER_DHCP and
p.admin_state_up)
]
if len(dhcp_ifaces) == 1:
kwargs['bind_interface'] = dhcp_ifaces[0]
kwargs['bind_address_v6'] = dhcp.METADATA_V6_IP
else:
LOG.error(
'Unexpected number of DHCP interfaces for metadata '
'proxy, expected 1, got %s', len(dhcp_ifaces)
)
except AttributeError:
LOG.warning(
'Cannot serve metadata on IPv6 because DHCP driver '
'does not implement method '
'get_metadata_bind_interface(): %s',
self.dhcp_driver_class)
metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
self._process_monitor, network.namespace, dhcp.METADATA_PORT,
self.conf, bind_address=dhcp.METADATA_DEFAULT_IP, **kwargs)


@@ -48,8 +48,12 @@ from neutron.privileged.agent.linux import dhcp as priv_dhcp
LOG = logging.getLogger(__name__)
DNS_PORT = 53
# TODO(bence romsics): use the rehomed constants when they get released:
# https://review.opendev.org/738205
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_SUBNET_CIDR = '169.254.0.0/16'
METADATA_V6_IP = 'fe80::a9fe:a9fe'
METADATA_V6_CIDR = 'fe80::a9fe:a9fe/64'
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'
@@ -328,6 +332,9 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta):
interface_file_path = self.get_conf_file_name('interface')
file_utils.replace_file(interface_file_path, value)
def get_metadata_bind_interface(self, port):
return self.device_manager.get_interface_name(self.network, port)
@property
def active(self):
return self._get_process_manager().active
@@ -1329,11 +1336,9 @@ class Dnsmasq(DhcpLocalProcess):
providing access to the metadata service via logical routers built
with 3rd party backends.
"""
# Only IPv4 subnets, with dhcp enabled, will use the metadata proxy.
all_subnets = cls._get_all_subnets(network)
v4_dhcp_subnets = [s for s in all_subnets
if s.ip_version == 4 and s.enable_dhcp]
if not v4_dhcp_subnets:
dhcp_subnets = [s for s in all_subnets if s.enable_dhcp]
if not dhcp_subnets:
return False
if conf.force_metadata:
@@ -1347,7 +1352,7 @@
return True
isolated_subnets = cls.get_isolated_subnets(network)
return any(isolated_subnets[s.id] for s in v4_dhcp_subnets)
return any(isolated_subnets[s.id] for s in dhcp_subnets)
class DeviceManager(object):
@@ -1714,6 +1719,8 @@ class DeviceManager(object):
if self.conf.force_metadata or self.conf.enable_isolated_metadata:
ip_cidrs.append(constants.METADATA_CIDR)
if netutils.is_ipv6_enabled():
ip_cidrs.append(METADATA_V6_CIDR)
self.driver.init_l3(interface_name, ip_cidrs,
namespace=network.namespace)


@@ -30,6 +30,7 @@ from neutron._i18n import _
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
LOG = logging.getLogger(__name__)
@@ -64,6 +65,7 @@ defaults
listen listener
bind %(host)s:%(port)s
%(bind_v6_line)s
server metadata %(unix_socket_path)s
http-request del-header X-Neutron-%(res_type_del)s-ID
http-request set-header X-Neutron-%(res_type)s-ID %(res_id)s
@@ -76,13 +78,16 @@ class InvalidUserOrGroupException(Exception):
class HaproxyConfigurator(object):
def __init__(self, network_id, router_id, unix_socket_path, host, port,
user, group, state_path, pid_file):
user, group, state_path, pid_file, host_v6=None,
bind_interface=None):
self.network_id = network_id
self.router_id = router_id
if network_id is None and router_id is None:
raise exceptions.NetworkIdOrRouterIdRequiredError()
self.host = host
self.host_v6 = host_v6
self.bind_interface = bind_interface
self.port = port
self.user = user
self.group = group
@@ -127,8 +132,14 @@ class HaproxyConfigurator(object):
'group': groupname,
'pidfile': self.pidfile,
'log_level': self.log_level,
'log_tag': self.log_tag
'log_tag': self.log_tag,
'bind_v6_line': '',
}
if self.host_v6 and self.bind_interface:
cfg_info['bind_v6_line'] = (
'bind %s:%s interface %s' % (
self.host_v6, self.port, self.bind_interface)
)
# If using the network ID, delete any spurious router ID that might
# have been in the request, same for network ID when using router ID.
if self.network_id:
@@ -211,7 +222,9 @@ class MetadataDriver(object):
@classmethod
def _get_metadata_proxy_callback(cls, bind_address, port, conf,
network_id=None, router_id=None):
network_id=None, router_id=None,
bind_address_v6=None,
bind_interface=None):
def callback(pid_file):
metadata_proxy_socket = conf.metadata_proxy_socket
user, group = (
@@ -224,7 +237,9 @@ class MetadataDriver(object):
user,
group,
conf.state_path,
pid_file)
pid_file,
bind_address_v6,
bind_interface)
haproxy.create_config_file()
proxy_cmd = [HAPROXY_SERVICE,
'-f', haproxy.cfg_path]
@@ -235,14 +250,23 @@ class MetadataDriver(object):
@classmethod
def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf,
bind_address="0.0.0.0", network_id=None,
router_id=None):
router_id=None, bind_address_v6=None,
bind_interface=None):
uuid = network_id or router_id
callback = cls._get_metadata_proxy_callback(
bind_address, port, conf, network_id=network_id,
router_id=router_id)
bind_address, port, conf,
network_id=network_id, router_id=router_id,
bind_address_v6=bind_address_v6, bind_interface=bind_interface)
pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name,
callback=callback)
if bind_interface is not None and bind_address_v6 is not None:
# HAProxy cannot bind() until IPv6 Duplicate Address Detection
# completes. We must wait until the address leaves its 'tentative'
# state.
ip_lib.IpAddrCommand(
parent=ip_lib.IPDevice(name=bind_interface, namespace=ns_name)
).wait_until_address_ready(address=bind_address_v6)
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
cls.monitors[router_id] = pm


@@ -343,17 +343,31 @@ class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework):
exception=RuntimeError("Stale metadata proxy didn't get killed"))
def _test_metadata_proxy_spawn_kill_with_subnet_create_delete(self):
network = self.network_dict_for_dhcp(ip_version=lib_const.IP_VERSION_6)
network = self.network_dict_for_dhcp(
ip_version=lib_const.IP_VERSION_6,
dhcp_enabled=False)
self.configure_dhcp_for_network(network=network)
pm = self._get_metadata_proxy_process(network)
# A newly created network with ipv6 subnet will not have metadata proxy
self.assertFalse(pm.active)
new_network = copy.deepcopy(network)
dhcp_enabled_ipv4_subnet = self.create_subnet_dict(network.id)
new_network.subnets.append(dhcp_enabled_ipv4_subnet)
self.mock_plugin_api.get_network_info.return_value = new_network
fixed_ip_mock = mock.Mock(
ip_address='192.168.10.2',
subnet_id=dhcp_enabled_ipv4_subnet.id)
dhcp_port_mock = mock.Mock(
dns_assignment={},
extra_dhcp_opts=[],
fixed_ips=[fixed_ip_mock],
id=new_network.ports[0].id,
mac_address=str(self._DHCP_PORT_MAC_ADDRESS))
self.mock_plugin_api.get_dhcp_port.return_value = dhcp_port_mock
self.mock_plugin_api.update_dhcp_port.return_value = dhcp_port_mock
self.agent.refresh_dhcp_helper(network.id)
# Metadata proxy should be spawned for the newly added subnet
common_utils.wait_until_true(


@@ -122,7 +122,8 @@ fake_dhcp_port = dhcp.DictModel(
allocation_pools=fake_subnet1_allocation_pools,
mac_address='aa:bb:cc:dd:ee:22',
network_id=FAKE_NETWORK_UUID,
fixed_ips=[fake_fixed_ip2])
fixed_ips=[fake_fixed_ip2],
admin_state_up=True)
fake_port2 = dhcp.DictModel(id='12345678-1234-aaaa-123456789000',
device_id='dhcp-12345678-1234-aaaa-123456789000',
@@ -365,6 +366,14 @@ class TestDhcpAgent(base.BaseTestCase):
trace_level='warning',
expected_sync=False)
def test_call_driver_get_metadata_bind_interface_returns(self):
network = mock.Mock()
self.driver().get_metadata_bind_interface.return_value = 'iface0'
agent = dhcp_agent.DhcpAgent(cfg.CONF)
self.assertEqual(
'iface0',
agent.call_driver('get_metadata_bind_interface', network))
def _test_sync_state_helper(self, known_net_ids, active_net_ids):
active_networks = set(mock.Mock(id=netid) for netid in active_net_ids)
@@ -553,14 +562,18 @@
self.assertEqual(all_ports, ports_ready)
def test_dhcp_ready_ports_updates_after_enable_dhcp(self):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.assertEqual(set(), dhcp.dhcp_ready_ports)
dhcp.configure_dhcp_for_network(fake_network)
self.assertEqual({fake_port1.id}, dhcp.dhcp_ready_ports)
with mock.patch('neutron.agent.linux.ip_lib.'
'IpAddrCommand.wait_until_address_ready') as mock_wait:
mock_wait.return_value = True
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.assertEqual(set(), dhcp.dhcp_ready_ports)
dhcp.configure_dhcp_for_network(fake_network)
self.assertEqual({fake_port1.id}, dhcp.dhcp_ready_ports)
def test_dhcp_metadata_destroy(self):
cfg.CONF.set_override('force_metadata', True)
cfg.CONF.set_override('enable_isolated_metadata', False)
with mock.patch.object(metadata_driver,
'MetadataDriver') as md_cls:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
@@ -569,10 +582,26 @@
mock.ANY, mock.ANY, mock.ANY, mock.ANY,
bind_address=self.METADATA_DEFAULT_IP,
network_id=fake_network.id)
md_cls.reset_mock()
dhcp.disable_dhcp_helper(fake_network.id)
md_cls.destroy_monitored_metadata_proxy.assert_called_once_with(
mock.ANY, fake_network.id, mock.ANY, fake_network.namespace)
def test_agent_start_restarts_metadata_proxy(self):
cfg.CONF.set_override('force_metadata', True)
cfg.CONF.set_override('enable_isolated_metadata', False)
with mock.patch.object(metadata_driver,
'MetadataDriver') as md_cls:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.configure_dhcp_for_network(fake_network)
md_cls.destroy_monitored_metadata_proxy.assert_called_once_with(
mock.ANY, fake_network.id, mock.ANY, fake_network.namespace)
md_cls.spawn_monitored_metadata_proxy.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, mock.ANY,
bind_address=self.METADATA_DEFAULT_IP,
network_id=fake_network.id)
def test_report_state_revival_logic(self):
dhcp = dhcp_agent.DhcpAgentWithStateReport(HOSTNAME)
with mock.patch.object(dhcp.state_rpc,
@@ -758,6 +787,11 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):
self.mock_resize_p = mock.patch('neutron.agent.dhcp.agent.'
'DhcpAgent._resize_process_pool')
self.mock_resize = self.mock_resize_p.start()
self.mock_wait_until_address_ready_p = mock.patch(
'neutron.agent.linux.ip_lib.'
'IpAddrCommand.wait_until_address_ready')
self.mock_wait_until_address_ready_p.start()
self.addCleanup(self.mock_wait_until_address_ready_p.stop)
def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS):
return mock.call(conf=cfg.CONF,
@@ -984,6 +1018,42 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):
def test_enable_isolated_metadata_proxy_with_dist_network(self):
self._test_enable_isolated_metadata_proxy(fake_dist_network)
def _test_enable_isolated_metadata_proxy_ipv6(self, network):
cfg.CONF.set_override('enable_metadata_network', True)
cfg.CONF.set_override('debug', True)
cfg.CONF.set_override('log_file', 'test.log')
method_path = ('neutron.agent.metadata.driver.MetadataDriver'
'.spawn_monitored_metadata_proxy')
with mock.patch(method_path) as spawn, \
mock.patch.object(netutils, 'is_ipv6_enabled') as mock_ipv6:
mock_ipv6.return_value = True
self.call_driver.return_value = 'fake-interface'
self.dhcp.enable_isolated_metadata_proxy(network)
spawn.assert_called_once_with(self.dhcp._process_monitor,
network.namespace,
dhcp.METADATA_PORT,
cfg.CONF,
bind_address='169.254.169.254',
network_id=network.id,
bind_interface='fake-interface',
bind_address_v6='fe80::a9fe:a9fe')
def test_enable_isolated_metadata_proxy_with_metadata_network_ipv6(self):
network = copy.deepcopy(fake_meta_network)
network.ports = [fake_dhcp_port]
self._test_enable_isolated_metadata_proxy_ipv6(network)
def test_enable_isolated_metadata_proxy_with_metadata_network_dvr_ipv6(
self):
network = copy.deepcopy(fake_meta_dvr_network)
network.ports = [fake_dhcp_port]
self._test_enable_isolated_metadata_proxy_ipv6(network)
def test_enable_isolated_metadata_proxy_with_dist_network_ipv6(self):
network = copy.deepcopy(fake_dist_network)
network.ports = [fake_dhcp_port]
self._test_enable_isolated_metadata_proxy_ipv6(network)
def _test_disable_isolated_metadata_proxy(self, network):
cfg.CONF.set_override('enable_metadata_network', True)
method_path = ('neutron.agent.metadata.driver.MetadataDriver'
@@ -1824,6 +1894,9 @@ class TestDeviceManager(base.BaseTestCase):
else:
expected_ips = ['172.9.9.9/24', const.METADATA_CIDR]
if ipv6_enabled:
expected_ips.append(const.METADATA_V6_CIDR)
expected = [mock.call.get_device_name(port)]
if ipv6_enabled:


@@ -284,12 +284,15 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework):
router_info.agent = agent
agent.router_info[router.id] = router_info
agent.l3_ext_manager.ha_state_change = mock.Mock()
agent.enqueue_state_change(router.id, 'primary')
eventlet.sleep(self.conf.ha_vrrp_advert_int + 2)
agent.l3_ext_manager.ha_state_change.assert_called_once_with(
agent.context,
{'router_id': router.id, 'state': 'primary',
'host': agent.host})
with mock.patch('neutron.agent.linux.ip_lib.'
'IpAddrCommand.wait_until_address_ready') as mock_wait:
mock_wait.return_value = True
agent.enqueue_state_change(router.id, 'primary')
eventlet.sleep(self.conf.ha_vrrp_advert_int + 2)
agent.l3_ext_manager.ha_state_change.assert_called_once_with(
agent.context,
{'router_id': router.id, 'state': 'primary',
'host': agent.host})
def test_enqueue_state_change_router_active_ha(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)


@@ -2950,6 +2950,10 @@ class TestDnsmasq(TestBase):
self.assertTrue(dhcp.Dnsmasq.should_enable_metadata(
self.conf, FakeV4NetworkNoRouter()))
def test_should_enable_metadata_isolated_network_returns_true_ipv6(self):
self.assertTrue(dhcp.Dnsmasq.should_enable_metadata(
self.conf, FakeV6Network()))
def test_should_enable_metadata_non_isolated_network_returns_false(self):
self.assertFalse(dhcp.Dnsmasq.should_enable_metadata(
self.conf, FakeV4NetworkDistRouter()))
@@ -3147,7 +3151,9 @@ class TestDeviceManager(TestConfBase):
expect_ips = ['192.168.0.6/24', 'fdca:3ba5:a17a:4ba3::2/64']
if enable_isolated_metadata or force_metadata:
expect_ips.append(constants.METADATA_CIDR)
expect_ips.extend([
constants.METADATA_CIDR,
dhcp.METADATA_V6_CIDR])
mgr.driver.init_l3.assert_called_with('ns-XXX',
expect_ips,
namespace='qdhcp-ns')


@@ -128,13 +128,17 @@ class TestMetadataDriverProcess(base.BaseTestCase):
return_value=test_utils.FakeUser(self.EUNAME)),\
mock.patch('grp.getgrnam',
return_value=test_utils.FakeGroup(self.EGNAME)),\
mock.patch('os.makedirs'):
mock.patch('os.makedirs'),\
mock.patch(
'neutron.agent.linux.ip_lib.'
'IpAddrCommand.wait_until_address_ready') as mock_wait:
cfg_file = os.path.join(
metadata_driver.HaproxyConfigurator.get_config_path(
agent.conf.state_path),
"%s.conf" % router_id)
mock_open = self.useFixture(
lib_fixtures.OpenFixture(cfg_file)).mock_open
mock_wait.return_value = True
agent.metadata_driver.spawn_monitored_metadata_proxy(
agent.process_monitor,
router_ns,
@@ -160,7 +164,8 @@
'res_type_del': 'Network',
'pidfile': self.PIDFILE,
'log_level': 'debug',
'log_tag': log_tag}
'log_tag': log_tag,
'bind_v6_line': ''}
mock_open.assert_has_calls([
mock.call(cfg_file, 'w'),