Rename container_address to management_address

This patch aims to reduce confusion caused by the variable
`container_address`, which applies to bare metal hosts as well as
containers. Along with that, it renames `is_container_address` to
`is_management_address` so the name matches the purpose of the
variable, since the `container` part raised confusion.

Change-Id: I314224f3376cf91e05680b11d225fdaf81ec32ab
Dmitriy Rabotyagov 2023-05-15 12:13:21 +02:00
parent 41965140c3
commit 86d1bdff55
22 changed files with 91 additions and 77 deletions

View File

@ -147,7 +147,7 @@ Now you can set up Zookeeper as coordination backend for Gnocchi:
.. code-block:: console
gnocchi_coordination_url: "zookeeper://{% for host in groups['zookeeper_all'] %}{{ hostvars[host]['container_address'] }}:2181{% if not loop.last %},{% endif %}{% endfor %}"
gnocchi_coordination_url: "zookeeper://{% for host in groups['zookeeper_all'] %}{{ hostvars[host]['management_address'] }}:2181{% if not loop.last %},{% endif %}{% endfor %}"
You also have to install additional packages:

View File

@ -3,7 +3,7 @@ cidr_networks:
bmaas: 172.29.228.0/22
lbaas: 172.29.232.0/22
dbaas: 172.29.252.0/22
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -41,12 +41,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
- network:
container_bridge: "br-vxlan"
container_type: "veth"

View File

@ -9,7 +9,7 @@ cidr_networks:
{% if 'octavia' in bootstrap_host_scenarios_expanded %}
lbaas: 172.29.232.0/22
{% endif %}
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -49,12 +49,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
# define static routes to the neutron public IP ranges via br-mgmt
# this is AIO specific and relies on the host forwarding to reach instance
# floating ips using the br-mgmt interface as a gateway

View File

@ -180,7 +180,7 @@
# 'container_interface'.
# (e.g., 'ip_from_q'_address and 'container_interface'_address)
#
# Option: is_container_address (required, boolean)
# Option: is_management_address (required, boolean)
# If true, the load balancer uses this IP address to access services
# in the container. Only valid for networks with 'ip_from_q' option.
#
@ -251,8 +251,8 @@
# container_bridge: "br-mgmt"
# container_interface: "eth1"
# container_type: "veth"
# ip_from_q: "container"
# is_container_address: true
# ip_from_q: "management"
# is_management_address: true
# - network:
# group_binds:
# - glance_api

View File

@ -1,6 +1,6 @@
---
cidr_networks:
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -25,12 +25,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
#
# The below provider network defines details related to overlay traffic,
# including the range of VXLAN VNIs to assign to project/tenant networks

View File

@ -44,13 +44,13 @@ global_overrides:
container_type: "veth"
container_interface: "eth1"
ip_from_q: "pod1_container"
address_prefix: "container"
address_prefix: "management"
type: "raw"
group_binds:
- all_containers
- hosts
reference_group: "pod1_hosts"
is_container_address: true
is_management_address: true
# Containers in pod1 need routes to the container networks of other pods
static_routes:
# Route to container networks
@ -61,13 +61,13 @@ global_overrides:
container_type: "veth"
container_interface: "eth1"
ip_from_q: "pod2_container"
address_prefix: "container"
address_prefix: "management"
type: "raw"
group_binds:
- all_containers
- hosts
reference_group: "pod2_hosts"
is_container_address: true
is_management_address: true
# Containers in pod2 need routes to the container networks of other pods
static_routes:
# Route to container networks
@ -78,13 +78,13 @@ global_overrides:
container_type: "veth"
container_interface: "eth1"
ip_from_q: "pod3_container"
address_prefix: "container"
address_prefix: "management"
type: "raw"
group_binds:
- all_containers
- hosts
reference_group: "pod3_hosts"
is_container_address: true
is_management_address: true
# Containers in pod3 need routes to the container networks of other pods
static_routes:
# Route to container networks
@ -95,13 +95,13 @@ global_overrides:
container_type: "veth"
container_interface: "eth1"
ip_from_q: "pod4_container"
address_prefix: "container"
address_prefix: "management"
type: "raw"
group_binds:
- all_containers
- hosts
reference_group: "pod4_hosts"
is_container_address: true
is_management_address: true
# Containers in pod4 need routes to the container networks of other pods
static_routes:
# Route to container networks

View File

@ -1,6 +1,6 @@
---
cidr_networks: &cidr_networks
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -26,12 +26,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
- network:
container_bridge: "br-vxlan"
container_type: "veth"

View File

@ -1,6 +1,6 @@
---
cidr_networks:
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -25,12 +25,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
- network:
container_bridge: "br-vxlan"
container_type: "veth"

View File

@ -1,6 +1,6 @@
---
cidr_networks:
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -25,12 +25,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
#
# The below provider network defines details related to vxlan traffic,
# including the range of VNIs to assign to project/tenant networks and

View File

@ -1,6 +1,6 @@
---
cidr_networks:
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -25,12 +25,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
#
# The below provider network defines details related to overlay traffic,
# including the range of VXLAN VNIs to assign to project/tenant networks

View File

@ -1,6 +1,6 @@
---
cidr_networks:
container: 172.29.236.0/22
management: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
@ -21,12 +21,12 @@ global_overrides:
container_bridge: "br-mgmt"
container_type: "veth"
container_interface: "eth1"
ip_from_q: "container"
ip_from_q: "management"
type: "raw"
group_binds:
- all_containers
- hosts
is_container_address: true
is_management_address: true
- network:
container_bridge: "br-vxlan"
container_type: "veth"

View File

@ -140,7 +140,7 @@ install_method: source
# proxy_env_url: http://username:pa$$w0rd@10.10.10.9:9000/
## (1) This sets up a permanent environment, used during and after deployment:
# no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
# no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['management_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
# global_environment_variables:
# HTTP_PROXY: "{{ proxy_env_url }}"
# HTTPS_PROXY: "{{ proxy_env_url }}"
@ -153,7 +153,7 @@ install_method: source
# deployment_environment_variables:
# http_proxy: "{{ proxy_env_url }}"
# https_proxy: "{{ proxy_env_url }}"
# no_proxy: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['keystone_all'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
# no_proxy: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['keystone_all'] %}{{ hostvars[host]['management_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
## SSH connection wait time

View File

@ -37,7 +37,6 @@ debug: False
## SSH connection wait time
ssh_delay: 5
management_address: "{{ container_address }}"
openstack_service_bind_address: "{{ management_address }}"
package_state: "present"

View File

@ -17,4 +17,4 @@
# both the os_ceilometer and os_keystone roles require them
# Hosts allowed to override remote IP with X-Forwarded-For
keystone_set_real_ip_from: "{{ groups['haproxy'] | map('extract', hostvars, 'container_address') | list }}"
keystone_set_real_ip_from: "{{ groups['haproxy'] | map('extract', hostvars, 'management_address') | list }}"

View File

@ -41,7 +41,7 @@ REQUIRED_HOSTVARS = [
'properties',
'ansible_host',
'physical_host_group',
'container_address',
'management_address',
'container_name',
'container_networks',
'physical_host',
@ -73,7 +73,7 @@ class ProviderNetworkMisconfiguration(Exception):
self.queue_name = queue_name
error_msg = ("Provider network with queue '{queue}' "
"requires 'is_container_address' "
"requires 'is_management_address' "
"to be set to True.")
self.message = error_msg.format(queue=self.queue_name)
@ -213,7 +213,7 @@ def _build_container_hosts(container_affinity, container_hosts, type_and_name,
hostvars_options.update({
'ansible_host': address,
'container_address': address,
'management_address': address,
'container_name': container_host_name,
'physical_host': host_type,
'physical_host_group': physical_host_type,
@ -443,7 +443,7 @@ def user_defined_setup(config, inventory):
hvs[_key].update({
'ansible_host': _value['ip'],
'container_address': _value['ip'],
'management_address': _value['ip'],
'is_metal': True,
'physical_host_group': key
})
@ -548,7 +548,7 @@ def network_entry(is_metal, interface,
def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
bridge, bridge_type, net_type, net_mtu,
user_config, is_container_address, static_routes,
user_config, is_management_address, static_routes,
gateway, reference_group, address_prefix):
"""Process additional ip adds and append then to hosts as needed.
@ -563,7 +563,7 @@ def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
:param netmask: ``str`` netmask to use.
:param interface: ``str`` interface name to set for the network.
:param user_config: ``dict`` user defined configuration details.
:param is_container_address: ``bol`` set this address to container_address.
:param is_management_address: ``bool`` set address as management_address.
:param static_routes: ``list`` List containing static route dicts.
:param gateway: ``str`` gateway address to use in container
:param reference_group: ``str`` group to filter membership of host against.
@ -587,7 +587,7 @@ def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
net_type,
net_mtu,
user_config,
is_container_address,
is_management_address,
static_routes,
gateway,
reference_group,
@ -662,7 +662,7 @@ def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
elif is_metal:
network = networks[old_address] = _network
network['netmask'] = netmask
if is_container_address:
if is_management_address:
# Container physical host group
cphg = container.get('physical_host_group')
@ -673,11 +673,11 @@ def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
phg = user_config[cphg][physical_host]
network['address'] = phg['ip']
if is_container_address is True:
if is_management_address is True:
container['ansible_host'] = networks[old_address]['address']
if is_container_address is True:
container['container_address'] = networks[old_address]['address']
if is_management_address is True:
container['management_address'] = networks[old_address]['address']
if gateway:
# if specified, gateway address will be used for default route in
@ -783,7 +783,9 @@ def container_skel_load(container_skel, inventory, config):
net_type=p_net.get('container_type'),
net_mtu=p_net.get('container_mtu'),
user_config=config,
is_container_address=p_net.get('is_container_address'),
is_management_address=p_net.get(
'is_management_address', p_net.get('is_container_address')
),
static_routes=p_net.get('static_routes'),
gateway=p_net.get('gateway'),
reference_group=p_net.get('reference_group'),
@ -1048,7 +1050,9 @@ def _check_config_settings(cidr_networks, config, container_skel):
)
if (p_net.get('container_bridge') == overrides.get(
'management_bridge')):
if not p_net.get('is_container_address'):
if not p_net.get(
'is_management_address',
p_net.get('is_container_address')):
raise ProviderNetworkMisconfiguration(q_name)
logger.debug("Provider network information OK")
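
The hunks above keep the rename backwards compatible: the inventory
prefers the new key and falls back to `is_container_address` when
`is_management_address` is absent. A minimal standalone sketch of that
lookup pattern (the dictionaries below are illustrative examples, not
taken from a real openstack_user_config.yml):

def resolve_management_flag(p_net):
    # Prefer the new key; fall back to the deprecated one so existing
    # provider network definitions keep working unchanged.
    return p_net.get('is_management_address',
                     p_net.get('is_container_address'))

new_style = {'ip_from_q': 'management', 'is_management_address': True}
old_style = {'ip_from_q': 'container', 'is_container_address': True}

assert resolve_management_flag(new_style) is True
assert resolve_management_flag(old_style) is True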

View File

@ -305,7 +305,7 @@ def remove_ip_addresses(inventory, filepath=None):
if variables.get('is_metal', False):
continue
ip_vars = ['container_networks', 'container_address',
ip_vars = ['container_networks', 'management_address',
'ansible_host', 'ansible_ssh_host']
# Don't raise a KeyError if the entries have already been removed.

View File

@ -79,7 +79,7 @@
- name: Determine management bridge IP address
include_tasks: ../common-tasks/dynamic-address-fact.yml
vars:
network_address: "container_address"
network_address: "management_address"
tags:
- always
@ -152,7 +152,7 @@
roles:
- role: "os_nova"
nova_management_address: "{{ container_address }}"
nova_management_address: "{{ management_address }}"
nova_cinder_rbd_inuse: "{{ hostvars['localhost']['nova_cinder_rbd_inuse'] | default(False) }}"
- role: "openstack.osa.system_crontab_coordination"

View File

@ -120,7 +120,7 @@
- "'ebtables' in modules_content"
- "'vm.swappiness' in sysctl_content"
- "'172.29.236.100 {{ ansible_facts['fqdn'] }} {{ ansible_facts['hostname'] }}' in hosts_content"
- "'{{ hostvars[groups['galera_all'][0]]['container_address'] }} {{ hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] }}.openstack.local {{ hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] ~ ((hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] != groups['galera_all'][0]) | ternary(' ' ~ groups['galera_all'][0], '')) }}' in hosts_content"
- "'{{ hostvars[groups['galera_all'][0]]['management_address'] }} {{ hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] }}.openstack.local {{ hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] ~ ((hostvars[groups['galera_all'][0]]['ansible_facts']['hostname'] != groups['galera_all'][0]) | ternary(' ' ~ groups['galera_all'][0], '')) }}' in hosts_content"
- "release_file.stat.exists"
- "systat_file.stat.exists"
- "'PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' in environment_content"

View File

@ -28,7 +28,7 @@
tasks:
- name: Check the repo sync file on each repo server
uri:
url: "http://{{ hostvars[item]['container_address'] }}:{{ repo_server_port }}/{{ repo_requirements_file }}"
url: "http://{{ hostvars[item]['management_address'] }}:{{ repo_server_port }}/{{ repo_requirements_file }}"
with_inventory_hostnames: "{{ groups['repo_all'] }}"
when: install_method == 'source'
tags:
@ -129,7 +129,7 @@
state: present
- name: Connect to remote memcache servers (full mesh testing)
shell: "echo stats | nc -w 3 {{ hostvars[memcached_host]['container_address'] }} {{ memcached_port }}"
shell: "echo stats | nc -w 3 {{ hostvars[memcached_host]['management_address'] }} {{ memcached_port }}"
changed_when: false
register: memcache_stats
with_items: "{{ groups['memcached'] }}"
@ -335,7 +335,7 @@
dest: "{{ venv_path }}/rabbitmq-test.py"
mode: 0755
- name: Connect to rabbitmq
command: "{{ venv_path }}/bin/python {{ venv_path }}/rabbitmq-test.py {{ hostvars[groups['rabbitmq_all'][0]]['container_address'] }}"
command: "{{ venv_path }}/bin/python {{ venv_path }}/rabbitmq-test.py {{ hostvars[groups['rabbitmq_all'][0]]['management_address'] }}"
changed_when: false
tags:
- healthcheck

View File

@ -0,0 +1,11 @@
---
deprecations:
- |
The ``is_container_address`` key, used in the definition of
``provider_networks``, has been renamed to ``is_management_address``
to avoid confusion for bare metal deployments. Backwards compatibility
was kept.
- |
Variable ``container_address`` has been deprecated in favor of
``management_address``. Variable ``management_address`` has been
available since the Victoria release and defaulted to the value of
``container_address``.
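
For deployers, the practical effect is that generated inventories now
expose `management_address` for every host. A minimal check, assuming
the default OpenStack-Ansible inventory location and the usual
`_meta.hostvars` layout (both are assumptions; adjust the path if your
deployment differs):

import json

INVENTORY = '/etc/openstack_deploy/openstack_inventory.json'

with open(INVENTORY) as f:
    hostvars = json.load(f)['_meta']['hostvars']

for host, variables in sorted(hostvars.items()):
    # Fall back to the deprecated name for inventories generated
    # before this change.
    address = variables.get('management_address',
                            variables.get('container_address'))
    print(f'{host}: {address}')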

View File

@ -1,5 +1,5 @@
---
no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['management_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
http_proxy_env: "{{ lookup('env', 'http_proxy') }}"
https_proxy_env: "{{ lookup('env', 'https_proxy') }}"
global_environment_variables:

View File

@ -728,7 +728,7 @@ class TestConfigCheckBase(unittest.TestCase):
class TestConfigChecks(TestConfigCheckBase):
def test_missing_container_cidr_network(self):
self.delete_provider_network('container')
self.delete_provider_network('management')
with self.assertRaises(SystemExit) as context:
get_inventory()
expectedLog = ("No container or management network specified in "
@ -736,13 +736,13 @@ class TestConfigChecks(TestConfigCheckBase):
self.assertEqual(str(context.exception), expectedLog)
def test_management_network_malformed(self):
self.delete_provider_network_key('container', 'is_container_address')
self.delete_provider_network_key('management', 'is_management_address')
self.write_config()
with self.assertRaises(di.ProviderNetworkMisconfiguration) as context:
get_inventory()
expectedLog = ("Provider network with queue 'container' "
"requires 'is_container_address' "
expectedLog = ("Provider network with queue 'management' "
"requires 'is_management_address' "
"to be set to True.")
self.assertEqual(str(context.exception), expectedLog)
self.restore_config()
@ -866,7 +866,7 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def setUp(self):
super(TestStaticRouteConfig, self).setUp()
self.expectedMsg = ("Static route provider network with queue "
"'container' needs both 'cidr' and 'gateway' "
"'management' needs both 'cidr' and 'gateway' "
"values.")
def add_static_route(self, q_name, route_dict):
@ -882,12 +882,12 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def test_setting_static_route(self):
route_dict = {'cidr': '10.176.0.0/12',
'gateway': '172.29.248.1'}
self.add_static_route('container', route_dict)
self.add_static_route('management', route_dict)
inventory = get_inventory()
# Use aio1 and 'container_address' since they're known keys.
# Use aio1 and 'management_address' since they're known keys.
hostvars = inventory['_meta']['hostvars']['aio1']
cont_add = hostvars['container_networks']['container_address']
cont_add = hostvars['container_networks']['management_address']
self.assertIn('static_routes', cont_add)
@ -897,7 +897,7 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def test_setting_bad_static_route_only_cidr(self):
route_dict = {'cidr': '10.176.0.0/12'}
self.add_static_route('container', route_dict)
self.add_static_route('management', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
@ -908,7 +908,7 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def test_setting_bad_static_route_only_gateway(self):
route_dict = {'gateway': '172.29.248.1'}
self.add_static_route('container', route_dict)
self.add_static_route('management', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
@ -920,7 +920,7 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def test_setting_bad_gateway_value(self):
route_dict = {'cidr': '10.176.0.0/12',
'gateway': None}
self.add_static_route('container', route_dict)
self.add_static_route('management', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
@ -932,7 +932,7 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def test_setting_bad_cidr_value(self):
route_dict = {'cidr': None,
'gateway': '172.29.248.1'}
self.add_static_route('container', route_dict)
self.add_static_route('management', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
@ -944,7 +944,7 @@ class TestStaticRouteConfig(TestConfigCheckBase):
def test_setting_bad_cidr_gateway_value(self):
route_dict = {'cidr': None,
'gateway': None}
self.add_static_route('container', route_dict)
self.add_static_route('management', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
@ -993,7 +993,7 @@ class TestGlobalOverridesConfigDeletion(TestConfigCheckBase):
self.assertEqual('bar', self.inventory['all']['vars']['foo'])
def test_container_cidr_key_retained(self):
user_cidr = self.user_defined_config['cidr_networks']['container']
user_cidr = self.user_defined_config['cidr_networks']['management']
di._parse_global_variables(user_cidr, self.inventory,
self.user_defined_config)
self.assertIn('container_cidr', self.inventory['all']['vars'])
@ -1533,9 +1533,9 @@ class TestInventoryGroupConstraints(unittest.TestCase):
class TestL3ProviderNetworkConfig(TestConfigCheckBase):
def setUp(self):
super(TestL3ProviderNetworkConfig, self).setUp()
self.delete_provider_network('container')
self.delete_provider_network('management')
self.add_provider_network('pod1_container', '172.29.236.0/22')
self.add_provider_network_key('container', 'ip_from_q',
self.add_provider_network_key('management', 'ip_from_q',
'pod1_container')
self.add_provider_network_key('pod1_container', 'address_prefix',
'management')