diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 76e0cc56..b2e55335 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -32,6 +32,7 @@ Added
 * GnocchiResourceType.list_resource_type
 * GnocchiResourceType.create_resource_type
 * GnocchiResourceType.create_delete_resource_type
+* NeutronSubnets.delete_subnets
 * [ci] New Zuul V3 native jobs
 
 Changed
diff --git a/rally-jobs/neutron.yaml b/rally-jobs/neutron.yaml
index 63185702..ee22f40e 100644
--- a/rally-jobs/neutron.yaml
+++ b/rally-jobs/neutron.yaml
@@ -617,6 +617,26 @@
         failure_rate:
           max: 20
 
+  NeutronSubnets.delete_subnets:
+    -
+      runner:
+        type: "constant"
+        times: {{smoke or 15}}
+        concurrency: {{smoke or 15}}
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: {{smoke or 15}}
+          user_choice_method: "round_robin"
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+        network:
+          subnets_per_network: 15
+          dualstack: True
+          router: {}
+
   Quotas.neutron_update:
     -
       args:
diff --git a/rally_openstack/contexts/network/networks.py b/rally_openstack/contexts/network/networks.py
index e965397f..5da8fbce 100644
--- a/rally_openstack/contexts/network/networks.py
+++ b/rally_openstack/contexts/network/networks.py
@@ -61,6 +61,9 @@
             "items": {"type": "string"},
             "uniqueItems": True
         },
+        "dualstack": {
+            "type": "boolean",
+        },
         "router": {
             "type": "object",
             "properties": {
@@ -89,7 +92,8 @@
         "subnets_per_network": 1,
         "network_create_args": {},
         "dns_nameservers": None,
-        "router": {"external": True}
+        "router": {"external": True},
+        "dualstack": False
     }
 
     def setup(self):
@@ -107,11 +111,12 @@
                 self.context.get("users", []))):
             self.context["tenants"][tenant_id]["networks"] = []
             for i in range(self.config["networks_per_tenant"]):
-                # NOTE(amaretskiy): add_router and subnets_num take effect
-                #                   for Neutron only.
+                # NOTE(amaretskiy): router_create_args and subnets_num take
+                #                   effect for Neutron only.
                 network_create_args = self.config["network_create_args"].copy()
                 network = net_wrapper.create_network(
                     tenant_id,
+                    dualstack=self.config["dualstack"],
                     subnets_num=self.config["subnets_per_network"],
                     network_create_args=network_create_args,
                     router_create_args=self.config["router"],
diff --git a/rally_openstack/scenarios/neutron/network.py b/rally_openstack/scenarios/neutron/network.py
index 61a9978f..e9a6cf7c 100644
--- a/rally_openstack/scenarios/neutron/network.py
+++ b/rally_openstack/scenarios/neutron/network.py
@@ -573,3 +573,34 @@
         """
         agent_args = agent_args or {}
         self._list_agents(**agent_args)
+
+
+@validation.add("required_services",
+                services=[consts.Service.NEUTRON])
+@validation.add("required_contexts", contexts=["network"])
+@validation.add("required_platform", platform="openstack", users=True)
+@scenario.configure(context={"cleanup@openstack": ["neutron"]},
+                    name="NeutronSubnets.delete_subnets",
+                    platform="openstack")
+class DeleteSubnets(utils.NeutronScenario):
+
+    def run(self):
+        """Delete a subnet that belongs to each precreated network.
+
+        Each runner instance picks a specific subnet from the list based on
+        its positional location in the list of users. By doing so, we can
+        start multiple threads with a sufficient number of users created and
+        spread delete requests across all of them, so that they hit
+        different subnets concurrently.
+
+        Concurrent execution of this scenario should help reveal any race
+        conditions and other concurrency issues in the Neutron IP allocation
+        layer, among other things.
+        """
+        tenant_id = self.context["tenant"]["id"]
+        users = self.context["tenants"][tenant_id]["users"]
+        number = users.index(self.context["user"])
+        for network in self.context["tenants"][tenant_id]["networks"]:
+            # delete one of the subnets based on the user's sequential number
+            subnet_id = network["subnets"][number]
+            self._delete_subnet({"subnet": {"id": subnet_id}})
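The selection rule in `DeleteSubnets.run()` is the heart of the scenario: each user deletes the subnet whose index equals that user's position in its tenant's user list, so concurrent iterations touch disjoint subnets as long as `subnets_per_network` is at least `users_per_tenant`. A minimal standalone sketch of that rule; the context dict below is a hand-made stand-in for what the `users` and `network` contexts normally populate, not real rally data:

```python
# Standalone illustration of the subnet selection used by DeleteSubnets.run().
context = {
    "tenant": {"id": "t1"},
    "user": {"id": "u2"},
    "tenants": {
        "t1": {
            "users": [{"id": "u1"}, {"id": "u2"}, {"id": "u3"}],
            "networks": [
                {"subnets": ["s1", "s2", "s3"]},
                {"subnets": ["s4", "s5", "s6"]},
            ],
        }
    },
}


def subnets_to_delete(ctx):
    """Return the subnet IDs one iteration of the scenario would delete."""
    tenant_id = ctx["tenant"]["id"]
    users = ctx["tenants"][tenant_id]["users"]
    # the user's position within its tenant picks the subnet index
    number = users.index(ctx["user"])
    return [net["subnets"][number]
            for net in ctx["tenants"][tenant_id]["networks"]]


# u2 is the second user, so the second subnet of each network is selected
assert subnets_to_delete(context) == ["s2", "s5"]
```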
+ """ + tenant_id = self.context["tenant"]["id"] + users = self.context["tenants"][tenant_id]["users"] + number = users.index(self.context["user"]) + for network in self.context["tenants"][tenant_id]["networks"]: + # delete one of subnets based on the user sequential number + subnet_id = network["subnets"][number] + self._delete_subnet({"subnet": {"id": subnet_id}}) diff --git a/rally_openstack/wrappers/network.py b/rally_openstack/wrappers/network.py index fe14ce2b..8fc62df0 100644 --- a/rally_openstack/wrappers/network.py +++ b/rally_openstack/wrappers/network.py @@ -14,6 +14,7 @@ # under the License. import abc +import itertools import netaddr import six @@ -32,6 +33,7 @@ CONF = cfg.CONF cidr_incr = utils.RAMInt() +ipv6_cidr_incr = utils.RAMInt() def generate_cidr(start_cidr="10.2.0.0/24"): @@ -44,7 +46,10 @@ def generate_cidr(start_cidr="10.2.0.0/24"): :param start_cidr: start CIDR str :returns: next available CIDR str """ - cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr))) + if netaddr.IPNetwork(start_cidr).version == 4: + cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr))) + else: + cidr = str(netaddr.IPNetwork(start_cidr).next(next(ipv6_cidr_incr))) LOG.debug("CIDR generated: %s" % cidr) return cidr @@ -64,6 +69,7 @@ class NetworkWrapper(object): This allows to significantly re-use and simplify code. """ START_CIDR = "10.2.0.0/24" + START_IPV6_CIDR = "dead:beaf::/64" SERVICE_IMPL = None def __init__(self, clients, owner, config=None): @@ -75,9 +81,8 @@ class NetworkWrapper(object): random names, so must implement rally.common.utils.RandomNameGeneratorMixin :param config: The configuration of the network - wrapper. Currently only one config option is - recognized, 'start_cidr', and only for Nova - network. + wrapper. Currently only two config options are + recognized, 'start_cidr' and 'start_ipv6_cidr'. :returns: NetworkWrapper subclass instance """ if hasattr(clients, self.SERVICE_IMPL): @@ -87,6 +92,8 @@ class NetworkWrapper(object): self.config = config or {} self.owner = owner self.start_cidr = self.config.get("start_cidr", self.START_CIDR) + self.start_ipv6_cidr = self.config.get( + "start_ipv6_cidr", self.START_IPV6_CIDR) @abc.abstractmethod def create_network(self): @@ -116,6 +123,7 @@ class NetworkWrapper(object): class NeutronWrapper(NetworkWrapper): SERVICE_IMPL = consts.Service.NEUTRON SUBNET_IP_VERSION = 4 + SUBNET_IPV6_VERSION = 6 LB_METHOD = "ROUND_ROBIN" LB_PROTOCOL = "HTTP" @@ -187,9 +195,11 @@ class NeutronWrapper(NetworkWrapper): } return self.client.create_pool(pool_args) - def _generate_cidr(self): + def _generate_cidr(self, ip_version=4): # TODO(amaretskiy): Generate CIDRs unique for network, not cluster - return generate_cidr(start_cidr=self.start_cidr) + return generate_cidr( + start_cidr=self.start_cidr if ip_version == 4 + else self.start_ipv6_cidr) def create_network(self, tenant_id, **kwargs): """Create network. @@ -200,6 +210,7 @@ class NeutronWrapper(NetworkWrapper): Create an external router and add an interface to each subnet created. Default: False * subnets_num: Number of subnets to create per network. Default: 0 + * dualstack: Whether subnets should be of both IPv4 and IPv6 * dns_nameservers: Nameservers for each subnet. Default: 8.8.8.8, 8.8.4.4 * network_create_args: Additional network creation arguments. 
diff --git a/samples/tasks/scenarios/neutron/delete-subnets.json b/samples/tasks/scenarios/neutron/delete-subnets.json
new file mode 100644
index 00000000..04070a7d
--- /dev/null
+++ b/samples/tasks/scenarios/neutron/delete-subnets.json
@@ -0,0 +1,28 @@
+{
+    "NeutronSubnets.delete_subnets": [
+        {
+            "runner": {
+                "type": "constant",
+                "times": 15,
+                "concurrency": 15
+            },
+            "context": {
+                "users": {
+                    "tenants": 1,
+                    "users_per_tenant": 15,
+                    "user_choice_method": "round_robin"
+                },
+                "network": {
+                    "subnets_per_network": 15,
+                    "dualstack": true,
+                    "router": {}
+                }
+            },
+            "sla": {
+                "failure_rate": {
+                    "max": 0
+                }
+            }
+        }
+    ]
+}
diff --git a/samples/tasks/scenarios/neutron/delete-subnets.yaml b/samples/tasks/scenarios/neutron/delete-subnets.yaml
new file mode 100644
index 00000000..9e73f6cd
--- /dev/null
+++ b/samples/tasks/scenarios/neutron/delete-subnets.yaml
@@ -0,0 +1,19 @@
+---
+  NeutronSubnets.delete_subnets:
+    -
+      runner:
+        type: "constant"
+        times: 15
+        concurrency: 15
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 15
+          user_choice_method: "round_robin"
+        network:
+          subnets_per_network: 15
+          dualstack: true
+          router: {}
+      sla:
+        failure_rate:
+          max: 0
diff --git a/tasks/openstack/macro/macro.yaml b/tasks/openstack/macro/macro.yaml
index e6542f37..9d9a7700 100644
--- a/tasks/openstack/macro/macro.yaml
+++ b/tasks/openstack/macro/macro.yaml
@@ -1,10 +1,11 @@
-{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%}
+{%- macro user_context(tenants,users_per_tenant, use_existing_users, use_round_robin) -%}
 {%- if use_existing_users and caller is not defined -%} {}
 {%- else %}
 {%- if not use_existing_users %}
   users:
     tenants: {{ tenants }}
     users_per_tenant: {{ users_per_tenant }}
+    user_choice_method: {{ "round_robin" if use_round_robin else "random" }}
 {%- endif %}
 {%- if caller is defined %}
 {{ caller() }}
diff --git a/tasks/openstack/scenario/neutron.yaml b/tasks/openstack/scenario/neutron.yaml
index 0e6d5f45..f5143c3b 100644
--- a/tasks/openstack/scenario/neutron.yaml
+++ b/tasks/openstack/scenario/neutron.yaml
@@ -242,4 +242,21 @@
       runner:
         {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }}
       sla:
-        {{ no_failures_sla() }}
\ No newline at end of file
+        {{ no_failures_sla() }}
+
+  NeutronSubnets.delete_subnets:
+    -
+      runner:
+        type: "constant"
+        times: 15
+        concurrency: 15
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users, use_round_robin) }}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+        network:
+          subnets_per_network: 15
+          dualstack: True
+          router: {}
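The task configs above pin `times`, `concurrency`, `users_per_tenant` and `subnets_per_network` to the same value (15) so that every iteration gets its own user and every user index maps to an existing subnet. A hypothetical sanity check for such a config; the invariants below are inferred from the scenario logic, not enforced by rally itself:

```python
import json


def check_delete_subnets_config(task):
    """Hypothetical sanity check for a NeutronSubnets.delete_subnets task."""
    cfg = task["NeutronSubnets.delete_subnets"][0]
    users = cfg["context"]["users"]
    network = cfg["context"]["network"]

    # every user's positional index must map to an existing subnet
    assert users["users_per_tenant"] <= network["subnets_per_network"]
    # every iteration needs a user of its own, or a subnet gets deleted twice
    total_users = users["tenants"] * users["users_per_tenant"]
    assert cfg["runner"]["times"] <= total_users
    assert users["user_choice_method"] == "round_robin"


with open("samples/tasks/scenarios/neutron/delete-subnets.json") as f:
    check_delete_subnets_config(json.load(f))
```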
diff --git a/tests/unit/contexts/network/test_network.py b/tests/unit/contexts/network/test_network.py
index 2fc6e97c..4237a6be 100644
--- a/tests/unit/contexts/network/test_network.py
+++ b/tests/unit/contexts/network/test_network.py
@@ -86,7 +86,7 @@
             dns_kwargs["dns_nameservers"] = tuple(
                 dns_kwargs["dns_nameservers"])
         create_calls = [
-            mock.call(tenant,
+            mock.call(tenant, dualstack=False,
                       subnets_num=1,
                       network_create_args={"fakearg": "fake"},
                       router_create_args={"external": True},
                       **dns_kwargs)
diff --git a/tests/unit/scenarios/neutron/test_network.py b/tests/unit/scenarios/neutron/test_network.py
index 8daaf95c..5affc7ce 100644
--- a/tests/unit/scenarios/neutron/test_network.py
+++ b/tests/unit/scenarios/neutron/test_network.py
@@ -549,3 +549,50 @@
             floating_network, **floating_ip_args)
         scenario._delete_floating_ip.assert_called_once_with(
             scenario._create_floatingip.return_value["floatingip"])
+
+    @mock.patch("%s.DeleteSubnets._delete_subnet" % BASE)
+    def test_delete_subnets(self, mock__delete_subnet):
+        # do not guess which user will be used
+        self.context["user_choice_method"] = "round_robin"
+        # On the 4th iteration the second user from the second tenant should
+        # be taken, which means that the second subnet of each network in
+        # that tenant should be removed.
+        self.context["iteration"] = 4
+        # In case of `round_robin` the user is selected from the list of
+        # available users of a particular tenant, not from the list of all
+        # tenants (i.e. random choice). BUT to trigger selecting the user and
+        # tenant, the `users` key should be present in the context dict.
+        self.context["users"] = []
+
+        self.context["tenants"] = {
+            # this should not be used
+            "uuid-1": {
+                "id": "uuid-1",
+                "networks": [{"subnets": ["subnet-1"]}],
+                "users": [{"id": "user-1", "credential": mock.MagicMock()},
+                          {"id": "user-2", "credential": mock.MagicMock()}]
+            },
+            # this is expected user
+            "uuid-2": {
+                "id": "uuid-2",
+                "networks": [
+                    {"subnets": ["subnet-2", "subnet-3"]},
+                    {"subnets": ["subnet-4", "subnet-5"]}],
+                "users": [{"id": "user-3", "credential": mock.MagicMock()},
+                          {"id": "user-4", "credential": mock.MagicMock()}]
+            }
+        }
+
+        scenario = network.DeleteSubnets(self.context)
+        self.assertEqual("user-4", scenario.context["user"]["id"],
+                         "Unexpected user is taken. The wrong subnets can be "
+                         "affected (removed).")
+
+        scenario.run()
+
+        self.assertEqual(
+            [
+                mock.call({"subnet": {"id": "subnet-3"}}),
+                mock.call({"subnet": {"id": "subnet-5"}})
+            ],
+            mock__delete_subnet.call_args_list)
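The test above encodes the expected `round_robin` ordering: tenants are cycled first, then users within each tenant, so with 2 tenants of 2 users the 4th iteration lands on `user-4`. A compact simulation of that expectation; the selection formula below is an assumption about the ordering the test relies on, not a copy of rally's implementation:

```python
# Assumed round_robin ordering: tenants first, then users within each tenant.
tenants = [
    ("uuid-1", ["user-1", "user-2"]),
    ("uuid-2", ["user-3", "user-4"]),
]


def pick_user(iteration):
    idx = iteration - 1
    tenant_id, users = tenants[idx % len(tenants)]
    return tenant_id, users[(idx // len(tenants)) % len(users)]


# iterations 1..4 -> user-1, user-3, user-2, user-4
assert pick_user(4) == ("uuid-2", "user-4")
```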
diff --git a/tests/unit/wrappers/test_network.py b/tests/unit/wrappers/test_network.py
index 8d0cbe1c..73f4a1f2 100644
--- a/tests/unit/wrappers/test_network.py
+++ b/tests/unit/wrappers/test_network.py
@@ -142,7 +142,7 @@
         subnets_cidrs = iter(range(subnets_num))
         subnets_ids = iter(range(subnets_num))
         service._generate_cidr = mock.Mock(
-            side_effect=lambda: "cidr-%d" % next(subnets_cidrs))
+            side_effect=lambda v: "cidr-%d" % next(subnets_cidrs))
         service.client.create_subnet = mock.Mock(
             side_effect=lambda i: {
                 "subnet": {"id": "subnet-%d" % next(subnets_ids)}})
@@ -236,6 +236,7 @@
     def test_delete_network(self, mock_neutron_wrapper_supports_extension):
         service = self.get_wrapper()
         service.client.list_ports.return_value = {"ports": []}
+        service.client.list_subnets.return_value = {"subnets": []}
         service.client.delete_network.return_value = "foo_deleted"
         result = service.delete_network({"id": "foo_id", "router_id": None,
                                          "subnets": []})
@@ -267,6 +268,8 @@
         service.client.list_dhcp_agent_hosting_networks.return_value = (
             {"agents": [{"id": agent_id} for agent_id in agents]})
         service.client.list_ports.return_value = ({"ports": ports})
+        service.client.list_subnets.return_value = (
+            {"subnets": [{"id": id_} for id_ in subnets]})
         service.client.delete_network.return_value = "foo_deleted"
 
         result = service.delete_network(
@@ -315,6 +318,8 @@
             {"agents": [{"id": agent_id} for agent_id in agents]})
         service.client.list_ports.return_value = ({"ports": ports})
         service.client.delete_network.return_value = "foo_deleted"
+        service.client.list_subnets.return_value = {"subnets": [
+            {"id": id_} for id_ in subnets]}
 
         if should_raise:
             self.assertRaises(exception_type, service.delete_network,
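These `delete_network()` tests now mock `list_subnets` because the wrapper no longer trusts the `subnets` key of the network dict it receives; it asks Neutron for the network's subnets before deleting them. A rough mock-based sketch of that flow; the helper below is illustrative, not the wrapper's actual method:

```python
from unittest import mock

# Fake Neutron client: the network dict passed in deliberately has an empty
# "subnets" list to show that deletion no longer depends on it.
client = mock.Mock()
client.list_subnets.return_value = {
    "subnets": [{"id": "subnet-1"}, {"id": "subnet-2"}]}


def delete_network_subnets(client, network):
    """Illustrative helper: remove every subnet Neutron reports for a net."""
    for subnet in client.list_subnets(network_id=network["id"])["subnets"]:
        client.delete_subnet(subnet["id"])


delete_network_subnets(client, {"id": "net-1", "subnets": []})
assert client.delete_subnet.call_count == 2
```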