Add functional test for AggregateMultiTenancyIsolation + migrate

A bug was reported against Ocata where a non-admin user
creates a server and the user's project is isolated to a
set of hosts via the AggregateMultiTenancyIsolation filter.

The admin, using a different project, cold migrates the server,
and the filter rejects the request because, before change
I195d389ac59574724a5e7202ba1a17d92c53a676, the cold migrate
task would re-generate the RequestSpec using the request context,
which was from the admin rather than the owner of the instance.

Even though this is not a problem past Ocata, we did not have
functional test coverage for this scenario so it is added here.

This will also be used to backport the fix to Ocata to show
the regression and fix in that branch.

Change-Id: I97559607fc720fb98c3543ff3dd6095281752cd4
Related-Bug: #1774205
Related-Bug: #1675607
This commit is contained in:
Matt Riedemann 2018-05-30 13:40:35 -04:00
parent fd916f4a97
commit a638685c46
1 changed file with 120 additions and 0 deletions

View File

@ -551,3 +551,123 @@ class TestAggregateMultiTenancyIsolationFilter(
server = user_api.post_server(server_req)
self._wait_for_state_change(user_api, server, 'ACTIVE')
self.assertEqual(2, len(self.filtered_hosts))
class AggregateMultiTenancyIsolationColdMigrateTest(
        test.TestCase, integrated_helpers.InstanceHelperMixin):
    """Functional test for cold migrating a server whose project is
    isolated to a set of hosts via the AggregateMultiTenancyIsolation
    scheduler filter.

    The server is created by a non-admin user in one project and cold
    migrated by an admin in a different project; the filter must select
    the destination based on the instance owner's project, not the
    project of the admin making the migrate request.
    See Related-Bug #1774205 / #1675607.
    """

    @staticmethod
    def _create_aggregate(admin_api, name):
        # Create a host aggregate with the given name and return the
        # aggregate dict from the API response body.
        return admin_api.api_post(
            '/os-aggregates', {'aggregate': {'name': name}}).body['aggregate']

    @staticmethod
    def _add_host_to_aggregate(admin_api, aggregate, host):
        # Add the named compute host to the given aggregate via the
        # os-aggregates add_host action.
        add_host_req_body = {
            "add_host": {
                "host": host
            }
        }
        admin_api.api_post(
            '/os-aggregates/%s/action' % aggregate['id'], add_host_req_body)

    @staticmethod
    def _isolate_aggregate(admin_api, aggregate, tenant_id):
        # Set the filter_tenant_id metadata key on the aggregate so the
        # AggregateMultiTenancyIsolation filter restricts the aggregate's
        # hosts to the given tenant.
        set_meta_req_body = {
            "set_metadata": {
                "metadata": {
                    "filter_tenant_id": tenant_id
                }
            }
        }
        admin_api.api_post(
            '/os-aggregates/%s/action' % aggregate['id'], set_meta_req_body)

    def setUp(self):
        """Set up two API users (admin and non-admin, in different
        projects), three compute hosts, and two tenant-isolated
        aggregates: host1 for the admin project, host2/host3 for the
        non-admin project.
        """
        super(AggregateMultiTenancyIsolationColdMigrateTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        # Intentionally keep these separate since we want to create the
        # server with the non-admin user in a different project.
        admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1', project_id=uuids.admin_project))
        self.admin_api = admin_api_fixture.admin_api
        self.admin_api.microversion = 'latest'
        user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1', project_id=uuids.user_project))
        self.api = user_api_fixture.api
        self.api.microversion = 'latest'
        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.start_service('conductor')
        # Enable the AggregateMultiTenancyIsolation filter before starting the
        # scheduler service.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        if 'AggregateMultiTenancyIsolation' not in enabled_filters:
            enabled_filters.append('AggregateMultiTenancyIsolation')
        self.flags(
            enabled_filters=enabled_filters, group='filter_scheduler')
        # Add a custom weigher which will weigh host1, which will be in the
        # admin project aggregate, higher than the other hosts which are in
        # the non-admin project aggregate.
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        self.start_service('scheduler')
        for host in ('host1', 'host2', 'host3'):
            self.start_service('compute', host=host)
        # Create an admin-only aggregate for the admin project. This is needed
        # because if host1 is not in an aggregate with the filter_tenant_id
        # metadata key, the filter will accept that host even for the non-admin
        # project.
        admin_aggregate = self._create_aggregate(
            self.admin_api, 'admin-aggregate')
        self._add_host_to_aggregate(self.admin_api, admin_aggregate, 'host1')
        # Restrict the admin project to the admin aggregate.
        self._isolate_aggregate(
            self.admin_api, admin_aggregate, uuids.admin_project)
        # Create the tenant aggregate for the non-admin project.
        tenant_aggregate = self._create_aggregate(
            self.admin_api, 'tenant-aggregate')
        # Add two compute hosts to the tenant aggregate. We exclude host1
        # since that is weighed higher in HostNameWeigher and we want to
        # ensure the scheduler properly filters out host1 before we even get
        # to weighing the selected hosts.
        for host in ('host2', 'host3'):
            self._add_host_to_aggregate(self.admin_api, tenant_aggregate, host)
        # Restrict the non-admin project to the tenant aggregate.
        self._isolate_aggregate(
            self.admin_api, tenant_aggregate, uuids.user_project)

    def test_cold_migrate_server(self):
        """Creates a server using the non-admin project, then cold migrates
        the server and asserts the server goes to the other host in the
        isolated host aggregate via the AggregateMultiTenancyIsolation filter.
        """
        img = nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
        server_req_body = self._build_minimal_create_server_request(
            self.api, 'test_cold_migrate_server', image_uuid=img,
            networks='none')
        server = self.api.post_server({'server': server_req_body})
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Ensure the server ended up in host2 or host3
        original_host = server['OS-EXT-SRV-ATTR:host']
        self.assertNotEqual('host1', original_host)
        # Now cold migrate the server and it should end up in the other host
        # in the same tenant-isolated aggregate.
        self.admin_api.api_post(
            '/servers/%s/action' % server['id'], {'migrate': None})
        server = self._wait_for_state_change(
            self.admin_api, server, 'VERIFY_RESIZE')
        # Ensure the server is on the other host in the same aggregate.
        expected_host = 'host3' if original_host == 'host2' else 'host2'
        self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])