Pool Manager Integration with Central

Full integration of Pool Manager with Central (no longer using the proxy
backend driver).

This patch fixes:

- Fixed concurrent requests that caused a lockup issue (bug #1392762)
- Fixed bug where creating a domain fails the first time in mdns
- Fixed bug where records in recordsets do not have the correct
  status/action/serial
- Changed 'ADD' to 'CREATE' for ACTION column
- Ported Fake backend to pools
- Removed transitional pool_manager_proxy backend

Change-Id: Icb40448f760ff2a573d08a04bb4dec1f550119bb
Closes-Bug: 1392762
This commit is contained in:
rjrjr 2014-12-02 08:44:20 -07:00 committed by Kiall Mac Innes
parent 6285bdaf6d
commit 4e75f5b54c
22 changed files with 707 additions and 589 deletions

View File

@ -88,6 +88,8 @@ function configure_designate {
iniset $DESIGNATE_CONF DEFAULT state_path $DESIGNATE_STATE_PATH
iniset $DESIGNATE_CONF DEFAULT root-helper sudo designate-rootwrap $DESIGNATE_ROOTWRAP_CONF
iniset $DESIGNATE_CONF storage:sqlalchemy connection `database_connection_url designate`
iniset $DESIGNATE_CONF service:pool_manager pool_id $DESIGNATE_POOL_ID
iniset $DESIGNATE_CONF pool_manager_cache:sqlalchemy connection `database_connection_url designate_pool_manager`
iniset $DESIGNATE_CONF service:api enabled_extensions_v1 $DESIGNATE_ENABLED_EXTENSIONS_V1
iniset $DESIGNATE_CONF service:api enabled_extensions_v2 $DESIGNATE_ENABLED_EXTENSIONS_V2
@ -119,17 +121,6 @@ function configure_designate {
configure_auth_token_middleware $DESIGNATE_CONF designate $DESIGNATE_AUTH_CACHE_DIR
fi
if is_service_enabled designate-agent; then
iniset $DESIGNATE_CONF service:central backend_driver rpc
iniset $DESIGNATE_CONF service:agent backend_driver $DESIGNATE_BACKEND_DRIVER
elif is_service_enabled designate-pool-manager; then
iniset $DESIGNATE_CONF service:central backend_driver pool_manager_proxy
iniset $DESIGNATE_CONF service:pool_manager pool_id $DESIGNATE_POOL_ID
iniset $DESIGNATE_CONF pool_manager_cache:sqlalchemy connection `database_connection_url designate_pool_manager`
else
iniset $DESIGNATE_CONF service:central backend_driver $DESIGNATE_BACKEND_DRIVER
fi
iniset $DESIGNATE_CONF service:api api_host $DESIGNATE_SERVICE_HOST
iniset $DESIGNATE_CONF service:api api_base_uri $DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/
iniset $DESIGNATE_CONF service:api enable_api_v1 True
@ -192,14 +183,11 @@ function init_designate {
# Init and migrate designate database
designate-manage database sync
if is_service_enabled designate-pool-manager; then
# (Re)create designate_pool_manager cache
recreate_database designate_pool_manager utf8
# (Re)create designate_pool_manager cache
recreate_database designate_pool_manager utf8
# Init and migrate designate pool-manager-cache
designate-manage pool-manager-cache sync
fi
# Init and migrate designate pool-manager-cache
designate-manage pool-manager-cache sync
init_designate_backend
}

View File

@ -18,7 +18,7 @@ LOG_COLOR=True
ENABLED_SERVICES=rabbit,mysql,key
# Enable core Designate services
ENABLED_SERVICES+=,designate,designate-api,designate-central,designate-mdns
ENABLED_SERVICES+=,designate,designate-api,designate-central,designate-mdns,designate-pool-manager
# Optional Designate services
#ENABLED_SERVICES+=,designate-sink
@ -33,7 +33,7 @@ ENABLED_SERVICES+=,designate,designate-api,designate-central,designate-mdns
# Optional Horizon Panels
#ENABLED_SERVICES+=,horizon
# Optional core OpenStack services (needed by horizon)
# Optional core OpenStack services (needed by horizon)
#ENABLED_SERVICES+=,g-api,g-reg,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth
# Designate Options

View File

@ -14,18 +14,17 @@
# License for the specific language governing permissions and limitations
# under the License.
from designate.openstack.common import log as logging
from designate.backend.base import Backend
from designate.backend.base import PoolBackend
LOG = logging.getLogger(__name__)
def get_backend(backend_driver, central_service):
def get_backend(backend_driver, backend_options):
LOG.debug("Loading backend driver: %s" % backend_driver)
cls = Backend.get_driver(backend_driver)
cls = PoolBackend.get_driver(backend_driver)
return cls(central_service=central_service)
return cls(backend_options)
def get_server_object(backend_driver, server_id):
@ -33,11 +32,3 @@ def get_server_object(backend_driver, server_id):
cls = PoolBackend.get_driver(backend_driver)
return cls.get_server_object(backend_driver, server_id)
def get_pool_backend(backend_driver, backend_options):
LOG.debug("Loading pool backend driver: %s" % backend_driver)
cls = PoolBackend.get_driver(backend_driver)
return cls(backend_options)

View File

@ -1,6 +1,6 @@
# Copyright 2012 Managed I.T.
# Copyright 2014 eBay Inc.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
# Author: Ron Rickard <rrickard@ebay.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -17,73 +17,15 @@ from designate.openstack.common import log as logging
from designate.i18n import _LI
from designate.backend import base
LOG = logging.getLogger(__name__)
class FakeBackend(base.Backend):
class FakeBackend(base.PoolBackend):
__plugin_name__ = 'fake'
def __init__(self, *args, **kwargs):
super(FakeBackend, self).__init__(*args, **kwargs)
def create_tsigkey(self, context, tsigkey):
LOG.info(_LI('Create TSIG Key %r') % tsigkey)
def update_tsigkey(self, context, tsigkey):
LOG.info(_LI('Update TSIG Key %r') % tsigkey)
def delete_tsigkey(self, context, tsigkey):
LOG.info(_LI('Delete TSIG Key %r') % tsigkey)
def create_server(self, context, server):
LOG.info(_LI('Create Server %r') % server)
def update_server(self, context, server):
LOG.info(_LI('Update Server %r') % server)
def delete_server(self, context, server):
LOG.info(_LI('Delete Server %r') % server)
def create_domain(self, context, domain):
LOG.info(_LI('Create Domain %r') % domain)
def update_domain(self, context, domain):
LOG.info(_LI('Update Domain %r') % domain)
def delete_domain(self, context, domain):
LOG.info(_LI('Delete Domain %r') % domain)
def create_recordset(self, context, domain, recordset):
LOG.info(_LI('Create RecordSet %(domain)r / %(recordset)r') %
{'domain': domain, 'recordset': recordset})
def update_recordset(self, context, domain, recordset):
LOG.info(_LI('Update RecordSet %(domain)r / %(recordset)r') %
{'domain': domain, 'recordset': recordset})
def delete_recordset(self, context, domain, recordset):
LOG.info(_LI('Delete RecordSet %(domain)r / %(recordset)r') %
{'domain': domain, 'recordset': recordset})
def create_record(self, context, domain, recordset, record):
LOG.info(_LI('Create Record %(domain)r / %(recordset)r / %(record)r') %
{'domain': domain, 'recordset': recordset, 'record': record})
def update_record(self, context, domain, recordset, record):
LOG.info(_LI('Update Record %(domain)r / %(recordset)r / %(record)r') %
{'domain': domain, 'recordset': recordset, 'record': record})
def delete_record(self, context, domain, recordset, record):
LOG.info(_LI('Delete Record %(domain)r / %(recordset)r / %(record)r') %
{'domain': domain, 'recordset': recordset, 'record': record})
def sync_domain(self, context, domain, records):
LOG.info(_LI('Sync Domain %(domain)r / %(records)r') %
{'domain': domain, 'records': records})
def sync_record(self, context, domain, record):
LOG.info(_LI('Sync Record %(domain)r / %(record)r') %
{'domain': domain, 'record': record})
def ping(self, context):
LOG.info(_LI('Ping'))

View File

@ -1,99 +0,0 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.openstack.common import log as logging
from designate.backend import base
from designate.pool_manager import rpcapi as pool_manager_rpcapi
LOG = logging.getLogger(__name__)
POOL_MANAGER_API = None
def get_pool_manager_api():
"""
The rpc.get_client() which is called upon the API object initialization
will cause an assertion error if the designate.rpc.TRANSPORT isn't set up by
rpc.init() before.
This fixes that by creating the rpcapi when demanded.
"""
global POOL_MANAGER_API
if not POOL_MANAGER_API:
POOL_MANAGER_API = pool_manager_rpcapi.PoolManagerAPI()
return POOL_MANAGER_API
class PoolManagerProxyBackend(base.Backend):
__plugin_name__ = 'pool_manager_proxy'
def __init__(self, *args, **kwargs):
super(PoolManagerProxyBackend, self).__init__(*args, **kwargs)
self.pool_manager = get_pool_manager_api()
def create_server(self, context, server):
LOG.debug('Create Server')
domains = self.central_service.find_domains(self.admin_context)
for domain in domains:
self.pool_manager.update_domain(context, domain)
def update_server(self, context, server):
LOG.debug('Update Server')
domains = self.central_service.find_domains(self.admin_context)
for domain in domains:
self.pool_manager.update_domain(context, domain)
def delete_server(self, context, server):
LOG.debug('Delete Server')
domains = self.central_service.find_domains(self.admin_context)
for domain in domains:
self.pool_manager.update_domain(context, domain)
def create_domain(self, context, domain):
LOG.debug('Create Domain')
self.pool_manager.create_domain(context, domain)
def update_domain(self, context, domain):
LOG.debug('Update Domain')
self.pool_manager.update_domain(context, domain)
def delete_domain(self, context, domain):
LOG.debug('Delete Domain')
self.pool_manager.delete_domain(context, domain)
def create_recordset(self, context, domain, recordset):
LOG.debug('Create RecordSet')
self.pool_manager.update_domain(context, domain)
def update_recordset(self, context, domain, recordset):
LOG.debug('Update RecordSet')
self.pool_manager.update_domain(context, domain)
def delete_recordset(self, context, domain, recordset):
LOG.debug('Delete RecordSet')
self.pool_manager.update_domain(context, domain)
def create_record(self, context, domain, recordset, record):
LOG.debug('Create Record')
self.pool_manager.update_domain(context, domain)
def update_record(self, context, domain, recordset, record):
LOG.debug('Update Record')
self.pool_manager.update_domain(context, domain)
def delete_record(self, context, domain, recordset, record):
LOG.debug('Delete Record')
self.pool_manager.update_domain(context, domain)

View File

@ -22,8 +22,6 @@ cfg.CONF.register_group(cfg.OptGroup(
cfg.CONF.register_opts([
cfg.IntOpt('workers', default=None,
help='Number of worker processes to spawn'),
cfg.StrOpt('backend-driver', default='fake',
help='The backend driver to use'),
cfg.StrOpt('storage-driver', default='sqlalchemy',
help='The storage driver to use'),
cfg.ListOpt('enabled-notification-handlers', default=[],

File diff suppressed because it is too large Load Diff

View File

@ -106,7 +106,7 @@ class Service(service.RPCService):
server_id = section['server_id']
server = backend.get_server_object(backend_driver, server_id)
backend_instance = backend.get_pool_backend(
backend_instance = backend.get_backend(
backend_driver, server.backend_options)
server_backend = {
'server': server,
@ -177,6 +177,12 @@ class Service(service.RPCService):
self.central_api.update_status(
context, domain.id, status, domain.serial)
for server_backend in self.server_backends:
server = server_backend['server']
self.mdns_api.notify_zone_changed(
context, domain, self._get_destination(server), self.timeout,
self.retry_interval, self.max_retries, 0)
for server_backend in self.server_backends:
server = server_backend['server']
self.mdns_api.poll_for_serial_number(

View File

@ -33,7 +33,7 @@ TASK_STATUSES = ['ACTIVE', 'PENDING', 'DELETED', 'ERROR', 'COMPLETE']
TSIG_ALGORITHMS = ['hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256',
'hmac-sha384', 'hmac-sha512']
POOL_PROVISIONERS = ['UNMANAGED']
ACTIONS = ['ADD', 'DELETE', 'UPDATE', 'NONE']
ACTIONS = ['CREATE', 'DELETE', 'UPDATE', 'NONE']
metadata = MetaData()

View File

@ -45,8 +45,6 @@ LOG = logging.getLogger(__name__)
cfg.CONF.import_opt('storage_driver', 'designate.central',
group='service:central')
cfg.CONF.import_opt('backend_driver', 'designate.central',
group='service:central')
cfg.CONF.import_opt('auth_strategy', 'designate.api',
group='service:api')
cfg.CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
@ -301,7 +299,6 @@ class TestCase(base.BaseTestCase):
self.config(
storage_driver='sqlalchemy',
backend_driver='fake',
group='service:central'
)

View File

@ -311,7 +311,13 @@ class ApiV1DomainsTest(ApiV1Test):
self.delete('domains/%s' % domain['id'])
# Ensure we can no longer fetch the domain
# Simulate the domain having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Ensure we can no longer fetch the domain
self.get('domains/%s' % domain['id'], status_code=404)
@patch.object(central_service.Service, 'delete_domain',

View File

@ -88,6 +88,12 @@ class ApiV1RecordsTest(ApiV1Test):
self.delete('domains/%s/records/%s' % (self.domain['id'],
record_1.json['id']))
# Simulate the record 1 having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, self.domain['id']).serial
self.central_service.update_status(
self.admin_context, self.domain['id'], "SUCCESS", domain_serial)
# Get the record 2 to ensure recordset did not get deleted
rec_2_get_response = self.get('domains/%s/records/%s' %
(self.domain['id'], record_2.json['id']))
@ -100,6 +106,12 @@ class ApiV1RecordsTest(ApiV1Test):
self.delete('domains/%s/records/%s' % (self.domain['id'],
record_2.json['id']))
# Simulate the record 2 having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, self.domain['id']).serial
self.central_service.update_status(
self.admin_context, self.domain['id'], "SUCCESS", domain_serial)
# Re-create as a different type, but use the same name
fixture = self.get_record_fixture('CNAME')
fixture.update({
@ -452,6 +464,12 @@ class ApiV1RecordsTest(ApiV1Test):
self.delete('domains/%s/records/%s' % (self.domain['id'],
record['id']))
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, self.domain['id']).serial
self.central_service.update_status(
self.admin_context, self.domain['id'], "SUCCESS", domain_serial)
# Ensure we can no longer fetch the record
self.get('domains/%s/records/%s' % (self.domain['id'],
record['id']),

View File

@ -188,6 +188,8 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
fixture = self.get_ptr_fixture()
context = self.get_context(tenant='a')
elevated_context = context.elevated()
elevated_context.all_tenants = True
fip = self.network_api.fake.allocate_floatingip(context.tenant)
@ -195,6 +197,19 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context.tenant
}
domain_id = self.central_service.find_record(
elevated_context, criterion=criterion).domain_id
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_context, domain_id).serial
self.central_service.update_status(
elevated_context, domain_id, "SUCCESS", domain_serial)
# Unset PTR ('ptrdname' is None aka null in JSON)
response = self.client.patch_json(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
@ -203,6 +218,12 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
self.assertEqual(None, response.json)
self.assertEqual(200, response.status_int)
# Simulate the unset on the backend
domain_serial = self.central_service.get_domain(
elevated_context, domain_id).serial
self.central_service.update_status(
elevated_context, domain_id, "SUCCESS", domain_serial)
fip = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
self.assertEqual(None, fip['ptrdname'])

View File

@ -173,8 +173,8 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
data = [self.create_recordset(self.domain,
name='x-%s.%s' % (i, self.domain['name']))
for i in xrange(0, 10)]
data.insert(0, soa)
data.insert(0, ns)
data.insert(0, soa)
self._assert_paging(data, url, key='recordsets')
@ -245,6 +245,12 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
# now delete the domain and get the recordsets
self.client.delete('/zones/%s' % zone['id'], status=204)
# Simulate the domain having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, zone['id']).serial
self.central_service.update_status(
self.admin_context, zone['id'], "SUCCESS", domain_serial)
# Check that we get a domain_not_found error
self._assert_exception('domain_not_found', 404, self.client.get, url)

View File

@ -38,7 +38,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
response = self.client.post_json('/zones/', {'zone': fixture})
# Check the headers are what we expect
self.assertEqual(201, response.status_int)
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
@ -49,7 +49,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
# Check the values returned are what we expect
self.assertIn('id', response.json['zone'])
self.assertIn('created_at', response.json['zone'])
self.assertEqual('ACTIVE', response.json['zone']['status'])
self.assertEqual('PENDING', response.json['zone']['status'])
self.assertIsNone(response.json['zone']['updated_at'])
for k in fixture:
@ -190,7 +190,7 @@ class ApiV2ZonesTest(ApiV2TestCase):
# Check the values returned are what we expect
self.assertIn('id', response.json['zone'])
self.assertIn('created_at', response.json['zone'])
self.assertEqual('ACTIVE', response.json['zone']['status'])
self.assertEqual('PENDING', response.json['zone']['status'])
self.assertIsNone(response.json['zone']['updated_at'])
self.assertEqual(zone['name'], response.json['zone']['name'])
self.assertEqual(zone['email'], response.json['zone']['email'])
@ -230,10 +230,10 @@ class ApiV2ZonesTest(ApiV2TestCase):
body = {'zone': {'email': 'prefix-%s' % zone['email']}}
response = self.client.patch_json('/zones/%s' % zone['id'], body,
status=200)
status=202)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect

View File

@ -473,10 +473,9 @@ class CentralServiceTest(CentralTestCase):
self.assertEqual(domain['email'], values['email'])
self.assertIn('status', domain)
# Ensure we sent exactly 1 notification, plus 2 for SOA and NS
# recordsets
# Ensure we sent exactly 1 notification
notifications = self.get_notifications()
self.assertEqual(len(notifications), 3)
self.assertEqual(len(notifications), 1)
# Ensure the notification wrapper contains the correct info
ctxt, message, priority, retry = notifications[0]
@ -849,11 +848,9 @@ class CentralServiceTest(CentralTestCase):
self.assertTrue(domain.serial > original_serial)
self.assertEqual('info@example.net', domain.email)
# Ensure we sent exactly 2 notifications
# One for the domain update and one to
# update the SOA recordset
# Ensure we sent exactly 1 notification
notifications = self.get_notifications()
self.assertEqual(len(notifications), 2)
self.assertEqual(len(notifications), 1)
# Ensure the notification wrapper contains the correct info
ctxt, message, priority, retry = notifications[0]
@ -912,9 +909,21 @@ class CentralServiceTest(CentralTestCase):
# Delete the domain
self.central_service.delete_domain(self.admin_context, domain['id'])
# Fetch the domain again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.DomainNotFound):
self.central_service.get_domain(self.admin_context, domain['id'])
# Fetch the domain
deleted_domain = self.central_service.get_domain(
self.admin_context, domain['id'])
# Ensure the domain is marked for deletion
self.assertEqual(deleted_domain.id, domain.id)
self.assertEqual(deleted_domain.name, domain.name)
self.assertEqual(deleted_domain.email, domain.email)
self.assertEqual(deleted_domain.status, 'PENDING')
self.assertEqual(deleted_domain.tenant_id, domain.tenant_id)
self.assertEqual(deleted_domain.parent_domain_id,
domain.parent_domain_id)
self.assertEqual(deleted_domain.action, 'DELETE')
self.assertEqual(deleted_domain.serial, domain.serial)
self.assertEqual(deleted_domain.pool_id, domain.pool_id)
# Ensure we sent exactly 1 notification
notifications = self.get_notifications()
@ -1731,15 +1740,33 @@ class CentralServiceTest(CentralTestCase):
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'])
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the domain serial number was updated
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.assertNotEqual(new_domain_serial, domain_serial)
# Fetch the record
deleted_record = self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the record is marked for deletion
self.assertEqual(deleted_record.id, record.id)
self.assertEqual(deleted_record.data, record.data)
self.assertEqual(deleted_record.domain_id, record.domain_id)
self.assertEqual(deleted_record.status, 'PENDING')
self.assertEqual(deleted_record.tenant_id, record.tenant_id)
self.assertEqual(deleted_record.recordset_id, record.recordset_id)
self.assertEqual(deleted_record.action, 'DELETE')
self.assertEqual(deleted_record.serial, new_domain_serial)
def test_delete_record_without_incrementing_serial(self):
domain = self.create_domain()
@ -1748,26 +1775,34 @@ class CentralServiceTest(CentralTestCase):
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain so we have the latest serial number
domain_before = self.central_service.get_domain(
self.admin_context, domain['id'])
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'],
increment_serial=False)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the domains serial number was not updated
domain_after = self.central_service.get_domain(
self.admin_context, domain['id'])
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id'])['serial']
self.assertEqual(new_domain_serial, domain_serial)
self.assertEqual(domain_before['serial'], domain_after['serial'])
# Fetch the record
deleted_record = self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the record is marked for deletion
self.assertEqual(deleted_record.id, record.id)
self.assertEqual(deleted_record.data, record.data)
self.assertEqual(deleted_record.domain_id, record.domain_id)
self.assertEqual(deleted_record.status, 'PENDING')
self.assertEqual(deleted_record.tenant_id, record.tenant_id)
self.assertEqual(deleted_record.recordset_id, record.recordset_id)
self.assertEqual(deleted_record.action, 'DELETE')
self.assertEqual(deleted_record.serial, new_domain_serial)
def test_delete_record_incorrect_domain_id(self):
domain = self.create_domain()
@ -1880,6 +1915,18 @@ class CentralServiceTest(CentralTestCase):
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
domain_id = self.central_service.find_record(
elevated_a, criterion).domain_id
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
self.network_api.fake.deallocate_floatingip(fip['id'])
with testtools.ExpectedException(exceptions.NotFound):
@ -1887,9 +1934,6 @@ class CentralServiceTest(CentralTestCase):
context_a, fip['region'], fip['id'])
# Ensure that the record is still in DB (No invalidation)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
self.central_service.find_record(elevated_a, criterion)
# Now give the fip id to tenant 'b' and see that it gets deleted
@ -1901,6 +1945,12 @@ class CentralServiceTest(CentralTestCase):
context_b, fip['region'], fip['id'])
self.assertEqual(None, fip_ptr['ptrdname'])
# Simulate the invalidation on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
# Ensure that the old record for tenant a for the fip now owned by
# tenant b is gone
with testtools.ExpectedException(exceptions.RecordNotFound):
@ -1965,15 +2015,24 @@ class CentralServiceTest(CentralTestCase):
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
domain_id = self.central_service.find_record(
elevated_a, criterion).domain_id
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
self.network_api.fake.deallocate_floatingip(fip['id'])
fips = self.central_service.list_floatingips(context_a)
self.assertEqual([], fips)
# Ensure that the record is still in DB (No invalidation)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
self.central_service.find_record(elevated_a, criterion)
# Now give the fip id to tenant 'b' and see that it gets deleted
@ -1985,6 +2044,12 @@ class CentralServiceTest(CentralTestCase):
self.assertEqual(1, len(fips))
self.assertEqual(None, fips[0]['ptrdname'])
# Simulate the invalidation on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
# Ensure that the old record for tenant a for the fip now owned by
# tenant b is gone
with testtools.ExpectedException(exceptions.RecordNotFound):
@ -2007,7 +2072,7 @@ class CentralServiceTest(CentralTestCase):
self.assertEqual(None, fip_ptr['description'])
self.assertIsNotNone(fip_ptr['ttl'])
def test_set_floatingip_removes_old_rrset_and_record(self):
def test_set_floatingip_removes_old_record(self):
self.create_server()
context_a = self.get_context(tenant='a')
@ -2025,10 +2090,22 @@ class CentralServiceTest(CentralTestCase):
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
domain_id = self.central_service.find_record(
elevated_a, criterion).domain_id
fixture2 = self.get_ptr_fixture(fixture=1)
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture2)
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
count = self.central_service.count_records(
elevated_a, {'managed_resource_id': fip['id']})
@ -2044,6 +2121,12 @@ class CentralServiceTest(CentralTestCase):
self.central_service.update_floatingip(
context_b, fip['region'], fip['id'], fixture)
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
count = self.central_service.count_records(
elevated_a, {'managed_resource_id': fip['id']})
@ -2474,6 +2557,84 @@ class CentralServiceTest(CentralTestCase):
with testtools.ExpectedException(exceptions.PoolNotFound):
self.central_service.get_pool(self.admin_context, pool['id'])
def test_update_status_delete_domain(self):
# Create a domain
domain = self.create_domain()
# Reset the list of notifications
self.reset_notifications()
# Delete the domain
self.central_service.delete_domain(self.admin_context, domain['id'])
# Simulate the domain having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the domain again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.DomainNotFound):
self.central_service.get_domain(self.admin_context, domain['id'])
def test_update_status_delete_last_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'])
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
def test_update_status_delete_last_record_without_incrementing_serial(
self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'],
increment_serial=False)
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the domains serial number was not updated
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.assertEqual(new_domain_serial, domain_serial)
def test_create_zone_transfer_request(self):
domain = self.create_domain()
zone_transfer_request = self.create_zone_transfer_request(domain)

View File

@ -81,7 +81,13 @@ class NeutronFloatingHandlerTest(TestCase, NotificationHandlerMixin):
self.plugin.process_notification(
self.admin_context, event_type, fixture['payload'])
# Ensure we now have exactly 0 records
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, self.domain_id).serial
self.central_service.update_status(
self.admin_context, self.domain_id, "SUCCESS", domain_serial)
# Ensure we now have exactly 0 records, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)
@ -103,7 +109,7 @@ class NeutronFloatingHandlerTest(TestCase, NotificationHandlerMixin):
criterion = {'domain_id': self.domain_id}
# Ensure we start with at least 1 record, plus SOA & NS
# Ensure we start with at least 1 record, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)
self.assertEqual(3, len(records))
@ -111,7 +117,13 @@ class NeutronFloatingHandlerTest(TestCase, NotificationHandlerMixin):
self.plugin.process_notification(
self.admin_context, event_type, fixture['payload'])
# Ensure we now have exactly 0 records
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, self.domain_id).serial
self.central_service.update_status(
self.admin_context, self.domain_id, "SUCCESS", domain_serial)
# Ensure we now have exactly 0 records, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)

View File

@ -72,7 +72,7 @@ class NovaFixedHandlerTest(TestCase, NotificationHandlerMixin):
criterion = {'domain_id': self.domain_id}
# Ensure we start with at least 1 record
# Ensure we start with at least 1 record, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)
@ -81,7 +81,13 @@ class NovaFixedHandlerTest(TestCase, NotificationHandlerMixin):
self.plugin.process_notification(
self.admin_context, event_type, fixture['payload'])
# Ensure we now have exactly 0 records
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, self.domain_id).serial
self.central_service.update_status(
self.admin_context, self.domain_id, "SUCCESS", domain_serial)
# Ensure we now have exactly 0 records, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)

View File

@ -0,0 +1,20 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.tests import TestCase
class PoolManagerTestCase(TestCase):
    """Common base class for Pool Manager service tests."""

View File

@ -0,0 +1,44 @@
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from designate.tests.test_pool_manager import PoolManagerTestCase
class PoolManagerServiceTest(PoolManagerTestCase):
    """Exercises the Pool Manager service wired up with the fake backend."""

    def setUp(self):
        super(PoolManagerServiceTest, self).setUp()

        # Register the fake backend's configuration before the service
        # starts: wildcard defaults for every server in the pool, then the
        # connection options for one specific server.
        self._register_backend_group(
            'backend:fake:*',
            [cfg.StrOpt('masters', default='127.0.0.1:5354')])
        self._register_backend_group(
            'backend:fake:f278782a-07dc-4502-9177-b5d85c5f7c7e',
            [cfg.StrOpt('host', default='127.0.0.1'),
             cfg.IntOpt('port', default=53)])

        self.service = self.start_service('pool_manager')

    @staticmethod
    def _register_backend_group(group_name, opts):
        # Create the option group and attach its options to the global
        # configuration object.
        cfg.CONF.register_group(cfg.OptGroup(name=group_name))
        cfg.CONF.register_opts(opts, group=group_name)

    def test_stop(self):
        # NOTE: Start is already done by the fixture in start_service()
        self.service.stop()

View File

@ -923,8 +923,8 @@ class StorageTestCase(object):
ns = self.storage.find_recordset(self.admin_context,
criterion={'domain_id': domain['id'],
'type': "NS"})
created.insert(0, soa)
created.insert(0, ns)
created.insert(0, soa)
# Ensure we can page through the results.
self._ensure_paging(created, self.storage.find_recordsets)
@ -1309,7 +1309,7 @@ class StorageTestCase(object):
'type': "NS"})
for r in ns['records']:
created.insert(0, r)
created.append(soa['records'][0])
created.insert(0, soa['records'][0])
# Ensure we can page through the results.
self._ensure_paging(created, self.storage.find_records)

View File

@ -70,11 +70,10 @@ designate.backend =
bind9 = designate.backend.impl_bind9:Bind9Backend
powerdns = designate.backend.impl_powerdns:PowerDNSBackend
fake = designate.backend.impl_fake:FakeBackend
nsd4slave = designate.backend.impl_nsd4slave:NSD4SlaveBackend
multi = designate.backend.impl_multi:MultiBackend
dynect = designate.backend.impl_dynect:DynECTBackend
ipa = designate.backend.impl_ipa:IPABackend
pool_manager_proxy = designate.backend.impl_pool_manager_proxy:PoolManagerProxyBackend
#nsd4slave = designate.backend.impl_nsd4slave:NSD4SlaveBackend
#multi = designate.backend.impl_multi:MultiBackend
#dynect = designate.backend.impl_dynect:DynECTBackend
#ipa = designate.backend.impl_ipa:IPABackend
designate.network_api =
fake = designate.network_api.fake:FakeNetworkAPI