Introduce RecordSets concept to core, and add initial RRSet API to v2

The v2 RecordSet API is experimental. This commit is only intended to provide
a stable experience with the V1 API.

Change-Id: I168401d8ce3066a19d3538b3ec5cd36338b10b44
Kiall Mac Innes 2013-11-20 14:33:37 +00:00
parent aa780afb12
commit ce325c9c04
50 changed files with 3193 additions and 1306 deletions
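For readers new to the model this commit introduces: a RecordSet groups every record in a zone that shares a name and type, and the individual records keep only the data that varies between them. A rough sketch of that split, using the field names exposed by the v2 views added below; the identifiers and addresses are invented for illustration.

# One recordset per (name, type) pair within a zone; name, type and ttl
# live here rather than on each record.
recordset = {
    'id': 'rs-1',
    'zone_id': 'zone-1',
    'name': 'www.example.org.',
    'type': 'A',
    'ttl': 3600,
}

# Each record holds only its own data (plus a priority for MX/SRV types).
records = [
    {'recordset_id': 'rs-1', 'data': '192.0.2.1'},
    {'recordset_id': 'rs-1', 'data': '192.0.2.2'},
]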

.gitignore

@ -23,4 +23,5 @@ designate/versioninfo
*.DS_Store
*.idea
/bind9
/dnsmasq
.testrepository/*


@ -15,6 +15,7 @@
# under the License.
import flask
from designate.openstack.common import log as logging
from designate import exceptions
from designate import schema
from designate.central import rpcapi as central_rpcapi
@ -25,6 +26,55 @@ record_schema = schema.Schema('v1', 'record')
records_schema = schema.Schema('v1', 'records')
def _find_recordset(context, domain_id, name, type):
return central_api.find_recordset(context, {
'domain_id': domain_id,
'name': name,
'type': type,
})
def _find_or_create_recordset(context, domain_id, name, type, ttl):
try:
recordset = _find_recordset(context, domain_id, name, type)
except exceptions.RecordSetNotFound:
recordset = central_api.create_recordset(context, domain_id, {
'name': name,
'type': type,
'ttl': ttl,
})
return recordset
def _extract_record_values(values):
record_values = ('data', 'priority', 'comment',)
return dict((k, values[k]) for k in record_values if k in values)
def _extract_recordset_values(values):
recordset_values = ('name', 'type', 'ttl',)
return dict((k, values[k]) for k in recordset_values if k in values)
def _format_record_v1(record, recordset):
record.update({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
})
return record
def _fetch_domain_recordsets(context, domain_id):
criterion = {'domain_id': domain_id}
recordsets = central_api.find_recordsets(context, criterion)
return dict((r['id'], r) for r in recordsets)
@blueprint.route('/schemas/record', methods=['GET'])
def get_record_schema():
return flask.jsonify(record_schema.raw)
@ -41,7 +91,17 @@ def create_record(domain_id):
values = flask.request.json
record_schema.validate(values)
record = central_api.create_record(context, domain_id, values)
recordset = _find_or_create_recordset(context,
domain_id,
values['name'],
values['type'],
values.get('ttl', None))
record = central_api.create_record(context, domain_id, recordset['id'],
_extract_record_values(values))
record = _format_record_v1(record, recordset)
response = flask.jsonify(record_schema.filter(record))
response.status_int = 201
@ -55,7 +115,19 @@ def create_record(domain_id):
def get_records(domain_id):
context = flask.request.environ.get('context')
records = central_api.find_records(context, domain_id)
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return an empty records array instead of a domain not found
central_api.get_domain(context, domain_id)
records = central_api.find_records(context, {'domain_id': domain_id})
recordsets = _fetch_domain_recordsets(context, domain_id)
def _inner(record):
recordset = recordsets[record['recordset_id']]
return _format_record_v1(record, recordset)
records = [_inner(r) for r in records]
return flask.jsonify(records_schema.filter({'records': records}))
@ -65,7 +137,17 @@ def get_records(domain_id):
def get_record(domain_id, record_id):
context = flask.request.environ.get('context')
record = central_api.get_record(context, domain_id, record_id)
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
central_api.get_domain(context, domain_id)
criterion = {'domain_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
recordset = central_api.get_recordset(
context, domain_id, record['recordset_id'])
record = _format_record_v1(record, recordset)
return flask.jsonify(record_schema.filter(record))
@ -76,12 +158,56 @@ def update_record(domain_id, record_id):
context = flask.request.environ.get('context')
values = flask.request.json
record = central_api.get_record(context, domain_id, record_id)
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
central_api.get_domain(context, domain_id)
# Find the record
criterion = {'domain_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
# Find the associated recordset
recordset = central_api.get_recordset(
context, domain_id, record['recordset_id'])
# Filter out any extra fields from the fetched record
record = record_schema.filter(record)
# Ensure all the API V1 fields are in place
record = _format_record_v1(record, recordset)
# Name and Type can't be updated on existing records
if 'name' in values and record['name'] != values['name']:
raise exceptions.InvalidOperation('The name field is immutable')
if 'type' in values and record['type'] != values['type']:
raise exceptions.InvalidOperation('The type field is immutable')
# TTL Updates should be applied to the RecordSet
update_recordset = False
if 'ttl' in values and record['ttl'] != values['ttl']:
update_recordset = True
# Apply the updated fields to the record
record.update(values)
# Validate the record
record_schema.validate(record)
record = central_api.update_record(context, domain_id, record_id, values)
# Update the record
record = central_api.update_record(
context, domain_id, recordset['id'], record_id,
_extract_record_values(values))
# Update the recordset (if necessary)
if update_recordset:
recordset = central_api.update_recordset(
context, domain_id, recordset['id'],
_extract_recordset_values(values))
# Format and return the response
record = _format_record_v1(record, recordset)
return flask.jsonify(record_schema.filter(record))
@ -91,6 +217,15 @@ def update_record(domain_id, record_id):
def delete_record(domain_id, record_id):
context = flask.request.environ.get('context')
central_api.delete_record(context, domain_id, record_id)
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
central_api.get_domain(context, domain_id)
# Find the record
criterion = {'domain_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
central_api.delete_record(
context, domain_id, record['recordset_id'], record_id)
return flask.Response(status=200)
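The V1 blueprint above keeps its flat record API by mapping each request onto a recordset via _find_or_create_recordset. A self-contained sketch of that idea with an in-memory stand-in for the Central API; the stub class and its behaviour are invented for illustration and are not the real rpcapi.

import uuid


class RecordSetNotFound(Exception):
    """Stand-in for designate.exceptions.RecordSetNotFound."""


class FakeCentralAPI(object):
    """In-memory stand-in for the Central API used by the blueprint."""

    def __init__(self):
        self.recordsets = []

    def find_recordset(self, context, criterion):
        for rs in self.recordsets:
            if all(rs.get(k) == v for k, v in criterion.items()):
                return rs
        raise RecordSetNotFound()

    def create_recordset(self, context, domain_id, values):
        rs = dict(values, id=str(uuid.uuid4()), domain_id=domain_id)
        self.recordsets.append(rs)
        return rs


central_api = FakeCentralAPI()


def _find_or_create_recordset(context, domain_id, name, type, ttl):
    try:
        return central_api.find_recordset(
            context, {'domain_id': domain_id, 'name': name, 'type': type})
    except RecordSetNotFound:
        return central_api.create_recordset(
            context, domain_id, {'name': name, 'type': type, 'ttl': ttl})


# Two V1-style creates with the same name and type end up in one recordset.
first = _find_or_create_recordset(None, 'zone-1', 'www.example.org.', 'A', None)
second = _find_or_create_recordset(None, 'zone-1', 'www.example.org.', 'A', None)
assert first['id'] == second['id']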


@ -0,0 +1,156 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from designate.central import rpcapi as central_rpcapi
from designate.openstack.common import log as logging
from designate import schema
from designate import utils
from designate.api.v2.controllers import rest
from designate.api.v2.views import records as records_view
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
class RecordsController(rest.RestController):
_view = records_view.RecordsView()
_resource_schema = schema.Schema('v2', 'record')
_collection_schema = schema.Schema('v2', 'records')
@pecan.expose(template='json:', content_type='application/json')
def get_one(self, zone_id, recordset_id, record_id):
""" Get Record """
# TODO(kiall): Validate we have a sane UUID for zone_id, recordset_id
# and record_id
request = pecan.request
context = request.environ['context']
record = central_api.get_record(context, zone_id, recordset_id,
record_id)
return self._view.detail(context, request, record)
@pecan.expose(template='json:', content_type='application/json')
def get_all(self, zone_id, recordset_id, **params):
""" List Records """
request = pecan.request
context = request.environ['context']
# Extract the pagination params
#marker = params.pop('marker', None)
#limit = int(params.pop('limit', 30))
# Extract any filter params.
accepted_filters = ('data', )
criterion = dict((k, params[k]) for k in accepted_filters
if k in params)
criterion['domain_id'] = zone_id
criterion['recordset_id'] = recordset_id
records = central_api.find_records(context, criterion)
return self._view.list(context, request, records)
@pecan.expose(template='json:', content_type='application/json')
def post_all(self, zone_id, recordset_id):
""" Create Record """
request = pecan.request
response = pecan.response
context = request.environ['context']
body = request.body_dict
# Validate the request conforms to the schema
self._resource_schema.validate(body)
# Convert from APIv2 -> Central format
values = self._view.load(context, request, body)
# Create the records
record = central_api.create_record(context, zone_id, recordset_id,
values)
# Prepare the response headers
if record['status'] == 'PENDING':
response.status_int = 202
else:
response.status_int = 201
response.headers['Location'] = self._view._get_resource_href(
request, record)
# Prepare and return the response body
return self._view.detail(context, request, record)
@pecan.expose(template='json:', content_type='application/json')
@pecan.expose(template='json:', content_type='application/json-patch+json')
def patch_one(self, zone_id, recordset_id, record_id):
""" Update Record """
request = pecan.request
context = request.environ['context']
body = request.body_dict
response = pecan.response
# TODO(kiall): Validate we have a sane UUID for zone_id and
# recordset_id
# Fetch the existing record
record = central_api.get_record(context, zone_id, recordset_id,
record_id)
# Convert to APIv2 Format
record = self._view.detail(context, request, record)
if request.content_type == 'application/json-patch+json':
raise NotImplemented('json-patch not implemented')
else:
record = utils.deep_dict_merge(record, body)
# Validate the request conforms to the schema
self._resource_schema.validate(record)
values = self._view.load(context, request, body)
record = central_api.update_record(
context, zone_id, recordset_id, record_id, values)
if record['status'] == 'PENDING':
response.status_int = 202
else:
response.status_int = 200
return self._view.detail(context, request, record)
@pecan.expose(template=None, content_type='application/json')
def delete_one(self, zone_id, recordset_id, record_id):
""" Delete Record """
request = pecan.request
response = pecan.response
context = request.environ['context']
# TODO(kiall): Validate we have a sane UUID for zone_id and
# recordset_id
record = central_api.delete_record(context, zone_id, recordset_id,
record_id)
if record['status'] == 'DELETING':
response.status_int = 202
else:
response.status_int = 204
# NOTE: This is a hack and a half.. But Pecan needs it.
return ''
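Both PATCH handlers in this commit fetch the existing resource, convert it to API format, merge the request body over it, then re-validate the result. The merge below mimics what designate.utils.deep_dict_merge is assumed to do (a recursive dict merge in which the patch wins); it is a stand-in for illustration, not the real helper.

def deep_merge(existing, patch):
    # Recursively merge `patch` onto `existing`; values in the patch win.
    result = dict(existing)
    for key, value in patch.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = deep_merge(result[key], value)
        else:
            result[key] = value
    return result


existing = {'record': {'data': '192.0.2.1', 'description': 'old text'}}
patch = {'record': {'data': '192.0.2.7'}}

merged = deep_merge(existing, patch)
# merged == {'record': {'data': '192.0.2.7', 'description': 'old text'}}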


@ -14,36 +14,131 @@
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from designate.api.v2.controllers import rest
from designate.central import rpcapi as central_rpcapi
from designate.openstack.common import log as logging
from designate import schema
from designate import utils
from designate.api.v2.controllers import rest
from designate.api.v2.views import recordsets as recordsets_view
from designate.api.v2.controllers import records
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
class RecordSetsController(rest.RestController):
_view = recordsets_view.RecordSetsView()
_resource_schema = schema.Schema('v2', 'recordset')
_collection_schema = schema.Schema('v2', 'recordsets')
records = records.RecordsController()
@pecan.expose(template='json:', content_type='application/json')
def get_one(self, zone_id, recordset_id):
""" Get RecordSet """
pass
# TODO(kiall): Validate we have a sane UUID for zone_id and
# recordset_id
request = pecan.request
context = request.environ['context']
recordset = central_api.get_recordset(context, zone_id, recordset_id)
return self._view.detail(context, request, recordset)
@pecan.expose(template='json:', content_type='application/json')
def get_all(self, zone_id):
def get_all(self, zone_id, **params):
""" List RecordSets """
pass
request = pecan.request
context = request.environ['context']
# Extract the pagination params
#marker = params.pop('marker', None)
#limit = int(params.pop('limit', 30))
# Extract any filter params.
accepted_filters = ('name', 'type', 'ttl', )
criterion = dict((k, params[k]) for k in accepted_filters
if k in params)
criterion['domain_id'] = zone_id
recordsets = central_api.find_recordsets(context, criterion)
return self._view.list(context, request, recordsets)
@pecan.expose(template='json:', content_type='application/json')
def post_all(self, zone_id):
""" Create RecordSet """
pass
request = pecan.request
response = pecan.response
context = request.environ['context']
body = request.body_dict
# Validate the request conforms to the schema
self._resource_schema.validate(body)
# Convert from APIv2 -> Central format
values = self._view.load(context, request, body)
# Create the recordset
recordset = central_api.create_recordset(context, zone_id, values)
# Prepare the response headers
response.status_int = 201
response.headers['Location'] = self._view._get_resource_href(
request, recordset)
# Prepare and return the response body
return self._view.detail(context, request, recordset)
@pecan.expose(template='json:', content_type='application/json')
@pecan.expose(template='json:', content_type='application/json-patch+json')
def patch_one(self, zone_id, recordset_id):
""" Update RecordSet """
pass
request = pecan.request
context = request.environ['context']
body = request.body_dict
response = pecan.response
@pecan.expose(template='json:', content_type='application/json')
# TODO(kiall): Validate we have a sane UUID for zone_id and
# recordset_id
# Fetch the existing recordset
recordset = central_api.get_recordset(context, zone_id, recordset_id)
# Convert to APIv2 Format
recordset = self._view.detail(context, request, recordset)
if request.content_type == 'application/json-patch+json':
raise NotImplemented('json-patch not implemented')
else:
recordset = utils.deep_dict_merge(recordset, body)
# Validate the request conforms to the schema
self._resource_schema.validate(recordset)
values = self._view.load(context, request, body)
recordset = central_api.update_recordset(
context, zone_id, recordset_id, values)
response.status_int = 200
return self._view.detail(context, request, recordset)
@pecan.expose(template=None, content_type='application/json')
def delete_one(self, zone_id, recordset_id):
""" Delete RecordSet """
pass
request = pecan.request
response = pecan.response
context = request.environ['context']
# TODO(kiall): Validate we have a sane UUID for zone_id and
# recordset_id
central_api.delete_recordset(context, zone_id, recordset_id)
response.status_int = 204
# NOTE: This is a hack and a half.. But Pecan needs it.
return ''
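A hypothetical request and response body for the new recordset create call, based on the fields the v2 recordset view added later in this commit returns and the ones its load() accepts; all values, identifiers and the link URL are invented.

# Client-supplied body, validated against the 'recordset' schema.
create_body = {
    'recordset': {
        'name': 'mail.example.org.',
        'type': 'MX',
        'ttl': 3600,
        'description': 'Mail for example.org',
    }
}

# Rough shape of the 201 response built by the view; server-managed fields
# (id, zone_id, version, timestamps, links) are filled in by the API.
create_response = {
    'recordset': {
        'id': 'rs-1',
        'zone_id': 'zone-1',
        'name': 'mail.example.org.',
        'type': 'MX',
        'ttl': 3600,
        'description': 'Mail for example.org',
        'version': 1,
        'created_at': '2013-11-20T14:33:37Z',
        'updated_at': None,
        'links': {'self': 'https://dns.example.com/v2/zones/zone-1/recordsets/rs-1'},
    }
}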


@ -67,7 +67,29 @@ class ZonesController(rest.RestController):
""" Export zonefile """
servers = central_api.get_domain_servers(context, zone_id)
domain = central_api.get_domain(context, zone_id)
records = central_api.find_records(context, zone_id)
criterion = {'domain_id': zone_id}
recordsets = central_api.find_recordsets(context, criterion)
records = []
for recordset in recordsets:
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
raw_records = central_api.find_records(context, criterion)
for record in raw_records:
records.append({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'priority': record['priority'],
'data': record['data'],
})
return utils.render_template('bind9-zone.jinja2',
servers=servers,
domain=domain,
@ -84,7 +106,7 @@ class ZonesController(rest.RestController):
#limit = int(params.pop('limit', 30))
# Extract any filter params.
accepted_filters = ('name', 'email')
accepted_filters = ('name', 'email', )
criterion = dict((k, params[k]) for k in accepted_filters
if k in params)
@ -141,7 +163,6 @@ class ZonesController(rest.RestController):
try:
self._create_records(context, zone['id'], dnspython_zone)
except exceptions.Base as e:
central_api.delete_domain(context, zone['id'])
raise e
@ -244,20 +265,17 @@ class ZonesController(rest.RestController):
def _record2json(self, record_type, rdata):
if record_type == 'MX':
return {
'type': record_type,
'data': rdata.exchange.to_text(),
'priority': str(rdata.preference)
}
elif record_type == 'SRV':
return {
'type': record_type,
'data': '%s %s %s' % (str(rdata.weight), str(rdata.port),
rdata.target.to_text()),
'priority': str(rdata.priority)
}
else:
return {
'type': record_type,
'data': rdata.to_text()
}
@ -266,12 +284,23 @@ class ZonesController(rest.RestController):
for record_name in dnspython_zone.nodes.keys():
for rdataset in dnspython_zone.nodes[record_name]:
record_type = rdatatype.to_text(rdataset.rdtype)
if record_type == 'SOA':
continue
# Create the recordset
values = {
'domain_id': zone_id,
'name': record_name.to_text(),
'type': record_type,
}
recordset = central_api.create_recordset(
context, zone_id, values)
for rdata in rdataset:
if record_type == 'SOA':
# Don't create SOA records
pass
elif (record_type == 'NS'
and record_name == dnspython_zone.origin):
if (record_type == 'NS'
and record_name == dnspython_zone.origin):
# Don't create NS records for the domain, they've been
# taken care of as servers
pass
@ -279,8 +308,9 @@ class ZonesController(rest.RestController):
# Everything else, including delegation NS, gets
# created
values = self._record2json(record_type, rdata)
values['name'] = record_name.to_text()
central_api.create_record(context, zone_id, values)
central_api.create_record(
context, zone_id, recordset['id'], values)
def _parse_zonefile(self, request):
""" Parses a POSTed zonefile into a dnspython zone object """


@ -0,0 +1,54 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.api.v2.views import base as base_view
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class RecordsView(base_view.BaseView):
""" Model a Record API response as a python dictionary """
_resource_name = 'record'
_collection_name = 'records'
def detail(self, context, request, record):
""" Detailed view of a record """
return {
"record": {
"id": record['id'],
"recordset_id": record['recordset_id'],
"data": record['data'],
"description": record['description'],
"version": record['version'],
"created_at": record['created_at'],
"updated_at": record['updated_at'],
"links": self._get_resource_links(request, record)
}
}
def load(self, context, request, body):
""" Extract a "central" compatible dict from an API call """
result = {}
item = body[self._resource_name]
# Copy keys which need no alterations
for k in ('id', 'data', 'description',):
if k in item:
result[k] = item[k]
return result


@ -0,0 +1,56 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.api.v2.views import base as base_view
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class RecordSetsView(base_view.BaseView):
""" Model a Zone API response as a python dictionary """
_resource_name = 'recordset'
_collection_name = 'recordsets'
def detail(self, context, request, recordset):
""" Detailed view of a recordset """
return {
"recordset": {
"id": recordset['id'],
"zone_id": recordset['domain_id'],
"name": recordset['name'],
"type": recordset['type'],
"ttl": recordset['ttl'],
"description": recordset['description'],
"version": recordset['version'],
"created_at": recordset['created_at'],
"updated_at": recordset['updated_at'],
"links": self._get_resource_links(request, recordset)
}
}
def load(self, context, request, body):
""" Extract a "central" compatible dict from an API call """
result = {}
item = body[self._resource_name]
# Copy keys which need no alterations
for k in ('id', 'name', 'type', 'ttl', 'description',):
if k in item:
result[k] = item[k]
return result
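One detail worth noting about both views' load() methods: only the whitelisted keys are copied into the values handed to Central, so any server-managed fields a client sends are silently dropped. A tiny standalone illustration with invented data:

allowed = ('id', 'name', 'type', 'ttl', 'description')
item = {
    'name': 'www.example.org.',
    'type': 'A',
    'created_at': '2013-11-20T14:33:37Z',  # client-sent, server-managed field
}

values = dict((k, item[k]) for k in allowed if k in item)
# values == {'name': 'www.example.org.', 'type': 'A'}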


@ -32,6 +32,7 @@ class Backend(Plugin):
super(Backend, self).__init__()
self.central_service = central_service
self.admin_context = DesignateContext.get_admin_context()
self.admin_context.all_tenants = True
def create_tsigkey(self, context, tsigkey):
""" Create a TSIG Key """
@ -60,16 +61,27 @@ class Backend(Plugin):
def delete_domain(self, context, domain):
""" Delete a DNS domain """
def create_recordset(self, context, domain, recordset):
""" Create a DNS recordset """
@abc.abstractmethod
def create_record(self, context, domain, record):
def update_recordset(self, context, domain, recordset):
""" Update a DNS recordset """
@abc.abstractmethod
def delete_recordset(self, context, domain, recordset):
""" Delete a DNS recordset """
@abc.abstractmethod
def create_record(self, context, domain, recordset, record):
""" Create a DNS record """
@abc.abstractmethod
def update_record(self, context, domain, record):
def update_record(self, context, domain, recordset, record):
""" Update a DNS record """
@abc.abstractmethod
def delete_record(self, context, domain, record):
def delete_record(self, context, domain, recordset, record):
""" Delete a DNS record """
@abc.abstractmethod


@ -75,15 +75,23 @@ class Bind9Backend(base.Backend):
LOG.debug('Delete Domain')
self._sync_delete_domain(domain)
def create_record(self, context, domain, record):
def update_recordset(self, context, domain, recordset):
LOG.debug('Update RecordSet')
self._sync_domain(domain)
def delete_recordset(self, context, domain, recordset):
LOG.debug('Delete RecordSet')
self._sync_domain(domain)
def create_record(self, context, domain, recordset, record):
LOG.debug('Create Record')
self._sync_domain(domain)
def update_record(self, context, domain, record):
def update_record(self, context, domain, recordset, record):
LOG.debug('Update Record')
self._sync_domain(domain)
def delete_record(self, context, domain, record):
def delete_record(self, context, domain, recordset, record):
LOG.debug('Delete Record')
self._sync_domain(domain)
@ -136,8 +144,28 @@ class Bind9Backend(base.Backend):
servers = self.central_service.find_servers(self.admin_context)
records = self.central_service.find_records(self.admin_context,
domain['id'])
recordsets = self.central_service.find_recordsets(
self.admin_context, {'domain_id': domain['id']})
records = []
for recordset in recordsets:
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
raw_records = self.central_service.find_records(
self.admin_context, criterion)
for record in raw_records:
records.append({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'priority': record['priority'],
'data': record['data'],
})
output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
'bind9')
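The Bind9 backend (like the zonefile export in the v2 zones controller) now flattens recordsets and their records back into the flat shape the zone template expects. A small sketch of that transformation with invented data:

recordsets = [
    {'id': 'rs-1', 'name': 'www.example.org.', 'type': 'A', 'ttl': 300},
]
records_by_recordset = {
    'rs-1': [{'priority': None, 'data': '192.0.2.1'},
             {'priority': None, 'data': '192.0.2.2'}],
}

flat_records = []
for recordset in recordsets:
    for record in records_by_recordset[recordset['id']]:
        flat_records.append({
            'name': recordset['name'],
            'type': recordset['type'],
            'ttl': recordset['ttl'],
            'priority': record['priority'],
            'data': record['data'],
        })
# flat_records has one entry per record, each carrying the recordset's
# name, type and ttl, ready for the bind9-zone.jinja2 template.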


@ -27,8 +27,8 @@ LOG = logging.getLogger(__name__)
class DnsmasqBackend(base.Backend):
__plugin_name__ = 'dnsmasq'
def start(self):
super(DnsmasqBackend, self).start()
def __init__(self, central_service):
super(DnsmasqBackend, self).__init__(central_service)
self.output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
'dnsmasq')
@ -37,6 +37,9 @@ class DnsmasqBackend(base.Backend):
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
def start(self):
super(DnsmasqBackend, self).start()
# TODO(Andrey): Remove this..
self._sync_domains_hack()
@ -85,21 +88,35 @@ class DnsmasqBackend(base.Backend):
self._merge_zonefiles()
self._reload_dnsmasq()
def create_record(self, context, domain, record):
def update_recordset(self, context, domain, recordset):
LOG.debug('Update RecordSet')
self._write_zonefile(domain)
self._merge_zonefiles()
self._reload_dnsmasq()
def delete_recordset(self, context, domain, recordset):
LOG.debug('Delete RecordSet')
self._write_zonefile(domain)
self._merge_zonefiles()
self._reload_dnsmasq()
def create_record(self, context, domain, recordset, record):
LOG.debug('Create Record')
self._write_zonefile(domain)
self._merge_zonefiles()
self._reload_dnsmasq()
def update_record(self, context, domain, record):
def update_record(self, context, domain, recordset, record):
LOG.debug('Update Record')
self._write_zonefile(domain)
self._merge_zonefiles()
self._reload_dnsmasq()
def delete_record(self, context, domain, record):
def delete_record(self, context, domain, recordset, record):
LOG.debug('Delete Record')
self._write_zonefile(domain)
@ -107,14 +124,37 @@ class DnsmasqBackend(base.Backend):
self._reload_dnsmasq()
def _write_zonefile(self, domain):
records = self.central_service.find_records(self.admin_context,
domain['id'])
criterion = {'domain_id': domain['id']}
template_data = {}
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
for recordset in recordsets:
# Dnsmasq only supports A and AAAA records
if recordset['type'] not in ('A', 'AAAA', ):
continue
template_data.setdefault(recordset['name'], {})
template_data[recordset['name']].setdefault(recordset['type'], [])
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
records = self.central_service.find_records(
self.admin_context, criterion)
template_data[recordset['name']][recordset['type']] = \
[r['data'] for r in records]
filename = os.path.join(self.output_folder, '%s.zone' % domain['id'])
utils.render_template_to_file('dnsmasq-zone.jinja2',
filename,
records=records)
template_data=template_data)
def _purge_zonefile(self, domain):
filename = os.path.join(self.output_folder, '%s.zone' % domain['id'])
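For the dnsmasq backend, _write_zonefile above builds a nested name -> type -> data-list structure before rendering the template, since dnsmasq only handles A and AAAA data. Roughly, the template_data it produces looks like this (values invented):

template_data = {
    'www.example.org.': {
        'A': ['192.0.2.1', '192.0.2.2'],
        'AAAA': ['2001:db8::1'],
    },
    'api.example.org.': {
        'A': ['192.0.2.3'],
    },
}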


@ -52,14 +52,23 @@ class FakeBackend(base.Backend):
def delete_domain(self, context, domain):
LOG.info('Delete Domain %r' % domain)
def create_record(self, context, domain, record):
LOG.info('Create Record %r / %r' % (domain, record))
def create_recordset(self, context, domain, recordset):
LOG.info('Create RecordSet %r / %r' % (domain, recordset))
def update_record(self, context, domain, record):
LOG.info('Update Record %r / %r' % (domain, record))
def update_recordset(self, context, domain, recordset):
LOG.info('Update RecordSet %r / %r' % (domain, recordset))
def delete_record(self, context, domain, record):
LOG.info('Delete Record %r / %r' % (domain, record))
def delete_recordset(self, context, domain, recordset):
LOG.info('Delete RecordSet %r / %r' % (domain, recordset))
def create_record(self, context, domain, recordset, record):
LOG.info('Create Record %r / %r / %r' % (domain, recordset, record))
def update_record(self, context, domain, recordset, record):
LOG.info('Update Record %r / %r / %r' % (domain, recordset, record))
def delete_record(self, context, domain, recordset, record):
LOG.info('Delete Record %r / %r / %r' % (domain, recordset, record))
def sync_domain(self, context, domain, records):
LOG.info('Sync Domain %r / %r' % (domain, records))


@ -107,16 +107,18 @@ class MultiBackend(base.Backend):
# Get the "full" domain (including id) from Central first, as we may
# have to recreate it on slave if delete on master fails
full_domain = self.central.find_domain(
context, criterion={'name': domain['name']})
context, {'name': domain['name']})
self.slave.delete_domain(context, domain)
try:
self.master.delete_domain(context, domain)
except (exceptions.Base, exceptions.Backend):
with excutils.save_and_reraise_exception():
self.slave.create_domain(context, domain)
[self.slave.create_record(context, domain, record)
for record in self.central.find_records(context,
full_domain['id'])]
for record in self.central.find_records(
context, {'domain_id': full_domain['id']})]
def create_server(self, context, server):
self.master.create_server(context, server)
@ -137,14 +139,23 @@ class MultiBackend(base.Backend):
with excutils.save_and_reraise_exception():
self.slave.create_server(context, server)
def create_record(self, context, domain, record):
self.master.create_record(context, domain, record)
def create_recordset(self, context, domain, recordset):
self.master.create_recordset(context, domain, recordset)
def update_record(self, context, domain, record):
self.master.update_record(context, domain, record)
def update_recordset(self, context, domain, recordset):
self.master.update_recordset(context, domain, recordset)
def delete_record(self, context, domain, record):
self.master.delete_record(context, domain, record)
def delete_recordset(self, context, domain, recordset):
self.master.delete_recordset(context, domain, recordset)
def create_record(self, context, domain, recordset, record):
self.master.create_record(context, domain, recordset, record)
def update_record(self, context, domain, recordset, record):
self.master.update_record(context, domain, recordset, record)
def delete_record(self, context, domain, recordset, record):
self.master.delete_record(context, domain, recordset, record)
def ping(self, context):
return {


@ -1,399 +0,0 @@
# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
# Copyright 2012 Managed I.T.
#
# Author: Patrick Galbraith <patg@hp.com>
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate import utils
from designate import exceptions
from designate.backend import base
from sqlalchemy.ext.sqlsoup import SqlSoup
from sqlalchemy.engine.url import _parse_rfc1738_args
from designate.sqlalchemy.session import get_engine
from designate.sqlalchemy.session import SQLOPTS
LOG = logging.getLogger(__name__)
cfg.CONF.register_group(cfg.OptGroup(
name='backend:mysqlbind9', title="Configuration for BIND9+MySQL Backend"
))
cfg.CONF.register_opts([
cfg.StrOpt('rndc-host', default='127.0.0.1', help='RNDC Host'),
cfg.IntOpt('rndc-port', default=953, help='RNDC Port'),
cfg.StrOpt('rndc-config-file',
default=None, help='RNDC Config File'),
cfg.StrOpt('rndc-key-file', secret=True,
default=None, help='RNDC Key File'),
cfg.StrOpt('dns-server-type', default='master',
help='slave or master DNS server?'),
cfg.BoolOpt('write-database', default=True,
help='Write to the DNS mysqlbind database?'),
cfg.StrOpt('database-dns-table',
default='dns_domains',
help='DNS schema'),
], group='backend:mysqlbind9')
cfg.CONF.register_opts(SQLOPTS, group='backend:mysqlbind9')
class MySQLBind9Backend(base.Backend):
__plugin_name__ = 'mysqlbind9'
def get_url_data(self):
url = _parse_rfc1738_args(cfg.CONF[self.name].database_connection)
return url.translate_connect_args()
def get_dns_table(self, table=None):
"""
Get a Table object from SQLSoup
:param table: Overridable table name
"""
table = table or cfg.CONF[self.name].database_dns_table
return getattr(self._db, table)
def start(self):
super(MySQLBind9Backend, self).start()
if cfg.CONF[self.name].write_database:
self._engine = get_engine(self.name)
self._db = SqlSoup(self._engine)
self._sync_domains()
def _add_soa_record(self, domain, servers):
"""
add the single SOA record for this domain. Must create the
data from attributes of the domain
"""
table = self.get_dns_table()
data_rec = "%s. %s. %d %d %d %d %d" % (
servers[0]['name'],
domain['email'].replace("@", "."),
domain['serial'],
domain['refresh'],
domain['retry'],
domain['expire'],
domain['minimum'])
# use the domain id for records that don't have a match
# in designate's records table
table.insert(
tenant_id=domain['tenant_id'],
domain_id=domain['id'],
designate_rec_id=domain['id'],
name=domain['name'],
ttl=domain['ttl'],
type='SOA',
data=data_rec)
self._db.commit()
def _add_ns_records(self, domain, servers):
"""
add the NS records, one for each server, for this domain
"""
table = self.get_dns_table()
# use the domain id for records that don't have a match
# in designate's records table
for server in servers:
table.insert(
tenant_id=domain['tenant_id'],
domain_id=domain['id'],
designate_rec_id=domain['id'],
name=domain['name'],
ttl=domain['ttl'],
type='NS',
data=server['name'])
self._db.commit()
def _insert_db_record(self, tenant_id, domain_id, record):
"""
generic db insertion method for a domain record
"""
table = self.get_dns_table()
table.insert(
tenant_id=tenant_id,
domain_id=domain_id,
designate_rec_id=record['id'],
name=record['name'],
ttl=record['ttl'],
type=record['type'],
data=record['data'])
self._db.commit()
def _update_ns_records(self, domain, servers):
"""
delete and re-add all NS records : easier to just delete all
NS records and then replace - in the case of adding new NS
servers
"""
table = self.get_dns_table()
all_ns_rec = table.filter_by(tenant_id=domain['tenant_id'],
domain_id=domain['id'],
type=u'NS')
# delete all NS records
all_ns_rec.delete()
# add all NS records (might have new servers)
self._db.commit()
self._add_ns_records(domain, servers)
def _update_db_record(self, tenant_id, record):
"""
generic domain db record update method
"""
table = self.get_dns_table()
q = table.filter_by(
tenant_id=tenant_id,
domain_id=record['domain_id'],
designate_rec_id=record['id'])
q.update({'ttl': record['ttl'],
'type': record['type'],
'data': record['data']})
self._db.commit()
def _update_soa_record(self, domain, servers):
"""
update the one single SOA record for the domain
"""
LOG.debug("_update_soa_record()")
table = self.get_dns_table()
# there will only ever be -one- of these
existing_record = table.filter_by(tenant_id=domain['tenant_id'],
domain_id=domain['id'],
type=u'SOA')
data_rec = "%s. %s. %d %d %d %d %d" % (
servers[0]['name'],
domain['email'].replace("@", "."),
domain['serial'],
domain['refresh'],
domain['retry'],
domain['expire'],
domain['minimum'])
existing_record.update(
{'ttl': domain['ttl'],
'type': u'SOA',
'data': data_rec})
self._db.commit()
# def _update_domain_ttl(self, domain):
# LOG.debug("_update_soa_record()")
# table = self.get_dns_table()
#
# # there will only ever be -one- of these
# domain_records = table.filter_by(domain_id=domain['id'])
#
# domain_records.update({'ttl': domain['ttl']})
#
# self._db.commit()
def _delete_db_record(self, tenant_id, record):
"""
delete a specific record for a given domain
"""
table = self.get_dns_table()
LOG.debug("_delete_db_record")
q = table.filter_by(
tenant_id=tenant_id,
domain_id=record['domain_id'],
designate_rec_id=record['id'])
q.delete()
self._db.commit()
def _delete_db_domain_records(self, tenant_id, domain_id):
"""
delete all records for a given domain
"""
LOG.debug('_delete_db_domain_records()')
table = self.get_dns_table()
# delete all records for the domain id
q = table.filter_by(tenant_id=tenant_id,
domain_id=domain_id)
q.delete()
self._db.commit()
def create_domain(self, context, domain):
LOG.debug('create_domain()')
if cfg.CONF[self.name].write_database:
servers = self.central_service.find_servers(self.admin_context)
self._add_soa_record(domain, servers)
self._add_ns_records(domain, servers)
self._sync_domains()
def update_domain(self, context, domain):
LOG.debug('update_domain()')
if cfg.CONF[self.name].write_database:
servers = self.central_service.find_servers(self.admin_context)
self._update_soa_record(domain, servers)
self._update_ns_records(domain, servers)
def delete_domain(self, context, domain):
LOG.debug('delete_domain()')
if cfg.CONF[self.name].write_database:
self._delete_db_domain_records(domain['tenant_id'],
domain['id'])
self._sync_domains()
def create_server(self, context, server):
LOG.debug('create_server()')
raise exceptions.NotImplemented('create_server() for '
'mysqlbind9 backend is '
'not implemented')
"""
TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
if cfg.CONF[self.name].write_database:
domains = self.central_service.find_domains(self.admin_context)
for domain in domains:
self._add_ns_records(domain, server)
self._sync_domains()
"""
# This method could be a very expensive and should only be called
# (e.g., from central) only if the name of the existing server is
# changed.
def update_server(self, context, server):
LOG.debug('update_server()')
raise exceptions.NotImplemented('update_server() for '
'mysqlbind9 backend is '
'not implemented')
"""
TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
if cfg.CONF[self.name].write_database:
servers = self.central_service.find_servers(self.admin_context)
domains = self.central_service.find_domains(self.admin_context)
for domain in domains:
self._update_ns_records(domain, servers)
self._sync_domains()
"""
def delete_server(self, context, server):
LOG.debug('delete_server()')
raise exceptions.NotImplemented('delete_server() for '
'mysqlbind9 backend is'
' not implemented')
"""
TODO: For scale, Use bulk SQLAlchemy (core) queries
"""
def create_record(self, context, domain, record):
LOG.debug('create_record()')
if cfg.CONF[self.name].write_database:
self._insert_db_record(domain['tenant_id'],
domain['id'],
record)
def update_record(self, context, domain, record):
LOG.debug('update_record()')
if cfg.CONF[self.name].write_database:
self._update_db_record(domain['tenant_id'],
record)
def delete_record(self, context, domain, record):
LOG.debug('Delete Record')
if cfg.CONF[self.name].write_database:
self._delete_db_record(domain['tenant_id'],
record)
def _sync_domains(self):
"""
Update the zone file and reconfig rndc to update bind.
Unike regular bind, this only needs to be done upon adding
or deleting domains as mysqlbind takes care of updating
bind upon regular record changes
"""
LOG.debug('Synchronising domains')
domains = self.central_service.find_domains(self.admin_context)
output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
'bind9')
# Create the output folder tree if necessary
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_path = os.path.join(output_folder, 'zones.config')
abs_state_path = os.path.abspath(cfg.CONF.state_path)
LOG.debug("Getting ready to write zones.config at %s" % output_path)
# NOTE(CapTofu): Might have to adapt this later on?
url = self.get_url_data()
utils.render_template_to_file('mysql-bind9-config.jinja2',
output_path,
domains=domains,
state_path=abs_state_path,
dns_server_type=cfg.CONF[self.name].
dns_server_type,
dns_db_schema=url['database'],
dns_db_table=cfg.CONF[self.name].
database_dns_table,
dns_db_host=url['host'],
dns_db_user=url['username'],
dns_db_password=url['password'])
# only do this if domain create, domain delete
rndc_call = [
'rndc',
'-s', cfg.CONF[self.name].rndc_host,
'-p', str(cfg.CONF[self.name].rndc_port),
]
if cfg.CONF[self.name].rndc_config_file:
rndc_call.extend(['-c', self.config.rndc_config_file])
if cfg.CONF[self.name].rndc_key_file:
rndc_call.extend(['-k', self.config.rndc_key_file])
rndc_call.extend(['reconfig'])
utils.execute(*rndc_call)


@ -114,13 +114,19 @@ class NSD4SlaveBackend(base.Backend):
sock.close()
return result.rstrip()
def create_record(self, context, domain, record):
def update_recordset(self, context, domain, recordset):
pass
def update_record(self, context, domain, record):
def delete_recordset(self, context, domain, recordset):
pass
def delete_record(self, context, domain, record):
def create_record(self, context, domain, recordset, record):
pass
def update_record(self, context, domain, recordset, record):
pass
def delete_record(self, context, domain, recordset, record):
pass
def create_server(self, context, server):


@ -228,45 +228,67 @@ class PowerDNSBackend(base.Backend):
query = self.session.query(models.DomainMetadata)
query.filter_by(domain_id=domain_m.id).delete()
# RecordSet Methods
def update_recordset(self, context, domain, recordset):
# Ensure records are updated
values = {'ttl': recordset['ttl']}
query = self.session.query(models.Records)
query.filter_by(designate_recordset_id=recordset['id']).update(values)
self._update_soa(domain)
def delete_recordset(self, context, domain, recordset):
# Ensure records are deleted
query = self.session.query(models.Records)
query.filter_by(designate_recordset_id=recordset['id']).delete()
self._update_soa(domain)
# Record Methods
def create_record(self, context, domain, record):
def create_record(self, context, domain, recordset, record):
domain_m = self._get_domain(domain['id'])
record_m = models.Record()
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_m.update({
'designate_id': record['id'],
'designate_recordset_id': record['recordset_id'],
'domain_id': domain_m.id,
'name': record['name'].rstrip('.'),
'type': record['type'],
'content': self._sanitize_content(record['type'], record['data']),
'ttl': domain['ttl'] if record['ttl'] is None else record['ttl'],
'inherit_ttl': True if record['ttl'] is None else False,
'name': recordset['name'].rstrip('.'),
'type': recordset['type'],
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, record)
'auth': self._is_authoritative(domain, recordset, record)
})
record_m.save(self.session)
self._update_soa(domain)
def update_record(self, context, domain, record):
def update_record(self, context, domain, recordset, record):
record_m = self._get_record(record['id'])
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_m.update({
'name': record['name'].rstrip('.'),
'type': record['type'],
'content': self._sanitize_content(record['type'], record['data']),
'ttl': domain['ttl'] if record['ttl'] is None else record['ttl'],
'inherit_ttl': True if record['ttl'] is None else False,
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, record)
'auth': self._is_authoritative(domain, recordset, record)
})
record_m.save(self.session)
self._update_soa(domain)
def delete_record(self, context, domain, record):
def delete_record(self, context, domain, recordset, record):
try:
record_m = self._get_record(record['id'])
except exceptions.RecordNotFound:
@ -319,9 +341,9 @@ class PowerDNSBackend(base.Backend):
content=value)
m.save(self.session)
def _is_authoritative(self, domain, record):
def _is_authoritative(self, domain, recordset, record):
# NOTE(kiall): See http://doc.powerdns.com/dnssec-modes.html
if record['type'] == 'NS' and record['name'] != domain['name']:
if recordset['type'] == 'NS' and recordset['name'] != domain['name']:
return False
else:
return True
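TTL handling in the PowerDNS create/update paths above now comes from the recordset: when the recordset has no TTL, the domain default is used and the row is flagged as inheriting. A minimal restatement of that choice:

def effective_ttl(domain, recordset):
    # Returns (ttl, inherit_ttl) the way the backend stores them.
    if recordset.get('ttl') is None:
        return domain['ttl'], True
    return recordset['ttl'], False


assert effective_ttl({'ttl': 3600}, {'ttl': None}) == (3600, True)
assert effective_ttl({'ttl': 3600}, {'ttl': 300}) == (300, False)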


@ -0,0 +1,36 @@
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column
from designate.sqlalchemy.types import UUID
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
recordset_id = Column('designate_recordset_id', UUID())
recordset_id.create(records_table)
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
records_table.c.designate_recordset_id.drop()


@ -63,6 +63,7 @@ class Record(Base):
__tablename__ = 'records'
designate_id = Column(UUID, nullable=False)
designate_recordset_id = Column(UUID, default=None, nullable=True)
domain_id = Column(Integer, default=None, nullable=True)
name = Column(String(255), default=None, nullable=True)


@ -38,6 +38,7 @@ cfg.CONF.register_opts([
help='Effective TLDs'),
cfg.IntOpt('max_domain_name_len', default=255,
help="Maximum domain name length"),
cfg.IntOpt('max_record_name_len', default=255,
help="Maximum record name length"),
cfg.IntOpt('max_recordset_name_len', default=255,
help="Maximum recordset name length",
deprecated_name='max_record_name_len'),
], group='service:central')


@ -32,11 +32,12 @@ class CentralAPI(rpc_proxy.RpcProxy):
1.3 - Add get_absolute_limits
2.0 - Renamed most get_resources to find_resources
2.1 - Add quota methods
3.0 - RecordSet Changes
"""
def __init__(self, topic=None):
topic = topic if topic else cfg.CONF.central_topic
super(CentralAPI, self).__init__(topic=topic, default_version='2.0')
super(CentralAPI, self).__init__(topic=topic, default_version='3.0')
# Misc Methods
def get_absolute_limits(self, context):
@ -50,27 +51,27 @@ class CentralAPI(rpc_proxy.RpcProxy):
LOG.info("get_quotas: Calling central's get_quotas.")
msg = self.make_msg('get_quotas', tenant_id=tenant_id)
return self.call(context, msg, version='2.1')
return self.call(context, msg)
def get_quota(self, context, tenant_id, resource):
LOG.info("get_quota: Calling central's get_quota.")
msg = self.make_msg('get_quota', tenant_id=tenant_id,
resource=resource)
return self.call(context, msg, version='2.1')
return self.call(context, msg)
def set_quota(self, context, tenant_id, resource, hard_limit):
LOG.info("set_quota: Calling central's set_quota.")
msg = self.make_msg('set_quota', tenant_id=tenant_id,
resource=resource, hard_limit=hard_limit)
return self.call(context, msg, version='2.1')
return self.call(context, msg)
def reset_quotas(self, context, tenant_id):
LOG.info("reset_quotas: Calling central's reset_quotas.")
msg = self.make_msg('reset_quotas', tenant_id=tenant_id)
return self.call(context, msg, version='2.1')
return self.call(context, msg)
# Server Methods
def create_server(self, context, values):
@ -180,7 +181,7 @@ class CentralAPI(rpc_proxy.RpcProxy):
return self.call(context, msg)
def find_domain(self, context, criterion):
def find_domain(self, context, criterion=None):
LOG.info("find_domain: Calling central's find_domain.")
msg = self.make_msg('find_domain', criterion=criterion)
@ -213,56 +214,113 @@ class CentralAPI(rpc_proxy.RpcProxy):
return self.call(context, msg)
# Record Methods
def create_record(self, context, domain_id, values, increment_serial=True):
LOG.info("create_record: Calling central's create_record.")
msg = self.make_msg('create_record',
# RecordSet Methods
def create_recordset(self, context, domain_id, values):
LOG.info("create_recordset: Calling central's create_recordset.")
msg = self.make_msg('create_recordset',
domain_id=domain_id,
values=values)
return self.call(context, msg)
def get_recordset(self, context, domain_id, recordset_id):
LOG.info("get_recordset: Calling central's get_recordset.")
msg = self.make_msg('get_recordset',
domain_id=domain_id,
recordset_id=recordset_id)
return self.call(context, msg)
def find_recordsets(self, context, criterion=None):
LOG.info("find_recordsets: Calling central's find_recordsets.")
msg = self.make_msg('find_recordsets', criterion=criterion)
return self.call(context, msg)
def find_recordset(self, context, criterion=None):
LOG.info("find_recordset: Calling central's find_recordset.")
msg = self.make_msg('find_recordset', criterion=criterion)
return self.call(context, msg)
def update_recordset(self, context, domain_id, recordset_id, values,
increment_serial=True):
LOG.info("update_recordset: Calling central's update_recordset.")
msg = self.make_msg('update_recordset',
domain_id=domain_id,
recordset_id=recordset_id,
values=values,
increment_serial=increment_serial)
return self.call(context, msg)
def get_record(self, context, domain_id, record_id):
def delete_recordset(self, context, domain_id, recordset_id,
increment_serial=True):
LOG.info("delete_recordset: Calling central's delete_recordset.")
msg = self.make_msg('delete_recordset',
domain_id=domain_id,
recordset_id=recordset_id,
increment_serial=increment_serial)
return self.call(context, msg)
def count_recordsets(self, context, criterion=None):
LOG.info("count_recordsets: Calling central's count_recordsets.")
msg = self.make_msg('count_recordsets', criterion=criterion)
return self.call(context, msg)
# Record Methods
def create_record(self, context, domain_id, recordset_id, values,
increment_serial=True):
LOG.info("create_record: Calling central's create_record.")
msg = self.make_msg('create_record',
domain_id=domain_id,
recordset_id=recordset_id,
values=values,
increment_serial=increment_serial)
return self.call(context, msg)
def get_record(self, context, domain_id, recordset_id, record_id):
LOG.info("get_record: Calling central's get_record.")
msg = self.make_msg('get_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
return self.call(context, msg)
def find_records(self, context, domain_id, criterion=None):
def find_records(self, context, criterion=None):
LOG.info("find_records: Calling central's find_records.")
msg = self.make_msg('find_records',
domain_id=domain_id,
criterion=criterion)
msg = self.make_msg('find_records', criterion=criterion)
return self.call(context, msg)
def find_record(self, context, domain_id, criterion=None):
def find_record(self, context, criterion=None):
LOG.info("find_record: Calling central's find_record.")
msg = self.make_msg('find_record',
domain_id=domain_id,
criterion=criterion)
msg = self.make_msg('find_record', criterion=criterion)
return self.call(context, msg)
def update_record(self, context, domain_id, record_id, values,
increment_serial=True):
def update_record(self, context, domain_id, recordset_id, record_id,
values, increment_serial=True):
LOG.info("update_record: Calling central's update_record.")
msg = self.make_msg('update_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id,
values=values,
increment_serial=increment_serial)
return self.call(context, msg)
def delete_record(self, context, domain_id, record_id,
def delete_record(self, context, domain_id, recordset_id, record_id,
increment_serial=True):
LOG.info("delete_record: Calling central's delete_record.")
msg = self.make_msg('delete_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id,
increment_serial=increment_serial)
@ -287,10 +345,11 @@ class CentralAPI(rpc_proxy.RpcProxy):
return self.call(context, msg)
def sync_record(self, context, domain_id, record_id):
def sync_record(self, context, domain_id, recordset_id, record_id):
LOG.info("sync_record: Calling central's sync_record.")
msg = self.make_msg('sync_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
return self.call(context, msg)


@ -45,7 +45,7 @@ def wrap_backend_call():
class Service(rpc_service.Service):
RPC_API_VERSION = '2.1'
RPC_API_VERSION = '3.0'
def __init__(self, *args, **kwargs):
backend_driver = cfg.CONF['service:central'].backend_driver
@ -119,54 +119,49 @@ class Service(rpc_service.Service):
return True
def _is_valid_record_name(self, context, domain, record_name, record_type):
if not record_name.endswith('.'):
def _is_valid_recordset_name(self, context, domain, recordset_name,
recordset_type):
if not recordset_name.endswith('.'):
raise ValueError('Please supply a FQDN')
# Validate record name length
if len(record_name) > cfg.CONF['service:central'].max_record_name_len:
raise exceptions.InvalidRecordName('Name too long')
max_len = cfg.CONF['service:central'].max_recordset_name_len
if len(recordset_name) > max_len:
raise exceptions.InvalidRecordSetName('Name too long')
# Record must be contained in the parent zone
if not record_name.endswith(domain['name']):
raise exceptions.InvalidRecordLocation('Record is not contained '
'within it\'s parent '
'domain')
# RecordSets must be contained in the parent zone
if not recordset_name.endswith(domain['name']):
raise exceptions.InvalidRecordSetLocation(
'RecordSet is not contained within its parent domain')
# CNAME's must not be created at the zone apex.
if record_type == 'CNAME' and record_name == domain['name']:
raise exceptions.InvalidRecordLocation('CNAME records may not be '
'created at the zone apex')
if recordset_type == 'CNAME' and recordset_name == domain['name']:
raise exceptions.InvalidRecordSetLocation(
'CNAME recordsets may not be created at the zone apex')
def _is_valid_record_placement(self, context, domain, record_name,
record_type, record_id=None):
# CNAME's must not share a name with other records
criterion = {
'name': record_name,
'domain_id': domain['id']
}
if record_type != 'CNAME':
criterion['type'] = 'CNAME'
records = self.storage_api.find_records(context, criterion=criterion)
if ((len(records) == 1 and records[0]['id'] != record_id)
or len(records) > 1):
raise exceptions.InvalidRecordLocation('CNAME records may not '
'share a name with any '
'other records')
# Duplicate PTR's with the same name are not allowed
if record_type == 'PTR':
criterion = {
'name': record_name,
'type': 'PTR',
'domain_id': domain['id']}
records = self.storage_api.find_records(context,
criterion=criterion)
if ((len(records) == 1 and records[0]['id'] != record_id)
or len(records) > 1):
raise exceptions.DuplicateRecord()
def _is_valid_recordset_placement(self, context, domain, recordset_name,
recordset_type, recordset_id=None):
# # CNAME's must not share a name with other recordsets
# criterion = {'name': recordset_name}
#
# if recordset_type != 'CNAME':
# criterion['type'] = 'CNAME'
#
# recordsets = self.storage_api.find_recordsets(context, domain['id'],
# criterion=criterion)
# if ((len(recordsets) == 1 and recordsets[0]['id'] != recordset_id)
# or len(recordsets) > 1):
# raise exceptions.InvalidRecordSetLocation(
# 'CNAME recordsets may not share a name with any other records')
#
# # Duplicate PTR's with the same name are not allowed
# if recordset_type == 'PTR':
# criterion = {'name': recordset_name, 'type': 'PTR'}
# records = self.storage_api.find_recordsets(context, domain['id'],
# criterion=criterion)
# if ((len(recordsets) == 1 and recordsets[0]['id'] != recordset_id)
# or len(recordsets) > 1):
# raise exceptions.DuplicateRecordSet()
return True
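The renamed validator above enforces four rules on recordset names. A standalone restatement using plain ValueError instead of the designate exception classes; the 255 default mirrors the new max_recordset_name_len option:

def validate_recordset_name(domain_name, name, rtype, max_len=255):
    if not name.endswith('.'):
        raise ValueError('Please supply a FQDN')
    if len(name) > max_len:
        raise ValueError('Name too long')
    if not name.endswith(domain_name):
        raise ValueError('RecordSet is not contained within its parent domain')
    if rtype == 'CNAME' and name == domain_name:
        raise ValueError('CNAME recordsets may not be created at the zone apex')


validate_recordset_name('example.org.', 'www.example.org.', 'A')  # passes
validate_recordset_name('example.org.', 'example.org.', 'A')      # passes (apex, non-CNAME)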
@ -205,25 +200,25 @@ class Service(rpc_service.Service):
return False
def _is_subrecord(self, context, domain, record_name, criterion):
# Break the names up into their component labels
domain_labels = domain['name'].split(".")
record_labels = record_name.split(".")
# # Break the names up into their component labels
# domain_labels = domain['name'].split(".")
# record_labels = record_name.split(".")
i = 1
j = len(record_labels) - len(domain_labels)
# i = 1
# j = len(record_labels) - len(domain_labels)
criterion['domain_id'] = domain['id']
# criterion['domain_id'] = domain['id']
#
# # Starting with label #2, search for matching records's in the database
# while (i <= j):
# criterion['name'] = '.'.join(record_labels[i:])
#
# records = self.storage_api.find_records(context, criterion)
# Starting with label #2, search for matching records's in the database
while (i <= j):
criterion['name'] = '.'.join(record_labels[i:])
records = self.storage_api.find_records(context, criterion)
if len(records) == 0:
i += 1
else:
return records
# if len(records) == 0:
# i += 1
# else:
# return records
return False
@ -247,7 +242,11 @@ class Service(rpc_service.Service):
self.quota.limit_check(context, tenant_id, domains=count)
def _enforce_record_quota(self, context, domain):
def _enforce_recordset_quota(self, context, domain):
# TODO(kiall): Enforce RRSet Quotas
pass
def _enforce_record_quota(self, context, domain, recordset):
# Ensure the records per domain quota is OK
criterion = {'domain_id': domain['id']}
count = self.storage_api.count_records(context, criterion)
@ -255,6 +254,8 @@ class Service(rpc_service.Service):
self.quota.limit_check(context, domain['tenant_id'],
domain_records=count)
# TODO(kiall): Enforce Records per RRSet Quotas
# Misc Methods
def get_absolute_limits(self, context):
# NOTE(Kiall): Currently, we only have quota based limits..
@ -491,7 +492,7 @@ class Service(rpc_service.Service):
return self.storage_api.find_domains(context, criterion)
def find_domain(self, context, criterion):
def find_domain(self, context, criterion=None):
target = {'tenant_id': context.tenant_id}
policy.check('find_domain', context, target)
@ -590,32 +591,178 @@ class Service(rpc_service.Service):
return domain
# Record Methods
def create_record(self, context, domain_id, values, increment_serial=True):
# RecordSet Methods
def create_recordset(self, context, domain_id, values):
domain = self.storage_api.get_domain(context, domain_id)
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'record_name': values['name'],
'recordset_name': values['name'],
'tenant_id': domain['tenant_id'],
}
policy.check('create_recordset', context, target)
# Ensure the tenant has enough quota to continue
self._enforce_recordset_quota(context, domain)
# Ensure the recordset name and placement are valid
self._is_valid_recordset_name(context, domain, values['name'],
values['type'])
self._is_valid_recordset_placement(context, domain, values['name'],
values['type'])
with self.storage_api.create_recordset(
context, domain_id, values) as recordset:
with wrap_backend_call():
self.backend.create_recordset(context, domain, recordset)
# Send RecordSet creation notification
self.notifier.info(context, 'dns.recordset.create', recordset)
return recordset
def get_recordset(self, context, domain_id, recordset_id):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
# Ensure the domain_id matches the recordset's domain_id
if domain['id'] != recordset['domain_id']:
raise exceptions.RecordSetNotFound()
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset['id'],
'tenant_id': domain['tenant_id'],
}
policy.check('get_recordset', context, target)
return recordset
def find_recordsets(self, context, criterion=None):
target = {'tenant_id': context.tenant_id}
policy.check('find_recordsets', context, target)
return self.storage_api.find_recordsets(context, criterion)
def find_recordset(self, context, criterion=None):
target = {'tenant_id': context.tenant_id}
policy.check('find_recordset', context, target)
return self.storage_api.find_recordset(context, criterion)
def update_recordset(self, context, domain_id, recordset_id, values,
increment_serial=True):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
# Ensure the domain_id matches the recordset's domain_id
if domain['id'] != recordset['domain_id']:
raise exceptions.RecordSetNotFound()
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset['id'],
'tenant_id': domain['tenant_id']
}
policy.check('update_recordset', context, target)
# Ensure the recordset name is valid
recordset_name = values['name'] if 'name' in values \
else recordset['name']
recordset_type = values['type'] if 'type' in values \
else recordset['type']
self._is_valid_recordset_name(context, domain, recordset_name,
recordset_type)
self._is_valid_recordset_placement(context, domain, recordset_name,
recordset_type, recordset_id)
# Update the recordset
with self.storage_api.update_recordset(
context, recordset_id, values) as recordset:
with wrap_backend_call():
self.backend.update_recordset(context, domain, recordset)
if increment_serial:
self._increment_domain_serial(context, domain_id)
# Send RecordSet update notification
self.notifier.info(context, 'dns.recordset.update', recordset)
return recordset
def delete_recordset(self, context, domain_id, recordset_id,
increment_serial=True):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
# Ensure the domain_id matches the recordset's domain_id
if domain['id'] != recordset['domain_id']:
raise exceptions.RecordSetNotFound()
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset['id'],
'tenant_id': domain['tenant_id']
}
policy.check('delete_recordset', context, target)
with self.storage_api.delete_recordset(context, recordset_id) \
as recordset:
with wrap_backend_call():
self.backend.delete_recordset(context, domain, recordset)
if increment_serial:
self._increment_domain_serial(context, domain_id)
# Send RecordSet deletion notification
self.notifier.info(context, 'dns.recordset.delete', recordset)
return recordset
def count_recordsets(self, context, criterion=None):
if criterion is None:
criterion = {}
target = {
'tenant_id': criterion.get('tenant_id', None)
}
policy.check('count_recordsets', context, target)
return self.storage_api.count_recordsets(context, criterion)
# Record Methods
def create_record(self, context, domain_id, recordset_id, values,
increment_serial=True):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset_id,
'recordset_name': recordset['name'],
'tenant_id': domain['tenant_id']
}
policy.check('create_record', context, target)
# Ensure the tenant has enough quota to continue
self._enforce_record_quota(context, domain)
# Ensure the record name and placement is valid
self._is_valid_record_name(context, domain, values['name'],
values['type'])
self._is_valid_record_placement(context, domain, values['name'],
values['type'])
self._enforce_record_quota(context, domain, recordset)
with self.storage_api.create_record(
context, domain_id, values) as record:
context, domain_id, recordset_id, values) as record:
with wrap_backend_call():
self.backend.create_record(context, domain, record)
self.backend.create_record(context, domain, recordset, record)
if increment_serial:
self._increment_domain_serial(context, domain_id)
@ -625,17 +772,24 @@ class Service(rpc_service.Service):
return record
def get_record(self, context, domain_id, record_id):
def get_record(self, context, domain_id, recordset_id, record_id):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
record = self.storage_api.get_record(context, record_id)
# Ensure the domain_id matches the record's domain_id
if domain['id'] != record['domain_id']:
raise exceptions.RecordNotFound()
# Ensure the recordset_id matches the record's recordset_id
if recordset['id'] != record['recordset_id']:
raise exceptions.RecordNotFound()
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset_id,
'recordset_name': recordset['name'],
'record_id': record['id'],
'tenant_id': domain['tenant_id']
}
@ -644,73 +798,48 @@ class Service(rpc_service.Service):
return record
def find_records(self, context, domain_id, criterion=None):
if criterion is None:
criterion = {}
domain = self.storage_api.get_domain(context, domain_id)
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'tenant_id': domain['tenant_id']
}
def find_records(self, context, criterion=None):
target = {'tenant_id': context.tenant_id}
policy.check('find_records', context, target)
criterion['domain_id'] = domain_id
return self.storage_api.find_records(context, criterion)
def find_record(self, context, domain_id, criterion=None):
if criterion is None:
criterion = {}
domain = self.storage_api.get_domain(context, domain_id)
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'tenant_id': domain['tenant_id']
}
def find_record(self, context, criterion=None):
target = {'tenant_id': context.tenant_id}
policy.check('find_record', context, target)
criterion['domain_id'] = domain_id
return self.storage_api.find_record(context, criterion)
def update_record(self, context, domain_id, record_id, values,
increment_serial=True):
def update_record(self, context, domain_id, recordset_id, record_id,
values, increment_serial=True):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
record = self.storage_api.get_record(context, record_id)
# Ensure the domain_id matches the record's domain_id
if domain['id'] != record['domain_id']:
raise exceptions.RecordNotFound()
# Ensure the recordset_id matches the record's recordset_id
if recordset['id'] != record['recordset_id']:
raise exceptions.RecordNotFound()
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset_id,
'recordset_name': recordset['name'],
'record_id': record['id'],
'tenant_id': domain['tenant_id']
}
policy.check('update_record', context, target)
# Ensure the record name is valid
record_name = values['name'] if 'name' in values else record['name']
record_type = values['type'] if 'type' in values else record['type']
self._is_valid_record_name(context, domain, record_name, record_type)
self._is_valid_record_placement(context, domain, record_name,
record_type, record_id)
# Update the record
with self.storage_api.update_record(
context, record_id, values) as record:
with wrap_backend_call():
self.backend.update_record(context, domain, record)
self.backend.update_record(context, domain, recordset, record)
if increment_serial:
self._increment_domain_serial(context, domain_id)
@ -720,18 +849,25 @@ class Service(rpc_service.Service):
return record
def delete_record(self, context, domain_id, record_id,
def delete_record(self, context, domain_id, recordset_id, record_id,
increment_serial=True):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
record = self.storage_api.get_record(context, record_id)
# Ensure the domain_id matches the record's domain_id
if domain['id'] != record['domain_id']:
raise exceptions.RecordNotFound()
# Ensure the recordset_id matches the record's recordset_id
if recordset['id'] != record['recordset_id']:
raise exceptions.RecordNotFound()
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset_id,
'recordset_name': recordset['name'],
'record_id': record['id'],
'tenant_id': domain['tenant_id']
}
@ -740,7 +876,7 @@ class Service(rpc_service.Service):
with self.storage_api.delete_record(context, record_id) as record:
with wrap_backend_call():
self.backend.delete_record(context, domain, record)
self.backend.delete_record(context, domain, recordset, record)
if increment_serial:
self._increment_domain_serial(context, domain_id)
@ -797,12 +933,15 @@ class Service(rpc_service.Service):
with wrap_backend_call():
return self.backend.sync_domain(context, domain, records)
def sync_record(self, context, domain_id, record_id):
def sync_record(self, context, domain_id, recordset_id, record_id):
domain = self.storage_api.get_domain(context, domain_id)
recordset = self.storage_api.get_recordset(context, recordset_id)
target = {
'domain_id': domain_id,
'domain_name': domain['name'],
'recordset_id': recordset_id,
'recordset_name': recordset['name'],
'record_id': record_id,
'tenant_id': domain['tenant_id']
}
@ -812,7 +951,7 @@ class Service(rpc_service.Service):
record = self.storage_api.get_record(context, record_id)
with wrap_backend_call():
return self.backend.sync_record(context, domain, record)
return self.backend.sync_record(context, domain, recordset, record)
def ping(self, context):
policy.check('diagnostics_ping', context)

View File

@ -70,6 +70,11 @@ class BadRequest(Base):
error_type = 'bad_request'
class InvalidOperation(BadRequest):
error_code = 400
error_type = 'invalid_operation'
class UnsupportedAccept(BadRequest):
error_code = 406
error_type = 'unsupported_accept'
@ -95,14 +100,14 @@ class InvalidTLD(Base):
error_type = 'invalid_tld'
class InvalidRecordName(Base):
class InvalidRecordSetName(Base):
error_code = 400
error_type = 'invalid_record_name'
error_type = 'invalid_recordset_name'
class InvalidRecordLocation(Base):
class InvalidRecordSetLocation(Base):
error_code = 400
error_type = 'invalid_record_location'
error_type = 'invalid_recordset_location'
class DomainHasSubdomain(Base):
@ -136,6 +141,10 @@ class DuplicateDomain(Duplicate):
error_type = 'duplicate_domain'
class DuplicateRecordSet(Duplicate):
error_type = 'duplicate_recordset'
class DuplicateRecord(Duplicate):
error_type = 'duplicate_record'
@ -161,6 +170,10 @@ class DomainNotFound(NotFound):
error_type = 'domain_not_found'
class RecordSetNotFound(NotFound):
error_type = 'recordset_not_found'
class RecordNotFound(NotFound):
error_type = 'record_not_found'

View File

@ -16,6 +16,7 @@
# under the License.
import abc
from oslo.config import cfg
from designate import exceptions
from designate.openstack.common import log as logging
from designate.central import rpcapi as central_rpcapi
from designate.context import DesignateContext
@ -72,6 +73,23 @@ class Handler(Plugin):
context = DesignateContext.get_admin_context(all_tenants=True)
return central_api.get_domain(context, domain_id)
def _find_or_create_recordset(self, context, domain_id, name, type,
ttl=None):
try:
recordset = central_api.find_recordset(context, {
'domain_id': domain_id,
'name': name,
'type': type,
})
except exceptions.RecordSetNotFound:
recordset = central_api.create_recordset(context, domain_id, {
'name': name,
'type': type,
'ttl': ttl,
})
return recordset
class BaseAddressHandler(Handler):
default_format = '%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(domain)s'
@ -102,14 +120,20 @@ class BaseAddressHandler(Handler):
context = DesignateContext.get_admin_context(all_tenants=True)
for addr in addresses:
record_data = data.copy()
record_data.update(get_ip_data(addr))
event_data = data.copy()
event_data.update(get_ip_data(addr))
recordset_values = {
'domain_id': domain['id'],
'name': self._get_format() % event_data,
'type': 'A' if addr['version'] == 4 else 'AAAA'}
recordset = self._find_or_create_recordset(
context, **recordset_values)
record_name = self._get_format() % record_data
record_values = {
'type': 'A' if addr['version'] == 4 else 'AAAA',
'name': record_name,
'data': addr['address']}
if managed:
record_values.update({
'managed': managed,
@ -117,9 +141,11 @@ class BaseAddressHandler(Handler):
'managed_plugin_type': self.get_plugin_type(),
'managed_resource_type': resource_type,
'managed_resource_id': resource_id})
LOG.debug('Creating record in %s with values %r', domain['id'],
record_values)
central_api.create_record(context, domain['id'], record_values)
LOG.debug('Creating record in %s / %s with values %r',
domain['id'], recordset['id'], record_values)
central_api.create_record(context, domain['id'], recordset['id'],
record_values)
def _delete(self, managed=True, resource_id=None, resource_type='instance',
criterion={}):
@ -130,6 +156,8 @@ class BaseAddressHandler(Handler):
"""
context = DesignateContext.get_admin_context(all_tenants=True)
criterion.update({'domain_id': cfg.CONF[self.name].domain_id})
if managed:
criterion.update({
'managed': managed,
@ -139,12 +167,10 @@ class BaseAddressHandler(Handler):
'managed_resource_type': resource_type
})
records = central_api.find_records(context,
cfg.CONF[self.name].domain_id,
criterion)
records = central_api.find_records(context, criterion)
for record in records:
LOG.debug('Deleting record %s' % record['id'])
central_api.delete_record(context, cfg.CONF[self.name].domain_id,
record['id'])
record['recordset_id'], record['id'])

View File

@ -21,10 +21,15 @@ LOG = logging.getLogger(__name__)
cfg.CONF.register_opts([
cfg.StrOpt('quota-driver', default='storage', help='Quota driver to use'),
cfg.IntOpt('quota-domains', default=10, help='Number of domains allowed '
'per tenant'),
cfg.IntOpt('quota-domain-records', default=500, help='Number of records '
'allowed per domain'),
cfg.IntOpt('quota-domains', default=10,
help='Number of domains allowed per tenant'),
cfg.IntOpt('quota-domain-recordsets', default=500,
help='Number of recordsets allowed per domain'),
cfg.IntOpt('quota-domain-records', default=500,
help='Number of records allowed per domain'),
cfg.IntOpt('quota-recordset-records', default=20,
help='Number of records allowed per recordset'),
])

View File

@ -49,7 +49,9 @@ class Quota(Plugin):
def get_default_quotas(self, context):
return {
'domains': cfg.CONF.quota_domains,
'domain_recordsets': cfg.CONF.quota_domain_recordsets,
'domain_records': cfg.CONF.quota_domain_records,
'recordset_records': cfg.CONF.quota_recordset_records,
}
def get_quota(self, context, tenant_id, resource):

View File

@ -0,0 +1,72 @@
{
"$schema": "http://json-schema.org/draft-04/hyper-schema",
"id": "record",
"title": "record",
"description": "Record",
"additionalProperties": false,
"required": ["record"],
"properties": {
"record": {
"type": "object",
"additionalProperties": false,
"required": ["data"],
"properties": {
"id": {
"type": "string",
"description": "Record identifier",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"readOnly": true
},
"recordset_id": {
"type": "string",
"description": "RecordSet identifier",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"immutable": true
},
"data": {
"type": "string",
"description": "Record Value",
"maxLength": 255
},
"description": {
"type": ["string", "null"],
"description": "Description for the Record",
"maxLength": 160
},
"version": {
"type": "integer",
"description": "Record version number",
"readOnly": true
},
"created_at": {
"type": "string",
"description": "Date and time of Record creation",
"format": "date-time",
"readOnly": true
},
"updated_at": {
"type": ["string", "null"],
"description": "Date and time of last record modification",
"format": "date-time",
"readOnly": true
},
"links": {
"type": "object",
"additionalProperties": false,
"properties": {
"self": {
"type": "string",
"format": "url"
}
}
}
}
}
}
}
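
For illustration only (not part of the schema file), a minimal body that
validates against the record schema above, written as the Python dict a v2
client would POST; only "data" is required, and the values are hypothetical:

    body = {
        'record': {
            'data': '192.0.2.1',                  # the record value (required)
            'description': 'Example web server',  # optional, max 160 chars
        }
    }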

View File

@ -0,0 +1,38 @@
{
"$schema": "http://json-schema.org/draft-04/hyper-schema",
"id": "records",
"title": "records",
"description": "Records",
"additionalProperties": false,
"required": ["records"],
"properties": {
"records": {
"type": "array",
"description": "Records",
"items": {"$ref": "record#/properties/record"}
},
"links": {
"type": "object",
"additionalProperties": false,
"properties": {
"self": {
"type": "string",
"format": "url"
},
"next": {
"type": ["string", "null"],
"format": "url"
},
"previous": {
"type": ["string", "null"],
"format": "url"
}
}
}
}
}

View File

@ -13,7 +13,7 @@
"recordset": {
"type": "object",
"additionalProperties": false,
"required": ["zone_id", "name", "type", "records"],
"required": ["name", "type"],
"properties": {
"id": {
@ -52,17 +52,6 @@
"max": 2147483647,
"default": null
},
"records": {
"type": "array",
"description": "Records Array",
"uniqueItems": true,
"minItems": 1
},
"notes": {
"type": ["string", "null"],
"description": "Notes",
"maxLength": 100
},
"version": {
"type": "integer",
"description": "RecordSet version number",
@ -91,20 +80,7 @@
}
}
}
},
"oneOf": [
{"properties": {"type": {"enum": ["A"]}, "records": {"items": {"$ref": "rdata/a#"}}}},
{"properties": {"type": {"enum": ["AAAA"]}, "records": {"items": {"$ref": "rdata/aaaa#"}}}},
{"properties": {"type": {"enum": ["CNAME"]}, "records": {"items": {"$ref": "rdata/cname#"}}}},
{"properties": {"type": {"enum": ["MX"]}, "records": {"items": {"$ref": "rdata/mx#"}}}},
{"properties": {"type": {"enum": ["NS"]}, "records": {"items": {"$ref": "rdata/ns#"}}}},
{"properties": {"type": {"enum": ["PTR"]}, "records": {"items": {"$ref": "rdata/ptr#"}}}},
{"properties": {"type": {"enum": ["SOA"]}, "records": {"items": {"$ref": "rdata/soa#"}}}},
{"properties": {"type": {"enum": ["SPF"]}, "records": {"items": {"$ref": "rdata/spf#"}}}},
{"properties": {"type": {"enum": ["SRV"]}, "records": {"items": {"$ref": "rdata/srv#"}}}},
{"properties": {"type": {"enum": ["SSHFP"]}, "records": {"items": {"$ref": "rdata/sshfp#"}}}},
{"properties": {"type": {"enum": ["TXT"]}, "records": {"items": {"$ref": "rdata/txt#"}}}}
]
}
}
}
}
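
With the records array and the per-type "oneOf" constraints removed, a
recordset body now needs only "name" and "type". A hypothetical minimal
example as a Python dict (the name must sit inside the parent zone):

    body = {
        'recordset': {
            'name': 'www.example.org.',
            'type': 'A',
            'ttl': 3600,  # optional
        }
    }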

View File

@ -71,11 +71,6 @@
"max": 4294967295,
"readOnly": true
},
"notes": {
"type": ["string", "null"],
"description": "Notes",
"maxLength": 100
},
"version": {
"type": "integer",
"description": "Zone version number",

View File

@ -1,3 +1,10 @@
{% for record in records if record.type in ['A', 'AAAA'] %}
{{record.data}} {{record.name}}
# Example input:
# {u'mail.example.com.': {u'A': [u'192.0.2.1', u'192.0.2.2']}, u'www.example.com.': {u'A': [u'192.0.2.1', u'192.0.2.2']}}
{% for name in template_data -%}
{% for rdtype in template_data[name] -%}
# RecordSet: {{name}} / {{rdtype}}
{% for record in template_data[name][rdtype] -%}
{{record}} {{name}}
{% endfor %}
{% endfor %}
{%- endfor %}
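
A minimal rendering sketch (illustration only, not part of this change)
showing how the nested template_data structure above turns into dnsmasq
host lines; the inline template string stands in for the real template file:

    from jinja2 import Template

    # Shape matches the "Example input" comment above.
    template_data = {
        'www.example.com.': {'A': ['192.0.2.1', '192.0.2.2']},
    }

    template = Template(
        "{% for name in template_data %}"
        "{% for rdtype in template_data[name] %}"
        "{% for record in template_data[name][rdtype] %}"
        "{{ record }} {{ name }}\n"
        "{% endfor %}{% endfor %}{% endfor %}"
    )

    print(template.render(template_data=template_data))
    # 192.0.2.1 www.example.com.
    # 192.0.2.2 www.example.com.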

View File

@ -1,3 +0,0 @@
{% for domain in domains %}
zone "{{domain.name}}" { type master; notify no; database "mysqldb {{dns_db_schema}} {{dns_db_table}} {{dns_db_host}} {{dns_db_user}} {{dns_db_password}} {{domain.id}} {{domain.tenant_id}}"; };
{%- endfor %}

View File

@ -353,15 +353,101 @@ class StorageAPI(object):
return self.storage.count_domains(context, criterion)
@contextlib.contextmanager
def create_record(self, context, domain_id, values):
def create_recordset(self, context, domain_id, values):
"""
Create a recordset on a given Domain ID
:param context: RPC Context.
:param domain_id: Domain ID to create the recordset in.
:param values: Values to create the new RecordSet from.
"""
recordset = self.storage.create_recordset(context, domain_id, values)
try:
yield recordset
except Exception:
with excutils.save_and_reraise_exception():
self.storage.delete_recordset(context, recordset['id'])
def get_recordset(self, context, recordset_id):
"""
Get a recordset via ID
:param context: RPC Context.
:param recordset_id: RecordSet ID to get
"""
return self.storage.get_recordset(context, recordset_id)
def find_recordsets(self, context, criterion=None):
"""
Find RecordSets.
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
return self.storage.find_recordsets(context, criterion)
def find_recordset(self, context, criterion=None):
"""
Find a single RecordSet.
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
return self.storage.find_recordset(context, criterion)
@contextlib.contextmanager
def update_recordset(self, context, recordset_id, values):
"""
Update a recordset via ID
:param context: RPC Context
:param recordset_id: RecordSet ID to update
"""
backup = self.storage.get_recordset(context, recordset_id)
recordset = self.storage.update_recordset(
context, recordset_id, values)
try:
yield recordset
except Exception:
with excutils.save_and_reraise_exception():
restore = self._extract_dict_subset(backup, values.keys())
self.storage.update_recordset(context, recordset_id, restore)
@contextlib.contextmanager
def delete_recordset(self, context, recordset_id):
"""
Delete a recordset
:param context: RPC Context
:param recordset_id: RecordSet ID to delete
"""
yield self.storage.get_recordset(context, recordset_id)
self.storage.delete_recordset(context, recordset_id)
def count_recordsets(self, context, criterion=None):
"""
Count recordsets
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
return self.storage.count_recordsets(context, criterion)
@contextlib.contextmanager
def create_record(self, context, domain_id, recordset_id, values):
"""
Create a record on a given Domain ID
:param context: RPC Context.
:param domain_id: Domain ID to create the record in.
:param recordset_id: RecordSet ID to create the record in.
:param values: Values to create the new Record from.
"""
record = self.storage.create_record(context, domain_id, values)
record = self.storage.create_record(context, domain_id, recordset_id,
values)
try:
yield record
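
The create and update wrappers above share a rollback pattern: the storage
write happens first, the result is yielded so the caller can push the change
to the DNS backend, and an exception inside the with-block undoes the storage
write (deleting the new row, or re-applying the backed-up values) before
re-raising. A hedged caller sketch, using names from the central service code
earlier in this change:

    # Illustration only: a backend failure rolls back the new recordset row.
    def create_recordset_safely(storage_api, backend, context, domain, values):
        with storage_api.create_recordset(context, domain['id'], values) as rs:
            # If this raises, StorageAPI deletes the recordset it just created
            # and re-raises the original exception.
            backend.create_recordset(context, domain, rs)
        return rs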

View File

@ -258,6 +258,72 @@ class Storage(Plugin):
:param criterion: Criteria to filter by.
"""
@abc.abstractmethod
def create_recordset(self, context, domain_id, values):
"""
Create a recordset on a given Domain ID
:param context: RPC Context.
:param domain_id: Domain ID to create the recordset in.
:param values: Values to create the new RecordSet from.
"""
@abc.abstractmethod
def get_recordset(self, context, recordset_id):
"""
Get a recordset via ID
:param context: RPC Context.
:param recordset_id: RecordSet ID to get
"""
@abc.abstractmethod
def find_recordsets(self, context, criterion=None):
"""
Find RecordSets.
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
@abc.abstractmethod
def find_recordset(self, context, criterion):
"""
Find a single RecordSet.
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
@abc.abstractmethod
def update_recordset(self, context, recordset_id, values):
"""
Update a recordset via ID
:param context: RPC Context
:param recordset_id: RecordSet ID to update
"""
@abc.abstractmethod
def delete_recordset(self, context, recordset_id):
"""
Delete a recordset
:param context: RPC Context
:param recordset_id: RecordSet ID to delete
"""
@abc.abstractmethod
def count_recordsets(self, context, criterion=None):
"""
Count recordsets
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
@abc.abstractmethod
def create_record(self, context, domain_id, values):
"""

View File

@ -357,6 +357,73 @@ class SQLAlchemyStorage(base.Storage):
return query.count()
# RecordSet Methods
def _find_recordsets(self, context, criterion, one=False):
try:
return self._find(models.RecordSet, context, criterion, one)
except exceptions.NotFound:
raise exceptions.RecordSetNotFound()
def create_recordset(self, context, domain_id, values):
# Fetch the domain as we need the tenant_id
domain = self._find_domains(context, {'id': domain_id}, one=True)
recordset = models.RecordSet()
recordset.update(values)
recordset.tenant_id = domain['tenant_id']
recordset.domain_id = domain_id
try:
recordset.save(self.session)
except exceptions.Duplicate:
raise exceptions.DuplicateRecordSet()
return dict(recordset)
def get_recordset(self, context, recordset_id):
recordset = self._find_recordsets(context, {'id': recordset_id},
one=True)
return dict(recordset)
def find_recordsets(self, context, criterion=None):
recordsets = self._find_recordsets(context, criterion)
return [dict(r) for r in recordsets]
def find_recordset(self, context, criterion):
recordset = self._find_recordsets(context, criterion, one=True)
return dict(recordset)
def update_recordset(self, context, recordset_id, values):
recordset = self._find_recordsets(context, {'id': recordset_id},
one=True)
recordset.update(values)
try:
recordset.save(self.session)
except exceptions.Duplicate:
raise exceptions.DuplicateRecordSet()
return dict(recordset)
def delete_recordset(self, context, recordset_id):
recordset = self._find_recordsets(context, {'id': recordset_id},
one=True)
recordset.delete(self.session)
return dict(recordset)
def count_recordsets(self, context, criterion=None):
query = self.session.query(models.RecordSet)
query = self._apply_criterion(models.RecordSet, query, criterion)
return query.count()
# Record Methods
def _find_records(self, context, criterion, one=False):
try:
@ -364,15 +431,18 @@ class SQLAlchemyStorage(base.Storage):
except exceptions.NotFound:
raise exceptions.RecordNotFound()
def create_record(self, context, domain_id, values):
def create_record(self, context, domain_id, recordset_id, values):
# Fetch the domain as we need the tenant_id
domain = self._find_domains(context, {'id': domain_id}, one=True)
record = models.Record()
# Create and populate the new Record model
record = models.Record()
record.update(values)
record.tenant_id = domain['tenant_id']
record.domain_id = domain_id
record.recordset_id = recordset_id
try:
# Save the new Record model
@ -383,9 +453,6 @@ class SQLAlchemyStorage(base.Storage):
return dict(record)
def find_records(self, context, criterion=None):
if criterion is None:
criterion = {}
records = self._find_records(context, criterion)
return [dict(r) for r in records]
@ -396,9 +463,6 @@ class SQLAlchemyStorage(base.Storage):
return dict(record)
def find_record(self, context, criterion=None):
if criterion is None:
criterion = {}
record = self._find_records(context, criterion, one=True)
return dict(record)

View File

@ -0,0 +1,163 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from sqlalchemy import ForeignKey, Enum, Integer, String, DateTime, Unicode
from sqlalchemy import func
from sqlalchemy.sql import select
from sqlalchemy.schema import Table, Column, MetaData
from migrate import ForeignKeyConstraint
from migrate.changeset.constraint import UniqueConstraint
from designate.openstack.common import timeutils
from designate.openstack.common.uuidutils import generate_uuid
from designate.sqlalchemy.types import UUID
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR',
'SSHFP']
meta = MetaData()
recordsets_table = Table(
'recordsets',
meta,
Column('id', UUID(), default=generate_uuid, primary_key=True),
Column('created_at', DateTime(), default=timeutils.utcnow),
Column('updated_at', DateTime(), onupdate=timeutils.utcnow),
Column('version', Integer(), default=1, nullable=False),
Column('tenant_id', String(36), default=None, nullable=True),
Column('domain_id', UUID, ForeignKey('domains.id'), nullable=False),
Column('name', String(255), nullable=False),
Column('type', Enum(name='record_types', *RECORD_TYPES), nullable=False),
Column('ttl', Integer, default=None, nullable=True),
Column('description', Unicode(160), nullable=True),
UniqueConstraint('domain_id', 'name', 'type', name='unique_recordset'),
mysql_engine='INNODB',
mysql_charset='utf8')
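# NOTE (illustration): this hash matches the reworked hash in the Record
# model -- only (recordset_id, data, priority) are covered, e.g.
# "%s:%s:%s" % (rs_id, '192.0.2.1', None) hashes "<rs_id>:192.0.2.1:None";
# name and type no longer contribute directly, as they are implied by the
# recordset_id.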
def _build_hash(recordset_id, record):
md5 = hashlib.md5()
md5.update("%s:%s:%s" % (recordset_id, record.data, record.priority))
return md5.hexdigest()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
# We need to autoload the domains table for the FK to succeed.
Table('domains', meta, autoload=True)
# Prepare an empty dict to cache (domain_id, name, type) tuples to
# RRSet IDs
cache = {}
# Create the recordsets_table table
recordsets_table.create()
# NOTE(kiall): Since we need a unique UUID for each recordset, and need
# to maintain cross DB compatibility, we're stuck doing this
# in code rather than an
# INSERT INTO recordsets_table SELECT (..) FROM records;
results = select(
columns=[
records_table.c.tenant_id,
records_table.c.domain_id,
records_table.c.name,
records_table.c.type,
func.min(records_table.c.ttl).label('ttl'),
func.min(records_table.c.created_at).label('created_at'),
func.max(records_table.c.updated_at).label('updated_at')
],
group_by=[
records_table.c.domain_id,
records_table.c.name,
records_table.c.type
]
).execute()
for result in results:
# Create the new RecordSet and remember its ID
pk = recordsets_table.insert().execute(
tenant_id=result.tenant_id,
domain_id=result.domain_id,
name=result.name,
type=result.type,
ttl=result.ttl,
created_at=result.created_at,
updated_at=result.updated_at
).inserted_primary_key[0]
# Cache the ID for later
cache_key = "%s.%s.%s" % (result.domain_id, result.name, result.type)
cache[cache_key] = pk
# Add the recordset column to the records table
record_recordset_id = Column('recordset_id', UUID,
default=None,
nullable=True)
record_recordset_id.create(records_table, populate_default=True)
# Fetch all the records
# TODO(kiall): Batch this..
results = select(
columns=[
records_table.c.id,
records_table.c.domain_id,
records_table.c.name,
records_table.c.type,
records_table.c.data,
records_table.c.priority
]
).execute()
# Update each result with the appropriate recordset_id, and refresh
# the hash column to reflect the removal of several fields.
for result in results:
cache_key = "%s.%s.%s" % (result.domain_id, result.name,
result.type)
recordset_id = cache[cache_key]
new_hash = _build_hash(recordset_id, result)
records_table.update()\
.where(records_table.c.id == result.id)\
.values(recordset_id=cache[cache_key], hash=new_hash)\
.execute()
# Now that the records.recordset_id field is populated, let's ensure the
# column is not nullable and is a FK to the recordsets table.
records_table.c.recordset_id.alter(nullable=False)
ForeignKeyConstraint(columns=[records_table.c.recordset_id],
refcolumns=[recordsets_table.c.id],
ondelete='CASCADE',
name='fkey_records_recordset_id').create()
# Finally, drop the now-defunct columns from the records table
records_table.c.name.drop()
records_table.c.type.drop()
records_table.c.ttl.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
raise Exception('There is no undo')

View File

@ -95,28 +95,50 @@ class Domain(SoftDeleteMixin, Base):
nullable=False, server_default='ACTIVE',
default='ACTIVE')
records = relationship('Record', backref=backref('domain', uselist=False),
lazy='dynamic', cascade="all, delete-orphan",
passive_deletes=True)
recordsets = relationship('RecordSet',
backref=backref('domain', uselist=False),
cascade="all, delete-orphan",
passive_deletes=True)
parent_domain_id = Column(UUID, ForeignKey('domains.id'), default=None,
nullable=True)
class RecordSet(Base):
__tablename__ = 'recordsets'
__table_args__ = (
UniqueConstraint('domain_id', 'name', 'type', name='unique_recordset'),
{'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
)
tenant_id = Column(String(36), default=None, nullable=True)
domain_id = Column(UUID, ForeignKey('domains.id', ondelete='CASCADE'),
nullable=False)
name = Column(String(255), nullable=False)
type = Column(Enum(name='record_types', *RECORD_TYPES), nullable=False)
ttl = Column(Integer, default=None, nullable=True)
description = Column(Unicode(160), nullable=True)
records = relationship('Record',
backref=backref('recordset', uselist=False),
cascade="all, delete-orphan",
passive_deletes=True)
class Record(Base):
__tablename__ = 'records'
tenant_id = Column(String(36), default=None, nullable=True)
domain_id = Column(UUID, ForeignKey('domains.id', ondelete='CASCADE'),
nullable=False)
tenant_id = Column(String(36), default=None, nullable=True)
type = Column(Enum(name='record_types', *RECORD_TYPES), nullable=False)
name = Column(String(255), nullable=False)
description = Column(Unicode(160), nullable=True)
recordset_id = Column(UUID,
ForeignKey('recordsets.id', ondelete='CASCADE'),
nullable=False)
data = Column(Text, nullable=False)
priority = Column(Integer, default=None, nullable=True)
ttl = Column(Integer, default=None, nullable=True)
description = Column(Unicode(160), nullable=True)
hash = Column(String(32), nullable=False, unique=True)
@ -134,8 +156,7 @@ class Record(Base):
Calculates the hash of the record, used to ensure record uniqueness.
"""
md5 = hashlib.md5()
md5.update("%s:%s:%s:%s:%s" % (self.domain_id, self.name, self.type,
self.data, self.priority))
md5.update("%s:%s:%s" % (self.recordset_id, self.data, self.priority))
self.hash = md5.hexdigest()
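
For orientation (not part of this commit), a minimal sketch of how the new
models relate, using the construct-then-update idiom from the SQLAlchemy
driver above; domain is assumed to be an already-loaded domain:

    # Hypothetical wiring of a Record into a RecordSet via the relationships.
    recordset = RecordSet()
    recordset.update({'domain_id': domain['id'],
                      'tenant_id': domain['tenant_id'],
                      'name': 'www.example.org.', 'type': 'A', 'ttl': 3600})

    record = Record()
    record.update({'data': '192.0.2.1',
                   'domain_id': domain['id'],
                   'tenant_id': domain['tenant_id']})

    # The records relationship (and its recordset backref) links the two;
    # recordset_id is filled in when the session flushes, and deleting the
    # recordset cascades to its records.
    recordset.records.append(record)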

View File

@ -129,10 +129,34 @@ class TestCase(test.BaseTestCase):
'email': 'example@example.org',
}]
record_fixtures = [
{'name': 'www.%s', 'type': 'A', 'data': '192.0.2.1'},
{'name': 'mail.%s', 'type': 'A', 'data': '192.0.2.2'}
]
recordset_fixtures = {
'A': [
{'name': 'mail.%s', 'type': 'A'},
{'name': 'www.%s', 'type': 'A'},
],
'MX': [
{'name': 'mail.%s', 'type': 'MX'},
],
'SRV': [
{'name': '_sip._tcp.%s', 'type': 'SRV'},
{'name': '_sip._udp.%s', 'type': 'SRV'},
],
}
record_fixtures = {
'A': [
{'data': '192.0.2.1'},
{'data': '192.0.2.2'}
],
'MX': [
{'data': 'mail.example.org.', 'priority': 5},
{'data': 'mail.example.com.', 'priority': 10},
],
'SRV': [
{'data': '0 5060 server1.example.org.', 'priority': 5},
{'data': '1 5060 server2.example.org.', 'priority': 10},
]
}
def setUp(self):
super(TestCase, self).setUp()
@ -244,8 +268,9 @@ class TestCase(test.BaseTestCase):
_values.update(values)
return _values
def get_record_fixture(self, domain_name, fixture=0, values={}):
_values = copy.copy(self.record_fixtures[fixture])
def get_recordset_fixture(self, domain_name, type='A', fixture=0,
values={}):
_values = copy.copy(self.recordset_fixtures[type][fixture])
_values.update(values)
try:
@ -255,6 +280,11 @@ class TestCase(test.BaseTestCase):
return _values
def get_record_fixture(self, recordset_type, fixture=0, values={}):
_values = copy.copy(self.record_fixtures[recordset_type][fixture])
_values.update(values)
return _values
def get_zonefile_fixture(self, variant=None):
if variant is None:
f = 'example.com.zone'
@ -302,13 +332,26 @@ class TestCase(test.BaseTestCase):
return self.central_service.create_domain(context, values=values)
def create_record(self, domain, **kwargs):
def create_recordset(self, domain, type='A', **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_record_fixture(domain['name'], fixture=fixture,
values = self.get_recordset_fixture(domain['name'], type=type,
fixture=fixture,
values=kwargs)
return self.central_service.create_recordset(context,
domain['id'],
values=values)
def create_record(self, domain, recordset, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_record_fixture(recordset['type'], fixture=fixture,
values=kwargs)
return self.central_service.create_record(context, domain['id'],
return self.central_service.create_record(context,
domain['id'],
recordset['id'],
values=values)

View File

@ -29,9 +29,32 @@ class ApiV1RecordsTest(ApiV1Test):
super(ApiV1RecordsTest, self).setUp()
self.domain = self.create_domain()
self.recordset = self.create_recordset(self.domain, 'A')
def test_create_record(self):
fixture = self.get_record_fixture(self.domain['name'], 0)
recordset_fixture = self.get_recordset_fixture(
self.domain['name'])
fixture = self.get_record_fixture(recordset_fixture['type'])
fixture.update({
'name': recordset_fixture['name'],
'type': recordset_fixture['type'],
})
# Create a record
response = self.post('domains/%s/records' % self.domain['id'],
data=fixture)
self.assertIn('id', response.json)
self.assertIn('name', response.json)
self.assertEqual(response.json['name'], fixture['name'])
def test_create_record_existing_recordset(self):
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
# Create a record
response = self.post('domains/%s/records' % self.domain['id'],
@ -43,15 +66,25 @@ class ApiV1RecordsTest(ApiV1Test):
@patch.object(central_service.Service, 'create_record')
def test_create_record_trailing_slash(self, mock):
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
# Create a record with a trailing slash
self.post('domains/%s/records/' % self.domain['id'],
data=self.get_record_fixture(self.domain['name'], 0))
data=fixture)
# verify that the central service is called
self.assertTrue(mock.called)
def test_create_record_junk(self):
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
# Add a junk property
fixture['junk'] = 'Junk Field'
@ -61,7 +94,11 @@ class ApiV1RecordsTest(ApiV1Test):
status_code=400)
def test_create_record_utf_description(self):
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
#Add a UTF-8 riddled description
fixture['description'] = "utf-8:2H₂+O₂⇌2H₂O,R=4.7kΩ,⌀200mm∮E⋅da=Q,n" \
@ -71,9 +108,13 @@ class ApiV1RecordsTest(ApiV1Test):
self.post('domains/%s/records' % self.domain['id'], data=fixture)
def test_create_record_description_too_long(self):
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
#Add a description that is too long
# Add a description that is too long
fixture['description'] = "x" * 161
# Create a record, Ensuring it Fails with a 400
@ -81,8 +122,13 @@ class ApiV1RecordsTest(ApiV1Test):
status_code=400)
def test_create_record_negative_ttl(self):
# Create a record
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
# Set the TTL to a negative value
fixture['ttl'] = -1
# Create a record, Ensuring it Fails with a 400
@ -92,7 +138,11 @@ class ApiV1RecordsTest(ApiV1Test):
@patch.object(central_service.Service, 'create_record',
side_effect=rpc_common.Timeout())
def test_create_record_timeout(self, _):
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
# Create a record
self.post('domains/%s/records' % self.domain['id'], data=fixture,
@ -100,8 +150,11 @@ class ApiV1RecordsTest(ApiV1Test):
def test_create_wildcard_record(self):
# Prepare a record
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture['name'] = '*.%s' % fixture['name']
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': '*.%s' % self.recordset['name'],
'type': self.recordset['type'],
})
# Create a record
response = self.post('domains/%s/records' % self.domain['id'],
@ -112,12 +165,14 @@ class ApiV1RecordsTest(ApiV1Test):
self.assertEqual(response.json['name'], fixture['name'])
def test_create_srv_record(self):
# Prepare a record
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture['type'] = 'SRV'
fixture['name'] = '_sip._udp.%s' % fixture['name']
fixture['priority'] = 10
fixture['data'] = '0 5060 sip.%s' % self.domain['name']
recordset_fixture = self.get_recordset_fixture(
self.domain['name'], 'SRV')
fixture = self.get_record_fixture(recordset_fixture['type'])
fixture.update({
'name': recordset_fixture['name'],
'type': recordset_fixture['type'],
})
# Create a record
response = self.post('domains/%s/records' % self.domain['id'],
@ -130,11 +185,14 @@ class ApiV1RecordsTest(ApiV1Test):
self.assertEqual(response.json['data'], fixture['data'])
def test_create_invalid_data_srv_record(self):
# Prepare a record
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture['type'] = 'SRV'
fixture['name'] = '_sip._udp.%s' % fixture['name']
fixture['priority'] = 10
recordset_fixture = self.get_recordset_fixture(
self.domain['name'], 'SRV')
fixture = self.get_record_fixture(recordset_fixture['type'])
fixture.update({
'name': recordset_fixture['name'],
'type': recordset_fixture['type'],
})
invalid_datas = [
'I 5060 sip.%s' % self.domain['name'],
@ -152,17 +210,20 @@ class ApiV1RecordsTest(ApiV1Test):
status_code=400)
def test_create_invalid_name_srv_record(self):
# Prepare a record
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture['type'] = 'SRV'
fixture['priority'] = 10
fixture['data'] = '0 5060 sip.%s' % self.domain['name']
recordset_fixture = self.get_recordset_fixture(
self.domain['name'], 'SRV')
fixture = self.get_record_fixture(recordset_fixture['type'])
fixture.update({
'name': recordset_fixture['name'],
'type': recordset_fixture['type'],
})
invalid_names = [
'%s' % fixture['name'],
'_udp.%s' % fixture['name'],
'sip._udp.%s' % fixture['name'],
'_sip.udp.%s' % fixture['name'],
'%s' % self.domain['name'],
'_udp.%s' % self.domain['name'],
'sip._udp.%s' % self.domain['name'],
'_sip.udp.%s' % self.domain['name'],
]
for invalid_name in invalid_names:
@ -174,7 +235,11 @@ class ApiV1RecordsTest(ApiV1Test):
def test_create_invalid_name(self):
# Prepare a record
fixture = self.get_record_fixture(self.domain['name'], 0)
fixture = self.get_record_fixture(self.recordset['type'])
fixture.update({
'name': self.recordset['name'],
'type': self.recordset['type'],
})
invalid_names = [
'org',
@ -201,7 +266,7 @@ class ApiV1RecordsTest(ApiV1Test):
self.assertEqual(0, len(response.json['records']))
# Create a record
self.create_record(self.domain)
self.create_record(self.domain, self.recordset)
response = self.get('domains/%s/records' % self.domain['id'])
@ -209,7 +274,7 @@ class ApiV1RecordsTest(ApiV1Test):
self.assertEqual(1, len(response.json['records']))
# Create a second record
self.create_record(self.domain, fixture=1)
self.create_record(self.domain, self.recordset, fixture=1)
response = self.get('domains/%s/records' % self.domain['id'])
@ -239,18 +304,20 @@ class ApiV1RecordsTest(ApiV1Test):
def test_get_record(self):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
response = self.get('domains/%s/records/%s' % (self.domain['id'],
record['id']))
self.assertIn('id', response.json)
self.assertEqual(response.json['id'], record['id'])
self.assertEqual(response.json['name'], self.recordset['name'])
self.assertEqual(response.json['type'], self.recordset['type'])
@patch.object(central_service.Service, 'get_record')
@patch.object(central_service.Service, 'get_recordset')
def test_get_record_trailing_slash(self, mock):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
self.get('domains/%s/records/%s/' % (self.domain['id'],
record['id']))
@ -260,26 +327,44 @@ class ApiV1RecordsTest(ApiV1Test):
def test_update_record(self):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
data = {'name': 'prefix-%s' % record['name']}
# Fetch another fixture to use in the update
fixture = self.get_record_fixture(self.recordset['type'], fixture=1)
# Update the record
data = {'data': fixture['data']}
response = self.put('domains/%s/records/%s' % (self.domain['id'],
record['id']),
data=data)
self.assertIn('id', response.json)
self.assertEqual(response.json['id'], record['id'])
self.assertEqual(response.json['data'], fixture['data'])
self.assertEqual(response.json['type'], self.recordset['type'])
self.assertIn('name', response.json)
self.assertEqual(response.json['name'], 'prefix-%s' % record['name'])
def test_update_record_ttl(self):
# Create a record
record = self.create_record(self.domain, self.recordset)
# Update the record
data = {'ttl': 100}
response = self.put('domains/%s/records/%s' % (self.domain['id'],
record['id']),
data=data)
self.assertIn('id', response.json)
self.assertEqual(response.json['id'], record['id'])
self.assertEqual(response.json['data'], record['data'])
self.assertEqual(response.json['type'], self.recordset['type'])
self.assertEqual(response.json['ttl'], 100)
@patch.object(central_service.Service, 'update_record')
def test_update_record_trailing_slash(self, mock):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
data = {'name': 'prefix-%s' % record['name']}
data = {'ttl': 100}
self.put('domains/%s/records/%s/' % (self.domain['id'],
record['id']),
@ -290,27 +375,27 @@ class ApiV1RecordsTest(ApiV1Test):
def test_update_record_junk(self):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
data = {'name': 'prefix-%s' % record['name'], 'junk': 'Junk Field'}
data = {'ttl': 100, 'junk': 'Junk Field'}
self.put('domains/%s/records/%s' % (self.domain['id'], record['id']),
data=data, status_code=400)
def test_update_record_outside_domain_fail(self):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
data = {'name': 'test.someotherdomain.com'}
data = {'name': 'test.someotherdomain.com.'}
self.put('domains/%s/records/%s' % (self.domain['id'], record['id']),
data=data, status_code=400)
@patch.object(central_service.Service, 'update_record',
@patch.object(central_service.Service, 'get_domain',
side_effect=rpc_common.Timeout())
def test_update_record_timeout(self, _):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
data = {'name': 'test.example.org.'}
@ -351,20 +436,20 @@ class ApiV1RecordsTest(ApiV1Test):
def test_delete_record(self):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
self.delete('domains/%s/records/%s' % (self.domain['id'],
record['id']))
# Esnure we can no longer fetch the record
# Ensure we can no longer fetch the record
self.get('domains/%s/records/%s' % (self.domain['id'],
record['id']),
status_code=404)
@patch.object(central_service.Service, 'delete_record')
@patch.object(central_service.Service, 'get_domain')
def test_delete_record_trailing_slash(self, mock):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
self.delete('domains/%s/records/%s/' % (self.domain['id'],
record['id']))
@ -372,11 +457,11 @@ class ApiV1RecordsTest(ApiV1Test):
# verify that the central service is called
self.assertTrue(mock.called)
@patch.object(central_service.Service, 'delete_record',
@patch.object(central_service.Service, 'get_domain',
side_effect=rpc_common.Timeout())
def test_delete_record_timeout(self, _):
# Create a record
record = self.create_record(self.domain)
record = self.create_record(self.domain, self.recordset)
self.delete('domains/%s/records/%s' % (self.domain['id'],
record['id']),

View File

@ -0,0 +1,288 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from designate import exceptions
from designate.central import service as central_service
from designate.openstack.common.rpc import common as rpc_common
from designate.tests.test_api.test_v2 import ApiV2TestCase
class ApiV2RecordSetsTest(ApiV2TestCase):
def setUp(self):
super(ApiV2RecordSetsTest, self).setUp()
# Create a domain
self.domain = self.create_domain()
def test_create_recordset(self):
# Create a recordset
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], {'recordset': fixture})
# Check the headers are what we expect
self.assertEqual(201, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('recordset', response.json)
self.assertIn('links', response.json['recordset'])
self.assertIn('self', response.json['recordset']['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json['recordset'])
self.assertIn('created_at', response.json['recordset'])
self.assertIsNone(response.json['recordset']['updated_at'])
for k in fixture:
self.assertEqual(fixture[k], response.json['recordset'][k])
def test_create_recordset_validation(self):
# NOTE: The schemas should be tested separately from the API. So we
# don't need to test every variation via the API itself.
# Fetch a fixture
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
# Add a junk field to the wrapper
body = {'recordset': fixture, 'junk': 'Junk Field'}
# Ensure it fails with a 400
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], body, status=400)
self.assertEqual(400, response.status_int)
# Add a junk field to the body
fixture['junk'] = 'Junk Field'
body = {'recordset': fixture}
# Ensure it fails with a 400
response = self.client.post_json(
'/zones/%s/recordsets' % self.domain['id'], body, status=400)
@patch.object(central_service.Service, 'create_recordset',
side_effect=rpc_common.Timeout())
def test_create_recordset_timeout(self, _):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = {'recordset': fixture}
self.client.post_json('/zones/%s/recordsets' % self.domain['id'], body,
status=504)
@patch.object(central_service.Service, 'create_recordset',
side_effect=exceptions.DuplicateDomain())
def test_create_recordset_duplicate(self, _):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = {'recordset': fixture}
self.client.post_json('/zones/%s/recordsets' % self.domain['id'], body,
status=409)
def test_create_recordset_invalid_domain(self):
fixture = self.get_recordset_fixture(self.domain['name'], fixture=0)
body = {'recordset': fixture}
self.client.post_json(
'/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets', body,
status=404)
def test_get_recordsets(self):
response = self.client.get('/zones/%s/recordsets' % self.domain['id'])
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('recordsets', response.json)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# We should start with 0 recordsets
self.assertEqual(0, len(response.json['recordsets']))
# Test with 1 recordset
self.create_recordset(self.domain)
response = self.client.get('/zones/%s/recordsets' % self.domain['id'])
self.assertIn('recordsets', response.json)
self.assertEqual(1, len(response.json['recordsets']))
# test with 2 recordsets
self.create_recordset(self.domain, fixture=1)
response = self.client.get('/zones/%s/recordsets' % self.domain['id'])
self.assertIn('recordsets', response.json)
self.assertEqual(2, len(response.json['recordsets']))
@patch.object(central_service.Service, 'find_recordsets',
side_effect=rpc_common.Timeout())
def test_get_recordsets_timeout(self, _):
self.client.get(
'/zones/ba751950-6193-11e3-949a-0800200c9a66/recordsets',
status=504)
def test_get_recordset(self):
# Create a recordset
recordset = self.create_recordset(self.domain)
url = '/zones/%s/recordsets/%s' % (self.domain['id'], recordset['id'])
response = self.client.get(url)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('recordset', response.json)
self.assertIn('links', response.json['recordset'])
self.assertIn('self', response.json['recordset']['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json['recordset'])
self.assertIn('created_at', response.json['recordset'])
self.assertIsNone(response.json['recordset']['updated_at'])
self.assertEqual(recordset['name'], response.json['recordset']['name'])
self.assertEqual(recordset['type'], response.json['recordset']['type'])
@patch.object(central_service.Service, 'get_recordset',
side_effect=rpc_common.Timeout())
def test_get_recordset_timeout(self, _):
self.client.get('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c'
'9a66' % self.domain['id'],
headers={'Accept': 'application/json'},
status=504)
@patch.object(central_service.Service, 'get_recordset',
side_effect=exceptions.RecordSetNotFound())
def test_get_recordset_missing(self, _):
self.client.get('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c'
'9a66' % self.domain['id'],
headers={'Accept': 'application/json'},
status=404)
def test_get_recordset_invalid_id(self):
self.skip('We don\'t guard against this in APIv2 yet')
def test_update_recordset(self):
# Create a recordset
recordset = self.create_recordset(self.domain)
# Prepare an update body
body = {'recordset': {'description': 'Tester'}}
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
response = self.client.patch_json(url, body, status=200)
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('recordset', response.json)
self.assertIn('links', response.json['recordset'])
self.assertIn('self', response.json['recordset']['links'])
# Check the values returned are what we expect
self.assertIn('id', response.json['recordset'])
self.assertIsNotNone(response.json['recordset']['updated_at'])
self.assertEqual('Tester', response.json['recordset']['description'])
def test_update_recordset_validation(self):
# NOTE: The schemas should be tested separately from the API, so we
# don't need to test every variation via the API itself.
# Create a recordset
recordset = self.create_recordset(self.domain)
# Prepare an update body with junk in the wrapper
body = {'recordset': {'description': 'Tester'}, 'junk': 'Junk Field'}
# Ensure it fails with a 400
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self.client.patch_json(url, body, status=400)
# Prepare an update body with junk in the body
body = {'recordset': {'description': 'Tester', 'junk': 'Junk Field'}}
# Ensure it fails with a 400
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self.client.patch_json(url, body, status=400)
@patch.object(central_service.Service, 'get_recordset',
side_effect=exceptions.DuplicateRecordSet())
def test_update_recordset_duplicate(self, _):
# Prepare an update body
body = {'recordset': {'description': 'Tester'}}
# Ensure it fails with a 409
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self.client.patch_json(url, body, status=409)
@patch.object(central_service.Service, 'get_recordset',
side_effect=rpc_common.Timeout())
def test_update_recordset_timeout(self, _):
# Prepare an update body
body = {'recordset': {'description': 'Tester'}}
# Ensure it fails with a 504
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self.client.patch_json(url, body, status=504)
@patch.object(central_service.Service, 'get_recordset',
side_effect=exceptions.RecordSetNotFound())
def test_update_recordset_missing(self, _):
# Prepare an update body
body = {'recordset': {'description': 'Tester'}}
# Ensure it fails with a 404
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self.client.patch_json(url, body, status=404)
def test_update_recordset_invalid_id(self):
self.skip('We don\'t guard against this in APIv2 yet')
def test_delete_recordset(self):
recordset = self.create_recordset(self.domain)
url = '/zones/%s/recordsets/%s' % (recordset['domain_id'],
recordset['id'])
self.client.delete(url, status=204)
@patch.object(central_service.Service, 'delete_recordset',
side_effect=rpc_common.Timeout())
def test_delete_recordset_timeout(self, _):
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self.client.delete(url, status=504)
@patch.object(central_service.Service, 'delete_recordset',
side_effect=exceptions.RecordSetNotFound())
def test_delete_recordset_missing(self, _):
url = ('/zones/%s/recordsets/ba751950-6193-11e3-949a-0800200c9a66'
% (self.domain['id']))
self.client.delete(url, status=404)
def test_delete_recordset_invalid_id(self):
self.skip('We don\'t guard against this in APIv2 yet')
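# NOTE: For orientation, a minimal sketch of the create/list round-trip the
# tests above exercise (illustrative names and values, not the project's
# fixtures):
#
#     body = {'recordset': {'name': 'www.example.org.', 'type': 'A'}}
#     self.client.post_json('/zones/%s/recordsets' % self.domain['id'], body)
#     response = self.client.get('/zones/%s/recordsets' % self.domain['id'])
#     self.assertEqual(1, len(response.json['recordsets']))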

View File

@ -24,8 +24,6 @@ class BackendTestCase(tests.TestCase, BackendTestMixin):
('bind9', dict(backend_driver='bind9', group='service:agent')),
('dnsmasq', dict(backend_driver='dnsmasq', group='service:agent')),
('fake', dict(backend_driver='fake', group='service:agent')),
('mysqlbind9', dict(backend_driver='mysqlbind9',
group='service:agent')),
('nsd4slave', dict(backend_driver='nsd4slave', group='service:agent',
server_fixture=NSD4Fixture)),
('powerdns', dict(backend_driver='powerdns', group='service:agent'))
@ -33,6 +31,8 @@ class BackendTestCase(tests.TestCase, BackendTestMixin):
def setUp(self):
super(BackendTestCase, self).setUp()
if hasattr(self, 'server_fixture'):
self.useFixture(self.server_fixture())
self.config(backend_driver=self.backend_driver, group=self.group)

View File

@ -0,0 +1,41 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Artom Lifshitz <artom.lifshitz@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate import tests
from designate.tests.test_backend import BackendTestMixin
class DnsmasqBackendTestCase(tests.TestCase, BackendTestMixin):
def setUp(self):
super(DnsmasqBackendTestCase, self).setUp()
self.config(backend_driver='dnsmasq', group='service:agent')
self.central_service = self.start_service('central')
self.backend = self.get_backend_driver()
def test_write_zonefile(self):
domain = self.create_domain()
recordset_one = self.create_recordset(domain, fixture=0)
recordset_two = self.create_recordset(domain, fixture=1)
self.create_record(domain, recordset_one, fixture=0)
self.create_record(domain, recordset_one, fixture=1)
self.create_record(domain, recordset_two, fixture=0)
self.create_record(domain, recordset_two, fixture=1)
self.backend._write_zonefile(domain)
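# NOTE: This is a smoke test only -- it checks that the dnsmasq backend can
# render a zonefile for a domain containing two recordsets with two records
# each, without asserting on the file's contents.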

View File

@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import call
from mock import MagicMock
from designate import exceptions
@ -101,6 +101,7 @@ class MultiBackendTestCase(tests.TestCase, BackendTestMixin):
def test_delete_domain(self):
context = self.get_context()
domain = self.get_domain_fixture()
# Since multi's delete fetches the domain from central to be able to
# recreate it if something goes wrong, create the domain first
self.backend.central_service.create_server(
@ -146,29 +147,80 @@ class MultiBackendTestCase(tests.TestCase, BackendTestMixin):
call.master.delete_server(context, server),
call.slave.create_server(context, server)])
def test_create_recordset(self):
context = self.get_context()
domain = mock.sentinel.domain
recordset = mock.sentinel.recordset
self.backend.create_recordset(context, domain, recordset)
self.assertEqual(
self.backends.mock_calls,
[call.master.create_recordset(context, domain, recordset)])
def test_update_recordset(self):
context = self.get_context()
domain = mock.sentinel.domain
recordset = mock.sentinel.recordset
self.backend.update_recordset(context, domain, recordset)
self.assertEqual(
self.backends.mock_calls,
[call.master.update_recordset(context, domain, recordset)])
def test_delete_recordset(self):
context = self.get_context()
domain = mock.sentinel.domain
recordset = mock.sentinel.recordset
self.backend.delete_recordset(context, domain, recordset)
self.assertEqual(
self.backends.mock_calls,
[call.master.delete_recordset(context, domain, recordset)])
def test_create_record(self):
context = self.get_context()
domain = self.get_domain_fixture()
record = self.get_record_fixture(domain['name'])
self.backend.create_record(context, domain, record)
self.assertEqual(self.backends.mock_calls,
[call.master.create_record(context, domain, record)])
domain = mock.sentinel.domain
recordset = mock.sentinel.recordset
record = mock.sentinel.record
self.backend.create_record(context, domain, recordset, record)
self.assertEqual(
self.backends.mock_calls,
[call.master.create_record(context, domain, recordset, record)])
def test_update_record(self):
context = self.get_context()
domain = self.get_domain_fixture()
record = self.get_record_fixture(domain['name'])
self.backend.update_record(context, domain, record)
self.assertEqual(self.backends.mock_calls,
[call.master.update_record(context, domain, record)])
domain = mock.sentinel.domain
recordset = mock.sentinel.recordset
record = mock.sentinel.record
self.backend.update_record(context, domain, recordset, record)
self.assertEqual(
self.backends.mock_calls,
[call.master.update_record(context, domain, recordset, record)])
def test_delete_record(self):
context = self.get_context()
domain = self.get_domain_fixture()
record = self.get_record_fixture(domain['name'])
self.backend.delete_record(context, domain, record)
self.assertEqual(self.backends.mock_calls,
[call.master.delete_record(context, domain, record)])
domain = mock.sentinel.domain
recordset = mock.sentinel.recordset
record = mock.sentinel.record
self.backend.delete_record(context, domain, recordset, record)
self.assertEqual(
self.backends.mock_calls,
[call.master.delete_record(context, domain, recordset, record)])
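# NOTE: As the mock_calls assertions above show, the multi backend forwards
# recordset and record operations to the master backend only; no calls are
# made against the slave for these operations.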
def test_ping(self):
context = self.get_context()

View File

@ -26,11 +26,7 @@ from designate import exceptions
from designate import tests
from designate.tests.test_backend import BackendTestMixin
from designate.tests import resources
# impl_nsd4slave needs to register its options before being instantiated.
# Import it and pretend to use it to avoid flake8 unused import errors.
from designate.backend import impl_nsd4slave
impl_nsd4slave
class NSD4ServerStub:

File diff suppressed because it is too large

View File

@ -41,9 +41,11 @@ class NeutronFloatingHandlerTest(TestCase, NotificationHandlerMixin):
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'domain_id': self.domain_id}
# Ensure we start with 0 records
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertEqual(0, len(records))
@ -51,9 +53,9 @@ class NeutronFloatingHandlerTest(TestCase, NotificationHandlerMixin):
# Ensure we now have exactly 1 record
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertEqual(len(records), 1)
self.assertEqual(1, len(records))
def test_floatingip_disassociate(self):
start_event_type = 'floatingip.update.end'
@ -68,15 +70,18 @@ class NeutronFloatingHandlerTest(TestCase, NotificationHandlerMixin):
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'domain_id': self.domain_id}
# Ensure we start with at least 1 record
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertTrue(len(records) >= 1)
self.assertEqual(1, len(records))
self.plugin.process_notification(event_type, fixture['payload'])
# Ensure we now have exactly 0 records
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertEqual(0, len(records))

View File

@ -40,9 +40,11 @@ class NovaFixedHandlerTest(TestCase, NotificationHandlerMixin):
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'domain_id': self.domain_id}
# Ensure we start with 0 records
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertEqual(0, len(records))
@ -50,9 +52,9 @@ class NovaFixedHandlerTest(TestCase, NotificationHandlerMixin):
# Ensure we now have exactly 1 record
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertEqual(len(records), 1)
self.assertEqual(1, len(records))
def test_instance_delete_start(self):
# Prepare for the test
@ -68,16 +70,18 @@ class NovaFixedHandlerTest(TestCase, NotificationHandlerMixin):
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'domain_id': self.domain_id}
# Ensure we start with at least 1 record
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertTrue(len(records) >= 1)
self.assertEqual(1, len(records))
self.plugin.process_notification(event_type, fixture['payload'])
# Ensure we now have exactly 0 records
records = self.central_service.find_records(self.admin_context,
self.domain_id)
criterion)
self.assertEqual(0, len(records))

View File

@ -42,10 +42,12 @@ class QuotaTestCase(tests.TestCase):
quotas = self.quota.get_quotas(context, 'DefaultQuotaTenant')
self.assertIsNotNone(quotas)
self.assertEqual(quotas, {
self.assertEqual({
'domains': cfg.CONF.quota_domains,
'domain_records': cfg.CONF.quota_domain_records
})
'domain_recordsets': cfg.CONF.quota_domain_recordsets,
'domain_records': cfg.CONF.quota_domain_records,
'recordset_records': cfg.CONF.quota_recordset_records,
}, quotas)
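# NOTE: Illustrative only -- the new per-recordset quotas can be overridden
# in a test the same way as the existing options (values are hypothetical):
#
#     self.config(quota_domain_recordsets=100,
#                 quota_recordset_records=20)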
def test_limit_check_unknown(self):
context = self.get_admin_context()

View File

@ -13,9 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from designate.openstack.common import log as logging
from designate import exceptions
from designate import schema
from designate.tests import TestCase
@ -32,11 +30,7 @@ class SchemasV2Test(TestCase):
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'A',
'records': [
{'address': "127.0.0.1"},
{'address': "127.0.0.2"},
]
'type': 'A'
}
})
@ -46,77 +40,6 @@ class SchemasV2Test(TestCase):
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'MX',
'records': [
{'preference': 10, 'exchange': 'mail.example.com.'},
]
'type': 'MX'
}
})
with testtools.ExpectedException(exceptions.InvalidObject):
# Fail Expected - Empty Records Array
validator.validate({
'recordset': {
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'A',
'records': []
}
})
with testtools.ExpectedException(exceptions.InvalidObject):
# Fail Expected - No Records
validator.validate({
'recordset': {
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'A'
}
})
with testtools.ExpectedException(exceptions.InvalidObject):
# Fail Expected - MX records in an A RRset
validator.validate({
'recordset': {
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'A',
'records': [
{'address': "127.0.0.1"},
{'preference': 10, 'exchange': 'mail.example.com.'},
]
}
})
with testtools.ExpectedException(exceptions.InvalidObject):
# Fail Expected - A records in an MX RRset
validator.validate({
'recordset': {
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'MX',
'records': [
{'preference': 10, 'exchange': 'mail.example.com.'},
{'address': "127.0.0.1"},
]
}
})
with testtools.ExpectedException(exceptions.InvalidObject):
# Fail Expected - AAAA records in an A RRset
validator.validate({
'recordset': {
'id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'zone_id': 'b22d09e0-efa3-11e2-b778-0800200c9a66',
'name': 'example.com.',
'type': 'A',
'records': [
{'address': "127.0.0.1"},
{'address': "::1"},
]
}
})

View File

@ -58,14 +58,24 @@ class StorageTestCase(object):
return fixture, self.storage.create_domain(context, fixture)
def create_record(self, domain, fixture=0, values={}, context=None):
def create_recordset(self, domain, type='A', fixture=0, values={},
context=None):
if not context:
context = self.admin_context
fixture = self.get_record_fixture(domain['name'], fixture, values)
return fixture, self.storage.create_record(context,
domain['id'],
fixture)
fixture = self.get_recordset_fixture(domain['name'], type, fixture,
values)
return fixture, self.storage.create_recordset(
context, domain['id'], fixture)
def create_record(self, domain, recordset, fixture=0, values={},
context=None):
if not context:
context = self.admin_context
fixture = self.get_record_fixture(recordset['type'], fixture, values)
return fixture, self.storage.create_record(
context, domain['id'], recordset['id'], fixture)
# Quota Tests
def test_create_quota(self):
@ -756,16 +766,221 @@ class StorageTestCase(object):
# well, did we get 1?
self.assertEqual(domains, 1)
def test_create_record(self):
def test_create_recordset(self):
domain_fixture, domain = self.create_domain()
values = {
'name': 'www.%s' % domain['name'],
'type': 'A',
'type': 'A'
}
result = self.storage.create_recordset(self.admin_context,
domain['id'],
values=values)
self.assertIsNotNone(result['id'])
self.assertIsNotNone(result['created_at'])
self.assertIsNone(result['updated_at'])
self.assertEqual(result['name'], values['name'])
self.assertEqual(result['type'], values['type'])
def test_create_recordset_duplicate(self):
_, domain = self.create_domain()
# Create the First RecordSet
self.create_recordset(domain)
with testtools.ExpectedException(exceptions.DuplicateRecordSet):
# Attempt to create the second/duplicate recordset
self.create_recordset(domain)
def test_find_recordsets(self):
_, domain = self.create_domain()
criterion = {'domain_id': domain['id']}
actual = self.storage.find_recordsets(self.admin_context, criterion)
self.assertEqual(actual, [])
# Create a single recordset
_, recordset_one = self.create_recordset(domain, fixture=0)
actual = self.storage.find_recordsets(self.admin_context, criterion)
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['name'], recordset_one['name'])
self.assertEqual(actual[0]['type'], recordset_one['type'])
# Create a second recordset
_, recordset_two = self.create_recordset(domain, fixture=1)
actual = self.storage.find_recordsets(self.admin_context, criterion)
self.assertEqual(len(actual), 2)
self.assertEqual(actual[1]['name'], recordset_two['name'])
self.assertEqual(actual[1]['type'], recordset_two['type'])
def test_find_recordsets_criterion(self):
_, domain = self.create_domain()
_, recordset_one = self.create_recordset(domain, type='A', fixture=0)
self.create_recordset(domain, fixture=1)
criterion = dict(
domain_id=domain['id'],
name=recordset_one['name'],
)
results = self.storage.find_recordsets(self.admin_context,
criterion)
self.assertEqual(len(results), 1)
criterion = dict(
domain_id=domain['id'],
type='A',
)
results = self.storage.find_recordsets(self.admin_context,
criterion)
self.assertEqual(len(results), 2)
def test_find_recordsets_criterion_wildcard(self):
_, domain = self.create_domain()
values = {'name': 'one.%s' % domain['name']}
self.create_recordset(domain, fixture=0, values=values)
criterion = dict(
domain_id=domain['id'],
name="%%%s" % domain['name'],
)
results = self.storage.find_recordsets(self.admin_context, criterion)
self.assertEqual(len(results), 1)
def test_get_recordset(self):
_, domain = self.create_domain()
_, expected = self.create_recordset(domain)
actual = self.storage.get_recordset(self.admin_context, expected['id'])
self.assertEqual(actual['name'], expected['name'])
self.assertEqual(actual['type'], expected['type'])
def test_get_recordset_missing(self):
with testtools.ExpectedException(exceptions.RecordSetNotFound):
uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
self.storage.get_recordset(self.admin_context, uuid)
def test_find_recordset_criterion(self):
_, domain = self.create_domain(0)
_, expected = self.create_recordset(domain)
criterion = dict(
domain_id=domain['id'],
name=expected['name'],
)
actual = self.storage.find_recordset(self.admin_context, criterion)
self.assertEqual(actual['name'], expected['name'])
self.assertEqual(actual['type'], expected['type'])
def test_find_recordset_criterion_missing(self):
_, domain = self.create_domain(0)
_, expected = self.create_recordset(domain)
criterion = dict(
name=expected['name'] + "NOT FOUND"
)
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.storage.find_recordset(self.admin_context, criterion)
def test_update_recordset(self):
domain_fixture, domain = self.create_domain()
# Create a recordset
_, recordset = self.create_recordset(domain)
# Get some different values to test the update with
recordset_fixture = self.get_recordset_fixture(domain['name'],
fixture=1)
# Update the recordset with the new values...
updated = self.storage.update_recordset(self.admin_context,
recordset['id'],
recordset_fixture)
# Ensure the update succeeded
self.assertEqual(updated['id'], recordset['id'])
self.assertEqual(updated['name'], recordset_fixture['name'])
self.assertEqual(updated['type'], recordset_fixture['type'])
def test_update_recordset_duplicate(self):
_, domain = self.create_domain()
# Create the first two recordsets
recordset_one_fixture, _ = self.create_recordset(domain, fixture=0)
_, recordset_two = self.create_recordset(domain, fixture=1)
with testtools.ExpectedException(exceptions.DuplicateRecordSet):
# Attempt to update the second recordset, making it a duplicate
# recordset
self.storage.update_recordset(self.admin_context,
recordset_two['id'],
recordset_one_fixture)
def test_update_recordset_missing(self):
with testtools.ExpectedException(exceptions.RecordSetNotFound):
uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
self.storage.update_recordset(self.admin_context, uuid, {})
def test_delete_recordset(self):
_, domain = self.create_domain()
# Create a recordset
_, recordset = self.create_recordset(domain)
self.storage.delete_recordset(self.admin_context, recordset['id'])
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.storage.get_recordset(self.admin_context, recordset['id'])
def test_delete_recordset_missing(self):
with testtools.ExpectedException(exceptions.RecordSetNotFound):
uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
self.storage.delete_recordset(self.admin_context, uuid)
def test_count_recordsets(self):
# in the beginning, there should be nothing
recordsets = self.storage.count_recordsets(self.admin_context)
self.assertEqual(recordsets, 0)
# Create a single domain & recordset
_, domain = self.create_domain()
self.create_recordset(domain)
# we should have 1 recordset now
recordsets = self.storage.count_recordsets(self.admin_context)
self.assertEqual(recordsets, 1)
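# NOTE: Illustrative sketch (hypothetical values) of the two-step flow the
# record tests below rely on -- a record is always created inside an
# existing recordset:
#
#     recordset = self.storage.create_recordset(
#         self.admin_context, domain['id'],
#         {'name': 'www.%s' % domain['name'], 'type': 'A'})
#     self.storage.create_record(
#         self.admin_context, domain['id'], recordset['id'],
#         {'data': '192.0.2.1'})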
def test_create_record(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain, type='A')
values = {
'data': '192.0.2.1',
}
result = self.storage.create_record(self.admin_context, domain['id'],
result = self.storage.create_record(self.admin_context,
domain['id'],
recordset['id'],
values=values)
self.assertIsNotNone(result['id'])
@ -774,72 +989,69 @@ class StorageTestCase(object):
self.assertIsNone(result['updated_at'])
self.assertEqual(result['tenant_id'], self.admin_context.tenant_id)
self.assertEqual(result['name'], values['name'])
self.assertEqual(result['type'], values['type'])
self.assertEqual(result['data'], values['data'])
self.assertIn('status', result)
def test_create_record_duplicate(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain)
# Create the First Record
self.create_record(domain)
self.create_record(domain, recordset)
with testtools.ExpectedException(exceptions.DuplicateRecord):
# Attempt to create the second/duplicate record
self.create_record(domain)
self.create_record(domain, recordset)
def test_find_records(self):
_, domain = self.create_domain()
actual = self.storage.find_records(
self.admin_context,
criterion={'domain_id': domain['id']})
_, recordset = self.create_recordset(domain)
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
actual = self.storage.find_records(self.admin_context, criterion)
self.assertEqual(actual, [])
# Create a single record
_, record_one = self.create_record(domain, fixture=0)
_, record_one = self.create_record(domain, recordset, fixture=0)
actual = self.storage.find_records(
self.admin_context,
criterion={'domain_id': domain['id']})
actual = self.storage.find_records(self.admin_context, criterion)
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['name'], record_one['name'])
self.assertEqual(actual[0]['type'], record_one['type'])
self.assertEqual(actual[0]['data'], record_one['data'])
self.assertIn('status', record_one)
# Create a second record
_, record_two = self.create_record(domain, fixture=1)
_, record_two = self.create_record(domain, recordset, fixture=1)
actual = self.storage.find_records(
self.admin_context,
criterion={'domain_id': domain['id']})
actual = self.storage.find_records(self.admin_context, criterion)
self.assertEqual(len(actual), 2)
self.assertEqual(actual[1]['name'], record_two['name'])
self.assertEqual(actual[1]['type'], record_two['type'])
self.assertEqual(actual[1]['data'], record_two['data'])
self.assertIn('status', record_two)
def test_find_records_criterion(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain, type='A')
_, record_one = self.create_record(domain, fixture=0)
self.create_record(domain, fixture=1)
_, record_one = self.create_record(domain, recordset, fixture=0)
self.create_record(domain, recordset, fixture=1)
criterion = dict(
data=record_one['data'],
domain_id=domain['id']
domain_id=domain['id'],
recordset_id=recordset['id'],
)
results = self.storage.find_records(self.admin_context, criterion)
self.assertEqual(len(results), 1)
criterion = dict(
type='A',
domain_id=domain['id']
domain_id=domain['id'],
recordset_id=recordset['id'],
)
results = self.storage.find_records(self.admin_context, criterion)
@ -848,13 +1060,16 @@ class StorageTestCase(object):
def test_find_records_criterion_wildcard(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain, type='A')
values = {'name': 'one.%s' % domain['name']}
values = {'data': '127.0.0.1'}
self.create_record(domain, recordset, fixture=0, values=values)
self.create_record(domain, fixture=0, values=values)
criterion = dict(
name="%%%s" % domain['name'],
domain_id=domain['id']
domain_id=domain['id'],
recordset_id=recordset['id'],
data="%%.0.0.1",
)
results = self.storage.find_records(self.admin_context, criterion)
@ -875,10 +1090,17 @@ class StorageTestCase(object):
# Create two domains in different tenants, and 1 record in each
_, domain_one = self.create_domain(fixture=0, context=one_context)
self.create_record(domain_one, fixture=0, context=one_context)
_, recordset_one = self.create_recordset(domain_one, fixture=0,
context=one_context)
self.create_record(domain_one, recordset_one, fixture=0,
context=one_context)
_, domain_two = self.create_domain(fixture=1, context=two_context)
self.create_record(domain_two, fixture=0, context=two_context)
_, recordset_two = self.create_recordset(domain_two, fixture=1,
context=two_context)
self.create_record(domain_two, recordset_two, fixture=0,
context=two_context)
# Ensure the all_tenants context sees two records
results = self.storage.find_records(at_context)
@ -898,13 +1120,12 @@ class StorageTestCase(object):
def test_get_record(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain)
_, expected = self.create_record(domain)
_, expected = self.create_record(domain, recordset)
actual = self.storage.get_record(self.admin_context, expected['id'])
self.assertEqual(actual['name'], expected['name'])
self.assertEqual(actual['type'], expected['type'])
self.assertEqual(actual['data'], expected['data'])
self.assertIn('status', actual)
@ -915,40 +1136,44 @@ class StorageTestCase(object):
def test_find_record_criterion(self):
_, domain = self.create_domain(0)
_, expected = self.create_record(domain)
_, recordset = self.create_recordset(domain)
_, expected = self.create_record(domain, recordset)
criterion = dict(
name=expected['name'],
domain_id=domain['id']
domain_id=domain['id'],
recordset_id=recordset['id'],
data=expected['data'],
)
actual = self.storage.find_record(self.admin_context, criterion)
self.assertEqual(actual['name'], expected['name'])
self.assertEqual(actual['type'], expected['type'])
self.assertEqual(actual['data'], expected['data'])
self.assertIn('status', actual)
def test_find_record_criterion_missing(self):
_, domain = self.create_domain(0)
_, expected = self.create_record(domain)
_, recordset = self.create_recordset(domain)
_, expected = self.create_record(domain, recordset)
criterion = dict(
name=expected['name'] + "NOT FOUND",
domain_id=domain['id']
domain_id=domain['id'],
data=expected['data'] + "NOT FOUND",
)
with testtools.ExpectedException(exceptions.RecordNotFound):
self.storage.find_record(self.admin_context, criterion)
def test_update_record(self):
domain_fixture, domain = self.create_domain()
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain)
# Create a record
_, record = self.create_record(domain)
_, record = self.create_record(domain, recordset)
# Get some different values to test the update with
record_fixture = self.get_record_fixture(domain['name'], fixture=1)
record_fixture = self.get_record_fixture(recordset['type'], fixture=1)
# Update the record with the new values...
updated = self.storage.update_record(self.admin_context, record['id'],
@ -956,18 +1181,18 @@ class StorageTestCase(object):
# Ensure the update succeeded
self.assertEqual(updated['id'], record['id'])
self.assertEqual(updated['name'], record_fixture['name'])
self.assertEqual(updated['type'], record_fixture['type'])
self.assertEqual(updated['data'], record_fixture['data'])
self.assertNotEqual(updated['hash'], record['hash'])
self.assertIn('status', updated)
def test_update_record_duplicate(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain)
# Create the first two records
record_one_fixture, _ = self.create_record(domain, fixture=0)
_, record_two = self.create_record(domain, fixture=1)
record_one_fixture, _ = self.create_record(domain, recordset,
fixture=0)
_, record_two = self.create_record(domain, recordset, fixture=1)
with testtools.ExpectedException(exceptions.DuplicateRecord):
# Attempt to update the second record, making it a duplicate record
@ -981,9 +1206,10 @@ class StorageTestCase(object):
def test_delete_record(self):
_, domain = self.create_domain()
_, recordset = self.create_recordset(domain)
# Create a record
_, record = self.create_record(domain)
_, record = self.create_record(domain, recordset)
self.storage.delete_record(self.admin_context, record['id'])
@ -995,12 +1221,6 @@ class StorageTestCase(object):
uuid = 'caf771fc-6b05-4891-bee1-c2a48621f57b'
self.storage.delete_record(self.admin_context, uuid)
def test_ping(self):
pong = self.storage.ping(self.admin_context)
self.assertEqual(pong['status'], True)
self.assertIsNotNone(pong['rtt'])
def test_count_records(self):
# in the beginning, there should be nothing
records = self.storage.count_records(self.admin_context)
@ -1008,8 +1228,15 @@ class StorageTestCase(object):
# Create a single domain & record
_, domain = self.create_domain()
self.create_record(domain)
_, recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
# we should have 1 record now
records = self.storage.count_domains(self.admin_context)
records = self.storage.count_records(self.admin_context)
self.assertEqual(records, 1)
def test_ping(self):
pong = self.storage.ping(self.admin_context)
self.assertEqual(pong['status'], True)
self.assertIsNotNone(pong['rtt'])

View File

@ -503,6 +503,112 @@ class StorageAPITest(TestCase):
self._assert_call_count('delete_domain', 0)
# RecordSet Tests
def test_create_recordset(self):
context = mock.sentinel.context
values = mock.sentinel.values
recordset = mock.sentinel.recordset
self._set_side_effect('create_recordset', [recordset])
with self.storage_api.create_recordset(context, 123, values) as q:
self.assertEqual(recordset, q)
self._assert_called_with('create_recordset', context, 123, values)
def test_create_recordset_failure(self):
context = mock.sentinel.context
values = mock.sentinel.values
self._set_side_effect('create_recordset', [{'id': 12345}])
with testtools.ExpectedException(SentinelException):
with self.storage_api.create_recordset(context, 123, values):
raise SentinelException('Something Went Wrong')
self._assert_called_with('create_recordset', context, 123, values)
self._assert_called_with('delete_recordset', context, 12345)
def test_get_recordset(self):
context = mock.sentinel.context
recordset_id = mock.sentinel.recordset_id
recordset = mock.sentinel.recordset
self._set_side_effect('get_recordset', [recordset])
result = self.storage_api.get_recordset(context, recordset_id)
self._assert_called_with('get_recordset', context, recordset_id)
self.assertEqual(recordset, result)
def test_find_recordsets(self):
context = mock.sentinel.context
criterion = mock.sentinel.criterion
recordset = mock.sentinel.recordset
self._set_side_effect('find_recordsets', [[recordset]])
result = self.storage_api.find_recordsets(context, criterion)
self._assert_called_with('find_recordsets', context, criterion)
self.assertEqual([recordset], result)
def test_find_recordset(self):
context = mock.sentinel.context
criterion = mock.sentinel.criterion
recordset = mock.sentinel.recordset
self._set_side_effect('find_recordset', [recordset])
result = self.storage_api.find_recordset(context, criterion)
self._assert_called_with('find_recordset', context, criterion)
self.assertEqual(recordset, result)
def test_update_recordset(self):
context = mock.sentinel.context
values = mock.sentinel.values
with self.storage_api.update_recordset(context, 123, values):
pass
self._assert_called_with('update_recordset', context, 123, values)
def test_update_recordset_failure(self):
context = mock.sentinel.context
values = {'test': 2}
self._set_side_effect('get_recordset', [{'id': 123, 'test': 1}])
with testtools.ExpectedException(SentinelException):
with self.storage_api.update_recordset(context, 123, values):
raise SentinelException('Something Went Wrong')
self._assert_has_calls('update_recordset', [
mock.call(context, 123, values),
mock.call(context, 123, {'test': 1}),
])
def test_delete_recordset(self):
context = mock.sentinel.context
recordset = mock.sentinel.recordset
self._set_side_effect('get_recordset', [recordset])
with self.storage_api.delete_recordset(context, 123) as q:
self.assertEqual(recordset, q)
self._assert_called_with('delete_recordset', context, 123)
def test_delete_recordset_failure(self):
context = mock.sentinel.context
recordset = mock.sentinel.recordset
self._set_side_effect('get_recordset', [recordset])
with testtools.ExpectedException(SentinelException):
with self.storage_api.delete_recordset(context, 123):
raise SentinelException('Something Went Wrong')
self._assert_call_count('delete_recordset', 0)
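# NOTE: Illustrative sketch of the rollback behaviour verified above -- if
# the body of the context manager raises, the freshly created recordset is
# deleted again (names and IDs are hypothetical):
#
#     with self.storage_api.create_recordset(context, domain_id, values) as rs:
#         raise Exception()  # triggers delete_recordset(context, rs['id'])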
# Record Tests
def test_create_record(self):
context = mock.sentinel.context
@ -511,10 +617,10 @@ class StorageAPITest(TestCase):
self._set_side_effect('create_record', [record])
with self.storage_api.create_record(context, 123, values) as q:
with self.storage_api.create_record(context, 123, 321, values) as q:
self.assertEqual(record, q)
self._assert_called_with('create_record', context, 123, values)
self._assert_called_with('create_record', context, 123, 321, values)
def test_create_record_failure(self):
context = mock.sentinel.context
@ -523,10 +629,10 @@ class StorageAPITest(TestCase):
self._set_side_effect('create_record', [{'id': 12345}])
with testtools.ExpectedException(SentinelException):
with self.storage_api.create_record(context, 123, values):
with self.storage_api.create_record(context, 123, 321, values):
raise SentinelException('Something Went Wrong')
self._assert_called_with('create_record', context, 123, values)
self._assert_called_with('create_record', context, 123, 321, values)
self._assert_called_with('delete_record', context, 12345)
def test_get_record(self):
@ -549,6 +655,7 @@ class StorageAPITest(TestCase):
result = self.storage_api.find_records(context, criterion)
self._assert_called_with('find_records', context, criterion)
self.assertEqual([record], result)
def test_find_record(self):
@ -560,30 +667,33 @@ class StorageAPITest(TestCase):
result = self.storage_api.find_record(context, criterion)
self._assert_called_with('find_record', context, criterion)
self.assertEqual(record, result)
def test_update_record(self):
context = mock.sentinel.context
record_id = mock.sentinel.record_id
values = mock.sentinel.values
with self.storage_api.update_record(context, 123, values):
with self.storage_api.update_record(context, record_id, values):
pass
self._assert_called_with('update_record', context, 123, values)
self._assert_called_with('update_record', context, record_id, values)
def test_update_record_failure(self):
context = mock.sentinel.context
record_id = mock.sentinel.record_id
values = {'test': 2}
self._set_side_effect('get_record', [{'id': 123, 'test': 1}])
self._set_side_effect('get_record', [{'id': record_id, 'test': 1}])
with testtools.ExpectedException(SentinelException):
with self.storage_api.update_record(context, 123, values):
with self.storage_api.update_record(context, record_id, values):
raise SentinelException('Something Went Wrong')
self._assert_has_calls('update_record', [
mock.call(context, 123, values),
mock.call(context, 123, {'test': 1}),
mock.call(context, record_id, values),
mock.call(context, record_id, {'test': 1}),
])
def test_delete_record(self):

View File

@ -62,7 +62,6 @@ designate.notification.handler =
designate.backend =
bind9 = designate.backend.impl_bind9:Bind9Backend
mysqlbind9 = designate.backend.impl_mysqlbind9:MySQLBind9Backend
powerdns = designate.backend.impl_powerdns:PowerDNSBackend
rpc = designate.backend.impl_rpc:RPCBackend
dnsmasq = designate.backend.impl_dnsmasq:DnsmasqBackend