Backends need to implement c/u/d_recordset

The current code means the centrally generated SOA/NS records are
not provisioned, nor are any records created via the V2 API.

Change-Id: I49dc3dda9d4b49305646a8b222165abaa3506959
Closes-Bug: 1366821
Closes-Bug: 1370621
Kiall Mac Innes 2014-09-08 15:02:58 +01:00
parent ac2a261182
commit 3caefe64ee
14 changed files with 470 additions and 404 deletions
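
The upshot for driver authors: create_recordset, update_recordset and delete_recordset are now part of the abstract Backend contract, so every backend, including no-op ones, must define them. A minimal hypothetical sketch (the driver name and no-op bodies are illustrative; the domain- and record-level methods, already abstract, are elided):

from designate.backend import base


class ExampleBackend(base.Backend):
    # Hypothetical driver, for illustration only.
    __plugin_name__ = 'example'

    # Newly abstract as of this commit.
    def create_recordset(self, context, domain, recordset):
        pass  # discard, as the DynECT/NSD4 drivers below do

    def update_recordset(self, context, domain, recordset):
        pass

    def delete_recordset(self, context, domain, recordset):
        pass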


@@ -57,6 +57,15 @@ class Backend(DriverPlugin):
raise exceptions.NotImplemented(
'TSIG is not supported by this backend')
def create_server(self, context, server):
"""Create a Server"""
def update_server(self, context, server):
"""Update a Server"""
def delete_server(self, context, server):
"""Delete a Server"""
@abc.abstractmethod
def create_domain(self, context, domain):
"""Create a DNS domain"""
@@ -69,6 +78,7 @@ class Backend(DriverPlugin):
def delete_domain(self, context, domain):
"""Delete a DNS domain"""
@abc.abstractmethod
def create_recordset(self, context, domain, recordset):
"""Create a DNS recordset"""
@@ -92,18 +102,6 @@ class Backend(DriverPlugin):
def delete_record(self, context, domain, recordset, record):
"""Delete a DNS record"""
@abc.abstractmethod
def create_server(self, context, server):
"""Create a DNS server"""
@abc.abstractmethod
def update_server(self, context, server):
"""Update a DNS server"""
@abc.abstractmethod
def delete_server(self, context, server):
"""Delete a DNS server"""
def sync_domain(self, context, domain, rdata):
"""
Re-Sync a DNS domain


@@ -16,10 +16,12 @@
import os
import glob
import shutil
import time
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate.openstack.common import lockutils
from designate.i18n import _LW
from designate import utils
from designate.backend import base
@@ -68,18 +70,6 @@ class Bind9Backend(base.Backend):
else:
raise proc_exec_err
def create_server(self, context, server):
LOG.debug('Create Server')
self._sync_domains_on_server_change()
def update_server(self, context, server):
LOG.debug('Update Server')
self._sync_domains_on_server_change()
def delete_server(self, context, server):
LOG.debug('Delete Server')
self._sync_domains_on_server_change()
def create_domain(self, context, domain):
LOG.debug('Create Domain')
self._sync_domain(domain, new_domain_flag=True)
@@ -92,6 +82,10 @@ class Bind9Backend(base.Backend):
LOG.debug('Delete Domain')
self._sync_delete_domain(domain)
def create_recordset(self, context, domain, recordset):
LOG.debug('Create RecordSet')
self._sync_domain(domain)
def update_recordset(self, context, domain, recordset):
LOG.debug('Update RecordSet')
self._sync_domain(domain)
@@ -157,74 +151,73 @@ class Bind9Backend(base.Backend):
def _sync_domain(self, domain, new_domain_flag=False):
"""Sync a single domain's zone file and reload bind config"""
LOG.debug('Synchronising Domain: %s' % domain['id'])
servers = self.central_service.find_servers(self.admin_context)
# NOTE: Only one thread should be working with the Zonefile at a given
# time. The sleep(1) below introduces a not insignificant risk
# of more than 1 thread working with a zonefile at a given time.
with lockutils.lock('bind9-%s' % domain['id']):
LOG.debug('Synchronising Domain: %s' % domain['id'])
recordsets = self.central_service.find_recordsets(
self.admin_context, {'domain_id': domain['id']})
recordsets = self.central_service.find_recordsets(
self.admin_context, {'domain_id': domain['id']})
records = []
records = []
for recordset in recordsets:
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
for recordset in recordsets:
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
raw_records = self.central_service.find_records(
self.admin_context, criterion)
raw_records = self.central_service.find_records(
self.admin_context, criterion)
for record in raw_records:
records.append({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'priority': record['priority'],
'data': record['data'],
})
for record in raw_records:
records.append({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'priority': record['priority'],
'data': record['data'],
})
output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
'bind9')
output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
'bind9')
output_path = os.path.join(output_folder, '%s.zone' %
"_".join([domain['name'], domain['id']]))
output_name = "_".join([domain['name'], domain['id']])
output_path = os.path.join(output_folder, '%s.zone' % output_name)
utils.render_template_to_file('bind9-zone.jinja2',
output_path,
servers=servers,
domain=domain,
records=records)
utils.render_template_to_file('bind9-zone.jinja2',
output_path,
domain=domain,
records=records)
rndc_call = self._rndc_base()
rndc_call = self._rndc_base()
if new_domain_flag:
rndc_op = [
'addzone',
'%s { type master; file "%s"; };' % (domain['name'],
output_path),
]
rndc_call.extend(rndc_op)
else:
rndc_op = 'reload'
rndc_call.extend([rndc_op])
rndc_call.extend([domain['name']])
if new_domain_flag:
rndc_op = [
'addzone',
'%s { type master; file "%s"; };' % (domain['name'],
output_path),
]
rndc_call.extend(rndc_op)
else:
rndc_op = 'reload'
rndc_call.extend([rndc_op])
rndc_call.extend([domain['name']])
LOG.debug('Calling RNDC with: %s' % " ".join(rndc_call))
utils.execute(*rndc_call)
if not new_domain_flag:
# NOTE: Bind9 will only ever attempt to re-read a zonefile if
# the file's timestamp has changed since the previous
# reload. A one second sleep ensures we cross over a
# second boundary before allowing the next change.
time.sleep(1)
nzf_name = glob.glob('%s/*.nzf' % cfg.CONF[self.name].nzf_path)
LOG.debug('Calling RNDC with: %s' % " ".join(rndc_call))
utils.execute(*rndc_call)
output_file = os.path.join(output_folder, 'zones.config')
nzf_name = glob.glob('%s/*.nzf' % cfg.CONF[self.name].nzf_path)
shutil.copyfile(nzf_name[0], output_file)
output_file = os.path.join(output_folder, 'zones.config')
def _sync_domains_on_server_change(self):
# TODO(eankutse): Improve this so it scales. Need to design
# for it in the new Pool Manager/Agent for the backend that is
# being proposed
LOG.debug('Synchronising domains on server change')
domains = self.central_service.find_domains(self.admin_context)
for domain in domains:
self._sync_domain(domain)
shutil.copyfile(nzf_name[0], output_file)
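
The lock-and-sleep pattern in _sync_domain is easy to misread, so here is a condensed sketch of the serialization it implements (write_zonefile and rndc_reload are hypothetical stand-ins for the template render and rndc call above):

import time

from designate.openstack.common import lockutils


def sync_zone(domain_id, write_zonefile, rndc_reload):
    # One lock per domain serializes zonefile writers in this process.
    with lockutils.lock('bind9-%s' % domain_id):
        write_zonefile()
        rndc_reload()
        # BIND only re-reads a zonefile whose timestamp changed, and
        # mtime has one-second granularity: sleep across a second
        # boundary so the next writer's change is always picked up.
        time.sleep(1)

In the diff above the sleep is skipped for brand-new zones (new_domain_flag), where rndc addzone loads the file unconditionally.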


@@ -372,6 +372,9 @@ class DynECTBackend(base.Backend):
raise
client.logout()
def create_recordset(self, context, domain, recordset):
LOG.debug('Discarding create_recordset call, not-applicable')
def update_recordset(self, context, domain, recordset):
LOG.debug('Discarding update_recordset call, not-applicable')
@@ -386,12 +389,3 @@ class DynECTBackend(base.Backend):
def delete_record(self, context, domain, recordset, record):
LOG.debug('Discarding delete_record call, not-applicable')
def create_server(self, context, server):
LOG.debug('Discarding create_server call, not-applicable')
def update_server(self, context, server):
LOG.debug('Discarding update_server call, not-applicable')
def delete_server(self, context, server):
LOG.debug('Discarding delete_server call, not-applicable')


@@ -211,15 +211,6 @@ class IPABackend(base.Backend):
self.ntries = cfg.CONF[self.name].ipa_connect_retries
self.force = cfg.CONF[self.name].ipa_force_ns_use
def create_server(self, context, server):
LOG.debug('Discarding create_server call, not-applicable')
def update_server(self, context, server):
LOG.debug('Discarding update_server call, not-applicable')
def delete_server(self, context, server):
LOG.debug('Discarding delete_server call, not-applicable')
def create_domain(self, context, domain):
LOG.debug('Create Domain %r' % domain)
ipareq = {'method': 'dnszone_add', 'id': 0}


@@ -117,6 +117,9 @@ class NSD4SlaveBackend(base.Backend):
sock.close()
return result.rstrip()
def create_recordset(self, context, domain, recordset):
pass
def update_recordset(self, context, domain, recordset):
pass
@@ -131,12 +134,3 @@ class NSD4SlaveBackend(base.Backend):
def delete_record(self, context, domain, recordset, record):
pass
def create_server(self, context, server):
pass
def update_server(self, context, server):
pass
def delete_server(self, context, server):
pass


@@ -20,9 +20,7 @@ import threading
from oslo.config import cfg
from oslo.db import options
from sqlalchemy import func
from sqlalchemy.sql import select
from sqlalchemy.sql.expression import and_
from sqlalchemy.orm import exc as sqlalchemy_exceptions
from designate.openstack.common import excutils
@@ -36,13 +34,14 @@ from designate.sqlalchemy.expressions import InsertFromSelect
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']
cfg.CONF.register_group(cfg.OptGroup(
CONF.register_group(cfg.OptGroup(
name='backend:powerdns', title="Configuration for Powerdns Backend"
))
cfg.CONF.register_opts([
CONF.register_opts([
cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
'send NOTIFYs to'),
@@ -50,9 +49,8 @@ cfg.CONF.register_opts([
# Override the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
cfg.CONF.set_default('connection',
'sqlite:///$state_path/powerdns.sqlite',
group='backend:powerdns')
CONF.set_default('connection', 'sqlite:///$state_path/powerdns.sqlite',
group='backend:powerdns')
class PowerDNSBackend(base.Backend):
@@ -160,18 +158,6 @@ class PowerDNSBackend(base.Backend):
.filter_by(kind='TSIG-ALLOW-AXFR', content=tsigkey['name'])\
.delete()
def create_server(self, context, server):
LOG.debug('Create Server')
self._update_domains_on_server_create(server)
def update_server(self, context, server):
LOG.debug('Update Server')
self._update_domains_on_server_update(server)
def delete_server(self, context, server):
LOG.debug('Delete Server')
self._update_domains_on_server_delete(server)
# Domain Methods
def create_domain(self, context, domain):
servers = self.central_service.find_servers(self.admin_context)
@@ -181,24 +167,11 @@
'designate_id': domain['id'],
'name': domain['name'].rstrip('.'),
'master': servers[0]['name'].rstrip('.'),
'type': cfg.CONF['backend:powerdns'].domain_type,
'type': CONF['backend:powerdns'].domain_type,
'account': context.tenant
})
domain_m.save(self.session)
for server in servers:
record_m = models.Record()
record_m.update({
'designate_id': server['id'],
'domain_id': domain_m.id,
'name': domain['name'].rstrip('.'),
'type': 'NS',
'content': server['name'].rstrip('.'),
'ttl': domain['ttl'],
'auth': True
})
record_m.save(self.session)
# Install all TSIG Keys on this domain
tsigkeys = self.session.query(models.TsigKey).all()
values = [t.name for t in tsigkeys]
@@ -207,32 +180,14 @@
# Install all Also Notify's on this domain
self._update_domainmetadata(domain_m.id, 'ALSO-NOTIFY',
cfg.CONF['backend:powerdns'].also_notify)
# NOTE(kiall): Do the SOA last, ensuring we don't trigger a NOTIFY
# before the NS records are in place.
record_m = models.Record()
record_m.update({
'designate_id': domain['id'],
'domain_id': domain_m.id,
'name': domain['name'].rstrip('.'),
'type': 'SOA',
'content': self._build_soa_content(domain, servers),
'auth': True
})
record_m.save(self.session)
CONF['backend:powerdns'].also_notify)
def update_domain(self, context, domain):
# TODO(kiall): Sync Server List
domain_m = self._get_domain(domain['id'])
try:
self.session.begin()
# Update the Domains SOA
self._update_soa(domain)
# Update the Records TTLs where necessary
self.session.query(models.Record)\
.filter_by(domain_id=domain_m.id, inherit_ttl=True)\
@@ -266,22 +221,39 @@
query.filter_by(domain_id=domain_m.id).delete()
# RecordSet Methods
def create_recordset(self, context, domain, recordset):
try:
self.session.begin(subtransactions=True)
# Create all the records..
for record in recordset.records:
self.create_record(context, domain, recordset, record)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_recordset(self, context, domain, recordset):
# Ensure records are updated
values = {'ttl': recordset['ttl']}
# TODO(kiall): This is a total kludge. Intended as the simplest
# possible fix for the issue. This needs to be
# re-implemented correctly.
try:
self.session.begin(subtransactions=True)
query = self.session.query(models.Record)
query.filter_by(designate_recordset_id=recordset['id']).update(values)
self._update_soa(domain)
self.delete_recordset(context, domain, recordset)
self.create_recordset(context, domain, recordset)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_recordset(self, context, domain, recordset):
# Ensure records are deleted
query = self.session.query(models.Record)
query.filter_by(designate_recordset_id=recordset['id']).delete()
self._update_soa(domain)
# Record Methods
def create_record(self, context, domain, recordset, record):
domain_m = self._get_domain(domain['id'])
@@ -305,8 +277,6 @@
record_m.save(self.session)
self._update_soa(domain)
def update_record(self, context, domain, recordset, record):
record_m = self._get_record(record['id'])
@@ -323,8 +293,6 @@
record_m.save(self.session)
self._update_soa(domain)
def delete_record(self, context, domain, recordset, record):
try:
record_m = self._get_record(record['id'])
@@ -337,21 +305,7 @@
else:
record_m.delete(self.session)
self._update_soa(domain)
# Internal Methods
def _update_soa(self, domain):
servers = self.central_service.find_servers(self.admin_context)
domain_m = self._get_domain(domain['id'])
record_m = self._get_record(domain=domain_m, type='SOA')
record_m.update({
'content': self._build_soa_content(domain, servers),
'ttl': domain['ttl']
})
record_m.save(self.session)
def _update_domainmetadata(self, domain_id, kind, values=None,
delete=True):
"""Updates a domain's metadata with new values"""
@@ -398,18 +352,6 @@
return content
def _sanitize_uuid_str(self, uuid):
return uuid.replace("-", "")
def _build_soa_content(self, domain, servers):
return "%s %s. %d %d %d %d %d" % (servers[0]['name'],
domain['email'].replace("@", "."),
domain['serial'],
domain['refresh'],
domain['retry'],
domain['expire'],
domain['minimum'])
def _get_tsigkey(self, tsigkey_id):
query = self.session.query(models.TsigKey)
@@ -454,175 +396,3 @@
raise exceptions.RecordNotFound('Too many records found')
else:
return record
def _update_domains_on_server_create(self, server):
"""
For performance, manually prepare a bulk insert query to
build NS records for all existing domains for insertion
into Record table
"""
ns_rec_content = self._sanitize_content("NS", server['name'])
LOG.debug("Content field of newly created NS records for "
"existing domains upon server create is: %s"
% ns_rec_content)
query_select = select([
models.Domain.__table__.c.id,
"'%s'" % self._sanitize_uuid_str(server['id']),
models.Domain.__table__.c.name,
"'NS'",
"'%s'" % ns_rec_content,
1,
1]
)
columns = [
models.Record.__table__.c.domain_id,
models.Record.__table__.c.designate_id,
models.Record.__table__.c.name,
models.Record.__table__.c.type,
models.Record.__table__.c.content,
models.Record.__table__.c.auth,
models.Record.__table__.c.inherit_ttl,
]
query = InsertFromSelect(models.Record.__table__, query_select,
columns)
# Execute the manually prepared query
# A TX is required for, at the least, SQLite.
try:
self.session.begin()
self.session.execute(query)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def _update_domains_on_server_update(self, server):
"""
For performance, manually prepare a bulk update query to
update all NS records for all existing domains that need
updating of their corresponding NS record in Record table
"""
ns_rec_content = self._sanitize_content("NS", server['name'])
LOG.debug("Content field of existing NS records will be updated"
" to the following upon server update: %s" % ns_rec_content)
try:
# Execute the manually prepared query
# A TX is required for, at the least, SQLite.
#
self.session.begin()
# first determine the old name of the server
# before making the updates. Since the value
# is coming from an NS record, the server name
# will not have a trailing period (.)
old_ns_rec = self.session.query(models.Record)\
.filter_by(type='NS', designate_id=server['id'])\
.first()
if old_ns_rec is not None:
old_server_name = old_ns_rec.content
LOG.debug("old server name read from a backend NS record:"
" %s" % old_server_name)
LOG.debug("new server name: %s" % server['name'])
# Then update all NS records that need updating
# Only the name of a server has changed when we are here
self.session.query(models.Record)\
.filter_by(type='NS', designate_id=server['id'])\
.update({"content": ns_rec_content})
# Then update all SOA records as necessary
# Do the SOA last, ensuring we don't trigger a NOTIFY
# before the NS records are in place.
#
# Update the content field of every SOA record that has the
# old server name as part of its 'content' field to reflect
# the new server name.
# Need to strip the trailing period from the server['name']
# before using it to replace the old_server_name in the SOA
# record since the SOA record already has a trailing period
# and we want to keep it
self.session.execute(models.Record.__table__
.update()
.where(and_(models.Record.__table__.c.type == "SOA",
models.Record.__table__.c.content.like
("%s%%" % old_server_name)))
.values(content=func.replace(
models.Record.__table__.c.content,
old_server_name,
server['name'].rstrip('.'))
)
)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
# now commit
else:
self.session.commit()
def _update_domains_on_server_delete(self, server):
"""
For performance, manually prepare a bulk update query to
update all NS records for all existing domains that need
updating of their corresponding NS record in Record table
"""
# find a replacement server
replacement_server_name = None
servers = self.central_service.find_servers(self.admin_context)
for replacement in servers:
if replacement['id'] != server['id']:
replacement_server_name = replacement['name']
break
LOG.debug("This existing server name will be used to update existing"
" SOA records upon server delete: %s "
% replacement_server_name)
# NOTE: because replacement_server_name came from central storage
# it has the trailing period
# Execute the manually prepared query
# A TX is required for, at the least, SQLite.
try:
self.session.begin()
# first delete affected NS records
self.session.query(models.Record)\
.filter_by(type='NS', designate_id=server['id'])\
.delete()
# then update all SOA records as necessary
# Do the SOA last, ensuring we don't trigger a
# NOTIFY before the NS records are in place.
#
# Update the content field of every SOA record that
# has the deleted server name as part of its
# 'content' field to reflect the name of another
# server that exists
# both server['name'] and replacement_server_name
# have trailing period so we are fine just doing the
# substitution without striping trailing period
self.session.execute(models.Record.__table__
.update()
.where(and_(models.Record.__table__.c.type == "SOA",
models.Record.__table__.c.content.like
("%s%%" % server['name'])))
.values(content=func.replace(
models.Record.__table__.c.content,
server['name'],
replacement_server_name)))
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
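
The removed server-rename and server-delete handlers above shared one technique worth noting: rewriting every affected SOA row with a single bulk UPDATE rather than per-row round trips. Reduced to a sketch (session, record_table and the old_name/new_name parameters are stand-ins; pass models.Record.__table__ as record_table):

from sqlalchemy import and_, func


def rewrite_soa_content(session, record_table, old_name, new_name):
    # UPDATE records SET content = replace(content, old, new)
    #  WHERE type = 'SOA' AND content LIKE 'old%'
    session.execute(
        record_table.update()
        .where(and_(record_table.c.type == 'SOA',
                    record_table.c.content.like('%s%%' % old_name)))
        .values(content=func.replace(record_table.c.content,
                                     old_name, new_name)))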


@@ -49,6 +49,9 @@ class RPCBackend(base.Backend):
def delete_domain(self, context, domain):
return self.agent_api.delete_domain(context, domain)
def create_recordset(self, context, domain, recordset):
return self.agent_api.create_recordset(context, domain, recordset)
def update_recordset(self, context, domain, recordset):
return self.agent_api.update_recordset(context, domain, recordset)


@@ -778,9 +778,11 @@ class Service(service.RPCService):
subdomain.parent_domain_id = domain.id
self.update_domain(context, subdomain)
# Create the SOA and NS recordsets for the new domain
self._create_soa(context, created_domain)
# Create the NS and SOA recordsets for the new domain. SOA must be
# last, in order to ensure BIND etc do not read the zone file before
# all changes have been committed to the zone file.
self._create_ns(context, created_domain, servers)
self._create_soa(context, created_domain)
return created_domain
@@ -857,13 +859,16 @@
if increment_serial:
# Increment the serial number
domain.serial = utils.increment_serial(domain.serial)
self._update_soa(context, domain)
domain = self.storage.update_domain(context, domain)
with wrap_backend_call():
self.backend.update_domain(context, domain)
if increment_serial:
# Update the SOA Record
self._update_soa(context, domain)
self.notifier.info(context, 'dns.domain.update', domain)
self.mdns_api.notify_zone_changed(context, domain.name)
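
Both hunks enforce the same invariant: write all other zone data first and the SOA last, because the SOA write is what bumps the serial and lets BIND load the zone and send NOTIFYs. Stated once as a sketch (the helper name _provision_zone is hypothetical; _create_ns and _create_soa are as in the diff):

def _provision_zone(self, context, domain, servers):
    # NS first: a zone must never be visible with an SOA but no NS.
    self._create_ns(context, domain, servers)
    # SOA last: only now should the zone be loaded and NOTIFYs sent.
    self._create_soa(context, domain)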


@@ -0,0 +1,326 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from designate.openstack.common import fileutils
from designate.openstack.common.gettextutils import _, _LE, _LI
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Enables or disables inter-process locks.'),
cfg.StrOpt('lock_path',
default=os.environ.get("OSLO_LOCK_PATH"),
help='Directory to use for lock files.')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _FileLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def acquire(self):
basedir = os.path.dirname(self.fname)
if not os.path.exists(basedir):
fileutils.ensure_tree(basedir)
LOG.info(_LI('Created lock path: %s'), basedir)
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
LOG.debug('Got file lock "%s"', self.fname)
return True
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise threading.ThreadError(_("Unable to acquire lock on"
" `%(filename)s` due to"
" %(exception)s") %
{'filename': self.fname,
'exception': e})
def __enter__(self):
self.acquire()
return self
def release(self):
try:
self.unlock()
self.lockfile.close()
LOG.debug('Released file lock "%s"', self.fname)
except IOError:
LOG.exception(_LE("Could not release the acquired lock `%s`"),
self.fname)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
return os.path.exists(self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_FileLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _FcntlLock(_FileLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _FcntlLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
return os.path.join(local_lock_path, name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
LOG.debug('Attempting to grab external lock "%(lock)s"',
{'lock': name})
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
return InterProcessLock(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None):
"""Remove an external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name):
lock_file_path = _get_lock_path(name, lock_file_prefix)
try:
os.remove(lock_file_path)
except OSError:
LOG.info(_LI('Failed to remove file %(file)s'),
{'file': lock_file_path})
def internal_lock(name):
with _semaphores_lock:
try:
sem = _semaphores[name]
LOG.debug('Using existing semaphore "%s"', name)
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
LOG.debug('Created new semaphore "%s"', name)
return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
"""
int_lock = internal_lock(name)
with int_lock:
LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name})
try:
if external and not CONF.disable_process_locking:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
with ext_lock:
yield ext_lock
else:
yield int_lock
finally:
LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name})
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug('Got semaphore / lock "%(function)s"',
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug('Semaphore / lock released "%(function)s"',
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["OSLO_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
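
A brief usage sketch of the module just vendored (lock names and paths are illustrative): by default lock() yields an in-process semaphore; with external=True it additionally takes an fcntl (or msvcrt on Windows) file lock under lock_path, unless disable_process_locking is set.

from designate.openstack.common import lockutils

# In-process only: a threading.Semaphore keyed by name.
with lockutils.lock('zone-refresh'):
    pass  # critical section

# Cross-process: also holds the file lock
# <lock_path>/designate-zone-refresh for the duration.
with lockutils.lock('zone-refresh', lock_file_prefix='designate',
                    external=True, lock_path='/var/lock'):
    pass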


@@ -1,19 +1,27 @@
$ORIGIN {{ domain.name }}
$TTL {{ domain.ttl }}
{{ domain.name }} IN SOA {{ servers[0].name }} {{ domain.email | replace("@", ".") }}. (
{{ domain.serial }} ; serial
{{ domain.refresh }} ; refresh
{{ domain.retry }} ; retry
{{ domain.expire }} ; expire
{{ domain.minimum }} ; minimum
)
{% for server in servers %}
{{domain.name}} IN NS {{server.name}}
{%- endfor %}
{% for record in records %}
{% for record in records -%}
{{record.name}} {{record.ttl or ''}} IN {{record.type}} {{record.priority or ''}} {{record.data}}
{%- endfor %}
{% else %}
{# Since the zone is created before the NS/SOA records are, we need to "fool" bind
into accepting the `rndc addzone` call. By placing dummy data here, the call passes
and the NS/SOA records will be added moments later. The end result, from BINDs
point of view is:
Sep 8 14:58:45 named[1725]: received control channel command 'addzone new-zone-test.com. { type master; file "/opt/stack/data/designate/bind9/new-zone-test.com._f6afe0a3-aa12-4045-888d-70e776528653.zone"; };'
Sep 8 14:58:45 named[1725]: zone new-zone-test.com/IN: loaded serial 1
Sep 8 14:58:45 named[1725]: zone new-zone-test.com. added to view _default via addzone
Sep 8 14:58:45 named[1725]: zone new-zone-test.com/IN: sending notifies (serial 1)
Sep 8 14:58:45 named[1725]: received control channel command 'reload new-zone-test.com.'
Sep 8 14:58:45 named[1725]: zone new-zone-test.com/IN: has no NS records
Sep 8 14:58:45 named[1725]: zone new-zone-test.com/IN: not loaded due to errors.
Sep 8 14:58:45 named[1725]: received control channel command 'reload new-zone-test.com.'
Sep 8 14:58:45 named[1725]: zone new-zone-test.com/IN: loaded serial 1410188324
#}
{{domain.name}} 10 IN SOA provisioning.example.com. provisioning.example.com. 1 5 10 600 10
{{domain.name}} 10 IN NS provisioning.example.com.
{% endfor %}


@@ -173,8 +173,8 @@ class ApiV2RecordSetsTest(ApiV2TestCase):
data = [self.create_recordset(self.domain,
name='x-%s.%s' % (i, self.domain['name']))
for i in xrange(0, 10)]
data.insert(0, ns)
data.insert(0, soa)
data.insert(0, ns)
self._assert_paging(data, url, key='recordsets')


@@ -95,23 +95,6 @@ class PowerDNSBackendTestCase(tests.TestCase, BackendTestMixin):
self.backend.create_tsigkey(context, tsigkey)
self.backend.delete_tsigkey(context, tsigkey)
def test_create_server(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
def test_update_server(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
self.backend.update_server(context, server)
def test_delete_server(self):
context = self.get_context()
server = self.get_server_fixture()
self.backend.create_server(context, server)
self.backend.delete_server(context, server)
def test_create_domain(self):
context = self.get_context()
server = self.get_server_fixture()


@@ -907,8 +907,8 @@ class StorageTestCase(object):
ns = self.storage.find_recordset(self.admin_context,
criterion={'domain_id': domain['id'],
'type': "NS"})
created.insert(0, ns)
created.insert(0, soa)
created.insert(0, ns)
# Ensure we can page through the results.
self._ensure_paging(created, self.storage.find_recordsets)


@@ -10,6 +10,7 @@ module=fixture.config
module=importutils
module=jsonutils
module=local
module=lockutils
module=log
module=middleware.base
module=middleware.request_id