Retire Packaging Deb project repos

This commit is part of a series to retire the Packaging Deb
project. Step 2 is to remove all content from the project
repos, replacing it with a README that notes where to find
ongoing work and how to recover the repo if needed at some
future point (as in
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project).

Change-Id: I00c02762c4c6dfa3a0cfa1dc2262f2ed98256ace
Tony Breeds 2017-09-12 16:12:51 -06:00
parent c2167820c8
commit e13f99b619
84 changed files with 14 additions and 9503 deletions

@@ -1,8 +0,0 @@
[run]
branch = True
source = tooz
omit = tooz/tests/*,tooz/openstack/*
[report]
ignore_errors = True
precision = 2

.gitignore
@@ -1,24 +0,0 @@
*.py[co]
*.egg
*.egg-info
build
/.*
!.coveragerc
!.gitignore
!.mailmap
!.testr.conf
.*.sw?
cover/*
covhtml
dist
.tox
# Generated by pbr
AUTHORS
ChangeLog
# Generated by testrepository
.testrepository
# Generated by etcd
etcd-v*
default.etcd
# reno build
releasenotes/build

@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/tooz.git

@@ -1,9 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
OS_DEBUG=${OS_DEBUG:-TRACE} \
OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
${PYTHON:-python} -m subunit.run discover tooz $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

@@ -1,17 +0,0 @@
If you would like to contribute to the development of OpenStack, you must
follow the steps on this page:
http://docs.openstack.org/infra/manual/developers.html
If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to OpenStack should be
submitted for review via the Gerrit tool:
http://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/python-tooz/

LICENSE
@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

README
@@ -0,0 +1,14 @@
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
For ongoing work on maintaining OpenStack packages in the Debian
distribution, please see the Debian OpenStack packaging team at
https://wiki.debian.org/OpenStack/.
For any further questions, please email
openstack-dev@lists.openstack.org or join #openstack-dev on
Freenode.

@@ -1,24 +0,0 @@
Tooz
====
.. image:: https://img.shields.io/pypi/v/tooz.svg
:target: https://pypi.python.org/pypi/tooz/
:alt: Latest Version
.. image:: https://img.shields.io/pypi/dm/tooz.svg
:target: https://pypi.python.org/pypi/tooz/
:alt: Downloads
The Tooz project aims to centralize the most common distributed primitives,
such as group membership, locking and leader election, by providing a
coordination API that helps developers build distributed applications.
* Free software: Apache license
* Documentation: https://docs.openstack.org/tooz/latest/
* Source: https://git.openstack.org/cgit/openstack/tooz
* Bugs: https://bugs.launchpad.net/python-tooz/
Join us
-------
- https://launchpad.net/python-tooz

@@ -1,11 +0,0 @@
redis-sentinel [platform:ubuntu !platform:ubuntu-trusty]
redis-server [platform:dpkg]
libpq-dev [platform:dpkg]
postgresql [platform:dpkg]
mysql-client [platform:dpkg]
mysql-server [platform:dpkg]
build-essential [platform:dpkg]
libffi-dev [platform:dpkg]
zookeeperd [platform:dpkg]
memcached [platform:dpkg]
unzip [platform:dpkg]

@@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-
#
# tooz documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'sphinx.ext.extlinks',
'openstackdocstheme',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.viewcode',
'stevedore.sphinxext',
]
# openstackdocstheme options
repository_name = 'openstack/tooz'
bug_project = 'tooz'
bug_tag = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tooz'
copyright = u'%s, OpenStack Foundation' % datetime.date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = subprocess.Popen(['sh', '-c', 'cd ../..; python setup.py --version'],
stdout=subprocess.PIPE).stdout.read()
version = version.strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'toozdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tooz.tex', u'tooz Documentation',
u'eNovance', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ('index', 'tooz', u'tooz Documentation',
# [u'eNovance'], 1)
# ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tooz', u'tooz Documentation',
u'OpenStack Foundation', 'tooz', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# extlinks = {
# }
autodoc_default_flags = ['members', 'special-members', 'show-inheritance']

@@ -1,25 +0,0 @@
=============================================================
Tooz -- Distributed System Helper Library
=============================================================
The Tooz project aims to centralize the most common distributed primitives,
such as group membership, locking and leader election, by providing a
coordination API that helps developers build distributed applications. [#f1]_
.. toctree::
:maxdepth: 2
install/index
user/index
reference/index
.. rubric:: Indices and tables
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. [#f1] It should be noted that even though tooz is designed with OpenStack
integration in mind, and that is where most of its *current*
integration is, it aims to be generally usable and useful in any
project.

@@ -1,43 +0,0 @@
============
Installation
============
Python Versions
===============
Tooz is tested under Python 2.7 and 3.4.
.. _install-basic:
Basic Installation
==================
Tooz should be installed into the same site-packages area where
the application and extensions are installed (either a virtualenv or
the global site-packages). You may need administrative privileges to
do that. The easiest way to install it is using pip_::
$ pip install tooz
or::
$ sudo pip install tooz
.. _pip: http://pypi.python.org/pypi/pip
Download
========
Tooz releases are hosted on PyPI and can be downloaded from:
http://pypi.python.org/pypi/tooz
Source Code
===========
The source is hosted on the OpenStack infrastructure: https://git.openstack.org/cgit/openstack/tooz/
Reporting Bugs
==============
Please report bugs through the launchpad project:
https://launchpad.net/python-tooz

@@ -1,82 +0,0 @@
================
Module Reference
================
Interfaces
----------
.. autoclass:: tooz.coordination.CoordinationDriver
:members:
Consul
~~~~~~
.. autoclass:: tooz.drivers.consul.ConsulDriver
:members:
Etcd
~~~~
.. autoclass:: tooz.drivers.etcd.EtcdDriver
:members:
File
~~~~
.. autoclass:: tooz.drivers.file.FileDriver
:members:
IPC
~~~
.. autoclass:: tooz.drivers.ipc.IPCDriver
:members:
Memcached
~~~~~~~~~
.. autoclass:: tooz.drivers.memcached.MemcachedDriver
:members:
Mysql
~~~~~
.. autoclass:: tooz.drivers.mysql.MySQLDriver
:members:
PostgreSQL
~~~~~~~~~~
.. autoclass:: tooz.drivers.pgsql.PostgresDriver
:members:
Redis
~~~~~
.. autoclass:: tooz.drivers.redis.RedisDriver
:members:
Zake
~~~~
.. autoclass:: tooz.drivers.zake.ZakeDriver
:members:
Zookeeper
~~~~~~~~~
.. autoclass:: tooz.drivers.zookeeper.KazooDriver
:members:
Exceptions
----------
.. autoclass:: tooz.ToozError
.. autoclass:: tooz.coordination.ToozConnectionError
.. autoclass:: tooz.coordination.OperationTimedOut
.. autoclass:: tooz.coordination.GroupNotCreated
.. autoclass:: tooz.coordination.GroupAlreadyExist
.. autoclass:: tooz.coordination.MemberAlreadyExist
.. autoclass:: tooz.coordination.MemberNotJoined
.. autoclass:: tooz.coordination.GroupNotEmpty
.. autofunction:: tooz.utils.raise_with_cause

@@ -1,128 +0,0 @@
=============
Compatibility
=============
Grouping
========
APIs
----
* :py:meth:`~tooz.coordination.CoordinationDriver.watch_join_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.unwatch_join_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.watch_leave_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.unwatch_leave_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.create_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.get_groups`
* :py:meth:`~tooz.coordination.CoordinationDriver.join_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.leave_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.delete_group`
* :py:meth:`~tooz.coordination.CoordinationDriver.get_members`
* :py:meth:`~tooz.coordination.CoordinationDriver.get_member_capabilities`
* :py:meth:`~tooz.coordination.CoordinationDriver.update_capabilities`
Driver support
--------------
.. list-table::
:header-rows: 1
* - Driver
- Supported
* - :py:class:`~tooz.drivers.consul.ConsulDriver`
- No
* - :py:class:`~tooz.drivers.etcd.EtcdDriver`
- No
* - :py:class:`~tooz.drivers.file.FileDriver`
- Yes
* - :py:class:`~tooz.drivers.ipc.IPCDriver`
- No
* - :py:class:`~tooz.drivers.memcached.MemcachedDriver`
- Yes
* - :py:class:`~tooz.drivers.mysql.MySQLDriver`
- No
* - :py:class:`~tooz.drivers.pgsql.PostgresDriver`
- No
* - :py:class:`~tooz.drivers.redis.RedisDriver`
- Yes
* - :py:class:`~tooz.drivers.zake.ZakeDriver`
- Yes
* - :py:class:`~tooz.drivers.zookeeper.KazooDriver`
- Yes
Leaders
=======
APIs
----
* :py:meth:`~tooz.coordination.CoordinationDriver.watch_elected_as_leader`
* :py:meth:`~tooz.coordination.CoordinationDriver.unwatch_elected_as_leader`
* :py:meth:`~tooz.coordination.CoordinationDriver.stand_down_group_leader`
* :py:meth:`~tooz.coordination.CoordinationDriver.get_leader`
Driver support
--------------
.. list-table::
:header-rows: 1
* - Driver
- Supported
* - :py:class:`~tooz.drivers.consul.ConsulDriver`
- No
* - :py:class:`~tooz.drivers.etcd.EtcdDriver`
- No
* - :py:class:`~tooz.drivers.file.FileDriver`
- No
* - :py:class:`~tooz.drivers.ipc.IPCDriver`
- No
* - :py:class:`~tooz.drivers.memcached.MemcachedDriver`
- Yes
* - :py:class:`~tooz.drivers.mysql.MySQLDriver`
- No
* - :py:class:`~tooz.drivers.pgsql.PostgresDriver`
- No
* - :py:class:`~tooz.drivers.redis.RedisDriver`
- Yes
* - :py:class:`~tooz.drivers.zake.ZakeDriver`
- Yes
* - :py:class:`~tooz.drivers.zookeeper.KazooDriver`
- Yes
Locking
=======
APIs
----
* :py:meth:`~tooz.coordination.CoordinationDriver.get_lock`
Driver support
--------------
.. list-table::
:header-rows: 1
* - Driver
- Supported
* - :py:class:`~tooz.drivers.consul.ConsulDriver`
- Yes
* - :py:class:`~tooz.drivers.etcd.EtcdDriver`
- Yes
* - :py:class:`~tooz.drivers.file.FileDriver`
- Yes
* - :py:class:`~tooz.drivers.ipc.IPCDriver`
- Yes
* - :py:class:`~tooz.drivers.memcached.MemcachedDriver`
- Yes
* - :py:class:`~tooz.drivers.mysql.MySQLDriver`
- Yes
* - :py:class:`~tooz.drivers.pgsql.PostgresDriver`
- Yes
* - :py:class:`~tooz.drivers.redis.RedisDriver`
- Yes
* - :py:class:`~tooz.drivers.zake.ZakeDriver`
- Yes
* - :py:class:`~tooz.drivers.zookeeper.KazooDriver`
- Yes

@@ -1,233 +0,0 @@
=======
Drivers
=======
Tooz ships with several drivers implementing its coordination API. While
all drivers provide the same set of features with respect to the API, some
of them have different characteristics:
Zookeeper
---------
**Driver:** :py:class:`tooz.drivers.zookeeper.KazooDriver`
**Characteristics:**
:py:attr:`tooz.drivers.zookeeper.KazooDriver.CHARACTERISTICS`
**Entrypoint name:** ``zookeeper`` or ``kazoo``
**Summary:**
The ZooKeeper driver is the reference implementation and provides the most
solid feature set, since it is possible, for example, to build a cluster of
ZooKeeper servers that is resilient towards network partitions.
**Test driver:** :py:class:`tooz.drivers.zake.ZakeDriver`
**Characteristics:**
:py:attr:`tooz.drivers.zake.ZakeDriver.CHARACTERISTICS`
**Test driver entrypoint name:** ``zake``
Considerations
~~~~~~~~~~~~~~
- Primitives are based on sessions (and typically require careful selection
of session heartbeat periodicity and server side configuration of session
expiry).
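For instance, selecting this driver through its entrypoint name is a
one-liner. A minimal sketch (the server address below is an assumption;
adjust it to your deployment)::

from tooz import coordination

# The URL scheme selects the driver entrypoint; the member id is bytes.
coordinator = coordination.get_coordinator(
    'zookeeper://127.0.0.1:2181', b'host-1')
coordinator.start(start_heart=True)
# ... groups, locks and leader election are now available ...
coordinator.stop()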
Memcached
---------
**Driver:** :py:class:`tooz.drivers.memcached.MemcachedDriver`
**Characteristics:**
:py:attr:`tooz.drivers.memcached.MemcachedDriver.CHARACTERISTICS`
**Entrypoint name:** ``memcached``
**Summary:**
The memcached driver is a basic implementation and provides little
resiliency, though it is much simpler to set up. Many of the features
provided by tooz are based on timeouts (heartbeats, locks, etc.), and so
are less resilient than with other backends.
Considerations
~~~~~~~~~~~~~~
- Less resilient than other backends such as zookeeper and redis.
- Primitives are often based on TTL(s) that may expire before
being renewed.
- Lacks certain primitives (such as compare-and-delete), so certain
functionality is fragile and/or broken as a result.
Redis
-----
**Driver:** :py:class:`tooz.drivers.redis.RedisDriver`
**Characteristics:**
:py:attr:`tooz.drivers.redis.RedisDriver.CHARACTERISTICS`
**Entrypoint name:** ``redis``
**Summary:**
The redis driver is a basic implementation and provides reasonable
resiliency when used with `redis-sentinel`_. Many of the features provided
by tooz are based on timeouts (heartbeats, locks, etc.), and so are less
resilient than with other backends.
Considerations
~~~~~~~~~~~~~~
- Less resilient than other backends such as zookeeper.
- Primitives are often based on TTL(s) that may expire before
being renewed.
IPC
---
**Driver:** :py:class:`tooz.drivers.ipc.IPCDriver`
**Characteristics:** :py:attr:`tooz.drivers.ipc.IPCDriver.CHARACTERISTICS`
**Entrypoint name:** ``ipc``
**Summary:**
The IPC driver is based on the POSIX IPC API and implements a lock
mechanism and some basic group primitives (with **huge**
limitations).
Considerations
~~~~~~~~~~~~~~
- The lock can **only** be distributed locally, among the processes of a
single computer.
File
----
**Driver:** :py:class:`tooz.drivers.file.FileDriver`
**Characteristics:** :py:attr:`tooz.drivers.file.FileDriver.CHARACTERISTICS`
**Entrypoint name:** ``file``
**Summary:**
The file driver is a **simple** driver based on files and directories. It
implements a lock based on POSIX or Windows file-level locking
mechanisms and some basic group primitives (with **huge**
limitations).
Considerations
~~~~~~~~~~~~~~
- The lock can **only** be distributed locally, among the processes of a
single computer.
- Certain concepts provided by it are **not** crash tolerant.
PostgreSQL
----------
**Driver:** :py:class:`tooz.drivers.pgsql.PostgresDriver`
**Characteristics:**
:py:attr:`tooz.drivers.pgsql.PostgresDriver.CHARACTERISTICS`
**Entrypoint name:** ``postgresql``
**Summary:**
The postgresql driver provides only a distributed lock (for now) and is
based on the `PostgreSQL database server`_ and its API(s) that provide for
`advisory locks`_ to be created and used by applications. An acquired lock
is released either explicitly or automatically when the database session
ends (for example if the program using the lock crashes).
Considerations
~~~~~~~~~~~~~~
- The number of locks that may be acquired is bounded by
``max_locks_per_transaction * (max_connections + max_prepared_transactions)``
(PostgreSQL server configuration settings).
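As a sketch, acquiring a lock backed by an advisory lock could look like the
following (the connection URL is hypothetical; adjust user, host and
database to your server)::

from tooz import coordination

coordinator = coordination.get_coordinator(
    'postgresql://user:password@localhost:5432/mydb', b'host-1')
coordinator.start()
# The lock maps to a PostgreSQL advisory lock held by this session.
with coordinator.get_lock("my-resource"):
    pass  # do work while holding the lock
coordinator.stop()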
MySQL
-----
**Driver:** :py:class:`tooz.drivers.mysql.MySQLDriver`
**Characteristics:** :py:attr:`tooz.drivers.mysql.MySQLDriver.CHARACTERISTICS`
**Entrypoint name:** ``mysql``
**Summary:**
The MySQL driver provides only distributed locks (for now) and is based on
the `get_lock`_ primitives supported by the `MySQL database server`_. An
acquired lock is released either explicitly or automatically when the
database session ends (for example if the program using the lock crashes).
Considerations
~~~~~~~~~~~~~~
- Does **not** work correctly on some MySQL versions.
- Does **not** work when MySQL replicates from one server to another (locks
are local to the server that they were created from).
Etcd
----
**Driver:** :py:class:`tooz.drivers.etcd.EtcdDriver`
**Characteristics:** :py:attr:`tooz.drivers.etcd.EtcdDriver.CHARACTERISTICS`
**Entrypoint name:** ``etcd``
**Summary:**
The etcd driver provides only distributed locks (for now) and is based on
the key/value storage and associated primitives supported by the
`etcd server`_.
Consul
------
**Driver:** :py:class:`tooz.drivers.consul.ConsulDriver`
**Characteristics:**
:py:attr:`tooz.drivers.consul.ConsulDriver.CHARACTERISTICS`
**Entrypoint name:** ``consul``
**Summary:**
The `consul`_ driver provides only distributed locks (for now) and is
based on the consul server key/value storage and its primitives. An
acquired lock is released either explicitly or automatically when the
consul session ends (for example if the program using the lock crashes).
Characteristics
---------------
.. autoclass:: tooz.coordination.Characteristics
.. _etcd server: https://coreos.com/etcd/
.. _consul: https://www.consul.io/
.. _advisory locks: http://www.postgresql.org/docs/8.2/interactive/\
explicit-locking.html#ADVISORY-LOCKS
.. _get_lock: http://dev.mysql.com/doc/refman/5.5/en/\
miscellaneous-functions.html#function_get-lock
.. _PostgreSQL database server: http://postgresql.org
.. _MySQL database server: http://mysql.org
.. _redis-sentinel: http://redis.io/topics/sentinel

@@ -1 +0,0 @@
.. include:: ../../../ChangeLog

@@ -1,15 +0,0 @@
==================
User Documentation
==================
.. toctree::
:maxdepth: 2
tutorial/index
drivers
compatibility
.. toctree::
:maxdepth: 1
history

@@ -1,42 +0,0 @@
=======================
Creating A Coordinator
=======================
The principal object provided by tooz is the *coordinator*. It allows you to
use various features, such as group membership, leader election or
distributed locking.
The features provided by the tooz coordinator are implemented using
different drivers. When creating a coordinator, you need to specify which
back-end driver you want it to use. Different drivers may provide different
sets of capabilities.
If a driver does not support a feature, it will raise a
:class:`~tooz.NotImplemented` exception.
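For example, a minimal sketch of handling that case (the file driver is used
here because, per the compatibility matrix, it does not support leader
election; the ``file://`` path is illustrative)::

import tooz
from tooz import coordination

coordinator = coordination.get_coordinator('file:///tmp/tooz-demo', b'host-1')
coordinator.start()
try:
    coordinator.watch_elected_as_leader(b"group", lambda event: None)
except tooz.NotImplemented:
    print("this driver does not support leader election")
coordinator.stop()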
This example program loads a basic coordinator using the ZooKeeper-based
driver.
.. literalinclude:: ../../../../examples/coordinator.py
:language: python
The second argument passed to the coordinator must be a unique identifier
for the running program.
After the coordinator is created, it can be used to access the various
features provided.
In order to keep the connection to the coordination server active, the
:meth:`~tooz.coordination.CoordinationDriver.heartbeat` method must be called
regularly. This will ensure that the coordinator is not considered dead by
other programs participating in the coordination. Unless you want to call it
manually, for example to heartbeat at different moments or intervals, you can
use the tooz builtin heartbeat manager by passing the `start_heart` argument.
.. literalinclude:: ../../../../examples/coordinator_heartbeat.py
:language: python
Note that certain drivers, such as `memcached`, are heavily based on
timeouts, so the interval used to run the heartbeat is important.
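If you do drive the heartbeat yourself, a minimal sketch (using the `zake`
test driver) could look like::

import time

from tooz import coordination

coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()  # no start_heart=True, so we beat manually
deadline = time.time() + 1  # keep the member alive for one second
while time.time() < deadline:
    coordinator.heartbeat()
    time.sleep(0.1)  # a short interval; tune it to the driver's timeouts
coordinator.stop()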

@@ -1,41 +0,0 @@
=====================
Group Membership
=====================
Basic operations
===================
One of the features provided by the coordinator is the ability to handle
group membership. Once a group is created, any coordinator can join the
group and become a member of it. Any coordinator can be notified when a
member joins or leaves the group.
.. literalinclude:: ../../../../examples/group_membership.py
:language: python
Note that all the operations are asynchronous. That means you cannot be
sure that your group has been created or joined before you call the
:meth:`tooz.coordination.CoordAsyncResult.get` method.
You can also leave a group using the
:meth:`tooz.coordination.CoordinationDriver.leave_group` method. The list of
all available groups is retrievable via the
:meth:`tooz.coordination.CoordinationDriver.get_groups` method.
Watching Group Changes
======================
It is possible to watch and get notified when the member list of a group
changes. This is useful for running callback functions whenever something
happens in that group.
.. literalinclude:: ../../../../examples/group_membership_watch.py
:language: python
Using :meth:`tooz.coordination.CoordinationDriver.watch_join_group` and
:meth:`tooz.coordination.CoordinationDriver.watch_leave_group`, your
application can be notified each time a member joins or leaves a group. To
stop watching an event, the two methods
:meth:`tooz.coordination.CoordinationDriver.unwatch_join_group` and
:meth:`tooz.coordination.CoordinationDriver.unwatch_leave_group` allow you
to unregister a particular callback.
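Note that with most drivers the registered callbacks only fire when watches
are processed, so a program typically calls
:meth:`tooz.coordination.CoordinationDriver.run_watchers` periodically. A
minimal sketch (zake test driver assumed)::

import time
import uuid

import six

from tooz import coordination

coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
coordinator.create_group(group).get()

def on_join(event):
    # event is a tooz.coordination.MemberJoinedGroup instance
    print(event.group_id, event.member_id)

coordinator.watch_join_group(group, on_join)
coordinator.join_group(group).get()
for _ in range(10):
    coordinator.run_watchers()  # dispatches any pending callbacks
    time.sleep(0.1)
coordinator.stop()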

@@ -1,10 +0,0 @@
===========
Hash ring
===========
Tooz provides a consistent hash ring implementation. It can be used to map
objects (represented via binary keys) to one or several nodes. When the node
list changes, the rebalancing of objects across the ring is kept minimal.
.. literalinclude:: ../../../../examples/hashring.py
:language: python

@@ -1,16 +0,0 @@
=====================================
Using Tooz in Your Application
=====================================
This tutorial is a step-by-step walk-through demonstrating how to
use tooz in your application.
.. toctree::
:maxdepth: 2
coordinator
group_membership
leader_election
lock
hashring
partitioner

@@ -1,25 +0,0 @@
=================
Leader Election
=================
Each group can elect its own leader. There can be only one leader at a time
in a group. Only members that are running for election can be elected. As
soon as the leader steps down or dies, a new member that is running for
election will be elected.
.. literalinclude:: ../../../../examples/leader_election.py
:language: python
The method
:meth:`tooz.coordination.CoordinationDriver.watch_elected_as_leader` allows
you to register a function to be called back when the member is elected as
leader. Using this method also puts the member in the running for election.
The member can stop running by unregistering all its callbacks with
:meth:`tooz.coordination.CoordinationDriver.unwatch_elected_as_leader`.
It can also temporarily step down as leader with
:meth:`tooz.coordination.CoordinationDriver.stand_down_group_leader`. If
another member is in the running for election, it may be elected instead.
To retrieve the leader of a group, even without being part of the group,
the method :meth:`tooz.coordination.CoordinationDriver.get_leader()` can be
used.
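As a minimal sketch (zake test driver; the group is assumed to already
exist), querying the current leader could look like::

from tooz import coordination

coordinator = coordination.get_coordinator('zake://', b'client-1')
coordinator.start()
# get_leader() is asynchronous and returns a CoordAsyncResult.
leader = coordinator.get_leader(b"my-group").get()
print(leader)
coordinator.stop()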

@@ -1,14 +0,0 @@
======
Lock
======
Tooz provides distributed locks. A lock is identified by a name, and a lock can
only be acquired by one coordinator at a time.
.. literalinclude:: ../../../../examples/lock.py
:language: python
The method :meth:`tooz.coordination.CoordinationDriver.get_lock` allows you
to create a lock identified by a name. Once you retrieve this lock, you can
use it as a context manager or use the :meth:`tooz.locking.Lock.acquire` and
:meth:`tooz.locking.Lock.release` methods to acquire and release the lock.
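The explicit form is equivalent to the context-manager form shown above; a
minimal sketch (zake test driver assumed)::

from tooz import coordination

coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
lock = coordinator.get_lock("foobar")
# blocking=True waits for the lock; acquire() returns True on success.
if lock.acquire(blocking=True):
    try:
        print("Do something that is distributed")
    finally:
        lock.release()
coordinator.stop()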

@@ -1,11 +0,0 @@
=============
Partitioner
=============
Tooz provides a partitioner object based on its consistent hash ring
implementation. It can be used to map Python objects to one or several nodes.
The partitioner object automatically keeps track of nodes joining and
leaving the group, so rebalancing is handled for you.
.. literalinclude:: ../../../../examples/partitioner.py
:language: python

@@ -1,5 +0,0 @@
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
coordinator.stop()

@@ -1,5 +0,0 @@
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start(start_heart=True)
coordinator.stop()

@@ -1,19 +0,0 @@
import uuid
import six
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
# Create a group
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()
# Join a group
request = coordinator.join_group(group)
request.get()
coordinator.stop()

@@ -1,22 +0,0 @@
import uuid
import six
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
# Create a group
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()
def group_joined(event):
    # Event is an instance of tooz.coordination.MemberJoinedGroup
    print(event.group_id, event.member_id)
coordinator.watch_join_group(group, group_joined)
coordinator.stop()

@@ -1,15 +0,0 @@
from tooz import hashring

ring = hashring.HashRing({'node1', 'node2', 'node3'})
# Returns set(['node2'])
nodes_for_foo = ring[b'foo']
# Returns set(['node2', 'node3'])
nodes_for_foo_with_replicas = ring.get_nodes(b'foo', replicas=2)
# Returns set(['node1', 'node3'])
nodes_for_foo_with_replicas = ring.get_nodes(b'foo',
                                             replicas=2,
                                             ignore_nodes={'node2'})

@@ -1,36 +0,0 @@
import time
import uuid
import six
from tooz import coordination
ALIVE_TIME = 1
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
# Create a group
group = six.binary_type(six.text_type(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()
# Join a group
request = coordinator.join_group(group)
request.get()
def when_i_am_elected_leader(event):
    # event is a LeaderElected event
    print(event.group_id, event.member_id)
# Propose to be a leader for the group
coordinator.watch_elected_as_leader(group, when_i_am_elected_leader)
start = time.time()
while time.time() - start < ALIVE_TIME:
    coordinator.heartbeat()
    coordinator.run_watchers()
    time.sleep(0.1)
coordinator.stop()

@@ -1,11 +0,0 @@
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
# Create a lock
lock = coordinator.get_lock("foobar")
with lock:
    print("Do something that is distributed")
coordinator.stop()

@@ -1,11 +0,0 @@
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
partitioner = coordinator.join_partitioned_group("group1")
# Returns {'host-1'}
member = partitioner.members_for_object(object())
coordinator.leave_partitioned_group(partitioner)
coordinator.stop()

@@ -1,3 +0,0 @@
---
other:
- Introduce reno for deployer release notes.

@@ -1,3 +0,0 @@
---
features:
- Add `tooz.hashring`, a consistent hash ring implementation.

@@ -1,4 +0,0 @@
---
features:
- Coordination drivers now have a method `join_group_create` that is able to
create a group before joining it if it does not exist yet.

@@ -1,6 +0,0 @@
---
features:
- >-
Introduce a new partitioner object. This object is synchronized within a
group of nodes and exposes a way to distribute object management across
several nodes.

@@ -1,281 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# openstackdocstheme options
repository_name = 'openstack/tooz'
bug_project = 'tooz'
bug_tag = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tooz Release Notes'
copyright = u'2016, tooz Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
import pkg_resources
release = pkg_resources.get_distribution('tooz').version
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'toozReleaseNotesDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'toozReleaseNotes.tex',
u'tooz Release Notes Documentation',
u'tooz Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'toozReleaseNotes',
u'tooz Release Notes Documentation',
[u'tooz Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'toozReleaseNotes',
u'tooz Release Notes Documentation',
u'tooz Developers', 'toozReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']

@@ -1,9 +0,0 @@
====================
tooz Release Notes
====================
.. toctree::
:maxdepth: 1
unreleased
ocata

@@ -1,6 +0,0 @@
===================================
Ocata Series Release Notes
===================================
.. release-notes::
:branch: origin/stable/ocata

@@ -1,5 +0,0 @@
==============================
Current Series Release Notes
==============================
.. release-notes::

@@ -1,15 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=1.6 # Apache-2.0
stevedore>=1.16.0 # Apache-2.0
six>=1.9.0 # MIT
enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
voluptuous>=0.8.9 # BSD License
msgpack-python>=0.4.0 # Apache-2.0
fasteners>=0.7 # Apache-2.0
tenacity>=3.2.1 # Apache-2.0
futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD
futurist!=0.15.0,>=0.11.0 # Apache-2.0
oslo.utils>=3.15.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0

@@ -1,10 +0,0 @@
#!/bin/bash
set -e
python_version=$(python --version 2>&1)
echo "Running using '$python_version'"
for filename in examples/*.py; do
echo "Activating '$filename'"
python $filename
done

@@ -1,22 +0,0 @@
#!/bin/bash
set -e
set -x
if [ -n "$TOOZ_TEST_DRIVERS" ]
then
IFS=","
for TOOZ_TEST_DRIVER in $TOOZ_TEST_DRIVERS
do
IFS=" "
TOOZ_TEST_DRIVER=(${TOOZ_TEST_DRIVER})
SETUP_ENV_SCRIPT="./setup-${TOOZ_TEST_DRIVER[0]}-env.sh"
[ -x $SETUP_ENV_SCRIPT ] || unset SETUP_ENV_SCRIPT
$SETUP_ENV_SCRIPT pifpaf -e TOOZ_TEST run "${TOOZ_TEST_DRIVER[@]}" -- $*
done
unset IFS
else
for d in $TOOZ_TEST_URLS
do
TOOZ_TEST_URL=$d $*
done
fi

@@ -1,27 +0,0 @@
#!/bin/bash
set -eux
if [ -z "$(which consul)" ]; then
CONSUL_VERSION=0.6.3
CONSUL_RELEASE_URL=https://releases.hashicorp.com/consul
case `uname -s` in
Darwin)
consul_file="consul_${CONSUL_VERSION}_darwin_amd64.zip"
;;
Linux)
consul_file="consul_${CONSUL_VERSION}_linux_amd64.zip"
;;
*)
echo "Unknown operating system"
exit 1
;;
esac
consul_dir=`basename $consul_file .zip`
mkdir -p $consul_dir
curl -L $CONSUL_RELEASE_URL/$CONSUL_VERSION/$consul_file > $consul_dir/$consul_file
unzip $consul_dir/$consul_file -d $consul_dir
export PATH=$PATH:$consul_dir
fi
# Yield execution to venv command
$*

@@ -1,31 +0,0 @@
#!/bin/bash
set -eux
if [ -z "$(which etcd)" ]; then
ETCD_VERSION=3.1.3
case `uname -s` in
Darwin)
OS=darwin
SUFFIX=zip
;;
Linux)
OS=linux
SUFFIX=tar.gz
;;
*)
echo "Unsupported OS"
exit 1
esac
case `uname -m` in
x86_64)
MACHINE=amd64
;;
*)
echo "Unsupported machine"
exit 1
esac
TARBALL_NAME=etcd-v${ETCD_VERSION}-$OS-$MACHINE
test ! -d "$TARBALL_NAME" && curl -L https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${TARBALL_NAME}.${SUFFIX} | tar xz
export PATH=$PATH:$TARBALL_NAME
fi
$*

@@ -1,86 +0,0 @@
[metadata]
name = tooz
author = OpenStack
author-email = openstack-dev@lists.openstack.org
summary = Coordination library for distributed systems.
description-file = README.rst
license = Apache-2
home-page = https://docs.openstack.org/tooz/latest/
classifier =
Environment :: OpenStack
Intended Audience :: Developers
Intended Audience :: Information Technology
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Topic :: System :: Distributed Computing
[files]
packages =
tooz
[entry_points]
tooz.backends =
etcd = tooz.drivers.etcd:EtcdDriver
etcd3 = tooz.drivers.etcd3:Etcd3Driver
etcd3+http = tooz.drivers.etcd3gw:Etcd3Driver
kazoo = tooz.drivers.zookeeper:KazooDriver
zake = tooz.drivers.zake:ZakeDriver
memcached = tooz.drivers.memcached:MemcachedDriver
ipc = tooz.drivers.ipc:IPCDriver
redis = tooz.drivers.redis:RedisDriver
postgresql = tooz.drivers.pgsql:PostgresDriver
mysql = tooz.drivers.mysql:MySQLDriver
file = tooz.drivers.file:FileDriver
zookeeper = tooz.drivers.zookeeper:KazooDriver
consul = tooz.drivers.consul:ConsulDriver
[extras]
consul =
python-consul>=0.4.7 # MIT License
etcd =
requests>=2.10.0 # Apache-2.0
etcd3 =
etcd3>=0.6.2 # Apache-2.0
etcd3gw =
etcd3gw>=0.1.0 # Apache-2.0
zake =
zake>=0.1.6 # Apache-2.0
redis =
redis>=2.10.0 # MIT
postgresql =
psycopg2>=2.5 # LGPL/ZPL
mysql =
PyMySQL>=0.6.2 # MIT License
zookeeper =
kazoo>=2.2 # Apache-2.0
memcached =
pymemcache!=1.3.0,>=1.2.9 # Apache 2.0 License
ipc =
sysv-ipc>=0.6.8 # BSD License
test =
mock>=2.0 # BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
testrepository>=0.0.18 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
coverage>=3.6 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
pifpaf>=0.10.0 # Apache-2.0
os-testr>=0.8.0 # Apache-2.0
doc =
sphinx>=1.6.2 # BSD
openstackdocstheme>=1.11.0 # Apache-2.0
reno>=1.8.0 # Apache-2.0
[wheel]
universal = 1
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
warning-is-error = 1
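For reference: the [extras] sections above map to pip extras (e.g. pip install tooz[redis] pulls in the redis client), and the [entry_points] names are resolved through stevedore. A minimal sketch of that resolution, assuming the stevedore package and using 'redis' purely as an example name:

from stevedore import driver

# Look up the 'redis' entry point declared under tooz.backends above.
mgr = driver.DriverManager(namespace='tooz.backends', name='redis',
                           invoke_on_load=False)
print(mgr.driver)  # the RedisDriver class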

View File

@ -1,29 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)

View File

@ -1,150 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tabulate import tabulate
def print_header(txt, delim="="):
print(txt)
print(delim * len(txt))
def print_methods(methods):
driver_tpl = ":py:meth:`~tooz.coordination.CoordinationDriver.%s`"
for api_name in methods:
method_name = driver_tpl % api_name
print("* %s" % method_name)
if methods:
print("")
driver_tpl = ":py:class:`~tooz.drivers.%s`"
driver_class_names = [
"consul.ConsulDriver",
"etcd.EtcdDriver",
"file.FileDriver",
"ipc.IPCDriver",
"memcached.MemcachedDriver",
"mysql.MySQLDriver",
"pgsql.PostgresDriver",
"redis.RedisDriver",
"zake.ZakeDriver",
"zookeeper.KazooDriver",
]
driver_headers = []
for n in driver_class_names:
driver_headers.append(driver_tpl % (n))
print_header("Grouping")
print("")
print_header("APIs", delim="-")
print("")
grouping_methods = [
'watch_join_group',
'unwatch_join_group',
'watch_leave_group',
'unwatch_leave_group',
'create_group',
'get_groups',
'join_group',
'leave_group',
'delete_group',
'get_members',
'get_member_capabilities',
'update_capabilities',
]
print_methods(grouping_methods)
print_header("Driver support", delim="-")
print("")
grouping_table = [
[
"No", # Consul
"No", # Etcd
"Yes", # File
"No", # IPC
"Yes", # Memcached
"No", # MySQL
"No", # PostgreSQL
"Yes", # Redis
"Yes", # Zake
"Yes", # Zookeeper
],
]
print(tabulate(grouping_table, driver_headers, tablefmt="rst"))
print("")
print_header("Leaders")
print("")
print_header("APIs", delim="-")
print("")
leader_methods = [
'watch_elected_as_leader',
'unwatch_elected_as_leader',
'stand_down_group_leader',
'get_leader',
]
print_methods(leader_methods)
print_header("Driver support", delim="-")
print("")
leader_table = [
[
"No", # Consul
"No", # Etcd
"No", # File
"No", # IPC
"Yes", # Memcached
"No", # MySQL
"No", # PostgreSQL
"Yes", # Redis
"Yes", # Zake
"Yes", # Zookeeper
],
]
print(tabulate(leader_table, driver_headers, tablefmt="rst"))
print("")
print_header("Locking")
print("")
print_header("APIs", delim="-")
print("")
lock_methods = [
'get_lock',
]
print_methods(lock_methods)
print_header("Driver support", delim="-")
print("")
lock_table = [
[
"Yes", # Consul
"Yes", # Etcd
"Yes", # File
"Yes", # IPC
"Yes", # Memcached
"Yes", # MySQL
"Yes", # PostgreSQL
"Yes", # Redis
"Yes", # Zake
"Yes", # Zookeeper
],
]
print(tabulate(lock_table, driver_headers, tablefmt="rst"))
print("")

View File

@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -o pipefail
TESTRARGS=$1
# --until-failure is not compatible with --subunit see:
#
# https://bugs.launchpad.net/testrepository/+bug/1411804
#
# this workaround exists until that is addressed
if [[ "$TESTARGS" =~ "until-failure" ]]; then
python setup.py testr --slowest --testr-args="$TESTRARGS"
else
python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
fi

View File

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Client constraint file contains this client version pin that is in conflict
# with installing the client from source. We should remove the version pin in
# the constraints file before applying it for from-source installation.
CONSTRAINTS_FILE="$1"
shift 1
set -e
# NOTE(tonyb): Place this in the tox environment's log dir so it will get
# published to logs.openstack.org for easy debugging.
localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
if [[ "$CONSTRAINTS_FILE" != http* ]]; then
CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE"
fi
# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile"
pip install -c"$localfile" openstack-requirements
# This is the main purpose of the script: Allow local installation of
# the current repo. It is listed in constraints file and thus any
# install will be constrained and we need to unconstrain it.
edit-constraints "$localfile" -- "$CLIENT_NAME"
pip install -c"$localfile" -U "$@"
exit $?

View File

@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 eNovance Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ToozError(Exception):
"""Exception raised when an internal error occurs.
Raised, for instance, in the case of a server internal error.
:ivar cause: the cause of the exception being raised; when not none this
will itself be an exception instance. This is useful for
creating a chain of exceptions on versions of Python where
exception chaining is not yet implemented/supported natively.
"""
def __init__(self, message, cause=None):
super(ToozError, self).__init__(message)
self.cause = cause
class NotImplemented(NotImplementedError, ToozError):
pass
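A hedged sketch of how the 'cause' attribute is meant to be used; the failing connect() helper is hypothetical:

import tooz

def connect():
    # Hypothetical operation that fails with a low-level error.
    raise IOError("connection refused")

try:
    connect()
except IOError as e:
    # Chain the low-level error so callers can inspect ToozError.cause.
    raise tooz.ToozError("could not reach the backend", cause=e)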

View File

@ -1,31 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tenacity
from tenacity import stop
from tenacity import wait
_default_wait = wait.wait_exponential(max=1)
def retry(stop_max_delay=None, **kwargs):
k = {"wait": _default_wait, "retry": lambda x: False}
if stop_max_delay not in (True, False, None):
k['stop'] = stop.stop_after_delay(stop_max_delay)
return tenacity.retry(**k)
TryAgain = tenacity.TryAgain
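A minimal usage sketch, mirroring how the drivers use this decorator; server_ready() is a hypothetical predicate. The function is retried (with the default exponential wait) each time it raises TryAgain, and gives up after stop_max_delay seconds when a number is passed:

from tooz import _retry

@_retry.retry(stop_max_delay=10)
def wait_for_server(server):
    if not server_ready(server):  # hypothetical readiness check
        raise _retry.TryAgain
    return True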

View File

@ -1,897 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc.
# Copyright (C) 2013-2014 eNovance Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from concurrent import futures
import enum
import logging
import threading
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from stevedore import driver
import tooz
from tooz import _retry
from tooz import partitioner
from tooz import utils
LOG = logging.getLogger(__name__)
TOOZ_BACKENDS_NAMESPACE = "tooz.backends"
class Characteristics(enum.Enum):
"""Attempts to describe the characteristic that a driver supports."""
DISTRIBUTED_ACROSS_THREADS = 'DISTRIBUTED_ACROSS_THREADS'
"""Coordinator components when used by multiple **threads** work
the same as if those components were only used by a single thread."""
DISTRIBUTED_ACROSS_PROCESSES = 'DISTRIBUTED_ACROSS_PROCESSES'
"""Coordinator components when used by multiple **processes** work
the same as if those components were only used by a single thread."""
DISTRIBUTED_ACROSS_HOSTS = 'DISTRIBUTED_ACROSS_HOSTS'
"""Coordinator components when used by multiple **hosts** work
the same as if those components were only used by a single thread."""
NON_TIMEOUT_BASED = 'NON_TIMEOUT_BASED'
"""The driver has the following property:
* Its operations are not based on the timeout of other clients, but on some
other more robust mechanisms.
"""
LINEARIZABLE = 'LINEARIZABLE'
"""The driver has the following properties:
* Ensures each operation must take place before its
completion time.
* Any operation invoked subsequently must take place
after the invocation and by extension, after the original operation
itself.
"""
SEQUENTIAL = 'SEQUENTIAL'
"""The driver has the following properties:
* Operations can take effect before or after completion but all
operations retain the constraint that operations from any given process
must take place in that process's order.
"""
CAUSAL = 'CAUSAL'
"""The driver has the following properties:
* Does **not** have to enforce the order of every
operation from a process, perhaps, only causally related operations
must occur in order.
"""
SERIALIZABLE = 'SERIALIZABLE'
"""The driver has the following properties:
* The history of **all** operations is equivalent to
one that took place in some single atomic order but with unknown
invocation and completion times - it places no bounds on
time or order.
"""
SAME_VIEW_UNDER_PARTITIONS = 'SAME_VIEW_UNDER_PARTITIONS'
"""When a client is connected to a server and that server is partitioned
from a group of other servers, it will (somehow) have the same view of
data as a client connected to a server on the other side of the
partition (typically this is accomplished by write availability being
lost and therefore nothing can change).
"""
SAME_VIEW_ACROSS_CLIENTS = 'SAME_VIEW_ACROSS_CLIENTS'
"""A client connected to one server will *always* have the same view
every other client will have (no matter what server those other
clients are connected to). Typically this is a sacrifice in
write availability because before a write can be acknowledged it must
be acknowledged by *all* servers in a cluster (so that all clients
that are connected to those servers read the exact *same* thing).
"""
class Hooks(list):
def run(self, *args, **kwargs):
return list(map(lambda cb: cb(*args, **kwargs), self))
class Event(object):
"""Base class for events."""
class MemberJoinedGroup(Event):
"""A member joined a group event."""
def __init__(self, group_id, member_id):
self.group_id = group_id
self.member_id = member_id
def __repr__(self):
return "<%s: group %s: +member %s>" % (self.__class__.__name__,
self.group_id,
self.member_id)
class MemberLeftGroup(Event):
"""A member left a group event."""
def __init__(self, group_id, member_id):
self.group_id = group_id
self.member_id = member_id
def __repr__(self):
return "<%s: group %s: -member %s>" % (self.__class__.__name__,
self.group_id,
self.member_id)
class LeaderElected(Event):
"""A leader as been elected."""
def __init__(self, group_id, member_id):
self.group_id = group_id
self.member_id = member_id
class Heart(object):
"""Coordination drivers main liveness pump (its heart)."""
def __init__(self, driver, thread_cls=threading.Thread,
event_cls=threading.Event):
self._thread_cls = thread_cls
self._dead = event_cls()
self._runner = None
self._driver = driver
self._beats = 0
@property
def beats(self):
"""How many times the heart has beaten."""
return self._beats
def is_alive(self):
"""Returns if the heart is beating."""
return not (self._runner is None
or not self._runner.is_alive())
@excutils.forever_retry_uncaught_exceptions
def _beat_forever_until_stopped(self):
"""Inner beating loop."""
while not self._dead.is_set():
with timeutils.StopWatch() as w:
wait_until_next_beat = self._driver.heartbeat()
ran_for = w.elapsed()
has_to_sleep_for = wait_until_next_beat - ran_for
if has_to_sleep_for < 0:
LOG.warning(
"Heartbeating took too long to execute (it ran for"
" %0.2f seconds which is %0.2f seconds longer than"
" the next heartbeat idle time). This may cause"
" timeouts (in locks, leadership, ...) to"
" happen (which will not end well).", ran_for,
ran_for - wait_until_next_beat)
self._beats += 1
# NOTE(harlowja): use the event object for waiting and
# not a sleep function since doing that will allow this code
# to terminate early if stopped via the stop() method vs
# having to wait until the sleep function returns.
# NOTE(jd): Wait for only the half time of what we should.
# This is a measure of safety, better be too soon than too late.
self._dead.wait(has_to_sleep_for / 2.0)
def start(self, thread_cls=None):
"""Starts the heart beating thread (noop if already started)."""
if not self.is_alive():
self._dead.clear()
self._beats = 0
if thread_cls is None:
thread_cls = self._thread_cls
self._runner = thread_cls(target=self._beat_forever_until_stopped)
self._runner.daemon = True
self._runner.start()
def stop(self):
"""Requests the heart beating thread to stop beating."""
self._dead.set()
def wait(self, timeout=None):
"""Wait up to given timeout for the heart beating thread to stop."""
self._runner.join(timeout)
return self._runner.is_alive()
class CoordinationDriver(object):
requires_beating = False
"""
Usage requirement that, if true, requires that :py:meth:`~.heartbeat`
be called periodically (at a given rate) to keep locks, sessions and
other resources from being automatically closed/discarded by the
coordinator's backing store.
"""
CHARACTERISTICS = ()
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
def __init__(self, member_id, parsed_url, options):
super(CoordinationDriver, self).__init__()
self._member_id = member_id
self._started = False
self._hooks_join_group = collections.defaultdict(Hooks)
self._hooks_leave_group = collections.defaultdict(Hooks)
self._hooks_elected_leader = collections.defaultdict(Hooks)
self.requires_beating = (
CoordinationDriver.heartbeat != self.__class__.heartbeat
)
self.heart = Heart(self)
def _has_hooks_for_group(self, group_id):
return (group_id in self._hooks_join_group or
group_id in self._hooks_leave_group)
def join_partitioned_group(
self, group_id,
weight=1,
partitions=partitioner.Partitioner.DEFAULT_PARTITION_NUMBER):
"""Join a group and get a partitioner.
A partitioner distributes a set of objects across several
members using a consistent hash ring. Each object gets assigned (at
least) one member responsible for it. It's then possible to check which
object is owned by any member of the group.
This method also creates the group if necessary, and joins it with the
selected weight.
:param group_id: The group to create a partitioner for.
:param weight: The weight to use in the hashring for this node.
:param partitions: The number of partitions to create.
:return: A :py:class:`~tooz.partitioner.Partitioner` object.
"""
self.join_group_create(group_id, capabilities={'weight': weight})
return partitioner.Partitioner(self, group_id, partitions=partitions)
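# Illustrative only (not part of the original API docs): with a started
# coordinator 'coord', the returned partitioner can be asked which
# members own a given object, e.g.:
#
#   part = coord.join_partitioned_group(b'workers')
#   owners = part.members_for_object(my_object)  # 'my_object' is hypothetical
#
# See tooz.partitioner.Partitioner for the exact API.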
def leave_partitioned_group(self, partitioner):
"""Leave a partitioned group.
This leaves the partitioned group and stops the partitioner.
:param partitioner: The partitioner, as returned by
:py:meth:`~.join_partitioned_group`, to leave.
"""
leave = self.leave_group(partitioner.group_id)
partitioner.stop()
return leave.get()
@staticmethod
def run_watchers(timeout=None):
"""Run the watchers callback.
This may also activate :py:meth:`.run_elect_coordinator` (depending
on driver implementation).
"""
raise tooz.NotImplemented
@staticmethod
def run_elect_coordinator():
"""Try to leader elect this coordinator & activate hooks on success."""
raise tooz.NotImplemented
def watch_join_group(self, group_id, callback):
"""Call a function when group_id sees a new member joined.
The callback functions will be executed when `run_watchers` is
called.
:param group_id: The group id to watch
:param callback: The function to execute when a member joins this group
"""
self._hooks_join_group[group_id].append(callback)
def unwatch_join_group(self, group_id, callback):
"""Stop executing a function when a group_id sees a new member joined.
:param group_id: The group id to unwatch
:param callback: The function that was executed when a member joined
this group
"""
try:
# Check if group_id is in hooks to avoid creating a default empty
# entry in hooks list.
if group_id not in self._hooks_join_group:
raise ValueError
self._hooks_join_group[group_id].remove(callback)
except ValueError:
raise WatchCallbackNotFound(group_id, callback)
if not self._hooks_join_group[group_id]:
del self._hooks_join_group[group_id]
def watch_leave_group(self, group_id, callback):
"""Call a function when group_id sees a new member leaving.
The callback functions will be executed when `run_watchers` is
called.
:param group_id: The group id to watch
:param callback: The function to execute when a member leaves this
group
"""
self._hooks_leave_group[group_id].append(callback)
def unwatch_leave_group(self, group_id, callback):
"""Stop executing a function when a group_id sees a new member leaving.
:param group_id: The group id to unwatch
:param callback: The function that was executed when a member left
this group
"""
try:
# Check if group_id is in hooks to avoid creating a default empty
# entry in hooks list.
if group_id not in self._hooks_leave_group:
raise ValueError
self._hooks_leave_group[group_id].remove(callback)
except ValueError:
raise WatchCallbackNotFound(group_id, callback)
if not self._hooks_leave_group[group_id]:
del self._hooks_leave_group[group_id]
def watch_elected_as_leader(self, group_id, callback):
"""Call a function when member gets elected as leader.
The callback functions will be executed when `run_watchers` is
called.
:param group_id: The group id to watch
:param callback: The function to execute when a member leaves this
group
"""
self._hooks_elected_leader[group_id].append(callback)
def unwatch_elected_as_leader(self, group_id, callback):
"""Call a function when member gets elected as leader.
The callback functions will be executed when `run_watchers` is
called.
:param group_id: The group id to watch
:param callback: The function to execute when a member leaves this
group
"""
try:
self._hooks_elected_leader[group_id].remove(callback)
except ValueError:
raise WatchCallbackNotFound(group_id, callback)
if not self._hooks_elected_leader[group_id]:
del self._hooks_elected_leader[group_id]
@staticmethod
def stand_down_group_leader(group_id):
"""Stand down as the group leader if we are.
:param group_id: The group where we don't want to be a leader anymore
"""
raise tooz.NotImplemented
@property
def is_started(self):
return self._started
def start(self, start_heart=False):
"""Start the service engine.
If needed, the establishment of a connection to the servers
is initiated.
"""
if self._started:
raise tooz.ToozError(
"Can not start a driver which has not been stopped")
self._start()
if self.requires_beating and start_heart:
self.heart.start()
self._started = True
# Tracks which groups are joined
self._joined_groups = set()
def _start(self):
pass
def stop(self):
"""Stop the service engine.
If needed, the connection to servers is closed and the client will
disappear from all joined groups.
"""
if not self._started:
raise tooz.ToozError(
"Can not stop a driver which has not been started")
if self.heart.is_alive():
self.heart.stop()
self.heart.wait()
# Some of the drivers modify joined_groups when being called to leave
# so clone it so that we aren't modifying something while iterating.
joined_groups = self._joined_groups.copy()
leaving = [self.leave_group(group) for group in joined_groups]
for fut in leaving:
try:
fut.get()
except tooz.ToozError:
# Whatever happens, ignore. Maybe we got booted out/never
# existed in the first place, or something is down, but we just
# want to call _stop after whatever happens to not leak any
# connection.
pass
self._stop()
self._started = False
def _stop(self):
pass
@staticmethod
def create_group(group_id):
"""Request the creation of a group asynchronously.
:param group_id: the id of the group to create
:type group_id: ascii bytes
:returns: None
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def get_groups():
"""Return the list composed by all groups ids asynchronously.
:returns: the list of all created group ids
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def join_group(group_id, capabilities=b""):
"""Join a group and establish group membership asynchronously.
:param group_id: the id of the group to join
:type group_id: ascii bytes
:param capabilities: the capabilities of the joined member
:type capabilities: object
:returns: None
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@_retry.retry()
def join_group_create(self, group_id, capabilities=b""):
"""Join a group and create it if necessary.
If the group cannot be joined because it does not exist, it is created
before being joined.
This function will keep retrying until it can create the group and join
it. Since nothing is transactional, it may have to retry several times
if another member is creating/deleting the group at the same time.
:param group_id: Identifier of the group to join and create
:param capabilities: the capabilities of the joined member
"""
req = self.join_group(group_id, capabilities)
try:
req.get()
except GroupNotCreated:
req = self.create_group(group_id)
try:
req.get()
except GroupAlreadyExist:
# The group might have been created in the meantime, ignore
pass
# Now retry to join the group
raise _retry.TryAgain
@staticmethod
def leave_group(group_id):
"""Leave a group asynchronously.
:param group_id: the id of the group to leave
:type group_id: ascii bytes
:returns: None
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def delete_group(group_id):
"""Delete a group asynchronously.
:param group_id: the id of the group to leave
:type group_id: ascii bytes
:returns: Result
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def get_members(group_id):
"""Return the set of all members ids of the specified group.
:returns: set of all created group ids
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def get_member_capabilities(group_id, member_id):
"""Return the capabilities of a member asynchronously.
:param group_id: the id of the group of the member
:type group_id: ascii bytes
:param member_id: the id of the member
:type member_id: ascii bytes
:returns: capabilities of a member
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def get_member_info(group_id, member_id):
"""Return the statistics and capabilities of a member asynchronously.
:param group_id: the id of the group of the member
:type group_id: ascii bytes
:param member_id: the id of the member
:type member_id: ascii bytes
:returns: capabilities and statistics of a member
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def update_capabilities(group_id, capabilities):
"""Update member capabilities in the specified group.
:param group_id: the id of the group of the current member
:type group_id: ascii bytes
:param capabilities: the capabilities of the updated member
:type capabilities: object
:returns: None
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def get_leader(group_id):
"""Return the leader for a group.
:param group_id: the id of the group
:returns: the leader
:rtype: CoordAsyncResult
"""
raise tooz.NotImplemented
@staticmethod
def get_lock(name):
"""Return a distributed lock.
This is an exclusive lock; a second call to acquire() will block or
return False.
:param name: The lock name that is used to identify it across all
nodes.
"""
raise tooz.NotImplemented
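# Illustrative only: locks returned by concrete drivers support both
# explicit acquire()/release() and the context-manager protocol; the
# latter raises LockAcquireFailed (defined below) when acquisition
# fails:
#
#   with coord.get_lock(b'my-lock'):
#       ...  # critical section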
@staticmethod
def heartbeat():
"""Update member status to indicate it is still alive.
Method to run once in a while to be sure that the member is not dead
and is still an active member of a group.
:return: The number of seconds to wait before sending a new heartbeat.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class CoordAsyncResult(object):
"""Representation of an asynchronous task.
Every API call returns a CoordAsyncResult object on which the result or
the status of the task can be requested.
"""
@abc.abstractmethod
def get(self, timeout=10):
"""Retrieve the result of the corresponding asynchronous call.
:param timeout: block until the timeout expires.
:type timeout: float
"""
@abc.abstractmethod
def done(self):
"""Returns True if the task is done, False otherwise."""
class CoordinatorResult(CoordAsyncResult):
"""Asynchronous result that references a future."""
def __init__(self, fut, failure_translator=None):
self._fut = fut
self._failure_translator = failure_translator
def get(self, timeout=None):
try:
if self._failure_translator:
with self._failure_translator():
return self._fut.result(timeout=timeout)
else:
return self._fut.result(timeout=timeout)
except futures.TimeoutError as e:
utils.raise_with_cause(OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
def done(self):
return self._fut.done()
class CoordinationDriverWithExecutor(CoordinationDriver):
EXCLUDE_OPTIONS = None
def __init__(self, member_id, parsed_url, options):
self._options = utils.collapse(options, exclude=self.EXCLUDE_OPTIONS)
self._executor = utils.ProxyExecutor.build(
self.__class__.__name__, self._options)
super(CoordinationDriverWithExecutor, self).__init__(
member_id, parsed_url, options)
def start(self, start_heart=False):
self._executor.start()
super(CoordinationDriverWithExecutor, self).start(start_heart)
def stop(self):
super(CoordinationDriverWithExecutor, self).stop()
self._executor.stop()
class CoordinationDriverCachedRunWatchers(CoordinationDriver):
"""Coordination driver with a `run_watchers` implementation.
This implementation of `run_watchers` is based on a cache of the group
members that is updated between each run of `run_watchers`.
"""
def __init__(self, member_id, parsed_url, options):
super(CoordinationDriverCachedRunWatchers, self).__init__(
member_id, parsed_url, options)
# A cache for group members
self._group_members = collections.defaultdict(set)
self._joined_groups = set()
def _init_watch_group(self, group_id):
if group_id not in self._group_members:
members = self.get_members(group_id)
self._group_members[group_id] = members.get()
def watch_join_group(self, group_id, callback):
self._init_watch_group(group_id)
super(CoordinationDriverCachedRunWatchers, self).watch_join_group(
group_id, callback)
def unwatch_join_group(self, group_id, callback):
super(CoordinationDriverCachedRunWatchers, self).unwatch_join_group(
group_id, callback)
if (not self._has_hooks_for_group(group_id) and
group_id in self._group_members):
del self._group_members[group_id]
def watch_leave_group(self, group_id, callback):
self._init_watch_group(group_id)
super(CoordinationDriverCachedRunWatchers, self).watch_leave_group(
group_id, callback)
def unwatch_leave_group(self, group_id, callback):
super(CoordinationDriverCachedRunWatchers, self).unwatch_leave_group(
group_id, callback)
if (not self._has_hooks_for_group(group_id) and
group_id in self._group_members):
del self._group_members[group_id]
def run_watchers(self, timeout=None):
with timeutils.StopWatch(duration=timeout) as w:
result = []
group_with_hooks = set(self._hooks_join_group.keys()).union(
set(self._hooks_leave_group.keys()))
for group_id in group_with_hooks:
try:
group_members = self.get_members(group_id).get(
timeout=w.leftover(return_none=True))
except GroupNotCreated:
group_members = set()
if (group_id in self._joined_groups and
self._member_id not in group_members):
self._joined_groups.discard(group_id)
old_group_members = self._group_members.get(group_id, set())
for member_id in (group_members - old_group_members):
result.extend(
self._hooks_join_group[group_id].run(
MemberJoinedGroup(group_id, member_id)))
for member_id in (old_group_members - group_members):
result.extend(
self._hooks_leave_group[group_id].run(
MemberLeftGroup(group_id, member_id)))
self._group_members[group_id] = group_members
return result
def get_coordinator(backend_url, member_id,
characteristics=frozenset(), **kwargs):
"""Initialize and load the backend.
:param backend_url: the backend URL to use
:type backend_url: str
:param member_id: the id of the member
:type member_id: ascii bytes
:param characteristics: the characteristics the chosen driver must support
:type characteristics: set of :py:class:`.Characteristics` that will
be matched to the requested driver (this **will**
become a **required** parameter in a future tooz
version)
:param kwargs: additional coordinator options (these take precedence over
options of the **same** name found in the ``backend_url``
arguments query string)
"""
parsed_url = netutils.urlsplit(backend_url)
parsed_qs = six.moves.urllib.parse.parse_qs(parsed_url.query)
if kwargs:
options = {}
for (k, v) in six.iteritems(kwargs):
options[k] = [v]
for (k, v) in six.iteritems(parsed_qs):
if k not in options:
options[k] = v
else:
options = parsed_qs
d = driver.DriverManager(
namespace=TOOZ_BACKENDS_NAMESPACE,
name=parsed_url.scheme,
invoke_on_load=True,
invoke_args=(member_id, parsed_url, options)).driver
characteristics = set(characteristics)
driver_characteristics = set(getattr(d, 'CHARACTERISTICS', set()))
missing_characteristics = characteristics - driver_characteristics
if missing_characteristics:
raise ToozDriverChosenPoorly("Desired characteristics %s"
" is not a strict subset of driver"
" characteristics %s, %s"
" characteristics were not found"
% (characteristics,
driver_characteristics,
missing_characteristics))
return d
# TODO(harlowja): We'll have to figure out a way to remove this 'alias' at
# some point in the future (when we have a better way to tell people it has
# moved without messing up their exception catching hierarchy).
ToozError = tooz.ToozError
class ToozDriverChosenPoorly(tooz.ToozError):
"""Raised when a driver does not match desired characteristics."""
class ToozConnectionError(tooz.ToozError):
"""Exception raised when the client cannot connect to the server."""
class OperationTimedOut(tooz.ToozError):
"""Exception raised when an operation times out."""
class LockAcquireFailed(tooz.ToozError):
"""Exception raised when a lock acquire fails in a context manager."""
class GroupNotCreated(tooz.ToozError):
"""Exception raised when the caller request an nonexistent group."""
def __init__(self, group_id):
self.group_id = group_id
super(GroupNotCreated, self).__init__(
"Group %s does not exist" % group_id)
class GroupAlreadyExist(tooz.ToozError):
"""Exception raised trying to create an already existing group."""
def __init__(self, group_id):
self.group_id = group_id
super(GroupAlreadyExist, self).__init__(
"Group %s already exists" % group_id)
class MemberAlreadyExist(tooz.ToozError):
"""Exception raised trying to join a group already joined."""
def __init__(self, group_id, member_id):
self.group_id = group_id
self.member_id = member_id
super(MemberAlreadyExist, self).__init__(
"Member %s has already joined %s" %
(member_id, group_id))
class MemberNotJoined(tooz.ToozError):
"""Exception raised trying to access a member not in a group."""
def __init__(self, group_id, member_id):
self.group_id = group_id
self.member_id = member_id
super(MemberNotJoined, self).__init__("Member %s has not joined %s" %
(member_id, group_id))
class GroupNotEmpty(tooz.ToozError):
"Exception raised when the caller try to delete a group with members."
def __init__(self, group_id):
self.group_id = group_id
super(GroupNotEmpty, self).__init__("Group %s is not empty" % group_id)
class WatchCallbackNotFound(tooz.ToozError):
"""Exception raised when unwatching a group.
Raised when the caller tries to unwatch a group with a callback that
does not exist.
"""
def __init__(self, group_id, callback):
self.group_id = group_id
self.callback = callback
super(WatchCallbackNotFound, self).__init__(
'Callback %s is not registered on group %s' %
(callback.__name__, group_id))
# TODO(harlowja,jd): We'll have to figure out a way to remove this 'alias' at
# some point in the future (when we have a better way to tell people it has
# moved without messing up their exception catching hierarchy).
SerializationError = utils.SerializationError
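Tying the above together, a hedged end-to-end sketch using the in-memory zake driver (any backend URL would do; the group, member and lock names are examples):

from tooz import coordination

coord = coordination.get_coordinator('zake://', b'member-1')
coord.start(start_heart=True)  # heart only runs if the driver needs beating
try:
    coord.join_group_create(b'example-group')
    members = coord.get_members(b'example-group').get()  # CoordAsyncResult
    with coord.get_lock(b'example-lock'):
        pass  # critical section
finally:
    coord.stop()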

View File

@ -1,172 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Yahoo! Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import consul
from oslo_utils import encodeutils
import tooz
from tooz import _retry
from tooz import coordination
from tooz import locking
from tooz import utils
class ConsulLock(locking.Lock):
def __init__(self, name, node, address, session_id, client):
super(ConsulLock, self).__init__(name)
self._name = name
self._node = node
self._address = address
self._session_id = session_id
self._client = client
self.acquired = False
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
@_retry.retry(stop_max_delay=blocking)
def _acquire():
# Check if we are the owner and, if we are, simulate
# blocking (because consul will not block a second
# acquisition attempt by the same owner).
_index, value = self._client.kv.get(key=self._name)
if value and value.get('Session') == self._session_id:
if blocking is False:
return False
else:
raise _retry.TryAgain
else:
# The value can be anything.
gotten = self._client.kv.put(key=self._name,
value=u"I got it!",
acquire=self._session_id)
if gotten:
self.acquired = True
return True
if blocking is False:
return False
else:
raise _retry.TryAgain
return _acquire()
def release(self):
if not self.acquired:
return False
# Get the lock to verify the session IDs are the same
_index, contents = self._client.kv.get(key=self._name)
if not contents:
return False
owner = contents.get('Session')
if owner == self._session_id:
removed = self._client.kv.put(key=self._name,
value=self._session_id,
release=self._session_id)
if removed:
self.acquired = False
return True
return False
class ConsulDriver(coordination.CoordinationDriver):
"""This driver uses `python-consul`_ client against `consul`_ servers.
The ConsulDriver implements the minimal set of coordination driver API(s)
needed for Consul to be used as an option for distributed locking. The
data is stored in Consul's key-value store.
To configure the client to your liking please refer to
http://python-consul.readthedocs.org/en/latest/. A few options, like 'ttl'
and 'namespace', are passed as part of the options. 'ttl' governs how
long the session holding the lock stays active.
.. _python-consul: http://python-consul.readthedocs.org/
.. _consul: https://consul.io/
"""
#: Default namespace when none is provided
TOOZ_NAMESPACE = u"tooz"
#: Default TTL
DEFAULT_TTL = 15
#: Default consul port if not provided.
DEFAULT_PORT = 8500
def __init__(self, member_id, parsed_url, options):
super(ConsulDriver, self).__init__(member_id, parsed_url, options)
options = utils.collapse(options)
self._host = parsed_url.hostname
self._port = parsed_url.port or self.DEFAULT_PORT
self._session_id = None
self._session_name = encodeutils.safe_decode(member_id)
self._ttl = int(options.get('ttl', self.DEFAULT_TTL))
namespace = options.get('namespace', self.TOOZ_NAMESPACE)
self._namespace = encodeutils.safe_decode(namespace)
self._client = None
def _start(self):
"""Create a client, register a node and create a session."""
# Create a consul client
if self._client is None:
self._client = consul.Consul(host=self._host, port=self._port)
local_agent = self._client.agent.self()
self._node = local_agent['Member']['Name']
self._address = local_agent['Member']['Addr']
# Register a Node
self._client.catalog.register(node=self._node,
address=self._address)
# Create a session
self._session_id = self._client.session.create(
name=self._session_name, node=self._node, ttl=self._ttl)
def _stop(self):
if self._client is not None:
if self._session_id is not None:
self._client.session.destroy(self._session_id)
self._session_id = None
self._client = None
def get_lock(self, name):
real_name = self._paths_join(self._namespace, u"locks", name)
return ConsulLock(real_name, self._node, self._address,
session_id=self._session_id,
client=self._client)
@staticmethod
def _paths_join(*args):
pieces = []
for arg in args:
pieces.append(encodeutils.safe_decode(arg))
return u"/".join(pieces)
def watch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def watch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
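Illustrative usage of this driver; host, port and ttl are example values:

from tooz import coordination

coord = coordination.get_coordinator('consul://localhost:8500?ttl=15',
                                     b'member-1')
coord.start()
lock = coord.get_lock(b'my-lock')
if lock.acquire(blocking=False):  # returns False instead of waiting
    try:
        pass  # critical section
    finally:
        lock.release()
coord.stop()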

View File

@ -1,258 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import fasteners
from oslo_utils import encodeutils
from oslo_utils import timeutils
import requests
import six
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
def _translate_failures(func):
"""Translates common requests exceptions into tooz exceptions."""
@six.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ValueError as e:
# Typically json decoding failed for some reason.
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
except requests.exceptions.RequestException as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
return wrapper
class _Client(object):
def __init__(self, host, port, protocol):
self.host = host
self.port = port
self.protocol = protocol
self.session = requests.Session()
@property
def base_url(self):
return self.protocol + '://' + self.host + ':' + str(self.port)
def get_url(self, path):
return self.base_url + '/v2/' + path.lstrip("/")
def get(self, url, **kwargs):
if kwargs.pop('make_url', True):
url = self.get_url(url)
return self.session.get(url, **kwargs).json()
def put(self, url, **kwargs):
if kwargs.pop('make_url', True):
url = self.get_url(url)
return self.session.put(url, **kwargs).json()
def delete(self, url, **kwargs):
if kwargs.pop('make_url', True):
url = self.get_url(url)
return self.session.delete(url, **kwargs).json()
def self_stats(self):
return self.session.get(self.get_url("/stats/self"))
class EtcdLock(locking.Lock):
_TOOZ_LOCK_PREFIX = "tooz_locks"
def __init__(self, lock_url, name, coord, client, ttl=60):
super(EtcdLock, self).__init__(name)
self.client = client
self.coord = coord
self.ttl = ttl
self._lock_url = lock_url
self._node = None
# NOTE(jschwarz): this lock is mainly used to prevent concurrent runs
# of heartbeat() with another function. For more details, see
# https://bugs.launchpad.net/python-tooz/+bug/1603005.
self._lock = threading.Lock()
@_translate_failures
@fasteners.locked
def break_(self):
reply = self.client.delete(self._lock_url, make_url=False)
return reply.get('errorCode') is None
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
blocking, timeout = utils.convert_blocking(blocking)
if timeout is not None:
watch = timeutils.StopWatch(duration=timeout)
watch.start()
else:
watch = None
while True:
if self.acquired:
# We already acquired the lock. Just go ahead and wait forever,
# if blocking != False, using the last index.
lastindex = self._node['modifiedIndex']
else:
try:
reply = self.client.put(
self._lock_url,
make_url=False,
timeout=watch.leftover() if watch else None,
data={"ttl": self.ttl,
"prevExist": "false"})
except requests.exceptions.RequestException:
if not watch or watch.leftover() == 0:
return False
# We got the lock!
if reply.get("errorCode") is None:
with self._lock:
self._node = reply['node']
self.coord._acquired_locks.add(self)
return True
# No lock, somebody got it, wait for it to be released
lastindex = reply['index'] + 1
# We didn't get the lock and we don't want to wait
if not blocking:
return False
# Ok, so let's wait a bit (or forever!)
try:
reply = self.client.get(
self._lock_url +
"?wait=true&waitIndex=%d" % lastindex,
make_url=False,
timeout=watch.leftover() if watch else None)
except requests.exceptions.RequestException:
if not watch or watch.expired():
return False
@_translate_failures
@fasteners.locked
def release(self):
if self.acquired:
lock_url = self._lock_url
lock_url += "?prevIndex=%s" % self._node['modifiedIndex']
reply = self.client.delete(lock_url, make_url=False)
errorcode = reply.get("errorCode")
if errorcode is None:
self.coord._acquired_locks.discard(self)
self._node = None
return True
else:
LOG.warning("Unable to release '%s' due to %d, %s",
self.name, errorcode, reply.get('message'))
return False
@property
def acquired(self):
return self in self.coord._acquired_locks
@_translate_failures
@fasteners.locked
def heartbeat(self):
"""Keep the lock alive."""
if self.acquired:
poked = self.client.put(self._lock_url,
data={"ttl": self.ttl,
"prevExist": "true"}, make_url=False)
self._node = poked['node']
errorcode = poked.get("errorCode")
if not errorcode:
return True
LOG.warning("Unable to heartbeat by updating key '%s' with "
"extended expiry of %s seconds: %d, %s", self.name,
self.ttl, errorcode, poked.get("message"))
return False
class EtcdDriver(coordination.CoordinationDriver):
"""An etcd based driver.
This driver uses etcd to provide the coordination driver semantics and
required API(s).
"""
#: Default socket/lock/member/leader timeout used when none is provided.
DEFAULT_TIMEOUT = 30
#: Default hostname used when none is provided.
DEFAULT_HOST = "localhost"
#: Default port used if none provided (4001 or 2379 are the common ones).
DEFAULT_PORT = 2379
#: Class that will be used to encode lock names into a valid etcd url.
lock_encoder_cls = utils.Base64LockEncoder
def __init__(self, member_id, parsed_url, options):
super(EtcdDriver, self).__init__(member_id, parsed_url, options)
host = parsed_url.hostname or self.DEFAULT_HOST
port = parsed_url.port or self.DEFAULT_PORT
options = utils.collapse(options)
self.client = _Client(host=host, port=port,
protocol=options.get('protocol', 'http'))
default_timeout = options.get('timeout', self.DEFAULT_TIMEOUT)
self.lock_encoder = self.lock_encoder_cls(self.client.get_url("keys"))
self.lock_timeout = int(options.get('lock_timeout', default_timeout))
self._acquired_locks = set()
def _start(self):
try:
self.client.self_stats()
except requests.exceptions.ConnectionError as e:
raise coordination.ToozConnectionError(
encodeutils.exception_to_unicode(e))
def get_lock(self, name):
return EtcdLock(self.lock_encoder.check_and_encode(name), name,
self, self.client, self.lock_timeout)
def heartbeat(self):
for lock in self._acquired_locks.copy():
lock.heartbeat()
return self.lock_timeout
def watch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def watch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
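Illustrative usage; since the driver overrides heartbeat(), starting the coordinator with start_heart=True keeps the TTL of acquired locks refreshed in the background (URL and options are example values):

from tooz import coordination

coord = coordination.get_coordinator(
    'etcd://localhost:2379?lock_timeout=30', b'member-1')
coord.start(start_heart=True)
with coord.get_lock(b'my-lock'):
    pass  # critical section; the heart thread refreshes the TTL
coord.stop()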

View File

@ -1,148 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import threading
import etcd3
from etcd3 import exceptions as etcd3_exc
from oslo_utils import encodeutils
import six
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
def _translate_failures(func):
"""Translates common requests exceptions into tooz exceptions."""
@six.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except etcd3_exc.ConnectionFailedError as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
except etcd3_exc.ConnectionTimeoutError as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except etcd3_exc.Etcd3Exception as e:
utils.raise_with_cause(coordination.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
return wrapper
class Etcd3Lock(locking.Lock):
"""An etcd3-specific lock.
Thin wrapper over etcd3's lock object, mainly to provide the heartbeat()
semantics for the coordination driver.
"""
LOCK_PREFIX = b"/tooz/locks"
def __init__(self, coord, name, timeout):
super(Etcd3Lock, self).__init__(name)
self._coord = coord
self._lock = coord.client.lock(name.decode(), timeout)
self._exclusive_access = threading.Lock()
@_translate_failures
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
blocking, timeout = utils.convert_blocking(blocking)
if blocking is False:
timeout = 0
if self._lock.acquire(timeout):
self._coord._acquired_locks.add(self)
return True
return False
@property
def acquired(self):
return self in self._coord._acquired_locks
@_translate_failures
def release(self):
with self._exclusive_access:
if self.acquired and self._lock.release():
self._coord._acquired_locks.discard(self)
return True
return False
@_translate_failures
def heartbeat(self):
with self._exclusive_access:
if self.acquired:
self._lock.refresh()
return True
return False
class Etcd3Driver(coordination.CoordinationDriver):
"""An etcd based driver.
This driver uses etcd to provide the coordination driver semantics and
required API(s).
"""
#: Default socket/lock/member/leader timeout used when none is provided.
DEFAULT_TIMEOUT = 30
#: Default hostname used when none is provided.
DEFAULT_HOST = "localhost"
#: Default port used if none provided (4001 or 2379 are the common ones).
DEFAULT_PORT = 2379
def __init__(self, member_id, parsed_url, options):
super(Etcd3Driver, self).__init__(member_id, parsed_url, options)
host = parsed_url.hostname or self.DEFAULT_HOST
port = parsed_url.port or self.DEFAULT_PORT
options = utils.collapse(options)
timeout = int(options.get('timeout', self.DEFAULT_TIMEOUT))
self.client = etcd3.client(host=host, port=port, timeout=timeout)
self.lock_timeout = int(options.get('lock_timeout', timeout))
self._acquired_locks = set()
def get_lock(self, name):
return Etcd3Lock(self, name, self.lock_timeout)
def heartbeat(self):
# NOTE(jaypipes): Copying because set can mutate during iteration
for lock in self._acquired_locks.copy():
lock.heartbeat()
return self.lock_timeout
def watch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def watch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
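Usage mirrors the etcd driver above; a numeric blocking value is converted by utils.convert_blocking into a bounded wait (example values):

from tooz import coordination

coord = coordination.get_coordinator('etcd3://localhost:2379', b'member-1')
coord.start(start_heart=True)
lock = coord.get_lock(b'my-lock')
if lock.acquire(blocking=5):  # wait at most ~5 seconds
    try:
        pass  # critical section
    finally:
        lock.release()
coord.stop()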

View File

@ -1,204 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import base64
import threading
import uuid
import etcd3gw
from etcd3gw import exceptions as etcd3_exc
from oslo_utils import encodeutils
import six
import tooz
from tooz import _retry
from tooz import coordination
from tooz import locking
from tooz import utils
def _translate_failures(func):
"""Translates common requests exceptions into tooz exceptions."""
@six.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except etcd3_exc.ConnectionFailedError as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
except etcd3_exc.ConnectionTimeoutError as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except etcd3_exc.Etcd3Exception as e:
utils.raise_with_cause(coordination.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
return wrapper
class Etcd3Lock(locking.Lock):
"""An etcd3-specific lock.
Implements the lock via etcd3gw transactions and leases, and provides the
heartbeat() semantics for the coordination driver.
"""
LOCK_PREFIX = b"/tooz/locks"
def __init__(self, coord, name, timeout):
super(Etcd3Lock, self).__init__(name)
self._timeout = timeout
self._coord = coord
self._key = self.LOCK_PREFIX + name
self._key_b64 = base64.b64encode(self._key).decode("ascii")
self._uuid = base64.b64encode(uuid.uuid4().bytes).decode("ascii")
self._lease = self._coord.client.lease(self._timeout)
self._exclusive_access = threading.Lock()
@_translate_failures
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
@_retry.retry(stop_max_delay=blocking)
def _acquire():
# TODO(jd): save the created revision so we can check it later to
# make sure we still have the lock
txn = {
'compare': [{
'key': self._key_b64,
'result': 'EQUAL',
'target': 'CREATE',
'create_revision': 0
}],
'success': [{
'request_put': {
'key': self._key_b64,
'value': self._uuid,
'lease': self._lease.id
}
}],
'failure': [{
'request_range': {
'key': self._key_b64
}
}]
}
result = self._coord.client.transaction(txn)
success = result.get('succeeded', False)
if success is not True:
if blocking is False:
return False
raise _retry.TryAgain
self._coord._acquired_locks.add(self)
return True
return _acquire()
@_translate_failures
def release(self):
txn = {
'compare': [{
'key': self._key_b64,
'result': 'EQUAL',
'target': 'VALUE',
'value': self._uuid
}],
'success': [{
'request_delete_range': {
'key': self._key_b64
}
}]
}
with self._exclusive_access:
result = self._coord.client.transaction(txn)
success = result.get('succeeded', False)
if success:
self._coord._acquired_locks.remove(self)
return True
return False
@_translate_failures
def break_(self):
if self._coord.client.delete(self._key):
self._coord._acquired_locks.discard(self)
return True
return False
@property
def acquired(self):
return self in self._coord._acquired_locks
@_translate_failures
def heartbeat(self):
with self._exclusive_access:
if self.acquired:
self._lease.refresh()
return True
return False
class Etcd3Driver(coordination.CoordinationDriver):
"""An etcd based driver.
This driver uses etcd to provide the coordination driver semantics and
required API(s).
"""
#: Default socket/lock/member/leader timeout used when none is provided.
DEFAULT_TIMEOUT = 30
#: Default hostname used when none is provided.
DEFAULT_HOST = "localhost"
#: Default port used if none provided (4001 or 2379 are the common ones).
DEFAULT_PORT = 2379
def __init__(self, member_id, parsed_url, options):
super(Etcd3Driver, self).__init__(member_id, parsed_url, options)
host = parsed_url.hostname or self.DEFAULT_HOST
port = parsed_url.port or self.DEFAULT_PORT
options = utils.collapse(options)
timeout = int(options.get('timeout', self.DEFAULT_TIMEOUT))
self.client = etcd3gw.client(host=host, port=port, timeout=timeout)
self.lock_timeout = int(options.get('lock_timeout', timeout))
self._acquired_locks = set()
def get_lock(self, name):
return Etcd3Lock(self, name, self.lock_timeout)
def heartbeat(self):
# NOTE(jaypipes): Copying because set can mutate during iteration
for lock in self._acquired_locks.copy():
lock.heartbeat()
return self.lock_timeout
def watch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_join_group(self, group_id, callback):
raise tooz.NotImplemented
def watch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
def unwatch_leave_group(self, group_id, callback):
raise tooz.NotImplemented
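Same pattern over the etcd v3 HTTP gateway; only the URL scheme differs (example values):

from tooz import coordination

coord = coordination.get_coordinator(
    'etcd3+http://localhost:2379?lock_timeout=30', b'member-1')
coord.start(start_heart=True)
with coord.get_lock(b'my-lock'):
    pass  # the lease is refreshed by heartbeat()
coord.stop()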

View File

@ -1,532 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import errno
import functools
import hashlib
import logging
import os
import re
import shutil
import sys
import tempfile
import threading
import weakref
import fasteners
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import timeutils
import six
import voluptuous
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
class _Barrier(object):
def __init__(self):
self.cond = threading.Condition()
self.owner = None
self.shared = False
self.ref = 0
@contextlib.contextmanager
def _translate_failures():
try:
yield
except (EnvironmentError, voluptuous.Invalid) as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def _convert_from_old_format(data):
# NOTE(sileht): previous versions of the driver stored str as-is,
# making it impossible to read from python3 something written with the
# python2 version of the lib.
# Now everything is stored with an explicit type, bytes or unicode. This
# converts the old format to the new one to maintain compatibility with
# already deployed files.
# example of potential old python2 payload:
# {b"member_id": b"member"}
# {b"member_id": u"member"}
# example of potential old python3 payload:
# {u"member_id": b"member"}
# {u"member_id": u"member"}
if six.PY3 and (b"member_id" in data or b"group_id" in data):
data = dict((k.decode("utf8"), v) for k, v in data.items())
# About the member_id and group_id values: if the file was written
# with python2 in the old format, we can't know under python3
# whether we need to decode the value or not (python3 just sees a
# bytes blob). We keep it as-is and pray; this has a good chance of
# breaking if the application used str in python2 and unicode in
# python3. The member file is overwritten often so it should be
# fine, but the group file can be very old, so we
# now have to update it each time create_group is called
return data
def _lock_me(lock):
def wrapper(func):
@six.wraps(func)
def decorator(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return decorator
return wrapper
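# Illustrative note (not in the original): _lock_me is a decorator
# factory; the returned decorator runs the wrapped function while
# holding the given lock, e.g.
#
#     @_lock_me(driver_lock)
#     def _mutate_state():
#         ...
#
# which is how the driver below serializes its filesystem mutations.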
class FileLock(locking.Lock):
"""A file based lock."""
def __init__(self, path, barrier, member_id):
super(FileLock, self).__init__(path)
self.acquired = False
self._lock = fasteners.InterProcessLock(path)
self._barrier = barrier
self._member_id = member_id
self.ref = 0
def is_still_owner(self):
return self.acquired
def acquire(self, blocking=True, shared=False):
blocking, timeout = utils.convert_blocking(blocking)
watch = timeutils.StopWatch(duration=timeout)
watch.start()
# Make the shared barrier ours first.
with self._barrier.cond:
while self._barrier.owner is not None:
if (shared and self._barrier.shared):
break
if not blocking or watch.expired():
return False
self._barrier.cond.wait(watch.leftover(return_none=True))
self._barrier.owner = (threading.current_thread().ident,
os.getpid(), self._member_id)
self._barrier.shared = shared
self._barrier.ref += 1
self.ref += 1
# Ok at this point we are now working in a thread safe manner,
# and now we can try to get the actual lock...
gotten = False
try:
gotten = self._lock.acquire(
blocking=blocking,
# Since the barrier waiting may have
# taken a long time, we have to use
# the leftover (and not the original).
timeout=watch.leftover(return_none=True))
finally:
# NOTE(harlowja): do this in a finally block to **ensure** that
# we release the barrier if something bad happens...
if not gotten:
# Release the barrier to let someone else have a go at it...
with self._barrier.cond:
self._barrier.owner = None
self._barrier.ref = 0
self._barrier.shared = False
self._barrier.cond.notify_all()
self.acquired = gotten
return gotten
def release(self):
if not self.acquired:
return False
with self._barrier.cond:
self._barrier.ref -= 1
self.ref -= 1
if not self.ref:
self.acquired = False
if not self._barrier.ref:
self._barrier.owner = None
self._lock.release()
self._barrier.cond.notify_all()
return True
def __del__(self):
if self.acquired:
LOG.warning("Unreleased lock %s garbage collected", self.name)
class FileDriver(coordination.CoordinationDriverCachedRunWatchers,
coordination.CoordinationDriverWithExecutor):
"""A file based driver.
This driver uses files and directories (and associated file locks) to
provide the coordination driver semantics and required API(s). It **is**
missing some functionality, but in the future these unimplemented API(s)
will be filled in.
General recommendations/usage considerations:
- It does **not** automatically delete members from
groups of processes that have died; manual cleanup will be needed
for those types of failures.
- It is **not** distributed (nor recommended for use in those
situations), so developers should really take that into
account when applying this driver in their app.
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
HASH_ROUTINE = 'sha1'
"""This routine is used to hash a member (or group) id into a filesystem
safe name that can be used for member lookup and group joining."""
_barriers = weakref.WeakValueDictionary()
"""
Barriers shared among all file driver locks; these are required
since interprocess locking is not thread aware, so we must add the
thread awareness on top of it ourselves.
"""
def __init__(self, member_id, parsed_url, options):
"""Initialize the file driver."""
super(FileDriver, self).__init__(member_id, parsed_url, options)
self._dir = self._normalize_path(parsed_url.path)
self._group_dir = os.path.join(self._dir, 'groups')
self._tmpdir = os.path.join(self._dir, 'tmp')
self._driver_lock_path = os.path.join(self._dir, '.driver_lock')
self._driver_lock = self._get_raw_lock(self._driver_lock_path,
self._member_id)
self._reserved_dirs = [self._dir, self._group_dir, self._tmpdir]
self._reserved_paths = list(self._reserved_dirs)
self._reserved_paths.append(self._driver_lock_path)
self._safe_member_id = self._make_filesystem_safe(member_id)
self._timeout = int(self._options.get('timeout', 10))
@staticmethod
def _normalize_path(path):
if sys.platform == 'win32':
# Replace slashes with backslashes and make sure we don't
# have any at the beginning of paths that include drive letters.
#
# Expected url format:
# file:////share_address/share_name
# file:///C:/path
return re.sub(r'\\(?=\w:\\)', '',
os.path.normpath(path))
return path
@classmethod
def _get_raw_lock(cls, path, member_id):
lock_barrier = cls._barriers.setdefault(path, _Barrier())
return FileLock(path, lock_barrier, member_id)
def get_lock(self, name):
path = utils.safe_abs_path(self._dir, name.decode())
if path in self._reserved_paths:
raise ValueError("Unable to create a lock using"
" reserved path '%s' for lock"
" with name '%s'" % (path, name))
return self._get_raw_lock(path, self._member_id)
@classmethod
def _make_filesystem_safe(cls, item):
item = utils.to_binary(item, encoding="utf8")
return hashlib.new(cls.HASH_ROUTINE, item).hexdigest()
def _start(self):
super(FileDriver, self)._start()
for a_dir in self._reserved_dirs:
try:
fileutils.ensure_tree(a_dir)
except OSError as e:
raise coordination.ToozConnectionError(e)
def _update_group_metadata(self, path, group_id):
details = {
u'group_id': utils.to_binary(group_id, encoding="utf8")
}
details[u'encoded'] = details[u"group_id"] != group_id
details_blob = utils.dumps(details)
fd, name = tempfile.mkstemp("tooz", dir=self._tmpdir)
with os.fdopen(fd, "wb") as fh:
fh.write(details_blob)
os.rename(name, path)
def create_group(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
group_meta_path = os.path.join(group_dir, '.metadata')
def _do_create_group():
if os.path.exists(os.path.join(group_dir, ".metadata")):
# NOTE(sileht): We update the group metadata even if
# they are already good, to ensure the dict keys are
# converted to unicode in case the file was written with
# tooz < 1.36
self._update_group_metadata(group_meta_path, group_id)
raise coordination.GroupAlreadyExist(group_id)
else:
fileutils.ensure_tree(group_dir)
self._update_group_metadata(group_meta_path, group_id)
fut = self._executor.submit(_do_create_group)
return FileFutureResult(fut)
def join_group(self, group_id, capabilities=b""):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
me_path = os.path.join(group_dir, "%s.raw" % self._safe_member_id)
@_lock_me(self._driver_lock)
def _do_join_group():
if not os.path.exists(os.path.join(group_dir, ".metadata")):
raise coordination.GroupNotCreated(group_id)
if os.path.isfile(me_path):
raise coordination.MemberAlreadyExist(group_id,
self._member_id)
details = {
u'capabilities': capabilities,
u'joined_on': datetime.datetime.now(),
u'member_id': utils.to_binary(self._member_id,
encoding="utf-8")
}
details[u'encoded'] = details[u"member_id"] != self._member_id
details_blob = utils.dumps(details)
with open(me_path, "wb") as fh:
fh.write(details_blob)
self._joined_groups.add(group_id)
fut = self._executor.submit(_do_join_group)
return FileFutureResult(fut)
def leave_group(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
me_path = os.path.join(group_dir, "%s.raw" % self._safe_member_id)
@_lock_me(self._driver_lock)
def _do_leave_group():
if not os.path.exists(os.path.join(group_dir, ".metadata")):
raise coordination.GroupNotCreated(group_id)
try:
os.unlink(me_path)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
else:
raise coordination.MemberNotJoined(group_id,
self._member_id)
else:
self._joined_groups.discard(group_id)
fut = self._executor.submit(_do_leave_group)
return FileFutureResult(fut)
_SCHEMAS = {
'group': voluptuous.Schema({
voluptuous.Required('group_id'): voluptuous.Any(six.text_type,
six.binary_type),
# NOTE(sileht): tooz <1.36 was creating file without this
voluptuous.Optional('encoded'): bool,
}),
'member': voluptuous.Schema({
voluptuous.Required('member_id'): voluptuous.Any(six.text_type,
six.binary_type),
voluptuous.Required('joined_on'): datetime.datetime,
# NOTE(sileht): tooz <1.36 was creating file without this
voluptuous.Optional('encoded'): bool,
}, extra=voluptuous.ALLOW_EXTRA),
}
def _load_and_validate(self, blob, schema_key):
data = utils.loads(blob)
data = _convert_from_old_format(data)
schema = self._SCHEMAS[schema_key]
return schema(data)
def _read_member_id(self, path):
with open(path, 'rb') as fh:
details = self._load_and_validate(fh.read(), 'member')
if details.get("encoded"):
return details[u'member_id'].decode("utf-8")
return details[u'member_id']
def get_members(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
@_lock_me(self._driver_lock)
def _do_get_members():
if not os.path.isdir(group_dir):
raise coordination.GroupNotCreated(group_id)
members = set()
try:
entries = os.listdir(group_dir)
except EnvironmentError as e:
# Did someone manage to remove it before we got here...
if e.errno != errno.ENOENT:
raise
else:
for entry in entries:
if not entry.endswith('.raw'):
continue
entry_path = os.path.join(group_dir, entry)
try:
m_time = datetime.datetime.fromtimestamp(
os.stat(entry_path).st_mtime)
current_time = datetime.datetime.now()
delta_time = timeutils.delta_seconds(m_time,
current_time)
if delta_time >= 0 and delta_time <= self._timeout:
member_id = self._read_member_id(entry_path)
else:
continue
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
else:
members.add(member_id)
return members
fut = self._executor.submit(_do_get_members)
return FileFutureResult(fut)
def get_member_capabilities(self, group_id, member_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
safe_member_id = self._make_filesystem_safe(member_id)
member_path = os.path.join(group_dir, "%s.raw" % safe_member_id)
@_lock_me(self._driver_lock)
def _do_get_member_capabilities():
try:
with open(member_path, "rb") as fh:
contents = fh.read()
except EnvironmentError as e:
if e.errno == errno.ENOENT:
if not os.path.isdir(group_dir):
raise coordination.GroupNotCreated(group_id)
else:
raise coordination.MemberNotJoined(group_id,
member_id)
else:
raise
else:
details = self._load_and_validate(contents, 'member')
return details.get(u"capabilities")
fut = self._executor.submit(_do_get_member_capabilities)
return FileFutureResult(fut)
def delete_group(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
@_lock_me(self._driver_lock)
def _do_delete_group():
try:
entries = os.listdir(group_dir)
except EnvironmentError as e:
if e.errno == errno.ENOENT:
raise coordination.GroupNotCreated(group_id)
else:
raise
else:
if len(entries) > 1:
raise coordination.GroupNotEmpty(group_id)
elif len(entries) == 1 and entries != ['.metadata']:
raise tooz.ToozError(
"Unexpected path '%s' found in"
" group directory '%s' (expected to only find"
" a '.metadata' path)" % (entries[0], group_dir))
else:
try:
shutil.rmtree(group_dir)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
fut = self._executor.submit(_do_delete_group)
return FileFutureResult(fut)
def _read_group_id(self, path):
with open(path, 'rb') as fh:
details = self._load_and_validate(fh.read(), 'group')
if details.get("encoded"):
return details[u'group_id'].decode("utf-8")
return details[u'group_id']
def get_groups(self):
def _do_get_groups():
groups = []
for entry in os.listdir(self._group_dir):
path = os.path.join(self._group_dir, entry, '.metadata')
try:
groups.append(self._read_group_id(path))
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return groups
fut = self._executor.submit(_do_get_groups)
return FileFutureResult(fut)
def heartbeat(self):
for group_id in self._joined_groups:
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
member_path = os.path.join(group_dir, "%s.raw" %
self._safe_member_id)
@_lock_me(self._driver_lock)
def _do_heartbeat():
try:
os.utime(member_path, None)
except EnvironmentError as err:
if err.errno != errno.ENOENT:
raise
_do_heartbeat()
return self._timeout
@staticmethod
def watch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
FileFutureResult = functools.partial(coordination.CoordinatorResult,
failure_translator=_translate_failures)
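# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, assuming a writable local directory; the file
# driver is reached through the public tooz API with a file:// URL
# whose path is the coordination directory, and the group calls return
# future-like results whose .get() raises translated errors.
if __name__ == '__main__':
    from tooz import coordination

    coord = coordination.get_coordinator('file:///tmp/tooz-demo', b'member-1')
    coord.start()
    try:
        coord.create_group(b'demo-group').get()
    except coordination.GroupAlreadyExist:
        pass  # another member created it first; that's fine
    coord.join_group(b'demo-group', capabilities=b'worker').get()
    print(coord.get_members(b'demo-group').get())
    coord.leave_group(b'demo-group').get()
    coord.stop()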

View File

@ -1,243 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import struct
import time
import msgpack
import six
import sysv_ipc
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
if sysv_ipc.KEY_MIN <= 0:
_KEY_RANGE = abs(sysv_ipc.KEY_MIN) + sysv_ipc.KEY_MAX
else:
_KEY_RANGE = sysv_ipc.KEY_MAX - sysv_ipc.KEY_MIN
def ftok(name, project):
# Similar to ftok & http://semanchuk.com/philip/sysv_ipc/#ftok_weakness
# but hopefully without as many weaknesses...
h = hashlib.md5()
if not isinstance(project, six.binary_type):
project = project.encode('ascii')
h.update(project)
if not isinstance(name, six.binary_type):
name = name.encode('ascii')
h.update(name)
return (int(h.hexdigest(), 16) % _KEY_RANGE) + sysv_ipc.KEY_MIN
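# Illustrative note (not in the original): ftok() deterministically
# folds a (name, project) pair into the platform's valid SysV key
# range, so unrelated processes that compute ftok() on the same inputs
# always rendezvous on the same semaphore key without any prior shared
# state.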
class IPCLock(locking.Lock):
"""A sysv IPC based lock.
Please ensure you have read over (and understood) the limitations of sysv
IPC locks, and especially have tried and used ``$ ipcs -l`` (note the
"maximum number of semaphores system wide" field that command outputs).
To ensure that you do not reach that limit it is recommended to use
destroy() at the correct program exit/entry points.
"""
_LOCK_PROJECT = b'__TOOZ_LOCK_'
def __init__(self, name):
super(IPCLock, self).__init__(name)
self.key = ftok(name, self._LOCK_PROJECT)
self._lock = None
def break_(self):
try:
lock = sysv_ipc.Semaphore(key=self.key)
lock.remove()
except sysv_ipc.ExistentialError:
return False
else:
return True
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
if (blocking is not True and
sysv_ipc.SEMAPHORE_TIMEOUT_SUPPORTED is False):
raise tooz.NotImplemented("This system does not support"
" semaphore timeouts")
blocking, timeout = utils.convert_blocking(blocking)
start_time = None
if not blocking:
timeout = 0
elif blocking and timeout is not None:
start_time = time.time()
while True:
tmplock = None
try:
tmplock = sysv_ipc.Semaphore(self.key,
flags=sysv_ipc.IPC_CREX,
initial_value=1)
tmplock.undo = True
except sysv_ipc.ExistentialError:
# We failed to create it because it already exists, then try to
# grab the existing one.
try:
tmplock = sysv_ipc.Semaphore(self.key)
tmplock.undo = True
except sysv_ipc.ExistentialError:
# Semaphore has been deleted in the mean time, retry from
# the beginning!
continue
if start_time is not None:
elapsed = max(0.0, time.time() - start_time)
if elapsed >= timeout:
# Ran out of time...
return False
adjusted_timeout = timeout - elapsed
else:
adjusted_timeout = timeout
try:
tmplock.acquire(timeout=adjusted_timeout)
except sysv_ipc.BusyError:
tmplock = None
return False
except sysv_ipc.ExistentialError:
# Likely the lock has been deleted in the meantime, retry
continue
else:
self._lock = tmplock
return True
def release(self):
if self._lock is not None:
try:
self._lock.remove()
self._lock = None
except sysv_ipc.ExistentialError:
return False
return True
return False
class IPCDriver(coordination.CoordinationDriverWithExecutor):
"""A `IPC`_ based driver.
This driver uses `IPC`_ concepts to provide the coordination driver
semantics and required API(s). It **is** missing some functionality, but
in the future these unimplemented API(s) will be filled in.
General recommendations/usage considerations:
- It is **not** distributed (nor recommended for use in those
situations), so developers should really take that into
account when applying this driver in their app.
.. _IPC: http://en.wikipedia.org/wiki/Inter-process_communication
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
_SEGMENT_SIZE = 1024
_GROUP_LIST_KEY = "GROUP_LIST"
_GROUP_PROJECT = "_TOOZ_INTERNAL"
_INTERNAL_LOCK_NAME = "TOOZ_INTERNAL_LOCK"
def _start(self):
super(IPCDriver, self)._start()
self._group_list = sysv_ipc.SharedMemory(
ftok(self._GROUP_LIST_KEY, self._GROUP_PROJECT),
sysv_ipc.IPC_CREAT,
size=self._SEGMENT_SIZE)
self._lock = self.get_lock(self._INTERNAL_LOCK_NAME)
def _stop(self):
super(IPCDriver, self)._stop()
try:
self._group_list.detach()
self._group_list.remove()
except sysv_ipc.ExistentialError:
pass
def _read_group_list(self):
data = self._group_list.read(byte_count=2)
length = struct.unpack("H", data)[0]
if length == 0:
return set()
data = self._group_list.read(byte_count=length, offset=2)
return set(msgpack.loads(data))
def _write_group_list(self, group_list):
data = msgpack.dumps(list(group_list))
if len(data) >= self._SEGMENT_SIZE - 2:
raise tooz.ToozError("Group list is too big")
self._group_list.write(struct.pack('H', len(data)))
self._group_list.write(data, offset=2)
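# Illustrative note (not in the original): the shared-memory layout
# used by the two methods above is a 2-byte native-order length header
# (struct format 'H') followed by a msgpack'd list of group ids, which
# is why writes are refused once len(data) >= _SEGMENT_SIZE - 2.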
def create_group(self, group_id):
def _create_group():
with self._lock:
group_list = self._read_group_list()
if group_id in group_list:
raise coordination.GroupAlreadyExist(group_id)
group_list.add(group_id)
self._write_group_list(group_list)
return coordination.CoordinatorResult(
self._executor.submit(_create_group))
def delete_group(self, group_id):
def _delete_group():
with self._lock:
group_list = self._read_group_list()
if group_id not in group_list:
raise coordination.GroupNotCreated(group_id)
group_list.remove(group_id)
self._write_group_list(group_list)
return coordination.CoordinatorResult(
self._executor.submit(_delete_group))
def watch_join_group(self, group_id, callback):
# Check the group exists
self.get_members(group_id).get()
super(IPCDriver, self).watch_join_group(group_id, callback)
def watch_leave_group(self, group_id, callback):
# Check the group exists
self.get_members(group_id).get()
super(IPCDriver, self).watch_leave_group(group_id, callback)
def _get_groups_handler(self):
with self._lock:
return self._read_group_list()
def get_groups(self):
return coordination.CoordinatorResult(self._executor.submit(
self._get_groups_handler))
@staticmethod
def get_lock(name):
return IPCLock(name)
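# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of using IPCLock standalone; a numeric ``blocking``
# value is treated as a timeout in seconds (and requires a platform
# where sysv_ipc supports semaphore timeouts, as checked in acquire()).
if __name__ == '__main__':
    lock = IPCLock(b'demo-lock')
    if lock.acquire(blocking=5):
        try:
            pass  # critical section
        finally:
            lock.release()  # also removes the semaphore, freeing the slot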

View File

@ -1,516 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import logging
import socket
from oslo_utils import encodeutils
from pymemcache import client as pymemcache_client
import six
import tooz
from tooz import _retry
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def _failure_translator():
"""Translates common pymemcache exceptions into tooz exceptions.
https://github.com/pinterest/pymemcache/blob/d995/pymemcache/client.py#L202
"""
try:
yield
except pymemcache_client.MemcacheUnexpectedCloseError as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
except (socket.timeout, socket.error,
socket.gaierror, socket.herror) as e:
# TODO(harlowja): get upstream pymemcache to produce a better
# exception for these, using socket (vs. a memcache specific
# error) seems sorta not right and/or not the best approach...
msg = encodeutils.exception_to_unicode(e)
if e.errno is not None:
msg += " (with errno %s [%s])" % (errno.errorcode[e.errno],
e.errno)
utils.raise_with_cause(coordination.ToozConnectionError,
msg, cause=e)
except pymemcache_client.MemcacheError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def _translate_failures(func):
@six.wraps(func)
def wrapper(*args, **kwargs):
with _failure_translator():
return func(*args, **kwargs)
return wrapper
class MemcachedLock(locking.Lock):
_LOCK_PREFIX = b'__TOOZ_LOCK_'
def __init__(self, coord, name, timeout):
super(MemcachedLock, self).__init__(self._LOCK_PREFIX + name)
self.coord = coord
self.timeout = timeout
def is_still_owner(self):
if not self.acquired:
return False
else:
owner = self.get_owner()
if owner is None:
return False
return owner == self.coord._member_id
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
@_retry.retry(stop_max_delay=blocking)
@_translate_failures
def _acquire():
if self.coord.client.add(
self.name,
self.coord._member_id,
expire=self.timeout,
noreply=False):
self.coord._acquired_locks.append(self)
return True
if blocking is False:
return False
raise _retry.TryAgain
return _acquire()
@_translate_failures
def break_(self):
return bool(self.coord.client.delete(self.name, noreply=False))
@_translate_failures
def release(self):
if not self.acquired:
return False
# NOTE(harlowja): this has the potential to delete others locks
# especially if this key expired before the delete/release call is
# triggered.
#
# For example:
#
# 1. App #1 with coordinator 'A' acquires lock "b"
# 2. App #1 heartbeats every 10 seconds, expiry for lock let's
# say is 11 seconds.
# 3. App #2 with coordinator also named 'A' blocks trying to get
# lock "b" (let's say it retries attempts every 0.5 seconds)
# 4. App #1 is running behind a little bit, tries to heartbeat but
# key has expired (log message is written); at this point app #1
# doesn't own the lock anymore but it doesn't know that.
# 5. App #2 now retries and adds the key, and now it believes it
# has the lock.
# 6. App #1 (still believing it has the lock) calls release, and
# deletes app #2 lock, app #2 now doesn't own the lock anymore
# but it doesn't know that and now app #(X + 1) can get it.
# 7. App #2 calls release (repeat #6 as many times as desired)
#
# Sadly I don't think memcache has the primitives to actually make
# this work, redis does because it has lua which can check a session
# id and then do the delete and bail out if the session id is not
# as expected but memcache doesn't seem to have any equivalent
# capability.
if self not in self.coord._acquired_locks:
return False
# Do a ghetto test to see what the value is... (see above note),
# and how this really can't be done safely with memcache due to
# it being done in the client side (non-atomic).
value = self.coord.client.get(self.name)
if value != self.coord._member_id:
return False
else:
was_deleted = self.coord.client.delete(self.name, noreply=False)
if was_deleted:
self.coord._acquired_locks.remove(self)
return was_deleted
@_translate_failures
def heartbeat(self):
"""Keep the lock alive."""
if self.acquired:
poked = self.coord.client.touch(self.name,
expire=self.timeout,
noreply=False)
if poked:
return True
LOG.warning("Unable to heartbeat by updating key '%s' with "
"extended expiry of %s seconds", self.name,
self.timeout)
return False
@_translate_failures
def get_owner(self):
return self.coord.client.get(self.name)
@property
def acquired(self):
return self in self.coord._acquired_locks
class MemcachedDriver(coordination.CoordinationDriverCachedRunWatchers,
coordination.CoordinationDriverWithExecutor):
"""A `memcached`_ based driver.
This driver uses `memcached`_ concepts to provide the coordination driver
semantics and required API(s). It **is** fully functional and implements
all of the coordination driver API(s). It stores data into memcache
using expiries and `msgpack`_ encoded values.
General recommendations/usage considerations:
- Memcache (without a different backend technology) is a **cache**; enough
said.
.. _memcached: http://memcached.org/
.. _msgpack: http://msgpack.org/
"""
CHARACTERISTICS = (
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
coordination.Characteristics.CAUSAL,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
#: Key prefix attached to groups (used in name-spacing keys)
GROUP_PREFIX = b'_TOOZ_GROUP_'
#: Key prefix attached to leaders of groups (used in name-spacing keys)
GROUP_LEADER_PREFIX = b'_TOOZ_GROUP_LEADER_'
#: Key prefix attached to members of groups (used in name-spacing keys)
MEMBER_PREFIX = b'_TOOZ_MEMBER_'
#: Key where all groups 'known' are stored.
GROUP_LIST_KEY = b'_TOOZ_GROUP_LIST'
#: Default socket/lock/member/leader timeout used when none is provided.
DEFAULT_TIMEOUT = 30
#: String used to keep a key/member alive (until it next expires).
STILL_ALIVE = b"It's alive!"
def __init__(self, member_id, parsed_url, options):
super(MemcachedDriver, self).__init__(member_id, parsed_url, options)
self.host = (parsed_url.hostname or "localhost",
parsed_url.port or 11211)
default_timeout = self._options.get('timeout', self.DEFAULT_TIMEOUT)
self.timeout = int(default_timeout)
self.membership_timeout = int(self._options.get(
'membership_timeout', default_timeout))
self.lock_timeout = int(self._options.get(
'lock_timeout', default_timeout))
self.leader_timeout = int(self._options.get(
'leader_timeout', default_timeout))
max_pool_size = self._options.get('max_pool_size', None)
if max_pool_size is not None:
self.max_pool_size = int(max_pool_size)
else:
self.max_pool_size = None
self._acquired_locks = []
@staticmethod
def _msgpack_serializer(key, value):
if isinstance(value, six.binary_type):
return value, 1
return utils.dumps(value), 2
@staticmethod
def _msgpack_deserializer(key, value, flags):
if flags == 1:
return value
if flags == 2:
return utils.loads(value)
raise coordination.SerializationError("Unknown serialization"
" format '%s'" % flags)
@_translate_failures
def _start(self):
super(MemcachedDriver, self)._start()
self.client = pymemcache_client.PooledClient(
self.host,
serializer=self._msgpack_serializer,
deserializer=self._msgpack_deserializer,
timeout=self.timeout,
connect_timeout=self.timeout,
max_pool_size=self.max_pool_size)
# Run heartbeat here because pymemcache uses a lazy connection
# method and only connects once you do an operation.
self.heartbeat()
@_translate_failures
def _stop(self):
super(MemcachedDriver, self)._stop()
for lock in list(self._acquired_locks):
lock.release()
self.client.delete(self._encode_member_id(self._member_id))
self.client.close()
def _encode_group_id(self, group_id):
return self.GROUP_PREFIX + group_id
def _encode_member_id(self, member_id):
return self.MEMBER_PREFIX + member_id
def _encode_group_leader(self, group_id):
return self.GROUP_LEADER_PREFIX + group_id
@_retry.retry()
def _add_group_to_group_list(self, group_id):
"""Add group to the group list.
:param group_id: The group id
"""
group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
if cas:
group_list = set(group_list)
group_list.add(group_id)
if not self.client.cas(self.GROUP_LIST_KEY,
list(group_list), cas):
# Someone updated the group list before us, try again!
raise _retry.TryAgain
else:
if not self.client.add(self.GROUP_LIST_KEY,
[group_id], noreply=False):
# Someone updated the group list before us, try again!
raise _retry.TryAgain
@_retry.retry()
def _remove_from_group_list(self, group_id):
"""Remove group from the group list.
:param group_id: The group id
"""
group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
group_list = set(group_list)
group_list.remove(group_id)
if not self.client.cas(self.GROUP_LIST_KEY,
list(group_list), cas):
# Someone updated the group list before us, try again!
raise _retry.TryAgain
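# Illustrative note (not in the original): gets() returns a (value, cas
# token) pair and cas() only writes when the token still matches, so
# the two retrying helpers above implement optimistic concurrency on
# the shared group list rather than taking any server-side lock.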
def create_group(self, group_id):
encoded_group = self._encode_group_id(group_id)
@_translate_failures
def _create_group():
if not self.client.add(encoded_group, {}, noreply=False):
raise coordination.GroupAlreadyExist(group_id)
self._add_group_to_group_list(group_id)
return MemcachedFutureResult(self._executor.submit(_create_group))
def get_groups(self):
@_translate_failures
def _get_groups():
return self.client.get(self.GROUP_LIST_KEY) or []
return MemcachedFutureResult(self._executor.submit(_get_groups))
def join_group(self, group_id, capabilities=b""):
encoded_group = self._encode_group_id(group_id)
@_retry.retry()
@_translate_failures
def _join_group():
group_members, cas = self.client.gets(encoded_group)
if group_members is None:
raise coordination.GroupNotCreated(group_id)
if self._member_id in group_members:
raise coordination.MemberAlreadyExist(group_id,
self._member_id)
group_members[self._member_id] = {
b"capabilities": capabilities,
}
if not self.client.cas(encoded_group, group_members, cas):
# It changed, let's try again
raise _retry.TryAgain
self._joined_groups.add(group_id)
return MemcachedFutureResult(self._executor.submit(_join_group))
def leave_group(self, group_id):
encoded_group = self._encode_group_id(group_id)
@_retry.retry()
@_translate_failures
def _leave_group():
group_members, cas = self.client.gets(encoded_group)
if group_members is None:
raise coordination.GroupNotCreated(group_id)
if self._member_id not in group_members:
raise coordination.MemberNotJoined(group_id, self._member_id)
del group_members[self._member_id]
if not self.client.cas(encoded_group, group_members, cas):
# It changed, let's try again
raise _retry.TryAgain
self._joined_groups.discard(group_id)
return MemcachedFutureResult(self._executor.submit(_leave_group))
def _destroy_group(self, group_id):
self.client.delete(self._encode_group_id(group_id))
def delete_group(self, group_id):
encoded_group = self._encode_group_id(group_id)
@_retry.retry()
@_translate_failures
def _delete_group():
group_members, cas = self.client.gets(encoded_group)
if group_members is None:
raise coordination.GroupNotCreated(group_id)
if group_members != {}:
raise coordination.GroupNotEmpty(group_id)
# Delete is not atomic, so we first set the group to None
# using CAS, and then we delete it, to avoid race conditions.
if not self.client.cas(encoded_group, None, cas):
raise _retry.TryAgain
self.client.delete(encoded_group)
self._remove_from_group_list(group_id)
return MemcachedFutureResult(self._executor.submit(_delete_group))
@_retry.retry()
@_translate_failures
def _get_members(self, group_id):
encoded_group = self._encode_group_id(group_id)
group_members, cas = self.client.gets(encoded_group)
if group_members is None:
raise coordination.GroupNotCreated(group_id)
actual_group_members = {}
for m, v in six.iteritems(group_members):
# Never kick self from the group, we know we're alive
if (m == self._member_id or
self.client.get(self._encode_member_id(m))):
actual_group_members[m] = v
if group_members != actual_group_members:
# There are some dead members, update the group
if not self.client.cas(encoded_group, actual_group_members, cas):
# It changed, let's try again
raise _retry.TryAgain
return actual_group_members
def get_members(self, group_id):
def _get_members():
return set(self._get_members(group_id).keys())
return MemcachedFutureResult(self._executor.submit(_get_members))
def get_member_capabilities(self, group_id, member_id):
def _get_member_capabilities():
group_members = self._get_members(group_id)
if member_id not in group_members:
raise coordination.MemberNotJoined(group_id, member_id)
return group_members[member_id][b'capabilities']
return MemcachedFutureResult(
self._executor.submit(_get_member_capabilities))
def update_capabilities(self, group_id, capabilities):
encoded_group = self._encode_group_id(group_id)
@_retry.retry()
@_translate_failures
def _update_capabilities():
group_members, cas = self.client.gets(encoded_group)
if group_members is None:
raise coordination.GroupNotCreated(group_id)
if self._member_id not in group_members:
raise coordination.MemberNotJoined(group_id, self._member_id)
group_members[self._member_id][b'capabilities'] = capabilities
if not self.client.cas(encoded_group, group_members, cas):
# It changed, try again
raise _retry.TryAgain
return MemcachedFutureResult(
self._executor.submit(_update_capabilities))
def get_leader(self, group_id):
def _get_leader():
return self._get_leader_lock(group_id).get_owner()
return MemcachedFutureResult(self._executor.submit(_get_leader))
@_translate_failures
def heartbeat(self):
self.client.set(self._encode_member_id(self._member_id),
self.STILL_ALIVE,
expire=self.membership_timeout)
# Reset the acquired locks
for lock in self._acquired_locks:
lock.heartbeat()
return min(self.membership_timeout,
self.leader_timeout,
self.lock_timeout)
def get_lock(self, name):
return MemcachedLock(self, name, self.lock_timeout)
def _get_leader_lock(self, group_id):
return MemcachedLock(self, self._encode_group_leader(group_id),
self.leader_timeout)
@_translate_failures
def run_elect_coordinator(self):
for group_id, hooks in six.iteritems(self._hooks_elected_leader):
# Try to grab the lock, if that fails, that means someone has it
# already.
leader_lock = self._get_leader_lock(group_id)
if leader_lock.acquire(blocking=False):
# We got the lock
hooks.run(coordination.LeaderElected(
group_id,
self._member_id))
def run_watchers(self, timeout=None):
result = super(MemcachedDriver, self).run_watchers(timeout=timeout)
self.run_elect_coordinator()
return result
MemcachedFutureResult = functools.partial(
coordination.CoordinatorResult,
failure_translator=_failure_translator)
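# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, assuming a memcached server on the default port.
# Because a memcached lock can silently expire (see the long note in
# MemcachedLock.release()), callers should heartbeat and re-check
# ownership around critical sections.
if __name__ == '__main__':
    from tooz import coordination

    coord = coordination.get_coordinator(
        'memcached://localhost:11211/?lock_timeout=30', b'member-1')
    coord.start(start_heart=True)  # heart keeps the member key and locks alive
    lock = coord.get_lock(b'demo-lock')
    if lock.acquire(blocking=True):
        try:
            if lock.is_still_owner():
                pass  # critical section
        finally:
            lock.release()
    coord.stop()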

View File

@ -1,198 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_utils import encodeutils
import pymysql
import tooz
from tooz import _retry
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
class MySQLLock(locking.Lock):
"""A MySQL based lock."""
MYSQL_DEFAULT_PORT = 3306
def __init__(self, name, parsed_url, options):
super(MySQLLock, self).__init__(name)
self.acquired = False
self._conn = MySQLDriver.get_connection(parsed_url, options, True)
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
@_retry.retry(stop_max_delay=blocking)
def _lock():
# NOTE(sileht): mysql-server (<5.7.5) allows only one lock per
# connection at a time:
# select GET_LOCK("a", 0);
# select GET_LOCK("b", 0); <-- this releases lock "a" ...
# Or
# select GET_LOCK("a", 0);
# select GET_LOCK("a", 0); release and lock again "a"
#
# So, we track locally the lock status with self.acquired
if self.acquired is True:
if blocking:
raise _retry.TryAgain
return False
try:
if not self._conn.open:
self._conn.connect()
with self._conn as cur:
cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
# Can return NULL on error
if cur.fetchone()[0] == 1:
self.acquired = True
return True
except pymysql.MySQLError as e:
utils.raise_with_cause(
tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
if blocking:
raise _retry.TryAgain
self._conn.close()
return False
try:
return _lock()
except Exception:
# Close the connection if we tried too much and finally failed, or
# anything else bad happened.
self._conn.close()
raise
def release(self):
if not self.acquired:
return False
try:
with self._conn as cur:
cur.execute("SELECT RELEASE_LOCK(%s);", self.name)
cur.fetchone()
self.acquired = False
self._conn.close()
return True
except pymysql.MySQLError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def __del__(self):
if self.acquired:
LOG.warning("unreleased lock %s garbage collected", self.name)
class MySQLDriver(coordination.CoordinationDriver):
"""A `MySQL`_ based driver.
This driver uses `MySQL`_ database tables to
provide the coordination driver semantics and required API(s). It **is**
missing some functionality, but in the future these unimplemented API(s)
will be filled in.
.. _MySQL: http://dev.mysql.com/
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
def __init__(self, member_id, parsed_url, options):
"""Initialize the MySQL driver."""
super(MySQLDriver, self).__init__(member_id, parsed_url, options)
self._parsed_url = parsed_url
self._options = utils.collapse(options)
def _start(self):
self._conn = MySQLDriver.get_connection(self._parsed_url,
self._options)
def _stop(self):
self._conn.close()
def get_lock(self, name):
return MySQLLock(name, self._parsed_url, self._options)
@staticmethod
def watch_join_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_join_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def watch_leave_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_leave_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def watch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def get_connection(parsed_url, options, defer_connect=False):
host = parsed_url.hostname
port = parsed_url.port or MySQLLock.MYSQL_DEFAULT_PORT
dbname = parsed_url.path[1:]
username = parsed_url.username
password = parsed_url.password
unix_socket = options.get("unix_socket")
try:
if unix_socket:
return pymysql.Connect(unix_socket=unix_socket,
port=port,
user=username,
passwd=password,
database=dbname,
defer_connect=defer_connect)
else:
return pymysql.Connect(host=host,
port=port,
user=username,
passwd=password,
database=dbname,
defer_connect=defer_connect)
except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
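# --- Illustrative usage sketch (not part of the original module;
# credentials and database name below are placeholders). Only locking
# is implemented here, and (on MySQL < 5.7.5) each connection holds at
# most one GET_LOCK() at a time, which is why every MySQLLock opens
# its own connection.
if __name__ == '__main__':
    from tooz import coordination

    coord = coordination.get_coordinator(
        'mysql://user:secret@localhost:3306/toozdb', b'member-1')
    coord.start()
    with coord.get_lock(b'demo-lock'):  # Lock supports the context protocol
        pass  # critical section held via GET_LOCK() on the server
    coord.stop()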

View File

@ -1,249 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import hashlib
import logging
from oslo_utils import encodeutils
import psycopg2
import six
import tooz
from tooz import _retry
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
# See: psycopg/diagnostics_type.c for what kind of fields these
# objects may have (things like 'schema_name', 'internal_query'
# and so-on which are useful for figuring out what went wrong...)
_DIAGNOSTICS_ATTRS = tuple([
'column_name',
'constraint_name',
'context',
'datatype_name',
'internal_position',
'internal_query',
'message_detail',
'message_hint',
'message_primary',
'schema_name',
'severity',
'source_file',
'source_function',
'source_line',
'sqlstate',
'statement_position',
'table_name',
])
def _format_exception(e):
lines = [
"%s: %s" % (type(e).__name__,
encodeutils.exception_to_unicode(e).strip()),
]
if hasattr(e, 'pgcode') and e.pgcode is not None:
lines.append("Error code: %s" % e.pgcode)
# The reason this hasattr check is done is that the 'diag' may not always
# be present, depending on how new the installed psycopg is... so better
# to be safe than sorry...
if hasattr(e, 'diag') and e.diag is not None:
diagnostic_lines = []
for attr_name in _DIAGNOSTICS_ATTRS:
if not hasattr(e.diag, attr_name):
continue
attr_value = getattr(e.diag, attr_name)
if attr_value is None:
continue
diagnostic_lines.append(" %s = %s" % (attr_name, attr_value))
if diagnostic_lines:
lines.append('Diagnostics:')
lines.extend(diagnostic_lines)
return "\n".join(lines)
@contextlib.contextmanager
def _translating_cursor(conn):
try:
with conn.cursor() as cur:
yield cur
except psycopg2.Error as e:
utils.raise_with_cause(tooz.ToozError,
_format_exception(e),
cause=e)
class PostgresLock(locking.Lock):
"""A PostgreSQL based lock."""
def __init__(self, name, parsed_url, options):
super(PostgresLock, self).__init__(name)
self.acquired = False
self._conn = None
self._parsed_url = parsed_url
self._options = options
h = hashlib.md5()
h.update(name)
if six.PY2:
self.key = list(map(ord, h.digest()[0:2]))
else:
self.key = h.digest()[0:2]
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
@_retry.retry(stop_max_delay=blocking)
def _lock():
# NOTE(sileht): On the same session the lock is not exclusive,
# so we track internally whether the process already has the lock.
if self.acquired is True:
if blocking:
raise _retry.TryAgain
return False
if not self._conn or self._conn.closed:
self._conn = PostgresDriver.get_connection(self._parsed_url,
self._options)
with _translating_cursor(self._conn) as cur:
if blocking is True:
cur.execute("SELECT pg_advisory_lock(%s, %s);",
self.key)
cur.fetchone()
self.acquired = True
return True
else:
cur.execute("SELECT pg_try_advisory_lock(%s, %s);",
self.key)
if cur.fetchone()[0] is True:
self.acquired = True
return True
elif blocking is False:
self._conn.close()
return False
else:
raise _retry.TryAgain
try:
return _lock()
except Exception:
if self._conn:
self._conn.close()
raise
def release(self):
if not self.acquired:
return False
with _translating_cursor(self._conn) as cur:
cur.execute("SELECT pg_advisory_unlock(%s, %s);", self.key)
cur.fetchone()
self.acquired = False
self._conn.close()
return True
def __del__(self):
if self.acquired:
LOG.warning("unreleased lock %s garbage collected", self.name)
class PostgresDriver(coordination.CoordinationDriver):
"""A `PostgreSQL`_ based driver.
This driver uses `PostgreSQL`_ database tables to
provide the coordination driver semantics and required API(s). It **is**
missing some functionality, but in the future these unimplemented API(s)
will be filled in.
.. _PostgreSQL: http://www.postgresql.org/
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
def __init__(self, member_id, parsed_url, options):
"""Initialize the PostgreSQL driver."""
super(PostgresDriver, self).__init__(member_id, parsed_url, options)
self._parsed_url = parsed_url
self._options = utils.collapse(options)
def _start(self):
self._conn = self.get_connection(self._parsed_url, self._options)
def _stop(self):
self._conn.close()
def get_lock(self, name):
return PostgresLock(name, self._parsed_url, self._options)
@staticmethod
def watch_join_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_join_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def watch_leave_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_leave_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def watch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def get_connection(parsed_url, options):
host = options.get("host") or parsed_url.hostname
port = options.get("port") or parsed_url.port
dbname = options.get("dbname") or parsed_url.path[1:]
kwargs = {}
if parsed_url.username is not None:
kwargs["user"] = parsed_url.username
if parsed_url.password is not None:
kwargs["password"] = parsed_url.password
try:
return psycopg2.connect(host=host,
port=port,
database=dbname,
**kwargs)
except psycopg2.Error as e:
utils.raise_with_cause(coordination.ToozConnectionError,
_format_exception(e),
cause=e)
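# --- Illustrative sketch (not part of the original module) ---
# PostgresLock maps a lock name onto the two integers that the
# two-argument pg_advisory_lock() expects by taking the first two
# bytes of the name's MD5 digest, so any name collapses into a small
# fixed key space and distinct names *can* collide on the same
# advisory lock. The snippet below just demonstrates that derivation.
if __name__ == '__main__':
    import hashlib

    name = b'demo-lock'
    key = hashlib.md5(name).digest()[0:2]
    print('advisory lock key bytes for %r: %r' % (name, key))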

View File

@ -1,753 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import contextlib
from distutils import version
import functools
import logging
import string
import threading
from oslo_utils import encodeutils
from oslo_utils import strutils
import redis
from redis import exceptions
from redis import sentinel
import six
from six.moves import map as compat_map
from six.moves import zip as compat_zip
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def _translate_failures():
"""Translates common redis exceptions into tooz exceptions."""
try:
yield
except (exceptions.ConnectionError, exceptions.TimeoutError) as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.RedisError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
class RedisLock(locking.Lock):
def __init__(self, coord, client, name, timeout):
name = "%s_%s_lock" % (coord.namespace, six.text_type(name))
super(RedisLock, self).__init__(name)
# NOTE(jd) Make sure we don't release and heartbeat at the same time by
# using an exclusive access lock (LP#1557593)
self._exclusive_access = threading.Lock()
self._lock = client.lock(name,
timeout=timeout,
thread_local=False)
self._coord = coord
self._client = client
def is_still_owner(self):
with _translate_failures():
lock_tok = self._lock.local.token
if not lock_tok:
return False
owner_tok = self._client.get(self.name)
return owner_tok == lock_tok
def break_(self):
with _translate_failures():
return bool(self._client.delete(self.name))
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
blocking, timeout = utils.convert_blocking(blocking)
with _translate_failures():
acquired = self._lock.acquire(
blocking=blocking, blocking_timeout=timeout)
if acquired:
with self._exclusive_access:
self._coord._acquired_locks.add(self)
return acquired
def release(self):
with self._exclusive_access:
if not self.acquired:
return False
with _translate_failures():
try:
self._lock.release()
except exceptions.LockError:
return False
self._coord._acquired_locks.discard(self)
return True
def heartbeat(self):
with self._exclusive_access:
if self.acquired:
with _translate_failures():
self._lock.extend(self._lock.timeout)
return True
return False
@property
def acquired(self):
return self in self._coord._acquired_locks
class RedisDriver(coordination.CoordinationDriverCachedRunWatchers,
coordination.CoordinationDriverWithExecutor):
"""Redis provides a few nice benefits that act as a poormans zookeeper.
It **is** fully functional and implements all of the coordination
driver API(s). It stores data into `redis`_ using the provided `redis`_
API(s), with `msgpack`_ encoded values as needed.
- Durability (when setup with `AOF`_ mode).
- Consistent; note that this is still restricted to only
one redis server: without the recently released (alpha) redis
clustering, more than one server will not be consistent when partitions
or failures occur (even redis clustering docs state it is
not a fully AP or CP solution, which means even with it there
will still be *potential* inconsistencies).
- Master/slave failover (when setup with redis `sentinel`_), giving
some notion of HA (values *can* be lost when a failover transition
occurs).
To use a `sentinel`_ the connection URI must point to the sentinel server.
At connection time the sentinel will be asked for the current IP and port
of the master, and the client will then connect there. The connection URI for sentinel
should be written as follows::
redis://<sentinel host>:<sentinel port>?sentinel=<master name>
Additional sentinel hosts are listed with multiple ``sentinel_fallback``
parameters as follows::
redis://<sentinel host>:<sentinel port>?sentinel=<master name>&
sentinel_fallback=<other sentinel host>:<sentinel port>&
sentinel_fallback=<other sentinel host>:<sentinel port>&
sentinel_fallback=<other sentinel host>:<sentinel port>
Further resources/links:
- http://redis.io/
- http://redis.io/topics/sentinel
- http://redis.io/topics/cluster-spec
Note that this client will itself retry on transaction failure (when the
keys being watched have changed underneath the current transaction).
Currently the number of attempts that are tried is infinite (this might
be addressed in https://github.com/andymccurdy/redis-py/issues/566 when
that gets worked on). See http://redis.io/topics/transactions for more
information on this topic.
General recommendations/usage considerations:
- When used for locks, run in AOF mode and think carefully about how
your redis deployment handles losing a server (the clustering support
is supposed to aid in losing servers, but it is also of unknown
reliability and is relatively new, so use at your own risk).
.. _redis: http://redis.io/
.. _msgpack: http://msgpack.org/
.. _sentinel: http://redis.io/topics/sentinel
.. _AOF: http://redis.io/topics/persistence
"""
CHARACTERISTICS = (
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
coordination.Characteristics.CAUSAL,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
MIN_VERSION = version.LooseVersion("2.6.0")
"""
The minimum redis version that this driver requires in order to operate...
"""
GROUP_EXISTS = b'__created__'
"""
Redis deletes dictionaries that have no keys in them, which means the
key will disappear, so we can't tell the difference between
a group not existing and a group being empty without this key being
saved...
"""
#: Value used (with group exists key) to keep a group from disappearing.
GROUP_EXISTS_VALUE = b'1'
#: Default namespace for keys when none is provided.
DEFAULT_NAMESPACE = b'_tooz'
NAMESPACE_SEP = b':'
"""
Separator that is used to combine a key with the namespace (to get
the **actual** key that will be used).
"""
DEFAULT_ENCODING = 'utf8'
"""
This is for python3.x, which will behave differently when returned
binary types or unicode types (redis appears to use binary internally),
so to just stick with a common way of doing this, we make all the things
binary (with this default encoding if one is not given and a unicode
string is provided).
"""
CLIENT_ARGS = frozenset([
'db',
'encoding',
'retry_on_timeout',
'socket_keepalive',
'socket_timeout',
'ssl',
'ssl_certfile',
'ssl_keyfile',
'sentinel',
'sentinel_fallback',
])
"""
Keys that we allow to proxy from the coordinator configuration into the
redis client (used to configure the redis client internals so that
it works as you expect/want it to).
See: http://redis-py.readthedocs.org/en/latest/#redis.Redis
See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py
"""
#: Client arguments that are expected/allowed to be lists.
CLIENT_LIST_ARGS = frozenset([
'sentinel_fallback',
])
#: Client arguments that are expected to be boolean convertible.
CLIENT_BOOL_ARGS = frozenset([
'retry_on_timeout',
'ssl',
])
#: Client arguments that are expected to be int convertible.
CLIENT_INT_ARGS = frozenset([
'db',
'socket_keepalive',
'socket_timeout',
])
#: Default socket timeout to use when none is provided.
CLIENT_DEFAULT_SOCKET_TO = 30
#: String used to keep a key/member alive (until it next expires).
STILL_ALIVE = b"Not dead!"
SCRIPTS = {
'create_group': """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 1 then
return 0
end
redis.call("sadd", all_groups_key, no_namespaced_group_key)
redis.call("hset", namespaced_group_key,
"${group_existence_key}", "${group_existence_value}")
return 1
""",
'delete_group': """
-- Extract *all* the variables (so we can easily know what they are)...
local namespaced_group_key = KEYS[1]
local all_groups_key = KEYS[2]
local no_namespaced_group_key = ARGV[1]
if redis.call("exists", namespaced_group_key) == 0 then
return -1
end
if redis.call("sismember", all_groups_key, no_namespaced_group_key) == 0 then
return -2
end
if redis.call("hlen", namespaced_group_key) > 1 then
return -3
end
-- First remove from the set (then delete the group); if the set removal
-- fails, at least the group will still exist (and can be fixed manually)...
if redis.call("srem", all_groups_key, no_namespaced_group_key) == 0 then
return -4
end
redis.call("del", namespaced_group_key)
return 1
""",
'update_capabilities': """
-- Extract *all* the variables (so we can easily know what they are)...
local group_key = KEYS[1]
local member_id = ARGV[1]
local caps = ARGV[2]
if redis.call("exists", group_key) == 0 then
return -1
end
if redis.call("hexists", group_key, member_id) == 0 then
return -2
end
redis.call("hset", group_key, member_id, caps)
return 1
""",
}
"""`Lua`_ **template** scripts that will be used by various methods (they
are turned into real scripts and loaded on call into the :func:`.start`
method).
.. _Lua: http://www.lua.org
"""
EXCLUDE_OPTIONS = CLIENT_LIST_ARGS
def __init__(self, member_id, parsed_url, options):
super(RedisDriver, self).__init__(member_id, parsed_url, options)
self._parsed_url = parsed_url
self._encoding = self._options.get('encoding', self.DEFAULT_ENCODING)
timeout = self._options.get('timeout', self.CLIENT_DEFAULT_SOCKET_TO)
self.timeout = int(timeout)
self.membership_timeout = float(self._options.get(
'membership_timeout', timeout))
lock_timeout = self._options.get('lock_timeout', self.timeout)
self.lock_timeout = int(lock_timeout)
namespace = self._options.get('namespace', self.DEFAULT_NAMESPACE)
self._namespace = utils.to_binary(namespace, encoding=self._encoding)
self._group_prefix = self._namespace + b"_group"
self._beat_prefix = self._namespace + b"_beats"
self._groups = self._namespace + b"_groups"
self._client = None
self._acquired_locks = set()
self._started = False
self._server_info = {}
self._scripts = {}
def _check_fetch_redis_version(self, geq_version, not_existent=True):
if isinstance(geq_version, six.string_types):
desired_version = version.LooseVersion(geq_version)
elif isinstance(geq_version, version.LooseVersion):
desired_version = geq_version
else:
raise TypeError("Version check expects a string/version type")
try:
redis_version = version.LooseVersion(
self._server_info['redis_version'])
except KeyError:
return (not_existent, None)
else:
if redis_version < desired_version:
return (False, redis_version)
else:
return (True, redis_version)
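# For illustration (version strings hypothetical):
#
#   new_enough, found = self._check_fetch_redis_version('2.6.0')
#   # new_enough is True when the connected server reports >= 2.6.0;
#   # 'found' is the parsed server version, or None when no server
#   # info has been fetched yet.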
@property
def namespace(self):
return self._namespace
@property
def running(self):
return self._started
def get_lock(self, name):
return RedisLock(self, self._client, name, self.lock_timeout)
_dumps = staticmethod(utils.dumps)
_loads = staticmethod(utils.loads)
@classmethod
def _make_client(cls, parsed_url, options, default_socket_timeout):
kwargs = {}
if parsed_url.hostname:
kwargs['host'] = parsed_url.hostname
if parsed_url.port:
kwargs['port'] = parsed_url.port
else:
if not parsed_url.path:
raise ValueError("Expected socket path in parsed urls path")
kwargs['unix_socket_path'] = parsed_url.path
if parsed_url.password:
kwargs['password'] = parsed_url.password
for a in cls.CLIENT_ARGS:
if a not in options:
continue
if a in cls.CLIENT_BOOL_ARGS:
v = strutils.bool_from_string(options[a])
elif a in cls.CLIENT_LIST_ARGS:
v = options[a]
elif a in cls.CLIENT_INT_ARGS:
v = int(options[a])
else:
v = options[a]
kwargs[a] = v
if 'socket_timeout' not in kwargs:
kwargs['socket_timeout'] = default_socket_timeout
# Ask the sentinel for the current master if there is a
# sentinel arg.
if 'sentinel' in kwargs:
sentinel_hosts = [
tuple(fallback.split(':'))
for fallback in kwargs.get('sentinel_fallback', [])
]
sentinel_hosts.insert(0, (kwargs['host'], kwargs['port']))
sentinel_server = sentinel.Sentinel(
sentinel_hosts,
socket_timeout=kwargs['socket_timeout'])
sentinel_name = kwargs['sentinel']
del kwargs['sentinel']
if 'sentinel_fallback' in kwargs:
del kwargs['sentinel_fallback']
master_client = sentinel_server.master_for(sentinel_name, **kwargs)
# The master_client is a redis.StrictRedis using a
# Sentinel managed connection pool.
return master_client
return redis.StrictRedis(**kwargs)
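# For illustration, a sentinel-managed deployment might be reached with a
# URL such as the following (hosts and the 'mymaster' name hypothetical):
#
#   redis://127.0.0.1:26379?sentinel=mymaster&sentinel_fallback=127.0.0.2:26379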
def _start(self):
super(RedisDriver, self)._start()
try:
self._client = self._make_client(self._parsed_url, self._options,
self.timeout)
except exceptions.RedisError as e:
utils.raise_with_cause(coordination.ToozConnectionError,
encodeutils.exception_to_unicode(e),
cause=e)
else:
# Ensure that the server is alive and not dead; this does not
# guarantee the server will always be alive, but does ensure that
# it is alive at least once...
with _translate_failures():
self._server_info = self._client.info()
# Validate that the redis server we are connected to is new
# enough, so that the basic set of features we support will
# actually work (instead of blowing up).
new_enough, redis_version = self._check_fetch_redis_version(
self.MIN_VERSION)
if not new_enough:
raise tooz.NotImplemented("Redis version greater than or"
" equal to '%s' is required"
" to use this driver; '%s' is"
" being used which is not new"
" enough" % (self.MIN_VERSION,
redis_version))
tpl_params = {
'group_existence_value': self.GROUP_EXISTS_VALUE,
'group_existence_key': self.GROUP_EXISTS,
}
# For py3.x ensure these are unicode since the string template
# replacement will expect unicode (and we don't want b'' as a
# prefix which will happen in py3.x if this is not done).
for (k, v) in six.iteritems(tpl_params.copy()):
if isinstance(v, six.binary_type):
v = v.decode('ascii')
tpl_params[k] = v
prepared_scripts = {}
for name, raw_script_tpl in six.iteritems(self.SCRIPTS):
script_tpl = string.Template(raw_script_tpl)
script = script_tpl.substitute(**tpl_params)
prepared_scripts[name] = self._client.register_script(script)
self._scripts = prepared_scripts
self.heartbeat()
self._started = True
def _encode_beat_id(self, member_id):
member_id = utils.to_binary(member_id, encoding=self._encoding)
return self.NAMESPACE_SEP.join([self._beat_prefix, member_id])
def _encode_member_id(self, member_id):
member_id = utils.to_binary(member_id, encoding=self._encoding)
if member_id == self.GROUP_EXISTS:
raise ValueError("Not allowed to use private keys as a member id")
return member_id
def _decode_member_id(self, member_id):
return utils.to_binary(member_id, encoding=self._encoding)
def _encode_group_leader(self, group_id):
group_id = utils.to_binary(group_id, encoding=self._encoding)
return b"leader_of_" + group_id
def _encode_group_id(self, group_id, apply_namespace=True):
group_id = utils.to_binary(group_id, encoding=self._encoding)
if not apply_namespace:
return group_id
return self.NAMESPACE_SEP.join([self._group_prefix, group_id])
def _decode_group_id(self, group_id):
return utils.to_binary(group_id, encoding=self._encoding)
def heartbeat(self):
with _translate_failures():
beat_id = self._encode_beat_id(self._member_id)
expiry_ms = max(0, int(self.membership_timeout * 1000.0))
self._client.psetex(beat_id, time_ms=expiry_ms,
value=self.STILL_ALIVE)
for lock in self._acquired_locks.copy():
try:
lock.heartbeat()
except tooz.ToozError:
LOG.warning("Unable to heartbeat lock '%s'", lock,
exc_info=True)
return min(self.lock_timeout, self.membership_timeout)
def _stop(self):
while self._acquired_locks:
lock = self._acquired_locks.pop()
try:
lock.release()
except tooz.ToozError:
LOG.warning("Unable to release lock '%s'", lock, exc_info=True)
super(RedisDriver, self)._stop()
if self._client is not None:
# Make sure we no longer exist...
beat_id = self._encode_beat_id(self._member_id)
try:
# NOTE(harlowja): this will delete nothing if the key doesn't
# exist in the first place, which is fine/expected/desired...
with _translate_failures():
self._client.delete(beat_id)
except tooz.ToozError:
LOG.warning("Unable to delete heartbeat key '%s'", beat_id,
exc_info=True)
self._client = None
self._server_info = {}
self._scripts.clear()
self._started = False
def _submit(self, cb, *args, **kwargs):
if not self._started:
raise tooz.ToozError("Redis driver has not been started")
return self._executor.submit(cb, *args, **kwargs)
def _get_script(self, script_key):
try:
return self._scripts[script_key]
except KeyError:
raise tooz.ToozError("Redis driver has not been started")
def create_group(self, group_id):
script = self._get_script('create_group')
def _create_group(script):
encoded_group = self._encode_group_id(group_id)
keys = [
encoded_group,
self._groups,
]
args = [
self._encode_group_id(group_id, apply_namespace=False),
]
result = script(keys=keys, args=args)
result = strutils.bool_from_string(result)
if not result:
raise coordination.GroupAlreadyExist(group_id)
return RedisFutureResult(self._submit(_create_group, script))
def update_capabilities(self, group_id, capabilities):
script = self._get_script('update_capabilities')
def _update_capabilities(script):
keys = [
self._encode_group_id(group_id),
]
args = [
self._encode_member_id(self._member_id),
self._dumps(capabilities),
]
result = int(script(keys=keys, args=args))
if result == -1:
raise coordination.GroupNotCreated(group_id)
if result == -2:
raise coordination.MemberNotJoined(group_id, self._member_id)
return RedisFutureResult(self._submit(_update_capabilities, script))
def leave_group(self, group_id):
encoded_group = self._encode_group_id(group_id)
encoded_member_id = self._encode_member_id(self._member_id)
def _leave_group(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
p.multi()
p.hdel(encoded_group, encoded_member_id)
c = p.execute()[0]
if c == 0:
raise coordination.MemberNotJoined(group_id, self._member_id)
else:
self._joined_groups.discard(group_id)
return RedisFutureResult(self._submit(self._client.transaction,
_leave_group, encoded_group,
value_from_callable=True))
def get_members(self, group_id):
encoded_group = self._encode_group_id(group_id)
def _get_members(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
potential_members = set()
for m in p.hkeys(encoded_group):
m = self._decode_member_id(m)
if m != self.GROUP_EXISTS:
potential_members.add(m)
if not potential_members:
return set()
# Ok now we need to see which members have passed away...
gone_members = set()
member_values = p.mget(compat_map(self._encode_beat_id,
potential_members))
for (potential_member, value) in compat_zip(potential_members,
member_values):
# Always preserve self (just in case we haven't heartbeated
# while this call was being made...); this does *not* prevent
# another client from removing this member though...
if potential_member == self._member_id:
continue
if not value:
gone_members.add(potential_member)
# Trash all the members that no longer are with us... RIP...
if gone_members:
p.multi()
encoded_gone_members = list(self._encode_member_id(m)
for m in gone_members)
p.hdel(encoded_group, *encoded_gone_members)
p.execute()
return set(m for m in potential_members
if m not in gone_members)
return potential_members
return RedisFutureResult(self._submit(self._client.transaction,
_get_members, encoded_group,
value_from_callable=True))
def get_member_capabilities(self, group_id, member_id):
encoded_group = self._encode_group_id(group_id)
encoded_member_id = self._encode_member_id(member_id)
def _get_member_capabilities(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
capabilities = p.hget(encoded_group, encoded_member_id)
if capabilities is None:
raise coordination.MemberNotJoined(group_id, member_id)
return self._loads(capabilities)
return RedisFutureResult(self._submit(self._client.transaction,
_get_member_capabilities,
encoded_group,
value_from_callable=True))
def join_group(self, group_id, capabilities=b""):
encoded_group = self._encode_group_id(group_id)
encoded_member_id = self._encode_member_id(self._member_id)
def _join_group(p):
if not p.exists(encoded_group):
raise coordination.GroupNotCreated(group_id)
p.multi()
p.hset(encoded_group, encoded_member_id,
self._dumps(capabilities))
c = p.execute()[0]
if c == 0:
# Field already exists...
raise coordination.MemberAlreadyExist(group_id,
self._member_id)
else:
self._joined_groups.add(group_id)
return RedisFutureResult(self._submit(self._client.transaction,
_join_group,
encoded_group,
value_from_callable=True))
def delete_group(self, group_id):
script = self._get_script('delete_group')
def _delete_group(script):
keys = [
self._encode_group_id(group_id),
self._groups,
]
args = [
self._encode_group_id(group_id, apply_namespace=False),
]
result = int(script(keys=keys, args=args))
if result in (-1, -2):
raise coordination.GroupNotCreated(group_id)
if result == -3:
raise coordination.GroupNotEmpty(group_id)
if result == -4:
raise tooz.ToozError("Unable to remove '%s' key"
" from set located at '%s'"
% (args[0], keys[-1]))
if result != 1:
raise tooz.ToozError("Internal error, unable"
" to complete group '%s' removal"
% (group_id))
return RedisFutureResult(self._submit(_delete_group, script))
def _destroy_group(self, group_id):
"""Should only be used in tests..."""
self._client.delete(self._encode_group_id(group_id))
def get_groups(self):
def _get_groups():
results = []
for g in self._client.smembers(self._groups):
results.append(self._decode_group_id(g))
return results
return RedisFutureResult(self._submit(_get_groups))
def _get_leader_lock(self, group_id):
name = self._encode_group_leader(group_id)
return self.get_lock(name)
def run_elect_coordinator(self):
for group_id, hooks in six.iteritems(self._hooks_elected_leader):
leader_lock = self._get_leader_lock(group_id)
if leader_lock.acquire(blocking=False):
# We got the lock
hooks.run(coordination.LeaderElected(group_id,
self._member_id))
def run_watchers(self, timeout=None):
result = super(RedisDriver, self).run_watchers(timeout=timeout)
self.run_elect_coordinator()
return result
RedisFutureResult = functools.partial(coordination.CoordinatorResult,
failure_translator=_translate_failures)
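A minimal end-to-end sketch of this driver, assuming a Redis server at a
hypothetical address and hypothetical member/group names:

from tooz import coordination

coord = coordination.get_coordinator('redis://127.0.0.1:6379', b'member-1')
coord.start()
coord.create_group(b'my-group').get()   # runs the 'create_group' Lua script
coord.join_group(b'my-group').get()     # hset on the namespaced group hash
coord.heartbeat()                       # refreshes this member's beat key
coord.leave_group(b'my-group').get()
coord.stop()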


@ -1,58 +0,0 @@
# Copyright (c) 2013-2014 Mirantis Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from zake import fake_client
from zake import fake_storage
from tooz import coordination
from tooz.drivers import zookeeper
class ZakeDriver(zookeeper.KazooDriver):
"""This driver uses the `zake`_ client to mimic real `zookeeper`_ servers.
It **is** really only intended to be used for testing and integration,
where real `zookeeper`_ servers are typically not available.
.. _zake: https://pypi.python.org/pypi/zake
.. _zookeeper: http://zookeeper.apache.org/
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
# NOTE(harlowja): this creates a shared backend 'storage' layer that
# would typically exist inside a zookeeper server, but since zake has
# no concept of a 'real' zookeeper server we create a fake one and share
# it among active clients to simulate zookeeper's consistent storage in
# a thread-safe manner.
fake_storage = fake_storage.FakeStorage(
fake_client.k_threading.SequentialThreadingHandler())
@classmethod
def _make_client(cls, parsed_url, options):
if 'storage' in options:
storage = options['storage']
else:
storage = cls.fake_storage
return fake_client.FakeClient(storage=storage)
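A minimal test-oriented sketch, with hypothetical member and group names;
'zake://' needs no running server since state lives in the shared fake
storage:

from tooz import coordination

coord = coordination.get_coordinator('zake://', b'member-1')
coord.start()
coord.create_group(b'my-group').get()
coord.join_group(b'my-group').get()
coord.stop()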


@ -1,547 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 eNovance Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kazoo import client
from kazoo import exceptions
from kazoo import security
try:
from kazoo.handlers import eventlet as eventlet_handler
except ImportError:
eventlet_handler = None
from kazoo.handlers import threading as threading_handler
from kazoo.protocol import paths
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
from six.moves import filter as compat_filter
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
class ZooKeeperLock(locking.Lock):
def __init__(self, name, lock):
super(ZooKeeperLock, self).__init__(name)
self._lock = lock
self._client = lock.client
def is_still_owner(self):
if not self.acquired:
return False
try:
data, _znode = self._client.get(
paths.join(self._lock.path, self._lock.node))
return data == self._lock.data
except (self._client.handler.timeout_exception,
exceptions.ConnectionLoss,
exceptions.ConnectionDropped,
exceptions.NoNodeError):
return False
except exceptions.KazooException as e:
utils.raise_with_cause(tooz.ToozError,
"operation error: %s" % (e),
cause=e)
def acquire(self, blocking=True, shared=False):
if shared:
raise tooz.NotImplemented
blocking, timeout = utils.convert_blocking(blocking)
return self._lock.acquire(blocking=blocking,
timeout=timeout)
def release(self):
if self.acquired:
self._lock.release()
return True
else:
return False
@property
def acquired(self):
return self._lock.is_acquired
class KazooDriver(coordination.CoordinationDriverCachedRunWatchers):
"""This driver uses the `kazoo`_ client against real `zookeeper`_ servers.
It **is** fully functional and implements all of the coordination
driver API(s). It stores data into `zookeeper`_ using znodes
and `msgpack`_ encoded values.
To configure the client to your liking, a subset of the options defined at
http://kazoo.readthedocs.org/en/latest/api/client.html
will be extracted from the coordinator url (or from any provided options),
so that a specific coordinator can be created that will work for you.
Currently the following options will be proxied to the contained client
(a sample URL is sketched just after this docstring):
================ =============================== ====================
Name Source Default
================ =============================== ====================
hosts url netloc + 'hosts' option key localhost:2181
timeout 'timeout' options key 10.0 (kazoo default)
connection_retry 'connection_retry' options key None
command_retry 'command_retry' options key None
randomize_hosts 'randomize_hosts' options key True
================ =============================== ====================
.. _kazoo: http://kazoo.readthedocs.org/
.. _zookeeper: http://zookeeper.apache.org/
.. _msgpack: http://msgpack.org/
"""
#: Default namespace when none is provided.
TOOZ_NAMESPACE = b"tooz"
HANDLERS = {
'threading': threading_handler.SequentialThreadingHandler,
}
if eventlet_handler:
HANDLERS['eventlet'] = eventlet_handler.SequentialEventletHandler
"""
Restricted immutable dict of handler 'kinds' -> handler classes that
this driver can accept via 'handler' option key (the expected value for
this option is one of the keys in this dictionary).
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
# Writes *always* go through a single leader process, but it may
# take a while for those writes to propagate to followers (and
# during this time clients can read older values)...
coordination.Characteristics.SEQUENTIAL,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
enum member(s) that can be used to interrogate how this driver works.
"""
def __init__(self, member_id, parsed_url, options):
super(KazooDriver, self).__init__(member_id, parsed_url, options)
options = utils.collapse(options, exclude=['hosts'])
self.timeout = int(options.get('timeout', '10'))
self._namespace = options.get('namespace', self.TOOZ_NAMESPACE)
self._coord = self._make_client(parsed_url, options)
self._timeout_exception = self._coord.handler.timeout_exception
def _start(self):
try:
self._coord.start(timeout=self.timeout)
except self._coord.handler.timeout_exception as e:
e_msg = encodeutils.exception_to_unicode(e)
utils.raise_with_cause(coordination.ToozConnectionError,
"Operational error: %s" % e_msg,
cause=e)
try:
self._coord.ensure_path(self._paths_join("/", self._namespace))
except exceptions.KazooException as e:
e_msg = encodeutils.exception_to_unicode(e)
utils.raise_with_cause(tooz.ToozError,
"Operational error: %s" % e_msg,
cause=e)
self._leader_locks = {}
def _stop(self):
self._coord.stop()
@staticmethod
def _dumps(data):
return utils.dumps(data)
@staticmethod
def _loads(blob):
return utils.loads(blob)
def _create_group_handler(self, async_result, timeout,
timeout_exception, group_id):
try:
async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NodeExistsError:
raise coordination.GroupAlreadyExist(group_id)
except exceptions.NoNodeError as e:
utils.raise_with_cause(tooz.ToozError,
"Tooz namespace '%s' has not"
" been created" % self._namespace,
cause=e)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def create_group(self, group_id):
group_path = self._path_group(group_id)
async_result = self._coord.create_async(group_path)
return ZooAsyncResult(async_result, self._create_group_handler,
timeout_exception=self._timeout_exception,
group_id=group_id)
@staticmethod
def _delete_group_handler(async_result, timeout,
timeout_exception, group_id):
try:
async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
raise coordination.GroupNotCreated(group_id)
except exceptions.NotEmptyError:
raise coordination.GroupNotEmpty(group_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def delete_group(self, group_id):
group_path = self._path_group(group_id)
async_result = self._coord.delete_async(group_path)
return ZooAsyncResult(async_result, self._delete_group_handler,
timeout_exception=self._timeout_exception,
group_id=group_id)
@staticmethod
def _join_group_handler(async_result, timeout,
timeout_exception, group_id, member_id):
try:
async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NodeExistsError:
raise coordination.MemberAlreadyExist(group_id, member_id)
except exceptions.NoNodeError:
raise coordination.GroupNotCreated(group_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def join_group(self, group_id, capabilities=b""):
member_path = self._path_member(group_id, self._member_id)
capabilities = self._dumps(capabilities)
async_result = self._coord.create_async(member_path,
value=capabilities,
ephemeral=True)
return ZooAsyncResult(async_result, self._join_group_handler,
timeout_exception=self._timeout_exception,
group_id=group_id, member_id=self._member_id)
@staticmethod
def _leave_group_handler(async_result, timeout,
timeout_exception, group_id, member_id):
try:
async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
raise coordination.MemberNotJoined(group_id, member_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def heartbeat(self):
# Just fetch the base path (and do nothing with it); this will
# force any waiting heartbeat responses to be flushed, and also
# ensures that the connection still works as expected...
base_path = self._paths_join("/", self._namespace)
try:
self._coord.get(base_path)
except self._timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
pass
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
return self.timeout
def leave_group(self, group_id):
member_path = self._path_member(group_id, self._member_id)
async_result = self._coord.delete_async(member_path)
return ZooAsyncResult(async_result, self._leave_group_handler,
timeout_exception=self._timeout_exception,
group_id=group_id, member_id=self._member_id)
@staticmethod
def _get_members_handler(async_result, timeout,
timeout_exception, group_id):
try:
members_ids = async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
raise coordination.GroupNotCreated(group_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
else:
return set(m.encode('ascii') for m in members_ids)
def get_members(self, group_id):
group_path = self._paths_join("/", self._namespace, group_id)
async_result = self._coord.get_children_async(group_path)
return ZooAsyncResult(async_result, self._get_members_handler,
timeout_exception=self._timeout_exception,
group_id=group_id)
@staticmethod
def _update_capabilities_handler(async_result, timeout,
timeout_exception, group_id, member_id):
try:
async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
raise coordination.MemberNotJoined(group_id, member_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def update_capabilities(self, group_id, capabilities):
member_path = self._path_member(group_id, self._member_id)
capabilities = self._dumps(capabilities)
async_result = self._coord.set_async(member_path, capabilities)
return ZooAsyncResult(async_result, self._update_capabilities_handler,
timeout_exception=self._timeout_exception,
group_id=group_id, member_id=self._member_id)
@classmethod
def _get_member_capabilities_handler(cls, async_result, timeout,
timeout_exception, group_id,
member_id):
try:
capabilities = async_result.get(block=True, timeout=timeout)[0]
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
raise coordination.MemberNotJoined(group_id, member_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
else:
return cls._loads(capabilities)
def get_member_capabilities(self, group_id, member_id):
member_path = self._path_member(group_id, member_id)
async_result = self._coord.get_async(member_path)
return ZooAsyncResult(async_result,
self._get_member_capabilities_handler,
timeout_exception=self._timeout_exception,
group_id=group_id, member_id=self._member_id)
@classmethod
def _get_member_info_handler(cls, async_result, timeout,
timeout_exception, group_id,
member_id):
try:
capabilities, znode_stats = async_result.get(block=True,
timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError:
raise coordination.MemberNotJoined(group_id, member_id)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
else:
member_info = {
'capabilities': cls._loads(capabilities),
'created_at': utils.millis_to_datetime(znode_stats.ctime),
'updated_at': utils.millis_to_datetime(znode_stats.mtime)
}
return member_info
def get_member_info(self, group_id, member_id):
member_path = self._path_member(group_id, member_id)
async_result = self._coord.get_async(member_path)
return ZooAsyncResult(async_result,
self._get_member_info_handler,
timeout_exception=self._timeout_exception,
group_id=group_id, member_id=self._member_id)
def _get_groups_handler(self, async_result, timeout, timeout_exception):
try:
group_ids = async_result.get(block=True, timeout=timeout)
except timeout_exception as e:
utils.raise_with_cause(coordination.OperationTimedOut,
encodeutils.exception_to_unicode(e),
cause=e)
except exceptions.NoNodeError as e:
utils.raise_with_cause(tooz.ToozError,
"Tooz namespace '%s' has not"
" been created" % self._namespace,
cause=e)
except exceptions.ZookeeperError as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
else:
return set(g.encode('ascii') for g in group_ids)
def get_groups(self):
tooz_namespace = self._paths_join("/", self._namespace)
async_result = self._coord.get_children_async(tooz_namespace)
return ZooAsyncResult(async_result, self._get_groups_handler,
timeout_exception=self._timeout_exception)
def _path_group(self, group_id):
return self._paths_join("/", self._namespace, group_id)
def _path_member(self, group_id, member_id):
return self._paths_join("/", self._namespace, group_id, member_id)
@staticmethod
def _paths_join(arg, *more_args):
"""Converts paths into a string (unicode)."""
args = [arg]
args.extend(more_args)
cleaned_args = []
for arg in args:
if isinstance(arg, six.binary_type):
cleaned_args.append(arg.decode('ascii'))
else:
cleaned_args.append(arg)
return paths.join(*cleaned_args)
def _make_client(self, parsed_url, options):
# Creates a kazoo client,
# See: https://github.com/python-zk/kazoo/blob/2.2.1/kazoo/client.py
# for what options a client takes...
if parsed_url.username and parsed_url.password:
username = parsed_url.username
password = parsed_url.password
digest_auth = "%s:%s" % (username, password)
digest_acl = security.make_digest_acl(username, password, all=True)
default_acl = (digest_acl,)
auth_data = [('digest', digest_auth)]
else:
default_acl = None
auth_data = None
maybe_hosts = [parsed_url.netloc] + list(options.get('hosts', []))
hosts = list(compat_filter(None, maybe_hosts))
if not hosts:
hosts = ['localhost:2181']
randomize_hosts = options.get('randomize_hosts', True)
client_kwargs = {
'hosts': ",".join(hosts),
'timeout': float(options.get('timeout', self.timeout)),
'connection_retry': options.get('connection_retry'),
'command_retry': options.get('command_retry'),
'randomize_hosts': strutils.bool_from_string(randomize_hosts),
'auth_data': auth_data,
'default_acl': default_acl,
}
handler_kind = options.get('handler')
if handler_kind:
try:
handler_cls = self.HANDLERS[handler_kind]
except KeyError:
raise ValueError("Unknown handler '%s' requested"
" valid handlers are %s"
% (handler_kind,
sorted(self.HANDLERS.keys())))
client_kwargs['handler'] = handler_cls()
return client.KazooClient(**client_kwargs)
def stand_down_group_leader(self, group_id):
if group_id in self._leader_locks:
self._leader_locks[group_id].release()
return True
return False
def _get_group_leader_lock(self, group_id):
if group_id not in self._leader_locks:
self._leader_locks[group_id] = self._coord.Lock(
self._path_group(group_id) + "/leader",
self._member_id.decode('ascii'))
return self._leader_locks[group_id]
def get_leader(self, group_id):
contenders = self._get_group_leader_lock(group_id).contenders()
if contenders and contenders[0]:
leader = contenders[0].encode('ascii')
else:
leader = None
return ZooAsyncResult(None, lambda *args: leader)
def get_lock(self, name):
z_lock = self._coord.Lock(
self._paths_join(b"/", self._namespace, b"locks", name),
self._member_id.decode('ascii'))
return ZooKeeperLock(name, z_lock)
def run_elect_coordinator(self):
for group_id in six.iterkeys(self._hooks_elected_leader):
leader_lock = self._get_group_leader_lock(group_id)
if leader_lock.is_acquired:
# Previously acquired/still leader, leave it be...
continue
if leader_lock.acquire(blocking=False):
# We are now leader for this group
self._hooks_elected_leader[group_id].run(
coordination.LeaderElected(
group_id,
self._member_id))
def run_watchers(self, timeout=None):
results = super(KazooDriver, self).run_watchers(timeout)
self.run_elect_coordinator()
return results
class ZooAsyncResult(coordination.CoordAsyncResult):
def __init__(self, kazoo_async_result, handler, **kwargs):
self._kazoo_async_result = kazoo_async_result
self._handler = handler
self._kwargs = kwargs
def get(self, timeout=10):
return self._handler(self._kazoo_async_result, timeout, **self._kwargs)
def done(self):
return self._kazoo_async_result.ready()
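A minimal usage sketch for this driver's distributed locks, assuming a
reachable ZooKeeper server and hypothetical member/lock names:

from tooz import coordination

coord = coordination.get_coordinator('zookeeper://127.0.0.1:2181', b'member-1')
coord.start()
lock = coord.get_lock(b'my-lock')   # znode under <namespace>/locks
with lock:                          # acquires on enter, releases on exit
    pass                            # critical section
coord.stop()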


@ -1,142 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import bisect
import hashlib
import six
import tooz
from tooz import utils
class UnknownNode(tooz.ToozError):
"""Node is unknown."""
def __init__(self, node):
super(UnknownNode, self).__init__("Unknown node `%s'" % node)
self.node = node
class HashRing(object):
"""Map objects onto nodes based on their consistent hash."""
DEFAULT_PARTITION_NUMBER = 2**5
def __init__(self, nodes, partitions=DEFAULT_PARTITION_NUMBER):
"""Create a new hashring.
:param nodes: List of nodes where objects will be mapped onto.
:param partitions: Number of partitions to spread objects onto.
"""
self.nodes = {}
self._ring = dict()
self._partitions = []
self._partition_number = partitions
self.add_nodes(set(nodes))
def add_node(self, node, weight=1):
"""Add a node to the hashring.
:param node: Node to add.
:param weight: How many resource instances this node should manage
compared to the other nodes (default 1). Higher weights will be
assigned more resources. Three nodes A, B and C with weights 1, 2 and 3
will each handle 1/6, 1/3 and 1/2 of the resources, respectively.
"""
return self.add_nodes((node,), weight)
def add_nodes(self, nodes, weight=1):
"""Add nodes to the hashring with equal weight
:param nodes: Nodes to add.
:param weight: How many resource instances this node should manage
compared to the other nodes (default 1). Higher weights will be
assigned more resources. Three nodes A, B and C with weights 1, 2 and 3
will each handle 1/6, 1/3 and 1/2 of the resources, respectively.
"""
for node in nodes:
key = utils.to_binary(node, 'utf-8')
key_hash = hashlib.md5(key)
for r in six.moves.range(self._partition_number * weight):
key_hash.update(key)
self._ring[self._hash2int(key_hash)] = node
self.nodes[node] = weight
self._partitions = sorted(self._ring.keys())
def remove_node(self, node):
"""Remove a node from the hashring.
Raises :py:exc:`UnknownNode` if the node is not in the ring.
:param node: Node to remove.
"""
try:
weight = self.nodes.pop(node)
except KeyError:
raise UnknownNode(node)
key = utils.to_binary(node, 'utf-8')
key_hash = hashlib.md5(key)
for r in six.moves.range(self._partition_number * weight):
key_hash.update(key)
del self._ring[self._hash2int(key_hash)]
self._partitions = sorted(self._ring.keys())
@staticmethod
def _hash2int(key):
return int(key.hexdigest(), 16)
def _get_partition(self, data):
hashed_key = self._hash2int(hashlib.md5(data))
position = bisect.bisect(self._partitions, hashed_key)
return position if position < len(self._partitions) else 0
def _get_node(self, partition):
return self._ring[self._partitions[partition]]
def get_nodes(self, data, ignore_nodes=None, replicas=1):
"""Get the set of nodes which the supplied data map onto.
:param data: A byte identifier to be mapped across the ring.
:param ignore_nodes: Set of nodes to ignore.
:param replicas: Number of replicas to use.
:return: A set of nodes whose length depends on the number of replicas.
"""
partition = self._get_partition(data)
ignore_nodes = set(ignore_nodes) if ignore_nodes else set()
candidates = set(self.nodes.keys()) - ignore_nodes
replicas = min(replicas, len(candidates))
nodes = set()
while len(nodes) < replicas:
node = self._get_node(partition)
if node not in ignore_nodes:
nodes.add(node)
partition = (partition + 1
if partition + 1 < len(self._partitions) else 0)
return nodes
def __getitem__(self, key):
return self.get_nodes(key)
def __len__(self):
return len(self._partitions)
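A minimal sketch of the ring's behaviour, with hypothetical node names:

from tooz import hashring

ring = hashring.HashRing(['node-a', 'node-b'])
ring.add_node('node-c', weight=2)        # roughly twice the partitions
owner = ring.get_nodes(b'object-42')     # set containing a single node
pair = ring.get_nodes(b'object-42', replicas=2)
spare = ring.get_nodes(b'object-42', ignore_nodes=['node-a'])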


@ -1,109 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 eNovance Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
import tooz
from tooz import coordination
class _LockProxy(object):
def __init__(self, lock, *args, **kwargs):
self.lock = lock
self.args = args
self.kwargs = kwargs
def __enter__(self):
return self.lock.__enter__(*self.args, **self.kwargs)
def __exit__(self, exc_type, exc_val, exc_tb):
self.lock.__exit__(exc_type, exc_val, exc_tb)
@six.add_metaclass(abc.ABCMeta)
class Lock(object):
def __init__(self, name):
if not name:
raise ValueError("Locks must be provided a name")
self._name = name
@property
def name(self):
return self._name
def __call__(self, *args, **kwargs):
return _LockProxy(self, *args, **kwargs)
def __enter__(self, *args, **kwargs):
acquired = self.acquire(*args, **kwargs)
if not acquired:
msg = u'Acquiring lock %s failed' % self.name
raise coordination.LockAcquireFailed(msg)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def is_still_owner(self):
"""Checks if the lock is still owned by the acquiree.
:returns: returns true if still acquired (false if not) and
false if the lock was never acquired in the first place
or raises ``NotImplemented`` if not implemented.
"""
raise tooz.NotImplemented
@abc.abstractmethod
def release(self):
"""Attempts to release the lock, returns true if released.
The behavior of releasing a lock which was not acquired in the first
place is undefined (it can range from harmless to releasing some other
user's lock).
:returns: returns true if released (false if not)
:rtype: bool
"""
def break_(self):
"""Forcefully release the lock.
This is mostly used for testing purposes, to simulate an out of
band operation that breaks the lock. Backends may allow waiters to
acquire immediately if a lock is broken, or they should raise an
exception. Releasing should be successful for objects that believe
they hold the lock but do not have the lock anymore. However,
they should be careful not to re-break the lock by releasing it,
since they may not be the holder anymore.
:returns: returns true if forcefully broken (false if not)
or raises ``NotImplemented`` if not implemented.
"""
raise tooz.NotImplemented
@abc.abstractmethod
def acquire(self, blocking=True):
"""Attempts to acquire the lock.
:param blocking: If True, blocks until the lock is acquired. If False,
returns right away. Otherwise, the value is used as a
timeout and the call returns after at most this number
of seconds.
:returns: returns true if acquired (false if not)
:rtype: bool
"""


@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tooz import hashring
class Partitioner(object):
"""Partition set of objects across several members.
Objects to be partitioned should implement the __tooz_hash__ method to
identify themselves across the consistent hashring. This method should
return bytes.
"""
DEFAULT_PARTITION_NUMBER = hashring.HashRing.DEFAULT_PARTITION_NUMBER
def __init__(self, coordinator, group_id,
partitions=DEFAULT_PARTITION_NUMBER):
members = coordinator.get_members(group_id)
self.partitions = partitions
self.group_id = group_id
self._coord = coordinator
caps = [(m, self._coord.get_member_capabilities(self.group_id, m))
for m in members.get()]
self._coord.watch_join_group(self.group_id, self._on_member_join)
self._coord.watch_leave_group(self.group_id, self._on_member_leave)
self.ring = hashring.HashRing([], partitions=self.partitions)
for m_id, cap in caps:
self.ring.add_node(m_id, cap.get().get("weight", 1))
def _on_member_join(self, event):
weight = self._coord.get_member_capabilities(
self.group_id, event.member_id).get().get("weight", 1)
self.ring.add_node(event.member_id, weight)
def _on_member_leave(self, event):
self.ring.remove_node(event.member_id)
@staticmethod
def _hash_object(obj):
if hasattr(obj, "__tooz_hash__"):
return obj.__tooz_hash__()
return str(hash(obj)).encode('ascii')
def members_for_object(self, obj, ignore_members=None, replicas=1):
"""Return the members responsible for an object.
:param obj: The object to find owners for.
:param ignore_members: Group members to ignore.
:param replicas: Number of replicas for the object.
"""
return self.ring.get_nodes(self._hash_object(obj),
ignore_nodes=ignore_members,
replicas=replicas)
def belongs_to_member(self, obj, member_id,
ignore_members=None, replicas=1):
"""Return whether an object belongs to a member.
:param obj: The object to check ownership of.
:param member_id: The member to check if it owns the object.
:param ignore_members: Group members to ignore.
:param replicas: Number of replicas for the object.
"""
return member_id in self.members_for_object(
obj, ignore_members=ignore_members, replicas=replicas)
def belongs_to_self(self, obj, ignore_members=None, replicas=1):
"""Return whether an object belongs to this coordinator.
:param obj: The object to check ownership of.
:param ignore_members: Group members to ignore.
:param replicas: Number of replicas for the object.
"""
return self.belongs_to_member(obj, self._coord._member_id,
ignore_members=ignore_members,
replicas=replicas)
def stop(self):
"""Stop the partitioner."""
self._coord.unwatch_join_group(self.group_id, self._on_member_join)
self._coord.unwatch_leave_group(self.group_id, self._on_member_leave)
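A minimal sketch of partitioned ownership, assuming an already started
coordinator 'coord' whose member has joined the (hypothetical) group:

from tooz import partitioner

part = partitioner.Partitioner(coord, b'my-group')
if part.belongs_to_self(b'resource-7'):
    pass                                  # this member owns the resource
owners = part.members_for_object(b'resource-7', replicas=2)
part.stop()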


@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 eNovance Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import fixtures
from oslo_utils import uuidutils
import six
from testtools import testcase
import tooz
def get_random_uuid():
return uuidutils.generate_uuid().encode('ascii')
def _skip_decorator(func):
@functools.wraps(func)
def skip_if_not_implemented(*args, **kwargs):
try:
return func(*args, **kwargs)
except tooz.NotImplemented as e:
raise testcase.TestSkipped(str(e))
return skip_if_not_implemented
class SkipNotImplementedMeta(type):
def __new__(cls, name, bases, local):
for attr in local:
value = local[attr]
if callable(value) and (
attr.startswith('test_') or attr == 'setUp'):
local[attr] = _skip_decorator(value)
return type.__new__(cls, name, bases, local)
@six.add_metaclass(SkipNotImplementedMeta)
class TestWithCoordinator(testcase.TestCase):
url = os.getenv("TOOZ_TEST_URL")
def setUp(self):
super(TestWithCoordinator, self).setUp()
if self.url is None:
raise RuntimeError("No URL set for this driver")
if os.getenv("TOOZ_TEST_ETCD3"):
self.url = self.url.replace("etcd://", "etcd3://")
if os.getenv("TOOZ_TEST_ETCD3GW"):
self.url = self.url.replace("etcd://", "etcd3+http://")
self.useFixture(fixtures.NestedTempfile())
self.group_id = get_random_uuid()
self.member_id = get_random_uuid()
self._coord = tooz.coordination.get_coordinator(self.url,
self.member_id)
self._coord.start()
def tearDown(self):
self._coord.stop()
super(TestWithCoordinator, self).tearDown()


@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import mock
from testtools import testcase
import tooz
from tooz import coordination
from tooz import tests
class TestFileDriver(testcase.TestCase):
_FAKE_MEMBER_ID = tests.get_random_uuid()
def test_base_dir(self):
file_path = '/fake/file/path'
url = 'file://%s' % file_path
coord = coordination.get_coordinator(url, self._FAKE_MEMBER_ID)
self.assertEqual(file_path, coord._dir)
def test_leftover_file(self):
fixture = self.useFixture(fixtures.TempDir())
file_path = fixture.path
url = 'file://%s' % file_path
coord = coordination.get_coordinator(url, self._FAKE_MEMBER_ID)
coord.start()
self.addCleanup(coord.stop)
coord.create_group(b"my_group").get()
safe_group_id = coord._make_filesystem_safe(b"my_group")
with open(os.path.join(file_path, 'groups',
safe_group_id, "junk.txt"), "wb"):
pass
os.unlink(os.path.join(file_path, 'groups',
safe_group_id, '.metadata'))
self.assertRaises(tooz.ToozError,
coord.delete_group(b"my_group").get)
@mock.patch('os.path.normpath', lambda x: x.replace('/', '\\'))
@mock.patch('sys.platform', 'win32')
def test_base_dir_win32(self):
coord = coordination.get_coordinator(
'file:///C:/path/', self._FAKE_MEMBER_ID)
self.assertEqual('C:\\path\\', coord._dir)
coord = coordination.get_coordinator(
'file:////share_addr/share_path/', self._FAKE_MEMBER_ID)
self.assertEqual('\\\\share_addr\\share_path\\', coord._dir)
# Administrative shares should be handled properly.
coord = coordination.get_coordinator(
'file:////c$/path/', self._FAKE_MEMBER_ID)
self.assertEqual('\\\\c$\\path\\', coord._dir)

File diff suppressed because it is too large


@ -1,44 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from testtools import testcase
import tooz.coordination
class TestEtcd(testcase.TestCase):
FAKE_URL = "etcd://mocked-not-really-localhost:2379"
FAKE_MEMBER_ID = "mocked-not-really-member"
def setUp(self):
super(TestEtcd, self).setUp()
self._coord = tooz.coordination.get_coordinator(self.FAKE_URL,
self.FAKE_MEMBER_ID)
def test_multiple_locks_etcd_wait_index(self):
lock = self._coord.get_lock('mocked-not-really-random')
return_values = [
{'errorCode': {}, 'node': {}, 'index': 10},
{'errorCode': None, 'node': {}, 'index': 10}
]
with mock.patch.object(lock.client, 'put', side_effect=return_values):
with mock.patch.object(lock.client, 'get') as mocked_get:
self.assertTrue(lock.acquire())
mocked_get.assert_called_once()
call = str(mocked_get.call_args)
self.assertIn("waitIndex=11", call)


@ -1,243 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import mock
from testtools import matchers
from testtools import testcase
from tooz import hashring
class HashRingTestCase(testcase.TestCase):
# NOTE(deva): the mapping used in these tests is as follows:
# if nodes = [foo, bar]:
# fake -> foo, bar
# if nodes = [foo, bar, baz]:
# fake -> foo, bar, baz
# fake-again -> bar, baz, foo
@mock.patch.object(hashlib, 'md5', autospec=True)
def test_hash2int_returns_int(self, mock_md5):
r1 = 32 * 'a'
r2 = 32 * 'b'
# 2**PARTITION_EXPONENT calls to md5.update per node
# PARTITION_EXPONENT is currently always 5, so 32 calls each here
mock_md5.return_value.hexdigest.side_effect = [r1] * 32 + [r2] * 32
nodes = ['foo', 'bar']
ring = hashring.HashRing(nodes)
self.assertIn(int(r1, 16), ring._ring)
self.assertIn(int(r2, 16), ring._ring)
def test_create_ring(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * 2, len(ring))
def test_add_node(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.add('baz')
ring.add_node('baz')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
def test_add_node_bytes(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.add(b'Z\xe2\xfa\x90\x17EC\xac\xae\x88\xa7[\xa1}:E')
ring.add_node(b'Z\xe2\xfa\x90\x17EC\xac\xae\x88\xa7[\xa1}:E')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
def test_add_node_unicode(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.add(u'\u0634\u0628\u06a9\u0647')
ring.add_node(u'\u0634\u0628\u06a9\u0647')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
def test_add_node_weight(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.add('baz')
ring.add_node('baz', weight=10)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * 12, len(ring))
def test_add_nodes_weight(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.add('baz')
nodes.add('baz2')
ring.add_nodes(set(['baz', 'baz2']), weight=10)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * 22, len(ring))
def test_remove_node(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.discard('bar')
ring.remove_node('bar')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
def test_remove_node_bytes(self):
nodes = {'foo', b'Z\xe2\xfa\x90\x17EC\xac\xae\x88\xa7[\xa1}:E'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.discard(b'Z\xe2\xfa\x90\x17EC\xac\xae\x88\xa7[\xa1}:E')
ring.remove_node(b'Z\xe2\xfa\x90\x17EC\xac\xae\x88\xa7[\xa1}:E')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
def test_remove_node_unknown(self):
nodes = ['foo', 'bar']
ring = hashring.HashRing(nodes)
self.assertRaises(
hashring.UnknownNode,
ring.remove_node, 'biz')
def test_add_then_removenode(self):
nodes = {'foo', 'bar'}
ring = hashring.HashRing(nodes)
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.add('baz')
ring.add_node('baz')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
nodes.discard('bar')
ring.remove_node('bar')
self.assertEqual(nodes, set(ring.nodes.keys()))
self.assertEqual(2 ** 5 * len(nodes), len(ring))
def test_distribution_one_replica(self):
nodes = ['foo', 'bar', 'baz']
ring = hashring.HashRing(nodes)
fake_1_nodes = ring.get_nodes(b'fake')
fake_2_nodes = ring.get_nodes(b'fake-again')
# We should get one node for each key
self.assertEqual(1, len(fake_1_nodes))
self.assertEqual(1, len(fake_2_nodes))
# And they must not be the same answer, even for this simple data.
self.assertNotEqual(fake_1_nodes, fake_2_nodes)
def test_distribution_more_replica(self):
nodes = ['foo', 'bar', 'baz']
ring = hashring.HashRing(nodes)
fake_1_nodes = ring.get_nodes(b'fake', replicas=2)
fake_2_nodes = ring.get_nodes(b'fake-again', replicas=2)
# We should get two nodes for each key
self.assertEqual(2, len(fake_1_nodes))
self.assertEqual(2, len(fake_2_nodes))
fake_1_nodes = ring.get_nodes(b'fake', replicas=3)
fake_2_nodes = ring.get_nodes(b'fake-again', replicas=3)
# We should get three nodes for each key
self.assertEqual(3, len(fake_1_nodes))
self.assertEqual(3, len(fake_2_nodes))
self.assertEqual(fake_1_nodes, fake_2_nodes)
def test_ignore_nodes(self):
nodes = ['foo', 'bar', 'baz']
ring = hashring.HashRing(nodes)
equals_bar_or_baz = matchers.MatchesAny(
matchers.Equals({'bar'}),
matchers.Equals({'baz'}))
self.assertThat(
ring.get_nodes(b'fake', ignore_nodes=['foo']),
equals_bar_or_baz)
self.assertThat(
ring.get_nodes(b'fake', ignore_nodes=['foo', 'bar']),
equals_bar_or_baz)
self.assertEqual(set(), ring.get_nodes(b'fake', ignore_nodes=nodes))
@staticmethod
def _compare_rings(nodes, conductors, ring, new_conductors, new_ring):
delta = {}
mapping = {
node: list(ring.get_nodes(node.encode('ascii')))[0]
for node in nodes
}
new_mapping = {
node: list(new_ring.get_nodes(node.encode('ascii')))[0]
for node in nodes
}
for key, old in mapping.items():
new = new_mapping.get(key, None)
if new != old:
delta[key] = (old, new)
return delta
def test_rebalance_stability_join(self):
num_services = 10
num_nodes = 10000
# Adding 1 service to a set of N should move 1/(N+1) of all nodes.
# E.g., for 10 services, adding one should move 1/11, or about 9%.
# We allow up to 1/N to account for rounding in tests.
redistribution_factor = 1.0 / num_services
nodes = [str(x) for x in range(num_nodes)]
services = [str(x) for x in range(num_services)]
new_services = services + ['new']
delta = self._compare_rings(
nodes, services, hashring.HashRing(services),
new_services, hashring.HashRing(new_services))
self.assertLess(len(delta), num_nodes * redistribution_factor)
def test_rebalance_stability_leave(self):
num_services = 10
num_nodes = 10000
# Removing 1 service from a set of N should move 1/N of all nodes.
# E.g., for 10 services, removing one should move 1/10, or 10%.
# We allow up to 1/(N-1) to account for rounding in tests.
redistribution_factor = 1.0 / (num_services - 1)
nodes = [str(x) for x in range(num_nodes)]
services = [str(x) for x in range(num_services)]
new_services = services[:]
new_services.pop()
delta = self._compare_rings(
nodes, services, hashring.HashRing(services),
new_services, hashring.HashRing(new_services))
self.assertLess(len(delta), num_nodes * redistribution_factor)
def test_ignore_non_existent_node(self):
nodes = ['foo', 'bar']
ring = hashring.HashRing(nodes)
self.assertEqual({'foo'}, ring.get_nodes(b'fake',
ignore_nodes=['baz']))


@ -1,85 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
try:
from unittest import mock
except ImportError:
import mock
from testtools import testcase
from tooz import coordination
from tooz import tests
class TestMemcacheDriverFailures(testcase.TestCase):
FAKE_URL = "memcached://mocked-not-really-localhost"
@mock.patch('pymemcache.client.PooledClient')
def test_client_failure_start(self, mock_client_cls):
mock_client_cls.side_effect = socket.timeout('timed-out')
member_id = tests.get_random_uuid()
coord = coordination.get_coordinator(self.FAKE_URL, member_id)
self.assertRaises(coordination.ToozConnectionError, coord.start)
@mock.patch('pymemcache.client.PooledClient')
def test_client_failure_join(self, mock_client_cls):
mock_client = mock.MagicMock()
mock_client_cls.return_value = mock_client
member_id = tests.get_random_uuid()
coord = coordination.get_coordinator(self.FAKE_URL, member_id)
coord.start()
mock_client.gets.side_effect = socket.timeout('timed-out')
fut = coord.join_group(tests.get_random_uuid())
self.assertRaises(coordination.ToozConnectionError, fut.get)
@mock.patch('pymemcache.client.PooledClient')
def test_client_failure_leave(self, mock_client_cls):
mock_client = mock.MagicMock()
mock_client_cls.return_value = mock_client
member_id = tests.get_random_uuid()
coord = coordination.get_coordinator(self.FAKE_URL, member_id)
coord.start()
mock_client.gets.side_effect = socket.timeout('timed-out')
fut = coord.leave_group(tests.get_random_uuid())
self.assertRaises(coordination.ToozConnectionError, fut.get)
@mock.patch('pymemcache.client.PooledClient')
def test_client_failure_heartbeat(self, mock_client_cls):
mock_client = mock.MagicMock()
mock_client_cls.return_value = mock_client
member_id = tests.get_random_uuid()
coord = coordination.get_coordinator(self.FAKE_URL, member_id)
coord.start()
mock_client.set.side_effect = socket.timeout('timed-out')
self.assertRaises(coordination.ToozConnectionError, coord.heartbeat)
@mock.patch(
'tooz.coordination.CoordinationDriverCachedRunWatchers.run_watchers',
autospec=True)
@mock.patch('pymemcache.client.PooledClient')
def test_client_run_watchers_mixin(self, mock_client_cls,
mock_run_watchers):
mock_client = mock.MagicMock()
mock_client_cls.return_value = mock_client
member_id = tests.get_random_uuid()
coord = coordination.get_coordinator(self.FAKE_URL, member_id)
coord.start()
coord.run_watchers()
self.assertTrue(mock_run_watchers.called)
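# A sketch of how a caller might react to the connection failures simulated
# above; the memcached URL and the retry policy are illustrative assumptions,
# not part of this module:
#
# >>> import time
# >>> coord = coordination.get_coordinator(
# ...     "memcached://localhost:11211", b"member-1")
# >>> for attempt in range(3):
# ...     try:
# ...         coord.start()
# ...         break
# ...     except coordination.ToozConnectionError:
# ...         time.sleep(1)  # back off, then retry before giving up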

View File

@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
from testtools import testcase
import tooz
from tooz import coordination
from tooz import tests
class TestMySQLDriver(testcase.TestCase):
def _create_coordinator(self, url):
def _safe_stop(coord):
try:
coord.stop()
except tooz.ToozError as e:
message = encodeutils.exception_to_unicode(e)
if (message != 'Can not stop a driver which has not'
' been started'):
raise
coord = coordination.get_coordinator(url,
tests.get_random_uuid())
self.addCleanup(_safe_stop, coord)
return coord
def test_connect_failure_invalid_hostname_provided(self):
c = self._create_coordinator("mysql://invalidhost/test")
self.assertRaises(coordination.ToozConnectionError, c.start)
def test_connect_failure_invalid_port_provided(self):
c = self._create_coordinator("mysql://localhost:54/test")
self.assertRaises(coordination.ToozConnectionError, c.start)
def test_connect_failure_invalid_hostname_and_port_provided(self):
c = self._create_coordinator("mysql://invalidhost:54/test")
self.assertRaises(coordination.ToozConnectionError, c.start)

View File

@ -1,103 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tooz import coordination
from tooz import tests
class TestPartitioner(tests.TestWithCoordinator):
def setUp(self):
super(TestPartitioner, self).setUp()
self._extra_coords = []
def tearDown(self):
for c in self._extra_coords:
c.stop()
super(TestPartitioner, self).tearDown()
def _add_members(self, number_of_members, weight=1):
groups = []
for _ in six.moves.range(number_of_members):
m = tests.get_random_uuid()
coord = coordination.get_coordinator(self.url, m)
coord.start()
groups.append(coord.join_partitioned_group(
self.group_id, weight=weight))
self._extra_coords.append(coord)
self._coord.run_watchers()
return groups
def _remove_members(self, number_of_members):
for _ in six.moves.range(number_of_members):
c = self._extra_coords.pop()
c.stop()
self._coord.run_watchers()
def test_join_partitioned_group(self):
group_id = tests.get_random_uuid()
self._coord.join_partitioned_group(group_id)
def test_hashring_size(self):
p = self._coord.join_partitioned_group(self.group_id)
self.assertEqual(1, len(p.ring.nodes))
self._add_members(1)
self.assertEqual(2, len(p.ring.nodes))
self._add_members(2)
self.assertEqual(4, len(p.ring.nodes))
self._remove_members(3)
self.assertEqual(1, len(p.ring.nodes))
p.stop()
def test_hashring_weight(self):
p = self._coord.join_partitioned_group(self.group_id, weight=5)
self.assertEqual([5], list(p.ring.nodes.values()))
p2 = self._add_members(1, weight=10)[0]
self.assertEqual(set([5, 10]), set(p.ring.nodes.values()))
self.assertEqual(set([5, 10]), set(p2.ring.nodes.values()))
p.stop()
def test_stop(self):
p = self._coord.join_partitioned_group(self.group_id)
p.stop()
self.assertEqual(0, len(self._coord._hooks_join_group))
self.assertEqual(0, len(self._coord._hooks_leave_group))
def test_members_of_object_and_others(self):
p = self._coord.join_partitioned_group(self.group_id)
self._add_members(3)
o = object()
m = p.members_for_object(o)
self.assertEqual(1, len(m))
m = m.pop()
self.assertTrue(p.belongs_to_member(o, m))
self.assertFalse(p.belongs_to_member(o, b"chupacabra"))
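# Whether belongs_to_self() agrees depends on which member the hash ring
# assigned the object to, so pick the matching assertion.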
maybe = self.assertTrue if m == self.member_id else self.assertFalse
maybe(p.belongs_to_self(o))
p.stop()
class ZakeTestPartitioner(TestPartitioner):
url = "zake://"
class IPCTestPartitioner(TestPartitioner):
url = "ipc://"
class FileTestPartitioner(TestPartitioner):
url = "file:///tmp"

View File

@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
# Added in python 3.3+
from unittest import mock
except ImportError:
import mock
from oslo_utils import encodeutils
import testtools
from testtools import testcase
import tooz
from tooz import coordination
from tooz import tests
# Gracefully handle the case where the driver is not installed.
try:
import psycopg2
PGSQL_AVAILABLE = True
except ImportError:
PGSQL_AVAILABLE = False
@testtools.skipUnless(PGSQL_AVAILABLE, 'psycopg2 is not available')
class TestPostgreSQLFailures(testcase.TestCase):
# Not actually used (but required nonetheless), since we mock out
# the connect() method...
FAKE_URL = "postgresql://localhost:1"
def _create_coordinator(self):
def _safe_stop(coord):
try:
coord.stop()
except tooz.ToozError as e:
# TODO(harlowja): make this better, so that we don't have to
# do string checking...
message = encodeutils.exception_to_unicode(e)
if (message != 'Can not stop a driver which has not'
' been started'):
raise
coord = coordination.get_coordinator(self.FAKE_URL,
tests.get_random_uuid())
self.addCleanup(_safe_stop, coord)
return coord
@mock.patch("tooz.drivers.pgsql.psycopg2.connect")
def test_connect_failure(self, psycopg2_connector):
psycopg2_connector.side_effect = psycopg2.Error("Broken")
c = self._create_coordinator()
self.assertRaises(coordination.ToozConnectionError, c.start)
@mock.patch("tooz.drivers.pgsql.psycopg2.connect")
def test_connect_failure_operational(self, psycopg2_connector):
psycopg2_connector.side_effect = psycopg2.OperationalError("Broken")
c = self._create_coordinator()
self.assertRaises(coordination.ToozConnectionError, c.start)
@mock.patch("tooz.drivers.pgsql.psycopg2.connect")
def test_failure_acquire_lock(self, psycopg2_connector):
execute_mock = mock.MagicMock()
execute_mock.execute.side_effect = psycopg2.OperationalError("Broken")
cursor_mock = mock.MagicMock()
cursor_mock.__enter__ = mock.MagicMock(return_value=execute_mock)
cursor_mock.__exit__ = mock.MagicMock(return_value=False)
conn_mock = mock.MagicMock()
conn_mock.cursor.return_value = cursor_mock
psycopg2_connector.return_value = conn_mock
c = self._create_coordinator()
c.start()
test_lock = c.get_lock(b'test-lock')
self.assertRaises(tooz.ToozError, test_lock.acquire)
@mock.patch("tooz.drivers.pgsql.psycopg2.connect")
def test_failure_release_lock(self, psycopg2_connector):
execute_mock = mock.MagicMock()
execute_mock.execute.side_effect = [
True,
psycopg2.OperationalError("Broken"),
]
cursor_mock = mock.MagicMock()
cursor_mock.__enter__ = mock.MagicMock(return_value=execute_mock)
cursor_mock.__exit__ = mock.MagicMock(return_value=False)
conn_mock = mock.MagicMock()
conn_mock.cursor.return_value = cursor_mock
psycopg2_connector.return_value = conn_mock
c = self._create_coordinator()
c.start()
test_lock = c.get_lock(b'test-lock')
self.assertTrue(test_lock.acquire())
self.assertRaises(tooz.ToozError, test_lock.release)
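# The lock-failure tests above lean on MagicMock's context-manager support; a
# condensed, self-contained sketch of the pattern (no real database needed):
#
# >>> execute_mock = mock.MagicMock()
# >>> cursor_mock = mock.MagicMock()
# >>> cursor_mock.__enter__ = mock.MagicMock(return_value=execute_mock)
# >>> cursor_mock.__exit__ = mock.MagicMock(return_value=False)
# >>> conn_mock = mock.MagicMock()
# >>> conn_mock.cursor.return_value = cursor_mock
# >>> with conn_mock.cursor() as cur:
# ...     _ = cur.execute("SELECT 1")   # recorded on the mock only
# >>> execute_mock.execute.called
# True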

View File

@ -1,136 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import futurist
import six
from testtools import testcase
import tooz
from tooz import utils
class TestProxyExecutor(testcase.TestCase):
def test_fetch_check_executor(self):
try_options = [
({'executor': 'sync'}, futurist.SynchronousExecutor),
({'executor': 'thread'}, futurist.ThreadPoolExecutor),
]
for options, expected_cls in try_options:
executor = utils.ProxyExecutor.build("test", options)
self.assertTrue(executor.internally_owned)
executor.start()
self.assertTrue(executor.started)
self.assertIsInstance(executor.executor, expected_cls)
executor.stop()
self.assertFalse(executor.started)
def test_fetch_default_executor(self):
executor = utils.ProxyExecutor.build("test", {})
executor.start()
try:
self.assertIsInstance(executor.executor,
futurist.ThreadPoolExecutor)
finally:
executor.stop()
def test_fetch_unknown_executor(self):
options = {'executor': 'huh'}
self.assertRaises(tooz.ToozError,
utils.ProxyExecutor.build, 'test',
options)
def test_no_submit_stopped(self):
executor = utils.ProxyExecutor.build("test", {})
self.assertRaises(tooz.ToozError,
executor.submit, lambda: None)
class TestUtilsSafePath(testcase.TestCase):
base = tempfile.gettempdir()
def test_join(self):
self.assertEqual(os.path.join(self.base, 'b'),
utils.safe_abs_path(self.base, "b"))
self.assertEqual(os.path.join(self.base, 'b', 'c'),
utils.safe_abs_path(self.base, "b", 'c'))
self.assertEqual(self.base,
utils.safe_abs_path(self.base, "b", 'c', '../..'))
def test_unsafe_join(self):
self.assertRaises(ValueError, utils.safe_abs_path,
self.base, "../b")
self.assertRaises(ValueError, utils.safe_abs_path,
self.base, "b", 'c', '../../../')
class TestUtilsCollapse(testcase.TestCase):
def test_bad_type(self):
self.assertRaises(TypeError, utils.collapse, "")
self.assertRaises(TypeError, utils.collapse, [])
self.assertRaises(TypeError, utils.collapse, 2)
def test_collapse_simple(self):
ex = {
'a': [1],
'b': 2,
'c': (1, 2, 3),
}
c_ex = utils.collapse(ex)
self.assertEqual({'a': 1, 'c': 3, 'b': 2}, c_ex)
def test_collapse_exclusions(self):
ex = {
'a': [1],
'b': 2,
'c': (1, 2, 3),
}
c_ex = utils.collapse(ex, exclude=['a'])
self.assertEqual({'a': [1], 'c': 3, 'b': 2}, c_ex)
def test_no_collapse(self):
ex = {
'a': [1],
'b': [2],
'c': (1, 2, 3),
}
c_ex = utils.collapse(ex, exclude=set(six.iterkeys(ex)))
self.assertEqual(ex, c_ex)
def test_custom_selector(self):
ex = {
'a': [1, 2, 3],
}
c_ex = utils.collapse(ex,
item_selector=lambda items: items[0])
self.assertEqual({'a': 1}, c_ex)
def test_empty_lists(self):
ex = {
'a': [],
'b': (),
'c': [1],
}
c_ex = utils.collapse(ex)
self.assertNotIn('b', c_ex)
self.assertNotIn('a', c_ex)
self.assertIn('c', c_ex)
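# The executor proxy under test is small enough to demonstrate inline; a
# sketch of the build/start/submit/stop life cycle ("example" is a made-up
# driver name):
#
# >>> executor = utils.ProxyExecutor.build("example", {'executor': 'sync'})
# >>> executor.start()
# >>> fut = executor.submit(sum, [1, 2, 3])
# >>> fut.result()   # the synchronous executor ran the callable inline
# 6
# >>> executor.stop()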

View File

@ -1,225 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import operator
import os
import futurist
import msgpack
from oslo_serialization import msgpackutils
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import tooz
class Base64LockEncoder(object):
def __init__(self, keyspace_url, prefix=''):
self.keyspace_url = keyspace_url
if prefix:
self.keyspace_url += prefix
def check_and_encode(self, name):
if not isinstance(name, (six.text_type, six.binary_type)):
raise TypeError("Provided lock name is expected to be a string"
" or binary type and not %s" % type(name))
try:
return self.encode(name)
except (UnicodeDecodeError, UnicodeEncodeError) as e:
raise ValueError("Invalid lock name due to encoding/decoding "
" issue: %s"
% encodeutils.exception_to_unicode(e))
def encode(self, name):
if isinstance(name, six.text_type):
name = name.encode("ascii")
enc_name = base64.urlsafe_b64encode(name)
return self.keyspace_url + "/" + enc_name.decode("ascii")
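# Illustrative encoding (the keyspace URL is hypothetical):
#
# >>> Base64LockEncoder('etcd://host/locks').check_and_encode(u'my-lock')
# 'etcd://host/locks/bXktbG9jaw=='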
class ProxyExecutor(object):
KIND_TO_FACTORY = {
'threaded': (lambda:
futurist.ThreadPoolExecutor(max_workers=1)),
'synchronous': lambda: futurist.SynchronousExecutor(),
}
# Provide a few common aliases...
KIND_TO_FACTORY['thread'] = KIND_TO_FACTORY['threaded']
KIND_TO_FACTORY['threading'] = KIND_TO_FACTORY['threaded']
KIND_TO_FACTORY['sync'] = KIND_TO_FACTORY['synchronous']
DEFAULT_KIND = 'threaded'
def __init__(self, driver_name, default_executor_factory):
self.default_executor_factory = default_executor_factory
self.driver_name = driver_name
self.started = False
self.executor = None
self.internally_owned = True
@classmethod
def build(cls, driver_name, options):
default_executor_fact = cls.KIND_TO_FACTORY[cls.DEFAULT_KIND]
if 'executor' in options:
executor_kind = options['executor']
try:
default_executor_fact = cls.KIND_TO_FACTORY[executor_kind]
except KeyError:
executors_known = sorted(list(cls.KIND_TO_FACTORY))
raise tooz.ToozError("Unknown executor"
" '%s' provided, accepted values"
" are %s" % (executor_kind,
executors_known))
return cls(driver_name, default_executor_fact)
def start(self):
if self.started:
return
self.executor = self.default_executor_factory()
self.started = True
def stop(self):
executor = self.executor
self.executor = None
if executor is not None:
executor.shutdown()
self.started = False
def submit(self, cb, *args, **kwargs):
if not self.started:
raise tooz.ToozError("%s driver asynchronous executor"
" has not been started"
% self.driver_name)
try:
return self.executor.submit(cb, *args, **kwargs)
except RuntimeError:
raise tooz.ToozError("%s driver asynchronous executor has"
" been shutdown" % self.driver_name)
def safe_abs_path(rooted_at, *pieces):
# Avoids the following junk...
#
# >>> import os
# >>> os.path.join("/b", "..")
# '/b/..'
# >>> os.path.abspath(os.path.join("/b", ".."))
# '/'
path = os.path.abspath(os.path.join(rooted_at, *pieces))
if not path.startswith(rooted_at):
raise ValueError("Unable to create path that is outside of"
" parent directory '%s' using segments %s"
% (rooted_at, list(pieces)))
return path
def convert_blocking(blocking):
"""Converts a multi-type blocking variable into its derivatives."""
timeout = None
if not isinstance(blocking, bool):
timeout = float(blocking)
blocking = True
return blocking, timeout
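# Illustrative conversions (examples added for clarity):
#
# >>> convert_blocking(True)
# (True, None)
# >>> convert_blocking(False)
# (False, None)
# >>> convert_blocking(10.5)
# (True, 10.5)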
def collapse(config, exclude=None, item_selector=operator.itemgetter(-1)):
"""Collapses config with keys and **list/tuple** values.
NOTE(harlowja): The last item/index from the list/tuple value is selected
by default as the new value (values that are not lists/tuples are left
alone). If the list/tuple value is empty (zero length), then no value
is set.
"""
if not isinstance(config, dict):
raise TypeError("Unexpected config type, dict expected")
if not config:
return {}
if exclude is None:
exclude = set()
collapsed = {}
for (k, v) in six.iteritems(config):
if isinstance(v, (tuple, list)):
if k in exclude:
collapsed[k] = v
else:
if len(v):
collapsed[k] = item_selector(v)
else:
collapsed[k] = v
return collapsed
def to_binary(text, encoding='ascii'):
"""Return the binary representation of string (if not already binary)."""
if not isinstance(text, six.binary_type):
text = text.encode(encoding)
return text
class SerializationError(tooz.ToozError):
"Exception raised when serialization or deserialization breaks."
def dumps(data, excp_cls=SerializationError):
"""Serializes provided data using msgpack into a byte string."""
try:
return msgpackutils.dumps(data)
except (msgpack.PackException, ValueError) as e:
raise_with_cause(excp_cls,
encodeutils.exception_to_unicode(e),
cause=e)
def loads(blob, excp_cls=SerializationError):
"""Deserializes provided data using msgpack (from a prior byte string)."""
try:
return msgpackutils.loads(blob)
except (msgpack.UnpackException, ValueError) as e:
raise_with_cause(excp_cls,
encodeutils.exception_to_unicode(e),
cause=e)
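# Illustrative round trip (assumes the value is msgpack-serializable):
#
# >>> loads(dumps([1, 2, 3]))
# [1, 2, 3]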
def millis_to_datetime(milliseconds):
"""Converts number of milliseconds (from epoch) into a datetime object."""
return datetime.datetime.fromtimestamp(float(milliseconds) / 1000)
def raise_with_cause(exc_cls, message, *args, **kwargs):
"""Helper to raise + chain exceptions (when able) and associate a *cause*.
**For internal usage only.**
NOTE(harlowja): Since in py3.x exceptions can be chained (due to
:pep:`3134`) we should try to raise the desired exception with the given
*cause*.
:param exc_cls: the :py:class:`~tooz.ToozError` subclass to raise.
:param message: the text/str message that will be passed to
the exception's constructor as its first positional
argument.
:param args: any additional positional arguments to pass to the
exception's constructor.
:param kwargs: any additional keyword arguments to pass to the
exception's constructor.
"""
if not issubclass(exc_cls, tooz.ToozError):
raise ValueError("Subclass of tooz error is required")
excutils.raise_with_cause(exc_cls, message, *args, **kwargs)

63
tox.ini
View File

@ -1,63 +0,0 @@
[tox]
minversion = 1.8
envlist = py27,py35,py{27,35}-{zookeeper,redis,sentinel,memcached,postgresql,mysql,consul,etcd,etcd3,etcd3gw},pep8
[testenv]
# We need to install a bit more than just `test' because those drivers have
# custom tests that we always run
deps = .[test,zake,ipc,memcached,mysql,etcd,etcd3,etcd3gw]
zookeeper: .[zookeeper]
redis: .[redis]
sentinel: .[redis]
memcached: .[memcached]
postgresql: .[postgresql]
mysql: .[mysql]
etcd: .[etcd]
etcd3: .[etcd3]
etcd3gw: .[etcd3gw]
consul: .[consul]
setenv =
TOOZ_TEST_URLS = file:///tmp zake:// ipc://
zookeeper: TOOZ_TEST_DRIVERS = zookeeper
redis: TOOZ_TEST_DRIVERS = redis
sentinel: TOOZ_TEST_DRIVERS = redis --sentinel
memcached: TOOZ_TEST_DRIVERS = memcached
mysql: TOOZ_TEST_DRIVERS = mysql
postgresql: TOOZ_TEST_DRIVERS = postgresql
etcd: TOOZ_TEST_DRIVERS = etcd,etcd --cluster
etcd3: TOOZ_TEST_DRIVERS = etcd
etcd3: TOOZ_TEST_ETCD3 = 1
etcd3gw: TOOZ_TEST_DRIVERS = etcd
etcd3gw: TOOZ_TEST_ETCD3GW = 1
consul: TOOZ_TEST_DRIVERS = consul
# NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt
commands =
{toxinidir}/run-tests.sh {toxinidir}/tools/pretty_tox.sh "{posargs}"
{toxinidir}/run-examples.sh
[testenv:venv]
# This target is used by the gate to run Sphinx to build the docs
deps = {[testenv:docs]deps}
commands = {posargs}
[testenv:cover]
commands = python setup.py testr --slowest --coverage --testr-args="{posargs}"
[testenv:docs]
deps = .[doc,zake,ipc,zookeeper,redis,memcached,mysql,postgresql,consul]
commands = python setup.py build_sphinx
[testenv:pep8]
deps = hacking<0.13,>=0.12
doc8
commands =
flake8
doc8 doc/source
[flake8]
exclude=.venv,.git,.tox,dist,*egg,*.egg-info,build,examples,doc
show-source = True
[testenv:releasenotes]
deps = .[doc]
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
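# For reference, a single cell of the factor matrix above can be invoked
# directly, e.g. "tox -e py27-redis"; this assumes the matching backend
# service is available locally, as arranged by run-tests.sh.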