This project is no longer maintained.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".

http://lists.openstack.org/pipermail/openstack-dev/2016-March/090409.html

Change-Id: I78a16ad052072feba7670aaea144216875ddc0d1
Depends-On: If9c42d2cec35b68c4de85b750c8540d86322f3e3

parent 4e21fba469
commit 94987eaec7

@@ -1,16 +0,0 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:

http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

https://bugs.launchpad.net/kite

@@ -1,4 +0,0 @@
kite Style Commandments
===============================================

Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/

LICENSE
@@ -1,175 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
|
@@ -1,6 +0,0 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview

global-exclude *.pyc

README.rst
@@ -1,13 +1,8 @@
-===============================
-kite
-===============================
+This project is no longer maintained.
 
-The easiest ways to get keys in the cloud
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
 
-* Free software: Apache license
-* Documentation: http://docs.openstack.org/developer/kite
-
-Features
---------
-
-* TODO
+http://lists.openstack.org/pipermail/openstack-dev/2016-March/090409.html

@@ -1,528 +0,0 @@
Version 1 API
=============

Kite serves as a trusted third party that is
responsible for generation and secure distribution of signing and
encryption keys to communicating parties. These shared keys allow
messages to be exchanged between communicating parties with message
authentication, integrity and confidentiality. Kite is an integral part
of the implementation of RPC message security.

To establish a trusted relationship between a communicating party and
Kite, a long term shared key needs to be assigned to the party by a
properly authorized user, such as a cloud administrator. Assigning a key
to a party requires assigning an identity to that party in Kite. An
identity is comprised of a unique party name and the long term shared
key that it is associated with. This party name is used to identify a
party when it communicates with Kite or another party.

Kite is designed to enable secure messages to be exchanged between two
individual parties as well as between one individual party and a group
party. When a party wants to obtain keys to be used for communication
with another party, it makes an authenticated request to Kite for a
ticket. Kite returns a ticket to the requesting party that is encrypted
with the long term shared key that is associated with that party. This
encryption ensures that the ticket can only be decrypted by one who
possesses the long term key, which should only be the associated party
and Kite itself.

A ticket that has been issued by Kite contains a copy of the shared
encryption and signing keys. These keys are for the source party, which
is the party that requested the ticket. The ticket also contains a
payload that is intended for the destination party, which is the party
that the source party wants to communicate with. This payload contains
the information that is needed for the destination party to be able to
derive the shared encryption and signing keys. When the destination
party is an individual, the payload is encrypted with the long term
shared key that is associated with the destination party. When the
destination party is a group, the payload is encrypted with a shared
group key that Kite makes available to all members of the group. This
encryption allows the destination party to trust that the information in
the payload was supplied by Kite. When the source party is ready to
communicate with the destination party, it sends this encrypted payload
to the destination party along with whatever data it has protected with
the shared signing and encryption keys. The destination party can then
decrypt the payload and derive the shared encryption and signing keys
using the information in the payload. This results in both parties
having a copy of the shared signing and encryption keys that are trusted
as being issued by Kite. These shared keys can then be used by the
destination party to authenticate and decrypt the data sent by the
source party.

When a source party needs to send secure messages to multiple
recipients, an authorized user can define a group for those recipients
in Kite. Membership in a group is determined by comparing a party name
with the group name. If the name of a party matches ``<group name>.*``,
it is considered to be a member. For example, a party named
``scheduler.host.example.com`` would be considered a member of a group
named ``scheduler``. This matches up with the way that message queues
are named within OpenStack.
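
For illustration only, the matching rule amounts to a simple prefix check
(the helper name below is not part of Kite itself)::

    def is_group_member(party_name, group_name):
        # A party belongs to a group when its name is the group name
        # followed by '.' and any suffix.
        return party_name.startswith(group_name + '.')

    assert is_group_member('scheduler.host.example.com', 'scheduler')
    assert not is_group_member('compute.host.example.com', 'scheduler')
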
When a source party requests a ticket where the destination party is a
group, Kite generates a short-lived group key and assigns it to the
group. This group key is used to encrypt the payload in the ticket,
which contains the information that the destination party uses to derive
the shared signing and encryption keys. When an individual destination
party needs to decrypt the payload that it receives from the source
party as a part of a group message, it makes an authenticated request to
Kite to obtain the short-lived group key. If the requestor is a member of
the target group, Kite provides the short-lived group key encrypted with
the long term shared key associated with the individual destination
party. The group key can then be decrypted by the individual destination
party, allowing it to decrypt the payload and derive the shared signing
and encryption keys that can be used to authenticate and decrypt the
data sent by the source party.

When keys are obtained to send a message to a group, it is important to
note that all members of the group and the sender share the signing and
encryption keys. This makes it impossible for an individual destination
party to determine if a message was truly sent by the source party or
another destination party who is a member of the group. The only
assurance that a destination party has is that a message was sent by a
party who has possession of the shared signing and encryption keys. This
requires that all parties within a group trust each other to not
impersonate the source party.

The signing and encryption keys that are shared between communicating
parties are short-lived. The lifetime of these keys is defined by a
validity period that is set by Kite when it issues the ticket. A
suggested reasonable default validity period is 15 minutes, though it is
left up to the implementation to determine the appropriate validity
period. Once the validity period for the keys expires, a party should
refuse to use those keys anymore to prevent using keys that may have
been compromised. This requires the source party to request a new ticket
from Kite to get a new set of keys. If desired, an implementation could
choose to implement a grace period to account for clock skew between
parties. This grace period would allow a destination party to accept
messages that use recently expired keys. If a grace period is used, it
is recommended that the duration be kept small, such as 5 minutes or
less.
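
As a sketch of how a destination party might apply these rules (the helper
below and the 5 minute grace value are illustrative assumptions, not part
of the API)::

    import datetime

    GRACE = datetime.timedelta(minutes=5)

    def keys_usable(expiration, grace=GRACE):
        # Refuse keys once the expiration, plus a small allowance for
        # clock skew, has passed.
        return datetime.datetime.utcnow() <= expiration + grace
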
The principal advantage of using a key server compared to a pure public
key based system is that the encryption and signing key exchange can be
regulated by the key server. Since the key server is actively involved
in distributing keys to the communicating parties, it has the ability to
apply access control and deny communication between arbitrary peers in
the system when keys are requested. This allows for centralized access
control, prevents unauthorized communication and avoids the need to
perform post-authentication access control and policy look-ups on the
receiving side.

API Considerations
------------------

Kite requires that all ticket requests are authenticated and data is encrypted
where appropriate.

All timestamp values used in the API must be specified as a UTC ISO 8601
extended format date/time string that includes microseconds. An example
of a properly formatted timestamp is ``2012-03-26T10:01:01.720000``.
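
For example, a Python client could produce such a timestamp with the
standard library (a sketch; not taken from the Kite code base)::

    import datetime

    # UTC ISO 8601 extended format including microseconds,
    # e.g. '2012-03-26T10:01:01.720000'
    timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')
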
The default algorithms for message authentication and encryption are
respectively HMAC-SHA-256 and AES-128-CBC. Therefore the default block
size is 128 bits.

The source party that obtains a ticket is responsible for sending the
encrypted payload ``esek`` to the destination party. The source and
destination strings used when requesting the ticket also need to be sent
to the destination party to allow it to derive the shared signing and
encryption keys. Transferring this data to the destination party is
handled outside of the API described in this document, as it's expected
to be performed by the messaging implementation.

The key derivation used to generate the shared signing and encryption
keys uses the Hashed Message Authentication Code (HMAC)-based key
derivation function (HKDF) standard as described in RFC 5869. The
destination party needs to use the HKDF ``expand`` function using the
information that it receives from the source party in order to complete
derivation of the shared signing and encryption keys. The inputs to the
HKDF ``expand`` function are as follows:

::

    HKDF-Expand(esek.key, info, 256)

The ``info`` input for the HKDF ``expand`` function is a string that
concatenates the source, destination, and ``esek.timestamp`` strings
using a ``,`` separator between each element. An example of a valid
``info`` string where ``scheduler.host.example.com`` is the source,
``compute.host.example.com`` is the destination, and
``2012-03-26T10:01:01.720000`` is the ``esek.timestamp`` is as follows:

::

    scheduler.host.example.com,compute.host.example.com,2012-03-26T10:01:01.720000

The output of the HKDF expand function is an array of bytes of 256 bit
length. The first half is used as the signing key, and the second half
is used as the encryption key.
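
The derivation can be sketched with the third-party ``cryptography``
package (an assumption made for illustration; Kite's own implementation
may use different primitives internally)::

    import base64

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.hkdf import HKDFExpand

    def derive_keys(esek_key_b64, source, destination, timestamp):
        # info binds the derived keys to this source, destination and
        # timestamp, as described above.
        info = ','.join([source, destination, timestamp]).encode('utf-8')
        okm = HKDFExpand(algorithm=hashes.SHA256(), length=32,
                         info=info).derive(base64.b64decode(esek_key_b64))
        # 256 bits of output: first half is the signing key, second half
        # is the encryption key.
        return okm[:16], okm[16:]
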
The requests to create and delete long term keys should be restricted
such that only a properly authorized user, such as a cloud administrator,
is allowed to successfully perform the operations. The authentication
and authorization for these requests is left up to the implementation,
though it is expected that one would leverage the Identity API for these
purposes.

Resources and Operations
------------------------

Create Key: ``PUT /v1/keys/{name}``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Create a long term key in Kite.

Request
'''''''

The request resource name is the party associated with the key, and the
body consists of just the key.

- ``key`` - A base64 encoded 128 bit long cryptographic random key.

::

    {
        "key": "TXkgcHJlY2lvdXNzcy4u..."
    }

Response
''''''''

The response contains a name and generation value. The generation value
will only be changed if a new key is set. If the request sets the key to
the same value that already exists, the existing generation value will
be returned in the response. This makes the request idempotent.

- ``name`` - The party name associated with the key.
- ``generation`` - A unique integer used to identify the key.

::

    Status: 201 Created
    Location: /v1/keys/--key-name--
    {
        "name": "--key-name--",
        "generation": 2
    }
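
A minimal client-side sketch using the third-party ``requests`` package
(the endpoint URL and port are illustrative assumptions; real deployments
restrict this call to authorized administrators)::

    import base64
    import os

    import requests

    # 128 bit cryptographic random key, base64 encoded.
    key = base64.b64encode(os.urandom(16)).decode('utf-8')

    resp = requests.put(
        'http://localhost:9109/v1/keys/scheduler.host.example.com',
        json={'key': key})
    print(resp.status_code, resp.json())
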
Delete Key: ``DELETE /v1/keys/{name}``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Delete a key from Kite.

Request
'''''''

The request body is empty.

Response
''''''''

::

    Status: 204 No Content

Generate Ticket: ``POST /v1/tickets``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A ticket is generated to facilitate messaging between a ``source`` and a
``destination``.

Request
'''''''

A generate ticket request comprises metadata supplied as a base64
encoded JSON object and a signature.

::

    {
        "metadata": "Zhn8yhasf8hihkf...",
        "signature": "c2lnbmF0dXJl..."
    }

:Metadata:

A base64 encoded JSON object containing the following key/value pairs:

- ``source`` - The identity requesting a ticket.
- ``destination`` - The target for which the ticket will be valid.
- ``timestamp`` - Current timestamp from the requestor.
- ``nonce`` - Random single use data.

A timestamp and a nonce are necessary to avoid replay attacks.

::

    {
        "source": "scheduler.host.example.com",
        "destination": "compute.host.example.com",
        "timestamp": "2012-03-26T10:01:01.720000",
        "nonce": 1234567890
    }

:Signature:

A base64 encoded HMAC signature over the base64 encoded request metadata
object.

::

    Base64encode(HMAC(SigningKey, RequestMetadata))

The key used for the signature is the requestor's long term key. Kite
should verify the signature upon receipt of the request. This requires
that Kite access the ``source`` from the request metadata in order to
look up the associated long term key that can be used to verify the
signature. Kite should not access any other data contained in the
request metadata before verifying the signature. Failure to verify the
signature leaves Kite open to issuing a ticket to a party that is
impersonating the source.
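
A client-side sketch of building such a request (assuming the long term
key is available as raw bytes and HMAC-SHA-256, the default algorithm
above; the exact encoding used by Kite's implementation may differ)::

    import base64
    import datetime
    import hashlib
    import hmac
    import json
    import os

    def build_ticket_request(source, destination, long_term_key):
        metadata = base64.b64encode(json.dumps({
            'source': source,
            'destination': destination,
            'timestamp': datetime.datetime.utcnow().strftime(
                '%Y-%m-%dT%H:%M:%S.%f'),
            'nonce': int.from_bytes(os.urandom(8), 'big'),
        }).encode('utf-8'))
        # The signature covers the base64 encoded metadata object.
        sig = hmac.new(long_term_key, metadata, hashlib.sha256).digest()
        return {'metadata': metadata.decode('utf-8'),
                'signature': base64.b64encode(sig).decode('utf-8')}
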
Response
''''''''

The response always returns a triplet of metadata, encrypted ticket and
signature.

::

    Status: 200 OK

    {
        "metadata": "Zhn8yhasf8hihkf...",
        "ticket": "ZW5jcnlwdGVkIHRpY2tldA==",
        "signature": "c2lnbmF0dXJl..."
    }

:Metadata:

A base64 encoded JSON object containing the following key/value pairs:

- ``source`` - The identity of the requestor.
- ``destination`` - The target for which the ticket is valid.
- ``expiration`` - Timestamp of when the ticket expires.

::

    {
        "source": "scheduler.host.example.com",
        "destination": "compute.host.example.com",
        "expiration": "2012-03-26T11:01:01.720000"
    }

:Ticket:

The ticket is encrypted with the source's long term key and contains a
base64 encoded JSON object containing the following key/value pairs:

- ``skey`` - The newly generated base64 encoded message signing key.
- ``ekey`` - The newly generated base64 encoded message encryption key.
- ``esek`` - Encrypted signing and encryption key pair for the
  receiver.

::

    {
        "skey": "ZjhkuYZH8y87rzhgi7...",
        "ekey": "Fk8yksa8z8zKtakc8s...",
        "esek": "KBo8fajfo8ysad5hq2..."
    }

The ``esek`` is encrypted with the destination's long term key and
contains a base64 encoded JSON object containing the following key/value
pairs:

- ``key`` - The base64 encoded random key used to derive the signing
  and encryption keys.
- ``timestamp`` - Timestamp of when the key was created.
- ``ttl`` - An integer containing the validity length of the key in
  seconds.

::

    {
        "key": "Afa8sad2hgsd7asv7ad...",
        "timestamp": "2012-03-26T10:01:01.720000",
        "ttl": 28800
    }

The ``key`` and ``timestamp`` are used as inputs to the HKDF ``expand``
function to derive the signing and encryption keys as described in the
``API Considerations`` section of this document.

The ``timestamp`` plus ``ttl`` should be equivalent to the
``expiration`` timestamp contained in the response metadata.

:Signature:

A base64 encoded HMAC signature over the concatenation of the base64
encoded response metadata object and base64 encoded ticket object.

::

    Base64encode(HMAC(SigningKey, ResponseMetadata + Ticket))

The key used for the signature is the requestor's long term key. The
requestor should verify the signature upon receipt of the response
before accessing any data contained in the response metadata or the
ticket. Failure to verify the signature leaves the requestor open to
using metadata that was not actually issued by Kite.
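
A sketch of that verification step (same assumptions as the request
example: a raw byte long term key and HMAC-SHA-256)::

    import base64
    import hashlib
    import hmac

    def verify_response_signature(response, long_term_key):
        # Recompute the HMAC over metadata + ticket before trusting any
        # field in the response.
        mac = hmac.new(
            long_term_key,
            (response['metadata'] + response['ticket']).encode('utf-8'),
            hashlib.sha256).digest()
        expected = base64.b64encode(mac).decode('utf-8')
        if not hmac.compare_digest(expected, response['signature']):
            raise ValueError('ticket response signature mismatch')
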
Create Group: ``PUT /v1/groups/{name}``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Create a group in Kite.

Membership in groups is based on the party name. For example, a group
named ``scheduler`` will implicitly include any party name starting with
``scheduler.`` as a member (e.g. scheduler.host.example.com).

Request
'''''''

The request body is empty.

Response
''''''''

The response returns the group name from the request.

::

    Status: 201 Created
    Location: /v1/groups/--group-name--

    {
        "name": "--group-name--"
    }

Delete Group: ``DELETE /v1/groups/{name}``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Delete a group from Kite.

Request
'''''''

The request body is empty.

Response
''''''''

::

    Status: 204 No Content

Retrieve Group Key: ``POST /v1/groups``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When a ticket is requested where the destination is a group, a group key
is generated that is valid for a predetermined amount of time. Any
member of the group can retrieve the key as long as it is still valid.
Group keys are necessary to verify signatures and decrypt messages that
have a group name as the target.

Request
'''''''

A group key retrieval request is identical to a generate ticket request
except the destination is a group name instead of an individual party
name.

Response
''''''''

The response always returns a triplet of metadata, encrypted group key
and signature.

::

    Status: 200 OK

    {
        "metadata": "Zhn8yhasf8hihkf...",
        "group_key": "ZW5jcnlwdGVkIGdyb3VwIGtleQ==",
        "signature": "c2lnbmF0dXJl"
    }

:Metadata:

A base64 encoded JSON object containing the following key/value pairs:

- ``source`` - The identity of the requestor.
- ``destination`` - The target for which the ticket is valid.
- ``expiration`` - Timestamp of when the ticket expires.

::

    {
        "source": "api.host.example.com",
        "destination": "scheduler",
        "expiration": "2012-03-26T11:01:01.720000"
    }

:Group key:

The group key is encrypted with the requestor's long term key.

:Signature:

A base64 encoded HMAC signature over the concatenation of the base64
encoded response metadata object and the group key.

::

    Base64encode(HMAC(SigningKey, ResponseMetadata + GroupKey))

The key used for the signature is the requestor's long term key. The
requestor should verify the signature upon receipt of the response
before accessing any data contained in the response metadata or the
group key. Failure to verify the signature leaves the requestor open to
using data that was not actually issued by Kite.

HTTP Status Codes
~~~~~~~~~~~~~~~~~

Kite uses the following HTTP status codes to communicate specific success
and failure conditions to the client.

200 OK
^^^^^^

This status code is returned in response to a successful ``POST``
request to generate a ticket or retrieve a group key.

201 Created
^^^^^^^^^^^

This status code is returned in response to a successful ``PUT`` request
to create a group or long term key.

204 No Content
^^^^^^^^^^^^^^

This status code is returned in response to a successful ``DELETE``
request to delete a group or long term key. No content body is returned.

401 Unauthorized
^^^^^^^^^^^^^^^^

This status code is returned when either authentication has not been
performed, or authentication fails.

403 Forbidden
^^^^^^^^^^^^^

This status code is returned when the requester field does not match
either the sender or the receiver fields, or if the body of the request
does not result in the supplied signature.

404 Not Found
^^^^^^^^^^^^^

This status code is returned in response to a failed ``DELETE`` request
when a referenced entity cannot be found. It is also returned when a
``POST`` request is made where the destination party specified in the
request does not exist.

@ -1,75 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.abspath('../..'))
|
||||
# -- General configuration ----------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
#'sphinx.ext.intersphinx',
|
||||
'oslo.sphinx'
|
||||
]
|
||||
|
||||
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
||||
# text edit cycles.
|
||||
# execute "export SPHINX_DEBUG=1" in your terminal to disable
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'kite'
|
||||
copyright = u'2013, OpenStack Foundation'
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
add_module_names = True
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# -- Options for HTML output --------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. Major themes that come with
|
||||
# Sphinx are currently 'default' and 'sphinxdoc'.
|
||||
# html_theme_path = ["."]
|
||||
# html_theme = '_theme'
|
||||
# html_static_path = ['static']
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = '%sdoc' % project
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
latex_documents = [
|
||||
('index',
|
||||
'%s.tex' % project,
|
||||
u'%s Documentation' % project,
|
||||
u'OpenStack Foundation', 'manual'),
|
||||
]
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
#intersphinx_mapping = {'http://docs.python.org/': None}
|
|
@@ -1 +0,0 @@
.. include:: ../../CONTRIBUTING.rst

@@ -1,21 +0,0 @@
Welcome to kite's documentation!
================================

Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing

   api/v1

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

@@ -1,12 +0,0 @@
============
Installation
============

At the command line::

    $ pip install kite

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv kite
    $ pip install kite

@@ -1 +0,0 @@
.. include:: ../../README.rst

@@ -1,7 +0,0 @@
========
Usage
========

To use kite in a project::

    import kite

@ -1,218 +0,0 @@
|
|||
[DEFAULT]
|
||||
|
||||
#
|
||||
# Options defined in kite.common.service
|
||||
#
|
||||
|
||||
# IP for the server to bind to (string value)
|
||||
#bind_ip=0.0.0.0
|
||||
|
||||
# The port for the server (integer value)
|
||||
#port=9109
|
||||
|
||||
|
||||
#
|
||||
# Options defined in kite.openstack.common.lockutils
|
||||
#
|
||||
|
||||
# Whether to disable inter-process locks (boolean value)
|
||||
#disable_process_locking=false
|
||||
|
||||
# Directory to use for lock files. (string value)
|
||||
#lock_path=<None>
|
||||
|
||||
|
||||
#
|
||||
# Options defined in kite.openstack.common.log
|
||||
#
|
||||
|
||||
# Print debugging output (set logging level to DEBUG instead
|
||||
# of default WARNING level). (boolean value)
|
||||
#debug=false
|
||||
|
||||
# Print more verbose output (set logging level to INFO instead
|
||||
# of default WARNING level). (boolean value)
|
||||
#verbose=false
|
||||
|
||||
# Log output to standard error (boolean value)
|
||||
#use_stderr=true
|
||||
|
||||
# Format string to use for log messages with context (string
|
||||
# value)
|
||||
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
|
||||
|
||||
# Format string to use for log messages without context
|
||||
# (string value)
|
||||
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
|
||||
|
||||
# Data to append to log format when level is DEBUG (string
|
||||
# value)
|
||||
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
|
||||
|
||||
# Prefix each line of exception output with this format
|
||||
# (string value)
|
||||
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
|
||||
|
||||
# List of logger=LEVEL pairs (list value)
|
||||
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
|
||||
|
||||
# Publish error events (boolean value)
|
||||
#publish_errors=false
|
||||
|
||||
# Make deprecations fatal (boolean value)
|
||||
#fatal_deprecations=false
|
||||
|
||||
# If an instance is passed with the log message, format it
|
||||
# like this (string value)
|
||||
#instance_format="[instance: %(uuid)s] "
|
||||
|
||||
# If an instance UUID is passed with the log message, format
|
||||
# it like this (string value)
|
||||
#instance_uuid_format="[instance: %(uuid)s] "
|
||||
|
||||
# The name of logging configuration file. It does not disable
|
||||
# existing loggers, but just appends specified logging
|
||||
# configuration to any other existing logging options. Please
|
||||
# see the Python logging module documentation for details on
|
||||
# logging configuration files. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/log_config
|
||||
#log_config_append=<None>
|
||||
|
||||
# DEPRECATED. A logging.Formatter log message format string
|
||||
# which may use any of the available logging.LogRecord
|
||||
# attributes. This option is deprecated. Please use
|
||||
# logging_context_format_string and
|
||||
# logging_default_format_string instead. (string value)
|
||||
#log_format=<None>
|
||||
|
||||
# Format string for %%(asctime)s in log records. Default:
|
||||
# %(default)s (string value)
|
||||
#log_date_format=%Y-%m-%d %H:%M:%S
|
||||
|
||||
# (Optional) Name of log file to output to. If no default is
|
||||
# set, logging will go to stdout. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/logfile
|
||||
#log_file=<None>
|
||||
|
||||
# (Optional) The base directory used for relative --log-file
|
||||
# paths (string value)
|
||||
# Deprecated group/name - [DEFAULT]/logdir
|
||||
#log_dir=<None>
|
||||
|
||||
# Use syslog for logging. Existing syslog format is DEPRECATED
|
||||
# during I, and then will be changed in J to honor RFC5424
|
||||
# (boolean value)
|
||||
#use_syslog=false
|
||||
|
||||
# (Optional) Use syslog rfc5424 format for logging. If
|
||||
# enabled, will add APP-NAME (RFC5424) before the MSG part of
|
||||
# the syslog message. The old format without APP-NAME is
|
||||
# deprecated in I, and will be removed in J. (boolean value)
|
||||
#use_syslog_rfc_format=false
|
||||
|
||||
# Syslog facility to receive log lines (string value)
|
||||
#syslog_log_facility=LOG_USER
|
||||
|
||||
|
||||
[database]
|
||||
|
||||
#
|
||||
# Options defined in kite.openstack.common.db.options
|
||||
#
|
||||
|
||||
# The file name to use with SQLite (string value)
|
||||
#sqlite_db=kite.sqlite
|
||||
|
||||
# If True, SQLite uses synchronous mode (boolean value)
|
||||
#sqlite_synchronous=true
|
||||
|
||||
# The backend to use for db (string value)
|
||||
# Deprecated group/name - [DEFAULT]/db_backend
|
||||
#backend=sqlalchemy
|
||||
|
||||
# The SQLAlchemy connection string used to connect to the
|
||||
# database (string value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_connection
|
||||
# Deprecated group/name - [DATABASE]/sql_connection
|
||||
# Deprecated group/name - [sql]/connection
|
||||
#connection=<None>
|
||||
|
||||
# The SQL mode to be used for MySQL sessions. This option,
|
||||
# including the default, overrides any server-set SQL mode. To
|
||||
# use whatever SQL mode is set by the server configuration,
|
||||
# set this to no value. Example: mysql_sql_mode= (string
|
||||
# value)
|
||||
#mysql_sql_mode=TRADITIONAL
|
||||
|
||||
# Timeout before idle sql connections are reaped (integer
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
|
||||
# Deprecated group/name - [DATABASE]/sql_idle_timeout
|
||||
# Deprecated group/name - [sql]/idle_timeout
|
||||
#idle_timeout=3600
|
||||
|
||||
# Minimum number of SQL connections to keep open in a pool
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
|
||||
# Deprecated group/name - [DATABASE]/sql_min_pool_size
|
||||
#min_pool_size=1
|
||||
|
||||
# Maximum number of SQL connections to keep open in a pool
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
|
||||
# Deprecated group/name - [DATABASE]/sql_max_pool_size
|
||||
#max_pool_size=<None>
|
||||
|
||||
# Maximum db connection retries during startup. (setting -1
|
||||
# implies an infinite retry count) (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_max_retries
|
||||
# Deprecated group/name - [DATABASE]/sql_max_retries
|
||||
#max_retries=10
|
||||
|
||||
# Interval between retries of opening a sql connection
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_retry_interval
|
||||
# Deprecated group/name - [DATABASE]/reconnect_interval
|
||||
#retry_interval=10
|
||||
|
||||
# If set, use this value for max_overflow with sqlalchemy
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_max_overflow
|
||||
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
|
||||
#max_overflow=<None>
|
||||
|
||||
# Verbosity of SQL debugging information. 0=None,
|
||||
# 100=Everything (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_connection_debug
|
||||
#connection_debug=0
|
||||
|
||||
# Add python stack traces to SQL as comment strings (boolean
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_connection_trace
|
||||
#connection_trace=false
|
||||
|
||||
# If set, use this value for pool_timeout with sqlalchemy
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
|
||||
#pool_timeout=<None>
|
||||
|
||||
# Enable the experimental use of database reconnect on
|
||||
# connection lost (boolean value)
|
||||
#use_db_reconnect=false
|
||||
|
||||
# seconds between db connection retries (integer value)
|
||||
#db_retry_interval=1
|
||||
|
||||
# Whether to increase interval between db connection retries,
|
||||
# up to db_max_retry_interval (boolean value)
|
||||
#db_inc_retry_interval=true
|
||||
|
||||
# max seconds between db connection retries, if
|
||||
# db_inc_retry_interval is enabled (integer value)
|
||||
#db_max_retry_interval=10
|
||||
|
||||
# maximum db connection retries before error is raised.
|
||||
# (setting -1 implies an infinite retry count) (integer value)
|
||||
#db_max_retries=20
|
||||
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pbr.version
|
||||
|
||||
|
||||
__version__ = pbr.version.VersionInfo(
|
||||
'kite').version_string()
|
|
@ -1,34 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
import pecan
|
||||
|
||||
from kite.api import hooks
|
||||
from kite.api import root
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
DEFAULT_CONFIG = {'app': {}}
|
||||
|
||||
|
||||
def setup_app(config=None):
|
||||
app_hooks = [hooks.ConfigHook(),
|
||||
hooks.CryptoHook(),
|
||||
hooks.StorageHook(),
|
||||
]
|
||||
|
||||
app = pecan.make_app(root.RootController(),
|
||||
debug=CONF.debug,
|
||||
hooks=app_hooks)
|
||||
|
||||
return app
|
|
@ -1,34 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from pecan import hooks
|
||||
|
||||
from kite.common import crypto
|
||||
from kite.common import storage
|
||||
|
||||
|
||||
class ConfigHook(hooks.PecanHook):
|
||||
def before(self, state):
|
||||
state.request.conf = cfg.CONF
|
||||
|
||||
|
||||
class StorageHook(hooks.PecanHook):
|
||||
|
||||
def before(self, state):
|
||||
state.request.storage = storage.StorageManager.get_instance()
|
||||
|
||||
|
||||
class CryptoHook(hooks.PecanHook):
|
||||
|
||||
def before(self, state):
|
||||
state.request.crypto = crypto.CryptoManager.get_instance()
|
|
@ -1,29 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
|
||||
from kite.api.v1 import controllers
|
||||
|
||||
|
||||
class RootController(object):
|
||||
|
||||
v1 = controllers.Controller()
|
||||
|
||||
@pecan.expose('json')
|
||||
def index(self):
|
||||
pecan.response.status = 300
|
||||
return {
|
||||
'versions': [
|
||||
self.v1.version_info(),
|
||||
]
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from kite.api.v1.controllers import controller
|
||||
|
||||
Controller = controller.Controller
|
||||
|
||||
__all__ = ['Controller']
|
|
@ -1,37 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
|
||||
from kite.api.v1.controllers import group as group_controller
|
||||
from kite.api.v1.controllers import key as key_controller
|
||||
from kite.api.v1.controllers import ticket as ticket_controller
|
||||
|
||||
|
||||
class Controller(object):
|
||||
"""Version 1 API controller root."""
|
||||
|
||||
@staticmethod
|
||||
def version_info():
|
||||
return {'status': 'stable',
|
||||
'id': 'v1.0',
|
||||
'links': [{
|
||||
'href': '%s/v1/' % pecan.request.host_url,
|
||||
'rel': 'self'}]}
|
||||
|
||||
groups = group_controller.GroupController()
|
||||
keys = key_controller.KeyController()
|
||||
tickets = ticket_controller.TicketController()
|
||||
|
||||
@pecan.expose('json')
|
||||
def index(self):
|
||||
return {'version': self.version_info()}
|
|
@ -1,36 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import rest
|
||||
import wsme
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from kite.api.v1 import models
|
||||
|
||||
|
||||
class GroupController(rest.RestController):
|
||||
|
||||
@wsme_pecan.wsexpose(models.Group, wsme.types.text)
|
||||
def put(self, name):
|
||||
pecan.request.storage.create_group(name)
|
||||
return models.Group(name=name)
|
||||
|
||||
@wsme_pecan.wsexpose(None, wsme.types.text, status_code=204)
|
||||
def delete(self, name):
|
||||
pecan.request.storage.delete_group(name)
|
||||
|
||||
@wsme.validate(models.GroupKey)
|
||||
@wsme_pecan.wsexpose(models.GroupKey, body=models.GroupKeyRequest)
|
||||
def post(self, group_request):
|
||||
group_request.verify()
|
||||
return group_request.new_response()
|
|
@ -1,27 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
from pecan import rest
|
||||
import wsme
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from kite.api.v1 import models
|
||||
|
||||
|
||||
class KeyController(rest.RestController):
|
||||
|
||||
@wsme.validate(models.KeyData)
|
||||
@wsme_pecan.wsexpose(models.KeyData, wsme.types.text, body=models.KeyInput)
|
||||
def put(self, key_name, key_input):
|
||||
generation = pecan.request.storage.set_key(key_name, key_input.key)
|
||||
return models.KeyData(name=key_name, generation=generation)
|
|
@ -1,52 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64

import pecan
from pecan import rest
import wsme
import wsmeext.pecan as wsme_pecan

from kite.api.v1 import models
from kite.openstack.common import jsonutils


class TicketController(rest.RestController):

    @wsme.validate(models.Ticket)
    @wsme_pecan.wsexpose(models.Ticket, body=models.TicketRequest)
    def post(self, ticket_request):
        # verify all required fields are present and the signature is correct
        ticket_request.verify()

        # create a new random base key. With the combination of this base key
        # and the information available in the metadata a client will be able
        # to re-generate the keys required for this session.
        rndkey = pecan.request.crypto.extract(ticket_request.source.key,
                                              pecan.request.crypto.new_key())

        # generate the keys to communicate between these two endpoints.
        s_key, e_key = pecan.request.crypto.generate_keys(rndkey,
                                                          ticket_request.info)

        # encrypt the base key for the target; this can be used to generate
        # the SEK on the target
        esek_data = {'key': base64.b64encode(rndkey),
                     'timestamp': ticket_request.time_str,
                     'ttl': ticket_request.ttl.seconds}

        # encrypt returns a base64-encoded encrypted string
        esek = pecan.request.crypto.encrypt(ticket_request.destination.key,
                                            jsonutils.dumps(esek_data))

        return ticket_request.new_response(e_key, s_key, esek)
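The comments in TicketController.post describe a two-step derivation: a fresh random key is bound to the requester's key with extract(), then expanded with the request info into a signing half and an encryption half, while the base key is sealed for the destination as the esek. A standalone sketch of that extract/expand shape using a generic HMAC construction, purely illustrative and not kite's cryptoutils wrappers:

# Illustrative HKDF-style extract/expand, mirroring the calls above.
import hashlib
import hmac
import os


def extract(salt, ikm):
    return hmac.new(salt, ikm, hashlib.sha256).digest()


def expand(prk, info, length):
    okm, block, counter = b'', b'', 1
    while len(okm) < length:
        block = hmac.new(prk, block + info + bytes([counter]),
                         hashlib.sha256).digest()
        okm += block
        counter += 1
    return okm[:length]


source_key = os.urandom(16)                    # requester's long-term key
rndkey = extract(source_key, os.urandom(16))   # the new random base key
info = b'source:1,destination:1,<timestamp>'   # predictable request info
keys = expand(rndkey, info, 32)
s_key, e_key = keys[:16], keys[16:]            # signing and encryption halves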
@ -1,23 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.api.v1.models.group import *  # noqa
from kite.api.v1.models.key import *  # noqa
from kite.api.v1.models.ticket import *  # noqa

__all__ = ['Group',
           'GroupKey',
           'GroupKeyRequest',
           'KeyInput',
           'KeyData',
           'Ticket',
           'TicketRequest']
@ -1,183 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import functools

import pecan
import wsme

from kite.common import exception
from kite.common import utils
from kite.openstack.common import jsonutils
from kite.openstack.common import timeutils


def memoize(f):
    """Create a property and cache the return value for future use."""
    @property
    @functools.wraps(f)
    def wrapper(self):
        try:
            val = self._cache[f.func_name]
        except KeyError:
            val = f(self)
            self._cache[f.func_name] = val

        return val

    return wrapper


def malformed(msg):
    """Raise a malformed message exception if something goes wrong."""
    def wrap(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception:
                pecan.abort(400, 'Invalid %s' % msg)

        return wrapper
    return wrap


class Endpoint(object):
    """A source or destination for a ticket."""

    def __init__(self, endpoint_str, group=None):
        self._cache = dict()
        self._set_endpoint(endpoint_str)
        self._group = group

    @malformed('endpoint')
    def _set_endpoint(self, endpoint_str):
        self.host, self.generation = utils.split_host(endpoint_str)

    @memoize
    def key_data(self):
        try:
            return pecan.request.storage.get_key(self.host,
                                                 self.generation,
                                                 group=self._group)
        except exception.CryptoError:
            pecan.abort(500, "Failed to decrypt key for '%s:%s'. " %
                        (self.host, self.generation))
        except exception.KeyNotFound:
            pecan.abort(404, "Could not find key")

    @property
    def key(self):
        return self.key_data['key']

    @property
    def key_group(self):
        return self.key_data['group']

    @property
    def key_generation(self):
        return self.key_data['generation']

    @property
    def key_str(self):
        return utils.join_host(self.host, self.key_generation)


class BaseRequest(wsme.types.Base):

    metadata = wsme.wsattr(wsme.types.text, mandatory=True)
    signature = wsme.wsattr(wsme.types.text, mandatory=True)

    def __init__(self, **kwargs):
        super(BaseRequest, self).__init__(**kwargs)
        self._cache = dict()
        self.now = timeutils.utcnow()

        # NOTE(jamielennox): This is essentially a class variable, however
        # that confuses WSME.
        self.destination_is_group = None

    @memoize
    @malformed("metadata")
    def meta(self):
        return jsonutils.loads(base64.decodestring(self.metadata))

    @memoize
    @malformed("source")
    def source(self):
        return Endpoint(self.meta['source'], group=False)

    @memoize
    @malformed("destination")
    def destination(self):
        return Endpoint(self.meta['destination'],
                        group=self.destination_is_group)

    @memoize
    @malformed("timestamp")
    def timestamp(self):
        return timeutils.parse_strtime(self.meta['timestamp'])

    @property
    @malformed("nonce")
    def nonce(self):
        return self.meta['nonce']

    @property
    def time_str(self):
        return timeutils.strtime(self.now)

    def verify(self):
        """Ensure that the ticket request is recent enough to be valid and
        the signature is correct for the requestor.
        """
        if (self.now - self.timestamp) > self.ttl:
            pecan.abort(401, 'Ticket validity expired')

        if not self.nonce:
            # just check this until we actually use it
            pecan.abort(400, 'Invalid nonce')

        try:
            sigc = pecan.request.crypto.sign(self.source.key, self.metadata)
        except exception.CryptoError:
            pecan.abort(400, "Unexpected error: Couldn't reproduce signature")

        if sigc != self.signature:
            pecan.abort(401, 'Invalid Signature')


class BaseResponse(wsme.types.Base):

    metadata = wsme.wsattr(wsme.types.text, mandatory=True)
    signature = wsme.wsattr(wsme.types.text, mandatory=True)

    def set_metadata(self, source, destination, expiration):
        """Attach the generation metadata to the ticket.

        This informs the client and server of the expiration and the expected
        sending and receiving hosts, and will be validated by both client and
        server.
        """
        metadata = jsonutils.dumps({'source': source,
                                    'destination': destination,
                                    'expiration': expiration,
                                    'encryption': True})
        self.metadata = base64.b64encode(metadata)

    def sign(self, key, data):
        """Sign the response.

        This will be signed with the requestor's key so that it knows that the
        issuing server has a correct copy of the key.
        """
        self.signature = pecan.request.crypto.sign(key, self.metadata + data)
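memoize above turns a method into a property whose result is cached in the instance's _cache dict, so repeated accesses (self.source, self.meta and friends inside verify()) only hit the backend once per request. A small illustration of the pattern outside of WSME, assuming Python 2 and the memoize helper defined in this file:

# Stand-alone illustration of the memoize pattern used by BaseRequest.
class Demo(object):
    def __init__(self):
        self._cache = dict()
        self.calls = 0

    @memoize
    def expensive(self):
        self.calls += 1
        return self.calls


d = Demo()
assert d.expensive == 1
assert d.expensive == 1   # cached: the wrapped function ran only once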
@ -1,68 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import pecan
import wsme

from kite.api.v1.models import base
from kite.common import exception


class Group(wsme.types.Base):
    name = wsme.wsattr(wsme.types.text, mandatory=True)


class GroupKey(base.BaseResponse):

    group_key = wsme.wsattr(wsme.types.text, mandatory=True)

    def sign(self, key):
        super(GroupKey, self).sign(key, self.group_key)

    def set_group_key(self, rkey, group_key):
        self.group_key = pecan.request.crypto.encrypt(rkey, group_key)


class GroupKeyRequest(base.BaseRequest):

    def __init__(self, **kwargs):
        super(GroupKeyRequest, self).__init__(**kwargs)

        seconds = int(pecan.request.conf.ticket_lifetime)
        self.ttl = datetime.timedelta(seconds=seconds)
        self.destination_is_group = True

    def new_response(self):
        response = GroupKey()

        response.set_metadata(source=self.source.key_str,
                              destination=self.destination.key_str,
                              expiration=self.now + self.ttl)

        response.set_group_key(self.source.key, self.destination.key)
        response.sign(self.source.key)

        return response

    def verify(self):
        super(GroupKeyRequest, self).verify()

        # check that we are a group member
        if self.source.host.split('.')[0] != self.destination.host:
            pecan.abort(401, 'Not a group member')

        # we can only request a group key for a group
        if not self.destination.key_group:
            raise exception.KeyNotFound(name=self.destination.host,
                                        generation=self.destination.generation)
@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import wsme


class KeyInput(wsme.types.Base):
    key = wsme.wsattr(wsme.types.binary, mandatory=True)


class KeyData(wsme.types.Base):
    name = wsme.wsattr(wsme.types.text, mandatory=True)
    generation = wsme.wsattr(int, mandatory=True)
@ -1,81 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import datetime

import pecan
import wsme

from kite.api.v1.models import base
from kite.openstack.common import jsonutils


class Ticket(base.BaseResponse):

    ticket = wsme.wsattr(wsme.types.text, mandatory=True)

    def set_ticket(self, rkey, enc_key, signature, esek):
        """Create and encrypt a ticket to the requestor.

        The requestor will be able to decrypt the ticket with their key and the
        information in the metadata to get the new point-to-point key.
        """
        ticket = jsonutils.dumps({'skey': base64.b64encode(signature),
                                  'ekey': base64.b64encode(enc_key),
                                  'esek': esek})

        self.ticket = pecan.request.crypto.encrypt(rkey, ticket)

    def sign(self, key):
        """Sign the ticket response.

        This will be signed with the requestor's key so that it knows that the
        issuing server has a correct copy of the key.
        """
        super(Ticket, self).sign(key, self.ticket)


class TicketRequest(base.BaseRequest):

    def __init__(self, **kwargs):
        super(TicketRequest, self).__init__(**kwargs)

        seconds = int(pecan.request.conf.ticket_lifetime)
        self.ttl = datetime.timedelta(seconds=seconds)

    @property
    def info(self):
        """A predictable text string that can be used as the base for
        generating keys.
        """
        return "%s,%s,%s" % (self.source.key_str,
                             self.destination.key_str,
                             self.time_str)

    def new_response(self, enc_key, signature, esek):
        response = Ticket()

        response.set_metadata(source=self.source.key_str,
                              destination=self.destination.key_str,
                              expiration=self.now + self.ttl)

        # encrypt the sig and key back to the requester as well as the esek
        # to forward with messages.
        response.set_ticket(self.source.key, enc_key, signature, esek)

        # finish building the response and sign it. We sign with the
        # requester's key at the end because the ticket does not have to be
        # encrypted but we still have to provide integrity of the ticket.
        response.sign(self.source.key)

        return response
@ -1,55 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys
from wsgiref import simple_server

from oslo_config import cfg

from kite.api import app
from kite.common import service
from kite.openstack.common import gettextutils
from kite.openstack.common import log

PROJECT = 'kite'
gettextutils.install(PROJECT)


CONF = cfg.CONF


class Application(object):
    def __init__(self):
        self.app = app.setup_app()

    def __call__(self, environ, start_response):
        return self.app(environ, start_response)


def main():
    service.prepare_service(sys.argv)

    # Build and start the WSGI app
    host = CONF.bind_ip
    port = CONF.port
    wsgi = simple_server.make_server(host, port, Application())

    LOG = log.getLogger(__name__)
    LOG.info(_("Serving on http://%(host)s:%(port)d"), {'host': host,
                                                        'port': port})
    CONF.log_opt_values(LOG, logging.INFO)

    try:
        wsgi.serve_forever()
    except KeyboardInterrupt:
        pass
@ -1,66 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from kite.openstack.common import gettextutils

gettextutils.install('kite')

from oslo_config import cfg

from kite.common import service
from kite.db import migration

CONF = cfg.CONF


def do_db_version():
    """Print database's current migration level."""
    print(migration.version())


def do_db_upgrade():
    return migration.upgrade(CONF.command.revision)


def do_db_downgrade():
    return migration.downgrade(CONF.command.revision)


def add_command_parsers(subparsers):
    parser = subparsers.add_parser('db_version')
    parser.set_defaults(func=do_db_version)

    parser = subparsers.add_parser('db_upgrade')
    parser.set_defaults(func=do_db_upgrade)
    parser.add_argument('--revision', nargs='?')

    parser = subparsers.add_parser('db_downgrade')
    parser.set_defaults(func=do_db_downgrade)
    parser.add_argument('--revision', nargs='?')


command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)


def main():
    CONF.register_cli_opt(command_opt)
    service.prepare_service(sys.argv)

    try:
        CONF.command.func()
    except Exception as e:
        sys.exit("ERROR: %s" % e)
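main() registers the db_version, db_upgrade and db_downgrade subcommands through oslo.config's SubCommandOpt. The console-script name comes from setup.cfg, which is not part of this hunk, so the invocation below is a hedged sketch that drives the module directly; the module path and config file location are assumptions:

# Hypothetical direct invocation of the manage entry point.
import sys

from kite.cmd import manage   # module path assumed from the tree layout

sys.argv = ['kite-manage', '--config-file', '/etc/kite/kite.conf',
            'db_upgrade']
manage.main()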
@ -1,162 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import errno
import logging
import os

from oslo_config import cfg

from kite.common import exception
from kite.common import utils
from kite.openstack.common.crypto import utils as cryptoutils

CONF = cfg.CONF

CRYPTO_OPTS = [
    cfg.StrOpt('master_key_file',
               default='/etc/kite/kds.mkey',
               help='The location of the KDS master key. MUST be private. '
                    'If missing or unavailable one will be created.'),
    cfg.StrOpt('enctype',
               default='AES',
               help='Encryption Algorithm used to encrypt service keys '
                    'for storing in the database.'),
    cfg.StrOpt('hashtype',
               default='SHA256',
               help='Hashing Algorithm used when generating signatures '
                    'for integrity when storing keys to the database'),
]

CONF.register_group(cfg.OptGroup(name='crypto',
                                 title='Cryptographic Options'))
CONF.register_opts(CRYPTO_OPTS, group='crypto')

_logger = logging.getLogger(__name__)


class CryptoManager(utils.SingletonManager):

    KEY_SIZE = 16

    def __init__(self):
        self.crypto = cryptoutils.SymmetricCrypto(
            enctype=CONF.crypto.enctype,
            hashtype=CONF.crypto.hashtype)
        self.hkdf = cryptoutils.HKDF(hashtype=CONF.crypto.hashtype)
        self.mkey = self._load_master_key()

    def _load_master_key(self):
        """Load the master key from file, or create one if not available."""

        # TODO(jamielennox): This is but one way that a key file could be
        # stored. This can be pluggable later for storing/fetching keys from
        # better locations.

        mkey = None

        try:
            with open(CONF.crypto.master_key_file, 'r') as f:
                mkey = base64.b64decode(f.read())
        except IOError as e:
            if e.errno == errno.ENOENT:
                flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
                mkey = self.crypto.new_key(self.KEY_SIZE)
                f = None
                try:
                    f = os.open(CONF.crypto.master_key_file, flags, 0o600)
                    os.write(f, base64.b64encode(mkey))
                except Exception as x:
                    _logger.warn('Failed to read master key initially: %s', e)
                    _logger.warn('Failed to create new master key: %s', x)
                    raise x
                finally:
                    if f:
                        os.close(f)
            else:
                # the file could be unreadable due to bad permissions
                # so just pop up whatever error comes
                raise e

        return mkey

    def new_key(self, key_size=KEY_SIZE):
        return self.crypto.new_key(key_size)

    def generate_keys(self, prk, info, key_size=KEY_SIZE):
        """Generate a new key from an existing key and information.

        :param string prk: Existing pseudo-random key
        :param string info: Additional information for building a new key

        :returns tuple(string, string): raw signature key, raw encryption key
        """
        key = self.hkdf.expand(prk, info, 2 * key_size)
        return key[:key_size], key[key_size:]

    def extract(self, key, rnd_data):
        return self.hkdf.extract(key, rnd_data)

    def encrypt(self, key, data):
        return self.crypto.encrypt(key, data)

    def sign(self, key, data):
        return self.crypto.sign(key, data)

    def get_storage_keys(self, name):
        """Get a set of keys that will be used to encrypt the data for this
        identity in the database.

        :param string name: Key Identifier

        :returns tuple(string, string): raw signature key, raw encryption key
        """
        if not self.mkey:
            raise exception.CryptoError(reason=_('Failed to find mkey'))

        return self.generate_keys(self.mkey, name, self.KEY_SIZE)

    def encrypt_key(self, name, key):
        """Encrypt a key for storage.

        Returns the encrypted key and its signature.
        """
        ekey, skey = self.get_storage_keys(name)

        try:
            enc_key = self.crypto.encrypt(ekey, key, b64encode=False)
            signature = self.crypto.sign(skey, enc_key, b64encode=False)
        except cryptoutils.CryptoutilsException:
            raise exception.CryptoError(reason=_('Failed to encrypt key'))

        return enc_key, signature

    def decrypt_key(self, name, enc_key, signature):
        """Decrypt a key from storage.

        Returns the raw key data.
        """
        ekey, skey = self.get_storage_keys(name)

        try:
            sigc = self.crypto.sign(skey, enc_key, b64encode=False)

            if sigc != signature:
                raise exception.CryptoError(reason=_('Signature check failed'))

            plain = self.crypto.decrypt(ekey, enc_key, b64decode=False)

        except cryptoutils.CryptoutilsException:
            raise exception.CryptoError(reason=_('Failed to decrypt key'))

        return plain
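CryptoManager derives a per-name signing/encryption key pair from the master key and uses it to seal service keys before they reach the database. A hedged usage sketch; the master_key_file override is an assumption for a local test run and the oslo crypto utilities imported above must be available:

# Illustrative round trip through the manager defined above.
from oslo_config import cfg

cfg.CONF.set_override('master_key_file', '/tmp/kds.mkey', group='crypto')

manager = CryptoManager.get_instance()

service_key = manager.new_key()                  # raw 16-byte key
enc_key, sig = manager.encrypt_key('api.host.example', service_key)

# decrypt_key verifies the signature before returning the plaintext key.
assert manager.decrypt_key('api.host.example', enc_key, sig) == service_key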
@ -1,64 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.openstack.common.gettextutils import _  # noqa

_FATAL_EXCEPTION_FORMAT_ERRORS = False


class KdsException(Exception):
    """Base Exception class.

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """

    msg_fmt = _('An unknown exception occurred')

    def __init__(self, **kwargs):
        try:
            self._error_string = self.msg_fmt % kwargs

        except Exception:
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise
            else:
                # at least get the core message out if something happened
                self._error_string = self.msg_fmt

    def __str__(self):
        return self._error_string


class BackendException(KdsException):
    msg_fmt = _("Failed to load the '%(backend)s' backend because it is not "
                "allowed. Allowed backends are: %(allowed)s")


class IntegrityError(KdsException):
    msg_fmt = _('Cannot set key data for %(name)s: %(reason)s')


class GroupStatusChanged(IntegrityError):

    def __init__(self, **kwargs):
        kwargs.setdefault('reason', "Can't change group status of a host")
        super(GroupStatusChanged, self).__init__(**kwargs)


class KeyNotFound(KdsException):
    msg_fmt = _('No key for %(name)s:%(generation)s. %(reason)s')


class CryptoError(KdsException):
    msg_fmt = _('Cryptographic Failure: %(reason)s')
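The msg_fmt pattern means callers raise these exceptions with keyword arguments rather than preformatted strings, and a formatting failure falls back to the raw template instead of masking the original error. A short illustration using the classes above:

# Keyword-argument style expected by the exception hierarchy.
try:
    raise CryptoError(reason='Signature check failed')
except KdsException as e:
    print(str(e))   # Cryptographic Failure: Signature check failed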
@ -1,56 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_config import cfg

from kite.openstack.common.db import options
from kite.openstack.common import log

_COMMON_PATH = os.path.abspath(os.path.dirname(__file__))
_ROOT_PATH = os.path.normpath(os.path.join(_COMMON_PATH, '..', '..'))
_DEFAULT_SQL_CONNECTION = 'sqlite:///%s' % os.path.join(_ROOT_PATH,
                                                        'kite.sqlite')

CONF = cfg.CONF

API_SERVICE_OPTS = [
    cfg.StrOpt('bind_ip',
               default='0.0.0.0',
               help='IP for the server to bind to'),
    cfg.IntOpt('port',
               default=9109,
               help='The port for the server'),
    cfg.IntOpt('ticket_lifetime',
               default=3600,
               help='Length of ticket validity (in seconds)'),
]

CONF.register_opts(API_SERVICE_OPTS)


def parse_args(args, default_config_files=None):
    CONF(args=args[1:],
         project='kite',
         default_config_files=default_config_files)


def prepare_service(argv=[]):
    options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='kite.sqlite')
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['sqlalchemy=WARN',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    parse_args(argv)
    log.setup('kite')
@ -1,115 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from kite.common import crypto
from kite.common import exception
from kite.common import utils
from kite.db import api as dbapi
from kite.openstack.common import timeutils


class StorageManager(utils.SingletonManager):

    def get_key(self, name, generation=None, group=None):
        """Retrieves a key from the driver and decrypts it for use.

        If it is a group key and it has expired or is not found then generate
        a new one and return that for use.

        :param string name: Key Identifier
        :param int generation: Key generation to retrieve. Default latest
        """
        key = dbapi.get_instance().get_key(name,
                                           generation=generation,
                                           group=group)
        crypto_manager = crypto.CryptoManager.get_instance()

        if not key:
            # host or group not found
            raise exception.KeyNotFound(name=name, generation=generation)

        if group is not None and group != key['group']:
            raise exception.KeyNotFound(name=name, generation=generation)

        now = timeutils.utcnow()
        expiration = key.get('expiration')

        if key['group'] and expiration and generation is not None:
            # if you ask for a specific group key generation then you can
            # retrieve it for a little while beyond it being expired
            timeout = expiration + datetime.timedelta(minutes=10)
        elif key['group'] and expiration:
            # when we can generate a new key we don't want to use an older one
            # that is just going to require refreshing soon
            timeout = expiration - datetime.timedelta(minutes=2)
        else:
            # otherwise we either have an un-expiring group or host key which
            # we just check against now
            timeout = expiration

        if timeout and now >= timeout:
            if key['group']:
                # clear the key so it will generate a new group key
                key = {'group': True}
            else:
                raise exception.KeyNotFound(name=name, generation=generation)

        if 'key' in key:
            dec_key = crypto_manager.decrypt_key(name,
                                                 enc_key=key['key'],
                                                 signature=key['signature'])
            return {'key': dec_key,
                    'generation': key['generation'],
                    'name': key['name'],
                    'group': key['group']}

        if generation is not None or not key['group']:
            # A specific generation was asked for or it's not a group key
            # so don't generate a new one
            raise exception.KeyNotFound(name=name, generation=generation)

        # generate and return a new group key
        new_key = crypto_manager.new_key()
        enc_key, signature = crypto_manager.encrypt_key(name, new_key)
        expiration = now + datetime.timedelta(minutes=15)

        new_gen = dbapi.get_instance().set_key(name,
                                               key=enc_key,
                                               signature=signature,
                                               group=True,
                                               expiration=expiration)

        return {'key': new_key,
                'generation': new_gen,
                'name': name,
                'group': True,
                'expiration': expiration}

    def set_key(self, name, key, expiration=None):
        """Encrypt a key and store it to the backend.

        :param string name: Key Identifier
        :param string key: raw key data
        """
        crypto_manager = crypto.CryptoManager.get_instance()
        enc_key, signature = crypto_manager.encrypt_key(name, key)
        return dbapi.get_instance().set_key(name, key=enc_key,
                                            signature=signature,
                                            group=False, expiration=expiration)

    def create_group(self, name):
        dbapi.get_instance().create_group(name)

    def delete_group(self, name):
        dbapi.get_instance().delete_host(name, group=True)
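The expiry handling above gives group keys a grace window: an explicitly requested generation stays retrievable for ten minutes past its expiration, while a 'latest' lookup stops returning a key two minutes before it expires so that a fresh generation is cut instead. A purely illustrative timeline with made-up timestamps:

# Timeline sketch for a group key that expires at 12:00.
import datetime

expiration = datetime.datetime(2014, 4, 1, 12, 0, 0)

explicit_generation_cutoff = expiration + datetime.timedelta(minutes=10)
latest_lookup_cutoff = expiration - datetime.timedelta(minutes=2)

now = datetime.datetime(2014, 4, 1, 11, 59, 0)
print(now < latest_lookup_cutoff)         # False: a new group key is generated
print(now < explicit_generation_cutoff)   # True: the old generation is served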
@ -1,93 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from oslo_config import cfg

from kite.common import exception

CONF = cfg.CONF

# NOTE(jamielennox): This class is a direct copy from nova, glance, heat and
# a bunch of other projects. It has been submitted to OSLO
# https://review.openstack.org/#/c/67002/ and should be synced when available.


class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value."""

    def __init__(self, pivot, config_group=None, **backends):
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None
        self.__config_group = config_group

    def __get_backend(self):
        if not self.__backend:
            if self.__config_group is None:
                backend_name = CONF[self.__pivot]
            else:
                backend_name = CONF[self.__config_group][self.__pivot]
            if backend_name not in self.__backends:
                allowed = ', '.join(self.__backends.iterkeys())
                raise exception.BackendException(backend=backend_name,
                                                 allowed=allowed)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name=name, globals=None,
                                        locals=None, fromlist=fromlist)
        return self.__backend

    def __getattr__(self, key):
        backend = self.__get_backend()
        return getattr(backend, key)


class SingletonManager(object):

    _instance = None

    @classmethod
    def get_instance(cls):
        if not cls._instance:
            cls._instance = cls()

        return cls._instance

    @classmethod
    def reset(cls):
        cls._instance = None


def split_host(string):
    if not string:
        return (None, None)

    try:
        host, generation = string.rsplit(':', 1)
        generation = int(generation)
    except ValueError:
        host = string
        generation = None

    return (host, generation)


def join_host(host, generation):
    return "%s:%d" % (host, generation)
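split_host and join_host round-trip the 'name:generation' identifiers used throughout the API; a missing generation simply comes back as None. Expected behaviour of the helpers above:

assert split_host('compute.example:3') == ('compute.example', 3)
assert split_host('compute.example') == ('compute.example', None)
assert split_host(None) == (None, None)
assert join_host('compute.example', 3) == 'compute.example:3'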
@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from kite.openstack.common.db import api as db_api

CONF = cfg.CONF
IMPL = None

CONF.import_opt('backend',
                'kite.openstack.common.db.options',
                group='database')

_BACKEND_MAPPING = {'sqlalchemy': 'kite.db.sqlalchemy.api',
                    'kvs': 'kite.db.kvs.api'}


def reset():
    global IMPL
    IMPL = None


def get_instance(force_new=False):
    """Return a DB API instance."""
    global IMPL
    if not IMPL or force_new:
        IMPL = db_api.DBAPI(CONF.database.backend,
                            backend_mapping=_BACKEND_MAPPING)

    return IMPL
@ -1,85 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class Connection(object):

    @abc.abstractmethod
    def set_key(self, name, key, signature, group, expiration=None):
        """Set a key for a name in the database.

        If a key is set for an existing key name then a new key entry with a
        new generation value is created.

        :param string name: The unique name of the key to set.
        :param string key: The key data to save.
        :param string signature: The signature of the key data to save.
        :param bool group: Whether this is a group key or not.
        :param DateTime expiration: When the key should expire
                                    (None means never expire).

        :raises IntegrityError: If a key exists then new keys assigned to the
                                name must have the same 'group' setting. If the
                                value of group is changed an IntegrityError is
                                raised.

        :returns int: The generation number of this key.
        """

    @abc.abstractmethod
    def get_key(self, name, generation=None, group=None):
        """Get the key stored for a given name.

        :param string name: The unique name of the key to fetch.
        :param int generation: A specific generation of the key to retrieve. If
                               not specified the most recent generation is
                               retrieved.
        :param bool group: If provided only retrieve this key if its group
                           value is the same.

        :returns dict: A dictionary of the key information or None if not
                       found. Keys will contain:
                       - name: Unique name of the key.
                       - group: If this key is a group key or not.
                       - key: The key data.
                       - signature: The signature of the key data.
                       - generation: The generation of this key.
                       - expiration: When the key expires (or None).
                       Expired keys can be returned.
        """

    @abc.abstractmethod
    def create_group(self, name):
        """Create a new group.

        :param string name: The group name.

        :returns bool: True if work was performed, False otherwise (e.g. if
                       the group already existed).
        """

    @abc.abstractmethod
    def delete_host(self, name, group=None):
        """Delete a host or group.

        :param string name: The host or group name.
        :param bool group: (optional) If set only delete the host if it is (or
                           is not if False) a group.

        :returns bool: True if work was performed, False otherwise (e.g. when
                       deleting a group/host that never existed).
        """
@ -1,92 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.common import exception
from kite.db import connection


def get_backend():
    return KvsDbImpl()


def reset():
    pass


class KvsDbImpl(connection.Connection):
    """A simple in-memory Key Value backend.

    KVS backends are designed for use in testing and for simple debugging.
    This backend should not be deployed in any production systems.
    """

    def __init__(self):
        super(KvsDbImpl, self).__init__()
        self.clear()

    def clear(self):
        self._data = dict()

    def set_key(self, name, key, signature, group, expiration=None):
        host = self._data.setdefault(name, {'latest_generation': 0,
                                            'keys': dict(), 'group': group})

        if host['group'] != group:
            raise exception.GroupStatusChanged(name=name)

        host['latest_generation'] += 1
        host['keys'][host['latest_generation']] = {'key': key,
                                                   'signature': signature,
                                                   'expiration': expiration}

        return host['latest_generation']

    def get_key(self, name, generation=None, group=None):
        response = {'name': name}
        try:
            host = self._data[name]
            if generation is None:
                generation = host['latest_generation']
            key_data = host['keys'][generation]
        except KeyError:
            return None

        response['generation'] = generation
        response['group'] = host['group']

        if group is not None and host['group'] != group:
            return None

        response.update(key_data)
        return response

    def create_group(self, name):
        if name in self._data:
            return False

        self._data[name] = {'name': name,
                            'latest_generation': 0,
                            'group': True}
        return True

    def delete_host(self, name, group=None):
        try:
            host = self._data[name]
        except KeyError:
            return False

        if group is not None and host['group'] != group:
            return False

        del self._data[name]

        return True
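The in-memory backend mirrors the Connection contract without a database and is intended for tests and debugging, per its docstring. A short example of driving it directly; the name and byte strings are placeholders:

# Driving the KVS backend directly (test/debug use only).
db = KvsDbImpl()

gen = db.set_key('api.host.example', key=b'encrypted-bytes',
                 signature=b'sig-bytes', group=False)
print(gen)                                  # 1: first generation for this name

entry = db.get_key('api.host.example')
print(entry['generation'], entry['group'])  # 1 False

db.delete_host('api.host.example')
print(db.get_key('api.host.example'))       # None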
@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database setup and migration commands."""

from oslo_config import cfg

from kite.common import utils

CONF = cfg.CONF
CONF.import_opt('backend',
                'kite.openstack.common.db.options',
                group='database')

IMPL = utils.LazyPluggable(pivot='backend',
                           config_group='database',
                           sqlalchemy='kite.db.sqlalchemy.migration')

INIT_VERSION = 0


def upgrade(revision=None):
    return IMPL.upgrade(revision=revision)


def downgrade(revision=None):
    return IMPL.downgrade(revision=revision)


def version():
    return IMPL.version()
@ -1,59 +0,0 @@
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = %(here)s/alembic

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

; sqlalchemy.url = driver://user:pass@localhost/dbname


# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
@ -1,62 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import with_statement

from logging import config as log_config

from alembic import context

from kite.db.sqlalchemy import api as db_api
from kite.db.sqlalchemy import models

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
log_config.fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = models.Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = db_api.get_engine()

    with engine.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)

        try:
            with context.begin_transaction():
                context.run_migrations()
        finally:
            connection.close()


run_migrations_online()
@ -1,22 +0,0 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}

"""

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

def upgrade():
    ${upgrades if upgrades else "pass"}


def downgrade():
    ${downgrades if downgrades else "pass"}
@ -1,76 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Initialize Kite Tables

Revision ID: 49c8b865f6b
Revises: None
Create Date: 2014-04-01 14:31:06.415935

"""

# revision identifiers, used by Alembic.
revision = '49c8b865f6b'
down_revision = None

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_table('hosts',
                    sa.Column('id',
                              sa.Integer(),
                              primary_key=True,
                              autoincrement=True),
                    sa.Column('name',
                              sa.Text(),
                              nullable=False),
                    sa.Column('group',
                              sa.Boolean(),
                              nullable=False),
                    sa.Column('latest_generation',
                              sa.Integer(),
                              nullable=False),
                    mysql_engine='InnoDB',
                    mysql_charset='utf8')

    op.create_index('name_idx', 'hosts', ['name'],
                    unique=True, mysql_length=20)

    op.create_table('keys',
                    sa.Column('host_id',
                              sa.Integer(),
                              sa.ForeignKey('hosts.id'),
                              primary_key=True,
                              autoincrement=False),
                    sa.Column('generation',
                              sa.Integer(),
                              primary_key=True,
                              autoincrement=False),
                    sa.Column('signature',
                              sa.LargeBinary(),
                              nullable=False),
                    sa.Column('enc_key',
                              sa.LargeBinary(),
                              nullable=False),
                    sa.Column('expiration',
                              sa.DateTime(),
                              nullable=True,
                              index=True),
                    mysql_engine='InnoDB',
                    mysql_charset='utf8')


def downgrade():
    op.drop_table('keys')
    op.drop_table('hosts')
@ -1,155 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from sqlalchemy.orm import exc

from kite.common import exception
from kite.db import connection
from kite.db.sqlalchemy import models
from kite.openstack.common.db import exception as db_exc
from kite.openstack.common.db.sqlalchemy import session as db_session

CONF = cfg.CONF
_facade = None


def get_facade():
    global _facade
    if not _facade:
        _facade = db_session.EngineFacade.from_config(CONF.database.connection,
                                                      CONF)
    return _facade


def get_engine():
    return get_facade().get_engine()


def get_session():
    return get_facade().get_session()


def reset():
    global _facade
    _facade = None


def get_backend():
    return SqlalchemyDbImpl()


class SqlalchemyDbImpl(connection.Connection):

    def set_key(self, name, key, signature, group, expiration=None):
        session = get_session()

        with session.begin():
            q = session.query(models.Host)
            q = q.filter(models.Host.name == name)

            try:
                host = q.one()
            except exc.NoResultFound:
                host = models.Host(name=name,
                                   latest_generation=0,
                                   group=group)
            else:
                if host.group != group:
                    raise exception.GroupStatusChanged(name=name)

            host.latest_generation += 1
            host.keys.append(models.Key(signature=signature,
                                        enc_key=key,
                                        generation=host.latest_generation,
                                        expiration=expiration))

            session.add(host)

        return host.latest_generation

    def _get_group_data(self, session, name):
        """Return data about a group.

        In the case of getting a key where there is a Group defined but no key
        has yet been defined we are supposed to return the group data without
        a key. This is a difficult query to write as an all in one. This
        function is called when we fail to find a key, to return group data
        if the request host is a group.
        """
        query = session.query(models.Host.name)
        query = query.filter(models.Host.group == True)
        query = query.filter(models.Host.name == name)

        try:
            result = query.one()
        except exc.NoResultFound:
            return None
        else:
            return {'name': result.name,
                    'group': True}

    def get_key(self, name, generation=None, group=None):
        session = get_session()

        query = session.query(models.Host, models.Key)
        query = query.filter(models.Host.id == models.Key.host_id)
        query = query.filter(models.Host.name == name)

        if group is not None:
            query = query.filter(models.Host.group == group)

        if generation is not None:
            query = query.filter(models.Key.generation == generation)
        else:
            query = query.filter(models.Host.latest_generation ==
                                 models.Key.generation)

        try:
            result = query.one()
        except exc.NoResultFound:
            if group is not False and generation is None:
                return self._get_group_data(session, name)
            else:
                return None

        return {'name': result.Host.name,
                'group': result.Host.group,
                'key': result.Key.enc_key,
                'signature': result.Key.signature,
                'generation': result.Key.generation,
                'expiration': result.Key.expiration}

    def create_group(self, name):
        session = get_session()

        try:
            with session.begin():
                group = models.Host(name=name, latest_generation=0, group=True)
                session.add(group)
        except db_exc.DBDuplicateEntry:
            # a group with this name already exists.
            return False

        return True

    def delete_host(self, name, group=None):
        session = get_session()

        with session.begin():
            query = session.query(models.Host).filter(models.Host.name == name)
            if group is not None:
                query = query.filter(models.Host.group == group)

            count = query.delete()

        return count > 0
@ -1,45 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import alembic
from alembic import config as alembic_config
from alembic import migration as alembic_migration

from kite.db.sqlalchemy import api as db_api


def _alembic_config():
    path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
    config = alembic_config.Config(path)
    return config


def upgrade(revision=None, config=None):
    alembic.command.upgrade(config or _alembic_config(), revision or 'head')


def downgrade(revision=None, config=None):
    alembic.command.downgrade(config or _alembic_config(), revision or 'base')


def version(config=None):
    """Current database version.

    :returns: Database version
    :rtype: string
    """
    engine = db_api.get_engine()
    with engine.connect() as conn:
        context = alembic_migration.MigrationContext.configure(conn)
        return context.get_current_revision()
@ -1,60 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sql
from sqlalchemy.ext.declarative import declarative_base

from kite.openstack.common.db.sqlalchemy import models


class KdsBase(models.ModelBase):
    pass


Base = declarative_base(cls=KdsBase)


class Host(Base):
    __tablename__ = 'hosts'

    id = sql.Column(sql.Integer(), primary_key=True, autoincrement=True)
    name = sql.Column(sql.Text(), index=True, unique=True, nullable=False)
    group = sql.Column(sql.Boolean(), nullable=False, index=True)
    latest_generation = sql.Column(sql.Integer(), nullable=False)


class Key(Base):
    __tablename__ = 'keys'

    host_id = sql.Column(sql.Integer(),
                         sql.ForeignKey('hosts.id'),
                         primary_key=True,
                         autoincrement=False)

    generation = sql.Column(sql.Integer(),
                            primary_key=True,
                            autoincrement=False)

    signature = sql.Column(sql.LargeBinary(),
                           nullable=False)

    enc_key = sql.Column(sql.LargeBinary(),
                         nullable=False)

    expiration = sql.Column(sql.DateTime(),
                            nullable=True,
                            index=True)

    owner = sql.orm.relationship('Host',
                                 backref=sql.orm.backref('keys',
                                                         order_by=sql.desc(
                                                             generation)))
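Note (not part of the removed tree): a minimal sketch showing the two models above being created on a throwaway SQLite engine with plain SQLAlchemy.

    import sqlalchemy

    from kite.db.sqlalchemy import models

    engine = sqlalchemy.create_engine('sqlite://')
    models.Base.metadata.create_all(engine)   # emits CREATE TABLE for hosts/keys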
@ -1,17 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six


six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
@ -1,302 +0,0 @@
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Extracts OpenStack config option info from module(s)."""

from __future__ import print_function

import argparse
import imp
import os
import re
import socket
import sys
import textwrap

from oslo_config import cfg
import six
import stevedore.named

from kite.openstack.common import gettextutils
from kite.openstack.common import importutils

gettextutils.install('kite')

STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"

OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    DICTOPT: 'dict value',
    MULTISTROPT: 'multi valued',
}

OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT, DICTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
WORDWRAP_WIDTH = 60


def generate(argv):
    parser = argparse.ArgumentParser(
        description='generate sample configuration file',
    )
    parser.add_argument('-m', dest='modules', action='append')
    parser.add_argument('-l', dest='libraries', action='append')
    parser.add_argument('srcfiles', nargs='*')
    parsed_args = parser.parse_args(argv)

    mods_by_pkg = dict()
    for filepath in parsed_args.srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    if parsed_args.modules:
        for module_name in parsed_args.modules:
            module = _import_module(module_name)
            if module:
                for group, opts in _list_opts(module):
                    opts_by_group.setdefault(group, []).append((module_name,
                                                                opts))

    # Look for entry points defined in libraries (or applications) for
    # option discovery, and include their return values in the output.
    #
    # Each entry point should be a function returning an iterable
    # of pairs with the group name (or None for the default group)
    # and the list of Opt instances for that group.
    if parsed_args.libraries:
        loader = stevedore.named.NamedExtensionManager(
            'oslo.config.opts',
            names=list(set(parsed_args.libraries)),
            invoke_on_load=False,
        )
        for ext in loader:
            for group, opts in ext.plugin():
                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
                opt_list.append((ext.name, opts))

    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]

            mod_obj = _import_module(mod_str)
            if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)

            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group in sorted(opts_by_group.keys()):
        print_group_opts(group, opts_by_group[group])


def _import_module(mod_str):
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except Exception as e:
        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
        return None


def _is_in_group(opt, group):
    "Check if opt is in group."
    for value in group._opts.values():
        # NOTE(llu): Temporary workaround for bug #1262148, wait until
        # newly released oslo.config support '==' operator.
        if not(value['opt'] != opt):
            return True
    return False


def _guess_groups(opt, mod_obj):
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for value in cfg.CONF.values():
        if isinstance(value, cfg.CONF.GroupAttr):
            if _is_in_group(opt, value._group):
                return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )


def _list_opts(obj):
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)

    ret = {}
    for opt in opts:
        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()


def print_group_opts(group, opts_by_module):
    print("[%s]" % group)
    print('')
    for mod, opts in opts_by_module:
        print('#')
        print('# Options defined in %s' % mod)
        print('#')
        print('')
        for opt in opts:
            _print_opt(opt)
        print('')


def _get_my_ip():
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return None


def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
    elif value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in value:
        return value.replace(BASEDIR, '')
    elif value == _get_my_ip():
        return '10.0.0.1'
    elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
        return 'kite'
    elif value.strip() != value:
        return '"%s"' % value
    return value


def _print_opt(opt):
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help = u'%s (%s)' % (opt_help,
                             OPT_TYPES[opt_type])
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    if opt.deprecated_opts:
        for deprecated_opt in opt.deprecated_opts:
            if deprecated_opt.name:
                deprecated_group = (deprecated_opt.group if
                                    deprecated_opt.group else "DEFAULT")
                print('# Deprecated group/name - [%s]/%s' %
                      (deprecated_group,
                       deprecated_opt.name))
    try:
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, six.string_types))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                          opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == DICTOPT:
            assert(isinstance(opt_default, dict))
            opt_default_strlist = [str(key) + ':' + str(value)
                                   for (key, value) in opt_default.items()]
            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)


def main():
    generate(sys.argv[1:])

if __name__ == '__main__':
    main()
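Note (not part of the removed tree): a rough sketch of invoking the generator programmatically; both the generator's import path and the module name passed with -m are assumptions made for illustration.

    from kite.openstack.common.config import generator

    # Print a sample configuration listing for the options defined in the
    # named module to stdout.
    generator.generate(['-m', 'kite.openstack.common.db.options'])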
@ -1,111 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools
import uuid


def generate_request_id():
    return 'req-%s' % str(uuid.uuid4())


class RequestContext(object):

    """Helper class to represent useful information about a request context.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'

    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
                 user_domain=None, project_domain=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None,
                 instance_uuid=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        self.instance_uuid = instance_uuid
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))

        return {'user': self.user,
                'tenant': self.tenant,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id,
                'instance_uuid': self.instance_uuid,
                'user_identity': user_idt}


def get_admin_context(show_deleted=False):
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True
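Note (not part of the removed tree): a minimal sketch of the request context class above; the import path and the user/tenant values are illustrative.

    from kite.openstack.common import context

    ctxt = context.RequestContext(user='alice', tenant='demo')
    assert ctxt.request_id.startswith('req-')
    print(ctxt.to_dict()['user_identity'])    # 'alice demo - - -'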
@ -1,181 +0,0 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64

from Crypto.Hash import HMAC
from Crypto import Random

from kite.openstack.common.gettextutils import _
from kite.openstack.common import importutils


class CryptoutilsException(Exception):
    """Generic Exception for Crypto utilities."""

    message = _("An unknown error occurred in crypto utils.")


class CipherBlockLengthTooBig(CryptoutilsException):
    """The block size is too big."""

    def __init__(self, requested, permitted):
        msg = _("Block size of %(given)d is too big, max = %(maximum)d")
        message = msg % {'given': requested, 'maximum': permitted}
        super(CryptoutilsException, self).__init__(message)


class HKDFOutputLengthTooLong(CryptoutilsException):
    """The amount of Key Material asked is too much."""

    def __init__(self, requested, permitted):
        msg = _("Length of %(given)d is too long, max = %(maximum)d")
        message = msg % {'given': requested, 'maximum': permitted}
        super(CryptoutilsException, self).__init__(message)


class HKDF(object):
    """An HMAC-based Key Derivation Function implementation (RFC5869)

    This class creates an object that allows to use HKDF to derive keys.
    """

    def __init__(self, hashtype='SHA256'):
        self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
        self.max_okm_length = 255 * self.hashfn.digest_size

    def extract(self, ikm, salt=None):
        """An extract function that can be used to derive a robust key given
        weak Input Key Material (IKM) which could be a password.
        Returns a pseudorandom key (of HashLen octets)

        :param ikm: input keying material (ex a password)
        :param salt: optional salt value (a non-secret random value)
        """
        if salt is None:
            salt = '\x00' * self.hashfn.digest_size

        return HMAC.new(salt, ikm, self.hashfn).digest()

    def expand(self, prk, info, length):
        """An expand function that will return arbitrary length output that can
        be used as keys.
        Returns a buffer usable as key material.

        :param prk: a pseudorandom key of at least HashLen octets
        :param info: optional string (can be a zero-length string)
        :param length: length of output keying material (<= 255 * HashLen)
        """
        if length > self.max_okm_length:
            raise HKDFOutputLengthTooLong(length, self.max_okm_length)

        N = (length + self.hashfn.digest_size - 1) / self.hashfn.digest_size

        okm = ""
        tmp = ""
        for block in range(1, N + 1):
            tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest()
            okm += tmp

        return okm[:length]


MAX_CB_SIZE = 256


class SymmetricCrypto(object):
    """Symmetric Key Crypto object.

    This class creates a Symmetric Key Crypto object that can be used
    to encrypt, decrypt, or sign arbitrary data.

    :param enctype: Encryption Cipher name (default: AES)
    :param hashtype: Hash/HMAC type name (default: SHA256)
    """

    def __init__(self, enctype='AES', hashtype='SHA256'):
        self.cipher = importutils.import_module('Crypto.Cipher.' + enctype)
        self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)

    def new_key(self, size):
        return Random.new().read(size)

    def encrypt(self, key, msg, b64encode=True):
        """Encrypt the provided msg and returns the cyphertext optionally
        base64 encoded.

        Uses AES-128-CBC with a Random IV by default.

        The plaintext is padded to reach blocksize length.
        The last byte of the block is the length of the padding.
        The length of the padding does not include the length byte itself.

        :param key: The Encryption key.
        :param msg: the plain text.

        :returns enc: a block of encrypted data.
        """
        iv = Random.new().read(self.cipher.block_size)
        cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)

        # CBC mode requires a fixed block size. Append padding and length of
        # padding.
        if self.cipher.block_size > MAX_CB_SIZE:
            raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE)
        r = len(msg) % self.cipher.block_size
        padlen = self.cipher.block_size - r - 1
        msg += '\x00' * padlen
        msg += chr(padlen)

        enc = iv + cipher.encrypt(msg)
        if b64encode:
            enc = base64.b64encode(enc)
        return enc

    def decrypt(self, key, msg, b64decode=True):
        """Decrypts the provided ciphertext, optionally base64 encoded, and
        returns the plaintext message, after padding is removed.

        Uses AES-128-CBC with an IV by default.

        :param key: The Encryption key.
        :param msg: the ciphetext, the first block is the IV

        :returns plain: the plaintext message.
        """
        if b64decode:
            msg = base64.b64decode(msg)
        iv = msg[:self.cipher.block_size]
        cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)

        padded = cipher.decrypt(msg[self.cipher.block_size:])
        l = ord(padded[-1]) + 1
        plain = padded[:-l]
        return plain

    def sign(self, key, msg, b64encode=True):
        """Signs a message string and returns a base64 encoded signature.

        Uses HMAC-SHA-256 by default.

        :param key: The Signing key.
        :param msg: the message to sign.

        :returns out: a base64 encoded signature.
        """
        h = HMAC.new(key, msg, self.hashfn)
        out = h.digest()
        if b64encode:
            out = base64.b64encode(out)
        return out
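Note (not part of the removed tree): a minimal sketch of the HKDF and SymmetricCrypto helpers above, written for the Python 2 / PyCrypto environment this code targeted; the import path is a guess and the inputs are illustrative.

    from kite.openstack.common.crypto import utils as cryptoutils

    hkdf = cryptoutils.HKDF()
    prk = hkdf.extract('weak input keying material', salt='not-so-random-salt')
    key = hkdf.expand(prk, 'kite-example-info', 16)      # 16-byte AES key

    crypto = cryptoutils.SymmetricCrypto()
    token = crypto.encrypt(key, 'attack at dawn')        # base64 ciphertext
    assert crypto.decrypt(key, token) == 'attack at dawn'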
@ -1,162 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multiple DB API backend support.

A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
"""

import functools
import logging
import threading
import time

from kite.openstack.common.db import exception
from kite.openstack.common.gettextutils import _LE
from kite.openstack.common import importutils


LOG = logging.getLogger(__name__)


def safe_for_db_retry(f):
    """Enable db-retry for decorated function, if config option enabled."""
    f.__dict__['enable_retry'] = True
    return f


class wrap_db_retry(object):
    """Retry db.api methods, if DBConnectionError() raised

    Retry decorated db.api methods. If we enabled `use_db_reconnect`
    in config, this decorator will be applied to all db.api functions,
    marked with @safe_for_db_retry decorator.
    Decorator catchs DBConnectionError() and retries function in a
    loop until it succeeds, or until maximum retries count will be reached.
    """

    def __init__(self, retry_interval, max_retries, inc_retry_interval,
                 max_retry_interval):
        super(wrap_db_retry, self).__init__()

        self.retry_interval = retry_interval
        self.max_retries = max_retries
        self.inc_retry_interval = inc_retry_interval
        self.max_retry_interval = max_retry_interval

    def __call__(self, f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            next_interval = self.retry_interval
            remaining = self.max_retries

            while True:
                try:
                    return f(*args, **kwargs)
                except exception.DBConnectionError as e:
                    if remaining == 0:
                        LOG.exception(_LE('DB exceeded retry limit.'))
                        raise exception.DBError(e)
                    if remaining != -1:
                        remaining -= 1
                        LOG.exception(_LE('DB connection error.'))
                    # NOTE(vsergeyev): We are using patched time module, so
                    #                  this effectively yields the execution
                    #                  context to another green thread.
                    time.sleep(next_interval)
                    if self.inc_retry_interval:
                        next_interval = min(
                            next_interval * 2,
                            self.max_retry_interval
                        )
        return wrapper


class DBAPI(object):
    def __init__(self, backend_name, backend_mapping=None, lazy=False,
                 **kwargs):
        """Initialize the chosen DB API backend.

        :param backend_name: name of the backend to load
        :type backend_name: str

        :param backend_mapping: backend name -> module/class to load mapping
        :type backend_mapping: dict

        :param lazy: load the DB backend lazily on the first DB API method call
        :type lazy: bool

        Keyword arguments:

        :keyword use_db_reconnect: retry DB transactions on disconnect or not
        :type use_db_reconnect: bool

        :keyword retry_interval: seconds between transaction retries
        :type retry_interval: int

        :keyword inc_retry_interval: increase retry interval or not
        :type inc_retry_interval: bool

        :keyword max_retry_interval: max interval value between retries
        :type max_retry_interval: int

        :keyword max_retries: max number of retries before an error is raised
        :type max_retries: int

        """

        self._backend = None
        self._backend_name = backend_name
        self._backend_mapping = backend_mapping or {}
        self._lock = threading.Lock()

        if not lazy:
            self._load_backend()

        self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
        self.retry_interval = kwargs.get('retry_interval', 1)
        self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
        self.max_retry_interval = kwargs.get('max_retry_interval', 10)
        self.max_retries = kwargs.get('max_retries', 20)

    def _load_backend(self):
        with self._lock:
            if not self._backend:
                # Import the untranslated name if we don't have a mapping
                backend_path = self._backend_mapping.get(self._backend_name,
                                                         self._backend_name)
                backend_mod = importutils.import_module(backend_path)
                self._backend = backend_mod.get_backend()

    def __getattr__(self, key):
        if not self._backend:
            self._load_backend()

        attr = getattr(self._backend, key)
        if not hasattr(attr, '__call__'):
            return attr
        # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
        #                  DB API methods, decorated with @safe_for_db_retry
        #                  on disconnect.
        if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
            attr = wrap_db_retry(
                retry_interval=self.retry_interval,
                max_retries=self.max_retries,
                inc_retry_interval=self.inc_retry_interval,
                max_retry_interval=self.max_retry_interval)(attr)

        return attr
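Note (not part of the removed tree): a minimal sketch of wiring the DBAPI loader above to a backend module; the mapping value mirrors kite's layout but is an assumption here.

    from kite.openstack.common.db import api as db_api

    IMPL = db_api.DBAPI('sqlalchemy',
                        backend_mapping={'sqlalchemy': 'kite.db.sqlalchemy.api'},
                        lazy=True,
                        use_db_reconnect=True,
                        max_retries=5)
    # Attribute access triggers the lazy import and, for methods marked with
    # @safe_for_db_retry, wraps them in wrap_db_retry:
    # IMPL.create_group('scheduler')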
@ -1,56 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

import six

from kite.openstack.common.gettextutils import _


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(six.text_type(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific exception."""
    def __init__(self, columns=[], inner_exception=None):
        self.columns = columns
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


class DbMigrationError(DBError):
    """Wraps migration specific exception."""
    def __init__(self, message=None):
        super(DbMigrationError, self).__init__(message)


class DBConnectionError(DBError):
    """Wraps connection specific exception."""
    pass
@ -1,171 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo_config import cfg


database_opts = [
    cfg.StrOpt('sqlite_db',
               deprecated_group='DEFAULT',
               default='kite.sqlite',
               help='The file name to use with SQLite'),
    cfg.BoolOpt('sqlite_synchronous',
                deprecated_group='DEFAULT',
                default=True,
                help='If True, SQLite uses synchronous mode'),
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
    cfg.StrOpt('connection',
               help='The SQLAlchemy connection string used to connect to the '
                    'database',
               secret=True,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_connection',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('connection',
                                                  group='sql'), ]),
    cfg.StrOpt('mysql_sql_mode',
               default='TRADITIONAL',
               help='The SQL mode to be used for MySQL sessions. '
                    'This option, including the default, overrides any '
                    'server-set SQL mode. To use whatever SQL mode '
                    'is set by the server configuration, '
                    'set this to no value. Example: mysql_sql_mode='),
    cfg.IntOpt('idle_timeout',
               default=3600,
               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('idle_timeout',
                                                  group='sql')],
               help='Timeout before idle sql connections are reaped'),
    cfg.IntOpt('min_pool_size',
               default=1,
               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DATABASE')],
               help='Minimum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_pool_size',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DATABASE')],
               help='Maximum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_retries',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DATABASE')],
               help='Maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('retry_interval',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('reconnect_interval',
                                                  group='DATABASE')],
               help='Interval between retries of opening a sql connection'),
    cfg.IntOpt('max_overflow',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
                                                  group='DATABASE')],
               help='If set, use this value for max_overflow with sqlalchemy'),
    cfg.IntOpt('connection_debug',
               default=0,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
                                                  group='DEFAULT')],
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('connection_trace',
                default=False,
                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
                                                   group='DEFAULT')],
                help='Add python stack traces to SQL as comment strings'),
    cfg.IntOpt('pool_timeout',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
                                                  group='DATABASE')],
               help='If set, use this value for pool_timeout with sqlalchemy'),
    cfg.BoolOpt('use_db_reconnect',
                default=False,
                help='Enable the experimental use of database reconnect '
                     'on connection lost'),
    cfg.IntOpt('db_retry_interval',
               default=1,
               help='seconds between db connection retries'),
    cfg.BoolOpt('db_inc_retry_interval',
                default=True,
                help='Whether to increase interval between db connection '
                     'retries, up to db_max_retry_interval'),
    cfg.IntOpt('db_max_retry_interval',
               default=10,
               help='max seconds between db connection retries, if '
                    'db_inc_retry_interval is enabled'),
    cfg.IntOpt('db_max_retries',
               default=20,
               help='maximum db connection retries before error is raised. '
                    '(setting -1 implies an infinite retry count)'),
]

CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')


def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
                 max_overflow=None, pool_timeout=None):
    """Set defaults for configuration variables."""
    cfg.set_defaults(database_opts,
                     connection=sql_connection,
                     sqlite_db=sqlite_db)
    # Update the QueuePool defaults
    if max_pool_size is not None:
        cfg.set_defaults(database_opts,
                         max_pool_size=max_pool_size)
    if max_overflow is not None:
        cfg.set_defaults(database_opts,
                         max_overflow=max_overflow)
    if pool_timeout is not None:
        cfg.set_defaults(database_opts,
                         pool_timeout=pool_timeout)


def list_opts():
    """Returns a list of oslo.config options available in the library.

    The returned list includes all oslo.config options which may be registered
    at runtime by the library.

    Each element of the list is a tuple. The first element is the name of the
    group under which the list of elements in the second element will be
    registered. A group name of None corresponds to the [DEFAULT] group in
    config files.

    The purpose of this is to allow tools like the Oslo sample config file
    generator to discover the options exposed to users by this library.

    :returns: a list of (group_name, opts) tuples
    """
    return [('database', copy.deepcopy(database_opts))]
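Note (not part of the removed tree): a minimal sketch of reading the options registered above through oslo.config; the import path is assumed and no configuration files are loaded, so the defaults apply.

    from oslo_config import cfg

    from kite.openstack.common.db import options  # noqa: import registers opts

    cfg.CONF([], project='kite')
    print(cfg.CONF.database.sqlite_db)      # 'kite.sqlite'
    print(cfg.CONF.database.max_retries)    # 10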
@ -1,268 +0,0 @@
# coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import re

from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint

from kite.openstack.common.db import exception
from kite.openstack.common.gettextutils import _


def _get_unique_constraints(self, table):
    """Retrieve information about existing unique constraints of the table

    This feature is needed for _recreate_table() to work properly.
    Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.

    """

    data = table.metadata.bind.execute(
        """SELECT sql
           FROM sqlite_master
           WHERE
               type='table' AND
               name=:table_name""",
        table_name=table.name
    ).fetchone()[0]

    UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
    return [
        UniqueConstraint(
            *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(UNIQUE_PATTERN, data)
    ]


def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
    """Recreate the table properly

    Unlike the corresponding original method of sqlalchemy-migrate this one
    doesn't drop existing unique constraints when creating a new one.

    """

    table_name = self.preparer.format_table(table)

    # we remove all indexes so as not to have
    # problems during copy and re-create
    for index in table.indexes:
        index.drop()

    # reflect existing unique constraints
    for uc in self._get_unique_constraints(table):
        table.append_constraint(uc)
    # omit given unique constraints when creating a new table if required
    table.constraints = set([
        cons for cons in table.constraints
        if omit_uniques is None or cons.name not in omit_uniques
    ])

    self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
    self.execute()

    insertion_string = self._modify_table(table, column, delta)

    table.create(bind=self.connection)
    self.append(insertion_string % {'table_name': table_name})
    self.execute()
    self.append('DROP TABLE migration_tmp')
    self.execute()


def _visit_migrate_unique_constraint(self, *p, **k):
    """Drop the given unique constraint

    The corresponding original method of sqlalchemy-migrate just
    raises NotImplemented error

    """

    self.recreate_table(p[0].table, omit_uniques=[p[0].name])


def patch_migrate():
    """A workaround for SQLite's inability to alter things

    SQLite abilities to alter tables are very limited (please read
    http://www.sqlite.org/lang_altertable.html for more details).
    E. g. one can't drop a column or a constraint in SQLite. The
    workaround for this is to recreate the original table omitting
    the corresponding constraint (or column).

    sqlalchemy-migrate library has recreate_table() method that
    implements this workaround, but it does it wrong:

    - information about unique constraints of a table
      is not retrieved. So if you have a table with one
      unique constraint and a migration adding another one
      you will end up with a table that has only the
      latter unique constraint, and the former will be lost

    - dropping of unique constraints is not supported at all

    The proper way to fix this is to provide a pull-request to
    sqlalchemy-migrate, but the project seems to be dead. So we
    can go on with monkey-patching of the lib at least for now.

    """

    # this patch is needed to ensure that recreate_table() doesn't drop
    # existing unique constraints of the table when creating a new one
    helper_cls = sqlite.SQLiteHelper
    helper_cls.recreate_table = _recreate_table
    helper_cls._get_unique_constraints = _get_unique_constraints

    # this patch is needed to be able to drop existing unique constraints
    constraint_cls = sqlite.SQLiteConstraintDropper
    constraint_cls.visit_migrate_unique_constraint = \
        _visit_migrate_unique_constraint
    constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
                                sqlite.SQLiteConstraintGenerator)


def db_sync(engine, abs_path, version=None, init_version=0):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository.
    :param version: Database will upgrade/downgrade until this version.
                    If None - database will update to the latest
                    available version.
    :param init_version: Initial database version
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    _db_schema_sanity_check(engine)
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository,
                                        version)


def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine: SQLAlchemy engine instance for a given database

    """

    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
                                                        engine.url.database)]
        if len(table_names) > 0:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))


def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version: Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))


def db_version_control(engine, abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version: Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    versioning_api.version_control(engine, repository, version)
    return version


def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    """
    if not os.path.exists(abs_path):
        raise exception.DbMigrationError("Path %s not found" % abs_path)
    return Repository(abs_path)
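Note (not part of the removed tree): a minimal sketch of driving the sqlalchemy-migrate helpers above; the repository path is illustrative and sqlalchemy-migrate must be installed.

    import sqlalchemy

    from kite.openstack.common.db.sqlalchemy import migration

    engine = sqlalchemy.create_engine('sqlite:///kite-test.sqlite')
    migration.patch_migrate()                           # work around SQLite ALTER limits
    migration.db_sync(engine, '/path/to/migrate_repo')  # upgrade to latest
    print(migration.db_version(engine, '/path/to/migrate_repo', init_version=0))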
@ -1,119 +0,0 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""

import six

from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper

from kite.openstack.common import timeutils


class ModelBase(six.Iterator):
    """Base class for models."""
    __table_initialized__ = False

    def save(self, session):
        """Save this object."""

        # NOTE(boris-42): This part of code should be look like:
        #                       session.add(self)
        #                       session.flush()
        #                 But there is a bug in sqlalchemy and eventlet that
        #                 raises NoneType exception if there is no running
        #                 transaction and rollback is called. As long as
        #                 sqlalchemy has this bug we have to create transaction
        #                 explicitly.
        with session.begin(subtransactions=True):
            session.add(self)
            session.flush()

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    @property
    def _extra_keys(self):
        """Specifies custom fields

        Subclasses can override this property to return a list
        of custom fields that should be included in their dict
        representation.

        For reference check tests/db/sqlalchemy/test_models.py
        """
        return []

    def __iter__(self):
        columns = dict(object_mapper(self).columns).keys()
        # NOTE(russellb): Allow models to specify other keys that can be looked
        # up, beyond the actual db columns. An example would be the 'name'
        # property for an Instance.
        columns.extend(self._extra_keys)
        self._i = iter(columns)
        return self

    # In Python 3, __next__() has replaced next().
    def __next__(self):
        n = six.advance_iterator(self._i)
        return n, getattr(self, n)

    def next(self):
        return self.__next__()

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in six.iteritems(values):
            setattr(self, k, v)

    def iteritems(self):
        """Make the model object behave like a dict.

        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
                       if not k[0] == '_'])
        local.update(joined)
        return six.iteritems(local)


class TimestampMixin(object):
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())


class SoftDeleteMixin(object):
    deleted_at = Column(DateTime)
    deleted = Column(Integer, default=0)

    def soft_delete(self, session):
        """Mark this object as deleted."""
        self.deleted = self.id
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)
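Note (not part of the removed tree): a minimal sketch of a declarative model built on the base class and mixins above; the table and columns are invented for illustration.

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    from kite.openstack.common.db.sqlalchemy import models

    Base = declarative_base(cls=models.ModelBase)


    class Widget(Base, models.TimestampMixin, models.SoftDeleteMixin):
        __tablename__ = 'widgets'

        id = sa.Column(sa.Integer, primary_key=True)
        name = sa.Column(sa.String(255))

    # ModelBase provides dict-style access:
    # w = Widget(); w['name'] = 'demo'; print(w.get('name'))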
@ -1,157 +0,0 @@
|
|||
# Copyright 2013 Mirantis.inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Provision test environment for specific DB backends"""
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
|
||||
from six import moves
|
||||
import sqlalchemy
|
||||
|
||||
from kite.openstack.common.db import exception as exc
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_engine(uri):
|
||||
"""Engine creation
|
||||
|
||||
Call the function without arguments to get admin connection. Admin
|
||||
connection required to create temporary user and database for each
|
||||
particular test. Otherwise use existing connection to recreate connection
|
||||
to the temporary database.
|
||||
"""
|
||||
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
|
||||
|
||||
|
||||
def _execute_sql(engine, sql, driver):
|
||||
"""Initialize connection, execute sql query and close it."""
|
||||
try:
|
||||
with engine.connect() as conn:
|
||||
if driver == 'postgresql':
|
||||
conn.connection.set_isolation_level(0)
|
||||
for s in sql:
|
||||
conn.execute(s)
|
||||
except sqlalchemy.exc.OperationalError:
|
||||
msg = ('%s does not match database admin '
|
||||
'credentials or database does not exist.')
|
||||
LOG.exception(msg % engine.url)
|
||||
raise exc.DBConnectionError(msg % engine.url)
|
||||
|
||||
|
||||
def create_database(engine):
|
||||
"""Provide temporary user and database for each particular test."""
|
||||
driver = engine.name
|
||||
|
||||
auth = {
|
||||
'database': ''.join(random.choice(string.ascii_lowercase)
|
||||
for i in moves.range(10)),
|
||||
'user': engine.url.username,
|
||||
'passwd': engine.url.password,
|
||||
}
|
||||
|
||||
sqls = [
|
||||
"drop database if exists %(database)s;",
|
||||
"create database %(database)s;"
|
||||
]
|
||||
|
||||
if driver == 'sqlite':
|
||||
return 'sqlite:////tmp/%s' % auth['database']
|
||||
elif driver in ['mysql', 'postgresql']:
|
||||
sql_query = map(lambda x: x % auth, sqls)
|
||||
_execute_sql(engine, sql_query, driver)
|
||||
else:
|
||||
raise ValueError('Unsupported RDBMS %s' % driver)
|
||||
|
||||
params = auth.copy()
|
||||
params['backend'] = driver
|
||||
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
|
||||
|
||||
|
||||
def drop_database(admin_engine, current_uri):
|
||||
"""Drop temporary database and user after each particular test."""
|
||||
|
||||
engine = get_engine(current_uri)
|
||||
driver = engine.name
|
||||
auth = {'database': engine.url.database, 'user': engine.url.username}
|
||||
|
||||
if driver == 'sqlite':
|
||||
try:
|
||||
os.remove(auth['database'])
|
||||
except OSError:
|
||||
pass
|
||||
elif driver in ['mysql', 'postgresql']:
|
||||
sql = "drop database if exists %(database)s;"
|
||||
_execute_sql(admin_engine, [sql % auth], driver)
|
||||
else:
|
||||
raise ValueError('Unsupported RDBMS %s' % driver)
|
||||
|
||||
|
||||
def main():
|
||||
"""Controller to handle commands
|
||||
|
||||
::create: Create test user and database with random names.
|
||||
::drop: Drop user and database created by previous command.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Controller to handle database creation and dropping'
|
||||
' commands.',
|
||||
epilog='Under normal circumstances this is not used directly.'
|
||||
' Used in .testr.conf to automate test database creation'
|
||||
' and dropping processes.')
|
||||
subparsers = parser.add_subparsers(
|
||||
help='Subcommands to manipulate temporary test databases.')
|
||||
|
||||
create = subparsers.add_parser(
|
||||
'create',
|
||||
help='Create temporary test '
|
||||
'databases and users.')
|
||||
create.set_defaults(which='create')
|
||||
create.add_argument(
|
||||
'instances_count',
|
||||
type=int,
|
||||
help='Number of databases to create.')
|
||||
|
||||
drop = subparsers.add_parser(
|
||||
'drop',
|
||||
help='Drop temporary test databases and users.')
|
||||
drop.set_defaults(which='drop')
|
||||
drop.add_argument(
|
||||
'instances',
|
||||
nargs='+',
|
||||
help='List of database URIs to be dropped.')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
|
||||
'sqlite://')
|
||||
engine = get_engine(connection_string)
|
||||
which = args.which
|
||||
|
||||
if which == "create":
|
||||
for i in range(int(args.instances_count)):
|
||||
print(create_database(engine))
|
||||
elif which == "drop":
|
||||
for db in args.instances:
|
||||
drop_database(engine, db)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,897 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Session Handling for SQLAlchemy backend.
|
||||
|
||||
Recommended ways to use sessions within this framework:
|
||||
|
||||
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
|
||||
`model_query()` will implicitly use a session when called without one
|
||||
supplied. This is the ideal situation because it will allow queries
|
||||
to be automatically retried if the database connection is interrupted.
|
||||
|
||||
.. note:: Automatic retry will be enabled in a future patch.
|
||||
|
||||
It is generally fine to issue several queries in a row like this. Even though
|
||||
they may be run in separate transactions and/or separate sessions, each one
|
||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
||||
functionality should be handled at a logical level. For an example, look at
|
||||
the code around quotas and `reservation_rollback()`.
|
||||
|
||||
Examples:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def get_foo(context, foo):
|
||||
return (model_query(context, models.Foo).
|
||||
filter_by(foo=foo).
|
||||
first())
|
||||
|
||||
def update_foo(context, id, newfoo):
|
||||
(model_query(context, models.Foo).
|
||||
filter_by(id=id).
|
||||
update({'foo': newfoo}))
|
||||
|
||||
def create_foo(context, values):
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(values)
|
||||
foo_ref.save()
|
||||
return foo_ref
|
||||
|
||||
|
||||
* Within the scope of a single method, keep all the reads and writes within
|
||||
the context managed by a single session. In this way, the session's
|
||||
`__exit__` handler will take care of calling `flush()` and `commit()` for
|
||||
you. If using this approach, you should not explicitly call `flush()` or
|
||||
`commit()`. Any error within the context of the session will cause the
|
||||
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
|
||||
raised in `session`'s `__exit__` handler, and any try/except within the
|
||||
context managed by `session` will not be triggered. And catching other
|
||||
non-database errors in the session will not trigger the ROLLBACK, so
|
||||
exception handlers should always be outside the session, unless the
|
||||
developer wants to do a partial commit on purpose. If the connection is
|
||||
dropped before this is possible, the database will implicitly roll back the
|
||||
transaction.
|
||||
|
||||
.. note:: Statements in the session scope will not be automatically retried.
|
||||
|
||||
If you create models within the session, they need to be added, but you
|
||||
do not need to call `model.save()`:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def create_many_foo(context, foos):
|
||||
session = sessionmaker()
|
||||
with session.begin():
|
||||
for foo in foos:
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(foo)
|
||||
session.add(foo_ref)
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
session = sessionmaker()
|
||||
with session.begin():
|
||||
foo_ref = (model_query(context, models.Foo, session).
|
||||
filter_by(id=foo_id).
|
||||
first())
|
||||
(model_query(context, models.Bar, session).
|
||||
filter_by(id=foo_ref['bar_id']).
|
||||
update({'bar': newbar}))
|
||||
|
||||
.. note:: `update_bar` is a trivially simple example of using
|
||||
``with session.begin``. Whereas `create_many_foo` is a good example of
|
||||
when a transaction is needed, it is always best to use as few queries as
|
||||
possible.
|
||||
|
||||
The two queries in `update_bar` can be better expressed using a single query
|
||||
which avoids the need for an explicit transaction. It can be expressed like
|
||||
so:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
subq = (model_query(context, models.Foo.id).
|
||||
filter_by(id=foo_id).
|
||||
limit(1).
|
||||
subquery())
|
||||
(model_query(context, models.Bar).
|
||||
filter_by(id=subq.as_scalar()).
|
||||
update({'bar': newbar}))
|
||||
|
||||
For reference, this emits approximately the following SQL statement:
|
||||
|
||||
.. code:: sql
|
||||
|
||||
UPDATE bar SET bar = ${newbar}
|
||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||
|
||||
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here we create two instances
with the same primary key; the exception must be caught outside the context
managed by the session:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def create_duplicate_foo(context):
|
||||
foo1 = models.Foo()
|
||||
foo2 = models.Foo()
|
||||
foo1.id = foo2.id = 1
|
||||
session = sessionmaker()
|
||||
try:
|
||||
with session.begin():
|
||||
session.add(foo1)
|
||||
session.add(foo2)
|
||||
except exception.DBDuplicateEntry as e:
|
||||
handle_error(e)
|
||||
|
||||
* Passing an active session between methods. Sessions should only be passed
|
||||
to private methods. The private method must use a subtransaction; otherwise
|
||||
SQLAlchemy will throw an error when you call `session.begin()` on an existing
|
||||
transaction. Public methods should not accept a session parameter and should
|
||||
not be involved in sessions within the caller's scope.
|
||||
|
||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
||||
due to nesting transactions, and it is not possible to implicitly retry
|
||||
failed database operations when using this approach.
|
||||
|
||||
This also makes code somewhat more difficult to read and debug, because a
|
||||
single database transaction spans more than one method. Error handling
|
||||
becomes less clear in this situation. When this is needed for code clarity,
|
||||
it should be clearly documented.
|
||||
|
||||
.. code:: python
|
||||
|
||||
def myfunc(foo):
|
||||
session = sessionmaker()
|
||||
with session.begin():
|
||||
# do some database things
|
||||
bar = _private_func(foo, session)
|
||||
return bar
|
||||
|
||||
def _private_func(foo, session=None):
|
||||
if not session:
|
||||
session = sessionmaker()
|
||||
with session.begin(subtransaction=True):
|
||||
# do some other database things
|
||||
return bar
|
||||
|
||||
|
||||
There are some things which it is best to avoid:
|
||||
|
||||
* Don't keep a transaction open any longer than necessary.
|
||||
|
||||
This means that your ``with session.begin()`` block should be as short
|
||||
as possible, while still containing all the related calls for that
|
||||
transaction.
|
||||
|
||||
* Avoid ``with_lockmode('UPDATE')`` when possible.
|
||||
|
||||
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
|
||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
||||
"gap" where no rows exist, and prevents any other writes to that space.
|
||||
This can effectively prevent any INSERT into a table by locking the gap
|
||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
||||
|
||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
||||
number of rows matching a query, and if only one row is returned,
|
||||
then issue the SELECT FOR UPDATE.
|
||||
|
||||
The better long-term solution is to use
|
||||
``INSERT .. ON DUPLICATE KEY UPDATE``.
|
||||
However, this can not be done until the "deleted" columns are removed and
|
||||
proper UNIQUE constraints are added to the tables.
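A rough sketch of the SELECT-then-lock idea mentioned above (``models.Bar``
and ``some_condition`` are illustrative placeholders, not names defined in
this module):

.. code:: python

    def get_bar_for_update(context, some_condition):
        query = model_query(context, models.Bar).filter_by(**some_condition)
        # Probe with a plain SELECT first; an unmatched
        # SELECT ... FOR UPDATE would take an InnoDB gap-lock.
        if query.count() != 1:
            return None
        return query.with_lockmode('update').first()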
|
||||
|
||||
|
||||
Enabling soft deletes:
|
||||
|
||||
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
|
||||
to your model class. For example:
|
||||
|
||||
.. code:: python
|
||||
|
||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
||||
pass
|
||||
|
||||
|
||||
Efficient use of soft deletes:
|
||||
|
||||
* There are two possible ways to mark a record as deleted:
|
||||
`model.soft_delete()` and `query.soft_delete()`.
|
||||
|
||||
The `model.soft_delete()` method works with a single already-fetched entry.
|
||||
`query.soft_delete()` makes only one db request for all entries that
|
||||
correspond to the query.
|
||||
|
||||
* In almost all cases you should use `query.soft_delete()`. Some examples:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def soft_delete_bar():
|
||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
||||
if session is None:
|
||||
session = sessionmaker()
|
||||
with session.begin(subtransactions=True):
|
||||
count = (model_query(BarModel).
|
||||
find(some_condition).
|
||||
soft_delete(synchronize_session=True))
|
||||
# Here synchronize_session is required, because we
|
||||
# don't know what is going on in outer session.
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
* There is only one situation where `model.soft_delete()` is appropriate: when
|
||||
you fetch a single record, work with it, and mark it as deleted in the same
|
||||
transaction.
|
||||
|
||||
.. code:: python
|
||||
|
||||
def soft_delete_bar_model():
|
||||
session = sessionmaker()
|
||||
with session.begin():
|
||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
||||
# Work with bar_ref
|
||||
bar_ref.soft_delete(session=session)
|
||||
|
||||
However, if you need to work with all entries that correspond to query and
|
||||
then soft delete them you should use the `query.soft_delete()` method:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def soft_delete_multi_models():
|
||||
session = sessionmaker()
|
||||
with session.begin():
|
||||
query = (model_query(BarModel, session=session).
|
||||
find(some_condition))
|
||||
model_refs = query.all()
|
||||
# Work with model_refs
|
||||
query.soft_delete(synchronize_session=False)
|
||||
# synchronize_session=False should be set if there is no outer
|
||||
# session and these entries are not used after this.
|
||||
|
||||
When working with many rows, it is very important to use query.soft_delete,
|
||||
which issues a single query. Using `model.soft_delete()`, as in the following
|
||||
example, is very inefficient.
|
||||
|
||||
.. code:: python
|
||||
|
||||
for bar_ref in bar_refs:
|
||||
bar_ref.soft_delete(session=session)
|
||||
# This will produce count(bar_refs) db requests.
|
||||
|
||||
"""
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
|
||||
import six
|
||||
from sqlalchemy import exc as sqla_exc
|
||||
from sqlalchemy.interfaces import PoolListener
|
||||
import sqlalchemy.orm
|
||||
from sqlalchemy.pool import NullPool, StaticPool
|
||||
from sqlalchemy.sql.expression import literal_column
|
||||
|
||||
from kite.openstack.common.db import exception
|
||||
from kite.openstack.common.gettextutils import _LE, _LW
|
||||
from kite.openstack.common import timeutils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SqliteForeignKeysListener(PoolListener):
|
||||
"""Ensures that the foreign key constraints are enforced in SQLite.
|
||||
|
||||
The foreign key constraints are disabled by default in SQLite,
|
||||
so they are enabled here for every database connection.
|
||||
"""
|
||||
def connect(self, dbapi_con, con_record):
|
||||
dbapi_con.execute('pragma foreign_keys=ON')
|
||||
|
||||
|
||||
# note(boris-42): In current versions of DB backends unique constraint
|
||||
# violation messages follow the structure:
|
||||
#
|
||||
# sqlite:
|
||||
# 1 column - (IntegrityError) column c1 is not unique
|
||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
||||
#
|
||||
# sqlite since 3.7.16:
|
||||
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
|
||||
#
|
||||
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
|
||||
#
|
||||
# postgres:
|
||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "users_c1_key"
|
||||
# N columns - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "name_of_our_constraint"
|
||||
#
|
||||
# mysql:
|
||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
||||
# 'c1'")
|
||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
||||
# with -' for key 'name_of_our_constraint'")
|
||||
#
|
||||
# ibm_db_sa:
|
||||
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
|
||||
# statement, UPDATE statement, or foreign key update caused by a
|
||||
# DELETE statement are not valid because the primary key, unique
|
||||
# constraint or unique index identified by "2" constrains table
|
||||
# "NOVA.KEY_PAIRS" from having duplicate values for the index
|
||||
# key.
|
||||
_DUP_KEY_RE_DB = {
|
||||
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
||||
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
|
||||
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
|
||||
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
|
||||
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
|
||||
}
|
||||
|
||||
|
||||
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
|
||||
"""Raise exception if two entries are duplicated.
|
||||
|
||||
In this function will be raised DBDuplicateEntry exception if integrity
|
||||
error wrap unique constraint violation.
|
||||
"""
|
||||
|
||||
def get_columns_from_uniq_cons_or_name(columns):
|
||||
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
|
||||
# where `t` is the table name and `c1`, `c2` are the
# columns in the UniqueConstraint.
|
||||
uniqbase = "uniq_"
|
||||
if not columns.startswith(uniqbase):
|
||||
if engine_name == "postgresql":
|
||||
return [columns[columns.index("_") + 1:columns.rindex("_")]]
|
||||
return [columns]
|
||||
return columns[len(uniqbase):].split("0")[1:]
|
||||
|
||||
if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
|
||||
return
|
||||
|
||||
# FIXME(johannes): The usage of the .message attribute has been
|
||||
# deprecated since Python 2.6. However, the exceptions raised by
|
||||
# SQLAlchemy can differ when using unicode() and accessing .message.
|
||||
# An audit across all three supported engines will be necessary to
|
||||
# ensure there are no regressions.
|
||||
for pattern in _DUP_KEY_RE_DB[engine_name]:
|
||||
match = pattern.match(integrity_error.message)
|
||||
if match:
|
||||
break
|
||||
else:
|
||||
return
|
||||
|
||||
# NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
|
||||
# columns so we have to omit that from the DBDuplicateEntry error.
|
||||
columns = ''
|
||||
|
||||
if engine_name != 'ibm_db_sa':
|
||||
columns = match.group(1)
|
||||
|
||||
if engine_name == "sqlite":
|
||||
columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
|
||||
else:
|
||||
columns = get_columns_from_uniq_cons_or_name(columns)
|
||||
raise exception.DBDuplicateEntry(columns, integrity_error)
|
||||
|
||||
|
||||
# NOTE(comstud): In current versions of DB backends, Deadlock violation
|
||||
# messages follow the structure:
|
||||
#
|
||||
# mysql:
|
||||
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
|
||||
# 'restarting transaction') <query_str> <query_args>
|
||||
_DEADLOCK_RE_DB = {
|
||||
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
|
||||
}
|
||||
|
||||
|
||||
def _raise_if_deadlock_error(operational_error, engine_name):
|
||||
"""Raise exception on deadlock condition.
|
||||
|
||||
Raise DBDeadlock exception if OperationalError contains a Deadlock
|
||||
condition.
|
||||
"""
|
||||
re = _DEADLOCK_RE_DB.get(engine_name)
|
||||
if re is None:
|
||||
return
|
||||
# FIXME(johannes): The usage of the .message attribute has been
|
||||
# deprecated since Python 2.6. However, the exceptions raised by
|
||||
# SQLAlchemy can differ when using unicode() and accessing .message.
|
||||
# An audit across all three supported engines will be necessary to
|
||||
# ensure there are no regressions.
|
||||
m = re.match(operational_error.message)
|
||||
if not m:
|
||||
return
|
||||
raise exception.DBDeadlock(operational_error)
|
||||
|
||||
|
||||
def _wrap_db_error(f):
|
||||
@functools.wraps(f)
|
||||
def _wrap(self, *args, **kwargs):
|
||||
try:
|
||||
assert issubclass(
|
||||
self.__class__, sqlalchemy.orm.session.Session
|
||||
), ('_wrap_db_error() can only be applied to methods of '
|
||||
'subclasses of sqlalchemy.orm.session.Session.')
|
||||
|
||||
return f(self, *args, **kwargs)
|
||||
except UnicodeEncodeError:
|
||||
raise exception.DBInvalidUnicodeParameter()
|
||||
except sqla_exc.OperationalError as e:
|
||||
_raise_if_db_connection_lost(e, self.bind)
|
||||
_raise_if_deadlock_error(e, self.bind.dialect.name)
|
||||
# NOTE(comstud): A lot of code is checking for OperationalError
|
||||
# so let's not wrap it for now.
|
||||
raise
|
||||
# note(boris-42): We should catch unique constraint violation and
|
||||
# wrap it by our own DBDuplicateEntry exception. Unique constraint
|
||||
# violation is wrapped by IntegrityError.
|
||||
except sqla_exc.IntegrityError as e:
|
||||
# note(boris-42): SqlAlchemy doesn't unify errors from different
|
||||
# DBs so we must do this. Also in some tables (for example
|
||||
# instance_types) there are more than one unique constraint. This
|
||||
# means we should get names of columns, which values violate
|
||||
# unique constraint, from error message.
|
||||
_raise_if_duplicate_entry_error(e, self.bind.dialect.name)
|
||||
raise exception.DBError(e)
|
||||
except Exception as e:
|
||||
LOG.exception(_LE('DB exception wrapped.'))
|
||||
raise exception.DBError(e)
|
||||
return _wrap
|
||||
|
||||
|
||||
def _synchronous_switch_listener(dbapi_conn, connection_rec):
|
||||
"""Switch sqlite connections to non-synchronous mode."""
|
||||
dbapi_conn.execute("PRAGMA synchronous = OFF")
|
||||
|
||||
|
||||
def _add_regexp_listener(dbapi_con, con_record):
|
||||
"""Add REGEXP function to sqlite connections."""
|
||||
|
||||
def regexp(expr, item):
|
||||
reg = re.compile(expr)
|
||||
return reg.search(six.text_type(item)) is not None
|
||||
dbapi_con.create_function('regexp', 2, regexp)
|
||||
|
||||
|
||||
def _thread_yield(dbapi_con, con_record):
|
||||
"""Ensure other greenthreads get a chance to be executed.
|
||||
|
||||
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
|
||||
execute instead of time.sleep(0).
|
||||
Force a context switch. With common database backends (eg MySQLdb and
|
||||
sqlite), there is no implicit yield caused by network I/O since they are
|
||||
implemented by C libraries that eventlet cannot monkey patch.
|
||||
"""
|
||||
time.sleep(0)
|
||||
|
||||
|
||||
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
|
||||
"""Ensures that MySQL and DB2 connections are alive.
|
||||
|
||||
Borrowed from:
|
||||
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
|
||||
"""
|
||||
cursor = dbapi_conn.cursor()
|
||||
try:
|
||||
ping_sql = 'select 1'
|
||||
if engine.name == 'ibm_db_sa':
|
||||
# DB2 requires a table expression
|
||||
ping_sql = 'select 1 from (values (1)) AS t1'
|
||||
cursor.execute(ping_sql)
|
||||
except Exception as ex:
|
||||
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
|
||||
msg = _LW('Database server has gone away: %s') % ex
|
||||
LOG.warning(msg)
|
||||
raise sqla_exc.DisconnectionError(msg)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def _set_session_sql_mode(dbapi_con, connection_rec,
|
||||
connection_proxy, sql_mode=None):
|
||||
"""Set the sql_mode session variable.
|
||||
|
||||
MySQL supports several server modes. The default is None, but sessions
|
||||
may choose to enable server modes like TRADITIONAL, ANSI,
|
||||
several STRICT_* modes and others.
|
||||
|
||||
Note: passing in '' (empty string) for sql_mode clears
|
||||
the SQL mode for the session, overriding a potentially set
|
||||
server default.
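A rough sketch of how a specific mode is usually requested when building the
engine; the keyword is the mysql_sql_mode parameter accepted by this module's
create_engine(), and the connection string is an illustrative placeholder:

.. code:: python

    engine = create_engine('mysql://user:pass@localhost/kite',
                           mysql_sql_mode='TRADITIONAL')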
|
||||
"""
|
||||
|
||||
cursor = dbapi_con.cursor()
|
||||
cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
|
||||
|
||||
|
||||
def _mysql_get_effective_sql_mode(engine):
|
||||
"""Returns the effective SQL mode for connections from the engine pool.
|
||||
|
||||
Returns ``None`` if the mode isn't available, otherwise returns the mode.
|
||||
|
||||
"""
|
||||
# Get the real effective SQL mode. Even when unset by
|
||||
# our own config, the server may still be operating in a specific
|
||||
# SQL mode as set by the server configuration.
|
||||
# Also note that the checkout listener will be called on execute to
|
||||
# set the mode if it's registered.
|
||||
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
|
||||
if row is None:
|
||||
return
|
||||
return row[1]
|
||||
|
||||
|
||||
def _mysql_check_effective_sql_mode(engine):
|
||||
"""Logs a message based on the effective SQL mode for MySQL connections."""
|
||||
realmode = _mysql_get_effective_sql_mode(engine)
|
||||
|
||||
if realmode is None:
|
||||
LOG.warning(_LW('Unable to detect effective SQL mode'))
|
||||
return
|
||||
|
||||
LOG.debug('MySQL server mode set to %s', realmode)
|
||||
# 'TRADITIONAL' mode enables several other modes, so
|
||||
# we need a substring match here
|
||||
if not ('TRADITIONAL' in realmode.upper() or
|
||||
'STRICT_ALL_TABLES' in realmode.upper()):
|
||||
LOG.warning(_LW("MySQL SQL mode is '%s', "
|
||||
"consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
|
||||
realmode)
|
||||
|
||||
|
||||
def _mysql_set_mode_callback(engine, sql_mode):
|
||||
if sql_mode is not None:
|
||||
mode_callback = functools.partial(_set_session_sql_mode,
|
||||
sql_mode=sql_mode)
|
||||
sqlalchemy.event.listen(engine, 'checkout', mode_callback)
|
||||
_mysql_check_effective_sql_mode(engine)
|
||||
|
||||
|
||||
def _is_db_connection_error(args):
|
||||
"""Return True if error in connecting to db."""
|
||||
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
|
||||
# to support Postgres and others.
|
||||
# For the db2, the error code is -30081 since the db2 is still not ready
|
||||
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
|
||||
for err_code in conn_err_codes:
|
||||
if args.find(err_code) != -1:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _raise_if_db_connection_lost(error, engine):
|
||||
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
|
||||
# requires connection and cursor in incoming parameters,
|
||||
# but we have no possibility to create connection if DB
|
||||
# is not available, so in such case reconnect fails.
|
||||
# But is_disconnect() ignores these parameters, so it
|
||||
# makes sense to pass to function None as placeholder
|
||||
# instead of connection and cursor.
|
||||
if engine.dialect.is_disconnect(error, None, None):
|
||||
raise exception.DBConnectionError(error)
|
||||
|
||||
|
||||
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
|
||||
idle_timeout=3600,
|
||||
connection_debug=0, max_pool_size=None, max_overflow=None,
|
||||
pool_timeout=None, sqlite_synchronous=True,
|
||||
connection_trace=False, max_retries=10, retry_interval=10):
|
||||
"""Return a new SQLAlchemy engine."""
|
||||
|
||||
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
|
||||
|
||||
engine_args = {
|
||||
"pool_recycle": idle_timeout,
|
||||
'convert_unicode': True,
|
||||
}
|
||||
|
||||
logger = logging.getLogger('sqlalchemy.engine')
|
||||
|
||||
# Map SQL debug level to Python log level
|
||||
if connection_debug >= 100:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
elif connection_debug >= 50:
|
||||
logger.setLevel(logging.INFO)
|
||||
else:
|
||||
logger.setLevel(logging.WARNING)
|
||||
|
||||
if "sqlite" in connection_dict.drivername:
|
||||
if sqlite_fk:
|
||||
engine_args["listeners"] = [SqliteForeignKeysListener()]
|
||||
engine_args["poolclass"] = NullPool
|
||||
|
||||
if sql_connection == "sqlite://":
|
||||
engine_args["poolclass"] = StaticPool
|
||||
engine_args["connect_args"] = {'check_same_thread': False}
|
||||
else:
|
||||
if max_pool_size is not None:
|
||||
engine_args['pool_size'] = max_pool_size
|
||||
if max_overflow is not None:
|
||||
engine_args['max_overflow'] = max_overflow
|
||||
if pool_timeout is not None:
|
||||
engine_args['pool_timeout'] = pool_timeout
|
||||
|
||||
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
|
||||
|
||||
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
|
||||
|
||||
if engine.name in ['mysql', 'ibm_db_sa']:
|
||||
ping_callback = functools.partial(_ping_listener, engine)
|
||||
sqlalchemy.event.listen(engine, 'checkout', ping_callback)
|
||||
if engine.name == 'mysql':
|
||||
if mysql_sql_mode:
|
||||
_mysql_set_mode_callback(engine, mysql_sql_mode)
|
||||
elif 'sqlite' in connection_dict.drivername:
|
||||
if not sqlite_synchronous:
|
||||
sqlalchemy.event.listen(engine, 'connect',
|
||||
_synchronous_switch_listener)
|
||||
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
|
||||
|
||||
if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
|
||||
_patch_mysqldb_with_stacktrace_comments()
|
||||
|
||||
try:
|
||||
engine.connect()
|
||||
except sqla_exc.OperationalError as e:
|
||||
if not _is_db_connection_error(e.args[0]):
|
||||
raise
|
||||
|
||||
remaining = max_retries
|
||||
if remaining == -1:
|
||||
remaining = 'infinite'
|
||||
while True:
|
||||
msg = _LW('SQL connection failed. %s attempts left.')
|
||||
LOG.warning(msg % remaining)
|
||||
if remaining != 'infinite':
|
||||
remaining -= 1
|
||||
time.sleep(retry_interval)
|
||||
try:
|
||||
engine.connect()
|
||||
break
|
||||
except sqla_exc.OperationalError as e:
|
||||
if (remaining != 'infinite' and remaining == 0) or \
|
||||
not _is_db_connection_error(e.args[0]):
|
||||
raise
|
||||
return engine
|
||||
|
||||
|
||||
class Query(sqlalchemy.orm.query.Query):
|
||||
"""Subclass of sqlalchemy.query with soft_delete() method."""
|
||||
def soft_delete(self, synchronize_session='evaluate'):
|
||||
return self.update({'deleted': literal_column('id'),
|
||||
'updated_at': literal_column('updated_at'),
|
||||
'deleted_at': timeutils.utcnow()},
|
||||
synchronize_session=synchronize_session)
|
||||
|
||||
|
||||
class Session(sqlalchemy.orm.session.Session):
|
||||
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
|
||||
@_wrap_db_error
|
||||
def query(self, *args, **kwargs):
|
||||
return super(Session, self).query(*args, **kwargs)
|
||||
|
||||
@_wrap_db_error
|
||||
def flush(self, *args, **kwargs):
|
||||
return super(Session, self).flush(*args, **kwargs)
|
||||
|
||||
@_wrap_db_error
|
||||
def execute(self, *args, **kwargs):
|
||||
return super(Session, self).execute(*args, **kwargs)
|
||||
|
||||
|
||||
def get_maker(engine, autocommit=True, expire_on_commit=False):
|
||||
"""Return a SQLAlchemy sessionmaker using the given engine."""
|
||||
return sqlalchemy.orm.sessionmaker(bind=engine,
|
||||
class_=Session,
|
||||
autocommit=autocommit,
|
||||
expire_on_commit=expire_on_commit,
|
||||
query_cls=Query)
|
||||
|
||||
|
||||
def _patch_mysqldb_with_stacktrace_comments():
|
||||
"""Adds current stack trace as a comment in queries.
|
||||
|
||||
Patches MySQLdb.cursors.BaseCursor._do_query.
|
||||
"""
|
||||
import MySQLdb.cursors
|
||||
import traceback
|
||||
|
||||
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
|
||||
|
||||
def _do_query(self, q):
|
||||
stack = ''
|
||||
for filename, line, method, function in traceback.extract_stack():
|
||||
# exclude various common things from trace
|
||||
if filename.endswith('session.py') and method == '_do_query':
|
||||
continue
|
||||
if filename.endswith('api.py') and method == 'wrapper':
|
||||
continue
|
||||
if filename.endswith('utils.py') and method == '_inner':
|
||||
continue
|
||||
if filename.endswith('exception.py') and method == '_wrap':
|
||||
continue
|
||||
# db/api is just a wrapper around db/sqlalchemy/api
|
||||
if filename.endswith('db/api.py'):
|
||||
continue
|
||||
# only trace inside kite
|
||||
index = filename.rfind('kite')
|
||||
if index == -1:
|
||||
continue
|
||||
stack += "File:%s:%s Method:%s() Line:%s | " \
|
||||
% (filename[index:], line, method, function)
|
||||
|
||||
# strip trailing " | " from stack
|
||||
if stack:
|
||||
stack = stack[:-3]
|
||||
qq = "%s /* %s */" % (q, stack)
|
||||
else:
|
||||
qq = q
|
||||
old_mysql_do_query(self, qq)
|
||||
|
||||
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
|
||||
|
||||
|
||||
class EngineFacade(object):
|
||||
"""A helper class for removing of global engine instances from kite.db.
|
||||
|
||||
As a library, kite.db can't decide where to store/when to create engine
|
||||
and sessionmaker instances, so this must be left for a target application.
|
||||
|
||||
On the other hand, in order to simplify the adoption of kite.db changes,
|
||||
we'll provide a helper class, which creates engine and sessionmaker
|
||||
on its instantiation and provides get_engine()/get_session() methods
|
||||
that are compatible with corresponding utility functions that currently
|
||||
exist in target projects, e.g. in Nova.
|
||||
|
||||
engine/sessionmaker instances will still be global (and they are meant to
|
||||
be global), but they will be stored in the app context, rather than in the
|
||||
kite.db context.
|
||||
|
||||
Note: use of this helper is completely optional and you are encouraged to
|
||||
integrate engine/sessionmaker instances into your apps any way you like
|
||||
(e.g. one might want to bind a session to a request context). Two important
|
||||
things to remember:
|
||||
|
||||
1. An Engine instance is effectively a pool of DB connections, so it's
|
||||
meant to be shared (and it's thread-safe).
|
||||
2. A Session instance is not meant to be shared and represents a DB
|
||||
transactional context (i.e. it's not thread-safe). sessionmaker is
|
||||
a factory of sessions.
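A minimal usage sketch (the connection string is an illustrative
placeholder)::

    facade = EngineFacade('mysql://user:pass@localhost/kite')
    engine = facade.get_engine()
    session = facade.get_session()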
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, sql_connection,
|
||||
sqlite_fk=False, autocommit=True,
|
||||
expire_on_commit=False, **kwargs):
|
||||
"""Initialize engine and sessionmaker instances.
|
||||
|
||||
:param sqlite_fk: enable foreign keys in SQLite
|
||||
:type sqlite_fk: bool
|
||||
|
||||
:param autocommit: use autocommit mode for created Session instances
|
||||
:type autocommit: bool
|
||||
|
||||
:param expire_on_commit: expire session objects on commit
|
||||
:type expire_on_commit: bool
|
||||
|
||||
Keyword arguments:
|
||||
|
||||
:keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions.
|
||||
(defaults to TRADITIONAL)
|
||||
:keyword idle_timeout: timeout before idle sql connections are reaped
|
||||
(defaults to 3600)
|
||||
:keyword connection_debug: verbosity of SQL debugging information.
|
||||
0=None, 100=Everything (defaults to 0)
|
||||
:keyword max_pool_size: maximum number of SQL connections to keep open
|
||||
in a pool (defaults to SQLAlchemy settings)
|
||||
:keyword max_overflow: if set, use this value for max_overflow with
|
||||
sqlalchemy (defaults to SQLAlchemy settings)
|
||||
:keyword pool_timeout: if set, use this value for pool_timeout with
|
||||
sqlalchemy (defaults to SQLAlchemy settings)
|
||||
:keyword sqlite_synchronous: if True, SQLite uses synchronous mode
|
||||
(defaults to True)
|
||||
:keyword connection_trace: add python stack traces to SQL as comment
|
||||
strings (defaults to False)
|
||||
:keyword max_retries: maximum db connection retries during startup.
|
||||
(setting -1 implies an infinite retry count)
|
||||
(defaults to 10)
|
||||
:keyword retry_interval: interval between retries of opening a sql
|
||||
connection (defaults to 10)
|
||||
|
||||
"""
|
||||
|
||||
super(EngineFacade, self).__init__()
|
||||
|
||||
self._engine = create_engine(
|
||||
sql_connection=sql_connection,
|
||||
sqlite_fk=sqlite_fk,
|
||||
mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'),
|
||||
idle_timeout=kwargs.get('idle_timeout', 3600),
|
||||
connection_debug=kwargs.get('connection_debug', 0),
|
||||
max_pool_size=kwargs.get('max_pool_size'),
|
||||
max_overflow=kwargs.get('max_overflow'),
|
||||
pool_timeout=kwargs.get('pool_timeout'),
|
||||
sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
|
||||
connection_trace=kwargs.get('connection_trace', False),
|
||||
max_retries=kwargs.get('max_retries', 10),
|
||||
retry_interval=kwargs.get('retry_interval', 10))
|
||||
self._session_maker = get_maker(
|
||||
engine=self._engine,
|
||||
autocommit=autocommit,
|
||||
expire_on_commit=expire_on_commit)
|
||||
|
||||
def get_engine(self):
|
||||
"""Get the engine instance (note, that it's shared)."""
|
||||
|
||||
return self._engine
|
||||
|
||||
def get_session(self, **kwargs):
|
||||
"""Get a Session instance.
|
||||
|
||||
If passed, keyword arguments values override the ones used when the
|
||||
sessionmaker instance was created.
|
||||
|
||||
:keyword autocommit: use autocommit mode for created Session instances
|
||||
:type autocommit: bool
|
||||
|
||||
:keyword expire_on_commit: expire session objects on commit
|
||||
:type expire_on_commit: bool
|
||||
|
||||
"""
|
||||
|
||||
for arg in kwargs:
|
||||
if arg not in ('autocommit', 'expire_on_commit'):
|
||||
del kwargs[arg]
|
||||
|
||||
return self._session_maker(**kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_config(cls, connection_string, conf,
|
||||
sqlite_fk=False, autocommit=True, expire_on_commit=False):
|
||||
"""Initialize EngineFacade using oslo.config config instance options.
|
||||
|
||||
:param connection_string: SQLAlchemy connection string
|
||||
:type connection_string: string
|
||||
|
||||
:param conf: oslo.config config instance
|
||||
:type conf: oslo.config.cfg.ConfigOpts
|
||||
|
||||
:param sqlite_fk: enable foreign keys in SQLite
|
||||
:type sqlite_fk: bool
|
||||
|
||||
:param autocommit: use autocommit mode for created Session instances
|
||||
:type autocommit: bool
|
||||
|
||||
:param expire_on_commit: expire session objects on commit
|
||||
:type expire_on_commit: bool
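A minimal usage sketch (``CONF`` stands for an illustrative oslo.config
instance with a populated [database] section)::

    facade = EngineFacade.from_config(CONF.database.connection, CONF)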
|
||||
|
||||
"""
|
||||
|
||||
return cls(sql_connection=connection_string,
|
||||
sqlite_fk=sqlite_fk,
|
||||
autocommit=autocommit,
|
||||
expire_on_commit=expire_on_commit,
|
||||
**dict(conf.database.items()))
|
|
@ -1,153 +0,0 @@
|
|||
# Copyright (c) 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import functools
|
||||
import os
|
||||
|
||||
import fixtures
|
||||
import six
|
||||
|
||||
from kite.openstack.common.db.sqlalchemy import session
|
||||
from kite.openstack.common.db.sqlalchemy import utils
|
||||
from kite.openstack.common.fixture import lockutils
|
||||
from kite.openstack.common import test
|
||||
|
||||
|
||||
class DbFixture(fixtures.Fixture):
|
||||
"""Basic database fixture.
|
||||
|
||||
Allows to run tests on various db backends, such as SQLite, MySQL and
|
||||
PostgreSQL. By default use sqlite backend. To override default backend
|
||||
uri set env variable OS_TEST_DBAPI_CONNECTION with database admin
|
||||
credentials for specific backend.
|
||||
"""
|
||||
|
||||
def _get_uri(self):
|
||||
return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
|
||||
|
||||
def __init__(self, test):
|
||||
super(DbFixture, self).__init__()
|
||||
|
||||
self.test = test
|
||||
|
||||
def setUp(self):
|
||||
super(DbFixture, self).setUp()
|
||||
|
||||
self.test.engine = session.create_engine(self._get_uri())
|
||||
self.test.sessionmaker = session.get_maker(self.test.engine)
|
||||
self.addCleanup(self.test.engine.dispose)
|
||||
|
||||
|
||||
class DbTestCase(test.BaseTestCase):
|
||||
"""Base class for testing of DB code.
|
||||
|
||||
Using `DbFixture`. Intended to be the main database test case to use all
|
||||
the tests on a given backend with user defined uri. Backend specific
|
||||
tests should be decorated with `backend_specific` decorator.
|
||||
"""
|
||||
|
||||
FIXTURE = DbFixture
|
||||
|
||||
def setUp(self):
|
||||
super(DbTestCase, self).setUp()
|
||||
self.useFixture(self.FIXTURE(self))
|
||||
|
||||
|
||||
ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
|
||||
|
||||
|
||||
def backend_specific(*dialects):
|
||||
"""Decorator to skip backend specific tests on inappropriate engines.
|
||||
|
||||
::dialects: list of dialect names under which the test will be launched.
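A brief usage sketch (the test method name is illustrative)::

    @backend_specific('mysql', 'postgresql')
    def test_mysql_and_postgresql_only(self):
        ...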
|
||||
"""
|
||||
def wrap(f):
|
||||
@functools.wraps(f)
|
||||
def ins_wrap(self):
|
||||
if not set(dialects).issubset(ALLOWED_DIALECTS):
|
||||
raise ValueError(
|
||||
"Please use allowed dialects: %s" % ALLOWED_DIALECTS)
|
||||
if self.engine.name not in dialects:
|
||||
msg = ('The test "%s" can be run '
|
||||
'only on %s. Current engine is %s.')
|
||||
args = (f.__name__, ' '.join(dialects), self.engine.name)
|
||||
self.skip(msg % args)
|
||||
else:
|
||||
return f(self)
|
||||
return ins_wrap
|
||||
return wrap
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class OpportunisticFixture(DbFixture):
|
||||
"""Base fixture to use default CI databases.
|
||||
|
||||
The databases exist in OpenStack CI infrastructure. But for the
|
||||
correct functioning in local environment the databases must be
|
||||
created manually.
|
||||
"""
|
||||
|
||||
DRIVER = abc.abstractproperty(lambda: None)
|
||||
DBNAME = PASSWORD = USERNAME = 'openstack_citest'
|
||||
|
||||
def _get_uri(self):
|
||||
return utils.get_connect_string(backend=self.DRIVER,
|
||||
user=self.USERNAME,
|
||||
passwd=self.PASSWORD,
|
||||
database=self.DBNAME)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class OpportunisticTestCase(DbTestCase):
|
||||
"""Base test case to use default CI databases.
|
||||
|
||||
The subclasses of the test case are running only when openstack_citest
|
||||
database is available otherwise a tests will be skipped.
|
||||
"""
|
||||
|
||||
FIXTURE = abc.abstractproperty(lambda: None)
|
||||
|
||||
def setUp(self):
|
||||
# TODO(bnemec): Remove this once infra is ready for
|
||||
# https://review.openstack.org/#/c/74963/ to merge.
|
||||
self.useFixture(lockutils.LockFixture('opportunistic-db'))
|
||||
credentials = {
|
||||
'backend': self.FIXTURE.DRIVER,
|
||||
'user': self.FIXTURE.USERNAME,
|
||||
'passwd': self.FIXTURE.PASSWORD,
|
||||
'database': self.FIXTURE.DBNAME}
|
||||
|
||||
if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
|
||||
msg = '%s backend is not available.' % self.FIXTURE.DRIVER
|
||||
return self.skip(msg)
|
||||
|
||||
super(OpportunisticTestCase, self).setUp()
|
||||
|
||||
|
||||
class MySQLOpportunisticFixture(OpportunisticFixture):
|
||||
DRIVER = 'mysql'
|
||||
|
||||
|
||||
class PostgreSQLOpportunisticFixture(OpportunisticFixture):
|
||||
DRIVER = 'postgresql'
|
||||
|
||||
|
||||
class MySQLOpportunisticTestCase(OpportunisticTestCase):
|
||||
FIXTURE = MySQLOpportunisticFixture
|
||||
|
||||
|
||||
class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
|
||||
FIXTURE = PostgreSQLOpportunisticFixture
|
|
@ -1,269 +0,0 @@
|
|||
# Copyright 2010-2011 OpenStack Foundation
|
||||
# Copyright 2012-2013 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
import lockfile
|
||||
from six import moves
|
||||
from six.moves.urllib import parse
|
||||
import sqlalchemy
|
||||
import sqlalchemy.exc
|
||||
|
||||
from kite.openstack.common.db.sqlalchemy import utils
|
||||
from kite.openstack.common.gettextutils import _LE
|
||||
from kite.openstack.common import test
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _have_mysql(user, passwd, database):
|
||||
present = os.environ.get('TEST_MYSQL_PRESENT')
|
||||
if present is None:
|
||||
return utils.is_backend_avail(backend='mysql',
|
||||
user=user,
|
||||
passwd=passwd,
|
||||
database=database)
|
||||
return present.lower() in ('', 'true')
|
||||
|
||||
|
||||
def _have_postgresql(user, passwd, database):
|
||||
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
|
||||
if present is None:
|
||||
return utils.is_backend_avail(backend='postgres',
|
||||
user=user,
|
||||
passwd=passwd,
|
||||
database=database)
|
||||
return present.lower() in ('', 'true')
|
||||
|
||||
|
||||
def _set_db_lock(lock_path=None, lock_prefix=None):
|
||||
def decorator(f):
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
path = lock_path or os.environ.get("KITE_LOCK_PATH")
|
||||
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
|
||||
with lock:
|
||||
LOG.debug('Got lock "%s"' % f.__name__)
|
||||
return f(*args, **kwargs)
|
||||
finally:
|
||||
LOG.debug('Lock released "%s"' % f.__name__)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
class BaseMigrationTestCase(test.BaseTestCase):
|
||||
"""Base class fort testing of migration utils."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
|
||||
|
||||
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
|
||||
'test_migrations.conf')
|
||||
# Test machines can set the TEST_MIGRATIONS_CONF variable
|
||||
# to override the location of the config file for migration testing
|
||||
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
|
||||
self.DEFAULT_CONFIG_FILE)
|
||||
self.test_databases = {}
|
||||
self.migration_api = None
|
||||
|
||||
def setUp(self):
|
||||
super(BaseMigrationTestCase, self).setUp()
|
||||
|
||||
# Load test databases from the config file. Only do this
|
||||
# once. No need to re-run this on each test...
|
||||
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
|
||||
if os.path.exists(self.CONFIG_FILE_PATH):
|
||||
cp = moves.configparser.RawConfigParser()
|
||||
try:
|
||||
cp.read(self.CONFIG_FILE_PATH)
|
||||
defaults = cp.defaults()
|
||||
for key, value in defaults.items():
|
||||
self.test_databases[key] = value
|
||||
except moves.configparser.ParsingError as e:
|
||||
self.fail("Failed to read test_migrations.conf config "
|
||||
"file. Got error: %s" % e)
|
||||
else:
|
||||
self.fail("Failed to find test_migrations.conf config "
|
||||
"file.")
|
||||
|
||||
self.engines = {}
|
||||
for key, value in self.test_databases.items():
|
||||
self.engines[key] = sqlalchemy.create_engine(value)
|
||||
|
||||
# We start each test case with a completely blank slate.
|
||||
self._reset_databases()
|
||||
|
||||
def tearDown(self):
|
||||
# We destroy the test data store between each test case,
|
||||
# and recreate it, which ensures that we have no side-effects
|
||||
# from the tests
|
||||
self._reset_databases()
|
||||
super(BaseMigrationTestCase, self).tearDown()
|
||||
|
||||
def execute_cmd(self, cmd=None):
|
||||
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
output = process.communicate()[0]
|
||||
LOG.debug(output)
|
||||
self.assertEqual(0, process.returncode,
|
||||
"Failed to run: %s\n%s" % (cmd, output))
|
||||
|
||||
def _reset_pg(self, conn_pieces):
|
||||
(user,
|
||||
password,
|
||||
database,
|
||||
host) = utils.get_db_connection_info(conn_pieces)
|
||||
os.environ['PGPASSWORD'] = password
|
||||
os.environ['PGUSER'] = user
|
||||
# note(boris-42): We must create and drop database, we can't
|
||||
# drop database which we have connected to, so for such
|
||||
# operations there is a special database template1.
|
||||
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
|
||||
" '%(sql)s' -d template1")
|
||||
|
||||
sql = ("drop database if exists %s;") % database
|
||||
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
|
||||
self.execute_cmd(droptable)
|
||||
|
||||
sql = ("create database %s;") % database
|
||||
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
|
||||
self.execute_cmd(createtable)
|
||||
|
||||
os.unsetenv('PGPASSWORD')
|
||||
os.unsetenv('PGUSER')
|
||||
|
||||
@_set_db_lock(lock_prefix='migration_tests-')
|
||||
def _reset_databases(self):
|
||||
for key, engine in self.engines.items():
|
||||
conn_string = self.test_databases[key]
|
||||
conn_pieces = parse.urlparse(conn_string)
|
||||
engine.dispose()
|
||||
if conn_string.startswith('sqlite'):
|
||||
# We can just delete the SQLite database, which is
|
||||
# the easiest and cleanest solution
|
||||
db_path = conn_pieces.path.strip('/')
|
||||
if os.path.exists(db_path):
|
||||
os.unlink(db_path)
|
||||
# No need to recreate the SQLite DB. SQLite will
|
||||
# create it for us if it's not there...
|
||||
elif conn_string.startswith('mysql'):
|
||||
# We can execute the MySQL client to destroy and re-create
|
||||
# the MYSQL database, which is easier and less error-prone
|
||||
# than using SQLAlchemy to do this via MetaData...trust me.
|
||||
(user, password, database, host) = \
|
||||
utils.get_db_connection_info(conn_pieces)
|
||||
sql = ("drop database if exists %(db)s; "
|
||||
"create database %(db)s;") % {'db': database}
|
||||
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
|
||||
"-e \"%(sql)s\"") % {'user': user, 'password': password,
|
||||
'host': host, 'sql': sql}
|
||||
self.execute_cmd(cmd)
|
||||
elif conn_string.startswith('postgresql'):
|
||||
self._reset_pg(conn_pieces)
|
||||
|
||||
|
||||
class WalkVersionsMixin(object):
|
||||
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
|
||||
# Determine latest version script from the repo, then
|
||||
# upgrade from 1 through to the latest, with no data
|
||||
# in the databases. This just checks that the schema itself
|
||||
# upgrades successfully.
|
||||
|
||||
# Place the database under version control
|
||||
self.migration_api.version_control(engine, self.REPOSITORY,
|
||||
self.INIT_VERSION)
|
||||
self.assertEqual(self.INIT_VERSION,
|
||||
self.migration_api.db_version(engine,
|
||||
self.REPOSITORY))
|
||||
|
||||
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
|
||||
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
|
||||
|
||||
for version in versions:
|
||||
# upgrade -> downgrade -> upgrade
|
||||
self._migrate_up(engine, version, with_data=True)
|
||||
if snake_walk:
|
||||
downgraded = self._migrate_down(
|
||||
engine, version - 1, with_data=True)
|
||||
if downgraded:
|
||||
self._migrate_up(engine, version)
|
||||
|
||||
if downgrade:
|
||||
# Now walk it back down to 0 from the latest, testing
|
||||
# the downgrade paths.
|
||||
for version in reversed(versions):
|
||||
# downgrade -> upgrade -> downgrade
|
||||
downgraded = self._migrate_down(engine, version - 1)
|
||||
|
||||
if snake_walk and downgraded:
|
||||
self._migrate_up(engine, version)
|
||||
self._migrate_down(engine, version - 1)
|
||||
|
||||
def _migrate_down(self, engine, version, with_data=False):
|
||||
try:
|
||||
self.migration_api.downgrade(engine, self.REPOSITORY, version)
|
||||
except NotImplementedError:
|
||||
# NOTE(sirp): some migrations, namely release-level
|
||||
# migrations, don't support a downgrade.
|
||||
return False
|
||||
|
||||
self.assertEqual(
|
||||
version, self.migration_api.db_version(engine, self.REPOSITORY))
|
||||
|
||||
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
|
||||
# version). So if we have any downgrade checks, they need to be run for
|
||||
# the previous (higher numbered) migration.
|
||||
if with_data:
|
||||
post_downgrade = getattr(
|
||||
self, "_post_downgrade_%03d" % (version + 1), None)
|
||||
if post_downgrade:
|
||||
post_downgrade(engine)
|
||||
|
||||
return True
|
||||
|
||||
def _migrate_up(self, engine, version, with_data=False):
|
||||
"""migrate up to a new version of the db.
|
||||
|
||||
We allow for data insertion and post checks at every
|
||||
migration version with special _pre_upgrade_### and
|
||||
_check_### functions in the main test.
|
||||
"""
|
||||
# NOTE(sdague): try block is here because it's impossible to debug
|
||||
# where a failed data migration happens otherwise
|
||||
try:
|
||||
if with_data:
|
||||
data = None
|
||||
pre_upgrade = getattr(
|
||||
self, "_pre_upgrade_%03d" % version, None)
|
||||
if pre_upgrade:
|
||||
data = pre_upgrade(engine)
|
||||
|
||||
self.migration_api.upgrade(engine, self.REPOSITORY, version)
|
||||
self.assertEqual(version,
|
||||
self.migration_api.db_version(engine,
|
||||
self.REPOSITORY))
|
||||
if with_data:
|
||||
check = getattr(self, "_check_%03d" % version, None)
|
||||
if check:
|
||||
check(engine, data)
|
||||
except Exception:
|
||||
LOG.error(_LE("Failed to migrate to version %s on engine %s") %
|
||||
(version, engine))
|
||||
raise
|
|
@ -1,647 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2010-2011 OpenStack Foundation.
|
||||
# Copyright 2012 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import re
|
||||
|
||||
import sqlalchemy
|
||||
from sqlalchemy import Boolean
|
||||
from sqlalchemy import CheckConstraint
|
||||
from sqlalchemy import Column
|
||||
from sqlalchemy.engine import reflection
|
||||
from sqlalchemy.ext.compiler import compiles
|
||||
from sqlalchemy import func
|
||||
from sqlalchemy import Index
|
||||
from sqlalchemy import Integer
|
||||
from sqlalchemy import MetaData
|
||||
from sqlalchemy import or_
|
||||
from sqlalchemy.sql.expression import literal_column
|
||||
from sqlalchemy.sql.expression import UpdateBase
|
||||
from sqlalchemy.sql import select
|
||||
from sqlalchemy import String
|
||||
from sqlalchemy import Table
|
||||
from sqlalchemy.types import NullType
|
||||
|
||||
from kite.openstack.common import context as request_context
|
||||
from kite.openstack.common.db.sqlalchemy import models
|
||||
from kite.openstack.common.gettextutils import _, _LI, _LW
|
||||
from kite.openstack.common import timeutils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
|
||||
|
||||
|
||||
def sanitize_db_url(url):
|
||||
match = _DBURL_REGEX.match(url)
|
||||
if match:
|
||||
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
|
||||
return url
|
||||
|
||||
|
||||
class InvalidSortKey(Exception):
|
||||
message = _("Sort key supplied was not valid.")
|
||||
|
||||
|
||||
# copy from glance/db/sqlalchemy/api.py
|
||||
def paginate_query(query, model, limit, sort_keys, marker=None,
|
||||
sort_dir=None, sort_dirs=None):
|
||||
"""Returns a query with sorting / pagination criteria added.
|
||||
|
||||
Pagination works by requiring a unique sort_key, specified by sort_keys.
|
||||
(If sort_keys is not unique, then we risk looping through values.)
|
||||
We use the last row in the previous page as the 'marker' for pagination.
|
||||
So we must return values that follow the passed marker in the order.
|
||||
With a single-valued sort_key, this would be easy: sort_key > X.
|
||||
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
|
||||
the lexicographical ordering:
|
||||
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
|
||||
|
||||
We also have to cope with different sort_directions.
|
||||
|
||||
Typically, the id of the last row is used as the client-facing pagination
|
||||
marker, then the actual marker object must be fetched from the db and
|
||||
passed in to us as marker.
|
||||
|
||||
:param query: the query object to which we should add paging/sorting
|
||||
:param model: the ORM model class
|
||||
:param limit: maximum number of items to return
|
||||
:param sort_keys: array of attributes by which results should be sorted
|
||||
:param marker: the last item of the previous page; we return the next
|
||||
results after this value.
|
||||
:param sort_dir: direction in which results should be sorted (asc, desc)
|
||||
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
|
||||
|
||||
:rtype: sqlalchemy.orm.query.Query
|
||||
:return: The query with sorting/pagination added.
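A minimal usage sketch (the model, session and marker names are illustrative
placeholders)::

    query = session.query(models.Foo)
    query = paginate_query(query, models.Foo, limit=20,
                           sort_keys=['created_at', 'id'],
                           marker=last_foo, sort_dir='desc')
    page = query.all()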
|
||||
"""
|
||||
|
||||
if 'id' not in sort_keys:
|
||||
# TODO(justinsb): If this ever gives a false-positive, check
|
||||
# the actual primary key, rather than assuming its id
|
||||
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
|
||||
|
||||
assert(not (sort_dir and sort_dirs))
|
||||
|
||||
# Default the sort direction to ascending
|
||||
if sort_dirs is None and sort_dir is None:
|
||||
sort_dir = 'asc'
|
||||
|
||||
# Ensure a per-column sort direction
|
||||
if sort_dirs is None:
|
||||
sort_dirs = [sort_dir for _sort_key in sort_keys]
|
||||
|
||||
assert(len(sort_dirs) == len(sort_keys))
|
||||
|
||||
# Add sorting
|
||||
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
|
||||
try:
|
||||
sort_dir_func = {
|
||||
'asc': sqlalchemy.asc,
|
||||
'desc': sqlalchemy.desc,
|
||||
}[current_sort_dir]
|
||||
except KeyError:
|
||||
raise ValueError(_("Unknown sort direction, "
|
||||
"must be 'desc' or 'asc'"))
|
||||
try:
|
||||
sort_key_attr = getattr(model, current_sort_key)
|
||||
except AttributeError:
|
||||
raise InvalidSortKey()
|
||||
query = query.order_by(sort_dir_func(sort_key_attr))
|
||||
|
||||
# Add pagination
|
||||
if marker is not None:
|
||||
marker_values = []
|
||||
for sort_key in sort_keys:
|
||||
v = getattr(marker, sort_key)
|
||||
marker_values.append(v)
|
||||
|
||||
# Build up an array of sort criteria as in the docstring
|
||||
criteria_list = []
|
||||
for i in range(len(sort_keys)):
|
||||
crit_attrs = []
|
||||
for j in range(i):
|
||||
model_attr = getattr(model, sort_keys[j])
|
||||
crit_attrs.append((model_attr == marker_values[j]))
|
||||
|
||||
model_attr = getattr(model, sort_keys[i])
|
||||
if sort_dirs[i] == 'desc':
|
||||
crit_attrs.append((model_attr < marker_values[i]))
|
||||
else:
|
||||
crit_attrs.append((model_attr > marker_values[i]))
|
||||
|
||||
criteria = sqlalchemy.sql.and_(*crit_attrs)
|
||||
criteria_list.append(criteria)
|
||||
|
||||
f = sqlalchemy.sql.or_(*criteria_list)
|
||||
query = query.filter(f)
|
||||
|
||||
if limit is not None:
|
||||
query = query.limit(limit)
|
||||
|
||||
return query
|
||||
|
||||
|
||||
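# NOTE: Illustrative sketch, not part of the original module.  ``model`` is
# assumed to be a declarative class with ``name`` and ``id`` columns.  For a
# marker row (name='abc', id=42) the pagination filter built above expands
# to "name > 'abc' OR (name = 'abc' AND id > 42)".
def _example_paginate(session, model, marker_id=None, page_size=20):
    # The marker is the last row of the previous page, fetched by its id.
    marker = session.query(model).get(marker_id) if marker_id else None
    query = paginate_query(session.query(model), model, limit=page_size,
                           sort_keys=['name', 'id'], marker=marker,
                           sort_dirs=['asc', 'asc'])
    return query.all()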
def _read_deleted_filter(query, db_model, read_deleted):
|
||||
if 'deleted' not in db_model.__table__.columns:
|
||||
raise ValueError(_("There is no `deleted` column in `%s` table. "
|
||||
"Project doesn't use soft-deleted feature.")
|
||||
% db_model.__name__)
|
||||
|
||||
default_deleted_value = db_model.__table__.c.deleted.default.arg
|
||||
if read_deleted == 'no':
|
||||
query = query.filter(db_model.deleted == default_deleted_value)
|
||||
elif read_deleted == 'yes':
|
||||
pass # omit the filter to include deleted and active
|
||||
elif read_deleted == 'only':
|
||||
query = query.filter(db_model.deleted != default_deleted_value)
|
||||
else:
|
||||
raise ValueError(_("Unrecognized read_deleted value '%s'")
|
||||
% read_deleted)
|
||||
return query
|
||||
|
||||
|
||||
def _project_filter(query, db_model, context, project_only):
|
||||
if project_only and 'project_id' not in db_model.__table__.columns:
|
||||
raise ValueError(_("There is no `project_id` column in `%s` table.")
|
||||
% db_model.__name__)
|
||||
|
||||
if request_context.is_user_context(context) and project_only:
|
||||
if project_only == 'allow_none':
|
||||
is_none = None
|
||||
query = query.filter(or_(db_model.project_id == context.project_id,
|
||||
db_model.project_id == is_none))
|
||||
else:
|
||||
query = query.filter(db_model.project_id == context.project_id)
|
||||
|
||||
return query
|
||||
|
||||
|
||||
def model_query(context, model, session, args=None, project_only=False,
|
||||
read_deleted=None):
|
||||
"""Query helper that accounts for context's `read_deleted` field.
|
||||
|
||||
:param context: context to query under
|
||||
|
||||
:param model: Model to query. Must be a subclass of ModelBase.
|
||||
:type model: models.ModelBase
|
||||
|
||||
:param session: The session to use.
|
||||
:type session: sqlalchemy.orm.session.Session
|
||||
|
||||
:param args: Arguments to query. If None - model is used.
|
||||
:type args: tuple
|
||||
|
||||
:param project_only: If present and context is user-type, then restrict
|
||||
query to match the context's project_id. If set to
|
||||
'allow_none', restriction includes project_id = None.
|
||||
:type project_only: bool
|
||||
|
||||
:param read_deleted: If present, overrides context's read_deleted field.
|
||||
:type read_deleted: bool
|
||||
|
||||
Usage:
|
||||
|
||||
.. code:: python
|
||||
|
||||
result = (utils.model_query(context, models.Instance, session=session)
|
||||
.filter_by(uuid=instance_uuid)
|
||||
.all())
|
||||
|
||||
query = utils.model_query(
|
||||
context, Node,
|
||||
session=session,
|
||||
args=(func.count(Node.id), func.sum(Node.ram))
|
||||
).filter_by(project_id=project_id)
|
||||
|
||||
"""
|
||||
|
||||
if not read_deleted:
|
||||
if hasattr(context, 'read_deleted'):
|
||||
# NOTE(viktors): some projects use `read_deleted` attribute in
|
||||
# their contexts instead of `show_deleted`.
|
||||
read_deleted = context.read_deleted
|
||||
else:
|
||||
read_deleted = context.show_deleted
|
||||
|
||||
if not issubclass(model, models.ModelBase):
|
||||
raise TypeError(_("model should be a subclass of ModelBase"))
|
||||
|
||||
query = session.query(model) if not args else session.query(*args)
|
||||
query = _read_deleted_filter(query, model, read_deleted)
|
||||
query = _project_filter(query, model, context, project_only)
|
||||
|
||||
return query
|
||||
|
||||
|
||||
def get_table(engine, name):
|
||||
"""Returns an sqlalchemy table dynamically from db.
|
||||
|
||||
Needed because the models don't work for us in migrations
|
||||
as models will be far out of sync with the current data.
|
||||
"""
|
||||
metadata = MetaData()
|
||||
metadata.bind = engine
|
||||
return Table(name, metadata, autoload=True)
|
||||
|
||||
|
||||
class InsertFromSelect(UpdateBase):
|
||||
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
|
||||
def __init__(self, table, select):
|
||||
self.table = table
|
||||
self.select = select
|
||||
|
||||
|
||||
@compiles(InsertFromSelect)
|
||||
def visit_insert_from_select(element, compiler, **kw):
|
||||
"""Form the `INSERT INTO table (SELECT ... )` statement."""
|
||||
return "INSERT INTO %s %s" % (
|
||||
compiler.process(element.table, asfrom=True),
|
||||
compiler.process(element.select))
|
||||
|
||||
|
||||
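# NOTE: Illustrative sketch, not part of the original module: copying every
# row of one table into another in a single server-side statement.  Both
# tables are assumed to have compatible columns.
def _example_copy_rows(engine, src_table, dst_table):
    ins = InsertFromSelect(dst_table, src_table.select())
    # Compiles to: INSERT INTO <dst_table> SELECT ... FROM <src_table>
    engine.execute(ins)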
class ColumnError(Exception):
|
||||
"""Error raised when no column or an invalid column is found."""
|
||||
|
||||
|
||||
def _get_not_supported_column(col_name_col_instance, column_name):
|
||||
try:
|
||||
column = col_name_col_instance[column_name]
|
||||
except KeyError:
|
||||
msg = _("Please specify column %s in col_name_col_instance "
|
||||
"param. It is required because column has unsupported "
|
||||
"type by sqlite).")
|
||||
raise ColumnError(msg % column_name)
|
||||
|
||||
if not isinstance(column, Column):
|
||||
msg = _("col_name_col_instance param has wrong type of "
|
||||
"column instance for column %s It should be instance "
|
||||
"of sqlalchemy.Column.")
|
||||
raise ColumnError(msg % column_name)
|
||||
return column
|
||||
|
||||
|
||||
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
|
||||
**col_name_col_instance):
|
||||
"""Drop unique constraint from table.
|
||||
|
||||
DEPRECATED: this function is deprecated and will be removed from kite.db
|
||||
in a few releases. Please use UniqueConstraint.drop() method directly for
|
||||
sqlalchemy-migrate migration scripts.
|
||||
|
||||
This method drops the unique constraint from a table; it works for mysql, postgresql and sqlite.
|
||||
In mysql and postgresql we are able to use "alter table" construction.
|
||||
Sqlalchemy doesn't support some sqlite column types and replaces their
|
||||
type with NullType in metadata. We process these columns and replace
|
||||
NullType with the correct column type.
|
||||
|
||||
:param migrate_engine: sqlalchemy engine
|
||||
:param table_name: name of the table that contains the unique constraint.
|
||||
:param uc_name: name of the unique constraint that will be dropped.
|
||||
:param columns: columns that are in the unique constraint.
|
||||
:param col_name_col_instance: contains pair column_name=column_instance.
|
||||
column_instance is instance of Column. These params
|
||||
are required only for columns that have unsupported
|
||||
types by sqlite. For example BigInteger.
|
||||
"""
|
||||
|
||||
from migrate.changeset import UniqueConstraint
|
||||
|
||||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
t = Table(table_name, meta, autoload=True)
|
||||
|
||||
if migrate_engine.name == "sqlite":
|
||||
override_cols = [
|
||||
_get_not_supported_column(col_name_col_instance, col.name)
|
||||
for col in t.columns
|
||||
if isinstance(col.type, NullType)
|
||||
]
|
||||
for col in override_cols:
|
||||
t.columns.replace(col)
|
||||
|
||||
uc = UniqueConstraint(*columns, table=t, name=uc_name)
|
||||
uc.drop()
|
||||
|
||||
|
||||
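# NOTE: Illustrative sketch, not part of the original module: how the
# (deprecated) helper above would be called from a sqlalchemy-migrate
# script.  The table, constraint and column names are hypothetical; a
# col_name_col_instance kwarg would only be needed for columns that sqlite
# reflects as NullType (e.g. BigInteger).
def _example_upgrade(migrate_engine):
    drop_unique_constraint(migrate_engine, 'device_keys',
                           'uniq_device_keys0name', 'name')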
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
|
||||
use_soft_delete, *uc_column_names):
|
||||
"""Drop all old rows having the same values for columns in uc_columns.
|
||||
|
||||
This method drops (or marks as `deleted` if use_soft_delete is True) old
|
||||
duplicate rows from the table with name `table_name`.
|
||||
|
||||
:param migrate_engine: Sqlalchemy engine
|
||||
:param table_name: Table with duplicates
|
||||
:param use_soft_delete: If True - values will be marked as `deleted`,
|
||||
if False - values will be removed from table
|
||||
:param uc_column_names: Unique constraint columns
|
||||
"""
|
||||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
table = Table(table_name, meta, autoload=True)
|
||||
columns_for_group_by = [table.c[name] for name in uc_column_names]
|
||||
|
||||
columns_for_select = [func.max(table.c.id)]
|
||||
columns_for_select.extend(columns_for_group_by)
|
||||
|
||||
duplicated_rows_select = select(columns_for_select,
|
||||
group_by=columns_for_group_by,
|
||||
having=func.count(table.c.id) > 1)
|
||||
|
||||
for row in migrate_engine.execute(duplicated_rows_select):
|
||||
# NOTE(boris-42): Do not remove row that has the biggest ID.
|
||||
delete_condition = table.c.id != row[0]
|
||||
is_none = None # workaround for pyflakes
|
||||
delete_condition &= table.c.deleted_at == is_none
|
||||
for name in uc_column_names:
|
||||
delete_condition &= table.c[name] == row[name]
|
||||
|
||||
rows_to_delete_select = select([table.c.id]).where(delete_condition)
|
||||
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
|
||||
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
|
||||
"%(table)s") % dict(id=row[0], table=table_name))
|
||||
|
||||
if use_soft_delete:
|
||||
delete_statement = table.update().\
|
||||
where(delete_condition).\
|
||||
values({
|
||||
'deleted': literal_column('id'),
|
||||
'updated_at': literal_column('updated_at'),
|
||||
'deleted_at': timeutils.utcnow()
|
||||
})
|
||||
else:
|
||||
delete_statement = table.delete().where(delete_condition)
|
||||
migrate_engine.execute(delete_statement)
|
||||
|
||||
|
||||
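# NOTE: Illustrative sketch, not part of the original module: a migration
# typically removes duplicates right before adding a unique constraint.
# The table and column names are hypothetical.
def _example_dedupe(migrate_engine):
    # Soft-delete every older row sharing the same (name, project_id) pair,
    # keeping only the row with the highest id.
    drop_old_duplicate_entries_from_table(migrate_engine, 'device_keys',
                                          True, 'name', 'project_id')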
def _get_default_deleted_value(table):
|
||||
if isinstance(table.c.id.type, Integer):
|
||||
return 0
|
||||
if isinstance(table.c.id.type, String):
|
||||
return ""
|
||||
raise ColumnError(_("Unsupported id columns type"))
|
||||
|
||||
|
||||
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
|
||||
table = get_table(migrate_engine, table_name)
|
||||
|
||||
insp = reflection.Inspector.from_engine(migrate_engine)
|
||||
real_indexes = insp.get_indexes(table_name)
|
||||
existing_index_names = dict(
|
||||
[(index['name'], index['column_names']) for index in real_indexes])
|
||||
|
||||
# NOTE(boris-42): Restore indexes on `deleted` column
|
||||
for index in indexes:
|
||||
if 'deleted' not in index['column_names']:
|
||||
continue
|
||||
name = index['name']
|
||||
if name in existing_index_names:
|
||||
column_names = [table.c[c] for c in existing_index_names[name]]
|
||||
old_index = Index(name, *column_names, unique=index["unique"])
|
||||
old_index.drop(migrate_engine)
|
||||
|
||||
column_names = [table.c[c] for c in index['column_names']]
|
||||
new_index = Index(index["name"], *column_names, unique=index["unique"])
|
||||
new_index.create(migrate_engine)
|
||||
|
||||
|
||||
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
|
||||
**col_name_col_instance):
|
||||
if migrate_engine.name == "sqlite":
|
||||
return _change_deleted_column_type_to_boolean_sqlite(
|
||||
migrate_engine, table_name, **col_name_col_instance)
|
||||
insp = reflection.Inspector.from_engine(migrate_engine)
|
||||
indexes = insp.get_indexes(table_name)
|
||||
|
||||
table = get_table(migrate_engine, table_name)
|
||||
|
||||
old_deleted = Column('old_deleted', Boolean, default=False)
|
||||
old_deleted.create(table, populate_default=False)
|
||||
|
||||
table.update().\
|
||||
where(table.c.deleted == table.c.id).\
|
||||
values(old_deleted=True).\
|
||||
execute()
|
||||
|
||||
table.c.deleted.drop()
|
||||
table.c.old_deleted.alter(name="deleted")
|
||||
|
||||
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
|
||||
|
||||
|
||||
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
|
||||
**col_name_col_instance):
|
||||
insp = reflection.Inspector.from_engine(migrate_engine)
|
||||
table = get_table(migrate_engine, table_name)
|
||||
|
||||
columns = []
|
||||
for column in table.columns:
|
||||
column_copy = None
|
||||
if column.name != "deleted":
|
||||
if isinstance(column.type, NullType):
|
||||
column_copy = _get_not_supported_column(col_name_col_instance,
|
||||
column.name)
|
||||
else:
|
||||
column_copy = column.copy()
|
||||
else:
|
||||
column_copy = Column('deleted', Boolean, default=0)
|
||||
columns.append(column_copy)
|
||||
|
||||
constraints = [constraint.copy() for constraint in table.constraints]
|
||||
|
||||
meta = table.metadata
|
||||
new_table = Table(table_name + "__tmp__", meta,
|
||||
*(columns + constraints))
|
||||
new_table.create()
|
||||
|
||||
indexes = []
|
||||
for index in insp.get_indexes(table_name):
|
||||
column_names = [new_table.c[c] for c in index['column_names']]
|
||||
indexes.append(Index(index["name"], *column_names,
|
||||
unique=index["unique"]))
|
||||
|
||||
c_select = []
|
||||
for c in table.c:
|
||||
if c.name != "deleted":
|
||||
c_select.append(c)
|
||||
else:
|
||||
c_select.append(table.c.deleted == table.c.id)
|
||||
|
||||
ins = InsertFromSelect(new_table, select(c_select))
|
||||
migrate_engine.execute(ins)
|
||||
|
||||
table.drop()
|
||||
[index.create(migrate_engine) for index in indexes]
|
||||
|
||||
new_table.rename(table_name)
|
||||
new_table.update().\
|
||||
where(new_table.c.deleted == new_table.c.id).\
|
||||
values(deleted=True).\
|
||||
execute()
|
||||
|
||||
|
||||
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
|
||||
**col_name_col_instance):
|
||||
if migrate_engine.name == "sqlite":
|
||||
return _change_deleted_column_type_to_id_type_sqlite(
|
||||
migrate_engine, table_name, **col_name_col_instance)
|
||||
insp = reflection.Inspector.from_engine(migrate_engine)
|
||||
indexes = insp.get_indexes(table_name)
|
||||
|
||||
table = get_table(migrate_engine, table_name)
|
||||
|
||||
new_deleted = Column('new_deleted', table.c.id.type,
|
||||
default=_get_default_deleted_value(table))
|
||||
new_deleted.create(table, populate_default=True)
|
||||
|
||||
deleted = True # workaround for pyflakes
|
||||
table.update().\
|
||||
where(table.c.deleted == deleted).\
|
||||
values(new_deleted=table.c.id).\
|
||||
execute()
|
||||
table.c.deleted.drop()
|
||||
table.c.new_deleted.alter(name="deleted")
|
||||
|
||||
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
|
||||
|
||||
|
||||
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
|
||||
**col_name_col_instance):
|
||||
# NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
|
||||
# constraints in sqlite DB and our `deleted` column has
|
||||
# 2 check constraints. So there is only one way to remove
|
||||
# these constraints:
|
||||
# 1) Create new table with the same columns, constraints
|
||||
# and indexes. (except deleted column).
|
||||
# 2) Copy all data from old to new table.
|
||||
# 3) Drop old table.
|
||||
# 4) Rename new table to old table name.
|
||||
insp = reflection.Inspector.from_engine(migrate_engine)
|
||||
meta = MetaData(bind=migrate_engine)
|
||||
table = Table(table_name, meta, autoload=True)
|
||||
default_deleted_value = _get_default_deleted_value(table)
|
||||
|
||||
columns = []
|
||||
for column in table.columns:
|
||||
column_copy = None
|
||||
if column.name != "deleted":
|
||||
if isinstance(column.type, NullType):
|
||||
column_copy = _get_not_supported_column(col_name_col_instance,
|
||||
column.name)
|
||||
else:
|
||||
column_copy = column.copy()
|
||||
else:
|
||||
column_copy = Column('deleted', table.c.id.type,
|
||||
default=default_deleted_value)
|
||||
columns.append(column_copy)
|
||||
|
||||
def is_deleted_column_constraint(constraint):
|
||||
# NOTE(boris-42): There is no other way to check whether a CheckConstraint
|
||||
# is associated with the deleted column.
|
||||
if not isinstance(constraint, CheckConstraint):
|
||||
return False
|
||||
sqltext = str(constraint.sqltext)
|
||||
return (sqltext.endswith("deleted in (0, 1)") or
|
||||
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
|
||||
|
||||
constraints = []
|
||||
for constraint in table.constraints:
|
||||
if not is_deleted_column_constraint(constraint):
|
||||
constraints.append(constraint.copy())
|
||||
|
||||
new_table = Table(table_name + "__tmp__", meta,
|
||||
*(columns + constraints))
|
||||
new_table.create()
|
||||
|
||||
indexes = []
|
||||
for index in insp.get_indexes(table_name):
|
||||
column_names = [new_table.c[c] for c in index['column_names']]
|
||||
indexes.append(Index(index["name"], *column_names,
|
||||
unique=index["unique"]))
|
||||
|
||||
ins = InsertFromSelect(new_table, table.select())
|
||||
migrate_engine.execute(ins)
|
||||
|
||||
table.drop()
|
||||
[index.create(migrate_engine) for index in indexes]
|
||||
|
||||
new_table.rename(table_name)
|
||||
deleted = True # workaround for pyflakes
|
||||
new_table.update().\
|
||||
where(new_table.c.deleted == deleted).\
|
||||
values(deleted=new_table.c.id).\
|
||||
execute()
|
||||
|
||||
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
|
||||
deleted = False # workaround for pyflakes
|
||||
new_table.update().\
|
||||
where(new_table.c.deleted == deleted).\
|
||||
values(deleted=default_deleted_value).\
|
||||
execute()
|
||||
|
||||
|
||||
def get_connect_string(backend, database, user=None, passwd=None):
|
||||
"""Get database connection
|
||||
|
||||
Try to get a connection with a very specific set of values; if we get
|
||||
these then we'll run the tests, otherwise they are skipped
|
||||
"""
|
||||
args = {'backend': backend,
|
||||
'user': user,
|
||||
'passwd': passwd,
|
||||
'database': database}
|
||||
if backend == 'sqlite':
|
||||
template = '%(backend)s:///%(database)s'
|
||||
else:
|
||||
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
|
||||
return template % args
|
||||
|
||||
|
||||
def is_backend_avail(backend, database, user=None, passwd=None):
|
||||
try:
|
||||
connect_uri = get_connect_string(backend=backend,
|
||||
database=database,
|
||||
user=user,
|
||||
passwd=passwd)
|
||||
engine = sqlalchemy.create_engine(connect_uri)
|
||||
connection = engine.connect()
|
||||
except Exception:
|
||||
# intentionally catch all to handle exceptions even if we don't
|
||||
# have any backend code loaded.
|
||||
return False
|
||||
else:
|
||||
connection.close()
|
||||
engine.dispose()
|
||||
return True
|
||||
|
||||
|
||||
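# NOTE: Illustrative sketch, not part of the original module: opportunistic
# database tests probe for a backend and skip themselves when it is not
# reachable.  The "openstack_citest" credentials are the usual convention
# for such tests and are only an example here.
def _example_require_mysql(test_case):
    if not is_backend_avail('mysql', 'openstack_citest',
                            user='openstack_citest',
                            passwd='openstack_citest'):
        test_case.skipTest('MySQL backend is not available')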
def get_db_connection_info(conn_pieces):
|
||||
database = conn_pieces.path.strip('/')
|
||||
loc_pieces = conn_pieces.netloc.split('@')
|
||||
host = loc_pieces[1]
|
||||
|
||||
auth_pieces = loc_pieces[0].split(':')
|
||||
user = auth_pieces[0]
|
||||
password = ""
|
||||
if len(auth_pieces) > 1:
|
||||
password = auth_pieces[1].strip()
|
||||
|
||||
return (user, password, database, host)
|
|
@ -1,99 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# Copyright 2012, Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Exception related utilities.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import six
|
||||
|
||||
from kite.openstack.common.gettextutils import _LE
|
||||
|
||||
|
||||
class save_and_reraise_exception(object):
|
||||
"""Save current exception, run some code and then re-raise.
|
||||
|
||||
In some cases the exception context can be cleared, resulting in None
|
||||
being attempted to be re-raised after an exception handler is run. This
|
||||
can happen when eventlet switches greenthreads or when code inside an
|
||||
exception handler raises and catches another exception. In both
|
||||
cases the exception context will be cleared.
|
||||
|
||||
To work around this, we save the exception state, run handler code, and
|
||||
then re-raise the original exception. If another exception occurs, the
|
||||
saved exception is logged and the new exception is re-raised.
|
||||
|
||||
In some cases the caller may not want to re-raise the exception, and
|
||||
for those circumstances this context provides a reraise flag that
|
||||
can be used to suppress the exception. For example::
|
||||
|
||||
except Exception:
|
||||
with save_and_reraise_exception() as ctxt:
|
||||
decide_if_need_reraise()
|
||||
if not should_be_reraised:
|
||||
ctxt.reraise = False
|
||||
"""
|
||||
def __init__(self):
|
||||
self.reraise = True
|
||||
|
||||
def __enter__(self):
|
||||
self.type_, self.value, self.tb, = sys.exc_info()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if exc_type is not None:
|
||||
logging.error(_LE('Original exception being dropped: %s'),
|
||||
traceback.format_exception(self.type_,
|
||||
self.value,
|
||||
self.tb))
|
||||
return False
|
||||
if self.reraise:
|
||||
six.reraise(self.type_, self.value, self.tb)
|
||||
|
||||
|
||||
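# NOTE: Illustrative sketch, not part of the original module: running
# cleanup code after a failure without masking the original traceback.
# ``_rollback`` is a hypothetical helper.
def _example_activate(resource):
    try:
        resource.activate()
    except Exception:
        with save_and_reraise_exception() as ctxt:
            # Runs while the original exception info is preserved; if
            # _rollback() itself raises, the original exception is logged
            # and the new one propagates instead.
            _rollback(resource)
            if getattr(resource, 'ignore_activation_errors', False):
                ctxt.reraise = False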
def forever_retry_uncaught_exceptions(infunc):
|
||||
def inner_func(*args, **kwargs):
|
||||
last_log_time = 0
|
||||
last_exc_message = None
|
||||
exc_count = 0
|
||||
while True:
|
||||
try:
|
||||
return infunc(*args, **kwargs)
|
||||
except Exception as exc:
|
||||
this_exc_message = six.u(str(exc))
|
||||
if this_exc_message == last_exc_message:
|
||||
exc_count += 1
|
||||
else:
|
||||
exc_count = 1
|
||||
# Do not log any more frequently than once a minute unless
|
||||
# the exception message changes
|
||||
cur_time = int(time.time())
|
||||
if (cur_time - last_log_time > 60 or
|
||||
this_exc_message != last_exc_message):
|
||||
logging.exception(
|
||||
_LE('Unexpected exception occurred %d time(s)... '
|
||||
'retrying.') % exc_count)
|
||||
last_log_time = cur_time
|
||||
last_exc_message = this_exc_message
|
||||
exc_count = 0
|
||||
# This should be a very rare event. In case it isn't, do
|
||||
# a sleep.
|
||||
time.sleep(1)
|
||||
return inner_func
|
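# NOTE: Illustrative sketch, not part of the original module: the decorator
# above is meant for long-running daemon loops that must survive unexpected
# exceptions.  ``_poll_once`` is a hypothetical worker function.
@forever_retry_uncaught_exceptions
def _example_worker():
    while True:
        _poll_once()
        time.sleep(5)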
|
@ -1,135 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from kite.openstack.common import excutils
|
||||
from kite.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_FILE_CACHE = {}
|
||||
|
||||
|
||||
def ensure_tree(path):
|
||||
"""Create a directory (and any ancestor directories required)
|
||||
|
||||
:param path: Directory to create
|
||||
"""
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.EEXIST:
|
||||
if not os.path.isdir(path):
|
||||
raise
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def read_cached_file(filename, force_reload=False):
|
||||
"""Read from a file if it has been modified.
|
||||
|
||||
:param force_reload: Whether to reload the file.
|
||||
:returns: A tuple (reloaded, data); the boolean specifies whether the
|
||||
file was re-read and data holds the cached file contents.
|
||||
"""
|
||||
global _FILE_CACHE
|
||||
|
||||
if force_reload and filename in _FILE_CACHE:
|
||||
del _FILE_CACHE[filename]
|
||||
|
||||
reloaded = False
|
||||
mtime = os.path.getmtime(filename)
|
||||
cache_info = _FILE_CACHE.setdefault(filename, {})
|
||||
|
||||
if not cache_info or mtime > cache_info.get('mtime', 0):
|
||||
LOG.debug("Reloading cached file %s" % filename)
|
||||
with open(filename) as fap:
|
||||
cache_info['data'] = fap.read()
|
||||
cache_info['mtime'] = mtime
|
||||
reloaded = True
|
||||
return (reloaded, cache_info['data'])
|
||||
|
||||
|
||||
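# NOTE: Illustrative sketch, not part of the original module: callers
# re-parse a file only when its mtime has changed.  ``_parse_policy`` and
# the path are hypothetical.
_POLICY_CACHE = {}


def _example_load_policy():
    reloaded, data = read_cached_file('/etc/kite/policy.json')
    if reloaded or 'rules' not in _POLICY_CACHE:
        _POLICY_CACHE['rules'] = _parse_policy(data)
    return _POLICY_CACHE['rules']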
def delete_if_exists(path, remove=os.unlink):
|
||||
"""Delete a file, but ignore file not found error.
|
||||
|
||||
:param path: File to delete
|
||||
:param remove: Optional function to remove passed path
|
||||
"""
|
||||
|
||||
try:
|
||||
remove(path)
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def remove_path_on_error(path, remove=delete_if_exists):
|
||||
"""Protect code that wants to operate on PATH atomically.
|
||||
Any exception will cause PATH to be removed.
|
||||
|
||||
:param path: File to work with
|
||||
:param remove: Optional function to remove passed path
|
||||
"""
|
||||
|
||||
try:
|
||||
yield
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
remove(path)
|
||||
|
||||
|
||||
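# NOTE: Illustrative sketch, not part of the original module: any exception
# raised while writing removes the partially written file.  ``_download``
# is a hypothetical helper.
def _example_fetch(url, path):
    with remove_path_on_error(path):
        with open(path, 'wb') as f:
            _download(url, f)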
def file_open(*args, **kwargs):
|
||||
"""Open file
|
||||
|
||||
see built-in file() documentation for more details
|
||||
|
||||
Note: The reason this is kept in a separate module is to easily
|
||||
be able to provide a stub module that doesn't alter system
|
||||
state at all (for unit tests)
|
||||
"""
|
||||
return file(*args, **kwargs)
|
||||
|
||||
|
||||
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
|
||||
"""Create temporary file or use existing file.
|
||||
|
||||
This util is needed for creating a temporary file with
|
||||
specified content, suffix and prefix. If path is not None,
|
||||
it will be used for writing content. If the path doesn't
|
||||
exist it'll be created.
|
||||
|
||||
:param content: content for temporary file.
|
||||
:param path: same as parameter 'dir' for mkstemp
|
||||
:param suffix: same as parameter 'suffix' for mkstemp
|
||||
:param prefix: same as parameter 'prefix' for mkstemp
|
||||
|
||||
For example: it can be used in database tests for creating
|
||||
configuration files.
|
||||
"""
|
||||
if path:
|
||||
ensure_tree(path)
|
||||
|
||||
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
|
||||
try:
|
||||
os.write(fd, content)
|
||||
finally:
|
||||
os.close(fd)
|
||||
return path
|
|
@ -1,85 +0,0 @@
|
|||
#
|
||||
# Copyright 2013 Mirantis, Inc.
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import fixtures
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
|
||||
class Config(fixtures.Fixture):
|
||||
"""Allows overriding configuration settings for the test.
|
||||
|
||||
`conf` will be reset on cleanup.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, conf=cfg.CONF):
|
||||
self.conf = conf
|
||||
|
||||
def setUp(self):
|
||||
super(Config, self).setUp()
|
||||
# NOTE(morganfainberg): unregister must be added to cleanup before
|
||||
# reset is because cleanup works in reverse order of registered items,
|
||||
# and a reset must occur before unregistering options can occur.
|
||||
self.addCleanup(self._unregister_config_opts)
|
||||
self.addCleanup(self.conf.reset)
|
||||
self._registered_config_opts = {}
|
||||
|
||||
def config(self, **kw):
|
||||
"""Override configuration values.
|
||||
|
||||
The keyword arguments are the names of configuration options to
|
||||
override and their values.
|
||||
|
||||
If a `group` argument is supplied, the overrides are applied to
|
||||
the specified configuration option group, otherwise the overrides
|
||||
are applied to the ``default`` group.
|
||||
|
||||
"""
|
||||
|
||||
group = kw.pop('group', None)
|
||||
for k, v in six.iteritems(kw):
|
||||
self.conf.set_override(k, v, group)
|
||||
|
||||
def _unregister_config_opts(self):
|
||||
for group in self._registered_config_opts:
|
||||
self.conf.unregister_opts(self._registered_config_opts[group],
|
||||
group=group)
|
||||
|
||||
def register_opt(self, opt, group=None):
|
||||
"""Register a single option for the test run.
|
||||
|
||||
Options registered in this manner will automatically be unregistered
|
||||
during cleanup.
|
||||
|
||||
If a `group` argument is supplied, it will register the new option
|
||||
to that group, otherwise the option is registered to the ``default``
|
||||
group.
|
||||
"""
|
||||
self.conf.register_opt(opt, group=group)
|
||||
self._registered_config_opts.setdefault(group, set()).add(opt)
|
||||
|
||||
def register_opts(self, opts, group=None):
|
||||
"""Register multiple options for the test run.
|
||||
|
||||
This works in the same manner as register_opt() but takes a list of
|
||||
options as the first argument. All arguments will be registered to the
|
||||
same group if the ``group`` argument is supplied, otherwise all options
|
||||
will be registered to the ``default`` group.
|
||||
"""
|
||||
for opt in opts:
|
||||
self.register_opt(opt, group=group)
|
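# NOTE: Illustrative sketch, not part of the original module: overriding a
# configuration option for a single test.  ``example_flag`` is a made-up
# option used only for the demonstration.
class _ExampleConfigTest(fixtures.TestWithFixtures):
    def test_override(self):
        fixture = self.useFixture(Config())
        # Both the registration and the override are undone on cleanup.
        fixture.register_opt(cfg.BoolOpt('example_flag', default=False))
        fixture.config(example_flag=True)
        self.assertTrue(fixture.conf.example_flag)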
|
@ -1,51 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import fixtures
|
||||
|
||||
from kite.openstack.common import lockutils
|
||||
|
||||
|
||||
class LockFixture(fixtures.Fixture):
|
||||
"""External locking fixture.
|
||||
|
||||
This fixture is basically an alternative to the synchronized decorator with
|
||||
the external flag so that tearDowns and addCleanups will be included in
|
||||
the lock context for locking between tests. The fixture is recommended to
|
||||
be the first line in a test method, like so::
|
||||
|
||||
def test_method(self):
|
||||
self.useFixture(LockFixture)
|
||||
...
|
||||
|
||||
or the first line in setUp if all the test methods in the class are
|
||||
required to be serialized. Something like::
|
||||
|
||||
class TestCase(testtools.testcase):
|
||||
def setUp(self):
|
||||
self.useFixture(LockFixture)
|
||||
super(TestCase, self).setUp()
|
||||
...
|
||||
|
||||
This is because addCleanups are put on a LIFO queue that gets run after the
|
||||
test method exits (either by completing or raising an exception).
|
||||
"""
|
||||
def __init__(self, name, lock_file_prefix=None):
|
||||
self.mgr = lockutils.lock(name, lock_file_prefix, True)
|
||||
|
||||
def setUp(self):
|
||||
super(LockFixture, self).setUp()
|
||||
self.addCleanup(self.mgr.__exit__, None, None, None)
|
||||
self.lock = self.mgr.__enter__()
|
|
@ -1,34 +0,0 @@
|
|||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import fixtures
|
||||
|
||||
|
||||
def get_logging_handle_error_fixture():
|
||||
"""returns a fixture to make logging raise formatting exceptions.
|
||||
|
||||
Usage:
|
||||
self.useFixture(logging.get_logging_handle_error_fixture())
|
||||
"""
|
||||
return fixtures.MonkeyPatch('logging.Handler.handleError',
|
||||
_handleError)
|
||||
|
||||
|
||||
def _handleError(self, record):
|
||||
"""Monkey patch for logging.Handler.handleError.
|
||||
|
||||
The default handleError just logs the error to stderr but we want
|
||||
the option of actually raising an exception.
|
||||
"""
|
||||
raise
|
|
@ -1,62 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
##
|
||||
## DO NOT MODIFY THIS FILE
|
||||
##
|
||||
## This file is being graduated to the kitetest library. Please make all
|
||||
## changes there, and only backport critical fixes here. - dhellmann
|
||||
##
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
|
||||
|
||||
class PatchObject(fixtures.Fixture):
|
||||
"""Deal with code around mock."""
|
||||
|
||||
def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
|
||||
self.obj = obj
|
||||
self.attr = attr
|
||||
self.kwargs = kwargs
|
||||
self.new = new
|
||||
|
||||
def setUp(self):
|
||||
super(PatchObject, self).setUp()
|
||||
_p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
|
||||
self.mock = _p.start()
|
||||
self.addCleanup(_p.stop)
|
||||
|
||||
|
||||
class Patch(fixtures.Fixture):
|
||||
|
||||
"""Deal with code around mock.patch."""
|
||||
|
||||
def __init__(self, obj, new=mock.DEFAULT, **kwargs):
|
||||
self.obj = obj
|
||||
self.kwargs = kwargs
|
||||
self.new = new
|
||||
|
||||
def setUp(self):
|
||||
super(Patch, self).setUp()
|
||||
_p = mock.patch(self.obj, self.new, **self.kwargs)
|
||||
self.mock = _p.start()
|
||||
self.addCleanup(_p.stop)
|
|
@ -1,43 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
##
|
||||
## DO NOT MODIFY THIS FILE
|
||||
##
|
||||
## This file is being graduated to the kitetest library. Please make all
|
||||
## changes there, and only backport critical fixes here. - dhellmann
|
||||
##
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
|
||||
import fixtures
|
||||
from six.moves import mox
|
||||
|
||||
|
||||
class MoxStubout(fixtures.Fixture):
|
||||
"""Deal with code around mox and stubout as a fixture."""
|
||||
|
||||
def setUp(self):
|
||||
super(MoxStubout, self).setUp()
|
||||
# emulate some of the mox stuff, we can't use the metaclass
|
||||
# because it screws with our generators
|
||||
self.mox = mox.Mox()
|
||||
self.stubs = self.mox.stubs
|
||||
self.addCleanup(self.mox.UnsetStubs)
|
||||
self.addCleanup(self.mox.VerifyAll)
|
|
@ -1,448 +0,0 @@
|
|||
# Copyright 2012 Red Hat, Inc.
|
||||
# Copyright 2013 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
gettext for openstack-common modules.
|
||||
|
||||
Usual usage in an openstack.common module:
|
||||
|
||||
from kite.openstack.common.gettextutils import _
|
||||
"""
|
||||
|
||||
import copy
|
||||
import functools
|
||||
import gettext
|
||||
import locale
|
||||
from logging import handlers
|
||||
import os
|
||||
|
||||
from babel import localedata
|
||||
import six
|
||||
|
||||
_localedir = os.environ.get('kite'.upper() + '_LOCALEDIR')
|
||||
_t = gettext.translation('kite', localedir=_localedir, fallback=True)
|
||||
|
||||
# We use separate translation catalogs for each log level, so set up a
|
||||
# mapping between the log level name and the translator. The domain
|
||||
# for the log level is project_name + "-log-" + log_level so messages
|
||||
# for each level end up in their own catalog.
|
||||
_t_log_levels = dict(
|
||||
(level, gettext.translation('kite' + '-log-' + level,
|
||||
localedir=_localedir,
|
||||
fallback=True))
|
||||
for level in ['info', 'warning', 'error', 'critical']
|
||||
)
|
||||
|
||||
_AVAILABLE_LANGUAGES = {}
|
||||
USE_LAZY = False
|
||||
|
||||
|
||||
def enable_lazy():
|
||||
"""Convenience function for configuring _() to use lazy gettext
|
||||
|
||||
Call this at the start of execution to enable the gettextutils._
|
||||
function to use lazy gettext functionality. This is useful if
|
||||
your project is importing _ directly instead of using the
|
||||
gettextutils.install() way of importing the _ function.
|
||||
"""
|
||||
global USE_LAZY
|
||||
USE_LAZY = True
|
||||
|
||||
|
||||
def _(msg):
|
||||
if USE_LAZY:
|
||||
return Message(msg, domain='kite')
|
||||
else:
|
||||
if six.PY3:
|
||||
return _t.gettext(msg)
|
||||
return _t.ugettext(msg)
|
||||
|
||||
|
||||
def _log_translation(msg, level):
|
||||
"""Build a single translation of a log message
|
||||
"""
|
||||
if USE_LAZY:
|
||||
return Message(msg, domain='kite' + '-log-' + level)
|
||||
else:
|
||||
translator = _t_log_levels[level]
|
||||
if six.PY3:
|
||||
return translator.gettext(msg)
|
||||
return translator.ugettext(msg)
|
||||
|
||||
# Translators for log levels.
|
||||
#
|
||||
# The abbreviated names are meant to reflect the usual use of a short
|
||||
# name like '_'. The "L" is for "log" and the other letter comes from
|
||||
# the level.
|
||||
_LI = functools.partial(_log_translation, level='info')
|
||||
_LW = functools.partial(_log_translation, level='warning')
|
||||
_LE = functools.partial(_log_translation, level='error')
|
||||
_LC = functools.partial(_log_translation, level='critical')
|
||||
|
||||
|
||||
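# NOTE: Illustrative sketch, not part of the original module: how the
# markers above are typically used.  Log messages use the per-level _L*
# markers so they end up in their own catalogs, while exception and API
# messages use the plain _() marker.
def _example_markers(name, count):
    import logging as _logging  # local import keeps the sketch self-contained
    log = _logging.getLogger(__name__)
    log.info(_LI('Starting %(count)d workers'), {'count': count})
    log.warning(_LW('Backend %s unreachable, retrying'), name)
    if count <= 0:
        raise ValueError(_('count must be positive'))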
def install(domain, lazy=False):
|
||||
"""Install a _() function using the given translation domain.
|
||||
|
||||
Given a translation domain, install a _() function using gettext's
|
||||
install() function.
|
||||
|
||||
The main difference from gettext.install() is that we allow
|
||||
overriding the default localedir (e.g. /usr/share/locale) using
|
||||
a translation-domain-specific environment variable (e.g.
|
||||
NOVA_LOCALEDIR).
|
||||
|
||||
:param domain: the translation domain
|
||||
:param lazy: indicates whether or not to install the lazy _() function.
|
||||
The lazy _() introduces a way to do deferred translation
|
||||
of messages by installing a _ that builds Message objects,
|
||||
instead of strings, which can then be lazily translated into
|
||||
any available locale.
|
||||
"""
|
||||
if lazy:
|
||||
# NOTE(mrodden): Lazy gettext functionality.
|
||||
#
|
||||
# The following introduces a deferred way to do translations on
|
||||
# messages in OpenStack. We override the standard _() function
|
||||
# and % (format string) operation to build Message objects that can
|
||||
# later be translated when we have more information.
|
||||
def _lazy_gettext(msg):
|
||||
"""Create and return a Message object.
|
||||
|
||||
Lazy gettext function for a given domain, it is a factory method
|
||||
for a project/module to get a lazy gettext function for its own
|
||||
translation domain (i.e. nova, glance, cinder, etc.)
|
||||
|
||||
Message encapsulates a string so that we can translate
|
||||
it later when needed.
|
||||
"""
|
||||
return Message(msg, domain=domain)
|
||||
|
||||
from six import moves
|
||||
moves.builtins.__dict__['_'] = _lazy_gettext
|
||||
else:
|
||||
localedir = '%s_LOCALEDIR' % domain.upper()
|
||||
if six.PY3:
|
||||
gettext.install(domain,
|
||||
localedir=os.environ.get(localedir))
|
||||
else:
|
||||
gettext.install(domain,
|
||||
localedir=os.environ.get(localedir),
|
||||
unicode=True)
|
||||
|
||||
|
||||
class Message(six.text_type):
|
||||
"""A Message object is a unicode object that can be translated.
|
||||
|
||||
Translation of Message is done explicitly using the translate() method.
|
||||
For all non-translation intents and purposes, a Message is simply unicode,
|
||||
and can be treated as such.
|
||||
"""
|
||||
|
||||
def __new__(cls, msgid, msgtext=None, params=None,
|
||||
domain='kite', *args):
|
||||
"""Create a new Message object.
|
||||
|
||||
In order for translation to work gettext requires a message ID; this
|
||||
msgid will be used as the base unicode text. It is also possible
|
||||
for the msgid and the base unicode text to be different by passing
|
||||
the msgtext parameter.
|
||||
"""
|
||||
# If the base msgtext is not given, we use the default translation
|
||||
# of the msgid (which is in English) just in case the system locale is
|
||||
# not English, so that the base text will be in that locale by default.
|
||||
if not msgtext:
|
||||
msgtext = Message._translate_msgid(msgid, domain)
|
||||
# We want to initialize the parent unicode with the actual object that
|
||||
# would have been plain unicode if 'Message' was not enabled.
|
||||
msg = super(Message, cls).__new__(cls, msgtext)
|
||||
msg.msgid = msgid
|
||||
msg.domain = domain
|
||||
msg.params = params
|
||||
return msg
|
||||
|
||||
def translate(self, desired_locale=None):
|
||||
"""Translate this message to the desired locale.
|
||||
|
||||
:param desired_locale: The desired locale to translate the message to,
|
||||
if no locale is provided the message will be
|
||||
translated to the system's default locale.
|
||||
|
||||
:returns: the translated message in unicode
|
||||
"""
|
||||
|
||||
translated_message = Message._translate_msgid(self.msgid,
|
||||
self.domain,
|
||||
desired_locale)
|
||||
if self.params is None:
|
||||
# No need for more translation
|
||||
return translated_message
|
||||
|
||||
# This Message object may have been formatted with one or more
|
||||
# Message objects as substitution arguments, given either as a single
|
||||
# argument, part of a tuple, or as one or more values in a dictionary.
|
||||
# When translating this Message we need to translate those Messages too
|
||||
translated_params = _translate_args(self.params, desired_locale)
|
||||
|
||||
translated_message = translated_message % translated_params
|
||||
|
||||
return translated_message
|
||||
|
||||
@staticmethod
|
||||
def _translate_msgid(msgid, domain, desired_locale=None):
|
||||
if not desired_locale:
|
||||
system_locale = locale.getdefaultlocale()
|
||||
# If the system locale is not available to the runtime use English
|
||||
if not system_locale[0]:
|
||||
desired_locale = 'en_US'
|
||||
else:
|
||||
desired_locale = system_locale[0]
|
||||
|
||||
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
|
||||
lang = gettext.translation(domain,
|
||||
localedir=locale_dir,
|
||||
languages=[desired_locale],
|
||||
fallback=True)
|
||||
if six.PY3:
|
||||
translator = lang.gettext
|
||||
else:
|
||||
translator = lang.ugettext
|
||||
|
||||
translated_message = translator(msgid)
|
||||
return translated_message
|
||||
|
||||
def __mod__(self, other):
|
||||
# When we mod a Message we want the actual operation to be performed
|
||||
# by the parent class (i.e. unicode()), the only thing we do here is
|
||||
# save the original msgid and the parameters in case of a translation
|
||||
params = self._sanitize_mod_params(other)
|
||||
unicode_mod = super(Message, self).__mod__(params)
|
||||
modded = Message(self.msgid,
|
||||
msgtext=unicode_mod,
|
||||
params=params,
|
||||
domain=self.domain)
|
||||
return modded
|
||||
|
||||
def _sanitize_mod_params(self, other):
|
||||
"""Sanitize the object being modded with this Message.
|
||||
|
||||
- Add support for modding 'None' so translation supports it
|
||||
- Trim the modded object, which can be a large dictionary, to only
|
||||
those keys that would actually be used in a translation
|
||||
- Snapshot the object being modded, in case the message is
|
||||
translated, it will be used as it was when the Message was created
|
||||
"""
|
||||
if other is None:
|
||||
params = (other,)
|
||||
elif isinstance(other, dict):
|
||||
# Merge the dictionaries
|
||||
# Copy each item in case one does not support deep copy.
|
||||
params = {}
|
||||
if isinstance(self.params, dict):
|
||||
for key, val in self.params.items():
|
||||
params[key] = self._copy_param(val)
|
||||
for key, val in other.items():
|
||||
params[key] = self._copy_param(val)
|
||||
else:
|
||||
params = self._copy_param(other)
|
||||
return params
|
||||
|
||||
def _copy_param(self, param):
|
||||
try:
|
||||
return copy.deepcopy(param)
|
||||
except Exception:
|
||||
# Fallback to casting to unicode this will handle the
|
||||
# python code-like objects that can't be deep-copied
|
||||
return six.text_type(param)
|
||||
|
||||
def __add__(self, other):
|
||||
msg = _('Message objects do not support addition.')
|
||||
raise TypeError(msg)
|
||||
|
||||
def __radd__(self, other):
|
||||
return self.__add__(other)
|
||||
|
||||
def __str__(self):
|
||||
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
|
||||
# and it expects specifically a UnicodeError in order to proceed.
|
||||
msg = _('Message objects do not support str() because they may '
|
||||
'contain non-ascii characters. '
|
||||
'Please use unicode() or translate() instead.')
|
||||
raise UnicodeError(msg)
|
||||
|
||||
|
||||
def get_available_languages(domain):
|
||||
"""Lists the available languages for the given translation domain.
|
||||
|
||||
:param domain: the domain to get languages for
|
||||
"""
|
||||
if domain in _AVAILABLE_LANGUAGES:
|
||||
return copy.copy(_AVAILABLE_LANGUAGES[domain])
|
||||
|
||||
localedir = '%s_LOCALEDIR' % domain.upper()
|
||||
find = lambda x: gettext.find(domain,
|
||||
localedir=os.environ.get(localedir),
|
||||
languages=[x])
|
||||
|
||||
# NOTE(mrodden): en_US should always be available (and first in case
|
||||
# order matters) since our in-line message strings are en_US
|
||||
language_list = ['en_US']
|
||||
# NOTE(luisg): Babel <1.0 used a function called list(), which was
|
||||
# renamed to locale_identifiers() in >=1.0, the requirements master list
|
||||
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
|
||||
# this check when the master list updates to >=1.0, and update all projects
|
||||
list_identifiers = (getattr(localedata, 'list', None) or
|
||||
getattr(localedata, 'locale_identifiers'))
|
||||
locale_identifiers = list_identifiers()
|
||||
|
||||
for i in locale_identifiers:
|
||||
if find(i) is not None:
|
||||
language_list.append(i)
|
||||
|
||||
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
|
||||
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
|
||||
# are perfectly legitimate locales:
|
||||
# https://github.com/mitsuhiko/babel/issues/37
|
||||
# In Babel 1.3 they fixed the bug and they support these locales, but
|
||||
# they are still not explicitly "listed" by locale_identifiers().
|
||||
# That is why we add the locales here explicitly if necessary so that
|
||||
# they are listed as supported.
|
||||
aliases = {'zh': 'zh_CN',
|
||||
'zh_Hant_HK': 'zh_HK',
|
||||
'zh_Hant': 'zh_TW',
|
||||
'fil': 'tl_PH'}
|
||||
for (locale, alias) in six.iteritems(aliases):
|
||||
if locale in language_list and alias not in language_list:
|
||||
language_list.append(alias)
|
||||
|
||||
_AVAILABLE_LANGUAGES[domain] = language_list
|
||||
return copy.copy(language_list)
|
||||
|
||||
|
||||
def translate(obj, desired_locale=None):
|
||||
"""Gets the translated unicode representation of the given object.
|
||||
|
||||
If the object is not translatable it is returned as-is.
|
||||
If the locale is None the object is translated to the system locale.
|
||||
|
||||
:param obj: the object to translate
|
||||
:param desired_locale: the locale to translate the message to, if None the
|
||||
default system locale will be used
|
||||
:returns: the translated object in unicode, or the original object if
|
||||
it could not be translated
|
||||
"""
|
||||
message = obj
|
||||
if not isinstance(message, Message):
|
||||
# If the object to translate is not already translatable,
|
||||
# let's first get its unicode representation
|
||||
message = six.text_type(obj)
|
||||
if isinstance(message, Message):
|
||||
# Even after unicoding() we still need to check if we are
|
||||
# running with translatable unicode before translating
|
||||
return message.translate(desired_locale)
|
||||
return obj
|
||||
|
||||
|
||||
def _translate_args(args, desired_locale=None):
|
||||
"""Translates all the translatable elements of the given arguments object.
|
||||
|
||||
This method is used for translating the translatable values in method
|
||||
arguments which include values of tuples or dictionaries.
|
||||
If the object is not a tuple or a dictionary the object itself is
|
||||
translated if it is translatable.
|
||||
|
||||
If the locale is None the object is translated to the system locale.
|
||||
|
||||
:param args: the args to translate
|
||||
:param desired_locale: the locale to translate the args to, if None the
|
||||
default system locale will be used
|
||||
:returns: a new args object with the translated contents of the original
|
||||
"""
|
||||
if isinstance(args, tuple):
|
||||
return tuple(translate(v, desired_locale) for v in args)
|
||||
if isinstance(args, dict):
|
||||
translated_dict = {}
|
||||
for (k, v) in six.iteritems(args):
|
||||
translated_v = translate(v, desired_locale)
|
||||
translated_dict[k] = translated_v
|
||||
return translated_dict
|
||||
return translate(args, desired_locale)
|
||||
|
||||
|
||||
class TranslationHandler(handlers.MemoryHandler):
|
||||
"""Handler that translates records before logging them.
|
||||
|
||||
The TranslationHandler takes a locale and a target logging.Handler object
|
||||
to forward LogRecord objects to after translating them. This handler
|
||||
depends on Message objects being logged, instead of regular strings.
|
||||
|
||||
The handler can be configured declaratively in the logging.conf as follows:
|
||||
|
||||
[handlers]
|
||||
keys = translatedlog, translator
|
||||
|
||||
[handler_translatedlog]
|
||||
class = handlers.WatchedFileHandler
|
||||
args = ('/var/log/api-localized.log',)
|
||||
formatter = context
|
||||
|
||||
[handler_translator]
|
||||
class = openstack.common.log.TranslationHandler
|
||||
target = translatedlog
|
||||
args = ('zh_CN',)
|
||||
|
||||
If the specified locale is not available in the system, the handler will
|
||||
log in the default locale.
|
||||
"""
|
||||
|
||||
def __init__(self, locale=None, target=None):
|
||||
"""Initialize a TranslationHandler
|
||||
|
||||
:param locale: locale to use for translating messages
|
||||
:param target: logging.Handler object to forward
|
||||
LogRecord objects to after translation
|
||||
"""
|
||||
# NOTE(luisg): In order to allow this handler to be a wrapper for
|
||||
# other handlers, such as a FileHandler, and still be able to
|
||||
# configure it using logging.conf, this handler has to extend
|
||||
# MemoryHandler because only the MemoryHandlers' logging.conf
|
||||
# parsing is implemented such that it accepts a target handler.
|
||||
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
|
||||
self.locale = locale
|
||||
|
||||
def setFormatter(self, fmt):
|
||||
self.target.setFormatter(fmt)
|
||||
|
||||
def emit(self, record):
|
||||
# We save the message from the original record to restore it
|
||||
# after translation, so other handlers are not affected by this
|
||||
original_msg = record.msg
|
||||
original_args = record.args
|
||||
|
||||
try:
|
||||
self._translate_and_log_record(record)
|
||||
finally:
|
||||
record.msg = original_msg
|
||||
record.args = original_args
|
||||
|
||||
def _translate_and_log_record(self, record):
|
||||
record.msg = translate(record.msg, self.locale)
|
||||
|
||||
# In addition to translating the message, we also need to translate
|
||||
# arguments that were passed to the log method that were not part
|
||||
# of the main message e.g., log.info(_('Some message %s'), this_one))
|
||||
record.args = _translate_args(record.args, self.locale)
|
||||
|
||||
self.target.emit(record)
|
|
@ -1,73 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Import related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
def import_class(import_str):
|
||||
"""Returns a class from a string including module and class."""
|
||||
mod_str, _sep, class_str = import_str.rpartition('.')
|
||||
try:
|
||||
__import__(mod_str)
|
||||
return getattr(sys.modules[mod_str], class_str)
|
||||
except (ValueError, AttributeError):
|
||||
raise ImportError('Class %s cannot be found (%s)' %
|
||||
(class_str,
|
||||
traceback.format_exception(*sys.exc_info())))
|
||||
|
||||
|
||||
def import_object(import_str, *args, **kwargs):
|
||||
"""Import a class and return an instance of it."""
|
||||
return import_class(import_str)(*args, **kwargs)
|
||||
|
||||
|
||||
def import_object_ns(name_space, import_str, *args, **kwargs):
|
||||
"""Tries to import object from default namespace.
|
||||
|
||||
Imports a class and returns an instance of it, first by trying
|
||||
to find the class in a default namespace, then falling back to
|
||||
a full path if not found in the default namespace.
|
||||
"""
|
||||
import_value = "%s.%s" % (name_space, import_str)
|
||||
try:
|
||||
return import_class(import_value)(*args, **kwargs)
|
||||
except ImportError:
|
||||
return import_class(import_str)(*args, **kwargs)
|
||||
|
||||
|
||||
def import_module(import_str):
|
||||
"""Import a module."""
|
||||
__import__(import_str)
|
||||
return sys.modules[import_str]
|
||||
|
||||
|
||||
def import_versioned_module(version, submodule=None):
|
||||
module = 'kite.v%s' % version
|
||||
if submodule:
|
||||
module = '.'.join((module, submodule))
|
||||
return import_module(module)
|
||||
|
||||
|
||||
def try_import(import_str, default=None):
|
||||
"""Try to import a module and if it fails return default."""
|
||||
try:
|
||||
return import_module(import_str)
|
||||
except ImportError:
|
||||
return default
|
|
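A hedged sketch of how these helpers are typically used; the dotted paths and file names below are illustrative only:

from kite.openstack.common import importutils

# Import a class by its dotted path and instantiate it.
handler_cls = importutils.import_class('logging.handlers.WatchedFileHandler')
handler = handler_cls('/tmp/example.log')

# import_object() collapses the two steps above into one call.
handler = importutils.import_object('logging.handlers.WatchedFileHandler',
                                    '/tmp/example.log')

# try_import() lets optional dependencies degrade gracefully, as this
# repository does with netaddr in jsonutils.
netaddr = importutils.try_import('netaddr')
if netaddr is None:
    pass  # fall back to a pure-Python code path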
@@ -1,174 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
'''
|
||||
JSON related utilities.
|
||||
|
||||
This module provides a few things:
|
||||
|
||||
1) A handy function for getting an object down to something that can be
|
||||
JSON serialized. See to_primitive().
|
||||
|
||||
2) Wrappers around loads() and dumps(). The dumps() wrapper will
|
||||
automatically use to_primitive() for you if needed.
|
||||
|
||||
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
|
||||
is available.
|
||||
'''
|
||||
|
||||
|
||||
import datetime
|
||||
import functools
|
||||
import inspect
|
||||
import itertools
|
||||
import json
|
||||
|
||||
import six
|
||||
import six.moves.xmlrpc_client as xmlrpclib
|
||||
|
||||
from kite.openstack.common import gettextutils
|
||||
from kite.openstack.common import importutils
|
||||
from kite.openstack.common import timeutils
|
||||
|
||||
netaddr = importutils.try_import("netaddr")
|
||||
|
||||
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
|
||||
inspect.isfunction, inspect.isgeneratorfunction,
|
||||
inspect.isgenerator, inspect.istraceback, inspect.isframe,
|
||||
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
|
||||
inspect.isabstract]
|
||||
|
||||
_simple_types = (six.string_types + six.integer_types
|
||||
+ (type(None), bool, float))
|
||||
|
||||
|
||||
def to_primitive(value, convert_instances=False, convert_datetime=True,
|
||||
level=0, max_depth=3):
|
||||
"""Convert a complex object into primitives.
|
||||
|
||||
Handy for JSON serialization. We can optionally handle instances,
|
||||
but since this is a recursive function, we could have cyclical
|
||||
data structures.
|
||||
|
||||
To handle cyclical data structures we could track the actual objects
|
||||
visited in a set, but not all objects are hashable. Instead we just
|
||||
track the depth of the object inspections and don't go too deep.
|
||||
|
||||
Therefore, convert_instances=True is lossy ... be aware.
|
||||
|
||||
"""
|
||||
# handle obvious types first - order of basic types determined by running
|
||||
# full tests on nova project, resulting in the following counts:
|
||||
# 572754 <type 'NoneType'>
|
||||
# 460353 <type 'int'>
|
||||
# 379632 <type 'unicode'>
|
||||
# 274610 <type 'str'>
|
||||
# 199918 <type 'dict'>
|
||||
# 114200 <type 'datetime.datetime'>
|
||||
# 51817 <type 'bool'>
|
||||
# 26164 <type 'list'>
|
||||
# 6491 <type 'float'>
|
||||
# 283 <type 'tuple'>
|
||||
# 19 <type 'long'>
|
||||
if isinstance(value, _simple_types):
|
||||
return value
|
||||
|
||||
if isinstance(value, datetime.datetime):
|
||||
if convert_datetime:
|
||||
return timeutils.strtime(value)
|
||||
else:
|
||||
return value
|
||||
|
||||
# value of itertools.count doesn't get caught by nasty_type_tests
|
||||
# and results in infinite loop when list(value) is called.
|
||||
if type(value) == itertools.count:
|
||||
return six.text_type(value)
|
||||
|
||||
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
|
||||
# tests that raise an exception in a mocked method that
|
||||
# has a @wrap_exception with a notifier will fail. If
|
||||
# we up the dependency to 0.5.4 (when it is released) we
|
||||
# can remove this workaround.
|
||||
if getattr(value, '__module__', None) == 'mox':
|
||||
return 'mock'
|
||||
|
||||
if level > max_depth:
|
||||
return '?'
|
||||
|
||||
# The try block may not be necessary after the class check above,
|
||||
# but just in case ...
|
||||
try:
|
||||
recursive = functools.partial(to_primitive,
|
||||
convert_instances=convert_instances,
|
||||
convert_datetime=convert_datetime,
|
||||
level=level,
|
||||
max_depth=max_depth)
|
||||
if isinstance(value, dict):
|
||||
return dict((k, recursive(v)) for k, v in six.iteritems(value))
|
||||
elif isinstance(value, (list, tuple)):
|
||||
return [recursive(lv) for lv in value]
|
||||
|
||||
# It's not clear why xmlrpclib created their own DateTime type, but
|
||||
# for our purposes, make it a datetime type which is explicitly
|
||||
# handled
|
||||
if isinstance(value, xmlrpclib.DateTime):
|
||||
value = datetime.datetime(*tuple(value.timetuple())[:6])
|
||||
|
||||
if convert_datetime and isinstance(value, datetime.datetime):
|
||||
return timeutils.strtime(value)
|
||||
elif isinstance(value, gettextutils.Message):
|
||||
return value.data
|
||||
elif hasattr(value, 'iteritems'):
|
||||
return recursive(dict(value.iteritems()), level=level + 1)
|
||||
elif hasattr(value, '__iter__'):
|
||||
return recursive(list(value))
|
||||
elif convert_instances and hasattr(value, '__dict__'):
|
||||
# Likely an instance of something. Watch for cycles.
|
||||
# Ignore class member vars.
|
||||
return recursive(value.__dict__, level=level + 1)
|
||||
elif netaddr and isinstance(value, netaddr.IPAddress):
|
||||
return six.text_type(value)
|
||||
else:
|
||||
if any(test(value) for test in _nasty_type_tests):
|
||||
return six.text_type(value)
|
||||
return value
|
||||
except TypeError:
|
||||
# Class objects are tricky since they may define something like
|
||||
# __iter__ defined but it isn't callable as list().
|
||||
return six.text_type(value)
|
||||
|
||||
|
||||
def dumps(value, default=to_primitive, **kwargs):
|
||||
return json.dumps(value, default=default, **kwargs)
|
||||
|
||||
|
||||
def loads(s):
|
||||
return json.loads(s)
|
||||
|
||||
|
||||
def load(s):
|
||||
return json.load(s)
|
||||
|
||||
|
||||
try:
|
||||
import anyjson
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
anyjson._modules.append((__name__, 'dumps', TypeError,
|
||||
'loads', ValueError, 'load'))
|
||||
anyjson.force_implementation(__name__)
|
|
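A short usage sketch of the wrappers above; the payload is invented and only shows how to_primitive() is applied implicitly through the default= hook:

import datetime

from kite.openstack.common import jsonutils

payload = {
    'name': 'example-key',
    'created_at': datetime.datetime(2014, 1, 1, 12, 0, 0),
    'endpoints': ('api', 'worker'),
}

# dumps() passes to_primitive() as the default= hook, so the datetime
# above is reduced to a JSON-friendly string automatically.
encoded = jsonutils.dumps(payload)
decoded = jsonutils.loads(encoded)
assert decoded['name'] == 'example-key'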
@@ -1,45 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Local storage of variables using weak references"""
|
||||
|
||||
import threading
|
||||
import weakref
|
||||
|
||||
|
||||
class WeakLocal(threading.local):
|
||||
def __getattribute__(self, attr):
|
||||
rval = super(WeakLocal, self).__getattribute__(attr)
|
||||
if rval:
|
||||
# NOTE(mikal): this bit is confusing. What is stored is a weak
|
||||
# reference, not the value itself. We therefore need to lookup
|
||||
# the weak reference and return the inner value here.
|
||||
rval = rval()
|
||||
return rval
|
||||
|
||||
def __setattr__(self, attr, value):
|
||||
value = weakref.ref(value)
|
||||
return super(WeakLocal, self).__setattr__(attr, value)
|
||||
|
||||
|
||||
# NOTE(mikal): the name "store" should be deprecated in the future
|
||||
store = WeakLocal()
|
||||
|
||||
# A "weak" store uses weak references and allows an object to fall out of scope
|
||||
# when it falls out of scope in the code that uses the thread local storage. A
|
||||
# "strong" store will hold a reference to the object so that it never falls out
|
||||
# of scope.
|
||||
weak_store = WeakLocal()
|
||||
strong_store = threading.local()
|
|
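A minimal sketch of the weak versus strong store semantics described above; the RequestContext class is a stand-in for whatever context object a caller stores:

from kite.openstack.common import local


class RequestContext(object):
    def __init__(self, request_id):
        self.request_id = request_id


context = RequestContext('req-123')

# weak_store keeps only a weak reference, so the context remains
# retrievable only while other code still holds `context`.
local.weak_store.context = context
assert local.weak_store.context is context

# strong_store keeps the object alive for the lifetime of the thread.
local.strong_store.context = RequestContext('req-456')
assert local.strong_store.context.request_id == 'req-456'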
@@ -1,377 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import fcntl
|
||||
import functools
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import weakref
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from kite.openstack.common import fileutils
|
||||
from kite.openstack.common.gettextutils import _, _LE, _LI
|
||||
from kite.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
util_opts = [
|
||||
cfg.BoolOpt('disable_process_locking', default=False,
|
||||
help='Whether to disable inter-process locks'),
|
||||
cfg.StrOpt('lock_path',
|
||||
default=os.environ.get("KITE_LOCK_PATH"),
|
||||
help=('Directory to use for lock files.'))
|
||||
]
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(util_opts)
|
||||
|
||||
|
||||
def set_defaults(lock_path):
|
||||
cfg.set_defaults(util_opts, lock_path=lock_path)
|
||||
|
||||
|
||||
class _FileLock(object):
|
||||
"""Lock implementation which allows multiple locks, working around
|
||||
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
|
||||
not require any cleanup. Since the lock is always held on a file
|
||||
descriptor rather than outside of the process, the lock gets dropped
|
||||
automatically if the process crashes, even if __exit__ is not executed.
|
||||
|
||||
There are no guarantees regarding usage by multiple green threads in a
|
||||
single process here. This lock works only between processes. Exclusive
|
||||
access between local threads should be achieved using the semaphores
|
||||
in the @synchronized decorator.
|
||||
|
||||
Note these locks are released when the descriptor is closed, so it's not
|
||||
safe to close the file descriptor while another green thread holds the
|
||||
lock. Just opening and closing the lock file can break synchronisation,
|
||||
so lock files must be accessed only using this abstraction.
|
||||
"""
|
||||
|
||||
def __init__(self, name):
|
||||
self.lockfile = None
|
||||
self.fname = name
|
||||
|
||||
def acquire(self):
|
||||
basedir = os.path.dirname(self.fname)
|
||||
|
||||
if not os.path.exists(basedir):
|
||||
fileutils.ensure_tree(basedir)
|
||||
LOG.info(_LI('Created lock path: %s'), basedir)
|
||||
|
||||
self.lockfile = open(self.fname, 'w')
|
||||
|
||||
while True:
|
||||
try:
|
||||
# Using non-blocking locks since green threads are not
|
||||
# patched to deal with blocking locking calls.
|
||||
# Also upon reading the MSDN docs for locking(), it seems
|
||||
# to have a laughable 10 attempts "blocking" mechanism.
|
||||
self.trylock()
|
||||
LOG.debug('Got file lock "%s"', self.fname)
|
||||
return True
|
||||
except IOError as e:
|
||||
if e.errno in (errno.EACCES, errno.EAGAIN):
|
||||
# external locks synchronise things like iptables
|
||||
# updates - give it some time to prevent busy spinning
|
||||
time.sleep(0.01)
|
||||
else:
|
||||
raise threading.ThreadError(_("Unable to acquire lock on"
|
||||
" `%(filename)s` due to"
|
||||
" %(exception)s") %
|
||||
{
|
||||
'filename': self.fname,
|
||||
'exception': e,
|
||||
})
|
||||
|
||||
def __enter__(self):
|
||||
self.acquire()
|
||||
return self
|
||||
|
||||
def release(self):
|
||||
try:
|
||||
self.unlock()
|
||||
self.lockfile.close()
|
||||
LOG.debug('Released file lock "%s"', self.fname)
|
||||
except IOError:
|
||||
LOG.exception(_LE("Could not release the acquired lock `%s`"),
|
||||
self.fname)
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.release()
|
||||
|
||||
def exists(self):
|
||||
return os.path.exists(self.fname)
|
||||
|
||||
def trylock(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def unlock(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class _WindowsLock(_FileLock):
|
||||
def trylock(self):
|
||||
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
|
||||
|
||||
def unlock(self):
|
||||
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
|
||||
|
||||
|
||||
class _FcntlLock(_FileLock):
|
||||
def trylock(self):
|
||||
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
|
||||
def unlock(self):
|
||||
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
|
||||
|
||||
|
||||
class _PosixLock(object):
|
||||
def __init__(self, name):
|
||||
# Hash the name because it's not valid to have POSIX semaphore
|
||||
# names with things like / in them. Then use base64 to encode
|
||||
# the digest() instead taking the hexdigest() because the
|
||||
# result is shorter and most systems can't have shm semaphore
|
||||
# names longer than 31 characters.
|
||||
h = hashlib.sha1()
|
||||
h.update(name.encode('ascii'))
|
||||
self.name = str((b'/' + base64.urlsafe_b64encode(
|
||||
h.digest())).decode('ascii'))
|
||||
|
||||
def acquire(self, timeout=None):
|
||||
self.semaphore = posix_ipc.Semaphore(self.name,
|
||||
flags=posix_ipc.O_CREAT,
|
||||
initial_value=1)
|
||||
self.semaphore.acquire(timeout)
|
||||
return self
|
||||
|
||||
def __enter__(self):
|
||||
self.acquire()
|
||||
return self
|
||||
|
||||
def release(self):
|
||||
self.semaphore.release()
|
||||
self.semaphore.close()
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.release()
|
||||
|
||||
def exists(self):
|
||||
try:
|
||||
semaphore = posix_ipc.Semaphore(self.name)
|
||||
except posix_ipc.ExistentialError:
|
||||
return False
|
||||
else:
|
||||
semaphore.close()
|
||||
return True
|
||||
|
||||
|
||||
if os.name == 'nt':
|
||||
import msvcrt
|
||||
InterProcessLock = _WindowsLock
|
||||
FileLock = _WindowsLock
|
||||
else:
|
||||
import base64
|
||||
import hashlib
|
||||
import posix_ipc
|
||||
InterProcessLock = _PosixLock
|
||||
FileLock = _FcntlLock
|
||||
|
||||
_semaphores = weakref.WeakValueDictionary()
|
||||
_semaphores_lock = threading.Lock()
|
||||
|
||||
|
||||
def _get_lock_path(name, lock_file_prefix, lock_path=None):
|
||||
# NOTE(mikal): the lock name cannot contain directory
|
||||
# separators
|
||||
name = name.replace(os.sep, '_')
|
||||
if lock_file_prefix:
|
||||
sep = '' if lock_file_prefix.endswith('-') else '-'
|
||||
name = '%s%s%s' % (lock_file_prefix, sep, name)
|
||||
|
||||
local_lock_path = lock_path or CONF.lock_path
|
||||
|
||||
if not local_lock_path:
|
||||
# NOTE(bnemec): Create a fake lock path for posix locks so we don't
|
||||
# unnecessarily raise the RequiredOptError below.
|
||||
if InterProcessLock is not _PosixLock:
|
||||
raise cfg.RequiredOptError('lock_path')
|
||||
local_lock_path = 'posixlock:/'
|
||||
|
||||
return os.path.join(local_lock_path, name)
|
||||
|
||||
|
||||
def external_lock(name, lock_file_prefix=None, lock_path=None):
|
||||
LOG.debug('Attempting to grab external lock "%(lock)s"',
|
||||
{'lock': name})
|
||||
|
||||
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
|
||||
|
||||
# NOTE(bnemec): If an explicit lock_path was passed to us then it
|
||||
# means the caller is relying on file-based locking behavior, so
|
||||
# we can't use posix locks for those calls.
|
||||
if lock_path:
|
||||
return FileLock(lock_file_path)
|
||||
return InterProcessLock(lock_file_path)
|
||||
|
||||
|
||||
def remove_external_lock_file(name, lock_file_prefix=None):
|
||||
"""Remove a external lock file when it's not used anymore
|
||||
This will be helpful when we have a lot of lock files
|
||||
"""
|
||||
with internal_lock(name):
|
||||
lock_file_path = _get_lock_path(name, lock_file_prefix)
|
||||
try:
|
||||
os.remove(lock_file_path)
|
||||
except OSError:
|
||||
LOG.info(_LI('Failed to remove file %(file)s'),
|
||||
{'file': lock_file_path})
|
||||
|
||||
|
||||
def internal_lock(name):
|
||||
with _semaphores_lock:
|
||||
try:
|
||||
sem = _semaphores[name]
|
||||
except KeyError:
|
||||
sem = threading.Semaphore()
|
||||
_semaphores[name] = sem
|
||||
|
||||
LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
|
||||
return sem
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
|
||||
"""Context based lock
|
||||
|
||||
This function yields a `threading.Semaphore` instance (if we don't use
|
||||
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
|
||||
True, in which case, it'll yield an InterProcessLock instance.
|
||||
|
||||
:param lock_file_prefix: The lock_file_prefix argument is used to provide
|
||||
lock files on disk with a meaningful prefix.
|
||||
|
||||
:param external: The external keyword argument denotes whether this lock
|
||||
should work across multiple processes. This means that if two different
|
||||
workers both run a method decorated with @synchronized('mylock',
|
||||
external=True), only one of them will execute at a time.
|
||||
"""
|
||||
int_lock = internal_lock(name)
|
||||
with int_lock:
|
||||
if external and not CONF.disable_process_locking:
|
||||
ext_lock = external_lock(name, lock_file_prefix, lock_path)
|
||||
with ext_lock:
|
||||
yield ext_lock
|
||||
else:
|
||||
yield int_lock
|
||||
|
||||
|
||||
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
|
||||
"""Synchronization decorator.
|
||||
|
||||
Decorating a method like so::
|
||||
|
||||
@synchronized('mylock')
|
||||
def foo(self, *args):
|
||||
...
|
||||
|
||||
ensures that only one thread will execute the foo method at a time.
|
||||
|
||||
Different methods can share the same lock::
|
||||
|
||||
@synchronized('mylock')
|
||||
def foo(self, *args):
|
||||
...
|
||||
|
||||
@synchronized('mylock')
|
||||
def bar(self, *args):
|
||||
...
|
||||
|
||||
This way only one of either foo or bar can be executing at a time.
|
||||
"""
|
||||
|
||||
def wrap(f):
|
||||
@functools.wraps(f)
|
||||
def inner(*args, **kwargs):
|
||||
try:
|
||||
with lock(name, lock_file_prefix, external, lock_path):
|
||||
LOG.debug('Got semaphore / lock "%(function)s"',
|
||||
{'function': f.__name__})
|
||||
return f(*args, **kwargs)
|
||||
finally:
|
||||
LOG.debug('Semaphore / lock released "%(function)s"',
|
||||
{'function': f.__name__})
|
||||
return inner
|
||||
return wrap
|
||||
|
||||
|
||||
def synchronized_with_prefix(lock_file_prefix):
|
||||
"""Partial object generator for the synchronization decorator.
|
||||
|
||||
Redefine @synchronized in each project like so::
|
||||
|
||||
(in nova/utils.py)
|
||||
from nova.openstack.common import lockutils
|
||||
|
||||
synchronized = lockutils.synchronized_with_prefix('nova-')
|
||||
|
||||
|
||||
(in nova/foo.py)
|
||||
from nova import utils
|
||||
|
||||
@utils.synchronized('mylock')
|
||||
def bar(self, *args):
|
||||
...
|
||||
|
||||
The lock_file_prefix argument is used to provide lock files on disk with a
|
||||
meaningful prefix.
|
||||
"""
|
||||
|
||||
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
|
||||
|
||||
|
||||
def main(argv):
|
||||
"""Create a dir for locks and pass it to command from arguments
|
||||
|
||||
If you run this:
|
||||
python -m openstack.common.lockutils python setup.py testr <etc>
|
||||
|
||||
a temporary directory will be created for all your locks and passed to all
|
||||
your tests in an environment variable. The temporary dir will be deleted
|
||||
afterwards and the return value will be preserved.
|
||||
"""
|
||||
|
||||
lock_dir = tempfile.mkdtemp()
|
||||
os.environ["KITE_LOCK_PATH"] = lock_dir
|
||||
try:
|
||||
ret_val = subprocess.call(argv[1:])
|
||||
finally:
|
||||
shutil.rmtree(lock_dir, ignore_errors=True)
|
||||
return ret_val
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv))
|
|
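A hedged usage sketch of the decorator and context manager above; the lock names, prefix, and lock_path are illustrative and would normally come from configuration:

from kite.openstack.common import lockutils

synchronized = lockutils.synchronized_with_prefix('kite-')


@synchronized('update-keys')
def update_keys():
    pass  # only one thread in this process runs this at a time


def rebuild_index():
    # external=True adds a cross-process lock; the lock_path here is
    # illustrative and would normally be CONF.lock_path.
    with lockutils.lock('rebuild-index', lock_file_prefix='kite-',
                        external=True, lock_path='/tmp/kite-locks'):
        pass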
@@ -1,712 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""OpenStack logging handler.
|
||||
|
||||
This module adds to logging functionality by adding the option to specify
|
||||
a context object when calling the various log methods. If the context object
|
||||
is not specified, default formatting is used. Additionally, an instance uuid
|
||||
may be passed as part of the log message, which is intended to make it easier
|
||||
for admins to find messages related to a specific instance.
|
||||
|
||||
It also allows setting of formatting information through conf.
|
||||
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import itertools
|
||||
import logging
|
||||
import logging.config
|
||||
import logging.handlers
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
from six import moves
|
||||
|
||||
from kite.openstack.common.gettextutils import _
|
||||
from kite.openstack.common import importutils
|
||||
from kite.openstack.common import jsonutils
|
||||
from kite.openstack.common import local
|
||||
|
||||
|
||||
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
|
||||
|
||||
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
|
||||
|
||||
# NOTE(ldbragst): Let's build a list of regex objects using the list of
|
||||
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
|
||||
# to the list of _SANITIZE_KEYS and we can generate regular expressions
|
||||
# for XML and JSON automatically.
|
||||
_SANITIZE_PATTERNS = []
|
||||
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
|
||||
r'(<%(key)s>).*?(</%(key)s>)',
|
||||
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
|
||||
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
|
||||
|
||||
for key in _SANITIZE_KEYS:
|
||||
for pattern in _FORMAT_PATTERNS:
|
||||
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
|
||||
_SANITIZE_PATTERNS.append(reg_ex)
|
||||
|
||||
|
||||
common_cli_opts = [
|
||||
cfg.BoolOpt('debug',
|
||||
short='d',
|
||||
default=False,
|
||||
help='Print debugging output (set logging level to '
|
||||
'DEBUG instead of default WARNING level).'),
|
||||
cfg.BoolOpt('verbose',
|
||||
short='v',
|
||||
default=False,
|
||||
help='Print more verbose output (set logging level to '
|
||||
'INFO instead of default WARNING level).'),
|
||||
]
|
||||
|
||||
logging_cli_opts = [
|
||||
cfg.StrOpt('log-config-append',
|
||||
metavar='PATH',
|
||||
deprecated_name='log-config',
|
||||
help='The name of logging configuration file. It does not '
|
||||
'disable existing loggers, but just appends specified '
|
||||
'logging configuration to any other existing logging '
|
||||
'options. Please see the Python logging module '
|
||||
'documentation for details on logging configuration '
|
||||
'files.'),
|
||||
cfg.StrOpt('log-format',
|
||||
default=None,
|
||||
metavar='FORMAT',
|
||||
help='DEPRECATED. '
|
||||
'A logging.Formatter log message format string which may '
|
||||
'use any of the available logging.LogRecord attributes. '
|
||||
'This option is deprecated. Please use '
|
||||
'logging_context_format_string and '
|
||||
'logging_default_format_string instead.'),
|
||||
cfg.StrOpt('log-date-format',
|
||||
default=_DEFAULT_LOG_DATE_FORMAT,
|
||||
metavar='DATE_FORMAT',
|
||||
help='Format string for %%(asctime)s in log records. '
|
||||
'Default: %(default)s'),
|
||||
cfg.StrOpt('log-file',
|
||||
metavar='PATH',
|
||||
deprecated_name='logfile',
|
||||
help='(Optional) Name of log file to output to. '
|
||||
'If no default is set, logging will go to stdout.'),
|
||||
cfg.StrOpt('log-dir',
|
||||
deprecated_name='logdir',
|
||||
help='(Optional) The base directory used for relative '
|
||||
'--log-file paths'),
|
||||
cfg.BoolOpt('use-syslog',
|
||||
default=False,
|
||||
help='Use syslog for logging. '
|
||||
'Existing syslog format is DEPRECATED during I, '
|
||||
'and then will be changed in J to honor RFC5424'),
|
||||
cfg.BoolOpt('use-syslog-rfc-format',
|
||||
# TODO(bogdando) remove or use True after existing
|
||||
# syslog format deprecation in J
|
||||
default=False,
|
||||
help='(Optional) Use syslog rfc5424 format for logging. '
|
||||
'If enabled, will add APP-NAME (RFC5424) before the '
|
||||
'MSG part of the syslog message. The old format '
|
||||
'without APP-NAME is deprecated in I, '
|
||||
'and will be removed in J.'),
|
||||
cfg.StrOpt('syslog-log-facility',
|
||||
default='LOG_USER',
|
||||
help='Syslog facility to receive log lines')
|
||||
]
|
||||
|
||||
generic_log_opts = [
|
||||
cfg.BoolOpt('use_stderr',
|
||||
default=True,
|
||||
help='Log output to standard error')
|
||||
]
|
||||
|
||||
log_opts = [
|
||||
cfg.StrOpt('logging_context_format_string',
|
||||
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
||||
'%(name)s [%(request_id)s %(user_identity)s] '
|
||||
'%(instance)s%(message)s',
|
||||
help='Format string to use for log messages with context'),
|
||||
cfg.StrOpt('logging_default_format_string',
|
||||
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
|
||||
'%(name)s [-] %(instance)s%(message)s',
|
||||
help='Format string to use for log messages without context'),
|
||||
cfg.StrOpt('logging_debug_format_suffix',
|
||||
default='%(funcName)s %(pathname)s:%(lineno)d',
|
||||
help='Data to append to log format when level is DEBUG'),
|
||||
cfg.StrOpt('logging_exception_prefix',
|
||||
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
|
||||
'%(instance)s',
|
||||
help='Prefix each line of exception output with this format'),
|
||||
cfg.ListOpt('default_log_levels',
|
||||
default=[
|
||||
'amqp=WARN',
|
||||
'amqplib=WARN',
|
||||
'boto=WARN',
|
||||
'qpid=WARN',
|
||||
'sqlalchemy=WARN',
|
||||
'suds=INFO',
|
||||
'iso8601=WARN',
|
||||
'requests.packages.urllib3.connectionpool=WARN'
|
||||
],
|
||||
help='List of logger=LEVEL pairs'),
|
||||
cfg.BoolOpt('publish_errors',
|
||||
default=False,
|
||||
help='Publish error events'),
|
||||
cfg.BoolOpt('fatal_deprecations',
|
||||
default=False,
|
||||
help='Make deprecations fatal'),
|
||||
|
||||
# NOTE(mikal): there are two options here because sometimes we are handed
|
||||
# a full instance (and could include more information), and other times we
|
||||
# are just handed a UUID for the instance.
|
||||
cfg.StrOpt('instance_format',
|
||||
default='[instance: %(uuid)s] ',
|
||||
help='If an instance is passed with the log message, format '
|
||||
'it like this'),
|
||||
cfg.StrOpt('instance_uuid_format',
|
||||
default='[instance: %(uuid)s] ',
|
||||
help='If an instance UUID is passed with the log message, '
|
||||
'format it like this'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_cli_opts(common_cli_opts)
|
||||
CONF.register_cli_opts(logging_cli_opts)
|
||||
CONF.register_opts(generic_log_opts)
|
||||
CONF.register_opts(log_opts)
|
||||
|
||||
# our new audit level
|
||||
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
|
||||
# module aware of it so it acts like other levels.
|
||||
logging.AUDIT = logging.INFO + 1
|
||||
logging.addLevelName(logging.AUDIT, 'AUDIT')
|
||||
|
||||
|
||||
try:
|
||||
NullHandler = logging.NullHandler
|
||||
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
|
||||
class NullHandler(logging.Handler):
|
||||
def handle(self, record):
|
||||
pass
|
||||
|
||||
def emit(self, record):
|
||||
pass
|
||||
|
||||
def createLock(self):
|
||||
self.lock = None
|
||||
|
||||
|
||||
def _dictify_context(context):
|
||||
if context is None:
|
||||
return None
|
||||
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
|
||||
context = context.to_dict()
|
||||
return context
|
||||
|
||||
|
||||
def _get_binary_name():
|
||||
return os.path.basename(inspect.stack()[-1][1])
|
||||
|
||||
|
||||
def _get_log_file_path(binary=None):
|
||||
logfile = CONF.log_file
|
||||
logdir = CONF.log_dir
|
||||
|
||||
if logfile and not logdir:
|
||||
return logfile
|
||||
|
||||
if logfile and logdir:
|
||||
return os.path.join(logdir, logfile)
|
||||
|
||||
if logdir:
|
||||
binary = binary or _get_binary_name()
|
||||
return '%s.log' % (os.path.join(logdir, binary),)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def mask_password(message, secret="***"):
|
||||
"""Replace password with 'secret' in message.
|
||||
|
||||
:param message: The string which includes security information.
|
||||
:param secret: value with which to replace passwords.
|
||||
:returns: The unicode value of message with the password fields masked.
|
||||
|
||||
For example:
|
||||
|
||||
>>> mask_password("'adminPass' : 'aaaaa'")
|
||||
"'adminPass' : '***'"
|
||||
>>> mask_password("'admin_pass' : 'aaaaa'")
|
||||
"'admin_pass' : '***'"
|
||||
>>> mask_password('"password" : "aaaaa"')
|
||||
'"password" : "***"'
|
||||
>>> mask_password("'original_password' : 'aaaaa'")
|
||||
"'original_password' : '***'"
|
||||
>>> mask_password("u'original_password' : u'aaaaa'")
|
||||
"u'original_password' : u'***'"
|
||||
"""
|
||||
message = six.text_type(message)
|
||||
|
||||
# NOTE(ldbragst): Check to see if anything in message contains any key
|
||||
# specified in _SANITIZE_KEYS, if not then just return the message since
|
||||
# we don't have to mask any passwords.
|
||||
if not any(key in message for key in _SANITIZE_KEYS):
|
||||
return message
|
||||
|
||||
secret = r'\g<1>' + secret + r'\g<2>'
|
||||
for pattern in _SANITIZE_PATTERNS:
|
||||
message = re.sub(pattern, secret, message)
|
||||
return message
|
||||
|
||||
|
||||
class BaseLoggerAdapter(logging.LoggerAdapter):
|
||||
|
||||
def audit(self, msg, *args, **kwargs):
|
||||
self.log(logging.AUDIT, msg, *args, **kwargs)
|
||||
|
||||
|
||||
class LazyAdapter(BaseLoggerAdapter):
|
||||
def __init__(self, name='unknown', version='unknown'):
|
||||
self._logger = None
|
||||
self.extra = {}
|
||||
self.name = name
|
||||
self.version = version
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
if not self._logger:
|
||||
self._logger = getLogger(self.name, self.version)
|
||||
return self._logger
|
||||
|
||||
|
||||
class ContextAdapter(BaseLoggerAdapter):
|
||||
warn = logging.LoggerAdapter.warning
|
||||
|
||||
def __init__(self, logger, project_name, version_string):
|
||||
self.logger = logger
|
||||
self.project = project_name
|
||||
self.version = version_string
|
||||
self._deprecated_messages_sent = dict()
|
||||
|
||||
@property
|
||||
def handlers(self):
|
||||
return self.logger.handlers
|
||||
|
||||
def deprecated(self, msg, *args, **kwargs):
|
||||
"""Call this method when a deprecated feature is used.
|
||||
|
||||
If the system is configured for fatal deprecations then the message
|
||||
is logged at the 'critical' level and :class:`DeprecatedConfig` will
|
||||
be raised.
|
||||
|
||||
Otherwise, the message will be logged (once) at the 'warn' level.
|
||||
|
||||
:raises: :class:`DeprecatedConfig` if the system is configured for
|
||||
fatal deprecations.
|
||||
|
||||
"""
|
||||
stdmsg = _("Deprecated: %s") % msg
|
||||
if CONF.fatal_deprecations:
|
||||
self.critical(stdmsg, *args, **kwargs)
|
||||
raise DeprecatedConfig(msg=stdmsg)
|
||||
|
||||
# Using a list because a tuple with dict can't be stored in a set.
|
||||
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
|
||||
|
||||
if args in sent_args:
|
||||
# Already logged this message, so don't log it again.
|
||||
return
|
||||
|
||||
sent_args.append(args)
|
||||
self.warn(stdmsg, *args, **kwargs)
|
||||
|
||||
def process(self, msg, kwargs):
|
||||
# NOTE(mrodden): catch any Message/other object and
|
||||
# coerce to unicode before they can get
|
||||
# to the python logging and possibly
|
||||
# cause string encoding trouble
|
||||
if not isinstance(msg, six.string_types):
|
||||
msg = six.text_type(msg)
|
||||
|
||||
if 'extra' not in kwargs:
|
||||
kwargs['extra'] = {}
|
||||
extra = kwargs['extra']
|
||||
|
||||
context = kwargs.pop('context', None)
|
||||
if not context:
|
||||
context = getattr(local.store, 'context', None)
|
||||
if context:
|
||||
extra.update(_dictify_context(context))
|
||||
|
||||
instance = kwargs.pop('instance', None)
|
||||
instance_uuid = (extra.get('instance_uuid') or
|
||||
kwargs.pop('instance_uuid', None))
|
||||
instance_extra = ''
|
||||
if instance:
|
||||
instance_extra = CONF.instance_format % instance
|
||||
elif instance_uuid:
|
||||
instance_extra = (CONF.instance_uuid_format
|
||||
% {'uuid': instance_uuid})
|
||||
extra['instance'] = instance_extra
|
||||
|
||||
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
|
||||
|
||||
extra['project'] = self.project
|
||||
extra['version'] = self.version
|
||||
extra['extra'] = extra.copy()
|
||||
return msg, kwargs
|
||||
|
||||
|
||||
class JSONFormatter(logging.Formatter):
|
||||
def __init__(self, fmt=None, datefmt=None):
|
||||
# NOTE(jkoelker) we ignore the fmt argument, but it's still there
|
||||
# since logging.config.fileConfig passes it.
|
||||
self.datefmt = datefmt
|
||||
|
||||
def formatException(self, ei, strip_newlines=True):
|
||||
lines = traceback.format_exception(*ei)
|
||||
if strip_newlines:
|
||||
lines = [moves.filter(
|
||||
lambda x: x,
|
||||
line.rstrip().splitlines()) for line in lines]
|
||||
lines = list(itertools.chain(*lines))
|
||||
return lines
|
||||
|
||||
def format(self, record):
|
||||
message = {'message': record.getMessage(),
|
||||
'asctime': self.formatTime(record, self.datefmt),
|
||||
'name': record.name,
|
||||
'msg': record.msg,
|
||||
'args': record.args,
|
||||
'levelname': record.levelname,
|
||||
'levelno': record.levelno,
|
||||
'pathname': record.pathname,
|
||||
'filename': record.filename,
|
||||
'module': record.module,
|
||||
'lineno': record.lineno,
|
||||
'funcname': record.funcName,
|
||||
'created': record.created,
|
||||
'msecs': record.msecs,
|
||||
'relative_created': record.relativeCreated,
|
||||
'thread': record.thread,
|
||||
'thread_name': record.threadName,
|
||||
'process_name': record.processName,
|
||||
'process': record.process,
|
||||
'traceback': None}
|
||||
|
||||
if hasattr(record, 'extra'):
|
||||
message['extra'] = record.extra
|
||||
|
||||
if record.exc_info:
|
||||
message['traceback'] = self.formatException(record.exc_info)
|
||||
|
||||
return jsonutils.dumps(message)
|
||||
|
||||
|
||||
def _create_logging_excepthook(product_name):
|
||||
def logging_excepthook(exc_type, value, tb):
|
||||
extra = {}
|
||||
if CONF.verbose or CONF.debug:
|
||||
extra['exc_info'] = (exc_type, value, tb)
|
||||
getLogger(product_name).critical(
|
||||
"".join(traceback.format_exception_only(exc_type, value)),
|
||||
**extra)
|
||||
return logging_excepthook
|
||||
|
||||
|
||||
class LogConfigError(Exception):
|
||||
|
||||
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
|
||||
|
||||
def __init__(self, log_config, err_msg):
|
||||
self.log_config = log_config
|
||||
self.err_msg = err_msg
|
||||
|
||||
def __str__(self):
|
||||
return self.message % dict(log_config=self.log_config,
|
||||
err_msg=self.err_msg)
|
||||
|
||||
|
||||
def _load_log_config(log_config_append):
|
||||
try:
|
||||
logging.config.fileConfig(log_config_append,
|
||||
disable_existing_loggers=False)
|
||||
except moves.configparser.Error as exc:
|
||||
raise LogConfigError(log_config_append, str(exc))
|
||||
|
||||
|
||||
def setup(product_name, version='unknown'):
|
||||
"""Setup logging."""
|
||||
if CONF.log_config_append:
|
||||
_load_log_config(CONF.log_config_append)
|
||||
else:
|
||||
_setup_logging_from_conf(product_name, version)
|
||||
sys.excepthook = _create_logging_excepthook(product_name)
|
||||
|
||||
|
||||
def set_defaults(logging_context_format_string):
|
||||
cfg.set_defaults(log_opts,
|
||||
logging_context_format_string=
|
||||
logging_context_format_string)
|
||||
|
||||
|
||||
def _find_facility_from_conf():
|
||||
facility_names = logging.handlers.SysLogHandler.facility_names
|
||||
facility = getattr(logging.handlers.SysLogHandler,
|
||||
CONF.syslog_log_facility,
|
||||
None)
|
||||
|
||||
if facility is None and CONF.syslog_log_facility in facility_names:
|
||||
facility = facility_names.get(CONF.syslog_log_facility)
|
||||
|
||||
if facility is None:
|
||||
valid_facilities = facility_names.keys()
|
||||
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
|
||||
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
|
||||
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
|
||||
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
|
||||
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
|
||||
valid_facilities.extend(consts)
|
||||
raise TypeError(_('syslog facility must be one of: %s') %
|
||||
', '.join("'%s'" % fac
|
||||
for fac in valid_facilities))
|
||||
|
||||
return facility
|
||||
|
||||
|
||||
class RFCSysLogHandler(logging.handlers.SysLogHandler):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.binary_name = _get_binary_name()
|
||||
super(RFCSysLogHandler, self).__init__(*args, **kwargs)
|
||||
|
||||
def format(self, record):
|
||||
msg = super(RFCSysLogHandler, self).format(record)
|
||||
msg = self.binary_name + ' ' + msg
|
||||
return msg
|
||||
|
||||
|
||||
def _setup_logging_from_conf(project, version):
|
||||
log_root = getLogger(None).logger
|
||||
for handler in log_root.handlers:
|
||||
log_root.removeHandler(handler)
|
||||
|
||||
if CONF.use_syslog:
|
||||
facility = _find_facility_from_conf()
|
||||
# TODO(bogdando) use the format provided by RFCSysLogHandler
|
||||
# after existing syslog format deprecation in J
|
||||
if CONF.use_syslog_rfc_format:
|
||||
syslog = RFCSysLogHandler(address='/dev/log',
|
||||
facility=facility)
|
||||
else:
|
||||
syslog = logging.handlers.SysLogHandler(address='/dev/log',
|
||||
facility=facility)
|
||||
log_root.addHandler(syslog)
|
||||
|
||||
logpath = _get_log_file_path()
|
||||
if logpath:
|
||||
filelog = logging.handlers.WatchedFileHandler(logpath)
|
||||
log_root.addHandler(filelog)
|
||||
|
||||
if CONF.use_stderr:
|
||||
streamlog = ColorHandler()
|
||||
log_root.addHandler(streamlog)
|
||||
|
||||
elif not logpath:
|
||||
# pass sys.stdout as a positional argument
|
||||
# python2.6 calls the argument strm, in 2.7 it's stream
|
||||
streamlog = logging.StreamHandler(sys.stdout)
|
||||
log_root.addHandler(streamlog)
|
||||
|
||||
if CONF.publish_errors:
|
||||
handler = importutils.import_object(
|
||||
"kite.openstack.common.log_handler.PublishErrorsHandler",
|
||||
logging.ERROR)
|
||||
log_root.addHandler(handler)
|
||||
|
||||
datefmt = CONF.log_date_format
|
||||
for handler in log_root.handlers:
|
||||
# NOTE(alaski): CONF.log_format overrides everything currently. This
|
||||
# should be deprecated in favor of context aware formatting.
|
||||
if CONF.log_format:
|
||||
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
|
||||
datefmt=datefmt))
|
||||
log_root.info('Deprecated: log_format is now deprecated and will '
|
||||
'be removed in the next release')
|
||||
else:
|
||||
handler.setFormatter(ContextFormatter(project=project,
|
||||
version=version,
|
||||
datefmt=datefmt))
|
||||
|
||||
if CONF.debug:
|
||||
log_root.setLevel(logging.DEBUG)
|
||||
elif CONF.verbose:
|
||||
log_root.setLevel(logging.INFO)
|
||||
else:
|
||||
log_root.setLevel(logging.WARNING)
|
||||
|
||||
for pair in CONF.default_log_levels:
|
||||
mod, _sep, level_name = pair.partition('=')
|
||||
level = logging.getLevelName(level_name)
|
||||
logger = logging.getLogger(mod)
|
||||
logger.setLevel(level)
|
||||
|
||||
_loggers = {}
|
||||
|
||||
|
||||
def getLogger(name='unknown', version='unknown'):
|
||||
if name not in _loggers:
|
||||
_loggers[name] = ContextAdapter(logging.getLogger(name),
|
||||
name,
|
||||
version)
|
||||
return _loggers[name]
|
||||
|
||||
|
||||
def getLazyLogger(name='unknown', version='unknown'):
|
||||
"""Returns lazy logger.
|
||||
|
||||
Creates a pass-through logger that does not create the real logger
|
||||
until it is really needed and delegates all calls to the real logger
|
||||
once it is created.
|
||||
"""
|
||||
return LazyAdapter(name, version)
|
||||
|
||||
|
||||
class WritableLogger(object):
|
||||
"""A thin wrapper that responds to `write` and logs."""
|
||||
|
||||
def __init__(self, logger, level=logging.INFO):
|
||||
self.logger = logger
|
||||
self.level = level
|
||||
|
||||
def write(self, msg):
|
||||
self.logger.log(self.level, msg.rstrip())
|
||||
|
||||
|
||||
class ContextFormatter(logging.Formatter):
|
||||
"""A context.RequestContext aware formatter configured through flags.
|
||||
|
||||
The flags used to set format strings are: logging_context_format_string
|
||||
and logging_default_format_string. You can also specify
|
||||
logging_debug_format_suffix to append extra formatting if the log level is
|
||||
debug.
|
||||
|
||||
For information about what variables are available for the formatter see:
|
||||
http://docs.python.org/library/logging.html#formatter
|
||||
|
||||
If available, uses the context value stored in TLS - local.store.context
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Initialize ContextFormatter instance
|
||||
|
||||
Takes additional keyword arguments which can be used in the message
|
||||
format string.
|
||||
|
||||
:keyword project: project name
|
||||
:type project: string
|
||||
:keyword version: project version
|
||||
:type version: string
|
||||
|
||||
"""
|
||||
|
||||
self.project = kwargs.pop('project', 'unknown')
|
||||
self.version = kwargs.pop('version', 'unknown')
|
||||
|
||||
logging.Formatter.__init__(self, *args, **kwargs)
|
||||
|
||||
def format(self, record):
|
||||
"""Uses contextstring if request_id is set, otherwise default."""
|
||||
|
||||
# store project info
|
||||
record.project = self.project
|
||||
record.version = self.version
|
||||
|
||||
# store request info
|
||||
context = getattr(local.store, 'context', None)
|
||||
if context:
|
||||
d = _dictify_context(context)
|
||||
for k, v in d.items():
|
||||
setattr(record, k, v)
|
||||
|
||||
# NOTE(sdague): default the fancier formatting params
|
||||
# to an empty string so we don't throw an exception if
|
||||
# they get used
|
||||
for key in ('instance', 'color', 'user_identity'):
|
||||
if key not in record.__dict__:
|
||||
record.__dict__[key] = ''
|
||||
|
||||
if record.__dict__.get('request_id'):
|
||||
self._fmt = CONF.logging_context_format_string
|
||||
else:
|
||||
self._fmt = CONF.logging_default_format_string
|
||||
|
||||
if (record.levelno == logging.DEBUG and
|
||||
CONF.logging_debug_format_suffix):
|
||||
self._fmt += " " + CONF.logging_debug_format_suffix
|
||||
|
||||
# Cache this on the record, Logger will respect our formatted copy
|
||||
if record.exc_info:
|
||||
record.exc_text = self.formatException(record.exc_info, record)
|
||||
return logging.Formatter.format(self, record)
|
||||
|
||||
def formatException(self, exc_info, record=None):
|
||||
"""Format exception output with CONF.logging_exception_prefix."""
|
||||
if not record:
|
||||
return logging.Formatter.formatException(self, exc_info)
|
||||
|
||||
stringbuffer = moves.StringIO()
|
||||
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
||||
None, stringbuffer)
|
||||
lines = stringbuffer.getvalue().split('\n')
|
||||
stringbuffer.close()
|
||||
|
||||
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
|
||||
record.asctime = self.formatTime(record, self.datefmt)
|
||||
|
||||
formatted_lines = []
|
||||
for line in lines:
|
||||
pl = CONF.logging_exception_prefix % record.__dict__
|
||||
fl = '%s%s' % (pl, line)
|
||||
formatted_lines.append(fl)
|
||||
return '\n'.join(formatted_lines)
|
||||
|
||||
|
||||
class ColorHandler(logging.StreamHandler):
|
||||
LEVEL_COLORS = {
|
||||
logging.DEBUG: '\033[00;32m', # GREEN
|
||||
logging.INFO: '\033[00;36m', # CYAN
|
||||
logging.AUDIT: '\033[01;36m', # BOLD CYAN
|
||||
logging.WARN: '\033[01;33m', # BOLD YELLOW
|
||||
logging.ERROR: '\033[01;31m', # BOLD RED
|
||||
logging.CRITICAL: '\033[01;31m', # BOLD RED
|
||||
}
|
||||
|
||||
def format(self, record):
|
||||
record.color = self.LEVEL_COLORS[record.levelno]
|
||||
return logging.StreamHandler.format(self, record)
|
||||
|
||||
|
||||
class DeprecatedConfig(Exception):
|
||||
message = _("Fatal call to deprecated config: %(msg)s")
|
||||
|
||||
def __init__(self, msg):
|
||||
super(Exception, self).__init__(self.message % dict(msg=msg))
|
|
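A minimal sketch of how a service typically initializes and uses this logging module; the option values, logger names, and UUID below are placeholders, and configuration is assumed to be registered on the global CONF as above:

import sys

from oslo_config import cfg

from kite.openstack.common import log as logging

CONF = cfg.CONF

# Parse CLI/config options (--debug, --log-file, --use-syslog, ...) and
# then configure the root logger from them.
CONF(sys.argv[1:], project='kite')
logging.setup('kite')

LOG = logging.getLogger(__name__)
LOG.info('server starting')
LOG.audit('key request processed',
          instance_uuid='00000000-0000-0000-0000-000000000000')

# mask_password() scrubs credential fields before they reach the logs.
LOG.debug(logging.mask_password("body: {'password': 'secret123'}"))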
@@ -1,99 +0,0 @@
|
|||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
##
|
||||
## DO NOT MODIFY THIS FILE
|
||||
##
|
||||
## This file is being graduated to the kitetest library. Please make all
|
||||
## changes there, and only backport critical fixes here. - dhellmann
|
||||
##
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
|
||||
"""Common utilities used in testing"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import fixtures
|
||||
import testtools
|
||||
|
||||
_TRUE_VALUES = ('True', 'true', '1', 'yes')
|
||||
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
|
||||
|
||||
|
||||
class BaseTestCase(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(BaseTestCase, self).setUp()
|
||||
self._set_timeout()
|
||||
self._fake_output()
|
||||
self._fake_logs()
|
||||
self.useFixture(fixtures.NestedTempfile())
|
||||
self.useFixture(fixtures.TempHomeDir())
|
||||
self.tempdirs = []
|
||||
|
||||
def _set_timeout(self):
|
||||
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
|
||||
try:
|
||||
test_timeout = int(test_timeout)
|
||||
except ValueError:
|
||||
# If timeout value is invalid do not set a timeout.
|
||||
test_timeout = 0
|
||||
if test_timeout > 0:
|
||||
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
|
||||
|
||||
def _fake_output(self):
|
||||
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
|
||||
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
|
||||
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
|
||||
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
|
||||
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
|
||||
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
|
||||
|
||||
def _fake_logs(self):
|
||||
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
|
||||
level = logging.DEBUG
|
||||
else:
|
||||
level = logging.INFO
|
||||
capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES
|
||||
if capture_logs:
|
||||
self.useFixture(
|
||||
fixtures.FakeLogger(
|
||||
format=_LOG_FORMAT,
|
||||
level=level,
|
||||
nuke_handlers=capture_logs,
|
||||
)
|
||||
)
|
||||
else:
|
||||
logging.basicConfig(format=_LOG_FORMAT, level=level)
|
||||
|
||||
def create_tempfiles(self, files, ext='.conf'):
|
||||
tempfiles = []
|
||||
for (basename, contents) in files:
|
||||
if not os.path.isabs(basename):
|
||||
(fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext)
|
||||
else:
|
||||
path = basename + ext
|
||||
fd = os.open(path, os.O_CREAT | os.O_WRONLY)
|
||||
tempfiles.append(path)
|
||||
try:
|
||||
os.write(fd, contents)
|
||||
finally:
|
||||
os.close(fd)
|
||||
return tempfiles
|
|
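A hedged sketch of subclassing the base test case above, assuming the class lives in a module importable as kite.openstack.common.test; the file name and contents are illustrative:

from kite.openstack.common import test


class ConfigFileTestCase(test.BaseTestCase):

    def test_tempfile_has_conf_suffix(self):
        # create_tempfiles() writes through os.write(), so contents are
        # given as bytes.
        paths = self.create_tempfiles(
            [('sample', b'[DEFAULT]\nverbose = True\n')])
        self.assertTrue(paths[0].endswith('.conf'))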
@@ -1,210 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Time related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import calendar
|
||||
import datetime
|
||||
import time
|
||||
|
||||
import iso8601
|
||||
import six
|
||||
|
||||
|
||||
# ISO 8601 extended time format with microseconds
|
||||
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
|
||||
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
|
||||
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
|
||||
|
||||
|
||||
def isotime(at=None, subsecond=False):
|
||||
"""Stringify time in ISO 8601 format."""
|
||||
if not at:
|
||||
at = utcnow()
|
||||
st = at.strftime(_ISO8601_TIME_FORMAT
|
||||
if not subsecond
|
||||
else _ISO8601_TIME_FORMAT_SUBSECOND)
|
||||
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
|
||||
st += ('Z' if tz == 'UTC' else tz)
|
||||
return st
|
||||
|
||||
|
||||
def parse_isotime(timestr):
|
||||
"""Parse time from ISO 8601 format."""
|
||||
try:
|
||||
return iso8601.parse_date(timestr)
|
||||
except iso8601.ParseError as e:
|
||||
raise ValueError(six.text_type(e))
|
||||
except TypeError as e:
|
||||
raise ValueError(six.text_type(e))
|
||||
|
||||
|
||||
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
|
||||
"""Returns formatted utcnow."""
|
||||
if not at:
|
||||
at = utcnow()
|
||||
return at.strftime(fmt)
|
||||
|
||||
|
||||
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
|
||||
"""Turn a formatted time back into a datetime."""
|
||||
return datetime.datetime.strptime(timestr, fmt)
|
||||
|
||||
|
||||
def normalize_time(timestamp):
|
||||
"""Normalize time in arbitrary timezone to UTC naive object."""
|
||||
offset = timestamp.utcoffset()
|
||||
if offset is None:
|
||||
return timestamp
|
||||
return timestamp.replace(tzinfo=None) - offset
|
||||
|
||||
|
||||
def is_older_than(before, seconds):
|
||||
"""Return True if before is older than seconds."""
|
||||
if isinstance(before, six.string_types):
|
||||
before = parse_strtime(before).replace(tzinfo=None)
|
||||
else:
|
||||
before = before.replace(tzinfo=None)
|
||||
|
||||
return utcnow() - before > datetime.timedelta(seconds=seconds)
|
||||
|
||||
|
||||
def is_newer_than(after, seconds):
|
||||
"""Return True if after is newer than seconds."""
|
||||
if isinstance(after, six.string_types):
|
||||
after = parse_strtime(after).replace(tzinfo=None)
|
||||
else:
|
||||
after = after.replace(tzinfo=None)
|
||||
|
||||
return after - utcnow() > datetime.timedelta(seconds=seconds)
|
||||
|
||||
|
||||
def utcnow_ts():
|
||||
"""Timestamp version of our utcnow function."""
|
||||
if utcnow.override_time is None:
|
||||
# NOTE(kgriffs): This is several times faster
|
||||
# than going through calendar.timegm(...)
|
||||
return int(time.time())
|
||||
|
||||
return calendar.timegm(utcnow().timetuple())
|
||||
|
||||
|
||||
def utcnow():
|
||||
"""Overridable version of utils.utcnow."""
|
||||
if utcnow.override_time:
|
||||
try:
|
||||
return utcnow.override_time.pop(0)
|
||||
except AttributeError:
|
||||
return utcnow.override_time
|
||||
return datetime.datetime.utcnow()
|
||||
|
||||
|
||||
def iso8601_from_timestamp(timestamp):
|
||||
"""Returns a iso8601 formatted date from timestamp."""
|
||||
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
|
||||
|
||||
|
||||
utcnow.override_time = None
|
||||
|
||||
|
||||
def set_time_override(override_time=None):
|
||||
"""Overrides utils.utcnow.
|
||||
|
||||
Make it return a constant time or a list thereof, one at a time.
|
||||
|
||||
:param override_time: datetime instance or list thereof. If not
|
||||
given, defaults to the current UTC time.
|
||||
"""
|
||||
utcnow.override_time = override_time or datetime.datetime.utcnow()
|
||||
|
||||
|
||||
def advance_time_delta(timedelta):
|
||||
"""Advance overridden time using a datetime.timedelta."""
|
||||
assert utcnow.override_time is not None
|
||||
try:
|
||||
for dt in utcnow.override_time:
|
||||
dt += timedelta
|
||||
except TypeError:
|
||||
utcnow.override_time += timedelta
|
||||
|
||||
|
||||
def advance_time_seconds(seconds):
|
||||
"""Advance overridden time by seconds."""
|
||||
advance_time_delta(datetime.timedelta(0, seconds))
|
||||
|
||||
|
||||
def clear_time_override():
|
||||
"""Remove the overridden time."""
|
||||
utcnow.override_time = None
|
||||
|
||||
|
||||
def marshall_now(now=None):
|
||||
"""Make an rpc-safe datetime with microseconds.
|
||||
|
||||
Note: tzinfo is stripped, but not required for relative times.
|
||||
"""
|
||||
if not now:
|
||||
now = utcnow()
|
||||
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
|
||||
minute=now.minute, second=now.second,
|
||||
microsecond=now.microsecond)
|
||||
|
||||
|
||||
def unmarshall_time(tyme):
|
||||
"""Unmarshall a datetime dict."""
|
||||
return datetime.datetime(day=tyme['day'],
|
||||
month=tyme['month'],
|
||||
year=tyme['year'],
|
||||
hour=tyme['hour'],
|
||||
minute=tyme['minute'],
|
||||
second=tyme['second'],
|
||||
microsecond=tyme['microsecond'])
|
||||
|
||||
|
||||
def delta_seconds(before, after):
|
||||
"""Return the difference between two timing objects.
|
||||
|
||||
Compute the difference in seconds between two date, time, or
|
||||
datetime objects (as a float, to microsecond resolution).
|
||||
"""
|
||||
delta = after - before
|
||||
return total_seconds(delta)
|
||||
|
||||
|
||||
def total_seconds(delta):
|
||||
"""Return the total seconds of datetime.timedelta object.
|
||||
|
||||
Compute total seconds of datetime.timedelta, datetime.timedelta
|
||||
doesn't have method total_seconds in Python2.6, calculate it manually.
|
||||
"""
|
||||
try:
|
||||
return delta.total_seconds()
|
||||
except AttributeError:
|
||||
return ((delta.days * 24 * 3600) + delta.seconds +
|
||||
float(delta.microseconds) / (10 ** 6))
|
||||
|
||||
|
||||
def is_soon(dt, window):
|
||||
"""Determines if time is going to happen in the next window seconds.
|
||||
|
||||
:param dt: the time
|
||||
:param window: minimum seconds to remain to consider the time not soon
|
||||
|
||||
:return: True if expiration is within the given duration
|
||||
"""
|
||||
soon = (utcnow() + datetime.timedelta(seconds=window))
|
||||
return normalize_time(dt) <= soon
|
|
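For illustration, here is a minimal sketch (mine, not part of the deleted module) of how the override helpers above fit together when a test needs to freeze and then advance the clock that utcnow() reports:

# Illustrative sketch only; assumes the timeutils module above is importable.
import datetime

from kite.openstack.common import timeutils

timeutils.set_time_override(datetime.datetime(2014, 1, 1))
assert timeutils.utcnow() == datetime.datetime(2014, 1, 1)

timeutils.advance_time_seconds(30)      # shift the frozen clock forward
assert timeutils.is_older_than(datetime.datetime(2014, 1, 1), 29)

timeutils.clear_time_override()         # return to the real clock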
@ -1,15 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.openstack.common import gettextutils

gettextutils.install('kite', lazy=True)
@ -1,87 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan.testing
import webtest

from kite.common import crypto
from kite.common import storage
from kite.db import api as db_api
from kite.openstack.common import jsonutils
from kite.tests import base


def urljoin(*args):
    return "/%s" % "/".join([a.strip("/") for a in args])


def method_func(method):
    def func(self, url, **kwargs):
        kwargs['method'] = method
        return self.request(url, **kwargs)

    return func


class BaseTestCase(base.BaseTestCase):

    METHODS = {'get': webtest.TestApp.get,
               'post': webtest.TestApp.post,
               'put': webtest.TestApp.put,
               'patch': webtest.TestApp.patch,
               'delete': webtest.TestApp.delete,
               'options': webtest.TestApp.options,
               'head': webtest.TestApp.head}

    def setUp(self):
        super(BaseTestCase, self).setUp()

        self.config_fixture.config(backend='kvs', group='database')
        db_api.reset()

        root = 'kite.api.root.RootController'
        self.app_config = {
            'app': {
                'root': root,
                'modules': ['kite.api'],
            },
        }

        self.CRYPTO = crypto.CryptoManager.get_instance()
        self.DB = db_api.get_instance()
        self.STORAGE = storage.StorageManager.get_instance()

        self.app = pecan.testing.load_test_app(self.app_config)
        self.addCleanup(pecan.set_config, {}, overwrite=True)

    def request(self, url, method, **kwargs):
        try:
            json = kwargs.pop('json')
        except KeyError:
            pass
        else:
            kwargs['content_type'] = 'application/json'
            kwargs['params'] = jsonutils.dumps(json)

        try:
            func = self.METHODS[method.lower()]
        except KeyError:
            self.fail("Unsupported HTTP Method: %s" % method)
        else:
            return func(self.app, url, **kwargs)

    get = method_func('get')
    post = method_func('post')
    put = method_func('put')
    delete = method_func('delete')
    options = method_func('options')
    head = method_func('head')
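As a rough sketch of how a test consumes this harness (the test class and assertion below are hypothetical, but the keys path and payload mirror the key API tests further down in this diff):

# Hypothetical example test; not part of the deleted tree.
from kite.tests.api import base


class ExampleTest(base.BaseTestCase):

    def test_put_json_payload(self):
        # 'json=' is serialized by request() and sent as application/json;
        # 'status=' is handed to webtest, which asserts the response code.
        resp = self.put('/v1/keys/home.local',
                        json={'key': 'LDIVKc+m4uFdrzMoxIhQOQ=='},
                        status=200)
        self.assertEqual('home.local', resp.json['name'])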
@ -1,28 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.tests.api import base


class SimpleTest(base.BaseTestCase):

    def test_version(self):
        resp = self.get('/')
        versions = resp.json['versions']
        self.assertEqual(resp.status_code, 300)

        host = 'http://localhost'  # webtest default

        self.assertEqual(versions[0]['status'], 'stable')
        self.assertEqual(versions[0]['id'], 'v1.0')
        self.assertEqual(versions[0]['links'][0]['href'], '%s/v1/' % host)
        self.assertEqual(versions[0]['links'][0]['rel'], 'self')
@ -1,32 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.tests.api import base


def v1_url(*args):
    return base.urljoin('v1', *args)


class BaseTestCase(base.BaseTestCase):

    def get(self, url, *args, **kwargs):
        return super(BaseTestCase, self).get(v1_url(url), *args, **kwargs)

    def post(self, url, *args, **kwargs):
        return super(BaseTestCase, self).post(v1_url(url), *args, **kwargs)

    def put(self, url, *args, **kwargs):
        return super(BaseTestCase, self).put(v1_url(url), *args, **kwargs)

    def delete(self, url, *args, **kwargs):
        return super(BaseTestCase, self).delete(v1_url(url), *args, **kwargs)
@ -1,28 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.tests.api.v1 import base


class TestVersion(base.BaseTestCase):

    def test_versions(self):
        resp = self.get('/')
        version = resp.json['version']
        self.assertEqual(resp.status_code, 200)

        host = 'http://localhost'  # webtest default

        self.assertEqual(version['id'], 'v1.0')
        self.assertEqual(version['status'], 'stable')
        self.assertEqual(version['links'][0]['href'], '%s/v1/' % host)
        self.assertEqual(version['links'][0]['rel'], 'self')
@ -1,27 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kite.tests.api.v1 import base


class GroupCrudTest(base.BaseTestCase):

    def test_create_group(self):
        self.put('/groups/test-name', status=200)
        self.delete('/groups/test-name', status=204)

    def test_double_create_group(self):
        self.put('/groups/test-name', status=200)
        self.put('/groups/test-name', status=200)

    def test_delete_without_create_group(self):
        self.delete('/groups/test-name', status=204)
@ -1,58 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64

from kite.tests.api.v1 import base

DEFAULT_REQUESTOR = 'home.local'
REQUEST_KEY = base64.b64decode('LDIVKc+m4uFdrzMoxIhQOQ==')
REQUEST_KEY2 = base64.b64decode('LyyszflO/JZXLpPV0CtHdQ==')


class KeyApiTests(base.BaseTestCase):

    def test_key_setting(self):
        resp = self.put('keys/%s' % DEFAULT_REQUESTOR,
                        status=200,
                        json={'key': base64.b64encode(REQUEST_KEY)})

        key = self.DB.get_key(DEFAULT_REQUESTOR)

        self.assertNotEqual(REQUEST_KEY, key['key'])
        self.assertNotEqual(REQUEST_KEY, key['signature'])

        key_data = self.STORAGE.get_key(DEFAULT_REQUESTOR)

        self.assertEqual(REQUEST_KEY, key_data['key'])

        self.assertEqual(DEFAULT_REQUESTOR, resp.json['name'])
        self.assertEqual(key_data['generation'], resp.json['generation'])

    def test_override_key(self):
        resp1 = self.put('keys/%s' % DEFAULT_REQUESTOR,
                         status=200,
                         json={'key': base64.b64encode(REQUEST_KEY)})

        key1 = self.STORAGE.get_key(DEFAULT_REQUESTOR)

        self.assertEqual(REQUEST_KEY, key1['key'])
        self.assertEqual(resp1.json['generation'], key1['generation'])

        resp2 = self.put('keys/%s' % DEFAULT_REQUESTOR,
                         status=200,
                         json={'key': base64.b64encode(REQUEST_KEY2)})

        key2 = self.STORAGE.get_key(DEFAULT_REQUESTOR)

        self.assertEqual(REQUEST_KEY2, key2['key'])
        self.assertEqual(resp2.json['generation'], key2['generation'])
@ -1,181 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import datetime

import six

from kite.openstack.common.crypto import utils as cryptoutils
from kite.openstack.common import jsonutils
from kite.openstack.common import timeutils
from kite.tests.api.v1 import base

SOURCE_KEY = base64.b64decode('LDIVKc+m4uFdrzMoxIhQOQ==')
DEST_KEY = base64.b64decode('EEGfTxGFcZiT7oPO+brs+A==')

TEST_KEY = base64.b64decode('Jx5CVBcxuA86050355mTrg==')

DEFAULT_SOURCE = 'home.local'
DEFAULT_DEST = 'tests.openstack.remote'
DEFAULT_GROUP = 'home'
DEFAULT_NONCE = '42'


class TicketTest(base.BaseTestCase):

    def setUp(self):
        super(TicketTest, self).setUp()

        self.crypto = cryptoutils.SymmetricCrypto(
            enctype=self.CONF.crypto.enctype,
            hashtype=self.CONF.crypto.hashtype)

    def _ticket_metadata(self, source=DEFAULT_SOURCE,
                         destination=DEFAULT_DEST, nonce=DEFAULT_NONCE,
                         timestamp=None, b64encode=True):
        if not timestamp:
            timestamp = timeutils.utcnow()

        return {'source': source, 'destination': destination,
                'nonce': nonce, 'timestamp': timestamp}

    def _add_key(self, name, key=None, b64encode=True):
        if not key:
            if name == DEFAULT_SOURCE:
                key = SOURCE_KEY
            elif name == DEFAULT_DEST:
                key = DEST_KEY
            else:
                raise ValueError("No default key available")

        if b64encode:
            key = base64.b64encode(key)

        resp = self.put('keys/%s' % name,
                        status=200,
                        json={'key': key}).json

        return "%s:%s" % (resp['name'], resp['generation'])

    def _request_ticket(self, metadata=None, signature=None,
                        source=DEFAULT_SOURCE, destination=DEFAULT_DEST,
                        nonce=DEFAULT_NONCE, timestamp=None,
                        source_key=None, status=200):
        if not metadata:
            metadata = self._ticket_metadata(source=source,
                                             nonce=nonce,
                                             destination=destination,
                                             timestamp=timestamp)

        if not isinstance(metadata, six.text_type):
            metadata = base64.b64encode(jsonutils.dumps(metadata))

        if not signature:
            if not source_key and source == DEFAULT_SOURCE:
                source_key = SOURCE_KEY

            signature = self.crypto.sign(source_key, metadata)

        return self.post('tickets',
                         json={'metadata': metadata, 'signature': signature},
                         status=status)

    def test_valid_ticket(self):
        self._add_key(DEFAULT_SOURCE)
        self._add_key(DEFAULT_DEST)

        response = self._request_ticket().json

        b64m = response['metadata']
        metadata = jsonutils.loads(base64.b64decode(b64m))
        signature = response['signature']
        b64t = response['ticket']

        # check that the response signature was produced with the source key
        csig = self.crypto.sign(SOURCE_KEY, b64m + b64t)
        self.assertEqual(signature, csig)

        # decrypt the ticket blob if required; this is done by the source
        if metadata['encryption']:
            ticket = self.crypto.decrypt(SOURCE_KEY, b64t)

        ticket = jsonutils.loads(ticket)

        skey = base64.b64decode(ticket['skey'])
        ekey = base64.b64decode(ticket['ekey'])
        b64esek = ticket['esek']

        # the esek part is sent to the destination, so the destination
        # should be able to decrypt it from here.
        esek = self.crypto.decrypt(DEST_KEY, b64esek)
        esek = jsonutils.loads(esek)

        self.assertEqual(int(self.CONF.ticket_lifetime), esek['ttl'])

        # now we should be able to reconstruct skey and ekey from the
        # esek data
        info = '%s,%s,%s' % (metadata['source'], metadata['destination'],
                             esek['timestamp'])

        key = base64.b64decode(esek['key'])
        new_sig, new_key = self.CRYPTO.generate_keys(key, info)

        self.assertEqual(new_key, ekey)
        self.assertEqual(new_sig, skey)

    def test_missing_source_key(self):
        self._add_key(DEFAULT_DEST)
        self._request_ticket(status=404)

    def test_missing_dest_key(self):
        self._add_key(DEFAULT_SOURCE)
        self._request_ticket(status=404)

    def test_wrong_source_key(self):
        # install TEST_KEY but sign with SOURCE_KEY
        self._add_key(DEFAULT_SOURCE, TEST_KEY)
        self._add_key(DEFAULT_DEST)

        self._request_ticket(status=401)

    def test_invalid_signature(self):
        self._add_key(DEFAULT_SOURCE)
        self._add_key(DEFAULT_DEST)

        self._request_ticket(status=401, signature='bad-signature')

    def test_invalid_expired_request(self):
        self._add_key(DEFAULT_SOURCE)
        self._add_key(DEFAULT_DEST)

        timestamp = timeutils.utcnow() - datetime.timedelta(hours=5)

        self._request_ticket(status=401, timestamp=timestamp)

    def test_fails_on_garbage_metadata(self):
        self._request_ticket(metadata='garbage',
                             signature='signature',
                             status=400)

        self._request_ticket(metadata='{"json": "string"}',
                             signature='signature',
                             status=400)

    def test_missing_attributes_in_metadata(self):
        self._add_key(DEFAULT_SOURCE)
        self._add_key(DEFAULT_DEST)

        for attr in ['source', 'timestamp', 'destination', 'nonce']:
            metadata = self._ticket_metadata(b64encode=False)
            del metadata[attr]

            self._request_ticket(metadata=metadata, status=400)
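For orientation, the destination-side half of the exchange exercised in test_valid_ticket would look roughly like the fragment below; the variable names are the ones used in that test, and the decryption and key-derivation calls are taken directly from it, so treat this as a sketch rather than a definitive client implementation.

# Sketch of what the destination does with the esek it receives; b64esek,
# metadata, crypto and CRYPTO are assumed to be set up as in TicketTest.
esek = jsonutils.loads(crypto.decrypt(DEST_KEY, b64esek))

info = '%s,%s,%s' % (metadata['source'], metadata['destination'],
                     esek['timestamp'])
sig_key, enc_key = CRYPTO.generate_keys(base64.b64decode(esek['key']), info)
# sig_key and enc_key now match the skey/ekey that the source unpacked
# from its copy of the ticket.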
@ -1,47 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_config import cfg
from oslotest import base

from kite.common import crypto
from kite.common import service
from kite.common import storage
from kite.openstack.common.fixture import config
from kite.openstack.common.fixture import mockpatch

CONF = cfg.CONF
CONF.import_opt('master_key_file', 'kite.common.crypto', group='crypto')


class BaseTestCase(base.BaseTestCase):

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.config_fixture = self.useFixture(config.Config())
        self.CONF = self.config_fixture.conf

        storage.StorageManager.reset()
        crypto.CryptoManager.reset()

        self.mkey = os.urandom(crypto.CryptoManager.KEY_SIZE)
        patch = mockpatch.Patch(
            'kite.common.crypto.CryptoManager._load_master_key',
            new=lambda x: self.mkey)
        self.useFixture(patch)

        service.parse_args(args=[])

    def config(self, *args, **kwargs):
        self.config_fixture.config(*args, **kwargs)
Some files were not shown because too many files have changed in this diff.