Allow updating a resource-type (add attributes)

This change makes it possible to patch a resource type.

The patch payload format follows RFC 6902 (JSON Patch).

For now, only operation add on /attributes path is allowed.

Partial-Bug: #1615077

Change-Id: Ia88396dbe771f916f9e72d7072caa03d30a8e693
This commit is contained in:
Mehdi Abaakouk 2016-04-04 09:08:05 +02:00
parent cc38221984
commit 6163dfbde8
10 changed files with 397 additions and 26 deletions

View File

@ -368,6 +368,10 @@ It can also be deleted if no more resources are associated to it:
{{ scenarios['delete-resource-type']['doc'] }}
Attributes can be added:
{{ scenarios['patch-resource-type']['doc'] }}
Creating resource type means creation of new tables on the indexer backend.
This is a heavy operation that will lock some tables for a short amount of time.
When the resource type is created, its initial `state` is `creating`. When the

View File

@ -389,6 +389,20 @@
- name: list-resource-type
request: GET /v1/resource_type HTTP/1.1
- name: patch-resource-type
request: |
PATCH /v1/resource_type/my_custom_type HTTP/1.1
Content-Type: application/json-patch+json
[
{
"op": "add",
"path": "/attributes/awesome-stuff",
"value": {"type": "bool", "required": false}
}
]
- name: delete-resource-type
request: DELETE /v1/resource_type/my_custom_type HTTP/1.1

View File

@ -14,6 +14,7 @@
"create resource type": "role:admin",
"delete resource type": "role:admin",
"update resource type": "role:admin",
"list resource type": "",
"get resource type": "",

View File

@ -0,0 +1,74 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add updating resource type states
Revision ID: 27d2a1d205ff
Revises: 7e6f9d542f8b
Create Date: 2016-08-31 14:05:34.316496
"""
from alembic import op
import sqlalchemy as sa
from gnocchi.indexer import sqlalchemy_base
from gnocchi import utils
# revision identifiers, used by Alembic.
revision = '27d2a1d205ff'
down_revision = '7e6f9d542f8b'
branch_labels = None
depends_on = None
resource_type = sa.sql.table(
'resource_type',
sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp()))
def upgrade():
    """Add the "updating"/"updating_error" states and an updated_at column.

    The resource_type.state enum gains two new values, and the table gets
    a non-nullable "updated_at" timestamp backfilled with the current time
    for existing rows.
    """
    # Recreate the enum column with the two extra states.  This is enough
    # for backends that store enums inline; PostgreSQL needs the extra
    # ALTER TYPE handling below.
    op.alter_column('resource_type', 'state',
                    type_=sa.Enum("active", "creating",
                                  "creation_error", "deleting",
                                  "deletion_error", "updating",
                                  "updating_error",
                                  name="resource_type_state_enum"),
                    nullable=False,
                    server_default="creating")
    # NOTE(sileht): postgresql have a builtin ENUM type, so
    # just altering the column won't work.
    # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type
    # Does it break offline migration because we use get_bind() ?
    # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction,
    # we split the 'ALTER TYPE' operation into several steps.
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        op.execute("ALTER TYPE resource_type_state_enum ADD VALUE 'updating';")
        op.execute("ALTER TYPE resource_type_state_enum ADD VALUE "
                   "'updating_error';")
    # Add the column as nullable first so existing rows don't violate the
    # constraint, backfill every row, then tighten it to NOT NULL.
    op.add_column("resource_type",
                  sa.Column("updated_at",
                            sqlalchemy_base.PreciseTimestamp(),
                            nullable=True))
    op.execute(resource_type.update().values({'updated_at': utils.utcnow()}))
    op.alter_column("resource_type", "updated_at",
                    type_=sqlalchemy_base.PreciseTimestamp(),
                    nullable=False)

View File

@ -20,6 +20,8 @@ import os.path
import threading
import uuid
from alembic import migration
from alembic import operations
import oslo_db.api
from oslo_db import exception
from oslo_db.sqlalchemy import enginefacade
@ -106,8 +108,10 @@ class PerInstanceFacade(object):
class ResourceClassMapper(object):
def __init__(self):
# FIXME(sileht): 3 attributes, perhaps we need a better structure.
self._cache = {'generic': {'resource': base.Resource,
'history': base.ResourceHistory}}
'history': base.ResourceHistory,
'updated_at': utils.utcnow()}}
@staticmethod
def _build_class_mappers(resource_type, baseclass=None):
@ -127,14 +131,24 @@ class ResourceClassMapper(object):
{"__tablename__": ("%s_history" % tablename),
"__table_args__": tables_args})
return {'resource': resource_ext,
'history': resource_history_ext}
'history': resource_history_ext,
'updated_at': resource_type.updated_at}
def get_classes(self, resource_type):
# NOTE(sileht): We don't care about concurrency here because we allow
# sqlalchemy to override its global object with extend_existing=True
# this is safe because classname and tablename are uuid.
try:
return self._cache[resource_type.tablename]
mappers = self._cache[resource_type.tablename]
# Cache is outdated
if (resource_type.name != "generic"
and resource_type.updated_at > mappers['updated_at']):
for table_purpose in ['resource', 'history']:
Base.metadata.remove(Base.metadata.tables[
mappers[table_purpose].__tablename__])
del self._cache[resource_type.tablename]
raise KeyError
return mappers
except KeyError:
mapper = self._build_class_mappers(resource_type)
self._cache[resource_type.tablename] = mapper
@ -147,8 +161,8 @@ class ResourceClassMapper(object):
"creating")
mappers = self.get_classes(resource_type)
tables = [Base.metadata.tables[klass.__tablename__]
for klass in mappers.values()]
tables = [Base.metadata.tables[mappers["resource"].__tablename__],
Base.metadata.tables[mappers["history"].__tablename__]]
try:
with facade.writer_connection() as connection:
@ -187,8 +201,8 @@ class ResourceClassMapper(object):
mappers = self.get_classes(resource_type)
del self._cache[resource_type.tablename]
tables = [Base.metadata.tables[klass.__tablename__]
for klass in mappers.values()]
tables = [Base.metadata.tables[mappers['resource'].__tablename__],
Base.metadata.tables[mappers['history'].__tablename__]]
# NOTE(sileht): Base.metadata.drop_all doesn't
# issue CASCADE stuffs correctly at least on postgresql
@ -376,6 +390,48 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
resource_type.state = "active"
return resource_type
def update_resource_type(self, name, add_attributes=None):
    """Add new attributes to an existing resource type.

    :param name: name of the resource type to update.
    :param add_attributes: iterable of attribute objects (exposing
        ``name``, ``satype`` and ``required``) to add; no-op when empty.
    :return: the updated resource type, or None when there was nothing
        to do.
    """
    if not add_attributes:
        return
    # Take the "updating" lock: only an "active" resource type may be
    # updated; anything else raises UnexpectedResourceTypeState.
    self._set_resource_type_state(name, "updating", "active")
    try:
        with self.facade.independent_writer() as session:
            rt = self._get_resource_type(session, name)
            with self.facade.writer_connection() as connection:
                # Run the DDL through alembic's operations API on a raw
                # connection (NOTE(review): batch_alter_table presumably
                # chosen for backends with limited ALTER support —
                # confirm).
                ctx = migration.MigrationContext.configure(connection)
                op = operations.Operations(ctx)
                with op.batch_alter_table(rt.tablename) as batch_op:
                    for attr in add_attributes:
                        # TODO(sileht): When attr.required is True, we have
                        # to pass a default. rest layer currently protects
                        # us, required=True is not yet allowed
                        batch_op.add_column(sqlalchemy.Column(
                            attr.name, attr.satype,
                            nullable=not attr.required))
            # DDL succeeded: publish the new description and bump
            # updated_at so cached class mappers get invalidated.
            rt.state = "active"
            rt.updated_at = utils.utcnow()
            rt.attributes.extend(add_attributes)
            # FIXME(sileht): yeah that's weird but attributes is a custom
            # json column and 'extend' doesn't trigger sql update, this
            # enforce the update. I wonder if sqlalchemy provides something
            # on column description side.
            sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')
    except Exception:
        # NOTE(sileht): We fail the DDL, we have no way to automatically
        # recover, just set a particular state
        # TODO(sileht): Create a repair REST endpoint that delete
        # columns not existing in the database but in the resource type
        # description. This will allow to pass wrong update_error to active
        # state, which is currently not possible.
        self._set_resource_type_state(name, "updating_error")
        raise
    return rt
def get_resource_type(self, name):
with self.facade.independent_reader() as session:
return self._get_resource_type(session, name)
@ -387,12 +443,20 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
return resource_type
@retry_on_deadlock
def _set_resource_type_state(self, name, state):
def _set_resource_type_state(self, name, state,
                             expected_previous_state=None):
    """Atomically set the state of a resource type.

    When expected_previous_state is given, the UPDATE only matches if the
    row is currently in that state (optimistic compare-and-set); if the
    row exists but is in another state, UnexpectedResourceTypeState is
    raised with the actual state.
    """
    with self.facade.writer() as session:
        q = session.query(ResourceType)
        q = q.filter(ResourceType.name == name)
        if expected_previous_state is not None:
            q = q.filter(ResourceType.state == expected_previous_state)
        update = q.update({'state': state})
        if update == 0:
            # Nothing matched: either the state check failed or the
            # resource type does not exist at all.
            if expected_previous_state is not None:
                rt = session.query(ResourceType).get(name)
                if rt:
                    raise indexer.UnexpectedResourceTypeState(
                        name, expected_previous_state, rt.state)
            raise indexer.IndexerException(
                "Fail to set resource type state of %s to %s" %
                (name, state))
@ -474,7 +538,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
self._delete_resource_type(name)
def _resource_type_to_classes(self, session, name):
def _resource_type_to_mappers(self, session, name):
resource_type = self._get_resource_type(session, name)
if resource_type.state != "active":
raise indexer.UnexpectedResourceTypeState(
@ -642,7 +706,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
raise ValueError(
"Start timestamp cannot be after end timestamp")
with self.facade.writer() as session:
resource_cls = self._resource_type_to_classes(
resource_cls = self._resource_type_to_mappers(
session, resource_type)['resource']
r = resource_cls(
id=id,
@ -678,9 +742,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
create_revision=True,
**kwargs):
with self.facade.writer() as session:
classes = self._resource_type_to_classes(session, resource_type)
resource_cls = classes["resource"]
resource_history_cls = classes["history"]
mappers = self._resource_type_to_mappers(session, resource_type)
resource_cls = mappers["resource"]
resource_history_cls = mappers["history"]
try:
# NOTE(sileht): We use FOR UPDATE that is not galera friendly,
@ -799,7 +863,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
@retry_on_deadlock
def get_resource(self, resource_type, resource_id, with_metrics=False):
with self.facade.independent_reader() as session:
resource_cls = self._resource_type_to_classes(
resource_cls = self._resource_type_to_mappers(
session, resource_type)['resource']
q = session.query(
resource_cls).filter(
@ -809,9 +873,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
return q.first()
def _get_history_result_mapper(self, session, resource_type):
classes = self._resource_type_to_classes(session, resource_type)
resource_cls = classes['resource']
history_cls = classes['history']
mappers = self._resource_type_to_mappers(session, resource_type)
resource_cls = mappers['resource']
history_cls = mappers['history']
resource_cols = {}
history_cols = {}
@ -863,7 +927,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
target_cls = self._get_history_result_mapper(
session, resource_type)
else:
target_cls = self._resource_type_to_classes(
target_cls = self._resource_type_to_mappers(
session, resource_type)["resource"]
q = session.query(target_cls)
@ -916,7 +980,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
all_resources.extend(resources)
else:
try:
target_cls = self._resource_type_to_classes(
target_cls = self._resource_type_to_mappers(
session, type)['history' if is_history else
'resource']
except (indexer.UnexpectedResourceTypeState,

View File

@ -244,10 +244,19 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType):
attributes = sqlalchemy.Column(ResourceTypeAttributes)
state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating",
"creation_error", "deleting",
"deletion_error",
"deletion_error", "updating",
"updating_error",
name="resource_type_state_enum"),
nullable=False,
server_default="creating")
updated_at = sqlalchemy.Column(PreciseTimestamp, nullable=False,
# NOTE(jd): We would like to use
# sqlalchemy.func.now, but we can't
# because the type of PreciseTimestamp in
# MySQL is not a Timestamp, so it would
# not store a timestamp but a date as an
# integer.
default=lambda: utils.utcnow())
def to_baseclass(self):
cols = {}

View File

@ -17,6 +17,7 @@
import itertools
import uuid
import jsonpatch
from oslo_utils import strutils
import pecan
from pecan import rest
@ -146,10 +147,13 @@ def set_resp_location_hdr(location):
pecan.response.headers['Location'] = location
def deserialize():
def deserialize(expected_content_types=None):
if expected_content_types is None:
expected_content_types = ("application/json", )
mime_type, options = werkzeug.http.parse_options_header(
pecan.request.headers.get('Content-Type'))
if mime_type != "application/json":
if mime_type not in expected_content_types:
abort(415)
try:
params = json.load(pecan.request.body_file_raw,
@ -159,10 +163,11 @@ def deserialize():
return params
def deserialize_and_validate(schema, required=True):
def deserialize_and_validate(schema, required=True,
                             expected_content_types=None):
    """Deserialize the request body, then validate it against *schema*.

    Any schema violation is turned into an HTTP 400 response.
    """
    try:
        body = deserialize(expected_content_types=expected_content_types)
        validator = voluptuous.Schema(schema, required=required)
        return validator(body)
    except voluptuous.Error as e:
        abort(400, "Invalid input: %s" % e)
@ -747,6 +752,20 @@ def etag_set_headers(obj):
pecan.response.last_modified = obj.lastmodified
def AttributesPath(value):
    """Validate that a JSON Patch path targets the attributes document.

    Accepts "/attributes" itself or any path below it (e.g.
    "/attributes/foo").  A bare startswith() check would also accept
    unrelated paths such as "/attributesfoo" that merely share the
    prefix, so the separator is checked explicitly.

    :raises ValueError: when the path points anywhere else.
    """
    if value == "/attributes" or value.startswith("/attributes/"):
        return value
    raise ValueError("Only attributes can be modified")
# TODO(sileht): Implements delete op
# RFC 6902 (JSON Patch) schema deliberately restricted: only "add"
# operations whose path lives under /attributes are accepted for now.
ResourceTypeJsonPatchSchema = voluptuous.Schema([{
    "op": "add",
    "path": AttributesPath,
    "value": dict,
}])
class ResourceTypeController(rest.RestController):
def __init__(self, name):
self._name = name
@ -760,6 +779,61 @@ class ResourceTypeController(rest.RestController):
enforce("get resource type", rt)
return rt
@pecan.expose('json')
def patch(self):
    """Update a resource type from an RFC 6902 (JSON Patch) payload.

    Only "add" operations on the /attributes path are accepted (enforced
    by ResourceTypeJsonPatchSchema).  The patched description is
    re-validated as a whole before the new attributes are handed to the
    indexer, which performs the actual schema change.
    """
    # NOTE(sileht): should we check for "application/json-patch+json"
    # Content-Type ?
    try:
        rt = pecan.request.indexer.get_resource_type(self._name)
    except indexer.NoSuchResourceType as e:
        abort(404, e)
    enforce("update resource type", rt)
    # Ensure this is a valid jsonpatch dict
    patch = deserialize_and_validate(
        ResourceTypeJsonPatchSchema,
        expected_content_types=["application/json-patch+json"])
    # Add new attributes to the resource type
    rt_json_current = rt.jsonify()
    try:
        rt_json_next = jsonpatch.apply_patch(rt_json_current, patch)
    except jsonpatch.JsonPatchException as e:
        abort(400, e)
    # "state" is server-managed and not part of the input schema, so drop
    # it before re-validation.
    del rt_json_next['state']
    # Validate that the whole new resource_type is valid
    schema = pecan.request.indexer.get_resource_type_schema()
    try:
        rt_json_next = voluptuous.Schema(schema, required=True)(
            rt_json_next)
    except voluptuous.Error as e:
        abort(400, "Invalid input: %s" % e)
    # Get only newly formatted attributes
    attrs = {k: v for k, v in rt_json_next["attributes"].items()
             if k not in rt_json_current["attributes"]}
    try:
        attrs = schema.attributes_from_dict(attrs)
    except resource_type.InvalidResourceAttributeName as e:
        abort(400, e)
    # TODO(sileht): Add a default field on an attribute
    # to be able to fill non-nullable column on sql side.
    # And obviously remove this limitation
    for attr in attrs:
        if attr.required:
            abort(400, ValueError("Adding required attributes is not yet "
                                  "possible."))
    try:
        return pecan.request.indexer.update_resource_type(
            self._name, add_attributes=attrs)
    except indexer.NoSuchResourceType as e:
        abort(400, e)
@pecan.expose()
def delete(self):
try:

View File

@ -305,8 +305,6 @@ tests:
$.uuid: e495ebad-be64-46c0-81d6-b079beb48df9
$.int:
# Ensure we can't delete the type
- name: list resource history
GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast
request_headers:
@ -320,6 +318,131 @@ tests:
$[1].name: foo
$[1].foobar: what
# CRUD resource type attributes
- name: post a new resource attribute
url: /v1/resource_type/my_custom_resource
method: patch
request_headers:
x-roles: admin
content-type: application/json-patch+json
data:
- op: add
path: /attributes/newstuff
value:
type: string
required: False
min_length: 0
max_length: 255
status: 200
response_json_paths:
$.name: my_custom_resource
$.attributes:
name:
type: string
required: True
min_length: 2
max_length: 5
foobar:
type: string
required: False
min_length: 0
max_length: 255
uuid:
type: uuid
required: True
int:
type: number
required: False
min: -2
max: 3
float:
type: number
required: false
min: -2.3
max:
bool:
type: bool
required: false
newstuff:
type: string
required: False
min_length: 0
max_length: 255
- name: get the new custom resource type
url: /v1/resource_type/my_custom_resource
response_json_paths:
$.name: my_custom_resource
$.attributes:
name:
type: string
required: True
min_length: 2
max_length: 5
foobar:
type: string
required: False
min_length: 0
max_length: 255
uuid:
type: uuid
required: True
int:
type: number
required: False
min: -2
max: 3
float:
type: number
required: false
min: -2.3
max:
bool:
type: bool
required: false
newstuff:
type: string
required: False
min_length: 0
max_length: 255
# Invalid patch
- name: patch a resource attribute replace
url: /v1/resource_type/my_custom_resource
method: patch
request_headers:
x-roles: admin
content-type: application/json-patch+json
data:
- op: replace
path: /attributes/newstuff
value:
type: string
required: False
min_length: 0
max_length: 255
status: 400
- name: patch a resource attribute type not exist
url: /v1/resource_type/my_custom_resource
method: patch
request_headers:
x-roles: admin
content-type: application/json-patch+json
data:
- op: add
path: /attributes/newstuff
value:
type: notexist
required: False
min_length: 0
max_length: 255
status: 400
# Ensure we can't delete the type
- name: delete in use resource_type
DELETE: /v1/resource_type/my_custom_resource
request_headers:

View File

@ -0,0 +1,7 @@
---
features:
- |-
A new REST API endpoint has been added to make it possible
to update a resource-type: "PATCH /v1/resource-type/foobar"
The expected payload is in RFC6902 format. Some examples
can be found in the documentation.

View File

@ -11,6 +11,7 @@ pandas>=0.17.0
pecan>=0.9
pytimeparse>=1.1.5
futures
jsonpatch
cotyledon>=1.2.2
requests
six