Modified delete to archive services instead of hard delete

This change also resolves the issue where services could not be
deleted when a provider had issues.

Closes-Bug: 1400525

Change-Id: I8bd798bc4c97a1ed88694c611c32ea2cfadcd849
amitgandhinz 2014-12-11 16:18:16 -05:00
parent 92ab98ba65
commit bf99d8efa7
13 changed files with 305 additions and 129 deletions

View File

@ -36,7 +36,7 @@ manager = default
storage = cassandra
# Provider modules list (a list of comma separated provider module list)
providers = fastly
providers = fastly,akamai
# DNS driver module (e.g., default, designate, rackspace)
dns = rackspace
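The providers entry above is a comma-separated module list; lists like this are typically resolved by name through stevedore entry points. A minimal sketch of that pattern, assuming a 'poppy.provider' entry-point namespace (the actual namespace and wiring are not shown in this diff):

# Hypothetical sketch: resolving the comma-separated providers value into
# plugin classes with stevedore. The entry-point namespace is an assumption,
# not taken from poppy's setup.cfg.
from stevedore import named

provider_names = 'fastly,akamai'.split(',')   # value from the config above

manager = named.NamedExtensionManager(
    namespace='poppy.provider',               # assumed namespace
    names=provider_names)

for ext in manager:
    print(ext.name, ext.plugin)               # plugin class registered under that name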

View File

@ -41,7 +41,7 @@ manager = default
storage = mockdb
# Provider modules list (a list of comma separated provider module list)
providers = mock,fastly
providers = mock,fastly,akamai
# DNS driver module (e.g. default, designate, rackspace)
dns = default
@ -54,6 +54,7 @@ bind = 0.0.0.0
port = 8888
[drivers:storage:cassandra]
archive_on_delete = True
# Comma-separated list of hosts (Example: cass01,cass02,cass03)
cluster = localhost
;port = 9042
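For reference, a minimal sketch of reading these options with oslo.config; the section and option names mirror the file above, but the option types (ListOpt for providers, BoolOpt for archive_on_delete) are assumptions rather than poppy's actual definitions:

# Hypothetical sketch: loading the [drivers] and [drivers:storage:cassandra]
# options shown above. Option types are assumed.
from oslo.config import cfg   # packaged as oslo_config in newer releases

conf = cfg.ConfigOpts()

conf.register_group(cfg.OptGroup(name='drivers'))
conf.register_opts(
    [cfg.ListOpt('providers', default=['mock'])],
    group='drivers')

conf.register_group(cfg.OptGroup(name='drivers:storage:cassandra'))
conf.register_opts(
    [cfg.BoolOpt('archive_on_delete', default=True,
                 help='Archive services on delete?')],
    group='drivers:storage:cassandra')

conf(args=[], default_config_files=['poppy.conf'])   # placeholder path

print(conf['drivers'].providers)                            # e.g. ['mock', 'fastly', 'akamai']
print(conf['drivers:storage:cassandra'].archive_on_delete)  # True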

View File

@ -35,9 +35,6 @@ def service_delete_worker(provider_details, service_controller,
dns_responder = service_controller.dns_controller.delete(provider_details)
for responder in responders:
# this is the item of responder, if there's "error"
# key in it, it means the deletion for this provider failed.
# in that case we cannot delete service from poppy storage.
provider_name = list(responder.items())[0][0]
if 'error' in responder[provider_name]:
@ -49,7 +46,7 @@ def service_delete_worker(provider_details, service_controller,
responder[provider_name].get('error_info'))
elif 'error' in dns_responder[provider_name]:
LOG.info('Delete service from DNS failed')
LOG.info('Updating provider detail status of %s for %s'.foramt(
LOG.info('Updating provider detail status of %s for %s'.format(
(provider_name, service_name)))
# stores the error info for debugging purposes.
provider_details[provider_name].error_info = (
@ -59,19 +56,21 @@ def service_delete_worker(provider_details, service_controller,
del provider_details[provider_name]
service_controller.storage_controller._driver.connect()
if provider_details == {}:
# Only if all provider successfully deleted we can delete
# the poppy service.
LOG.info('Deleting poppy service %s from all providers successful'
% service_name)
service_controller.storage_controller.delete(project_id, service_name)
LOG.info('Deleting poppy service %s succeeded' % service_name)
else:
# Leave failed provider details with error infomation for further
if provider_details != {}:
# Store failed provider details with error information for further
# action, maybe for debug and/or support.
LOG.info('Updating poppy service provider details for %s' %
LOG.info('Delete failed for one or more providers'
'Updating poppy service provider details for %s' %
service_name)
service_controller.storage_controller.update_provider_details(
project_id,
service_name,
provider_details)
# always delete from Poppy. Provider Details will contain
# any provider issues that may have occurred.
LOG.info('Deleting poppy service %s from all providers successful'
% service_name)
service_controller.storage_controller.delete(project_id, service_name)
LOG.info('Deleting poppy service %s succeeded' % service_name)
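The net effect of this hunk: a per-provider failure no longer blocks the storage delete; it is only recorded on the service's provider details. A condensed sketch of the new control flow (simplified names, not poppy's exact signatures):

# Condensed sketch of the reworked delete flow above; 'storage' stands in for
# service_controller.storage_controller and the signatures are simplified.
def delete_service(storage, provider_details, project_id, service_name):
    for provider_name in list(provider_details):
        if getattr(provider_details[provider_name], 'error_info', None):
            # keep the failing provider's record for debugging/support
            continue
        # provider-side delete succeeded, nothing more to track
        del provider_details[provider_name]

    if provider_details:
        # some providers failed: store the error details, but do not abort
        # the delete the way the old code did
        storage.update_provider_details(project_id, service_name,
                                        provider_details)

    # always remove (or, for Cassandra, archive) the service itself
    storage.delete(project_id, service_name)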

View File

@ -57,6 +57,8 @@ CASSANDRA_OPTIONS = [
},
help='Replication strategy for Cassandra cluster'
),
cfg.BoolOpt('archive_on_delete', default=True,
help='Archive services on delete?'),
]
CASSANDRA_GROUP = 'drivers:storage:cassandra'
@ -137,6 +139,7 @@ class CassandraStorageDriver(base.Driver):
self.cassandra_conf = conf[CASSANDRA_GROUP]
self.datacenter = conf.datacenter
self.session = None
self.archive_on_delete = self.cassandra_conf.archive_on_delete
self.lock = multiprocessing.Lock()
def change_namespace(self, namespace):

View File

@ -13,6 +13,19 @@ CREATE TABLE services (
PRIMARY KEY (project_id, service_name)
);
CREATE TABLE archives (
project_id VARCHAR,
service_name VARCHAR,
flavor_id VARCHAR,
domains LIST<TEXT>,
origins LIST<TEXT>,
caching_rules LIST<TEXT>,
restrictions LIST<TEXT>,
provider_details MAP<TEXT, TEXT>,
archived_time timestamp,
PRIMARY KEY (project_id, service_name, archived_time)
);
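Because archived_time is the final primary-key column, deleting the same (project_id, service_name) repeatedly adds a new archive row each time instead of overwriting the previous one. A read-back sketch with the Python cassandra-driver (keyspace name and connection details are placeholders):

# Illustrative only: query the archives table defined above.
from cassandra.cluster import Cluster

session = Cluster(['localhost']).connect('poppy')   # keyspace name assumed

rows = session.execute(
    '''SELECT service_name, flavor_id, archived_time FROM archives
       WHERE project_id = %(project_id)s AND service_name = %(service_name)s''',
    {'project_id': 'my_project', 'service_name': 'my_service'})

for row in rows:
    # one row per delete of that service, oldest first (ascending clustering order)
    print(row.service_name, row.archived_time)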
CREATE TABLE flavors (
flavor_id VARCHAR,
providers MAP<TEXT, TEXT>,

View File

@ -32,5 +32,18 @@ schema_statements = [
providers MAP<TEXT, TEXT>,
PRIMARY KEY (flavor_id)
);
''',
'''CREATE TABLE archives (
project_id VARCHAR,
service_name VARCHAR,
flavor_id VARCHAR,
domains LIST<TEXT>,
origins LIST<TEXT>,
caching_rules LIST<TEXT>,
restrictions LIST<TEXT>,
provider_details MAP<TEXT, TEXT>,
archived_time timestamp,
PRIMARY KEY (project_id, service_name, archived_time)
);
'''
]
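A short sketch of how a test fixture might install this schema; 'session' is assumed to be a cassandra-driver Session already bound to a throwaway test keyspace:

# Sketch: apply the schema_statements list above during test setup.
for statement in schema_statements:
    session.execute(statement)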

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from poppy.model.helpers import cachingrule
@ -29,6 +30,7 @@ CQL_GET_ALL_SERVICES = '''
SELECT project_id,
service_name,
domains,
flavor_id,
origins,
caching_rules,
restrictions
@ -57,7 +59,6 @@ CQL_GET_SERVICE = '''
service_name,
flavor_id,
domains,
flavor_id,
origins,
caching_rules,
restrictions,
@ -66,6 +67,33 @@ CQL_GET_SERVICE = '''
WHERE project_id = %(project_id)s AND service_name = %(service_name)s
'''
CQL_ARCHIVE_SERVICE = '''
BEGIN BATCH
INSERT INTO archives (project_id,
service_name,
flavor_id,
domains,
origins,
caching_rules,
restrictions,
provider_details,
archived_time
)
VALUES (%(project_id)s,
%(service_name)s,
%(flavor_id)s,
%(domains)s,
%(origins)s,
%(caching_rules)s,
%(restrictions)s,
%(provider_details)s,
%(archived_time)s)
DELETE FROM services
WHERE project_id = %(project_id)s AND service_name = %(service_name)s;
APPLY BATCH;
'''
CQL_DELETE_SERVICE = '''
DELETE FROM services
WHERE project_id = %(project_id)s AND service_name = %(service_name)s
@ -260,14 +288,36 @@ class ServicesController(base.ServicesController):
def delete(self, project_id, service_name):
"""delete.
Delete local configuration storage
Archive local configuration storage
"""
# delete local configuration from storage
args = {
'project_id': project_id,
'service_name': service_name
}
self.session.execute(CQL_DELETE_SERVICE, args)
if self._driver.archive_on_delete:
# get the existing service
results = self.session.execute(CQL_GET_SERVICE, args)
result = results[0]
if (result):
archive_args = {
'project_id': result.get('project_id'),
'service_name': result.get('service_name'),
'flavor_id': result.get('flavor_id'),
'domains': result.get('domains'),
'origins': result.get('origins'),
'caching_rules': result.get('caching_rules'),
'restrictions': result.get('restrictions'),
'provider_details': result.get('provider_details'),
'archived_time': datetime.datetime.utcnow()
}
# archive and delete the service
self.session.execute(CQL_ARCHIVE_SERVICE, archive_args)
else:
self.session.execute(CQL_DELETE_SERVICE, args)
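Because CQL_ARCHIVE_SERVICE is a logged batch, the insert into archives and the delete from services are atomic: both statements eventually apply or neither does. A brief usage sketch, assuming a Cassandra storage driver configured as in the earlier hunks:

# Sketch: exercising the new delete() path. 'cassandra_driver' is assumed to
# be a configured CassandraStorageDriver instance (see the driver changes above).
controller = services.ServicesController(cassandra_driver)

# archive_on_delete = True: the current service row is copied into 'archives'
# (stamped with archived_time) and removed from 'services' in one batch.
controller.delete('my_project_id', 'my_service')

# archive_on_delete = False: only the plain CQL_DELETE_SERVICE statement runs.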
def get_provider_details(self, project_id, service_name):
"""get_provider_details.

View File

@ -34,7 +34,7 @@ class ServicesController(base.ServicesController):
return self._driver.database
def list(self, project_id, marker=None, limit=None):
provider_details = {
provider_details_list = {
'MaxCDN': json.dumps(
{'id': 11942,
'access_urls': [{'operator_url': 'mypullzone.netdata.com'}]}),
@ -49,27 +49,33 @@ class ServicesController(base.ServicesController):
'access_urls':
[{'operator_url': 'mockcf123.fastly.prod.com'}]})}
services = [{'name': 'mockdb1_service_name',
'domains': [json.dumps({'domain': 'www.mywebsite.com'})],
'origins': [json.dumps({'origin': 'mywebsite.com',
'port': 80,
'ssl': False})],
'flavor_id': 'standard',
'caching': [{'name': 'default',
'ttl': 3600},
{'name': 'home',
'ttl': 17200,
'rules': [{'name': 'index',
'request_url': '/index.htm'}]},
{'name': 'images',
'ttl': 12800,
'rules': [{'name': 'images',
'request_url': '*.png'}]}],
'restrictions': [{'name': 'website only',
'rules': [{'name': 'mywebsite.com',
'http_host':
'www.mywebsite.com'}]}],
'provider_details': provider_details}]
services = []
for i in self.created_service_names:
services = [{'name': i,
'domains': [json.dumps(
{'domain': 'www.mywebsite.com'})
],
'origins': [json.dumps({'origin': 'mywebsite.com',
'port': 80,
'ssl': False})],
'flavor_id': 'standard',
'caching': [{'name': 'default',
'ttl': 3600},
{'name': 'home',
'ttl': 17200,
'rules': [
{'name': 'index',
'request_url': '/index.htm'}
]},
{'name': 'images',
'ttl': 12800,
'rules': [{'name': 'images',
'request_url': '*.png'}]}],
'restrictions': [{'name': 'website only',
'rules': [{'name': 'mywebsite.com',
'http_host':
'www.mywebsite.com'}]}],
'provider_details': provider_details_list}]
services_result = []
for r in services:
@ -80,64 +86,70 @@ class ServicesController(base.ServicesController):
def get(self, project_id, service_name):
# get the requested service from storage
if service_name == "non_exist_service_name":
if service_name not in self.created_service_names:
raise ValueError("service: % does not exist")
origin_json = json.dumps({'origin': 'mywebsite.com',
'port': 80,
'ssl': False})
domain_json = json.dumps({'domain': 'www.mywebsite.com'})
provider_details = {
'MaxCDN': json.dumps(
{'id': 11942,
'access_urls': [{'operator_url': 'mypullzone.netdata.com'}]}),
'Mock': json.dumps(
{'id': 73242,
'access_urls': [{'operator_url': 'mycdn.mock.com'}]}),
'CloudFront': json.dumps(
{'id': '5ABC892',
'access_urls': [{'operator_url': 'cf123.cloudcf.com'}]}),
'Fastly': json.dumps(
{'id': 3488,
'access_urls':
[{'operator_url': 'mockcf123.fastly.prod.com'}]})}
else:
origin_json = json.dumps({'origin': 'mywebsite.com',
'port': 80,
'ssl': False})
domain_json = json.dumps({'domain': 'www.mywebsite.com'})
provider_details_list = {
'MaxCDN': json.dumps(
{'id': 11942,
'access_urls': [
{'operator_url': 'mypullzone.netdata.com'}]}),
'Mock': json.dumps(
{'id': 73242,
'access_urls': [
{'operator_url': 'mycdn.mock.com'}]}),
'CloudFront': json.dumps(
{'id': '5ABC892',
'access_urls': [
{'operator_url': 'cf123.cloudcf.com'}]}),
'Fastly': json.dumps(
{'id': 3488,
'access_urls':
[{'operator_url': 'mockcf123.fastly.prod.com'}]})}
service_dict = {'name': service_name,
'domains': [domain_json],
'origins': [origin_json],
'flavor_id': 'standard',
'caching': [{'name': 'default',
'ttl': 3600},
{'name': 'home',
'ttl': 17200,
'rules': [{'name': 'index',
'request_url': '/index.htm'}]},
{'name': 'images',
'ttl': 12800,
'rules': [{'name': 'images',
'request_url': '*.png'}]}],
'restrictions': [{'name': 'website only',
'rules': [{'name': 'mywebsite.com',
'http_host':
'www.mywebsite.com'}]}],
'provider_details': provider_details}
service_result = self.format_result(service_dict)
return service_result
service_dict = {'name': service_name,
'domains': [domain_json],
'origins': [origin_json],
'flavor_id': 'standard',
'caching': [{'name': 'default',
'ttl': 3600},
{'name': 'home',
'ttl': 17200,
'rules': [
{'name': 'index',
'request_url': '/index.htm'}]},
{'name': 'images',
'ttl': 12800,
'rules': [{'name': 'images',
'request_url': '*.png'}]}],
'restrictions': [{'name': 'website only',
'rules': [
{'name': 'mywebsite.com',
'http_host':
'www.mywebsite.com'}]}],
'provider_details': provider_details_list}
service_result = self.format_result(service_dict)
return service_result
def create(self, project_id, service_obj):
if service_obj.name in self.created_service_names:
raise ValueError("Service %s already exists..." % service_obj.name)
raise ValueError("Service %s already exists." % service_obj.name)
else:
# TODO(amitgandhinz): append the entire service
# instead of just the name
self.created_service_names.append(service_obj.name)
return ""
def update(self, project_id, service_name, service_json):
# update configuration in storage
return ''
def delete(self, project_id, service_name):
# delete from providers
return ''
if (service_name in self.created_service_names):
self.created_service_names.remove(service_name)
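A small illustrative sequence against the mock controller above; the project id is arbitrary and service_obj stands for any service model whose name attribute is 'svc1':

# Illustrative use of the mockdb controller: delete() now mutates
# created_service_names, so a later get() for the same name raises.
controller.create('project_x', service_obj)   # registers 'svc1'
controller.get('project_x', 'svc1')           # returns the formatted service dict
controller.delete('project_x', 'svc1')        # drops the name from the mock store
controller.get('project_x', 'svc1')           # raises ValueError: unknown service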
def get_provider_details(self, project_id, service_name):
if service_name == 'non_exist_service_name':

View File

@ -1 +1 @@
cassandra-driver>=1.0.0
cassandra-driver>=2.0.0

View File

@ -15,6 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
import ddt
@ -237,10 +238,95 @@ class TestListServices(base.TestBase):
super(TestListServices, self).tearDown()
class TestDeleteService(base.TestBase):
def setUp(self):
super(TestDeleteService, self).setUp()
self.service_name = str(uuid.uuid1())
self.flavor_id = str(uuid.uuid1())
if self.test_config.generate_flavors:
self.flavor_id = str(uuid.uuid1())
self.client.create_flavor(
flavor_id=self.flavor_id,
provider_list=[{"provider": "fastly",
"links": [{"href": "www.fastly.com",
"rel": "provider_url"}]}])
else:
self.flavor_id = self.test_config.default_flavor
# ensure the flavor referred to exists
self.client.create_flavor(flavor_id=self.flavor_id,
provider_list=[{
"provider": "fastly",
"links": [{"href": "www.fastly.com",
"rel": "provider_url"}]}])
domain = str(uuid.uuid1()) + '.com'
self.domain_list = [{"domain": domain}]
origin = str(uuid.uuid1()) + '.com'
self.origin_list = [{"origin": origin,
"port": 443, "ssl": False}]
self.caching_list = [{"name": "default", "ttl": 3600},
{"name": "home", "ttl": 1200,
"rules": [{"name": "index",
"request_url": "/index.htm"}]}]
self.client.create_service(service_name=self.service_name,
domain_list=self.domain_list,
origin_list=self.origin_list,
caching_list=self.caching_list,
flavor_id=self.flavor_id)
def test_delete_service(self):
resp = self.client.delete_service(service_name=self.service_name)
self.assertEqual(resp.status_code, 202)
resp = self.client.get_service(service_name=self.service_name)
self.assertEqual(resp.status_code, 200)
body = resp.json()
self.assertEqual(body['status'], 'delete_in_progress')
# TODO(malini): find a better solution
# As is, the service is still available in the DB till deleted from
# the provider. The test should be able to handle this with
# exponential sleep or whatever(!).
status_code = 0
count = 0
while (count < 5):
service_deleted = self.client.get_service(
service_name=self.service_name)
status_code = service_deleted.status_code
if status_code == 200:
time.sleep(1)
else:
break
count = count + 1
self.assertEqual(404, status_code)
def test_delete_non_existing_service(self):
resp = self.client.delete_service(service_name='this_cant_be_true')
self.assertEqual(resp.status_code, 404)
def test_delete_failed_service(self):
# TODO(malini): Add test to verify that a failed service can be
# deleted.
# Placeholder till we figure out how to create provider side failure.
pass
def tearDown(self):
self.client.delete_service(service_name=self.service_name)
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestDeleteService, self).tearDown()
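The TODO in test_delete_service asks for something smarter than a fixed one-second sleep. A hedged sketch of an exponential-backoff helper that could replace the while loop, using the same client interface as the tests above (the retry budget and delays are arbitrary):

    # Hypothetical helper for the TODO above: poll until the service is gone
    # (404) or the retry budget is exhausted, doubling the wait each time.
    def _wait_for_delete(self, service_name, attempts=6, initial_delay=0.5):
        delay = initial_delay
        resp = self.client.get_service(service_name=service_name)
        for _ in range(attempts):
            if resp.status_code == 404:
                break
            time.sleep(delay)
            delay *= 2   # 0.5s, 1s, 2s, ...
            resp = self.client.get_service(service_name=service_name)
        return resp.status_code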
@ddt.ddt
class TestServiceActions(base.TestBase):
"""Tests for PATCH, GET & DELETE Services."""
"""Tests for PATCH, GET Services."""
def setUp(self):
super(TestServiceActions, self).setUp()
@ -356,34 +442,6 @@ class TestServiceActions(base.TestBase):
# Placeholder till we figure out how to create provider side failure.
pass
def test_delete_service(self):
resp = self.client.delete_service(service_name=self.service_name)
self.assertEqual(resp.status_code, 202)
resp = self.client.get_service(service_name=self.service_name)
self.assertEqual(resp.status_code, 200)
body = resp.json()
self.assertEqual(body['status'], 'delete_in_progress')
# TODO(malini): find a better solution
# As is, the service is still available in the DB till deleted from
# the provider. The test should be able to handle this with
# exponential sleep or whatever(!).
# time.sleep(20)
# resp = self.client.get_service(service_name=self.service_name)
# self.assertEqual(resp.status_code, 404)
def test_delete_non_existing_service(self):
resp = self.client.delete_service(service_name='this_cant_be_true')
self.assertEqual(resp.status_code, 404)
def test_delete_failed_service(self):
# TODO(malini): Add test to verify that a failed service can be
# deleted.
# Placeholder till we figure out how to create provider side failure.
pass
def tearDown(self):
self.client.delete_service(service_name=self.service_name)
if self.test_config.generate_flavors:

View File

@ -285,15 +285,44 @@ class ServiceControllerTest(base.FunctionalTest):
patch_ret_val = self.test_fake_controller._handle_patch('patch', '')
self.assertTrue(len(patch_ret_val) == 2)
# def test_delete(self):
# TODO(amitgandhinz): commented this out until the Delete Patch lands
# due to this test failing.
# response = self.app.delete('/v1.0/services/fake_service_name_4')
# self.assertEqual(200, response.status_code)
def test_delete(self):
response = self.app.delete(
'/v1.0/services/%s' % self.service_name,
headers={
'Content-Type': 'application/json',
'X-Project-ID': self.project_id
}
)
self.assertEqual(202, response.status_code)
def test_delete_non_eixst(self):
response = self.app.delete('/v1.0/%s/services/non_exist_service_name' %
self.project_id,
# TODO(amitgandhinz): commented out as thread model
# is not allowing thread to process with test
# # check if it actually gets deleted
# status_code = 0
# count = 0
# while (count < 5):
# service_deleted = self.app.get(
# '/v1.0/services/' + self.service_name,
# headers={'X-Project-ID': self.project_id},
# expect_errors=True)
# status_code = service_deleted.status_code
# print("service delete status: %s" % status_code)
# if status_code == 200:
# print 'not yet deleted, so try again in 1s'
# import time
# time.sleep(1)
# else:
# break
# count = count + 1
# self.assertEqual(404, status_code)
def test_delete_non_exist(self):
response = self.app.delete('/v1.0/services/non_exist_service_name',
headers={
'Content-Type': 'application/json',
'X-Project-ID': self.project_id

View File

@ -48,7 +48,9 @@ CASSANDRA_OPTIONS = [
'replication_factor': '1'
},
help='Replication strategy for Cassandra cluster'
)
),
cfg.BoolOpt('archive_on_delete', default=True,
help='Archive services on delete?'),
]

View File

@ -37,7 +37,3 @@ class MockDBStorageFlavorsTests(base.TestCase):
self.sc = services.ServicesController(mockdb_driver)
self.project_id = "fake_project_id"
self.service_name = "fake_service_name"
def test_delete_service(self):
self.assertTrue(self.sc.delete(self.project_id, self.service_name)
== '')