Remove legacy patching code

In Fuel 5.1 we had an experimental feature - 'patching openstack env'.
The idea was to update and to roll back OpenStack environments between
minor releases. However, we encountered a lot of problems with
restoring OpenStack databases and resolving dependency hell in
packages, so we buried the feature and never released it.

This patch removes the legacy code from the source tree. We can do it
safely, since the feature was never released publicly.

Related-Bug: #1511499
Change-Id: I58b3fedd239eb7fe4226e51c2d6386efab14395d
Igor Kalnitsky 2016-02-05 18:53:07 +02:00
parent 357e184eaa
commit e91363ba9c
24 changed files with 75 additions and 481 deletions

@@ -41,7 +41,6 @@ from nailgun.task.manager import ApplyChangesTaskManager
from nailgun.task.manager import ClusterDeletionManager
from nailgun.task.manager import ResetEnvironmentTaskManager
from nailgun.task.manager import StopDeploymentTaskManager
from nailgun.task.manager import UpdateEnvironmentTaskManager
class ClusterHandler(SingleHandler):
@@ -105,14 +104,6 @@ class ClusterResetHandler(DeferredTaskHandler):
task_manager = ResetEnvironmentTaskManager
class ClusterUpdateHandler(DeferredTaskHandler):
log_message = u"Trying to update environment '{env_id}'"
log_error = u"Error during execution of update task " \
u"on environment '{env_id}': {error}"
task_manager = UpdateEnvironmentTaskManager
class ClusterAttributesHandler(BaseHandler):
"""Cluster attributes handler"""

@@ -33,7 +33,6 @@ from nailgun.api.v1.handlers.cluster import ClusterGeneratedData
from nailgun.api.v1.handlers.cluster import ClusterHandler
from nailgun.api.v1.handlers.cluster import ClusterResetHandler
from nailgun.api.v1.handlers.cluster import ClusterStopDeploymentHandler
from nailgun.api.v1.handlers.cluster import ClusterUpdateHandler
from nailgun.api.v1.handlers.cluster import VmwareAttributesDefaultsHandler
from nailgun.api.v1.handlers.cluster import VmwareAttributesHandler
from nailgun.api.v1.handlers.component import ComponentCollectionHandler
@@ -210,8 +209,6 @@ urls = (
ClusterStopDeploymentHandler,
r'/clusters/(?P<cluster_id>\d+)/reset/?$',
ClusterResetHandler,
r'/clusters/(?P<cluster_id>\d+)/update/?$',
ClusterUpdateHandler,
r'/clusters/(?P<obj_id>\d+)/deployment_tasks/?$',
ClusterDeploymentTasksHandler,
@@ -329,7 +326,6 @@ urls = (
OpenstackConfigHandler,
r'/openstack-config/execute/?$',
OpenstackConfigExecuteHandler,
)
feature_groups_urls = {

@@ -43,21 +43,6 @@ class ClusterValidator(BasicValidator):
'net_provider',
)
@classmethod
def _can_update_release(cls, curr_release, pend_release):
return any([
# redeploy
curr_release.id == pend_release.id,
# update to upper release
curr_release.operating_system == pend_release.operating_system
and curr_release.version in pend_release.can_update_from_versions,
# update to lower release
curr_release.operating_system == pend_release.operating_system
and pend_release.version in curr_release.can_update_from_versions,
])
@classmethod
def _validate_common(cls, data, instance=None):
d = cls.validate_json(data)
@@ -73,27 +58,6 @@ class ClusterValidator(BasicValidator):
release_id), log_message=True)
cls._validate_mode(d, release)
pend_release_id = d.get("pending_release_id")
if pend_release_id:
pend_release = objects.Release.get_by_uid(pend_release_id,
fail_if_not_found=True)
if not release_id:
if not instance:
raise errors.InvalidData(
"Cannot set pending release when "
"there is no current release",
log_message=True
)
release_id = instance.release_id
curr_release = objects.Release.get_by_uid(release_id)
if not cls._can_update_release(curr_release, pend_release):
raise errors.InvalidData(
"Cannot set pending release as "
"it cannot update current release",
log_message=True
)
return d
@classmethod

@@ -43,7 +43,6 @@ single_schema = {
},
"ui_settings": base_types.UI_SETTINGS,
"release_id": {"type": "number"},
"pending_release_id": base_types.NULLABLE_ID,
"replaced_deployment_info": {"type": "object"},
"replaced_provisioning_info": {"type": "object"},
"is_customized": {"type": "boolean"},

@@ -51,8 +51,6 @@ CLUSTER_STATUSES = Enum(
'operational',
'error',
'remove',
'update',
'update_error'
)
NETWORKS = Enum(

@@ -24,16 +24,35 @@ from alembic import op
import six
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.db.sqlalchemy.models import fields
from nailgun.utils.migration import upgrade_enum
revision = '11a9adc6d36a'
down_revision = '43b2cb64dae6'
cluster_statuses_old = (
'new',
'deployment',
'stopped',
'operational',
'error',
'remove',
'update',
'update_error'
)
cluster_statuses_new = (
'new',
'deployment',
'stopped',
'operational',
'error',
'remove',
)
def upgrade():
add_foreign_key_ondelete()
@@ -43,9 +62,11 @@ def upgrade():
merge_node_attributes_with_nodes()
upgrade_node_attributes()
upgrade_remove_wizard_metadata_from_releases()
drop_legacy_patching()
def downgrade():
restore_legacy_patching()
downgrade_remove_wizard_metadata_from_releases()
downgrade_node_attributes()
downgrade_merge_node_attributes_with_nodes()
@@ -680,3 +701,53 @@ def downgrade_remove_wizard_metadata_from_releases():
nullable=True
)
)
def drop_legacy_patching():
upgrade_enum(
"clusters", # table
"status", # column
"cluster_status", # ENUM name
cluster_statuses_old, # old options
cluster_statuses_new, # new options
)
op.drop_constraint(
'fk_pending_release_id',
'clusters',
type_='foreignkey'
)
op.drop_column('clusters', 'pending_release_id')
op.drop_column('releases', 'can_update_from_versions')
def restore_legacy_patching():
op.add_column(
'releases',
sa.Column(
'can_update_from_versions',
fields.JSON(),
nullable=False,
server_default='[]'
))
op.add_column(
'clusters',
sa.Column(
'pending_release_id',
sa.Integer(),
nullable=True
))
op.create_foreign_key(
'fk_pending_release_id',
'clusters',
'releases',
['pending_release_id'],
['id'])
upgrade_enum(
"clusters", # table
"status", # column
"cluster_status", # ENUM name
cluster_statuses_new, # new options
cluster_statuses_old, # old options
)

@@ -86,7 +86,6 @@ class Cluster(Base):
)
name = Column(UnicodeText, unique=True, nullable=False)
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
pending_release_id = Column(Integer, ForeignKey('releases.id'))
nodes = relationship(
"Node", backref="cluster", cascade="delete", order_by='Node.id')
tasks = relationship("Task", backref="cluster", cascade="delete")

@@ -39,8 +39,6 @@ class Release(Base):
id = Column(Integer, primary_key=True)
name = Column(Unicode(100), nullable=False)
version = Column(String(30), nullable=False)
can_update_from_versions = Column(MutableList.as_mutable(JSON), default=[],
nullable=False, server_default='[]')
description = Column(Unicode)
operating_system = Column(String(50), nullable=False)
state = Column(

@@ -1735,7 +1735,6 @@
name: "Liberty on CentOS 6.5"
state: "unavailable"
version: "liberty-9.0"
can_update_from_versions: []
operating_system: "CentOS"
description: "This option will install the OpenStack Liberty packages using a CentOS based operating system. With high availability features built in, you are getting a robust, enterprise-grade OpenStack deployment."
attributes_metadata:
@@ -1809,7 +1808,6 @@
fields:
name: "Liberty on Ubuntu 14.04"
version: "liberty-9.0"
can_update_from_versions: []
operating_system: "Ubuntu"
description: "This option will install the OpenStack Liberty packages using Ubuntu as a base operating system. With high availability features built in, you are getting a robust, enterprise-grade OpenStack deployment."
attributes_metadata:

@@ -964,7 +964,6 @@ class Cluster(NailgunObject):
- if there is deployment_graph defined by user - use it instead of
defined
- if instance assigned for patching - return custom patching graph
- else return default for release and enabled plugins
deployment graph
"""

@@ -30,7 +30,6 @@ class ClusterSerializer(BasicSerializer):
"net_provider",
"release_id",
"fuel_version",
"pending_release_id",
"is_locked",
"components"
)
@@ -49,6 +48,5 @@ class AttributesSerializer(BasicSerializer):
"net_provider",
"release_id",
"fuel_version",
"pending_release_id",
"is_locked"
)

@@ -24,7 +24,6 @@ class ReleaseSerializer(BasicSerializer):
"id",
"name",
"version",
"can_update_from_versions",
"description",
"operating_system",
"modes_metadata",

@@ -236,26 +236,6 @@ class Task(NailgunObject):
else:
cls.__update_cluster_status(cluster, 'stopped')
elif instance.name == consts.TASK_NAMES.update:
if instance.status == consts.TASK_STATUSES.error:
cls.__update_cluster_status(
cluster,
consts.CLUSTER_STATUSES.update_error
)
q_nodes_to_error = \
TaskHelper.get_nodes_to_deployment_error(cluster)
cls.__update_nodes_to_error(
q_nodes_to_error, error_type=consts.NODE_ERRORS.deploy)
elif instance.status == consts.TASK_STATUSES.ready:
cls.__update_cluster_status(
cluster,
consts.CLUSTER_STATUSES.operational
)
cluster.release_id = cluster.pending_release_id
cluster.pending_release_id = None
@classmethod
def _clean_data(cls, data):
result = copy.copy(data)

@@ -112,13 +112,10 @@ class DeploymentMultinodeSerializer(object):
attrs = objects.Cluster.get_attributes(cluster)
attrs = objects.Attributes.merged_attrs_values(attrs)
release = self.current_release(cluster)
attrs['deployment_mode'] = cluster.mode
attrs['deployment_id'] = cluster.id
attrs['openstack_version_prev'] = getattr(
self.previous_release(cluster), 'version', None)
attrs['openstack_version'] = release.version
attrs['openstack_version'] = cluster.release.version
attrs['fuel_version'] = cluster.fuel_version
attrs['nodes'] = self.node_list(
objects.Cluster.get_nodes_not_for_deletion(cluster))
@@ -145,23 +142,6 @@ class DeploymentMultinodeSerializer(object):
return attrs
def current_release(self, cluster):
"""Actual cluster release."""
return objects.Release.get_by_uid(cluster.pending_release_id) \
if cluster.status == consts.CLUSTER_STATUSES.update \
else cluster.release
def previous_release(self, cluster):
"""Returns previous release.
:param cluster: a ``Cluster`` instance to retrieve release from
:returns: a ``Release`` instance of previous release or ``None``
in case there's no previous release (fresh deployment).
"""
if cluster.status == consts.CLUSTER_STATUSES.update:
return cluster.release
return None
def set_storage_parameters(self, cluster, attrs):
"""Generate pg_num

@@ -804,60 +804,6 @@ class ResetEnvironmentTaskManager(TaskManager):
return supertask
class UpdateEnvironmentTaskManager(TaskManager):
def execute(self):
if not self.cluster.pending_release_id:
raise errors.InvalidReleaseId(
u"Can't update environment '{0}' when "
u"new release Id is invalid".format(self.cluster.name))
running_tasks = db().query(Task).filter_by(
cluster_id=self.cluster.id,
status='running'
).filter(
Task.name.in_([
consts.TASK_NAMES.deploy,
consts.TASK_NAMES.deployment,
consts.TASK_NAMES.reset_environment,
consts.TASK_NAMES.stop_deployment
])
)
if running_tasks.first():
raise errors.TaskAlreadyRunning(
u"Can't update environment '{0}' when "
u"other task is running".format(
self.cluster.id
)
)
nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster)
logger.debug('Nodes to update: {0}'.format(
' '.join([objects.Node.get_node_fqdn(n)
for n in nodes_to_change])))
task_update = Task(name=consts.TASK_NAMES.update, cluster=self.cluster)
db().add(task_update)
self.cluster.status = 'update'
db().flush()
deployment_message = self._call_silently(
task_update,
tasks.UpdateTask,
nodes_to_change,
method_name='message')
db().refresh(task_update)
for node in nodes_to_change:
node.status = 'deploying'
node.progress = 0
db().commit()
rpc.cast('naily', deployment_message)
return task_update
class CheckNetworksTaskManager(TaskManager):
def execute(self, data, check_all_parameters=False):
@@ -909,7 +855,6 @@ class VerifyNetworksTaskManager(TaskManager):
_blocking_statuses = (
consts.CLUSTER_STATUSES.deployment,
consts.CLUSTER_STATUSES.update,
)
def remove_previous_task(self):

@@ -276,40 +276,6 @@ class UpdateNodesInfoTask(object):
return rpc_message
class UpdateTask(object):
@classmethod
def message(cls, task, nodes):
logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)
for n in nodes:
if n.pending_roles:
n.roles += n.pending_roles
n.pending_roles = []
n.status = 'provisioned'
n.progress = 0
orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
serialized_cluster = deployment_serializers.serialize(
orchestrator_graph, task.cluster, nodes)
# After serialization set pending_addition to False
for node in nodes:
node.pending_addition = False
rpc_message = make_astute_message(
task,
'deploy',
'deploy_resp',
{
'deployment_info': serialized_cluster
}
)
db().flush()
return rpc_message
class ProvisionTask(object):
@classmethod

@@ -1022,31 +1022,6 @@ class EnvironmentManager(object):
"Nothing to delete - try creating cluster"
)
def update_environment(self, pending_release_id=None, expect_http=202,
cluster_id=None):
if self.clusters:
cluster = self._get_cluster_by_id(cluster_id)
if not pending_release_id:
pending_release_id = cluster.release_id
cluster.pending_release_id = pending_release_id
self.db.commit()
resp = self.app.put(
reverse(
'ClusterUpdateHandler',
kwargs={'cluster_id': cluster.id}),
expect_errors=True,
headers=self.default_headers)
self.tester.assertEqual(expect_http, resp.status_code)
if not str(expect_http).startswith("2"):
return resp.body
return self.db.query(Task).filter_by(
name=consts.TASK_NAMES.update
).first()
else:
raise NotImplementedError(
"Nothing to update - try creating cluster"
)
def launch_verify_networks(self, data=None, expect_errors=False,
cluster_id=None):
if self.clusters:

@@ -99,7 +99,6 @@ class TestHandlers(BaseIntegrationTest):
'master_ip': '127.0.0.1',
'use_cinder': True,
'deployment_id': cluster_db.id,
'openstack_version_prev': None,
'openstack_version': cluster_db.release.version,
'fuel_version': cluster_db.fuel_version,
'plugins': []
@@ -460,7 +459,6 @@ class TestHandlers(BaseIntegrationTest):
'master_ip': '127.0.0.1',
'use_cinder': True,
'deployment_id': cluster_db.id,
'openstack_version_prev': None,
'openstack_version': cluster_db.release.version,
'fuel_version': cluster_db.fuel_version,
'tasks': [],
@@ -946,7 +944,6 @@ class TestHandlers(BaseIntegrationTest):
'master_ip': '127.0.0.1',
'use_cinder': True,
'deployment_id': cluster_db.id,
'openstack_version_prev': None,
'openstack_version': cluster_db.release.version,
'fuel_version': cluster_db.fuel_version,
'plugins': []

@@ -68,7 +68,7 @@ class OrchestratorSerializerTestBase(base.BaseIntegrationTest):
def setUp(self):
super(OrchestratorSerializerTestBase, self).setUp()
self.cluster_mock = mock.MagicMock(pending_release_id=None)
self.cluster_mock = mock.MagicMock()
self.cluster_mock.id = 0
self.cluster_mock.deployment_tasks = []
self.cluster_mock.release.deployment_tasks = []
@@ -1454,77 +1454,6 @@ class TestNovaOrchestratorHASerializer51(TestNovaOrchestratorHASerializer):
self.assertEqual(expected_priorities, nodes)
class TestHASerializerPatching(TestNovaOrchestratorHASerializer):
env_version = '1111-5.0'
@property
def serializer(self):
self.cluster_mock.pending_release_id = '111'
self.cluster_mock.release.environment_version = '5.0'
return DeploymentHASerializer(AstuteGraph(self.cluster_mock))
def test_set_deployment_priorities(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'}
]
self.add_default_params(nodes)
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'mongo', 'priority': 200},
{'role': 'primary-mongo', 'priority': 300},
{'role': 'primary-controller', 'priority': 400},
{'role': 'controller', 'priority': 500},
{'role': 'controller', 'priority': 600},
{'role': 'ceph-osd', 'priority': 700}
]
self.add_default_params(expected_priorities)
self.assertEqual(expected_priorities, nodes)
def test_set_deployment_priorities_many_cntrls(self):
nodes = [
{'role': 'zabbix-server'},
{'role': 'mongo'},
{'role': 'primary-mongo'},
{'role': 'primary-controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'controller'},
{'role': 'ceph-osd'}
]
self.add_default_params(nodes)
self.serializer.set_deployment_priorities(nodes)
expected_priorities = [
{'role': 'zabbix-server', 'priority': 100},
{'role': 'mongo', 'priority': 200},
{'role': 'primary-mongo', 'priority': 300},
{'role': 'primary-controller', 'priority': 400},
{'role': 'controller', 'priority': 500},
{'role': 'controller', 'priority': 600},
{'role': 'controller', 'priority': 700},
{'role': 'controller', 'priority': 800},
{'role': 'controller', 'priority': 900},
{'role': 'controller', 'priority': 1000},
{'role': 'controller', 'priority': 1100},
{'role': 'controller', 'priority': 1200},
{'role': 'ceph-osd', 'priority': 1300}
]
self.add_default_params(expected_priorities)
self.assertEqual(expected_priorities, nodes)
# TODO(awoodward): multinode deprecation: probably has duplicates
class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):

@@ -826,96 +826,6 @@ class TestDhcpCheckTask(BaseReciverTestCase):
self.assertEqual(self.task.result, {})
class TestClusterUpdate(BaseReciverTestCase):
def setUp(self):
super(TestClusterUpdate, self).setUp()
cluster_id = self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"api": False, "status": consts.NODE_STATUSES.deploying},
{"api": False, "status": consts.NODE_STATUSES.deploying}],
)['id']
self.cluster = self.db.query(Cluster).get(cluster_id)
self.cluster.pending_release_id = self.cluster.release_id
self.cluster.status = consts.CLUSTER_STATUSES.update
self.db.commit()
self.task = Task(
uuid=str(uuid.uuid4()),
name=consts.TASK_NAMES.update,
cluster_id=self.cluster.id
)
self.db.add(self.task)
self.db.commit()
def test_node_deploy_resp_ready(self):
node1, node2 = self.env.nodes
kwargs = {'task_uuid': self.task.uuid,
'status': consts.TASK_STATUSES.ready,
'nodes': [
{'uid': node1.id, 'status': consts.NODE_STATUSES.ready},
{'uid': node2.id, 'status': consts.NODE_STATUSES.ready}]}
self.receiver.deploy_resp(**kwargs)
self.assertEqual(
(node1.status, node2.status),
(consts.NODE_STATUSES.ready, consts.NODE_STATUSES.ready))
self.assertEqual(self.task.status, consts.TASK_STATUSES.ready)
self.assertEqual(self.cluster.status,
consts.CLUSTER_STATUSES.operational)
self.assertEqual(self.cluster.pending_release_id, None)
def test_node_deploy_resp_node_error(self):
node1, node2 = self.env.nodes
kwargs = {'task_uuid': self.task.uuid,
'nodes': [
{'uid': node1.id, 'status': consts.NODE_STATUSES.ready},
{'uid': node2.id, 'status': consts.NODE_STATUSES.error}]}
self.receiver.deploy_resp(**kwargs)
self.assertEqual(
(node1.status, node2.status),
(consts.NODE_STATUSES.ready, consts.NODE_STATUSES.error))
self.assertEqual(self.task.status, consts.TASK_STATUSES.running)
self.assertEqual(self.cluster.status, consts.CLUSTER_STATUSES.update)
self.assertEqual(self.cluster.pending_release_id,
self.cluster.release_id)
def test_node_deploy_resp_update_error(self):
node1, node2 = self.env.nodes
kwargs = {'task_uuid': self.task.uuid,
'status': consts.TASK_STATUSES.error,
'nodes': [
{'uid': node1.id, 'status': consts.NODE_STATUSES.ready},
{'uid': node2.id, 'status': consts.NODE_STATUSES.error}]}
self.receiver.deploy_resp(**kwargs)
self.assertEqual(
(node1.status, node2.status),
(consts.NODE_STATUSES.ready, consts.NODE_STATUSES.error))
self.assertEqual(self.task.status, consts.TASK_STATUSES.error)
self.assertEqual(self.cluster.status,
consts.CLUSTER_STATUSES.update_error)
self.assertEqual(self.cluster.pending_release_id,
self.cluster.release_id)
def test_node_deploy_resp_update_error_wo_explicit_nodes(self):
node1, node2 = self.env.nodes
kwargs = {'task_uuid': self.task.uuid,
'status': consts.TASK_STATUSES.error}
self.receiver.deploy_resp(**kwargs)
self.assertEqual(
(node1.status, node2.status),
(consts.NODE_STATUSES.error, consts.NODE_STATUSES.error))
self.assertEqual(self.task.status, consts.TASK_STATUSES.error)
self.assertEqual(self.cluster.status,
consts.CLUSTER_STATUSES.update_error)
self.assertEqual(self.cluster.pending_release_id,
self.cluster.release_id)
class TestConsumer(BaseReciverTestCase):
def test_node_deploy_resp(self):

@@ -174,22 +174,6 @@ class TestTasksLogging(BaseIntegrationTest):
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.stop_deployment)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_update_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"roles": ["controller"], "status": "ready"},
{"roles": ["cinder"], "status": "ready"},
{"roles": ["compute"], "status": "ready"},
]
)
self.env.update_environment()
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.update)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_dump_task_logging(self, logger):

@@ -157,7 +157,6 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
cluster_db = self.env.clusters[0]
blocking_statuses = (
consts.CLUSTER_STATUSES.deployment,
consts.CLUSTER_STATUSES.update,
)
for status in blocking_statuses:
cluster_db.status = status

@@ -332,7 +332,7 @@ class TestInstallationInfo(BaseTestCase):
# Removing of not required fields
remove_fields = (
'tasks', 'cluster_changes', 'nodegroups', 'pending_release_id',
'tasks', 'cluster_changes', 'nodegroups',
'releases', 'replaced_provisioning_info', 'notifications',
'deployment_tasks', 'name', 'replaced_deployment_info',
'ui_settings'

@@ -80,87 +80,6 @@ class TestClusterValidator(BaseTestCase):
self.assertRaises(errors.NotAllowed,
ClusterValidator.validate, self.cluster_data)
def test_pending_release_validation_success(self):
curr_release = Mock(
id=1,
operating_system='Ubuntu',
version='2014.1',
can_update_from_versions=[],
)
pend_release = Mock(
id=2,
operating_system='Ubuntu',
version='2014.2',
can_update_from_versions=['2014.1'],
)
self.assertTrue(
ClusterValidator._can_update_release(
curr_release, curr_release
)
)
self.assertTrue(
ClusterValidator._can_update_release(
curr_release, pend_release
)
)
self.assertTrue(
ClusterValidator._can_update_release(
pend_release, curr_release
)
)
def test_pending_release_validation_unsuccess(self):
curr_release = Mock(
id=1,
operating_system='Ubuntu',
version='2014.1',
can_update_from_versions=[],
)
pend_release = Mock(
id=2,
operating_system='Ubuntu',
version='2014.2',
can_update_from_versions=[],
)
self.assertFalse(
ClusterValidator._can_update_release(
curr_release, pend_release
)
)
pend_release = Mock(
id=2,
operating_system='CentOS',
version='2014.2',
can_update_from_versions=['2014.1'],
)
self.assertFalse(
ClusterValidator._can_update_release(
curr_release, pend_release
)
)
curr_release = Mock(
id=1,
operating_system='Ubuntu',
version='2014.1',
can_update_from_versions=[],
)
pend_release = Mock(
id=2,
operating_system='Ubuntu',
version='2014.2',
can_update_from_versions=['2014.0'],
)
self.assertFalse(
ClusterValidator._can_update_release(
pend_release, curr_release
)
)
@patch('nailgun.api.v1.validators.cluster.objects'
'.ClusterCollection.filter_by')
@patch('nailgun.api.v1.validators.cluster.objects.Release.get_by_uid')