Resolve tasks by node tags

A 'tags' attribute has been added to each role in 'roles_metadata'.
Initially all non-controller roles will only have a tag of their own
role name. This will allow existing tasks which do not have tags
associated with them to work correctly. In the abscence of tags a
task's roles will be used to determine which nodes it will run on.

Implements: blueprint role-decomposition

Change-Id: I390580146048b6e00ec5c42d0adf995a4cff9167
This commit is contained in:
Ryan Moe 2016-09-06 10:44:10 -07:00 committed by Viacheslav Valyavskiy
parent 426e97fb00
commit e368be6b10
31 changed files with 386 additions and 243 deletions

View File

@ -203,6 +203,9 @@ def upload_fixture(fileobj, loader=None):
fire_callback_on_node_create(new_obj)
db().commit()
if new_obj.__class__.__name__ == 'Release':
objects.Release.create_tags(new_obj)
def get_base_fixtures_path():
    """Return the path to the base fixtures directory.

    Resolves to ``../../fixtures`` relative to this module's location.
    """
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, '..', '..', 'fixtures')

View File

@ -1067,7 +1067,7 @@ class NeutronNetworkDeploymentSerializer70(
for node in objects.Cluster.get_nodes_not_for_deletion(cluster):
name = objects.Node.get_slave_name(node)
node_roles = objects.Node.all_roles(node)
node_roles = objects.Node.all_tags(node)
network_roles = cls.get_network_role_mapping_to_ip(node)
# Use permanent identifier as a node key
key = objects.Node.permanent_id(node)

View File

@ -365,7 +365,7 @@ class NovaNetworkDeploymentSerializer70(NovaNetworkDeploymentSerializer61):
for n in Cluster.get_nodes_not_for_deletion(cluster):
name = Node.get_slave_name(n)
node_roles = Node.all_roles(n)
node_roles = Node.all_tags(n)
# Use permanent identifier as a node key
key = Node.permanent_id(n)

View File

@ -28,6 +28,8 @@
- condition: "settings:neutron_advanced_configuration.neutron_l3_ha.value == true"
min: 2
message: "Neutron L3 HA requires at least 2 Controller nodes to function properly."
tags:
- controller
compute:
name: "Compute"
description: "A Compute node creates, manages, and terminates virtual machine instances."
@ -37,6 +39,8 @@
limits:
recommended: 1
fault_tolerance: "2%"
tags:
- compute
cinder:
# NOTE: naming, see https://bugs.launchpad.net/fuel/+bug/1383224
name: "Cinder"
@ -51,6 +55,8 @@
# NOTE: https://bugs.launchpad.net/fuel/+bug/1372914 - Prohibit possibility of adding cinder nodes to an environment with Ceph RBD
- condition: "settings:storage.volumes_ceph.value == true"
message: "Ceph RBD cannot be used with Cinder"
tags:
- cinder
cinder-block-device:
name: 'Cinder Block Device'
description: 'Host node for Cinder Block Devices.'
@ -68,6 +74,8 @@
message: "Cinder Block Device driver should be enabled in the environment settings."
- condition: "settings:storage.volumes_ceph.value == true"
message: "Ceph RBD cannot be used with Cinder Block Device"
tags:
- cinder-block-device
cinder-vmware:
name: "Cinder Proxy to VMware Datastore"
description: "Cinder-VMware provides scheduling of block storage resources delivered over VMware vCenter. Block storage can be used for database storage, expandable file systems, or providing a server with access to raw block level devices."
@ -79,6 +87,8 @@
- condition: "settings:common.use_vcenter.value == false"
action: "hide"
message: "VMware vCenter not enabled for cluster"
tags:
- cinder-vmware
ceph-osd:
name: "Ceph OSD"
description: "Ceph storage can be configured to provide storage for block volumes (Cinder), images (Glance) and ephemeral instance storage (Nova). It can also provide object storage through the S3 and Swift API (See settings to enable each)."
@ -91,6 +101,8 @@
message: "Ceph should be enabled in the environment settings."
update_once:
- controller
tags:
- ceph-osd
mongo:
name: "Telemetry - MongoDB"
description: "A feature-complete and recommended database for storage of metering data from OpenStack Telemetry (Ceilometer)."
@ -114,11 +126,15 @@
message: "Ceilometer should be enabled in the environment settings."
- condition: "settings:additional_components.mongo.value == true"
message: "You are already using external MongoDB."
tags:
- mongo
base-os:
name: "Operating System"
description: "Install base Operating System without additional packages and configuration."
weight: 70
group: "other"
tags:
- base-os
virt:
name: "Virtual"
description: "ADVANCED: Make available possibilities to spawn vms on this node that can be assign as a normal nodes."
@ -132,6 +148,8 @@
- condition: "not ('advanced' in version:feature_groups)"
action: "hide"
message: "Advanced feature should be enabled in feature groups"
tags:
- virt
compute-vmware:
name: "Compute VMware"
description: "A node that runs nova-compute with VCDriver, that manages ESXi computing resources via VMware vCenter."
@ -151,6 +169,8 @@
- condition: "settings:common.use_vcenter.value == false"
action: "hide"
message: "VMware vCenter not enabled for cluster"
tags:
- compute-vmware
ironic:
name: "Ironic"
description: "Ironic conductor."
@ -164,6 +184,21 @@
restrictions:
- condition: "settings:additional_components.ironic.value == false"
message: "Ironic should be enabled in the environment settings."
tags:
- ironic
tags_metadata:
controller:
has_primary: True
mysql:
has_primary: True
mongo:
has_primary: True
compute:
has_primary: False
ceph-osd:
has_primary: False
cinder:
has_primary: False
network_roles_metadata:
-

View File

@ -24,7 +24,7 @@ from nailgun import errors
from nailgun.lcm.task_serializer import TasksSerializersFactory
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.utils.role_resolver import NameMatchingPolicy
from nailgun.utils.resolvers import NameMatchingPolicy
# This class has similar functional with TasksSerializer from task deploy
@ -130,8 +130,8 @@ class TransactionSerializer(object):
consts.ORCHESTRATOR_TASK_TYPES.skipped
)
def __init__(self, context, role_resolver):
self.role_resolver = role_resolver
def __init__(self, context, resolver):
self.resolver = resolver
self.context = context
self.tasks_graph = {}
self.tasks_dictionary = {}
@ -142,15 +142,15 @@ class TransactionSerializer(object):
self.concurrency_policy = get_concurrency_policy()
@classmethod
def serialize(cls, context, tasks, role_resolver):
def serialize(cls, context, tasks, resolver):
"""Resolves roles and dependencies for tasks.
:param context: the deployment context
:param tasks: the deployment tasks
:param role_resolver: the nodes role resolver
:param resolver: the nodes tag resolver
:return: the list of serialized task per node
"""
serializer = cls(context, role_resolver)
serializer = cls(context, resolver)
serializer.process_tasks(tasks)
serializer.resolve_dependencies()
tasks_graph = serializer.tasks_graph
@ -225,8 +225,8 @@ class TransactionSerializer(object):
yield node_id, task
for task in groups:
node_ids = self.role_resolver.resolve(
task.get('roles', task.get('groups'))
node_ids = self.resolver.resolve(
task.get('tags', task.get('roles', task.get('groups')))
)
if not node_ids:
continue
@ -256,8 +256,8 @@ class TransactionSerializer(object):
# all synchronisation tasks will run on sync node
return [None]
# TODO(bgaifullin) remove deprecated groups
return self.role_resolver.resolve(
task.get('roles', task.get('groups'))
return self.resolver.resolve(
task.get('tags', task.get('roles', task.get('groups')))
)
def resolve_dependencies(self):
@ -315,7 +315,7 @@ class TransactionSerializer(object):
return
for dep in six.moves.filter(None, dependencies):
roles = dep.get('role', consts.TASK_ROLES.all)
roles = dep.get('tags', dep.get('role', consts.TASK_ROLES.all))
if roles == consts.TASK_ROLES.self:
node_ids = [node_id]
@ -324,7 +324,7 @@ class TransactionSerializer(object):
node_ids = [None]
excludes = []
else:
node_ids = self.role_resolver.resolve(
node_ids = self.resolver.resolve(
roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all)
)
excludes = [(task_id, node_id)]

View File

@ -28,6 +28,9 @@ from nailgun.objects.deployment_graph import DeploymentGraph
from nailgun.objects.deployment_graph import DeploymentGraphCollection
from nailgun.objects.deployment_graph import DeploymentGraphTask
from nailgun.objects.tag import Tag
from nailgun.objects.tag import TagCollection
from nailgun.objects.release import Release
from nailgun.objects.release import ReleaseCollection
@ -66,9 +69,6 @@ from nailgun.extensions.network_manager.objects.interface import NICCollection
from nailgun.extensions.network_manager.objects.bond import Bond
from nailgun.extensions.network_manager.objects.bond import BondCollection
from nailgun.objects.tag import Tag
from nailgun.objects.tag import TagCollection
from nailgun.objects.node import Node
from nailgun.objects.node import NodeAttributes
from nailgun.objects.node import NodeCollection

View File

@ -45,6 +45,7 @@ from nailgun.objects import NailgunObject
from nailgun.objects.plugin import ClusterPlugin
from nailgun.objects import Release
from nailgun.objects.serializers.cluster import ClusterSerializer
from nailgun.objects import TagCollection
from nailgun.plugins.manager import PluginManager
from nailgun.policy.merge import NetworkRoleMergePolicy
from nailgun.settings import settings
@ -259,10 +260,20 @@ class Cluster(NailgunObject):
db().query(models.Node.id).
filter_by(cluster_id=instance.id).
order_by(models.Node.id)]
cls.delete_tags(instance)
fire_callback_on_node_collection_delete(node_ids)
fire_callback_on_cluster_delete(instance)
super(Cluster, cls).delete(instance)
@classmethod
def delete_tags(cls, instance):
    """Delete every tag owned by the given cluster.

    :param instance: Cluster db object whose tags should be removed
    """
    cluster_tags = TagCollection.filter_by(
        None, owner_id=instance.id, owner_type='cluster')
    cluster_tags.delete()
    db().flush()
@classmethod
def get_default_kernel_params(cls, instance):
kernel_params = instance.attributes.editable.get("kernel_params", {})
@ -795,66 +806,59 @@ class Cluster(NailgunObject):
return available_roles
@classmethod
def set_primary_role(cls, instance, nodes, role_name):
"""Method for assigning primary attribute for specific role.
def get_primary_node(cls, instance, tag):
from nailgun import objects
logger.debug("Getting primary node for tag: %s", tag)
- verify that there is no primary attribute of specific role
assigned to cluster nodes with this role in role list
or pending role list, and this node is not marked for deletion
- if there is no primary role assigned, filter nodes which have current
role in roles or pending_roles
- if there is nodes with ready state - they should have higher priority
- if role was in primary_role_list - change primary attribute
for that association, same for role_list, this is required
because deployment_serializer used by cli to generate deployment info
primary_node = objects.NodeCollection.filter_by(
None,
cluster_id=instance.id,
pending_deletion=False
).filter(
models.NodeTag.node_id == models.Node.id,
models.NodeTag.tag_id == models.Tag.id,
models.Tag.tag == tag,
models.NodeTag.is_primary == True # noqa
).first()
:param instance: Cluster db objects
:param nodes: list of Node db objects
:param role_name: string with known role name
"""
if role_name not in cls.get_roles(instance):
logger.warning(
'Trying to assign primary for non-existing role %s', role_name)
if primary_node is None:
logger.debug("Not found primary node for tag: %s", tag)
else:
logger.debug("Found primary node: %s for tag: %s",
primary_node.id, tag)
return primary_node
@classmethod
def set_primary_tag(cls, instance, nodes, tag):
primary_node = cls.get_primary_node(instance, tag)
if primary_node:
return
node = cls.get_primary_node(instance, role_name)
if not node:
# get nodes with a given role name which are not going to be
# removed
filtered_nodes = []
for node in nodes:
if (not node.pending_deletion and (
role_name in set(node.roles + node.pending_roles))):
filtered_nodes.append(node)
filtered_nodes = sorted(filtered_nodes, key=lambda node: node.id)
filtered_nodes = []
for node in nodes:
if (not node.pending_deletion and (
tag in node.tag_names)):
filtered_nodes.append(node)
filtered_nodes.sort(key=lambda node: node.id)
if filtered_nodes:
primary_node = next((
node for node in filtered_nodes
if node.status == consts.NODE_STATUSES.ready),
filtered_nodes[0])
if filtered_nodes:
primary_node = next((
node for node in filtered_nodes
if node.status == consts.NODE_STATUSES.ready),
filtered_nodes[0])
primary_node.primary_roles = list(primary_node.primary_roles)
primary_node.primary_roles.append(role_name)
for t in primary_node.tags:
if t.tag.tag == tag:
t.is_primary = True
break
db().flush()
@classmethod
def set_primary_roles(cls, instance, nodes):
"""Assignment of all primary attribute for all roles that requires it.
This method is idempotent
To mark role as primary add has_primary: true attribute to release
:param instance: Cluster db object
:param nodes: list of Node db objects
"""
def set_primary_tags(cls, instance, nodes):
if not instance.is_ha_mode:
return
roles_metadata = cls.get_roles(instance)
for role, meta in six.iteritems(roles_metadata):
if meta.get('has_primary'):
cls.set_primary_role(instance, nodes, role)
for tag in TagCollection.get_cluster_tags(instance, has_primary=True):
cls.set_primary_tag(instance, nodes, tag.tag)
@classmethod
def get_nodes_by_role(cls, instance, role_name):
@ -896,39 +900,6 @@ class Cluster(NailgunObject):
query = query.filter(sa.not_(models.Node.id.in_(exclude)))
return query
@classmethod
def get_primary_node(cls, instance, role_name):
"""Get primary node for role_name
If primary node is not found None will be returned
Pending roles and roles are used in search
:param instance: cluster db object
:type: python object
:param role_name: node role name
:type: string
:returns: node db object or None
"""
logger.debug("Getting primary node for role: %s", role_name)
if role_name not in cls.get_roles(instance):
logger.debug("Role not found: %s", role_name)
return None
primary_node = db().query(models.Node).filter_by(
pending_deletion=False,
cluster_id=instance.id
).filter(
models.Node.primary_roles.any(role_name)
).first()
if primary_node is None:
logger.debug("Not found primary node for role: %s", role_name)
else:
logger.debug("Found primary node: %s for role: %s",
primary_node.id, role_name)
return primary_node
@classmethod
def get_controllers_group_id(cls, instance):
return cls.get_controllers_node_group(instance).id

View File

@ -55,6 +55,7 @@ from nailgun.objects import NIC
from nailgun.objects import Notification
from nailgun.objects import Release
from nailgun.objects.serializers.node import NodeSerializer
from nailgun.objects import TagCollection
from nailgun.policy import cpu_distribution
from nailgun.policy import hugepages_distribution
from nailgun.settings import settings
@ -724,6 +725,7 @@ class Node(NailgunObject):
}
cls.update(instance, node_data)
cls.move_roles_to_pending_roles(instance)
instance.tags = []
# when node reseted to discover:
# - cobbler system is deleted
# - mac to ip mapping from dnsmasq.conf is deleted
@ -867,6 +869,7 @@ class Node(NailgunObject):
instance.full_name,
new_roles))
cls.update_tags(instance, new_roles)
instance.roles = new_roles
db().flush()
@ -899,6 +902,7 @@ class Node(NailgunObject):
node_id=instance.id
)
cls.update_tags(instance, new_pending_roles)
instance.pending_roles = new_pending_roles
db().flush()
@ -1020,6 +1024,7 @@ class Node(NailgunObject):
cls.remove_replaced_params(instance)
instance.cluster_id = None
instance.group_id = None
instance.tags = []
instance.kernel_params = None
instance.primary_roles = []
instance.hostname = cls.default_slave_name(instance)
@ -1068,6 +1073,41 @@ class Node(NailgunObject):
db().flush()
@classmethod
def update_tags(cls, instance, new_roles):
    """Sync the node's tag associations with the tags implied by new_roles.

    Role-derived tags required only by the old roles are dropped, tags
    required only by the new roles are added, and any other tags already
    on the node (e.g. custom tags) are kept.

    :param instance: Node db object
    :param new_roles: iterable of role names being assigned to the node
    """
    roles_meta = instance.cluster.release.roles_metadata

    def _tags_for(roles):
        # Collect every tag declared for the given roles in roles_metadata.
        collected = set()
        for role in roles:
            collected.update(roles_meta.get(role, {}).get('tags', []))
        return collected

    # Tags implied by the node's current roles (custom tags excluded).
    current_tags = _tags_for(instance.all_roles)
    # Tags implied by the new role set.
    new_tags = _tags_for(new_roles)

    # Node tags that survive the role change: everything except tags
    # required only by the roles being removed.
    remained_q = TagCollection.get_node_tags_query(
        instance.id
    ).filter(~models.Tag.tag.in_(current_tags - new_tags))
    # Cluster-visible tags newly required by the incoming roles.
    added_q = TagCollection.get_cluster_tags_query(
        instance.cluster
    ).filter(models.Tag.tag.in_(new_tags - current_tags))

    # Materialize before reassigning instance.tags so that an autoflush
    # triggered by the queries cannot observe the cleared associations.
    role_tags = added_q.union(remained_q).all()

    instance.tags = []
    for tag in role_tags:
        assoc = models.NodeTag(is_primary=False)
        assoc.tag = tag
        db().add(assoc)
        instance.tags.append(assoc)
@classmethod
def move_roles_to_pending_roles(cls, instance):
"""Move roles to pending_roles"""
@ -1290,6 +1330,17 @@ class Node(NailgunObject):
nm = Cluster.get_network_manager(instance.cluster)
return nm.dpdk_nics(instance)
@classmethod
def all_tags(cls, instance):
    """Return the node's tag names, sorted alphabetically.

    A tag whose association is marked primary is reported as
    ``primary-<tag>`` instead of the plain tag name.

    :param instance: Node db object
    :returns: sorted list of tag name strings
    """
    names = {assoc.tag.tag for assoc in instance.tags}
    primary_names = [a.tag.tag for a in instance.tags if a.is_primary]
    for name in primary_names:
        names.remove(name)
        names.add('primary-{}'.format(name))
    return sorted(names)
class NodeCollection(NailgunCollection):
"""Node collection"""

View File

@ -31,6 +31,7 @@ from nailgun.objects import DeploymentGraph
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects.serializers import release as release_serializer
from nailgun.objects import Tag
from nailgun.orchestrator import graph_configuration
from nailgun.plugins.manager import PluginManager
from nailgun.settings import settings
@ -73,6 +74,8 @@ class Release(NailgunObject):
for graph_type, graph_data in six.iteritems(graphs):
DeploymentGraph.create_for_model(
graph_data, release_obj, graph_type)
cls.create_tags(release_obj)
return release_obj
@classmethod
@ -326,6 +329,21 @@ class Release(NailgunObject):
metadata = instance.networks_metadata
return metadata.get('dpdk_drivers', {})
@classmethod
def create_tags(cls, instance):
    """Create release-owned, read-only Tag records.

    One Tag is created for every tag listed under each role in the
    release's roles_metadata.

    :param instance: Release db object
    """
    tags_meta = instance.tags_metadata
    for role_name, role_info in instance.roles_metadata.items():
        # NOTE(review): has_primary is looked up in tags_metadata by the
        # *role* name rather than per tag; this only matches when a role's
        # tag shares the role's name -- confirm this is intentional.
        meta = tags_meta.get(role_name, {})
        for tag_name in role_info.get('tags', []):
            Tag.create({
                'owner_id': instance.id,
                'owner_type': 'release',
                'tag': tag_name,
                'has_primary': meta.get('has_primary', False),
                'read_only': True
            })
@classmethod
def delete(cls, instance):
"""Delete release.

View File

@ -18,9 +18,11 @@
Tag object and collection
"""
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects.plugin import ClusterPlugin
from nailgun.objects.serializers.tag import TagSerializer
@ -32,4 +34,29 @@ class Tag(NailgunObject):
class TagCollection(NailgunCollection):
    """Collection of Tag objects with cluster- and node-scoped query helpers."""

    single = Tag

    @classmethod
    def get_cluster_tags_query(cls, cluster):
        """Query every tag visible to a cluster.

        That is: tags owned by the cluster's release, by the cluster
        itself, or by any plugin enabled for the cluster.
        """
        plugin_ids = (ClusterPlugin.get_enabled(cluster.id)
                      .with_entities(models.Plugin.id).subquery())
        release_owned = ((models.Tag.owner_id == cluster.release.id) &
                         (models.Tag.owner_type == 'release'))
        cluster_owned = ((models.Tag.owner_id == cluster.id) &
                         (models.Tag.owner_type == 'cluster'))
        plugin_owned = ((models.Tag.owner_id.in_(plugin_ids)) &
                        (models.Tag.owner_type == 'plugin'))
        return db().query(models.Tag).filter(
            release_owned | cluster_owned | plugin_owned
        )

    @classmethod
    def get_node_tags_query(cls, node_id):
        """Query the tags currently associated with the given node id."""
        return db().query(models.Tag).join(
            models.NodeTag
        ).filter(
            models.NodeTag.node_id == node_id
        )

    @classmethod
    def get_cluster_tags(cls, cluster, **kwargs):
        """Cluster-visible tags additionally filtered by the given attributes."""
        return cls.get_cluster_tags_query(cluster).filter_by(**kwargs)

View File

@ -28,8 +28,8 @@ from nailgun import objects
from nailgun import plugins
from nailgun.settings import settings
from nailgun import utils
from nailgun.utils.role_resolver import NameMatchingPolicy
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.resolvers import NameMatchingPolicy
from nailgun.utils.resolvers import TagResolver
from nailgun.orchestrator.base_serializers import MuranoMetadataSerializerMixin
from nailgun.orchestrator.base_serializers import \
@ -51,17 +51,17 @@ class DeploymentMultinodeSerializer(object):
def __init__(self, tasks_graph=None):
self.task_graph = tasks_graph
self.all_nodes = None
self.role_resolver = None
self.resolver = None
self.initialized = None
def initialize(self, cluster):
self.all_nodes = objects.Cluster.get_nodes_not_for_deletion(cluster)
self.role_resolver = RoleResolver(self.all_nodes)
self.resolver = TagResolver(self.all_nodes)
self.initialized = cluster.id
def finalize(self):
self.all_nodes = None
self.role_resolver = None
self.resolver = None
self.initialized = None
def _ensure_initialized_for(self, cluster):
@ -162,7 +162,7 @@ class DeploymentMultinodeSerializer(object):
username = attrs['workloads_collector'].pop('user', None)
attrs['workloads_collector']['username'] = username
if self.role_resolver.resolve(['cinder']):
if self.resolver.resolve(['cinder']):
attrs['use_cinder'] = True
net_serializer = self.get_net_provider_serializer(cluster)
@ -178,7 +178,7 @@ class DeploymentMultinodeSerializer(object):
node_list = []
for node in nodes:
for role in objects.Node.all_roles(node):
for role in objects.Node.all_tags(node):
node_list.append(cls.serialize_node_for_node_list(node, role))
return node_list
@ -207,7 +207,7 @@ class DeploymentMultinodeSerializer(object):
"""
serialized_nodes = []
for node in nodes:
for role in objects.Node.all_roles(node):
for role in objects.Node.all_tags(node):
serialized_nodes.append(
self.serialize_node(node, role)
)
@ -728,7 +728,7 @@ class DeploymentLCMSerializer(DeploymentHASerializer90):
def serialize_nodes(self, nodes):
serialized_nodes = []
for node in nodes:
roles = objects.Node.all_roles(node)
roles = objects.Node.all_tags(node)
if roles:
serialized_nodes.append(
self.serialize_node(node, roles)
@ -878,7 +878,7 @@ def _invoke_serializer(serializer, cluster, nodes,
cluster, cluster.nodes, ignore_customized
)
objects.Cluster.set_primary_roles(cluster, nodes)
objects.Cluster.set_primary_tags(cluster, nodes)
return serializer.serialize(
cluster, nodes,
ignore_customized=ignore_customized, skip_extensions=skip_extensions

View File

@ -30,7 +30,7 @@ from nailgun import objects
from nailgun.orchestrator import priority_serializers as ps
from nailgun.orchestrator.tasks_serializer import TaskSerializers
from nailgun.policy.name_match import NameMatchingPolicy
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.resolvers import TagResolver
class GraphSolver(nx.DiGraph):
@ -432,7 +432,7 @@ class AstuteGraph(object):
:param nodes: list of node db objects
"""
serialized = []
role_resolver = RoleResolver(nodes)
resolver = TagResolver(nodes)
for task in tasks:
@ -440,7 +440,7 @@ class AstuteGraph(object):
continue
serializer = self.serializers.get_stage_serializer(task)(
task, self.cluster, nodes, role_resolver=role_resolver)
task, self.cluster, nodes, resolver=resolver)
if not serializer.should_execute():
continue

View File

@ -25,7 +25,7 @@ from nailgun import errors
from nailgun.logger import logger
import nailgun.orchestrator.tasks_templates as templates
from nailgun.settings import settings
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.resolvers import TagResolver
# TODO(bgaifullin) HUCK to prevent cycle imports
from nailgun.plugins.manager import PluginManager
@ -34,17 +34,17 @@ from nailgun.plugins.manager import PluginManager
class BasePluginDeploymentHooksSerializer(object):
# TODO(dshulyak) refactor it to be consistent with task_serializer
def __init__(self, cluster, nodes, role_resolver=None):
def __init__(self, cluster, nodes, resolver=None):
"""Initialises.
:param cluster: the cluster object instance
:param nodes: the list of nodes for deployment
:param role_resolver: the instance of BaseRoleResolver
:param resolver: the instance of BaseRoleResolver
"""
self.cluster = cluster
self.nodes = nodes
self.role_resolver = role_resolver or RoleResolver(nodes)
self.resolver = resolver or TagResolver(nodes)
def deployment_tasks(self, plugins, stage):
plugin_tasks = []
@ -58,7 +58,7 @@ class BasePluginDeploymentHooksSerializer(object):
sorted_tasks = self._sort_by_stage_postfix(plugin_tasks)
for task in sorted_tasks:
make_task = None
uids = self.role_resolver.resolve(task['role'])
uids = self.resolver.resolve(task.get('tags', task['role']))
if not uids:
continue
@ -163,10 +163,10 @@ class PluginsPreDeploymentHooksSerializer(BasePluginDeploymentHooksSerializer):
for task in tasks_to_process:
# plugin tasks may store information about node
# role not only in `role` key but also in `groups`
task_role = task.get('role', task.get('groups'))
task_role = task.get('tags', task.get('role', task.get('groups')))
if task_role == consts.TASK_ROLES.all:
# just return all nodes
return self.role_resolver.resolve(consts.TASK_ROLES.all)
return self.resolver.resolve(consts.TASK_ROLES.all)
elif isinstance(task_role, six.string_types):
roles.add(task_role)
elif isinstance(task_role, (list, tuple)):
@ -187,7 +187,7 @@ class PluginsPreDeploymentHooksSerializer(BasePluginDeploymentHooksSerializer):
# executes `apt-get update` which fails on CentOS
roles.discard(consts.TASK_ROLES.master)
return list(self.role_resolver.resolve(roles))
return list(self.resolver.resolve(roles))
def create_repositories(self, plugins):
operating_system = self.cluster.release.operating_system

View File

@ -36,16 +36,16 @@ def stage_serialize(serializer, graph_tasks):
def pre_deployment_serialize(orchestrator_graph, cluster, nodes,
role_resolver=None):
resolver=None):
graph_tasks = orchestrator_graph.pre_tasks_serialize(nodes)
return stage_serialize(
plugins_serializers.PluginsPreDeploymentHooksSerializer(
cluster, nodes, role_resolver=role_resolver), graph_tasks)
cluster, nodes, resolver=resolver), graph_tasks)
def post_deployment_serialize(orchestrator_graph, cluster, nodes,
role_resolver=None):
resolver=None):
graph_tasks = orchestrator_graph.post_tasks_serialize(nodes)
return stage_serialize(
plugins_serializers.PluginsPostDeploymentHooksSerializer(
cluster, nodes, role_resolver=role_resolver), graph_tasks)
cluster, nodes, resolver=resolver), graph_tasks)

View File

@ -29,9 +29,9 @@ from nailgun.orchestrator.tasks_serializer import CreateVMsOnCompute
from nailgun.orchestrator.tasks_serializer import StandardConfigRolesHook
from nailgun.orchestrator.tasks_serializer import TaskSerializers
from nailgun.orchestrator.tasks_templates import make_noop_task
from nailgun.utils.role_resolver import NameMatchingPolicy
from nailgun.utils.role_resolver import NullResolver
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.resolvers import NameMatchingPolicy
from nailgun.utils.resolvers import NullResolver
from nailgun.utils.resolvers import TagResolver
class NoopSerializer(StandardConfigRolesHook):
@ -40,11 +40,12 @@ class NoopSerializer(StandardConfigRolesHook):
return True
def get_uids(self):
roles = self.task.get('groups', self.task.get('role'))
roles = self.task.get('tags', self.task.get('groups',
self.task.get('role')))
if roles is None:
# it means that task is not associated with any node
return [None]
return self.role_resolver.resolve(roles)
return self.resolver.resolve(roles)
def serialize(self):
uids = self.get_uids()
@ -60,7 +61,7 @@ class PluginTaskSerializer(StandardConfigRolesHook):
def serialize(self):
serializer = self.serializer_class(
self.cluster, self.nodes, role_resolver=self.role_resolver
self.cluster, self.nodes, resolver=self.resolver
)
return itertools.chain(
serializer.serialize_begin_tasks(),
@ -417,7 +418,7 @@ class TasksSerializer(object):
self.deployment_nodes = nodes
self.affected_node_ids = frozenset()
self.cluster = cluster
self.role_resolver = RoleResolver(self.deployment_nodes)
self.resolver = TagResolver(self.deployment_nodes)
self.task_serializer = DeployTaskSerializer()
self.task_processor = TaskProcessor()
self.tasks_connections = collections.defaultdict(dict)
@ -466,17 +467,17 @@ class TasksSerializer(object):
else:
tasks_mapping[task['id']] = task
skip = not self.task_filter(task['id'])
self.process_task(task, self.role_resolver, skip)
self.process_task(task, self.resolver, skip)
self.expand_task_groups(groups, tasks_mapping)
# make sure that null node is present
self.tasks_connections.setdefault(None, dict())
def process_task(self, task, role_resolver, skip=False):
def process_task(self, task, resolver, skip=False):
"""Processes one task one nodes of cluster.
:param task: the task instance
:param role_resolver: the role resolver
:param resolver: the role resolver
:param skip: make the task as skipped
"""
@ -485,7 +486,7 @@ class TasksSerializer(object):
)
task_serializer = serializer_factory(
task, self.cluster, self.deployment_nodes,
role_resolver=role_resolver
resolver=resolver
)
skipped = skip or not task_serializer.should_execute()
force = self.events and self.events.check_subscription(task)
@ -493,12 +494,12 @@ class TasksSerializer(object):
# Do not call real serializer if it should be skipped
task_serializer = NoopSerializer(
task, self.cluster, self.deployment_nodes,
role_resolver=role_resolver
resolver=resolver
)
serialised_tasks = self.task_processor.process_tasks(
task, task_serializer.serialize()
)
for serialized in serialised_tasks:
# all skipped task shall have type skipped
# do not exclude them from graph to keep connections between nodes
@ -576,7 +577,8 @@ class TasksSerializer(object):
"""
for task in groups:
skipped = not self.task_filter(task['id'])
node_ids = self.role_resolver.resolve(task.get('role', ()))
node_ids = self.resolver.resolve(task.get('tags',
task.get('role', ())))
for sub_task_id in task.get('tasks', ()):
try:
sub_task = task_mapping[sub_task_id]
@ -624,13 +626,13 @@ class TasksSerializer(object):
return
for dep in dependencies:
roles = dep.get('role', consts.TASK_ROLES.all)
roles = dep.get('tags', dep.get('role', consts.TASK_ROLES.all))
if roles == consts.TASK_ROLES.self:
node_ids = [node_id]
excludes = []
else:
node_ids = self.role_resolver.resolve(
node_ids = self.resolver.resolve(
roles, dep.get('policy', consts.NODE_RESOLVE_POLICY.all)
)
excludes = [(node_id, task_id)]

View File

@ -28,7 +28,7 @@ from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import tasks_templates as templates
from nailgun.settings import settings
from nailgun import utils
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.resolvers import TagResolver
@six.add_metaclass(abc.ABCMeta)
@ -89,14 +89,15 @@ class PuppetHook(GenericNodeHook):
class StandardConfigRolesHook(ExpressionBasedTask):
"""Role hooks that serializes task based on config file only."""
def __init__(self, task, cluster, nodes, role_resolver=None):
def __init__(self, task, cluster, nodes, resolver=None):
super(StandardConfigRolesHook, self).__init__(task, cluster)
self.nodes = nodes
self.role_resolver = role_resolver or RoleResolver(nodes)
self.resolver = resolver or TagResolver(nodes)
def get_uids(self):
return list(self.role_resolver.resolve(
self.task.get('role', self.task.get('groups'))
return list(self.resolver.resolve(
self.task.get('tags', self.task.get('role',
self.task.get('groups')))
))
def serialize(self):
@ -115,7 +116,7 @@ class UploadMOSRepo(GenericRolesHook):
identity = 'upload_core_repos'
def get_uids(self):
return list(self.role_resolver.resolve(consts.TASK_ROLES.all))
return list(self.resolver.resolve(consts.TASK_ROLES.all))
def serialize(self):
uids = self.get_uids()
@ -159,7 +160,7 @@ class RsyncPuppet(GenericRolesHook):
identity = 'rsync_core_puppet'
def get_uids(self):
return list(self.role_resolver.resolve(consts.TASK_ROLES.all))
return list(self.resolver.resolve(consts.TASK_ROLES.all))
def serialize(self):
src_path = self.task['parameters']['src'].format(
@ -238,7 +239,7 @@ class IronicCopyBootstrapKey(CopyKeys):
identity = 'ironic_copy_bootstrap_key'
def should_execute(self):
return len(self.role_resolver.resolve(['ironic'])) > 0
return len(self.resolver.resolve(['ironic'])) > 0
class RestartRadosGW(GenericRolesHook):
@ -258,9 +259,9 @@ class CreateVMsOnCompute(GenericRolesHook):
identity = 'generate_vms'
hook_type = 'puppet'
def __init__(self, task, cluster, nodes, role_resolver=None):
def __init__(self, task, cluster, nodes, resolver=None):
super(CreateVMsOnCompute, self).__init__(
task, cluster, [], role_resolver
task, cluster, [], resolver
)
self.vm_nodes = objects.Cluster.get_nodes_to_spawn_vms(self.cluster)
@ -336,9 +337,9 @@ class UploadConfiguration(GenericRolesHook):
identity = 'upload_configuration'
def __init__(self, task, cluster, nodes, configs=None, role_resolver=None):
def __init__(self, task, cluster, nodes, configs=None, resolver=None):
super(UploadConfiguration, self).__init__(
task, cluster, nodes, role_resolver=role_resolver
task, cluster, nodes, resolver=resolver
)
self.configs = configs

View File

@ -93,12 +93,12 @@ def _add_cross_depends(task, depends):
return task
def adapt_legacy_tasks(deployment_tasks, legacy_plugin_tasks, role_resolver):
def adapt_legacy_tasks(deployment_tasks, legacy_plugin_tasks, resolver):
"""Adapt the legacy tasks to execute with Task Based Engine.
:param deployment_tasks: the list of deployment tasks
:param legacy_plugin_tasks: the pre/post tasks from tasks.yaml
:param role_resolver: the RoleResolver instance
:param resolver: the TagResolver instance
"""
min_task_version = StrictVersion(consts.TASK_CROSS_DEPENDENCY)
@ -135,7 +135,7 @@ def adapt_legacy_tasks(deployment_tasks, legacy_plugin_tasks, role_resolver):
elif task['id'] in post_deployment_graph.node:
required_for.add(TASK_END_TEMPLATE.format('post_deployment'))
else:
for role in role_resolver.get_all_roles(_get_role(task)):
for role in resolver.get_all_roles(_get_role(task)):
required_for.add(TASK_END_TEMPLATE.format(role))
task['required_for'] = list(required_for)
if task_version < min_task_version:
@ -168,7 +168,7 @@ def adapt_legacy_tasks(deployment_tasks, legacy_plugin_tasks, role_resolver):
logger.info("Added cross_depends for legacy task: %s", task['id'])
task_depends = [
{'name': TASK_START_TEMPLATE.format(g), 'role': 'self'}
for g in role_resolver.get_all_roles(_get_role(task))
for g in resolver.get_all_roles(_get_role(task))
]
yield _add_cross_depends(task, task_depends)

View File

@ -56,8 +56,8 @@ from nailgun.task.fake import FAKE_THREADS
from nailgun.task.helpers import TaskHelper
from nailgun.task.legacy_tasks_adapter import adapt_legacy_tasks
from nailgun.utils import logs as logs_utils
from nailgun.utils.resolvers import TagResolver
from nailgun.utils.restrictions import VmwareAttributesRestriction
from nailgun.utils.role_resolver import RoleResolver
def make_astute_message(task, method, respond_to, args):
@ -337,7 +337,7 @@ class DeploymentTask(BaseDeploymentTask):
# NOTE(dshulyak) At this point parts of the orchestration can be empty,
# it should not cause any issues with deployment/progress and was
# done by design
role_resolver = RoleResolver(nodes)
resolver = TagResolver(nodes)
serialized_cluster = deployment_serializers.serialize(
graph, transaction.cluster, nodes)
@ -346,10 +346,10 @@ class DeploymentTask(BaseDeploymentTask):
pre_deployment = stages.pre_deployment_serialize(
graph, transaction.cluster, nodes,
role_resolver=role_resolver)
resolver=resolver)
post_deployment = stages.post_deployment_serialize(
graph, transaction.cluster, nodes,
role_resolver=role_resolver)
resolver=resolver)
if affected_nodes:
graph.reexecutable_tasks(events)
@ -362,10 +362,10 @@ class DeploymentTask(BaseDeploymentTask):
pre_deployment_affected = stages.pre_deployment_serialize(
graph, transaction.cluster, affected_nodes,
role_resolver=role_resolver)
resolver=resolver)
post_deployment_affected = stages.post_deployment_serialize(
graph, transaction.cluster, affected_nodes,
role_resolver=role_resolver)
resolver=resolver)
cls._extend_tasks_list(pre_deployment, pre_deployment_affected)
cls._extend_tasks_list(post_deployment, post_deployment_affected)
@ -555,7 +555,7 @@ class ClusterTransaction(DeploymentTask):
# TODO(bgaifullin) Primary roles applied in deployment_serializers
# need to move this code from deployment serializer
# also role resolver should be created after serialization completed
role_resolver = RoleResolver(nodes)
resolver = TagResolver(nodes)
cluster = transaction.cluster
if objects.Cluster.is_propagate_task_deploy_enabled(cluster):
@ -566,12 +566,12 @@ class ClusterTransaction(DeploymentTask):
)
else:
plugin_tasks = None
tasks = adapt_legacy_tasks(tasks, plugin_tasks, role_resolver)
tasks = adapt_legacy_tasks(tasks, plugin_tasks, resolver)
directory, graph, metadata = lcm.TransactionSerializer.serialize(
context,
tasks,
role_resolver,
resolver,
)
logger.info("tasks serialization is finished.")

View File

@ -155,7 +155,7 @@ class TestReplacedDeploymentInfoSerialization(OrchestratorSerializerTestBase):
self.cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={'api': False})
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
def test_replaced_tasks_is_not_preserved(self):
node = self.env.create_node(
@ -184,7 +184,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorSerializer, self).setUp()
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
def create_env(self, mode, network_manager='FlatDHCPManager'):
node_args = [
@ -488,7 +488,7 @@ class TestNovaNetworkOrchestratorSerializer61(OrchestratorSerializerTestBase):
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.Cluster.prepare_for_deployment(cluster_db)
objects.Cluster.set_primary_roles(cluster_db, cluster_db.nodes)
objects.Cluster.set_primary_tags(cluster_db, cluster_db.nodes)
self.db.flush()
return cluster_db
@ -743,7 +743,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.Cluster.prepare_for_deployment(cluster_db)
objects.Cluster.set_primary_roles(cluster_db, cluster_db.nodes)
objects.Cluster.set_primary_tags(cluster_db, cluster_db.nodes)
self.db.flush()
return cluster_db
@ -1227,7 +1227,7 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorHASerializer, self).setUp()
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
def create_env(self, mode):
cluster = self.env.create(
@ -1454,7 +1454,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorSerializer, self).setUp()
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
def create_env(self, mode, segment_type='vlan'):
release_kwargs = {}
@ -2128,10 +2128,11 @@ class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorHASerializer, self).setUp()
self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
def create_env(self, mode):
cluster = self.env.create(
api=True,
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': mode,
@ -2857,7 +2858,7 @@ class TestDeploymentGraphlessSerializers(OrchestratorSerializerTestBase):
{'roles': [], 'pending_roles': ['cinder'],
'pending_addition': True}]
)
objects.Cluster.set_primary_roles(self.cluster, self.cluster.nodes)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
@property
def serializer(self):

View File

@ -771,6 +771,7 @@ class TestPluginDeploymentTasksInjection70(base.BaseIntegrationTest):
]
)
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
self.plugin_data = {
'package_version': '3.0.0',
'releases': [
@ -1128,8 +1129,10 @@ class TestRolesSerializationWithPlugins(BaseDeploymentSerializer,
objects.Cluster.prepare_for_deployment(self.cluster)
serializer = self._get_serializer(self.cluster)
serialized_data = serializer.serialize(
self.cluster, self.cluster.nodes)
with mock.patch('nailgun.objects.node.Node.all_tags',
mock.Mock(return_value=['test_role'])):
serialized_data = serializer.serialize(
self.cluster, self.cluster.nodes)
serialized_data = deployment_info_to_legacy(serialized_data)
self.assertItemsEqual(serialized_data[0]['tasks'], [{
'parameters': {

View File

@ -1187,16 +1187,16 @@ class TestTaskManagers(BaseIntegrationTest):
nodes_kwargs=[{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready}] * 3)
task_manager = manager.NodeDeletionTaskManager(cluster_id=cluster.id)
objects.Cluster.set_primary_roles(cluster, self.env.nodes)
objects.Cluster.set_primary_tags(cluster, self.env.nodes)
primary_node = filter(
lambda n: 'controller' in n.primary_roles,
lambda n: 'primary-controller' in objects.Node.all_tags(n),
self.env.nodes)[0]
task_manager.execute([primary_node])
self.env.refresh_nodes()
new_primary = filter(
lambda n: ('controller' in n.primary_roles and
lambda n: ('primary-controller' in objects.Node.all_tags(n) and
n.pending_deletion is False),
self.env.nodes)[0]

View File

@ -20,7 +20,7 @@ import multiprocessing.dummy
from nailgun import consts
from nailgun import errors
from nailgun import lcm
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.resolvers import TagResolver
from nailgun.test.base import BaseUnitTest
@ -108,13 +108,13 @@ class TestTransactionSerializer(BaseUnitTest):
}
})
with mock.patch('nailgun.utils.role_resolver.objects') as m_objects:
m_objects.Node.all_roles = lambda x: x.roles
cls.role_resolver = RoleResolver(cls.nodes)
with mock.patch('nailgun.utils.resolvers.objects') as m_objects:
m_objects.Node.all_tags = lambda x: x.roles
cls.resolver = TagResolver(cls.nodes)
def test_serialize_integration(self):
serialized = lcm.TransactionSerializer.serialize(
self.context, self.tasks, self.role_resolver
self.context, self.tasks, self.resolver
)[1]
# controller
self.datadiff(
@ -191,7 +191,7 @@ class TestTransactionSerializer(BaseUnitTest):
def test_resolve_nodes(self):
serializer = lcm.TransactionSerializer(
self.context, self.role_resolver
self.context, self.resolver
)
self.assertEqual(
[None],
@ -219,7 +219,7 @@ class TestTransactionSerializer(BaseUnitTest):
def test_dependencies_de_duplication(self):
serializer = lcm.TransactionSerializer(
self.context, self.role_resolver
self.context, self.resolver
)
serializer.tasks_graph = {
None: {},
@ -277,7 +277,7 @@ class TestTransactionSerializer(BaseUnitTest):
'tasks': ['task4', 'task2']
})
serialized = lcm.TransactionSerializer.serialize(
self.context, tasks, self.role_resolver
self.context, tasks, self.resolver
)
tasks_per_node = serialized[1]
self.datadiff(
@ -326,7 +326,7 @@ class TestTransactionSerializer(BaseUnitTest):
def test_expand_dependencies(self):
serializer = lcm.TransactionSerializer(
self.context, self.role_resolver
self.context, self.resolver
)
serializer.tasks_graph = {
'1': {'task1': {}},
@ -342,7 +342,7 @@ class TestTransactionSerializer(BaseUnitTest):
def test_expand_cross_dependencies(self):
serializer = lcm.TransactionSerializer(
self.context, self.role_resolver
self.context, self.resolver
)
serializer.tasks_graph = {
'1': {'task1': {}, 'task2': {}},
@ -380,7 +380,7 @@ class TestTransactionSerializer(BaseUnitTest):
def test_need_update_task(self):
serializer = lcm.TransactionSerializer(
self.context, self.role_resolver
self.context, self.resolver
)
self.assertTrue(serializer.need_update_task(
{}, {"id": "task1", "type": "puppet"}

View File

@ -59,9 +59,9 @@ class TestLegacyTasksAdapter(BaseTestCase):
@classmethod
def setUpClass(cls):
super(TestLegacyTasksAdapter, cls).setUpClass()
cls.role_resolver = mock.MagicMock()
cls.role_resolver.get_all_roles.side_effect = \
cls.role_resolver_side_effect
cls.resolver = mock.MagicMock()
cls.resolver.get_all_roles.side_effect = \
cls.resolver_side_effect
def test_returns_same_task_if_no_legacy(self):
tasks = [
@ -71,7 +71,7 @@ class TestLegacyTasksAdapter(BaseTestCase):
{'id': 'group1', 'type': consts.ORCHESTRATOR_TASK_TYPES.group},
{'id': 'stage1', 'type': consts.ORCHESTRATOR_TASK_TYPES.stage}
]
new_tasks = list(adapt_legacy_tasks(tasks, None, self.role_resolver))
new_tasks = list(adapt_legacy_tasks(tasks, None, self.resolver))
self.datadiff(tasks, new_tasks, ignore_keys='required_for')
self.assertEqual([], tasks[0].get('required_for', []))
self.assertEqual(
@ -79,7 +79,7 @@ class TestLegacyTasksAdapter(BaseTestCase):
)
@staticmethod
def role_resolver_side_effect(roles):
def resolver_side_effect(roles):
if isinstance(roles, six.string_types):
roles = [roles]
return set(roles)
@ -118,7 +118,7 @@ class TestLegacyTasksAdapter(BaseTestCase):
]
tasks.extend(stages)
new_tasks = list(adapt_legacy_tasks(tasks, [], self.role_resolver))
new_tasks = list(adapt_legacy_tasks(tasks, [], self.resolver))
self.assertEqual(
{
@ -297,7 +297,7 @@ class TestLegacyTasksAdapter(BaseTestCase):
}
]
new_tasks = list(adapt_legacy_tasks(
tasks, legacy_plugin_tasks, self.role_resolver
tasks, legacy_plugin_tasks, self.resolver
))
stage1_tasks = new_tasks[-5:-2]
depends = [{'role': None, 'name': 'stage1_end'}]

View File

@ -45,6 +45,7 @@ from nailgun import consts
from nailgun import plugins
from nailgun.db.sqlalchemy.models import NodeGroup
from nailgun.db.sqlalchemy.models import NodeTag
from nailgun.db.sqlalchemy.models import Task
from nailgun.extensions.network_manager.manager import NetworkManager
@ -742,6 +743,35 @@ class TestNodeObject(BaseIntegrationTest):
}
self.assertEqual(expected_attributes, node.attributes)
def test_update_tags(self):
self.env.create(
cluster_kwargs={'api': False},
nodes_kwargs=[{'role': 'controller'}])
node = self.env.nodes[0]
self.assertEqual(['controller'], objects.Node.all_tags(node))
objects.Node.update_roles(node, ['controller', 'cinder'])
self.assertItemsEqual(
['controller', 'cinder'], objects.Node.all_tags(node)
)
t = objects.Tag.create({
'tag': 'test',
'owner_id': node.cluster.id,
'owner_type': 'cluster'
})
node_tag = NodeTag(tag=t)
node.tags.append(node_tag)
self.db.add(node_tag)
self.db.flush()
self.assertItemsEqual(
['controller', 'cinder', 'test'], objects.Node.all_tags(node)
)
objects.Node.update_roles(node, [])
self.assertEquals(['test'], objects.Node.all_tags(node))
class TestTaskObject(BaseIntegrationTest):

View File

@ -25,7 +25,7 @@ from nailgun.orchestrator.plugins_serializers import \
from nailgun.orchestrator.plugins_serializers import \
PluginsPreDeploymentHooksSerializer
from nailgun.test import base
from nailgun.utils.role_resolver import NullResolver
from nailgun.utils.resolvers import NullResolver
class TestBasePluginDeploymentHooksSerializer(base.BaseTestCase):
@ -41,7 +41,7 @@ class TestBasePluginDeploymentHooksSerializer(base.BaseTestCase):
self.hook = BasePluginDeploymentHooksSerializer(
self.nodes,
self.cluster,
role_resolver=NullResolver([x['id'] for x in self.nodes])
resolver=NullResolver([x['id'] for x in self.nodes])
)
def test_original_order_of_deployment_tasks(self):
@ -146,7 +146,7 @@ class TestTasksDeploymentOrder(base.BaseTestCase):
self.hook = BasePluginDeploymentHooksSerializer(
self.nodes,
self.cluster,
role_resolver=NullResolver([x['id'] for x in self.nodes])
resolver=NullResolver([x['id'] for x in self.nodes])
)
def make_plugin_mock_with_stages(self, plugin_name, stages):
@ -223,7 +223,7 @@ class TestPluginsPreDeploymentHooksSerializer(
self.hook = PluginsPreDeploymentHooksSerializer(
self.cluster,
self.nodes,
role_resolver=NullResolver([x['id'] for x in self.nodes]))
resolver=NullResolver([x['id'] for x in self.nodes]))
@mock.patch(
'nailgun.orchestrator.plugins_serializers.'
@ -283,7 +283,7 @@ class TestPluginsPostDeploymentHooksSerializer(
self.hook = PluginsPostDeploymentHooksSerializer(
self.cluster,
self.nodes,
role_resolver=NullResolver([x['id'] for x in self.nodes]))
resolver=NullResolver([x['id'] for x in self.nodes]))
def test_serialize_begin_tasks(self):
self.assertItemsEqual(self.hook.serialize_begin_tasks(), list())

View File

@ -49,13 +49,13 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
{'pending_roles': [self.role_name],
'status': consts.NODE_STATUSES.discover,
'pending_addition': True}])
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
objects.Cluster.set_primary_tags(cluster, cluster.nodes)
nodes = sorted(cluster.nodes, key=lambda node: node.id)
# with lowest uid is assigned as primary
self.assertEqual(
objects.Node.all_roles(nodes[0]), [self.primary_role_name])
objects.Node.all_tags(nodes[0]), [self.primary_role_name])
self.assertEqual(
objects.Node.all_roles(nodes[1]), [self.role_name])
objects.Node.all_tags(nodes[1]), [self.role_name])
def test_primary_controller_assigned_for_ready_node(self):
cluster = self.env.create(
@ -69,16 +69,16 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
{'roles': [self.role_name],
'status': consts.NODE_STATUSES.ready,
'pending_addition': True}])
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
objects.Cluster.set_primary_tags(cluster, cluster.nodes)
# primary assigned to node with ready status
nodes = sorted(cluster.nodes, key=lambda node: node.id)
ready_node = next(n for n in cluster.nodes
if n.status == consts.NODE_STATUSES.ready)
self.assertEqual(nodes[1], ready_node)
self.assertEqual(
objects.Node.all_roles(nodes[1]), [self.primary_role_name])
objects.Node.all_tags(nodes[1]), [self.primary_role_name])
self.assertEqual(
objects.Node.all_roles(nodes[0]), [self.role_name])
objects.Node.all_tags(nodes[0]), [self.role_name])
def test_primary_assignment_multinode(self):
"""Primary should not be assigned in multinode env."""
@ -95,11 +95,11 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
{'roles': [self.role_name],
'status': consts.NODE_STATUSES.ready,
'pending_addition': True}])
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
objects.Cluster.set_primary_tags(cluster, cluster.nodes)
self.assertEqual(
objects.Node.all_roles(cluster.nodes[0]), [self.role_name])
objects.Node.all_tags(cluster.nodes[0]), [self.role_name])
self.assertEqual(
objects.Node.all_roles(cluster.nodes[1]), [self.role_name])
objects.Node.all_tags(cluster.nodes[1]), [self.role_name])
def test_primary_not_assigned_to_pending_deletion(self):
cluster = self.env.create(
@ -110,9 +110,9 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
{'roles': [self.role_name],
'status': consts.NODE_STATUSES.ready,
'pending_deletion': True}])
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
objects.Cluster.set_primary_tags(cluster, cluster.nodes)
self.assertEqual(
objects.Node.all_roles(cluster.nodes[0]), [self.role_name])
objects.Node.all_tags(cluster.nodes[0]), [self.role_name])
@contextmanager
def assert_node_reassigned(self):
@ -127,16 +127,16 @@ class BasePrimaryRolesAssignmentTestCase(base.BaseTestCase):
{'roles': [self.role_name],
'status': consts.NODE_STATUSES.ready,
'pending_addition': True}])
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
objects.Cluster.set_primary_tags(cluster, cluster.nodes)
nodes = sorted(cluster.nodes, key=lambda node: node.id)
self.assertEqual(
objects.Node.all_roles(nodes[1]), [self.primary_role_name])
objects.Node.all_tags(nodes[1]), [self.primary_role_name])
self.assertEqual(
objects.Node.all_roles(nodes[0]), [self.role_name])
objects.Node.all_tags(nodes[0]), [self.role_name])
yield nodes[1]
objects.Cluster.set_primary_roles(cluster, cluster.nodes)
objects.Cluster.set_primary_tags(cluster, cluster.nodes)
self.assertEqual(
objects.Node.all_roles(nodes[0]), [self.primary_role_name])
objects.Node.all_tags(nodes[0]), [self.primary_role_name])
def test_primary_assign_after_reset_to_discovery(self):
"""Removing primary roles after resetting node to discovery"""

View File

@ -19,10 +19,10 @@ import six
from nailgun import consts
from nailgun.test.base import BaseUnitTest
from nailgun.utils import role_resolver
from nailgun.utils import resolvers
class TestPatternBasedRoleResolver(BaseUnitTest):
class TestPatternBasedTagResolver(BaseUnitTest):
@classmethod
def setUpClass(cls):
cls.roles_of_nodes = [
@ -38,12 +38,12 @@ class TestPatternBasedRoleResolver(BaseUnitTest):
]
def setUp(self):
objs_mock = mock.patch('nailgun.utils.role_resolver.objects')
objs_mock = mock.patch('nailgun.utils.resolvers.objects')
self.addCleanup(objs_mock.stop)
objs_mock.start().Node.all_roles.side_effect = self.roles_of_nodes
objs_mock.start().Node.all_tags.side_effect = self.roles_of_nodes
def test_resolve_by_pattern(self):
resolver = role_resolver.RoleResolver(self.nodes)
resolver = resolvers.TagResolver(self.nodes)
self.assertItemsEqual(
["0", "2", "3"],
resolver.resolve(["/.*controller/"])
@ -58,14 +58,14 @@ class TestPatternBasedRoleResolver(BaseUnitTest):
)
def test_resolve_all(self):
resolver = role_resolver.RoleResolver(self.nodes)
resolver = resolvers.TagResolver(self.nodes)
self.assertItemsEqual(
(x.uid for x in self.nodes),
resolver.resolve("*")
)
def test_resolve_master(self):
resolver = role_resolver.RoleResolver(self.nodes)
resolver = resolvers.TagResolver(self.nodes)
self.assertItemsEqual(
[consts.MASTER_NODE_UID],
resolver.resolve(consts.TASK_ROLES.master)
@ -76,7 +76,7 @@ class TestPatternBasedRoleResolver(BaseUnitTest):
)
def test_resolve_any(self):
resolver = role_resolver.RoleResolver(self.nodes)
resolver = resolvers.TagResolver(self.nodes)
all_nodes = resolver.resolve("*", consts.NODE_RESOLVE_POLICY.all)
self.assertItemsEqual(
all_nodes,
@ -87,7 +87,7 @@ class TestPatternBasedRoleResolver(BaseUnitTest):
self.assertTrue(any_node.issubset(all_nodes))
def test_get_all_roles(self):
resolver = role_resolver.RoleResolver(self.nodes)
resolver = resolvers.TagResolver(self.nodes)
all_roles = {r for roles in self.roles_of_nodes for r in roles}
self.assertEqual(all_roles, resolver.get_all_roles())
self.assertEqual(all_roles, resolver.get_all_roles(
@ -108,5 +108,5 @@ class TestNullResolver(BaseUnitTest):
node_ids = ['1', '2', '3']
self.assertIs(
node_ids,
role_resolver.NullResolver(node_ids).resolve("controller")
resolvers.NullResolver(node_ids).resolve("controller")
)

View File

@ -50,6 +50,7 @@ class BaseTaskSerializationTest(base.BaseTestCase):
self.env.create_node(
roles=['cinder', 'compute'], cluster_id=self.cluster.id)]
self.all_uids = [n.uid for n in self.nodes]
objects.Cluster.set_primary_tags(self.cluster, self.cluster.nodes)
# imitate behaviour of old-style tasks merge where cluster-level
# deployment graph is overriding all other graphs.
@ -80,7 +81,7 @@ class BaseTaskSerializationTestUbuntu(base.BaseTestCase):
self.env.create_node(
roles=['controller'], cluster_id=self.cluster.id),
self.env.create_node(
roles=['primary-controller'], cluster_id=self.cluster.id),
roles=['controller'], cluster_id=self.cluster.id),
self.env.create_node(
roles=['cinder', 'compute'], cluster_id=self.cluster.id)]
self.all_uids = [n.uid for n in self.nodes]
@ -187,7 +188,7 @@ class TestHooksSerializers(BaseTaskSerializationTest):
self.assertFalse(task.should_execute())
@mock.patch.object(NetworkDeploymentSerializer, 'update_nodes_net_info')
@mock.patch.object(objects.Node, 'all_roles')
@mock.patch.object(objects.Node, 'all_tags')
def test_upload_nodes_info(self, m_roles, m_update_nodes):
# mark one node as ready so we can test for duplicates
self.env.nodes[0].status = consts.NODE_STATUSES.ready

View File

@ -360,7 +360,7 @@ class TestTaskSerializers(BaseTestCase):
def test_expand_task_groups(self):
node_ids = ['1', '2']
with mock.patch.object(self.serializer, 'role_resolver') as m_resolve:
with mock.patch.object(self.serializer, 'resolver') as m_resolve:
m_resolve.resolve.return_value = node_ids
self.serializer.expand_task_groups(
[
@ -433,7 +433,7 @@ class TestTaskSerializers(BaseTestCase):
}
}
with mock.patch.object(self.serializer, 'role_resolver') as m_resolve:
with mock.patch.object(self.serializer, 'resolver') as m_resolve:
m_resolve.resolve.return_value = node_ids
# the default role and policy
self.assertItemsEqual(
@ -536,7 +536,7 @@ class TestTaskSerializers(BaseTestCase):
self.serializer.task_processor.origin_task_ids = {
'task_1': 'task'
}
self.serializer.role_resolver = task_based_deployment.NullResolver(
self.serializer.resolver = task_based_deployment.NullResolver(
node_ids
)
self.serializer.resolve_dependencies()
@ -592,11 +592,11 @@ class TestTaskSerializers(BaseTestCase):
def test_deploy_only_selected_nodes(self):
tasks = [
{
"id": "test1", "role": ["controller"],
"id": "test1", "role": ["controller"], "tags": ["controller"],
"type": "puppet", "version": "2.0.0", "parameters": {}
},
{
"id": "test2", "role": ["compute"],
"id": "test2", "role": ["compute"], "tags": ["compute"],
"type": "puppet", "version": "2.0.0", "parameters": {}
}
]
@ -619,20 +619,20 @@ class TestTaskSerializers(BaseTestCase):
def test_serialise_with_events(self):
tasks = [
{
"id": "test1", "role": ["controller"],
"id": "test1", "role": ["controller"], "tags": ["controller"],
"type": "puppet", "version": "2.0.0", "parameters": {}
},
{
"id": "test2", "role": ["compute"],
"id": "test2", "role": ["compute"], "tags": ["compute"],
"type": "puppet", "version": "2.0.0", "parameters": {},
"reexecute_on": ["deploy"]
},
{
"id": "test3", "role": ["compute"],
"id": "test3", "role": ["compute"], "tags": ["compute"],
"type": "puppet", "version": "2.0.0", "parameters": {}
},
{
"id": "test4", "role": ["cinder"],
"id": "test4", "role": ["cinder"], "tags": ["cinder"],
"type": "puppet", "version": "2.0.0", "parameters": {}
}
]

View File

@ -33,7 +33,7 @@ from nailgun.task import legacy_tasks_adapter
from nailgun.utils import dict_update
from nailgun.utils import get_in
from nailgun.utils import mule
from nailgun.utils import role_resolver
from nailgun.utils import resolvers
from nailgun import yaql_ext
@ -358,8 +358,8 @@ class TransactionsManager(object):
# we should initialize primary roles for cluster before
# role resolve has been created
objects.Cluster.set_primary_roles(cluster, nodes)
resolver = role_resolver.RoleResolver(nodes)
objects.Cluster.set_primary_tags(cluster, nodes)
resolver = resolvers.TagResolver(nodes)
_adjust_graph_tasks(
graph,
cluster,

View File

@ -67,7 +67,7 @@ class NullResolver(BaseRoleResolver):
return []
class RoleResolver(BaseRoleResolver):
class TagResolver(BaseRoleResolver):
"""The general role resolver.
Allows to use patterns in name of role
@ -86,7 +86,7 @@ class RoleResolver(BaseRoleResolver):
"""
self.__mapping = defaultdict(set)
for node in nodes:
for r in objects.Node.all_roles(node):
for r in objects.Node.all_tags(node):
self.__mapping[r].add(node.uid)
def resolve(self, roles, policy=None):