From 6904aab001d754bff8ace83c53c33bebd8332531 Mon Sep 17 00:00:00 2001
From: "Bob.Haddleton"
Date: Wed, 17 May 2017 21:53:36 -0500
Subject: [PATCH] Support os_version, os_type, and os_distro image properties

Modified default images to use os_type, os_version and os_distro
properties.

Added logic to look for distribution/os_distro, type/os_type and
version/os_version image properties when matching image properties.

Cleaned up minor python warnings in tosca_compute.py

Removed DOS-style newline characters from
tosca_cluster_policies_scaling.py

Change-Id: I72043cf4a4358cfdbc8f98238276d85dc2f5bcc0
Closes-Bug: 1689673
---
 translator/common/images.py                  |  61 +--
 .../tosca/tosca_cluster_policies_scaling.py  | 390 +++++++++---------
 translator/hot/tosca/tosca_compute.py        |  54 ++-
 3 files changed, 266 insertions(+), 239 deletions(-)
 mode change 100644 => 100755 translator/common/images.py
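
Note: with this change the image matching accepts either the legacy TOSCA
capability keys (type/distribution/version) or the standard Glance
properties (os_type/os_distro/os_version). A minimal, hypothetical sketch
of that multi-key lookup follows, for illustration only; the helper name
and the image metadata below are made up and are not part of this patch:

    def match_images(candidates, image_meta, keys, wanted):
        # Keep images that carry any of `keys`; case-insensitive matches go
        # to the front of the result, mismatches to the back, mirroring the
        # _match_images() change in tosca_compute.py below.
        if not wanted:
            return list(candidates)
        matched = []
        for name in candidates:
            for key in keys:
                if key in image_meta[name]:
                    if image_meta[name][key].lower() == str(wanted).lower():
                        matched.insert(0, name)
                    else:
                        matched.append(name)
        return matched

    image_meta = {
        'xenial': {'os_distro': 'ubuntu', 'os_type': 'linux'},
        'trusty': {'distribution': 'Ubuntu', 'type': 'Linux'},
    }
    # Either spelling satisfies the lookup, so both images are kept.
    print(match_images(image_meta, image_meta,
                       ['distribution', 'os_distro'], 'ubuntu'))
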
diff --git a/translator/common/images.py b/translator/common/images.py
old mode 100644
new mode 100755
index f9fa4f18..d9b8818a
--- a/translator/common/images.py
+++ b/translator/common/images.py
@@ -1,3 +1,4 @@
+
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -24,37 +25,46 @@ log = logging.getLogger('heat-translator')
 
 PREDEF_IMAGES = {
     'ubuntu-software-config-os-init': {'architecture': 'x86_64',
-                                       'type': 'Linux',
-                                       'distribution': 'Ubuntu',
-                                       'version': '14.04'},
+                                       'os_type': 'linux',
+                                       'os_distro': 'ubuntu',
+                                       'os_version': '14.04'
+                                       },
+
     'ubuntu-12.04-software-config-os-init': {'architecture': 'x86_64',
-                                             'type': 'Linux',
-                                             'distribution': 'Ubuntu',
-                                             'version': '12.04'},
+                                             'os_type': 'linux',
+                                             'os_distro': 'ubuntu',
+                                             'os_version': '12.04'
+                                             },
     'fedora-amd64-heat-config': {'architecture': 'x86_64',
-                                 'type': 'Linux',
-                                 'distribution': 'Fedora',
-                                 'version': '18.0'},
+                                 'os_type': 'linux',
+                                 'os_distro': 'fedora',
+                                 'os_version': '18.0'
+                                 },
     'F18-x86_64-cfntools': {'architecture': 'x86_64',
-                            'type': 'Linux',
-                            'distribution': 'Fedora',
-                            'version': '19'},
+                            'os_type': 'linux',
+                            'os_distro': 'fedora',
+                            'os_version': '19'
+                            },
     'Fedora-x86_64-20-20131211.1-sda': {'architecture': 'x86_64',
-                                        'type': 'Linux',
-                                        'distribution': 'Fedora',
-                                        'version': '20'},
+                                        'os_type': 'linux',
+                                        'os_distro': 'fedora',
+                                        'os_version': '20'
+                                        },
     'cirros-0.3.1-x86_64-uec': {'architecture': 'x86_64',
-                                'type': 'Linux',
-                                'distribution': 'CirrOS',
-                                'version': '0.3.1'},
+                                'os_type': 'linux',
+                                'os_distro': 'cirros',
+                                'os_version': '0.3.1'
+                                },
     'cirros-0.3.2-x86_64-uec': {'architecture': 'x86_64',
-                                'type': 'Linux',
-                                'distribution': 'CirrOS',
-                                'version': '0.3.2'},
+                                'os_type': 'linux',
+                                'os_distro': 'cirros',
+                                'os_version': '0.3.2'
+                                },
     'rhel-6.5-test-image': {'architecture': 'x86_64',
-                            'type': 'Linux',
-                            'distribution': 'RHEL',
-                            'version': '6.5'}
+                            'os_type': 'linux',
+                            'os_distro': 'rhel',
+                            'os_version': '6.5'
+                            }
 }
 
 SESSION = None
@@ -78,7 +88,8 @@ def get_images():
     else:
         for image in client.images.list():
             image_name = image.name.encode('ascii', 'ignore')
-            metadata = ["architecture", "type", "distribution", "version"]
+            metadata = ["architecture", "type", "distribution", "version",
+                        "os_distro", "os_type", "os_version"]
             if any(key in image.keys() for key in metadata):
                 IMAGES[image_name] = {}
                 for key in metadata:
diff --git a/translator/hot/tosca/tosca_cluster_policies_scaling.py b/translator/hot/tosca/tosca_cluster_policies_scaling.py
index 68b94c62..1de01581 100644
--- a/translator/hot/tosca/tosca_cluster_policies_scaling.py
+++ b/translator/hot/tosca/tosca_cluster_policies_scaling.py
@@ -1,195 +1,195 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from collections import defaultdict
-
-from translator.hot.syntax.hot_resource import HotResource
-# Name used to dynamically load appropriate map class.
-TARGET_CLASS_NAME = 'ToscaClusterAutoscaling'
-
-SCALE_POLICY = 'senlin.policy.scaling-1.0'
-SERVER_TYPE = 'os.nova.server-1.0'
-SCALE_TYPE = {'SCALE_IN': 'CLUSTER_SCALE_IN',
-              'SCALE_OUT': 'CLUSTER_SCALE_OUT'}
-
-ALARM_METER_NAME = {'utilization': 'cpu_util'}
-ALARM_COMPARISON_OPERATOR = {'greater_than': 'gt', 'gerater_equal': 'ge',
-                             'less_than': 'lt', 'less_equal': 'le',
-                             'equal': 'eq', 'not_equal': 'ne'}
-ALARM_STATISTIC = {'average': 'avg'}
-
-
-class ToscaClusterAutoscaling(HotResource):
-    '''Translate TOSCA node type tosca.policies.Scaling.Cluster'''
-
-    toscatype = 'tosca.policies.Scaling.Cluster'
-
-    def __init__(self, policy, csar_dir=None):
-        hot_type = "OS::Senlin::Policy"
-        super(ToscaClusterAutoscaling, self).__init__(policy,
-                                                      type=hot_type,
-                                                      csar_dir=csar_dir)
-        self.policy = policy
-
-    def _generate_scale_properties(self,
-                                   target_cluster_nodes,
-                                   cluster_scale_type):
-        properties = {}
-        bindings = []
-        policy_res = {}
-        adjustment = {}
-        properties["type"] = SCALE_POLICY
-        for cluster_node in target_cluster_nodes:
-            bindings.append({'cluster': cluster_node})
-        properties["bindings"] = bindings
-        policy_res["event"] = cluster_scale_type
-        adjustment["type"] = "CHANGE_IN_CAPACITY"
-        adjustment["number"] = self.\
-            policy.entity_tpl["properties"]["increment"]
-        policy_res["adjustment"] = adjustment
-        properties["properties"] = policy_res
-        return properties
-
-    def handle_expansion(self):
-        hot_resources = []
-        trigger_receivers = defaultdict(list)
-        for node in self.policy.targets:
-            for trigger in self.policy.entity_tpl['triggers']:
-                for action in self.policy.\
-                        entity_tpl['triggers'][trigger]['action']:
-                    scale_name = action
-                    action_sample = self.policy.\
-                        entity_tpl['triggers'][trigger]['action'][action]
-                    scale_type = action_sample['type']
-                    scale_implement = action_sample['implementation']
-                    (entity, method) = scale_implement.split('.')
-                    receiver_prop = {}
-                    receiver_prop['cluster'] = {
-                        "get_resource": "%s_cluster" % node
-                    }
-                    receiver_prop['action'] = SCALE_TYPE[scale_type]
-                    receiver_prop['type'] = method
-                    receiver_name = node + '_' + scale_name + '_receiver'
-                    trigger_receivers[trigger].append(receiver_name)
-                    receiver_resources = HotResource(self.nodetemplate,
-                                                     type='OS::Senlin::Receiver',
-                                                     name=receiver_name,
-                                                     properties=receiver_prop)
-                    hot_resources.append(receiver_resources)
-
-        for trigger in self.policy.entity_tpl['triggers']:
-            sample = self.policy.\
-                entity_tpl['triggers'][trigger]['condition']
-            (meter_name, comparison_operator, threshold) = \
-                sample["constraint"].split()
-            threshold = threshold.strip("%")
-            alarm_prop = {}
-            alarm_prop["description"] = self.policy.entity_tpl['description']
-            alarm_prop["meter_name"] = self.policy.\
-                entity_tpl['triggers'][trigger]['event_type']['metrics']
-            alarm_prop["statistic"] = ALARM_STATISTIC[sample['method']]
-            alarm_prop["period"] = sample["period"]
-            alarm_prop["evaluation_periods"] = sample["evaluations"]
-            alarm_prop["threshold"] = threshold
-            alarm_prop["comparison_operator"] = \
-                ALARM_COMPARISON_OPERATOR[comparison_operator]
-            alarm_prop["repeat_actions"] = "True"
-            alarm_prop["alarm_actions"] = []
-            for index in range(len(trigger_receivers[trigger])):
-                alarm_prop["alarm_actions"].\
-                    append({'get_attr': [trigger_receivers[trigger][index],
-                                         'channel',
-                                         'alarm_url']})
-            ceilometer_resources = HotResource(self.nodetemplate,
-                                               type='OS::Aodh::Alarm',
-                                               name=trigger + '_alarm',
-                                               properties=alarm_prop)
-            hot_resources.append(ceilometer_resources)
-        return hot_resources
-
-    def handle_properties(self, resources):
-        remove_resources = []
-        networks = defaultdict(list)
-        for index, resource in enumerate(resources):
-            if resource.type == 'OS::Neutron::Port':
-                for hot_resource in resource.depends_on_nodes:
-                    if hot_resource.type != 'OS::Neutron::Net':
-                        networks[hot_resource.name].\
-                            append(
-                            {'network': '%s' % resource.properties['network']}
-                        )
-                remove_resources.append(resource)
-            elif resource.type == 'OS::Neutron::Net':
-                remove_resources.append(resource)
-            elif resource.name in self.policy.targets and \
-                    resource.type != 'OS::Senlin::Policy':
-                props = {}
-                del resource.properties['user_data_format']
-                del resource.properties['networks']
-                props['type'] = SERVER_TYPE
-                props['properties'] = resource.properties
-                profile_resources = \
-                    HotResource(resource,
-                                type='OS::Senlin::Profile',
-                                name=resource.name,
-                                properties=props)
-                resources.pop(index)
-                resources.insert(index, profile_resources)
-        for remove_resource in remove_resources:
-            resources.remove(remove_resource)
-
-        for index, resource in enumerate(resources):
-            if resource.name in self.policy.targets:
-                resource.properties['properties']['networks'] = \
-                    networks[resource.name]
-
-        for node in self.policy.targets:
-            props = {}
-            props["profile"] = {'get_resource': '%s' % node}
-            temp = self.policy.entity_tpl["properties"]
-            props["min_size"] = temp["min_instances"]
-            props["max_size"] = temp["max_instances"]
-            props["desired_capacity"] = temp["default_instances"]
-            self.cluster_name = '%s_cluster' % node
-            cluster_resources = \
-                HotResource(self.nodetemplate,
-                            type='OS::Senlin::Cluster',
-                            name=self.cluster_name,
-                            properties=props)
-            resources.append(cluster_resources)
-
-        trigger_num = len(self.policy.entity_tpl['triggers'])
-        for num, trigger in enumerate(self.policy.entity_tpl['triggers']):
-            target_cluster_nodes = []
-            for action in self.policy.\
-                    entity_tpl['triggers'][trigger]['action']:
-                scale_type = self.policy.\
-                    entity_tpl['triggers'][trigger]['action'][action]['type']
-            for node in self.policy.targets:
-                target_cluster_nodes.\
-                    append({"get_resource": "%s_cluster" % node})
-            cluster_scale_type = SCALE_TYPE[scale_type]
-            scale_in_props = \
-                self._generate_scale_properties(target_cluster_nodes,
-                                                cluster_scale_type)
-            if num == trigger_num - 1:
-                self.name = self.name + '_' + trigger
-                self.properties = scale_in_props
-                break
-            policy_resources = \
-                HotResource(self.nodetemplate,
-                            type='OS::Senlin::Policy',
-                            name=self.name + '_' + trigger,
-                            properties=scale_in_props)
-            resources.append(policy_resources)
-        return resources
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from collections import defaultdict
+
+from translator.hot.syntax.hot_resource import HotResource
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaClusterAutoscaling'
+
+SCALE_POLICY = 'senlin.policy.scaling-1.0'
+SERVER_TYPE = 'os.nova.server-1.0'
+SCALE_TYPE = {'SCALE_IN': 'CLUSTER_SCALE_IN',
+              'SCALE_OUT': 'CLUSTER_SCALE_OUT'}
+
+ALARM_METER_NAME = {'utilization': 'cpu_util'}
+ALARM_COMPARISON_OPERATOR = {'greater_than': 'gt', 'gerater_equal': 'ge',
+                             'less_than': 'lt', 'less_equal': 'le',
+                             'equal': 'eq', 'not_equal': 'ne'}
+ALARM_STATISTIC = {'average': 'avg'}
+
+
+class ToscaClusterAutoscaling(HotResource):
+    '''Translate TOSCA node type tosca.policies.Scaling.Cluster'''
+
+    toscatype = 'tosca.policies.Scaling.Cluster'
+
+    def __init__(self, policy, csar_dir=None):
+        hot_type = "OS::Senlin::Policy"
+        super(ToscaClusterAutoscaling, self).__init__(policy,
+                                                      type=hot_type,
+                                                      csar_dir=csar_dir)
+        self.policy = policy
+
+    def _generate_scale_properties(self,
+                                   target_cluster_nodes,
+                                   cluster_scale_type):
+        properties = {}
+        bindings = []
+        policy_res = {}
+        adjustment = {}
+        properties["type"] = SCALE_POLICY
+        for cluster_node in target_cluster_nodes:
+            bindings.append({'cluster': cluster_node})
+        properties["bindings"] = bindings
+        policy_res["event"] = cluster_scale_type
+        adjustment["type"] = "CHANGE_IN_CAPACITY"
+        adjustment["number"] = self.\
+            policy.entity_tpl["properties"]["increment"]
+        policy_res["adjustment"] = adjustment
+        properties["properties"] = policy_res
+        return properties
+
+    def handle_expansion(self):
+        hot_resources = []
+        trigger_receivers = defaultdict(list)
+        for node in self.policy.targets:
+            for trigger in self.policy.entity_tpl['triggers']:
+                for action in self.policy.\
+                        entity_tpl['triggers'][trigger]['action']:
+                    scale_name = action
+                    action_sample = self.policy.\
+                        entity_tpl['triggers'][trigger]['action'][action]
+                    scale_type = action_sample['type']
+                    scale_implement = action_sample['implementation']
+                    (entity, method) = scale_implement.split('.')
+                    receiver_prop = {}
+                    receiver_prop['cluster'] = {
+                        "get_resource": "%s_cluster" % node
+                    }
+                    receiver_prop['action'] = SCALE_TYPE[scale_type]
+                    receiver_prop['type'] = method
+                    receiver_name = node + '_' + scale_name + '_receiver'
+                    trigger_receivers[trigger].append(receiver_name)
+                    receiver_resources = HotResource(self.nodetemplate,
+                                                     type='OS::Senlin::Receiver',
+                                                     name=receiver_name,
+                                                     properties=receiver_prop)
+                    hot_resources.append(receiver_resources)
+
+        for trigger in self.policy.entity_tpl['triggers']:
+            sample = self.policy.\
+                entity_tpl['triggers'][trigger]['condition']
+            (meter_name, comparison_operator, threshold) = \
+                sample["constraint"].split()
+            threshold = threshold.strip("%")
+            alarm_prop = {}
+            alarm_prop["description"] = self.policy.entity_tpl['description']
+            alarm_prop["meter_name"] = self.policy.\
+                entity_tpl['triggers'][trigger]['event_type']['metrics']
+            alarm_prop["statistic"] = ALARM_STATISTIC[sample['method']]
+            alarm_prop["period"] = sample["period"]
+            alarm_prop["evaluation_periods"] = sample["evaluations"]
+            alarm_prop["threshold"] = threshold
+            alarm_prop["comparison_operator"] = \
+                ALARM_COMPARISON_OPERATOR[comparison_operator]
+            alarm_prop["repeat_actions"] = "True"
+            alarm_prop["alarm_actions"] = []
+            for index in range(len(trigger_receivers[trigger])):
+                alarm_prop["alarm_actions"].\
+                    append({'get_attr': [trigger_receivers[trigger][index],
+                                         'channel',
+                                         'alarm_url']})
+            ceilometer_resources = HotResource(self.nodetemplate,
+                                               type='OS::Aodh::Alarm',
+                                               name=trigger + '_alarm',
+                                               properties=alarm_prop)
+            hot_resources.append(ceilometer_resources)
+        return hot_resources
+
+    def handle_properties(self, resources):
+        remove_resources = []
+        networks = defaultdict(list)
+        for index, resource in enumerate(resources):
+            if resource.type == 'OS::Neutron::Port':
+                for hot_resource in resource.depends_on_nodes:
+                    if hot_resource.type != 'OS::Neutron::Net':
+                        networks[hot_resource.name].\
+                            append(
+                            {'network': '%s' % resource.properties['network']}
+                        )
+                remove_resources.append(resource)
+            elif resource.type == 'OS::Neutron::Net':
+                remove_resources.append(resource)
+            elif resource.name in self.policy.targets and \
+                    resource.type != 'OS::Senlin::Policy':
+                props = {}
+                del resource.properties['user_data_format']
+                del resource.properties['networks']
+                props['type'] = SERVER_TYPE
+                props['properties'] = resource.properties
+                profile_resources = \
+                    HotResource(resource,
+                                type='OS::Senlin::Profile',
+                                name=resource.name,
+                                properties=props)
+                resources.pop(index)
+                resources.insert(index, profile_resources)
+        for remove_resource in remove_resources:
+            resources.remove(remove_resource)
+
+        for index, resource in enumerate(resources):
+            if resource.name in self.policy.targets:
+                resource.properties['properties']['networks'] = \
+                    networks[resource.name]
+
+        for node in self.policy.targets:
+            props = {}
+            props["profile"] = {'get_resource': '%s' % node}
+            temp = self.policy.entity_tpl["properties"]
+            props["min_size"] = temp["min_instances"]
+            props["max_size"] = temp["max_instances"]
+            props["desired_capacity"] = temp["default_instances"]
+            self.cluster_name = '%s_cluster' % node
+            cluster_resources = \
+                HotResource(self.nodetemplate,
+                            type='OS::Senlin::Cluster',
+                            name=self.cluster_name,
+                            properties=props)
+            resources.append(cluster_resources)
+
+        trigger_num = len(self.policy.entity_tpl['triggers'])
+        for num, trigger in enumerate(self.policy.entity_tpl['triggers']):
+            target_cluster_nodes = []
+            for action in self.policy.\
+                    entity_tpl['triggers'][trigger]['action']:
+                scale_type = self.policy.\
+                    entity_tpl['triggers'][trigger]['action'][action]['type']
+            for node in self.policy.targets:
+                target_cluster_nodes.\
+                    append({"get_resource": "%s_cluster" % node})
+            cluster_scale_type = SCALE_TYPE[scale_type]
+            scale_in_props = \
+                self._generate_scale_properties(target_cluster_nodes,
+                                                cluster_scale_type)
+            if num == trigger_num - 1:
+                self.name = self.name + '_' + trigger
+                self.properties = scale_in_props
+                break
+            policy_resources = \
+                HotResource(self.nodetemplate,
+                            type='OS::Senlin::Policy',
+                            name=self.name + '_' + trigger,
+                            properties=scale_in_props)
+            resources.append(policy_resources)
+        return resources
diff --git a/translator/hot/tosca/tosca_compute.py b/translator/hot/tosca/tosca_compute.py
index 2cbc14b5..dd94017a 100755
--- a/translator/hot/tosca/tosca_compute.py
+++ b/translator/hot/tosca/tosca_compute.py
@@ -28,13 +28,16 @@ TARGET_CLASS_NAME = 'ToscaCompute'
 
 
 class ToscaCompute(HotResource):
-    '''Translate TOSCA node type tosca.nodes.Compute.'''
+    """Translate TOSCA node type tosca.nodes.Compute."""
 
     COMPUTE_HOST_PROP = (DISK_SIZE, MEM_SIZE, NUM_CPUS) = \
         ('disk_size', 'mem_size', 'num_cpus')
 
     COMPUTE_OS_PROP = (ARCHITECTURE, DISTRIBUTION, TYPE, VERSION) = \
         ('architecture', 'distribution', 'type', 'version')
+
+    IMAGE_OS_PROP = (OS_DISTRO, OS_TYPE, OS_VERSION) = \
+        ('os_distro', 'os_type', 'os_version')
 
     toscatype = 'tosca.nodes.Compute'
     ALLOWED_NOVA_SERVER_PROPS = \
@@ -141,32 +144,41 @@ class ToscaCompute(HotResource):
         # Check whether user exported all required environment variables.
         images = glance_images.get_images()
         match_all = images.keys()
+
         architecture = properties.get(self.ARCHITECTURE)
         if architecture is None:
             self._log_compute_msg(self.ARCHITECTURE, 'image')
         match_arch = self._match_images(match_all, images,
-                                        self.ARCHITECTURE, architecture)
-        type = properties.get(self.TYPE)
-        if type is None:
+                                        [self.ARCHITECTURE], architecture)
+
+        image_type = properties.get(self.TYPE)
+        if image_type is None:
             self._log_compute_msg(self.TYPE, 'image')
-        match_type = self._match_images(match_arch, images, self.TYPE, type)
+        match_type = self._match_images(match_arch, images, [self.TYPE,
+                                                             self.OS_TYPE],
+                                        image_type)
+
         distribution = properties.get(self.DISTRIBUTION)
         if distribution is None:
             self._log_compute_msg(self.DISTRIBUTION, 'image')
         match_distribution = self._match_images(match_type, images,
-                                                self.DISTRIBUTION,
+                                                [self.DISTRIBUTION,
+                                                 self.OS_DISTRO],
                                                 distribution)
+
        version = properties.get(self.VERSION)
         if version is None:
             self._log_compute_msg(self.VERSION, 'image')
         match_version = self._match_images(match_distribution, images,
-                                           self.VERSION, version)
+                                           [self.VERSION, self.OS_VERSION],
+                                           version)
         if len(match_version):
             return list(match_version)[0]
 
-    def _match_flavors(self, this_list, this_dict, attr, size):
-        '''Return from this list all flavors matching the attribute size.'''
+    @staticmethod
+    def _match_flavors(this_list, this_dict, attr, size):
+        """Return from this list all flavors matching the attribute size."""
         if not size:
             return list(this_list)
         matching_flavors = []
@@ -177,24 +189,27 @@ class ToscaCompute(HotResource):
         log.debug(_('Returning list of flavors matching the attribute size.'))
         return matching_flavors
 
-    def _least_flavor(self, this_list, this_dict, attr):
-        '''Return from this list the flavor with the smallest attr.'''
+    @staticmethod
+    def _least_flavor(this_list, this_dict, attr):
+        """Return from this list the flavor with the smallest attr."""
         least_flavor = this_list[0]
         for flavor in this_list:
             if this_dict[flavor][attr] < this_dict[least_flavor][attr]:
                 least_flavor = flavor
         return least_flavor
 
-    def _match_images(self, this_list, this_dict, attr, prop):
+    @staticmethod
+    def _match_images(this_list, this_dict, attr_list, prop):
         if not prop:
             return this_list
         matching_images = []
         for image in this_list:
-            if attr in this_dict[image]:
-                if this_dict[image][attr].lower() == str(prop).lower():
-                    matching_images.insert(0, image)
-                else:
-                    matching_images.append(image)
+            for attr in attr_list:
+                if attr in this_dict[image]:
+                    if this_dict[image][attr].lower() == str(prop).lower():
+                        matching_images.insert(0, image)
+                    else:
+                        matching_images.append(image)
         return matching_images
 
     def get_hot_attribute(self, attribute, args):
@@ -214,8 +229,9 @@ class ToscaCompute(HotResource):
 
         return attr
 
-    def _log_compute_msg(self, prop, what):
+    @staticmethod
+    def _log_compute_msg(prop, what):
         msg = _('No value is provided for Compute capability '
                 'property "%(prop)s". This may set an undesired "%(what)s" '
                 'in the template.') % {'prop': prop, 'what': what}
-        log.warn(msg)
+        log.warning(msg)
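
Images in Glance can be tagged with the standard properties this patch
matches on; for example, with the OpenStack client (the image name below is
a placeholder, not one referenced by the patch):

    openstack image set --property os_type=linux \
        --property os_distro=ubuntu --property os_version=16.04 \
        <image-name-or-id>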