Merge "remove vmware"

This commit is contained in:
Jenkins 2017-02-16 13:35:48 +00:00 committed by Gerrit Code Review
commit 5b5ec8b4fb
115 changed files with 40 additions and 5406 deletions

View File

@ -65,15 +65,6 @@
strategy:
type: parallel
- id: cinder-vmware
type: group
role: [cinder-vmware]
requires: [controller]
required_for: [deploy_end]
parameters:
strategy:
type: parallel
- id: compute
type: group
role: [compute]
@ -84,16 +75,6 @@
strategy:
type: parallel
- id: compute-vmware
type: group
role: [compute-vmware]
requires: [controller]
required_for: [deploy_end]
tasks: [globals, hiera, tools, logging, netconfig, connectivity_tests, hosts, firewall, top-role-compute]
parameters:
strategy:
type: parallel
- id: mongo
type: group
role: [mongo]

View File

@ -44,9 +44,6 @@ class openstack::cinder(
$region = 'RegionOne',
$notification_driver = undef,
$service_workers = $::os_workers,
$vmware_host_ip = '10.10.10.10',
$vmware_host_username = 'administrator@vsphere.local',
$vmware_host_password = 'password',
$rbd_pool = 'volumes',
$rbd_user = 'volumes',
$rbd_secret_uuid = 'a5d0dd94-57c4-ae55-ffe0-7e3732a24455',

View File

@ -36,7 +36,7 @@
- id: ceilometer-compute
type: puppet
version: 2.1.0
groups: [compute, compute-vmware]
groups: [compute]
required_for: [deploy_end]
requires: [ceilometer-controller, top-role-compute]
cross-depends:

View File

@ -16,7 +16,7 @@
$.get('glance_api_servers'), $.get('region', 'RegionOne'), $.ironic,
$.get('memcached_servers'),
$.get('openstack_controller'), $.get('external_lb'), $.quantum_settings,
$.get('database_vip'), $.nova_quota, $.use_vcenter, $.libvirt_type,
$.get('database_vip'), $.nova_quota, $.libvirt_type,
$.network_metadata.nodes.values().where(
$.node_roles.any($.matches('(controller|rabbitmq)'))).network_roles.select(
$.get('mgmt/messaging')),

View File

@ -63,7 +63,7 @@
- id: top-role-cinder
type: puppet
version: 2.1.0
groups: [cinder, cinder-block-device, cinder-vmware]
groups: [cinder, cinder-block-device]
required_for: [enable_cinder_volume_service]
requires: [hosts, firewall]
cross-depends:
@ -175,8 +175,8 @@
- id: enable_nova_compute_service
type: puppet
version: 2.1.0
groups: [compute, compute-vmware]
requires: [top-role-compute, top-role-compute-vmware]
groups: [compute]
requires: [top-role-compute]
required_for: [deploy_end]
refresh_on: [nova_config, nova_paste_api_ini]
cross-depends:

View File

@ -6,8 +6,7 @@
requires: [openstack-controller, memcached]
condition:
yaql_exp: &swift_enabled >
((not $.storage.objects_ceph and not $.storage.images_ceph) and
not $.storage.images_vcenter) and
(not $.storage.objects_ceph and not $.storage.images_ceph) and
(changedAny($.network_scheme, $.network_metadata, $.swift,
$.get('swift_master_role', 'primary-controller'),
$.get('swift_object_roles'), ('primary-controller' in $.roles),
@ -64,8 +63,7 @@
requires: [swift-proxy_storage, primary-swift-proxy_storage]
condition:
yaql_exp: >
((not $.storage.objects_ceph and not $.storage.images_ceph) and
not $.storage.images_vcenter) and
(not $.storage.objects_ceph and not $.storage.images_ceph) and
(changedAny($.storage, $.get('swift_master_role', 'primary-controller'),
$.get('swift_ring_min_part_hours'),('primary-controller' in $.roles)))
parameters:
@ -86,8 +84,7 @@
- name: swift-proxy_storage
condition:
yaql_exp: >
((not $.storage.objects_ceph and not $.storage.images_ceph) and
not $.storage.images_vcenter) and
(not $.storage.objects_ceph and not $.storage.images_ceph) and
(changedAny($.swift, $.network_metadata.vips,
$.get('region', 'RegionOne'), $.public_ssl, $.get('use_ssl')))
parameters:

View File

@ -67,16 +67,6 @@ class openstack_tasks::glance::glance {
$glance_glare_user = pick($glance_glare_hash['user'],'glare')
$glance_glare_user_password = $glance_glare_hash['user_password']
$glance_glare_tenant = pick($glance_glare_hash['tenant'],'services')
$glance_vcenter_host = $glance_hash['vc_host']
$glance_vcenter_user = $glance_hash['vc_user']
$glance_vcenter_password = $glance_hash['vc_password']
$glance_vcenter_datacenter = $glance_hash['vc_datacenter']
$glance_vcenter_datastore = $glance_hash['vc_datastore']
$glance_vcenter_image_dir = $glance_hash['vc_image_dir']
$glance_vcenter_insecure = $glance_hash['vc_insecure']
$glance_vcenter_api_retry_count = '20'
$glance_vcenter_ca_file = pick($glance_hash['vc_ca_file'], {})
$glance_vcenter_ca_content = pick($glance_vcenter_ca_file['content'], {})
$glance_image_cache_max_size = $glance_hash['image_cache_max_size']
$pipeline = pick($glance_hash['pipeline'], 'keystone')
$glance_large_object_size = pick($glance_hash['large_object_size'], '5120')
@ -98,11 +88,6 @@ class openstack_tasks::glance::glance {
$known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
$show_multiple_locations = pick($glance_hash['show_multiple_locations'], true)
$show_image_direct_url = pick($glance_hash['show_image_direct_url'], true)
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
$known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
$show_multiple_locations = pick($glance_hash['show_multiple_locations'], true)
$show_image_direct_url = pick($glance_hash['show_image_direct_url'], true)
} else {
$glance_backend = 'swift'
$known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ]
@ -296,37 +281,6 @@ class openstack_tasks::glance::glance {
glare_enabled => true,
}
}
'vmware': {
$glance_vcenter_datastores = "${glance_vcenter_datacenter}:${glance_vcenter_datastore}"
if ! empty($glance_vcenter_ca_content) and ! $glance_vcenter_insecure {
$vcenter_ca_filepath = '/etc/glance/vcenter-ca.pem'
$glance_vcenter_insecure_real = false
file { $vcenter_ca_filepath:
ensure => file,
content => $glance_vcenter_ca_content,
mode => '0644',
owner => 'root',
group => 'root',
}
Class['::glance::backend::vsphere']->File[$vcenter_ca_filepath]
} else {
$vcenter_ca_filepath = $::os_service_default
$glance_vcenter_insecure_real = $glance_vcenter_insecure
}
class { '::glance::backend::vsphere':
vcenter_host => $glance_vcenter_host,
vcenter_user => $glance_vcenter_user,
vcenter_password => $glance_vcenter_password,
vcenter_datastores => $glance_vcenter_datastores,
vcenter_insecure => $glance_vcenter_insecure_real,
vcenter_image_dir => $glance_vcenter_image_dir,
vcenter_api_retry_count => $glance_vcenter_api_retry_count,
vcenter_ca_file => $vcenter_ca_filepath,
glare_enabled => true,
}
}
default: {
class { "glance::backend::${glance_backend}":
glare_enabled => true,

View File

@ -132,12 +132,6 @@ class openstack_tasks::openstack_controller::openstack_controller {
$notify_on_state_change = 'vm_and_task_state'
if hiera('use_vcenter', false) or hiera('libvirt_type') == 'vcenter' {
$multi_host = false
} else {
$multi_host = true
}
# From legacy params.pp
case $::osfamily {
'RedHat': {

View File

@ -105,9 +105,6 @@ class openstack_tasks::roles::cinder {
}
Augeas<| tag == 'lvm-conf-augeas'|> ~> Exec<| title == 'Update initramfs' |>
} elsif roles_include(['cinder-vmware']) {
$manage_volumes = 'vmdk'
$physical_volumes = false
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
$physical_volumes = false
@ -130,7 +127,7 @@ class openstack_tasks::roles::cinder {
# other services that are declared in openstack manifests
# TODO(xarses): somone needs to refactor this out
# https://bugs.launchpad.net/fuel/+bug/1558831
if ($use_ceph and !$storage_hash['volumes_lvm'] and !roles_include(['cinder-vmware'])) {
if ($use_ceph and !$storage_hash['volumes_lvm']) {
prepare_network_config(hiera_hash('network_scheme', {}))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')

View File

@ -69,8 +69,8 @@ class openstack_tasks::roles::compute {
}
}
# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects
if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] {
# Use Swift if it isn't replaced by Ceph for BOTH images and objects
if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) {
$use_swift = true
} else {
$use_swift = false

View File

@ -7,7 +7,7 @@ class openstack_tasks::roles::enable_compute {
$use_ovs = hiera('use_ovs', true)
$roles = hiera('roles')
if !('compute-vmware' in $roles) and $use_ovs {
if $use_ovs {
$neutron_integration_bridge = 'br-int'
$bridge_exists_check = "ovs-vsctl br-exists ${neutron_integration_bridge}"

View File

@ -56,7 +56,7 @@ class openstack_tasks::swift::proxy_storage {
$swift_proxies_num = size(hiera('swift_proxies'))
# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects
# Use Swift if it isn't replaced by Ceph for BOTH images and objects
$master_swift_proxy_nodes = get_nodes_hash_by_roles($network_metadata, [$swift_master_role])
$master_swift_proxy_nodes_list = values($master_swift_proxy_nodes)
$master_swift_proxy_ip = regsubst($master_swift_proxy_nodes_list[0]['network_roles']['swift/api'], '\/\d+$', '')

View File

@ -8,8 +8,8 @@ class openstack_tasks::swift::rebalance_cronjob {
$swift_master_role = hiera('swift_master_role', 'primary-controller')
$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1)
# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects
if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] {
# Use Swift if it isn't replaced by Ceph for BOTH images and objects
if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) {
$master_swift_replication_nodes = get_nodes_hash_by_roles($network_metadata, [$swift_master_role])
$master_swift_replication_nodes_list = values($master_swift_replication_nodes)
$master_swift_replication_ip = $master_swift_replication_nodes_list[0]['network_roles']['swift/replication']

View File

@ -1,14 +0,0 @@
module Puppet::Parser::Functions
newfunction(:get_cinder_vmware_data, :type => :rvalue,
:doc => <<-EOS
Transform data to suitable form for cinder-vmware: rebuild array of hashes
to hash of hashes with availability_zone_name as a key and add debug value.
EOS
) do |args|
raise(Puppet::ParseError, 'Empty array provided!') if args.size < 1
volumes = args[0]
debug = args[1] || "false"
volumes.each {|h| h.store("debug", debug)}
Hash[volumes.collect {|h| [h["availability_zone_name"], h]}]
end
end

View File

@ -89,7 +89,6 @@ class osnailyfacter::globals::globals {
}
$murano_hash = merge($murano, { 'plugins' => {'glance_artifacts_plugin' => $murano_glance_artifacts_plugin } })
$heat_hash = hiera_hash('heat', {})
$vcenter_hash = hiera('vcenter', {})
$nova_hash = hiera_hash('nova', {})
$mysql_hash = hiera('mysql', {})
$rabbit_hash = hiera_hash('rabbit', {})
@ -354,8 +353,6 @@ class osnailyfacter::globals::globals {
# Determine who should get the volume service
if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) {
$manage_volumes = 'iscsi'
} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) {
$manage_volumes = 'vmdk'
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
} else {
@ -371,9 +368,6 @@ class osnailyfacter::globals::globals {
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
$glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
$glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
} else {
$glance_backend = 'file'
$glance_known_stores = false

View File

@ -12,7 +12,7 @@ class osnailyfacter::openstack_haproxy::openstack_haproxy_radosgw {
$external_lb = hiera('external_lb', false)
if !$external_lb {
if (!$storage_hash['images_ceph'] and !$storage_hash['objects_ceph'] and !$storage_hash['images_vcenter']) {
if (!$storage_hash['images_ceph'] and !$storage_hash['objects_ceph']) {
$use_swift = true
} else {
$use_swift = false

View File

@ -15,7 +15,7 @@ class osnailyfacter::openstack_haproxy::openstack_haproxy_swift {
$external_lb = hiera('external_lb', false)
if (!$storage_hash['images_ceph'] and !$storage_hash['objects_ceph'] and !$storage_hash['images_vcenter']) {
if (!$storage_hash['images_ceph'] and !$storage_hash['objects_ceph']) {
$use_swift = true
} else {
$use_swift = false

View File

@ -1,12 +0,0 @@
class osnailyfacter::vmware::cinder_vmware {
notice('MODULAR: vmware/cinder_vmware.pp')
$cinder_hash = hiera_hash('cinder', {})
if roles_include(['cinder-vmware']) {
$debug = pick($cinder_hash['debug'], hiera('debug', true))
$volumes = get_cinder_vmware_data($cinder_hash['instances'], $debug)
create_resources(vmware::cinder::vmdk, $volumes)
}
}

View File

@ -1,53 +0,0 @@
class osnailyfacter::vmware::compute_vmware {
notice('MODULAR: vmware/compute_vmware.pp')
$debug = hiera('debug', true)
$vcenter_hash = hiera_hash('vcenter', {})
$computes = $vcenter_hash['computes']
$computes_hash = parse_vcenter_settings($computes)
$defaults = {
current_node => hiera('node_name'),
vlan_interface => $vcenter_hash['esxi_vlan_interface']
}
create_resources(vmware::compute_vmware, $computes_hash, $defaults)
$ceilometer_hash = hiera_hash('ceilometer', {})
$ceilometer_enabled = $ceilometer_hash['enabled']
if $ceilometer_enabled and $computes {
$compute = $computes[0]
$password = $ceilometer_hash['user_password']
$tenant = pick($ceilometer_hash['tenant'], 'services')
$service_endpoint = hiera('service_endpoint')
$management_vip = hiera('management_vip')
$ssl_hash = hiera_hash('use_ssl', {})
$auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http')
$auth_host = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [hiera('keystone_endpoint', ''), $service_endpoint, $management_vip])
$auth_port = '5000'
$identity_uri = "${auth_protocol}://${auth_host}:${auth_port}"
class { '::vmware::ceilometer::compute_vmware':
debug => $debug,
availability_zone_name => $compute['availability_zone_name'],
vc_cluster => $compute['vc_cluster'],
vc_host => $compute['vc_host'],
vc_user => $compute['vc_user'],
vc_password => $compute['vc_password'],
vc_insecure => $compute['vc_insecure'],
vc_ca_file => $compute['vc_ca_file'],
service_name => $compute['service_name'],
identity_uri => $identity_uri,
auth_user => 'ceilometer',
auth_password => $password,
tenant => $tenant,
}
}
}

View File

@ -1,29 +0,0 @@
class osnailyfacter::vmware::vcenter {
notice('MODULAR: vmware/vcenter.pp')
$use_vcenter = hiera('use_vcenter', false)
$vcenter_hash = hiera_hash('vcenter')
$public_vip = hiera('public_vip')
$ceilometer_hash = hiera_hash('ceilometer', {})
$nova_hash = hiera_hash('nova', {})
$public_ssl_hash = hiera_hash('public_ssl')
$ssl_hash = hiera_hash('use_ssl', {})
$vncproxy_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'protocol', [$nova_hash['vncproxy_protocol'], 'http'])
$vncproxy_host = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'hostname', [$public_vip])
$debug = pick($vcenter_hash['debug'], hiera('debug', false))
if $use_vcenter {
class { '::vmware':
vcenter_settings => $vcenter_hash['computes'],
vlan_interface => $vcenter_hash['esxi_vlan_interface'],
use_quantum => true,
vncproxy_protocol => $vncproxy_protocol,
vncproxy_host => $vncproxy_host,
nova_hash => $nova_hash,
ceilometer => $ceilometer_hash['enabled'],
debug => $debug,
}
}
}

View File

@ -154,18 +154,6 @@
timeout: 120
cwd: /
- id: vcenter_compute_zones_create
type: shell
version: 2.1.0
role: [primary-controller, compute-vmware]
requires: [post_deployment_start, enable_nova_compute_service]
required_for: [post_deployment_end]
condition:
yaql_exp: '$.use_vcenter and changed($.use_vcenter)'
parameters:
cmd: /usr/bin/python /etc/puppet/modules/osnailyfacter/modular/astute/vcenter_hooks.py --create_zones
timeout: 180
- id: disable_keystone_service_token
type: puppet
version: 2.2.0

View File

@ -1,146 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import subprocess
import yaml
from itertools import ifilter
from novaclient.client import Client
from optparse import OptionParser
def get_data_from_hiera(hiera_key, lookup_type='priority'):
"""Extract the data from Hiera using the Ruby call.
Yes, it looks funny but other ways to do it are worse.
I have to use the Ruby implementation of hiera here
with the Puppet config file.
:param lookup_type: Which lookup type should be used?
# priority, hash, array
:type lookup_type: str
:param hiera_key: the key to search
:type hiera_key: str
:return: hiera data
:rtype: None, str, list, dict
"""
hiera_lookup = '''
ruby -r hiera -r yaml -e '
hiera = Hiera.new(:config => "/etc/puppet/hiera.yaml");
data = hiera.lookup("{hiera_key}", nil, {{}}, nil, :{lookup_type});
puts YAML.dump data;
'
'''
try:
command = hiera_lookup.format(
hiera_key=hiera_key,
lookup_type=lookup_type,
)
response = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
)
yaml_data = yaml.load(response.stdout.read())
return yaml_data
except subprocess.CalledProcessError as exception:
logging.warn('Could not get Hiera data: {} Code: {} Output: {}'.format(
hiera_key,
exception.returncode,
exception.output,
))
return None
def check_availability_zones(nova_client, compute):
nova_zones = nova_client.availability_zones.list()
nova_aggregates = nova_client.aggregates.list()
nova_zones_and_aggregates = nova_aggregates + nova_zones
compute_zone = compute['availability_zone_name']
present = filter(lambda item: item.to_dict().get('zoneName') ==
compute_zone or item.to_dict().get('availability_zone') ==
compute_zone, nova_zones_and_aggregates)
if present:
print("Zone {0} already present.".format(compute_zone))
else:
print("Zone {0} is missing, creating it.".format(compute_zone))
nova_client.aggregates.create(compute_zone, compute_zone)
def check_host_in_zone(nova_client, compute):
nova_aggregates = nova_client.aggregates.list()
compute_zone = compute['availability_zone_name']
compute_host = compute_zone + "-" + compute['service_name']
present = filter(lambda aggr: compute_host in aggr.hosts, nova_aggregates)
if present:
print("Compute service {0} already in {1} zone.".
format(compute['service_name'], compute_zone))
else:
for aggregate in nova_aggregates:
if aggregate.to_dict()['name'] == compute_zone:
print("Compute service {0} not in {1} zone. Adding.".
format(compute['service_name'], compute_zone))
nova_client.aggregates.add_host(aggregate, compute_host)
def main():
credentials = get_data_from_hiera('access', 'hash')
ssl = get_data_from_hiera('use_ssl', 'hash')
USERNAME = credentials['user']
PASSWORD = credentials['password']
PROJECT_ID = credentials['tenant']
VERSION = 2
IP = []
IP.append(get_data_from_hiera('keystone_vip'))
IP.append(get_data_from_hiera('service_endpoint'))
IP.append(get_data_from_hiera('management_vip'))
if ssl:
auth_protocol = 'https://'
auth_url = ssl['keystone_internal_hostname']
auth_port = ':5000/v2.0/'
else:
auth_protocol = 'http://'
auth_url = ifilter(None, IP).next()
auth_port = ':5000/v2.0/'
AUTH_URL = auth_protocol + auth_url + auth_port
parser = OptionParser()
parser.add_option("--create_zones", action="store_true", help="Create \
needed availability zones and puts coresponding compute \
services in corresponding availability zones")
(options, args) = parser.parse_args()
nova = Client(VERSION, USERNAME, PASSWORD, PROJECT_ID, AUTH_URL,
endpoint_type='internalURL')
vcenter_settings = get_data_from_hiera('vcenter', 'hash')
if options.create_zones:
for compute in vcenter_settings['computes']:
print("---Start of Compute service {0} zone creation.---".
format(compute['service_name']))
check_availability_zones(nova, compute)
check_host_in_zone(nova, compute)
print("----End of Compute service {0} ----".
format(compute['service_name']))
if __name__ == '__main__':
main()

View File

@ -31,7 +31,7 @@
- id: dns-client
type: puppet
version: 2.1.0
role: [primary-mongo, mongo, primary-controller, controller, compute, ceph-osd, cinder, cinder-vmware]
role: [primary-mongo, mongo, primary-controller, controller, compute, ceph-osd, cinder]
requires: [post_deployment_start]
required_for: [ntp-client]
condition:

View File

@ -1,7 +1,7 @@
- id: firewall
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, ironic, primary-rabbitmq, rabbitmq, primary-database, database,
primary-keystone, keystone, primary-neutron, neutron]
required_for: [deploy_end]

View File

@ -14,7 +14,7 @@
- id: fuel_pkgs
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, ironic, primary-rabbitmq, rabbitmq, primary-database, database,
primary-keystone, keystone, primary-neutron, neutron]
requires: [setup_repositories]

View File

@ -2,8 +2,7 @@
type: puppet
version: 2.2.0
tags: [primary-controller, controller,
cinder, cinder-block-device, cinder-vmware, compute, compute-vmware,
ceph-osd, primary-mongo, mongo, virt, ironic,
cinder, cinder-block-device, compute, ceph-osd, primary-mongo, mongo, virt, ironic,
primary-rabbitmq, rabbitmq, primary-database, database, primary-keystone, keystone,
primary-neutron, neutron]
required_for: [deploy_end]

View File

@ -1,8 +1,8 @@
- id: hiera
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware,
compute, compute-vmware, ceph-osd, primary-mongo, mongo, virt, ironic,
tags: [primary-controller, controller, cinder, cinder-block-device, compute,
ceph-osd, primary-mongo, mongo, virt, ironic,
primary-rabbitmq, rabbitmq, primary-database, database, primary-keystone, keystone,
primary-neutron, neutron]
requires: [deploy_start, rsync_core_puppet]

View File

@ -1,7 +1,7 @@
- id: hosts
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, ironic, primary-rabbitmq, rabbitmq, primary-database, database,
primary-keystone, keystone, primary-neutron, neutron]
required_for: [deploy_end]

View File

@ -1,7 +1,7 @@
- id: limits
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, ironic, primary-rabbitmq, rabbitmq]
required_for: [tools]
requires: [logging]

View File

@ -1,7 +1,7 @@
- id: logging
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, virt, ironic, primary-rabbitmq, rabbitmq, primary-database, database,
primary-keystone, keystone, primary-neutron, neutron]
required_for: [deploy_end]

View File

@ -2,8 +2,7 @@
type: puppet
version: 2.1.0
groups: [primary-controller, controller, cinder, cinder-block-device,
cinder-vmware, compute, compute-vmware, ceph-osd, primary-mongo,
mongo, virt, ironic]
compute, ceph-osd, primary-mongo, mongo, virt, ironic]
# We need to execute this task before netconfig on all nodes except mongo.
# Mongo nodes will configure routing via admin network and update it later
# with configure_default_route task
@ -49,7 +48,7 @@
- id: netconfig
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, virt, ironic, primary-rabbitmq, rabbitmq, primary-database, database,
primary-keystone, keystone, primary-neutron, neutron]
required_for: [deploy_end]
@ -86,7 +85,7 @@
- id: connectivity_tests
type: puppet
version: 2.1.0
groups: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd, primary-mongo, mongo, virt, ironic]
groups: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd, primary-mongo, mongo, virt, ironic]
required_for: [firewall, hosts]
requires: [netconfig]
condition:
@ -114,7 +113,7 @@
- id: reserved_ports
type: puppet
version: 2.1.0
groups: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd, primary-mongo, mongo, virt, ironic]
groups: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd, primary-mongo, mongo, virt, ironic]
required_for: [globals]
requires: [rsync_core_puppet]
condition:

View File

@ -18,7 +18,7 @@
- id: ntp-client
type: puppet
version: 2.1.0
role: [primary-mongo, mongo, compute, ceph-osd, cinder, cinder-vmware]
role: [primary-mongo, mongo, compute, ceph-osd, cinder]
requires: [dns-client]
cross-depends:
- name: ntp-server

View File

@ -1,7 +1,7 @@
- id: ssl-keys-saving
type: puppet
version: 2.2.0
tags: [primary-controller, controller, compute, compute-vmware, cinder, cinder-vmware, primary-mongo, mongo, ceph-osd, virt,
tags: [primary-controller, controller, compute, cinder, primary-mongo, mongo, ceph-osd, virt,
primary-keystone, keystone]
requires: [firewall]
condition:
@ -21,7 +21,7 @@
- id: ssl-add-trust-chain
type: puppet
version: 2.2.0
tags: [primary-controller, controller, compute, compute-vmware, cinder, cinder-vmware, primary-mongo, mongo, ceph-osd, virt,
tags: [primary-controller, controller, compute, cinder, primary-mongo, mongo, ceph-osd, virt,
primary-keystone, keystone]
requires: [firewall, ssl-keys-saving]
condition:

View File

@ -1,7 +1,7 @@
- id: tools
type: puppet
version: 2.2.0
tags: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd,
tags: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd,
primary-mongo, mongo, virt, ironic, primary-rabbitmq, rabbitmq, primary-database, database,
primary-keystone, keystone, primary-neutron, neutron]
required_for: [deploy_end]

View File

@ -1,7 +1,7 @@
- id: prepare_symlinks
type: puppet
version: 2.1.0
groups: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd, primary-mongo, mongo, ironic]
groups: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd, primary-mongo, mongo, ironic]
requires: [setup_repositories]
required_for: [pkg_upgrade]
condition:
@ -14,7 +14,7 @@
- id: pkg_upgrade
type: puppet
version: 2.1.0
groups: [primary-controller, controller, cinder, cinder-block-device, cinder-vmware, compute, ceph-osd, primary-mongo, mongo, ironic]
groups: [primary-controller, controller, cinder, cinder-block-device, compute, ceph-osd, primary-mongo, mongo, ironic]
requires: [setup_repositories]
required_for: [fuel_pkgs]
condition:

View File

@ -1,3 +0,0 @@
class { '::osnailyfacter::vmware::cinder_vmware' :}
class { '::osnailyfacter::upgrade::restart_services' :}
class { '::osnailyfacter::override_resources': }

View File

@ -1,3 +0,0 @@
class { '::osnailyfacter::vmware::compute_vmware' :}
class { '::osnailyfacter::upgrade::restart_services' :}
class { '::osnailyfacter::override_resources': }

View File

@ -1,44 +0,0 @@
- id: vmware-vcenter
type: puppet
version: 2.1.0
groups: [primary-controller, controller]
required_for: [deploy_end]
requires: [controller_remaining_tasks]
condition:
yaql_exp: &use_vcenter '$.use_vcenter and changed($.use_vcenter)'
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/vcenter.pp
puppet_modules: /etc/puppet/modules
timeout: 300
- id: top-role-cinder-vmware
type: puppet
version: 2.1.0
groups: [cinder-vmware]
required_for: [deploy_end]
requires: [top-role-cinder]
condition:
yaql_exp: *use_vcenter
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/cinder-vmware.pp
puppet_modules: /etc/puppet/modules
timeout: 300
test_pre:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/vmware/cinder-vmware_pre.rb
test_post:
cmd: ruby /etc/puppet/modules/osnailyfacter/modular/vmware/cinder-vmware_post.rb
- id: top-role-compute-vmware
type: puppet
version: 2.1.0
groups: [compute-vmware]
required_for: [enable_nova_compute_service]
requires: [top-role-compute, ceilometer-compute, ceilometer-keystone]
cross-depends:
- name: ceilometer-keystone
condition:
yaql_exp: *use_vcenter
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/compute-vmware.pp
puppet_modules: /etc/puppet/modules
timeout: 180

View File

@ -1,3 +0,0 @@
class { '::osnailyfacter::vmware::vcenter' :}
class { '::osnailyfacter::upgrade::restart_services' :}
class { '::osnailyfacter::override_resources': }

View File

@ -4,21 +4,6 @@ describe 'generate_glance_images' do
let(:input) {
[
{
'container_format' => 'bare',
'disk_format' => 'vmdk',
'glance_properties' => '--property hypervisor_type=vmware --property vmware_disktype=sparse --property vmware_adaptertype=lsiLogic',
'img_name' => 'TestVM-VMDK',
'img_path' => '/usr/share/cirros-testvm/cirros-i386-disk.vmdk',
'min_ram' => '64',
'os_name' => 'cirros',
'properties' => {
'hypervisor_type' => 'vmware',
'vmware_adaptertype' => 'lsiLogic',
'vmware_disktype' => 'sparse',
},
'public' => 'true',
},
{
'container_format' => 'bare',
'disk_format' => 'qcow2',
@ -35,18 +20,6 @@ describe 'generate_glance_images' do
let (:output) {
{
'TestVM-VMDK' => {
'container_format' => 'bare',
'disk_format' => 'vmdk',
'is_public' => 'true',
'min_ram' => '64',
'source' => '/usr/share/cirros-testvm/cirros-i386-disk.vmdk',
'properties' => {
'hypervisor_type' => 'vmware',
'vmware_adaptertype' => 'lsiLogic',
'vmware_disktype' => 'sparse',
},
},
'TestVM' => {
'container_format' => 'bare',
'disk_format' => 'qcow2',

View File

@ -109,7 +109,6 @@
<% globals.store "use_ceilometer", @use_ceilometer -%>
<% globals.store "use_ovs", @use_ovs -%>
<% globals.store "use_syslog", @use_syslog -%>
<% globals.store "vcenter", @vcenter_hash -%>
<% globals.store "verbose", @verbose -%>
<% globals.store "vlan_start", @vlan_start -%>
<% globals.store "management_vip", @management_vip -%>

View File

@ -1,12 +0,0 @@
fixtures:
symlinks:
ceilometer: "#{source_dir}/../ceilometer"
pacemaker: "#{source_dir}/../pacemaker"
inifile: "#{source_dir}/../inifile"
nova: "#{source_dir}/../nova"
stdlib: "#{source_dir}/../stdlib"
vmware: "#{source_dir}"
oslo: "#{source_dir}/../oslo"
cinder: "#{source_dir}/../cinder"
openstacklib: "#{source_dir}/../openstacklib"
tweaks: "#{source_dir}/../tweaks"

View File

@ -1,2 +0,0 @@
.idea
spec/fixtures

View File

@ -1 +0,0 @@
-f doc -c

View File

@ -1,17 +0,0 @@
source 'https://rubygems.org'
group :development, :test do
gem 'puppetlabs_spec_helper', :require => false
gem 'puppet-lint', '~> 1.1'
gem 'rake', '~> 10.3.1'
gem 'pry'
gem 'rspec-puppet-facts'
end
if puppetversion = ENV['PUPPET_GEM_VERSION']
gem 'puppet', puppetversion, :require => false
else
gem 'puppet', :require => false
end
# vim:ft=ruby

View File

@ -1,2 +0,0 @@
require 'rubygems'
require 'puppetlabs_spec_helper/rake_tasks'

View File

@ -1,109 +0,0 @@
#!/bin/sh
#
#
# openstack-ceilometer-compute OpenStack ceilometer compute node agent
#
# chkconfig: - 98 02
# description: OpenStack measurement and collection service for running on compute nodes
### BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Stop: 0 1 6
# Short-Description: OpenStack ceilometer compute node agent
# Description: OpenStack measurement and collection service for running on compute nodes
### END INIT INFO
. /etc/rc.d/init.d/functions
suffix=compute
# Per-cluster instance: the script is installed under a name ending in
# "-<cluster>", and everything after the last '-' selects the instance.
cluster=${0##*-}
prog=openstack-ceilometer-$suffix-vmware-$cluster
exec="/usr/bin/ceilometer-agent-$suffix"
config="/etc/ceilometer/ceilometer.conf"
pidfile="/var/run/ceilometer/ceilometer-agent-$suffix.$cluster.pid"
logfile="/var/log/ceilometer/$suffix-$cluster.log"
# Optional per-instance overrides (e.g. OPTIONS) from sysconfig.
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
# Start the agent in the background and record its PID ourselves, since the
# agent does not daemonize/write a pidfile on its own.
start() {
[ -x $exec ] || exit 5
[ -f $config ] || exit 6
echo -n $"Starting $prog: "
daemon --user ceilometer --pidfile $pidfile "$exec --logfile $logfile ${OPTIONS} &>/dev/null & echo \$! > $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
# Stop via the pidfile, then force-kill any leftover agent still matching
# this cluster's config.
# NOTE(review): the pkill -f "$cluster.conf" pattern is broad and could match
# unrelated processes whose command line contains that substring.
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile ${prog%%.*}
if pgrep -f "ceilometer-agent-$suffix.*$cluster\.conf" &>/dev/null ; then
sleep 2
pgrep -f "ceilometer-agent-$suffix.*$cluster\.conf" &>/dev/null && \
pkill -f "$cluster.conf"
fi
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
# The agent has no config-reload signal handling here; reload == restart.
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -1,25 +0,0 @@
# vim: set ft=upstart et ts=2:
# Upstart job for the ceilometer compute agent (Debian/Ubuntu).
description "ceilometer-agent-compute"
author "Chuck Short <zulcss@ubuntu.com>"
start on runlevel [2345]
stop on runlevel [!2345]
chdir /var/run
# Ensure runtime/lock directories exist and are owned by the service user.
pre-start script
mkdir -p /var/run/ceilometer
chown ceilometer:root /var/run/ceilometer/
mkdir -p /var/lock/ceilometer
chown ceilometer:root /var/lock/ceilometer/
end script
script
# Optional extra CLI options via /etc/default/ceilometer-agent-compute.
if [ -r /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
start-stop-daemon --start --chuid ceilometer --exec /usr/bin/ceilometer-agent-compute -- $CEILOMETER_COMPUTE_OPTS
end script

View File

@ -1,33 +0,0 @@
# Upstart job for the VMDK-backend cinder-volume instance (Debian/Ubuntu).
description "OpenStack Cinder Volume for VMware"
author "Alexander Arzhanov <aarzhanov@mirantis.com>"
start on runlevel [2345]
stop on runlevel [!2345]
chdir /var/run
respawn
# Give up if the service dies more than 20 times within 5 seconds.
respawn limit 20 5
limit nofile 65535 65535
# Create the lock/run/log/lib directories cinder expects.
pre-start script
for i in lock run log lib ; do
mkdir -p /var/$i/cinder
chown cinder /var/$i/cinder
done
end script
script
[ -x "/usr/bin/cinder-volume" ] || exit 0
DAEMON_ARGS=""
CINDER_VOLUME_OPTS=""
# Pull distro-wide and per-job defaults (syslog/logfile switches, extra opts).
[ -r /etc/default/openstack ] && . /etc/default/openstack
[ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/cinder/$UPSTART_JOB.log"
exec start-stop-daemon --start --chdir /var/lib/cinder \
--chuid cinder:cinder --make-pidfile --pidfile /var/run/cinder/$UPSTART_JOB.pid \
--exec /usr/bin/cinder-volume -- --config-file=/etc/cinder/cinder.conf ${CINDER_VOLUME_OPTS} ${DAEMON_ARGS}
end script

View File

@ -1,125 +0,0 @@
#!/bin/sh
# This file is managed by Puppet.
#
# openstack-nova-compute OpenStack Nova Compute Worker
#
# chkconfig: - 98 02
# description: Compute workers manage computing instances on host \
# machines. Through the API, commands are dispatched \
# to compute workers to: \
# * Run instances \
# * Terminate instances \
# * Reboot instances \
# * Attach volumes \
# * Detach volumes \
# * Get console output
### BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Stop: 0 1 6
# Short-Description: OpenStack Nova Compute Worker
# Description: Compute workers manage computing instances on host
# machines. Through the API, commands are dispatched
# to compute workers to:
# * Run instances
# * Terminate instances
# * Reboot instances
# * Attach volumes
# * Detach volumes
# * Get console output
### END INIT INFO
. /etc/rc.d/init.d/functions
suffix=compute
# Per-cluster instance: everything after the last '-' in the script name
# selects which vSphere cluster this nova-compute serves.
cluster=${0##*-}
prog=openstack-nova-$suffix-vmware-$cluster
exec="/usr/bin/nova-$suffix"
config="/etc/nova/nova.conf"
pidfile="/var/run/nova/nova-$suffix.$cluster.pid"
logfile="/var/log/nova/$suffix-$cluster.log"
# Optional per-instance overrides (e.g. OPTIONS) from sysconfig.
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
# Launch in the background and record the PID ourselves; nova-compute is not
# asked to daemonize here.
start() {
[ -x $exec ] || exit 5
[ -f $config ] || exit 6
echo -n $"Starting $prog: "
daemon --user nova --pidfile $pidfile "$exec --logfile $logfile ${OPTIONS} &>/dev/null & echo \$! > $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
# Stop via the pidfile, then force-kill stragglers matching this cluster's
# config file.
# NOTE(review): pkill -f "$cluster.conf" is a broad pattern; verify it cannot
# match unrelated processes on the node.
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile ${prog%%.*}
if pgrep -f "nova-$suffix.*$cluster\.conf" &>/dev/null ; then
sleep 2
pgrep -f "nova-$suffix.*$cluster\.conf" &>/dev/null && \
pkill -f "$cluster.conf"
fi
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
# No live-reload support; reload is implemented as a full restart.
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -1,33 +0,0 @@
# vim: set ft=upstart et ts=2:
# Upstart job for the nova compute worker (Debian/Ubuntu).
description "Nova compute worker"
author "Soren Hansen <soren@linux2go.dk>"
start on runlevel [2345]
stop on runlevel [!2345]
chdir /var/run
# Prepare runtime/lock dirs, load the nbd module (for qcow/nbd mounts), and
# wait for libvirt when it is installed so nova-compute can connect to it.
pre-start script
mkdir -p /var/run/nova
chown nova:root /var/run/nova/
mkdir -p /var/lock/nova
chown nova:root /var/lock/nova/
modprobe nbd
# If libvirt-bin is installed, always wait for it to start first
if status libvirt-bin; then
start wait-for-state WAIT_FOR=libvirt-bin WAIT_STATE=running WAITER=nova-compute
fi
end script
script
# Optional extra CLI options via /etc/default/nova-compute.
if [ -r /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
start-stop-daemon --start --chuid nova --exec /usr/bin/nova-compute -- --config-file=/etc/nova/nova.conf $NOVA_COMPUTE_OPTS
end script

View File

@ -1,114 +0,0 @@
#!/bin/sh
#
# openstack-cinder-volume OpenStack Cinder Volume Services
#
# chkconfig: - 98 02
# description: Volume Workers interact with iSCSI storage to manage \
# LVM-based instance volumes. Specific functions include: \
# * Create Volumes \
# * Establish Compute volumes
## BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Stop: 0 1 6
# Short-Description: OpenStack cinder Volume Worker
# Description: Volume Workers interact with iSCSI storage to manage
# LVM-based instance volumes. Specific functions include:
# * Create Volumes
# * Delete Volumes
# * Establish Compute volumes
### END INIT INFO
. /etc/rc.d/init.d/functions
suffix=volume
# Per-backend instance: everything after the last '-' in the script name
# selects the availability-zone/backend this cinder-volume serves.
cluster=${0##*-} # s/cluster/index/
prog=openstack-cinder-$suffix-vmware-$cluster
exec="/usr/bin/cinder-$suffix"
config="/etc/cinder/cinder.conf"
pidfile="/var/run/cinder/cinder-$suffix.$cluster.pid"
logfile="/var/log/cinder/$suffix-$cluster.log"
# Optional per-instance overrides (e.g. OPTIONS) from sysconfig.
[ -r /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
# Launch in the background with the shared cinder.conf plus any per-instance
# OPTIONS (typically an extra --config-file); record the PID ourselves.
start() {
[ -x $exec ] || exit 5
[ -f $config ] || exit 6
echo -n $"Starting $prog: "
daemon --user cinder --pidfile $pidfile "$exec --logfile $logfile --config-file=$config ${OPTIONS} &>/dev/null & echo \$! > $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
# Stop via the pidfile, then force-kill stragglers matching this instance's
# config file.
# NOTE(review): pkill -f "$cluster.conf" is a broad pattern; verify it cannot
# match unrelated processes on the node.
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile ${prog%%.*}
if pgrep -f "cinder-$suffix.*$cluster\.conf" &>/dev/null ; then
sleep 2
pgrep -f "cinder-$suffix.*$cluster\.conf" &>/dev/null && \
pkill -f "$cluster.conf"
fi
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
# No live-reload support; reload is implemented as a full restart.
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -1,19 +0,0 @@
# Eventually this function should be revised and removed.
# Such data-structure forming should be done by nailgun.
Puppet::Parser::Functions::newfunction(
:parse_vcenter_settings,
:type => :rvalue,
:arity => 1,
:doc => <<-EOS
Convert array of computes of vCenter settings to hash
EOS
) do |args|
# Accept either a single settings Hash or an array of them.
list = args.first
list = [list] unless list.is_a?(Array)
# Key each Hash entry by its stringified position; non-Hash entries are
# dropped but still consume an index.
result = {}
list.each_index do |idx|
entry = list[idx]
result[idx.to_s] = entry if entry.is_a?(Hash)
end
result
end

View File

@ -1,116 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::ceilometer
#
# Deploys nova-compute service and configures it for use
# with vmwareapi.VCDriver (vCenter server as hypervisor)
# on OpenStack controller nodes. Nova-compute is configured
# to work under Pacemaker supervision.
#
# === Parameters
#
# [*vcenter_settings*]
# (optional) Computes hash in format of:
# Example:
# "[ {"availability_zone_name"=>"vcenter", "datastore_regex"=>".*",
# "service_name"=>"vm_cluster1", "target_node"=>"controllers",
# "vc_cluster"=>"Cluster1", "vc_host"=>"172.16.0.254",
# "vc_password"=>"Qwer!1234", "vc_user"=>"administrator@vsphere.local"},
# {"availability_zone_name"=>"vcenter", "datastore_regex"=>".*",
# "service_name"=>"vm_cluster2", "target_node"=>"node-65",
# "vc_cluster"=>"Cluster2", "vc_host"=>"172.16.0.254",
# "vc_password"=>"Qwer!1234", "vc_user"=>"administrator@vsphere.local"} ]"
# Defaults to undef.
#
# [*vcenter_user*]
# (optional) Username for connection to VMware vCenter host.
# Defaults to 'user'.
#
# [*vcenter_password*]
# (optional) Password for connection to VMware vCenter host.
# Defaults to 'password'.
#
# [*vcenter_host_ip*]
# (optional) Hostname or IP address for connection to VMware vCenter host.
# Defaults to '10.10.10.10'.
#
# [*vcenter_cluster*]
# (optional) Name of a VMware Cluster ComputeResource.
# Defaults to 'cluster'.
#
# [*hypervisor_inspector*]
# (optional) Inspector to use for inspecting the hypervisor layer. Known
# inspectors are libvirt, hyperv, vmware, xenapi and powervm.
# Defaults to 'vmware'.
#
# [*api_retry_count*]
# (optional) Number of times a VMware vSphere API may be retried.
# Defaults to '5'.
#
# [*task_poll_interval*]
# (optional) Sleep time in seconds for polling an ongoing async task.
# Defaults to '5.0'.
#
# [*wsdl_location*]
# (optional) Optional vim service WSDL location
# e.g http://<server>/vimService.wsdl. Optional over-ride to default location
# for bug work-arounds.
# Defaults to false.
#
# [*debug*]
# (optional) Flag that turn debug logging.
# Defaults to false.
#
class vmware::ceilometer (
$vcenter_settings = undef,
$vcenter_user = 'user',
$vcenter_password = 'password',
$vcenter_host_ip = '10.10.10.10',
$vcenter_cluster = 'cluster',
$hypervisor_inspector = 'vmware',
$api_retry_count = '5',
$task_poll_interval = '5.0',
$wsdl_location = false,
$debug = false,
) {
if $debug {
# Enable debug for rabbit and VMware only
$default_log_levels = 'amqp=DEBUG,amqplib=DEBUG,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,oslo.vmware=DEBUG'
} else {
$default_log_levels = 'amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,oslo.vmware=WARN'
}
# Turn the settings array into an index-keyed hash suitable for
# create_resources (one entry per vSphere cluster).
$computes_hash = parse_vcenter_settings($vcenter_settings)
# Shared parameters applied to every vmware::ceilometer::ha instance.
$defaults = {
default_log_levels => $default_log_levels,
hypervisor_inspector => $hypervisor_inspector,
api_retry_count => $api_retry_count,
task_poll_interval => $task_poll_interval
}
include ::ceilometer::params
package { 'ceilometer-agent-compute':
ensure => present,
name => $::ceilometer::params::agent_compute_package_name,
}
# One HA agent definition per vSphere cluster; the package must be
# installed before any of them are configured.
create_resources(vmware::ceilometer::ha, $computes_hash, $defaults)
Package['ceilometer-agent-compute']->
Vmware::Ceilometer::Ha<||>
}

View File

@ -1,146 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::ceilometer::compute_vmware
#
# Class configures ceilometer compute agent on compute-vmware node.
# It does the following:
# - configure keystone auth parameters
# - reload ceilometer polling agent service, package is already
# installed by ceilometer-compute deployment task
#
# === Parameters
#
# [*availability_zone_name*]
# (required) Availability zone name that will be used to form host parameter.
#
# [*vc_cluster*]
# (required) vCenter cluster name that is going to be monitored.
#
# [*vc_host*]
# (required) IP address of the VMware vSphere host.
#
# [*vc_user*]
# (required) Username of VMware vSphere.
#
# [*vc_password*]
# (required) Password of VMware vSphere.
#
# [*service_name*]
# (required) Parameter to form 'host' parameter.
#
# [*target_node*]
# (optional) Parameter that specifies on which node service will be placed.
# Defaults to undef.
#
# [*vc_insecure*]
# (optional) If true, the vCenter server certificate is not verified. If
# false, then the default CA truststore is used for verification. This option
# is ignored if "ca_file" is set.
# Defaults to 'True'.
#
# [*vc_ca_file*]
# (optional) The hash name of the CA bundle file and data in format of:
# Example:
# "{"vc_ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"}}"
# Defaults to undef.
#
# [*datastore_regex*]
# (optional) Regex which match datastore that will be used for openstack vms.
# Defaults to undef.
#
# [*debug*]
# (optional) Flag that turn debug logging.
# Defaults to undef.
#
# [*identity_uri*]
# (optional) URL to access Keystone service.
# Defaults to undef.
#
# [*auth_user*]
# (optional) Keystone user.
# Defaults to undef.
#
# [*auth_password*]
# (optional) Keystone password.
# Defaults to undef.
#
# [*tenant*]
# (optional) Admin tenant name.
# Defaults to undef.
#
class vmware::ceilometer::compute_vmware(
$availability_zone_name,
$vc_cluster,
$vc_host,
$vc_user,
$vc_password,
$service_name,
$target_node = undef,
$vc_insecure = true,
$vc_ca_file = undef,
$datastore_regex = undef,
$debug = undef,
$identity_uri = undef,
$auth_user = undef,
$auth_password = undef,
$tenant = undef,
) {
if $debug {
# Enable debug for rabbit and vmware only
$default_log_levels = 'amqp=DEBUG,amqplib=DEBUG,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,oslo.vmware=DEBUG'
} else {
$default_log_levels = 'amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,oslo.vmware=WARN'
}
# Materialize the vCenter CA bundle (or fall back to the insecure flag)
# via vmware::ssl; the resolved values are read back from that class.
class { '::vmware::ssl':
vc_insecure => $vc_insecure,
vc_ca_file => $vc_ca_file,
vc_ca_filepath => '/etc/ceilometer/vmware-ca.pem',
}
$ceilometer_vcenter_ca_filepath = $::vmware::ssl::vcenter_ca_filepath
$ceilometer_vcenter_insecure_real = $::vmware::ssl::vcenter_insecure_real
# Point the polling agent at vCenter; 'host' is made unique per
# AZ/cluster so multiple agents do not collide.
ceilometer_config {
'DEFAULT/default_log_levels': value => $default_log_levels;
'DEFAULT/hypervisor_inspector': value => 'vmware';
'DEFAULT/host': value => "${availability_zone_name}-${service_name}";
'vmware/host_ip': value => $vc_host;
'vmware/host_username': value => $vc_user;
'vmware/host_password': value => $vc_password;
'vmware/ca_file': value => $ceilometer_vcenter_ca_filepath;
'vmware/insecure': value => $ceilometer_vcenter_insecure_real;
'keystone_authtoken/admin_user': value => $auth_user;
'keystone_authtoken/admin_password': value => $auth_password;
'keystone_authtoken/admin_tenant_name': value => $tenant;
'keystone_authtoken/identity_uri': value => $identity_uri;
}
include ::ceilometer::params
package { 'ceilometer-polling':
ensure => latest,
name => $::ceilometer::params::agent_polling_package_name,
}
service { 'ceilometer-polling':
ensure => running,
name => $::ceilometer::params::agent_polling_service_name,
}
# Restart the agent whenever its config or package changes.
Ceilometer_config<| |> ~> Service['ceilometer-polling']
Package['ceilometer-polling'] ~> Service['ceilometer-polling']
}

View File

@ -1,200 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Define: vmware::ceilometer::ha
#
# This type creates nova-compute service for provided vSphere cluster
# (cluster that is formed of ESXi hosts and is managed by vCenter server).
#
# === Parameters
#
# [*availability_zone_name*]
# (required) Availability zone which nova-compute will be assigned.
#
# [*vc_cluster*]
# (required) Name of a VMware Cluster ComputeResource.
#
# [*vc_host*]
# (required) Hostname or IP address for connection to VMware vCenter host.
#
# [*vc_user*]
# (required) Username for connection to VMware vCenter host.
#
# [*vc_password*]
# (required) Password for connection to VMware vCenter host.
#
# [*service_name*]
# (required) Parameter to form 'host' parameter.
#
# [*target_node*]
# (required) Parameter that specifies on which node service will be placed.
#
# [*default_log_levels*]
# (required) List of package logging levels in logger=LEVEL pairs. This option
# is ignored if log_config_append is set.
#
# [*hypervisor_inspector*]
# (optional) Inspector to use for inspecting the hypervisor layer. Known
# inspectors are libvirt, hyperv, vmware, xenapi and powervm.
# Defaults to 'vmware'.
#
# [*api_retry_count*]
# (optional) Number of times a VMware vSphere API may be retried.
# Defaults to '5'.
#
# [*task_poll_interval*]
# (optional) Sleep time in seconds for polling an ongoing async task.
# Defaults to '5.0'.
#
# [*vc_insecure*]
# (optional) If true, the vCenter server certificate is not verified.
# If false, then the default CA truststore is used for verification. This
# option is ignored if "ca_file" is set.
# Defaults to 'True'.
#
# [*vc_ca_file*]
# (optional) The hash name of the CA bundle file and data in format of:
# Example:
# "{"vc_ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"}}"
# Defaults to undef.
#
# [*datastore_regex*]
# (optional) Regex to match the name of a datastore.
# Defaults to undef.
#
# [*amqp_port*]
# (optional) The listening port number of the AMQP server. Mandatory to
# perform a monitor check.
# Defaults to '5673'.
#
# [*ceilometer_config*]
# (required) Path used for ceilometer conf.
# Defaults to '/etc/ceilometer/ceilometer.conf'.
#
# [*ceilometer_conf_dir*]
# (optional) The base directory used for ceilometer configs.
# Defaults to '/etc/ceilometer/ceilometer-compute.d'.
#
define vmware::ceilometer::ha (
$availability_zone_name,
$vc_cluster,
$vc_host,
$vc_user,
$vc_password,
$service_name,
$target_node,
$default_log_levels,
$hypervisor_inspector = 'vmware',
$api_retry_count = '5',
$task_poll_interval = '5.0',
$vc_insecure = true,
$vc_ca_file = undef,
$datastore_regex = undef,
$amqp_port = '5673',
$ceilometer_config = '/etc/ceilometer/ceilometer.conf',
$ceilometer_conf_dir = '/etc/ceilometer/ceilometer-compute.d',
) {
# Resources are only created when the agent is targeted at the
# controllers; for any other target_node this define is a no-op.
if ($target_node == 'controllers') {
# Per-AZ/cluster agent config and (optional) CA bundle paths.
$ceilometer_compute_conf = "${ceilometer_conf_dir}/vmware-${availability_zone_name}_${service_name}.conf"
$vcenter_ca_file = pick($vc_ca_file, {})
$vcenter_ca_content = pick($vcenter_ca_file['content'], {})
$vcenter_ca_filepath = "${ceilometer_conf_dir}/vmware-${availability_zone_name}_${service_name}-ca.pem"
if ! defined(File[$ceilometer_conf_dir]) {
file { $ceilometer_conf_dir:
ensure => directory,
owner => 'ceilometer',
group => 'ceilometer',
mode => '0750',
}
}
# Write the CA bundle only when content was supplied and verification is
# requested; otherwise fall back to the insecure flag.
if ! empty($vcenter_ca_content) and ! $vc_insecure {
$ceilometer_vcenter_ca_filepath = $vcenter_ca_filepath
$ceilometer_vcenter_insecure_real = false
file { $vcenter_ca_filepath:
ensure => file,
content => $vcenter_ca_content,
mode => '0644',
owner => 'root',
group => 'root',
}
} else {
$ceilometer_vcenter_ca_filepath = $::os_service_default
$ceilometer_vcenter_insecure_real = $vc_insecure
}
if ! defined(File[$ceilometer_compute_conf]) {
file { $ceilometer_compute_conf:
ensure => present,
content => template('vmware/ceilometer-compute.conf.erb'),
mode => '0600',
owner => 'ceilometer',
group => 'ceilometer',
}
}
# Pacemaker primitive (ocf:fuel:ceilometer-agent-compute), one per
# AZ/cluster pair.
$primitive_name = "p_ceilometer_agent_compute_vmware_${availability_zone_name}_${service_name}"
$primitive_class = 'ocf'
$primitive_provider = 'fuel'
$primitive_type = 'ceilometer-agent-compute'
# NOTE(review): target-role 'stopped' means the primitive is created in
# stopped state — presumably started later by cluster tooling; confirm.
$metadata = {
'target-role' => 'stopped',
'resource-stickiness' => '1'
}
$parameters = {
'amqp_server_port' => $amqp_port,
'config' => $ceilometer_config,
'pid' => "/var/run/ceilometer/ceilometer-agent-compute-${availability_zone_name}_${service_name}.pid",
'user' => 'ceilometer',
'additional_parameters' => "--config-file=${ceilometer_compute_conf}",
}
$operations = {
'monitor' => {
'timeout' => '20',
'interval' => '30',
},
'start' => {
'timeout' => '360',
},
'stop' => {
'timeout' => '360',
}
}
pacemaker::service { $primitive_name :
prefix => false,
primitive_class => $primitive_class,
primitive_provider => $primitive_provider,
primitive_type => $primitive_type,
metadata => $metadata,
parameters => $parameters,
operations => $operations,
}
service { $primitive_name :
ensure => 'running',
enable => true,
}
# Config dir -> config file -> pacemaker resource -> service.
File[$ceilometer_conf_dir]->
File[$ceilometer_compute_conf]->
Pcmk_resource[$primitive_name]->
Service[$primitive_name]
}
}

View File

@ -1,283 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Define: vmware::cinder::vmdk
#
# This type creates cinder-volume service with VMDK backend,
# which provides block storage solution for
# vSphere's virtual machine instances.
#
# === Parameters
#
# [*vc_insecure*]
# (optional) If true, the ESX/vCenter server certificate is not verified.
# If false, then the default CA truststore is used for verification.
# Defaults to 'True'.
#
# [*vc_ca_file*]
# (optional) The hash name of the CA bundle file and data in format of:
# Example:
# "{"vc_ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"}}"
# Defaults to undef.
#
# [*vc_host*]
# (required) IP address for connecting to VMware vCenter server.
# Defaults to '1.2.3.4'.
#
# [*vc_user*]
# (required) Username for authenticating with VMware vCenter server.
# Defaults to 'user'.
#
# [*vc_password*]
# (required) Password for authenticating with VMware vCenter server.
# Defaults to 'password'.
#
# [*availability_zone_name*]
# (required) Availability zone of this node and value is used as
# the default for new volumes.
# Defaults to 'non-nova'.
#
# [*vc_volume_folder*]
# (optional) Name of the vCenter inventory folder that will contain
# Cinder volumes. This folder will be created under
# "OpenStack/<project_folder>", where project_folder is of format
# "Project (<volume_project_id>)".
# Defaults to 'cinder-volumes'.
#
# [*vc_wsdl_location*]
# (optional) Optional VIM service WSDL Location e.g
# http://<server>/vimService.wsdl. Optional over-ride to default
# location for bug work-arounds.
# Defaults to empty.
#
# [*vc_api_retry_count*]
# (optional) Number of times VMware vCenter server API must be
# retried upon connection related issues.
# Defaults to '10'.
#
# [*vc_host_version*]
# (optional) Optional string specifying the VMware vCenter
# server version. The driver attempts to retrieve the version from
# VMware vCenter server. Set this configuration only if you want
# to override the vCenter server version.
# Defaults to empty.
#
# [*vc_image_transfer_timeout_secs*]
# (optional) Timeout in seconds for VMDK volume transfer
# between Cinder and Glance.
# Defaults to '7200'.
#
# [*vc_max_objects_retrieval*]
# (optional) Max number of objects to be retrieved per batch.
# Query results will be obtained in batches from the server
# and not in one shot. Server may still limit the count to
# something less than the configured value.
# Defaults to '100'.
#
# [*vc_task_poll_interval*]
# (optional) The interval (in seconds) for polling remote
# tasks invoked on VMware vCenter server.
# Defaults to '5'.
#
# [*vc_tmp_dir*]
# (optional) Directory where virtual disks are stored during
# volume backup and restore.
# Defaults to '/tmp'.
#
# [*cinder_conf_dir*]
# (optional) The base directory used for cinder-vmware configs.
# Defaults to '/etc/cinder/cinder.d'.
#
# [*cinder_log_dir*]
# (optional) The base directory used for relative --log-file paths.
# Defaults to '/var/log/cinder'.
#
# [*debug*]
# (optional) Print debugging output (set logging level to DEBUG instead
# of default WARNING level).
# Defaults to false.
#
define vmware::cinder::vmdk(
$vc_insecure = true,
$vc_ca_file = undef,
$vc_host = '1.2.3.4',
$vc_user = 'user',
$vc_password = 'password',
$availability_zone_name = 'non-nova',
$vc_volume_folder = 'cinder-volumes',
$vc_wsdl_location = '',
$vc_api_retry_count = '10',
$vc_host_version = '',
$vc_image_transfer_timeout_secs = '7200',
$vc_max_objects_retrieval = '100',
$vc_task_poll_interval = '5',
$vc_tmp_dir = '/tmp',
$cinder_conf_dir = '/etc/cinder/cinder.d',
$cinder_log_dir = '/var/log/cinder',
$debug = false,
)
{
include ::cinder::params
# Per-AZ cinder-volume instance: its own config file, CA bundle and init
# job, all derived from the availability zone name.
$az_name = $availability_zone_name
$cinder_volume_conf = "${cinder_conf_dir}/vmware-${az_name}.conf"
$cinder_volume_vmware = "${::cinder::params::volume_service}-vmware"
$storage_hash = hiera_hash('storage', {})
$vcenter_ca_file = pick($vc_ca_file, {})
$vcenter_ca_content = pick($vcenter_ca_file['content'], {})
$vcenter_ca_filepath = "${cinder_conf_dir}/vmware-${az_name}-ca.pem"
# NOTE(review): pulls in the whole openstack_cinder task on controllers when
# ceph volumes are enabled — presumably to ensure base cinder setup; confirm.
if ($storage_hash['volumes_ceph']) and
(roles_include(['primary-controller']) or
roles_include(['controller'])) {
class { '::openstack_tasks::openstack_cinder::openstack_cinder': }
}
if ! defined(File[$cinder_conf_dir]) {
file { $cinder_conf_dir:
ensure => directory,
owner => 'cinder',
group => 'cinder',
mode => '0750',
}
}
# Write the CA bundle only when content was supplied and verification is
# requested; otherwise fall back to the insecure flag.
if ! empty($vcenter_ca_content) and ! $vc_insecure {
$cinder_vcenter_ca_filepath = $vcenter_ca_filepath
$cinder_vcenter_insecure_real = false
file { $vcenter_ca_filepath:
ensure => file,
content => $vcenter_ca_content,
mode => '0644',
owner => 'root',
group => 'root',
}
} else {
$cinder_vcenter_ca_filepath = $::os_service_default
$cinder_vcenter_insecure_real = $vc_insecure
}
if ! defined (File[$cinder_volume_conf]) {
file { $cinder_volume_conf:
ensure => present,
content => template('vmware/cinder-volume.conf.erb'),
mode => '0600',
owner => 'cinder',
group => 'cinder',
}
}
File[$cinder_conf_dir]->File[$cinder_volume_conf]
# The stock (non-suffixed) vmware cinder-volume service is kept disabled;
# only the per-AZ instances run.
if ! defined(Service['cinder_volume_vmware']) {
service { 'cinder_volume_vmware':
ensure => stopped,
enable => false,
name => $cinder_volume_vmware,
hasstatus => true,
}
}
if ! defined(Service["cinder_volume_vmware_${az_name}"]) {
service { "cinder_volume_vmware_${az_name}":
ensure => running,
name => "${cinder_volume_vmware}-${az_name}",
enable => true,
}
}
# OS-specific wiring: install the init artifact and create a per-AZ
# symlink plus a defaults file carrying the --config-file option.
case $::osfamily {
'RedHat': {
$src_init = $cinder_volume_vmware
$dst_init = '/etc/init.d'
$file_perm = '0755'
$cinder_volume_vmware_init = "${dst_init}/${cinder_volume_vmware}"
$init_link = "${cinder_volume_vmware_init}-${az_name}"
if ! defined(File[$init_link]) {
file { $init_link:
ensure => link,
target => $cinder_volume_vmware_init,
}
}
$cinder_volume_default = "/etc/sysconfig/${cinder_volume_vmware}-${az_name}"
if ! defined(File[$cinder_volume_default]){
file { $cinder_volume_default:
ensure => present,
content => "OPTIONS='--config-file=${cinder_volume_conf}'",
}
}
File[$cinder_volume_default]~>
Service["cinder_volume_vmware_${az_name}"]->
Service['cinder_volume_vmware']
}
'Debian': {
$cinder_volume_default = "/etc/default/${cinder_volume_vmware}-${az_name}"
$src_init = "${cinder_volume_vmware}.conf"
$dst_init = '/etc/init'
$file_perm = '0644'
ensure_packages($::cinder::params::volume_package)
Package[$::cinder::params::volume_package] -> File[$src_init]
if ! defined(File[$cinder_volume_default]) {
file { $cinder_volume_default:
ensure => present,
content => "CINDER_VOLUME_OPTS='--config-file=${cinder_volume_conf}'",
}
}
$cinder_volume_vmware_init = "${dst_init}/${cinder_volume_vmware}.conf"
$init_link = "${dst_init}/${cinder_volume_vmware}-${az_name}.conf"
if ! defined(File[$init_link]) {
file { $init_link:
ensure => link,
target => $cinder_volume_vmware_init,
}
}
# Upstart must re-read job files before the new symlinked job exists.
$init_reload_cmd = '/sbin/initctl reload-configuration'
$init_reload = 'initctl reload-configuration'
if ! defined(Exec[$init_reload]) {
exec { $init_reload:
command => $init_reload_cmd,
path => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ],
}
}
File[$cinder_volume_default]~>
Exec[$init_reload]->
Service["cinder_volume_vmware_${az_name}"]->
Service['cinder_volume_vmware']
}
default: {
fail { "Unsupported OS family (${::osfamily})": }
}
}
# Ship the init script/job body from the module's files directory.
if ! defined(File[$src_init]) {
file { $src_init:
source => "puppet:///modules/vmware/${src_init}",
path => "${dst_init}/${src_init}",
owner => 'root',
group => 'root',
mode => $file_perm,
}
}
File[$src_init]->
File[$init_link]->
File[$cinder_volume_default]
}

View File

@ -1,213 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Define: vmware::compute::ha
#
# This type creates nova-compute service for provided vSphere cluster
# (cluster that is formed of ESXi hosts and is managed by vCenter server).
#
# === Parameters
#
# [*availability_zone_name*]
# (required) Availability zone which nova-compute will be assigned.
#
# [*vc_cluster*]
# (required) Name of a VMware Cluster ComputeResource.
#
# [*vc_host*]
# (required) Hostname or IP address for connection to VMware vCenter host.
#
# [*vc_user*]
# (required) Username for connection to VMware vCenter host.
#
# [*vc_password*]
# (required) Password for connection to VMware vCenter host.
#
# [*service_name*]
# (required) Name that will form hypervisor name together with
# 'availability_zone_name' in nova-compute.conf.
#
# [*target_node*]
# (required) Name of node where nova-compute must be deployed. If it matches
# 'current_node' we are deploying nova-compute service.
#
# [*vc_insecure*]
# (optional) If true, the vCenter server certificate is not verified.
# If false, then the default CA truststore is used for verification. This
# option is ignored if "ca_file" is set.
# Defaults to 'True'.
#
# [*vc_ca_file*]
# (optional) The hash name of the CA bundle file and data in format of:
# Example:
# "{"vc_ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"}}"
# Defaults to undef.
#
# [*datastore_regex*]
# (optional) Regex to match the name of a datastore.
# Defaults to undef.
#
# [*amqp_port*]
# (optional) The listening port number of the AMQP server. Mandatory to
# perform a monitor check.
# Defaults to '5673'.
#
# [*api_retry_count*]
# (required) The number of times we retry on failures, e.g.,
# socket error, etc.
# Defaults to '5'.
#
# [*maximum_objects*]
# (required) The maximum number of ObjectContent data objects that should be
# returned in a single result. A positive value will cause the operation to
# suspend the retrieval when the count of objects reaches the specified
# maximum. The server may still limit the count to something less than the
# configured value. Any remaining objects may be retrieved with additional
# requests.
# Defaults to '100'.
#
# [*nova_conf*]
# (required) Path used for nova conf.
# Defaults to '/etc/nova/nova.conf'.
#
# [*nova_conf_dir*]
# (optional) The base directory used for compute-vmware configs.
# Defaults to '/etc/nova/nova-compute.d'.
#
# [*task_poll_interval*]
# (required) The interval used for polling of remote tasks.
# Defaults to '5.0'.
#
# [*use_linked_clone*]
# (required) Whether to use linked clone.
# Defaults to true.
#
# [*wsdl_location*]
# (optional) Optional VIM Service WSDL Location
# e.g 'http://<server>/vimService.wsdl'. Optional over-ride to default
# location for bug workarounds.
# Defaults to undef.
#
# Creates one Pacemaker-supervised nova-compute service instance per vSphere
# cluster when that cluster's target node is 'controllers'. Full parameter
# documentation is in the file header comment above this define.
define vmware::compute::ha(
  $availability_zone_name,
  $vc_cluster,
  $vc_host,
  $vc_user,
  $vc_password,
  $service_name,
  $target_node,
  $vc_insecure        = true,
  $vc_ca_file         = undef,
  $datastore_regex    = undef,
  $amqp_port          = '5673',
  $api_retry_count    = '5',
  $maximum_objects    = '100',
  $nova_conf          = '/etc/nova/nova.conf',
  $nova_conf_dir      = '/etc/nova/nova-compute.d',
  $task_poll_interval = '5.0',
  $use_linked_clone   = true,
  $wsdl_location      = undef,
) {
  # We deploy nova-compute on controller node only if $target_node contains
  # 'controllers' otherwise service will be deployed on separate node.
  if ($target_node == 'controllers') {
    # Per-cluster config file, e.g.
    # /etc/nova/nova-compute.d/vmware-<az>_<service>.conf
    $nova_compute_conf = "${nova_conf_dir}/vmware-${availability_zone_name}_${service_name}.conf"

    # pick() turns an undef $vc_ca_file into an empty hash so the
    # 'content' lookup below is always safe.
    $vcenter_ca_file = pick($vc_ca_file, {})
    $vcenter_ca_content = pick($vcenter_ca_file['content'], {})
    $vcenter_ca_filepath = "${nova_conf_dir}/vmware-${availability_zone_name}_${service_name}-ca.pem"

    if ! defined(File[$nova_conf_dir]) {
      file { $nova_conf_dir:
        ensure => 'directory',
        owner  => 'nova',
        group  => 'nova',
        mode   => '0750',
      }
    }

    # Write the CA bundle and turn verification on only when CA content was
    # supplied and insecure mode is off; otherwise pass through the service
    # default path and the caller-provided insecure flag.
    if ! empty($vcenter_ca_content) and ! $vc_insecure {
      $compute_vcenter_ca_filepath = $vcenter_ca_filepath
      $compute_vcenter_insecure_real = false

      file { $vcenter_ca_filepath:
        ensure  => file,
        content => $vcenter_ca_content,
        mode    => '0644',
        owner   => 'root',
        group   => 'root',
      }
    } else {
      $compute_vcenter_ca_filepath = $::os_service_default
      $compute_vcenter_insecure_real = $vc_insecure
    }

    if ! defined(File[$nova_compute_conf]) {
      # Rendered from the shared template; mode 0600 because the file
      # carries the vCenter password.
      file { $nova_compute_conf:
        ensure  => 'present',
        content => template('vmware/nova-compute.conf.erb'),
        mode    => '0600',
        owner   => 'nova',
        group   => 'nova',
      }
    }

    # Pacemaker primitive describing this per-cluster nova-compute instance
    # (OCF agent 'fuel/nova-compute').
    $primitive_name = "p_nova_compute_vmware_${availability_zone_name}-${service_name}"
    $primitive_class = 'ocf'
    $primitive_provider = 'fuel'
    $primitive_type = 'nova-compute'
    $metadata = {
      'resource-stickiness' => '1'
    }
    $parameters = {
      'amqp_server_port'      => $amqp_port,
      'config'                => $nova_conf,
      'pid'                   => "/var/run/nova/nova-compute-${availability_zone_name}-${service_name}.pid",
      'additional_parameters' => "--config-file=${nova_compute_conf}",
    }
    $operations = {
      'monitor' => {
        'timeout'  => '10',
        'interval' => '20',
      },
      'start'   => {
        'timeout' => '30',
      },
      'stop'    => {
        'timeout' => '30',
      }
    }

    pacemaker::service { $primitive_name :
      prefix             => false,
      primitive_class    => $primitive_class,
      primitive_provider => $primitive_provider,
      primitive_type     => $primitive_type,
      metadata           => $metadata,
      parameters         => $parameters,
      operations         => $operations,
    }

    service { $primitive_name :
      ensure => 'running',
      enable => true,
    }

    # Config files must be in place before the Pacemaker resource is created
    # and the service is started.
    File[$nova_conf_dir]->
    File[$nova_compute_conf]->
    Pcmk_resource[$primitive_name]->
    Service[$primitive_name]
  }
}

View File

@ -1,184 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Define: vmware::compute_vmware
#
# This resource deploys nova-compute service and configures it for use with
# vmwareapi.VCDriver (vCenter server as hypervisor).
# Depends on nova::params class.
#
# === Parameters
#
# [*availability_zone_name*]
# (required) Availability zone which nova-compute will be assigned.
#
# [*vc_cluster*]
# (required) Name of a VMware Cluster ComputeResource.
#
# [*vc_host*]
# (required) Hostname or IP address for connection to VMware vCenter host.
#
# [*vc_user*]
# (required) Username for connection to VMware vCenter host.
#
# [*vc_password*]
# (required) Password for connection to VMware vCenter host.
#
# [*service_name*]
# (required) Name that will form hypervisor name together with
# 'availability_zone_name' in nova-compute.conf.
#
# [*current_node*]
# (required) Name of node that we are executing manifest (e.g. 'node-4').
#
# [*target_node*]
# (required) Name of node where nova-compute must be deployed. If it matches
# 'current_node' we are deploying nova-compute service.
#
# [*vlan_interface*]
# (optional) Physical ethernet adapter name for vlan networking.
#
# [*vc_insecure*]
# (optional) If true, the vCenter server certificate is not verified.
# If false, then the default CA truststore is used for verification. This
# option is ignored if "ca_file" is set.
# Defaults to 'True'.
#
# [*vc_ca_file*]
# (optional) The hash name of the CA bundle file and data in format of:
# Example:
# "{"vc_ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"}}"
# Defaults to undef.
#
# [*datastore_regex*]
# (optional) Regex to match the name of a datastore.
# Defaults to undef.
#
# [*api_retry_count*]
# (required) The number of times we retry on failures, e.g.,
# socket error, etc.
# Defaults to '5'.
#
# [*maximum_objects*]
# (required) The maximum number of ObjectContent data objects that should be
# returned in a single result. A positive value will cause the operation to
# suspend the retrieval when the count of objects reaches the specified
# maximum. The server may still limit the count to something less than the
# configured value. Any remaining objects may be retrieved with additional
# requests.
# Defaults to '100'.
#
# [*nova_compute_conf*]
# (required) Path used for compute-vmware conf.
# Defaults to '/etc/nova/nova-compute.conf'.
#
# [*task_poll_interval*]
# (required) The interval used for polling of remote tasks.
# Defaults to '5.0'.
#
# [*use_linked_clone*]
# (required) Whether to use linked clone.
# Defaults to true.
#
# [*wsdl_location*]
# (optional) Optional VIM Service WSDL Location
# e.g 'http://<server>/vimService.wsdl'. Optional over-ride to default
# location for bug workarounds.
# Defaults to undef.
#
# [*service_enabled*]
# (optional) Manage nova-compute service.
# Defaults to false.
#
# Deploys and configures the nova-compute service for vmwareapi.VCDriver on a
# dedicated compute-vmware node. Full parameter documentation is in the file
# header comment above this define.
define vmware::compute_vmware(
  $availability_zone_name,
  $vc_cluster,
  $vc_host,
  $vc_user,
  $vc_password,
  $service_name,
  $current_node,
  $target_node,
  $vlan_interface,
  $vc_insecure        = true,
  $vc_ca_file         = undef,
  $datastore_regex    = undef,
  $api_retry_count    = '5',
  $maximum_objects    = '100',
  $nova_compute_conf  = '/etc/nova/nova-compute.conf',
  $task_poll_interval = '5.0',
  $use_linked_clone   = true,
  $wsdl_location      = undef,
  $service_enabled    = false,
)
{
  include ::nova::params

  # pick() turns an undef $vc_ca_file into an empty hash so the 'content'
  # lookup below never fails.
  $vcenter_ca_file = pick($vc_ca_file, {})
  $vcenter_ca_content = pick($vcenter_ca_file['content'], {})
  $vcenter_ca_filepath = '/etc/nova/vmware-ca.pem'

  # $service_enabled drives both the 'ensure' and 'enable' attributes of the
  # nova-compute service below.
  if $service_enabled {
    $service_ensure = 'running'
  } else {
    $service_ensure = 'stopped'
  }

  # We skip deployment if current node name is not same as target_node.
  if ($target_node == $current_node) {
    # Enable certificate verification only when CA content was supplied and
    # insecure mode is off; otherwise keep the service default / given flag.
    if ! empty($vcenter_ca_content) and ! $vc_insecure {
      $compute_vcenter_ca_filepath = $vcenter_ca_filepath
      $compute_vcenter_insecure_real = false

      file { $vcenter_ca_filepath:
        ensure  => file,
        content => $vcenter_ca_content,
        mode    => '0644',
        owner   => 'root',
        group   => 'root',
      }
    } else {
      $compute_vcenter_ca_filepath = $::os_service_default
      $compute_vcenter_insecure_real = $vc_insecure
    }

    # Config is mode 0600 because it contains the vCenter password.
    file { $nova_compute_conf:
      ensure  => present,
      content => template('vmware/nova-compute.conf.erb'),
      mode    => '0600',
      owner   => 'nova',
      group   => 'nova',
    }

    package { 'nova-compute':
      ensure => installed,
      name   => $::nova::params::compute_package_name,
    }
    # oslo.vmware is the Python library used by the VMware driver.
    package { 'python-oslo.vmware':
      ensure => installed,
    }

    service { 'nova-compute':
      ensure => $service_ensure,
      name   => $::nova::params::compute_service_name,
      enable => $service_enabled,
    }

    # Install order: driver library, nova-compute package, its config file,
    # then manage the service.
    Package['python-oslo.vmware']->
    Package['nova-compute']->
    File[$nova_compute_conf]->
    Service['nova-compute']
  }
}

View File

@ -1,154 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::controller
#
# Deploys nova-compute service and configures it for use
# with vmwareapi.VCDriver (vCenter server as hypervisor)
# on OpenStack controller nodes. Nova-compute is configured
# to work under Pacemaker supervision.
#
# === Parameters
#
# [*vcenter_settings*]
# (optional) Computes hash in format of:
# Example:
# "[ {"availability_zone_name"=>"vcenter", "datastore_regex"=>".*",
# "service_name"=>"vm_cluster1", "target_node"=>"controllers",
# "vc_cluster"=>"Cluster1", "vc_host"=>"172.16.0.254",
# "vc_password"=>"Qwer!1234", "vc_user"=>"administrator@vsphere.local"},
# {"availability_zone_name"=>"vcenter", "datastore_regex"=>".*",
# "service_name"=>"vm_cluster2", "target_node"=>"node-65",
# "vc_cluster"=>"Cluster2", "vc_host"=>"172.16.0.254",
# "vc_password"=>"Qwer!1234", "vc_user"=>"administrator@vsphere.local"} ]"
# Defaults to undef.
#
# [*vcenter_host_ip*]
# (optional) Hostname or IP address for connection to VMware vCenter host.
# Defaults to '10.10.10.10'.
#
# [*vcenter_user*]
# (optional) Username for connection to VMware vCenter host.
# Defaults to 'user'.
#
# [*vcenter_password*]
# (optional) Password for connection to VMware vCenter host.
# Defaults to 'password'.
#
# [*vlan_interface*]
# (optional) Physical ethernet adapter name for vlan networking.
# Defaults to undef.
#
# [*vncproxy_host*]
# (optional) IP address on which VNC server will be listening on.
# Defaults to undef.
#
# [*vncproxy_protocol*]
# (required) The protocol to communicate with the VNC proxy server.
# Defaults to 'http'.
#
# [*vncproxy_port*]
# (optional) The port to communicate with the VNC proxy server.
# Defaults to '6080'.
#
# [*vncproxy_path*]
# (optional) The path at the end of the uri for communication
# with the VNC proxy server.
# Defaults to '/vnc_auto.html'.
#
# [*use_quantum*]
# (optional) Shows if neutron is enabled.
# Defaults to false.
#
# Modules needed:
# nova
#
# Limitations:
# Only one vCenter supported.
#
# Configures VMware integration on controller nodes: installs nova-compute but
# keeps the stock service stopped, then creates one Pacemaker-managed
# nova-compute instance per vSphere cluster via vmware::compute::ha. Full
# parameter documentation is in the file header comment above this class.
class vmware::controller (
  $vcenter_settings  = undef,
  $vcenter_host_ip   = '10.10.10.10',
  $vcenter_user      = 'user',
  $vcenter_password  = 'password',
  $vlan_interface    = undef,
  $vncproxy_host     = undef,
  $vncproxy_protocol = 'http',
  $vncproxy_port     = '6080',
  $vncproxy_path     = '/vnc_auto.html',
  $use_quantum       = false,
)
{
  include ::nova::params

  $vncproxy_base_url = "${vncproxy_protocol}://${vncproxy_host}:${vncproxy_port}${vncproxy_path}"

  # Convert the raw settings structure into a hash consumable by
  # create_resources(vmware::compute::ha, ...) below.
  $computes_hash = parse_vcenter_settings($vcenter_settings)

  # Stubs from nova class in order to not include whole class
  if ! defined(Class['nova']) {
    exec { 'post-nova_config':
      command     => '/bin/echo "Nova config has changed"',
      refreshonly => true,
    }
    exec { 'networking-refresh':
      command     => '/sbin/ifdown -a ; /sbin/ifup -a',
      refreshonly => true,
    }
    # NOTE(review): 'binutils' appears to act as a stand-in for the
    # nova-common package so dependent resources still resolve — confirm.
    package { 'nova-common':
      ensure => 'installed',
      name   => 'binutils',
    }
  }

  $libvirt_type = hiera('libvirt_type', 'qemu')
  tweaks::ubuntu_service_override { 'nova-compute':
    package_name => "nova-compute-${libvirt_type}",
  }

  # The distro-managed nova-compute service is stopped and disabled here;
  # per-cluster instances run under Pacemaker instead (vmware::compute::ha).
  package { 'nova-compute':
    ensure => 'present',
    name   => $::nova::params::compute_package_name,
  }
  service { 'nova-compute':
    ensure => 'stopped',
    name   => $::nova::params::compute_service_name,
    enable => false,
  }

  # Create nova-compute per vSphere cluster.
  create_resources(vmware::compute::ha, $computes_hash)

  # Ordering: package, stop stock service, per-cluster HA instances, then
  # network setup (the chain arrow applies to the class declaration below).
  Package['nova-compute']->
  Service['nova-compute']->
  Vmware::Compute::Ha<||>->
  class { '::vmware::network':
    use_quantum => $use_quantum,
  }

  # Enable metadata service on Controller node.
  # Set correct parameter for vnc access.
  nova_config {
    'DEFAULT/enabled_apis':    value => 'ec2,osapi_compute,metadata';
    'vnc/novncproxy_base_url': value => $vncproxy_base_url;
  } -> Service['nova-compute']

  # Install cirros vmdk package.
  package { 'cirros-testvmware':
    ensure => present,
  }
  package { 'python-suds':
    ensure => present,
  }
}

View File

@ -1,127 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware
#
# This is the main VMware integration class. It should check the variables and
# basing on them call needed subclasses in order to setup VMware integration
# with OpenStack.
#
# === Parameters
#
# [*vcenter_settings*]
# (required) Computes hash in format of:
# Example:
# "[ {"availability_zone_name"=>"vcenter", "datastore_regex"=>".*",
# "service_name"=>"vm_cluster1", "target_node"=>"controllers",
# "vc_cluster"=>"Cluster1", "vc_host"=>"172.16.0.254",
# "vc_password"=>"Qwer!1234", "vc_user"=>"administrator@vsphere.local"},
# {"availability_zone_name"=>"vcenter", "datastore_regex"=>".*",
# "service_name"=>"vm_cluster2", "target_node"=>"node-65",
# "vc_cluster"=>"Cluster2", "vc_host"=>"172.16.0.254",
# "vc_password"=>"Qwer!1234", "vc_user"=>"administrator@vsphere.local"} ]"
# Defaults to undef.
#
# [*vcenter_user*]
# (optional) Username for connection to VMware vCenter host.
# Defaults to 'user'.
#
# [*vcenter_password*]
# (optional) Password for connection to VMware vCenter host.
# Defaults to 'password'.
#
# [*vcenter_host_ip*]
# (optional) Hostname or IP address for connection to VMware vCenter host.
# Defaults to '10.10.10.10'.
#
# [*vcenter_cluster*]
# (optional) Name of a VMware Cluster ComputeResource.
# Defaults to 'cluster'.
#
# [*vlan_interface*]
# (optional) Physical ethernet adapter name for vlan networking.
# Defaults to undef.
#
# [*use_quantum*]
# (optional) Shows if neutron is enabled.
# Defaults to true.
#
# [*vncproxy_protocol*]
# (required) The protocol to communicate with the VNC proxy server.
# Defaults to 'http'.
#
# [*vncproxy_host*]
# (required) IP address on which VNC server will be listening on.
# Defaults to undef.
#
# [*nova_hash*]
# (required) Nova hash in format of:
# Example:
# {"db_password"=>"JC4W0MTwtb6I0f8gBcKjJdiT", "enable_hugepages"=>false,
# "state_path"=>"/var/lib/nova", "user_password"=>"xT4rEWlhmI4KCyo2pGCMJwsz",
# "vncproxy_protocol"=>"http", "nova_rate_limits"=> {"POST"=>"100000",
# "POST_SERVERS"=>"100000", "PUT"=>"1000", "GET"=>"100000",
# "DELETE"=>"100000"}, "nova_report_interval"=>"60",
# "nova_service_down_time"=>"180", "num_networks"=>nil, "network_size"=>nil,
# "network_manager"=>nil}
# Defaults to {}.
#
# [*ceilometer*]
# (optional) IP address on which VNC server will be listening on.
# Defaults to 'false'.
#
# [*debug*]
# (optional) If set to true, the logging level will be set to DEBUG instead of
# the default INFO level.
# Defaults to 'false'.
#
# Entry point for VMware/vCenter integration: always wires up the controller
# side, and optionally the Ceilometer polling side. Full parameter
# documentation is in the file header comment above this class.
class vmware (
  $vcenter_settings  = undef,
  $vcenter_user      = 'user',
  $vcenter_password  = 'password',
  $vcenter_host_ip   = '10.10.10.10',
  $vcenter_cluster   = 'cluster',
  $vlan_interface    = undef,
  $use_quantum       = true,
  $vncproxy_protocol = 'http',
  $vncproxy_host     = undef,
  $nova_hash         = {},
  $ceilometer        = false,
  $debug             = false,
)
{
  # Controller-side integration is unconditional; the VNC proxy port is
  # taken from the nova settings hash.
  class { '::vmware::controller':
    vcenter_settings  => $vcenter_settings,
    vcenter_user      => $vcenter_user,
    vcenter_password  => $vcenter_password,
    vcenter_host_ip   => $vcenter_host_ip,
    vlan_interface    => $vlan_interface,
    use_quantum       => $use_quantum,
    vncproxy_protocol => $vncproxy_protocol,
    vncproxy_host     => $vncproxy_host,
    vncproxy_port     => $nova_hash['vncproxy_port'],
  }

  # Ceilometer's vCenter integration is opt-in.
  if $ceilometer {
    class { '::vmware::ceilometer':
      vcenter_settings => $vcenter_settings,
      vcenter_user     => $vcenter_user,
      vcenter_password => $vcenter_password,
      vcenter_host_ip  => $vcenter_host_ip,
      vcenter_cluster  => $vcenter_cluster,
      debug            => $debug,
    }
  }
}

View File

@ -1,36 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::network
#
# VMware related network configuration class. It handles whether we use neutron
# or nova-network and call for an appropriate class.
#
# === Parameters
#
# [*use_quantum*]
# (optional) Shows if neutron is enabled.
# Defaults to false.
#
class vmware::network (
  $use_quantum = false,
)
{
  # Resolve the backend class name first, then declare it exactly once:
  # neutron deployments get vmware::network::neutron, nova-network
  # deployments get vmware::network::nova.
  if $use_quantum {
    $backend_class = '::vmware::network::neutron'
  } else {
    $backend_class = '::vmware::network::nova'
  }

  class { $backend_class: }
}

View File

@ -1,23 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::network::neutron
#
# VMware related neutron configuration.
#
class vmware::network::neutron (
)
{
  # Intentionally empty: no VMware-specific network configuration is applied
  # for neutron. The class exists so vmware::network can declare a backend
  # class regardless of which network service is in use.
}

View File

@ -1,148 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::network
#
# VMware network class for nova-network.
#
# === Parameters
#
# [*ensure_package*]
# (optional) What state the package should be in.
# Defaults to 'present'.
#
# [*amqp_port*]
# (optional) The listening port number of the AMQP server. Mandatory to
# perform a monitor check.
# Defaults to '5673'.
#
# [*nova_network_config*]
# (required) Path used for nova conf.
# Defaults to '/etc/nova/nova.conf'.
#
# [*nova_network_config_dir*]
# (required) The base directory used for nova-network configs.
# Defaults to '/etc/nova/nova-network.d'.
#
# Configures nova-network for VMware deployments: installs the package, keeps
# the stock service stopped, writes an HA-specific config, and runs
# nova-network under a Pacemaker primitive instead. Full parameter docs are
# in the file header comment above this class.
class vmware::network::nova (
  # NOTE(review): $ensure_package is accepted but never referenced below.
  $ensure_package          = 'present',
  $amqp_port               = '5673',
  $nova_network_config     = '/etc/nova/nova.conf',
  $nova_network_config_dir = '/etc/nova/nova-network.d',
)
{
  include ::nova::params

  # Extra config file holding HA-specific nova-network options.
  $nova_network_config_ha = "${nova_network_config_dir}/nova-network-ha.conf"

  if ! defined(File[$nova_network_config_dir]) {
    file { $nova_network_config_dir:
      ensure => 'directory',
      owner  => 'nova',
      group  => 'nova',
      mode   => '0750',
    }
  }

  if ! defined(File[$nova_network_config_ha]) {
    file { $nova_network_config_ha:
      ensure  => 'present',
      content => template('vmware/nova-network-ha.conf.erb'),
      mode    => '0600',
      owner   => 'nova',
      group   => 'nova',
    }
  }

  # Credentials and endpoint handed to the OCF resource agent via the
  # primitive parameters below.
  $nova_user = 'nova'
  $nova_hash = hiera_hash('nova', {})
  $nova_password = $nova_hash['user_password']
  $management_vip = hiera('management_vip')
  $auth_url = "http://${management_vip}:5000/v2.0"
  $region = hiera('region', 'RegionOne')

  # Pacemaker primitive definition (OCF agent 'fuel/nova-network').
  $service_name = 'p_vcenter_nova_network'
  $primitive_class = 'ocf'
  $primitive_provider = 'fuel'
  $primitive_type = 'nova-network'
  $metadata = {
    'resource-stickiness' => '1'
  }
  $parameters = {
    'amqp_server_port'      => $amqp_port,
    'user'                  => $nova_user,
    'password'              => $nova_password,
    'auth_url'              => $auth_url,
    'region'                => $region,
    'config'                => $nova_network_config,
    'additional_parameters' => "--config-file=${nova_network_config_ha}",
  }
  $operations = {
    'monitor' => {
      'interval' => '20',
      'timeout'  => '30',
    },
    'start'   => {
      'timeout' => '20',
    },
    'stop'    => {
      'timeout' => '20',
    }
  }

  pacemaker::service { $service_name :
    prefix             => false,
    primitive_class    => $primitive_class,
    primitive_provider => $primitive_provider,
    primitive_type     => $primitive_type,
    metadata           => $metadata,
    parameters         => $parameters,
    operations         => $operations,
  }

  # Stop the distro init script from racing Pacemaker on Ubuntu.
  if ($::operatingsystem == 'Ubuntu') {
    tweaks::ubuntu_service_override { 'nova-network':
      package_name => 'nova-network',
    }
  }

  # The Pacemaker-managed service instance.
  service { $service_name :
    ensure => 'running',
    enable => true,
  }

  # The plain nova-network service stays installed but stopped/disabled;
  # only the Pacemaker primitive above runs it.
  package { 'nova-network':
    ensure => 'present',
    name   => $::nova::params::network_package_name,
  }
  service { 'nova-network':
    ensure => 'stopped',
    enable => false,
    name   => $::nova::params::network_service_name,
  }

  anchor { 'vcenter-nova-network-start': }
  anchor { 'vcenter-nova-network-end': }

  # Ordering: install package, stop the stock service, lay down configs,
  # create the Pacemaker resource, then start the managed service.
  Anchor['vcenter-nova-network-start']->
  Package['nova-network']->
  Service['nova-network']->
  File[$nova_network_config_dir]->
  File[$nova_network_config_ha]->
  Pcmk_resource[$service_name]->
  Service[$service_name]->
  Anchor['vcenter-nova-network-end']
}

View File

@ -1,77 +0,0 @@
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vmware::ssl
#
# The VMware driver for cinder-volume, nova-compute, ceilometer, etc establishes
# connections to vCenter over HTTPS, and VMware driver support the vCenter
# server certificate verification as part of the connection process.
# Class configures ssl verification for next cases:
# 1. Bypass vCenter certificate verification. Certificate
# verification turn off. This case is useful for faster deployment
# and for testing environment.
# 2. vCenter is using a Self-Signed certificate. In this case the
# user must upload custom CA bundle file certificate.
# 3. vCenter server certificate was emitted by know CA (e.g. GeoTrust).
# In this case user have to leave CA certificate bundle upload field empty.
#
# === Parameters
#
# [*vc_insecure*]
# (optional) If true, the vCenter server certificate is not verified. If
# false, then the default CA truststore is used for verification. This option
# is ignored if "ca_file" is set.
# Defaults to 'True'.
#
# [*vc_ca_file*]
# (optional) The hash name of the CA bundle file and data in format of:
# Example:
# "{"vc_ca_file"=>{"content"=>"RSA", "name"=>"vcenter-ca.pem"}}"
# Defaults to undef.
#
# [*vc_ca_filepath*]
# (required) Path CA bundle file to use in verifying the vCenter server
# certificate.
# Defaults to $::os_service_default.
#
# Computes the effective vCenter SSL settings ($vcenter_ca_filepath and
# $vcenter_insecure_real) consumed by the VMware driver classes, and writes
# the CA bundle file when verification is enabled. Full parameter docs are in
# the file header comment above this class.
class vmware::ssl(
  $vc_insecure    = true,
  $vc_ca_file     = undef,
  $vc_ca_filepath = $::os_service_default,
) {
  # pick() turns an undef $vc_ca_file into an empty hash so the 'content'
  # lookup below is always safe.
  $vcenter_ca_file = pick($vc_ca_file, {})
  $vcenter_ca_content = pick($vcenter_ca_file['content'], {})

  # Verification is enabled only when CA content was supplied and insecure
  # mode is off; every other case passes through the service default path
  # and the caller-provided insecure flag.
  if ! empty($vcenter_ca_content) and ! $vc_insecure {
    # A concrete destination path is mandatory once the bundle must be
    # written to disk.
    if is_service_default($vc_ca_filepath) {
      fail("The vc_ca_filepath parameter is required when vc_insecure is set \
to false and vcenter_ca_content not empty")
    }
    $vcenter_ca_filepath = $vc_ca_filepath
    $vcenter_insecure_real = false

    file { $vcenter_ca_filepath:
      ensure  => file,
      content => $vcenter_ca_content,
      mode    => '0644',
      owner   => 'root',
      group   => 'root',
    }
  } else {
    $vcenter_ca_filepath = $::os_service_default
    $vcenter_insecure_real = $vc_insecure
  }
}

View File

@ -1,23 +0,0 @@
{
"name": "fuel-vmware",
"version": "1.0.0",
"author": "Mirantis",
"summary": "Puppet module for nova-compute (VCDriver) support for Fuel",
"license": "Apache License, Version 2.0",
"source": "https://github.com/openstack/fuel-library/",
"issues_url": null,
"project_page": null,
"operatingsystem_support": [
{
"operatingsystem": "Ubuntu",
"operatingsystemrelease": [
"14.04"
]
}
],
"dependencies": [
{ "name": "puppetlabs-stdlib", "version_requirement": ">= 1.0.0" },
{ "name": "nova", "version_requirement": ">= 5.0.0" }
]
}

View File

@ -1,165 +0,0 @@
require 'spec_helper'

# Specs for vmware::ceilometer::compute_vmware. The per-option expectations
# are generated from literal hashes so each ceilometer.conf setting appears
# exactly once per scenario.
describe 'vmware::ceilometer::compute_vmware' do
  on_supported_os.each do |os, facts|
    context "on #{os}" do
      let(:facts) { facts }

      context 'with custom ca file' do
        let(:params) do
          {
            :debug                  => true,
            :availability_zone_name => 'vcenter',
            :vc_cluster             => 'Cluster1',
            :vc_host                => '172.16.0.254',
            :vc_user                => 'administrator@vsphere.local',
            :vc_password            => 'Qwer!1234',
            :vc_insecure            => false,
            :vc_ca_file             => {
              'content' => 'RSA',
              'name'    => 'vcenter-ca.pem' },
            :service_name           => 'vmcluster1',
            :identity_uri           => 'http://172.16.1.4:5000',
            :auth_user              => 'ceilometer',
            :auth_password          => 'GCqFEGzzDHvQSVYBJsX4qGhO',
            :tenant                 => 'services',
          }
        end

        it { is_expected.to compile.with_all_deps }

        it { is_expected.to contain_class('vmware::ssl').with(
          :vc_insecure    => params[:vc_insecure],
          :vc_ca_file     => params[:vc_ca_file],
          :vc_ca_filepath => '/etc/ceilometer/vmware-ca.pem',
        ) }

        # Every ceilometer.conf option this scenario must manage, with the
        # exact value expected (verification on, CA bundle written).
        {
          'DEFAULT/default_log_levels'           => 'amqp=DEBUG,amqplib=DEBUG,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,oslo.vmware=DEBUG',
          'DEFAULT/hypervisor_inspector'         => 'vmware',
          'DEFAULT/host'                         => 'vcenter-vmcluster1',
          'vmware/host_ip'                       => '172.16.0.254',
          'vmware/host_username'                 => 'administrator@vsphere.local',
          'vmware/host_password'                 => 'Qwer!1234',
          'vmware/ca_file'                       => '/etc/ceilometer/vmware-ca.pem',
          'vmware/insecure'                      => false,
          'keystone_authtoken/admin_user'        => 'ceilometer',
          'keystone_authtoken/admin_password'    => 'GCqFEGzzDHvQSVYBJsX4qGhO',
          'keystone_authtoken/admin_tenant_name' => 'services',
          'keystone_authtoken/identity_uri'      => 'http://172.16.1.4:5000',
        }.each do |option, value|
          it { is_expected.to contain_ceilometer_config(option).with_value(value) }
        end

        it { is_expected.to contain_class('ceilometer::params') }

        it { is_expected.to contain_package('ceilometer-polling').with(
          :ensure => 'latest',
          :name   => 'ceilometer-polling',
        ) }

        it { is_expected.to contain_service('ceilometer-polling').with(
          :ensure => 'running',
          :name   => 'ceilometer-polling',
        ) }
      end

      context 'without custom ca file' do
        let(:params) do
          {
            :debug                  => false,
            :availability_zone_name => 'vcenter',
            :vc_cluster             => 'Cluster1',
            :vc_host                => '172.16.0.254',
            :vc_user                => 'administrator@vsphere.local',
            :vc_password            => 'Qwer!1234',
            :vc_insecure            => true,
            :vc_ca_file             => '',
            :service_name           => 'vmcluster1',
            :identity_uri           => 'http://172.16.1.4:5000',
            :auth_user              => 'ceilometer',
            :auth_password          => 'GCqFEGzzDHvQSVYBJsX4qGhO',
            :tenant                 => 'services',
          }
        end

        it { is_expected.to compile.with_all_deps }

        it { is_expected.to contain_class('vmware::ssl').with(
          :vc_insecure    => params[:vc_insecure],
          :vc_ca_file     => params[:vc_ca_file],
          :vc_ca_filepath => '/etc/ceilometer/vmware-ca.pem',
        ) }

        # Same option set as above, but with verification off: no CA file is
        # configured and the insecure flag stays true.
        {
          'DEFAULT/default_log_levels'           => 'amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,oslo.vmware=WARN',
          'DEFAULT/hypervisor_inspector'         => 'vmware',
          'DEFAULT/host'                         => 'vcenter-vmcluster1',
          'vmware/host_ip'                       => '172.16.0.254',
          'vmware/host_username'                 => 'administrator@vsphere.local',
          'vmware/host_password'                 => 'Qwer!1234',
          'vmware/ca_file'                       => nil,
          'vmware/insecure'                      => true,
          'keystone_authtoken/admin_user'        => 'ceilometer',
          'keystone_authtoken/admin_password'    => 'GCqFEGzzDHvQSVYBJsX4qGhO',
          'keystone_authtoken/admin_tenant_name' => 'services',
          'keystone_authtoken/identity_uri'      => 'http://172.16.1.4:5000',
        }.each do |option, value|
          it { is_expected.to contain_ceilometer_config(option).with_value(value) }
        end

        it { is_expected.to contain_class('ceilometer::params') }

        it { is_expected.to contain_package('ceilometer-polling').with(
          :ensure => 'latest',
          :name   => 'ceilometer-polling',
        ) }

        it { is_expected.to contain_service('ceilometer-polling').with(
          :ensure => 'running',
          :name   => 'ceilometer-polling',
        ) }
      end
    end
  end
end

View File

@ -1,66 +0,0 @@
# Specs for the vmware::ceilometer class: it should install the
# ceilometer compute agent used to poll a vCenter hypervisor, both with
# and without a custom CA bundle in the vcenter settings hash.
require 'spec_helper'

describe 'vmware::ceilometer' do
  on_supported_os.each do |os, facts|
    context "on #{os}" do
      let(:facts) { facts }

      # Parameters every scenario shares; each context layers its own
      # :vcenter_settings variant on top of these.
      let(:base_params) do
        {
          :vcenter_user     => 'user',
          :vcenter_password => 'password',
          :vcenter_host_ip  => '10.10.10.10',
          :vcenter_cluster  => 'cluster',
          :debug            => true,
        }
      end

      # vCenter settings shared by both SSL variants below.
      let(:base_settings) do
        {
          'availability_zone_name' => 'vcenter',
          'datastore_regex'        => '.*',
          'service_name'           => 'srv_cluster1',
          'target_node'            => 'controllers',
          'vc_cluster'             => 'Cluster1',
          'vc_host'                => '172.16.0.145',
          'vc_password'            => 'vmware',
          'vc_user'                => 'root',
        }
      end

      context 'with custom ca file' do
        let(:params) do
          ssl = { 'vc_insecure' => 'false',
                  'vc_ca_file'  => { 'content' => 'RSA', 'name' => 'vcenter-ca.pem' } }
          base_params.merge(:vcenter_settings => base_settings.merge(ssl))
        end

        it { is_expected.to compile.with_all_deps }
        it { is_expected.to contain_class('ceilometer::params') }
        it { is_expected.to contain_package('ceilometer-agent-compute').with(
          :ensure => 'present',
          :name   => 'ceilometer-agent-compute',
        ) }
      end

      context 'without custom ca file' do
        let(:params) do
          ssl = { 'vc_insecure' => 'true', 'vc_ca_file' => '' }
          base_params.merge(:vcenter_settings => base_settings.merge(ssl))
        end

        it { is_expected.to compile.with_all_deps }
        it { is_expected.to contain_class('ceilometer::params') }
        it { is_expected.to contain_package('ceilometer-agent-compute').with(
          :ensure => 'present',
          :name   => 'ceilometer-agent-compute',
        ) }
      end
    end
  end
end

View File

@ -1,118 +0,0 @@
# Specs for vmware::controller: on a vCenter-backed deployment the class
# installs nova-compute but leaves the service stopped, pulls in
# vmware::network, and configures the noVNC proxy base URL.
require 'spec_helper'

describe 'vmware::controller' do
  on_supported_os.each do |os, facts|
    context "on #{os}" do
      let(:facts) { facts }

      # Parameters common to both SSL scenarios.
      let(:base_params) do
        {
          :vcenter_user      => 'user',
          :vcenter_password  => 'password',
          :vcenter_host_ip   => '10.10.10.10',
          :vlan_interface    => '',
          :use_quantum       => true,
          :vncproxy_protocol => 'https',
          :vncproxy_host     => '172.16.0.4',
          :vncproxy_port     => '',
        }
      end

      # vCenter settings shared by both variants.
      let(:base_settings) do
        {
          'availability_zone_name' => 'vcenter',
          'datastore_regex'        => '.*',
          'service_name'           => 'srv_cluster1',
          'target_node'            => 'controllers',
          'vc_cluster'             => 'Cluster1',
          'vc_host'                => '172.16.0.145',
          'vc_password'            => 'vmware',
          'vc_user'                => 'root',
        }
      end

      context 'with custom ca file' do
        let(:params) do
          ssl = { 'vc_insecure' => 'false',
                  'vc_ca_file'  => { 'content' => 'RSA', 'name' => 'vcenter-ca.pem' } }
          base_params.merge(:vcenter_settings => base_settings.merge(ssl))
        end

        it { is_expected.to compile.with_all_deps }
        it { is_expected.to contain_class('nova::params') }

        # nova-compute is installed but the service must stay stopped here.
        it { is_expected.to contain_package('nova-compute').with(
          :ensure => 'present',
          :name   => 'nova-compute',
        ).that_comes_before('Service[nova-compute]') }
        it { is_expected.to contain_service('nova-compute').with(
          :ensure => 'stopped',
          :name   => 'nova-compute',
        ) }

        it { is_expected.to contain_class('vmware::network').with(
          :use_quantum => params[:use_quantum],
        ) }

        it { is_expected.to contain_nova_config('DEFAULT/enabled_apis') \
          .with_value('ec2,osapi_compute,metadata') }
        # The VNC proxy URL is assembled from the proxy protocol/host/port
        # parameters passed above.
        it { is_expected.to contain_nova_config('vnc/novncproxy_base_url') \
          .with_value("#{params[:vncproxy_protocol]}://#{params[:vncproxy_host]}:#{params[:vncproxy_port]}/vnc_auto.html") }

        it { is_expected.to contain_package('cirros-testvmware').with(
          :ensure => 'present',
        ) }
        it { is_expected.to contain_package('python-suds').with(
          :ensure => 'present',
        ) }
      end

      context 'without custom ca file' do
        let(:params) do
          ssl = { 'vc_insecure' => 'true', 'vc_ca_file' => '' }
          base_params.merge(:vcenter_settings => base_settings.merge(ssl))
        end

        it { is_expected.to compile.with_all_deps }
        it { is_expected.to contain_class('nova::params') }

        it { is_expected.to contain_package('nova-compute').with(
          :ensure => 'present',
          :name   => 'nova-compute',
        ).that_comes_before('Service[nova-compute]') }
        it { is_expected.to contain_service('nova-compute').with(
          :ensure => 'stopped',
          :name   => 'nova-compute',
        ) }

        it { is_expected.to contain_class('vmware::network').with(
          :use_quantum => params[:use_quantum],
        ) }

        it { is_expected.to contain_nova_config('DEFAULT/enabled_apis') \
          .with_value('ec2,osapi_compute,metadata') }
        it { is_expected.to contain_nova_config('vnc/novncproxy_base_url') \
          .with_value("#{params[:vncproxy_protocol]}://#{params[:vncproxy_host]}:#{params[:vncproxy_port]}/vnc_auto.html") }

        it { is_expected.to contain_package('cirros-testvmware').with(
          :ensure => 'present',
        ) }
        it { is_expected.to contain_package('python-suds').with(
          :ensure => 'present',
        ) }
      end
    end
  end
end

View File

@ -1,84 +0,0 @@
# Specs for the top-level vmware class, which forwards vCenter settings
# to vmware::controller and, when ceilometer is enabled, to
# vmware::ceilometer.
require 'spec_helper'

describe 'vmware' do
  on_supported_os.each do |os, facts|
    context "on #{os}" do
      let(:facts) { facts }

      context 'with default parameters' do
        # With no explicit params the controller profile is configured
        # purely from the class defaults.
        it { is_expected.to compile.with_all_deps }
        it { is_expected.to contain_class('vmware::controller').with(
          :vcenter_settings  => nil,
          :vcenter_user      => 'user',
          :vcenter_password  => 'password',
          :vcenter_host_ip   => '10.10.10.10',
          :vlan_interface    => nil,
          :use_quantum       => true,
          :vncproxy_protocol => 'http',
          :vncproxy_host     => nil,
          :vncproxy_port     => '6080',
        ) }
      end

      context 'with custom parameters' do
        let(:params) do
          {
            :vcenter_settings  => {
              'availability_zone_name' => 'vcenter',
              'datastore_regex'        => '.*',
              'service_name'           => 'srv_cluster1',
              'target_node'            => 'controllers',
              'vc_cluster'             => 'Cluster1',
              'vc_host'                => '172.16.0.145',
              'vc_password'            => 'vmware',
              'vc_user'                => 'root',
              'vc_insecure'            => 'false',
              'vc_ca_file'             => { 'content' => 'RSA', 'name' => 'vcenter-ca.pem' },
            },
            :vcenter_user      => 'user',
            :vcenter_password  => 'password',
            :vcenter_host_ip   => '10.10.10.10',
            :vcenter_cluster   => 'cluster',
            :vlan_interface    => '',
            :use_quantum       => true,
            :vncproxy_protocol => 'https',
            :vncproxy_host     => '172.16.0.4',
            :nova_hash         => {
              'db_password'            => 'JoF3Wti3kn2Hm2RaD12SVvbI',
              'enable_hugepages'       => false,
              'state_path'             => '/var/lib/nova',
              'user_password'          => 'tEHRJ4biwyk4Z1JOempJVnXp',
              'vncproxy_protocol'      => 'http',
              'nova_rate_limits'       => {
                'POST'         => '100000',
                'POST_SERVERS' => '100000',
                'PUT'          => '1000',
                'GET'          => '100000',
                'DELETE'       => '100000',
              },
              'nova_report_interval'   => '60',
              'nova_service_down_time' => '180',
              'num_networks'           => nil,
              'network_size'           => nil,
              'network_manager'        => nil,
            },
            :ceilometer        => true,
            :debug             => true,
          }
        end

        it { is_expected.to compile.with_all_deps }

        # Controller settings pass straight through; the vncproxy port
        # keeps its '6080' default since it is not overridden above.
        it { is_expected.to contain_class('vmware::controller').with(
          :vcenter_settings  => params[:vcenter_settings],
          :vcenter_user      => params[:vcenter_user],
          :vcenter_password  => params[:vcenter_password],
          :vcenter_host_ip   => params[:vcenter_host_ip],
          :vlan_interface    => params[:vlan_interface],
          :use_quantum       => params[:use_quantum],
          :vncproxy_protocol => params[:vncproxy_protocol],
          :vncproxy_host     => params[:vncproxy_host],
          :vncproxy_port     => '6080',
        ) }

        # :ceilometer => true should pull in vmware::ceilometer with the
        # same vCenter credentials.
        it { is_expected.to contain_class('vmware::ceilometer').with(
          :vcenter_settings => params[:vcenter_settings],
          :vcenter_user     => params[:vcenter_user],
          :vcenter_password => params[:vcenter_password],
          :vcenter_host_ip  => params[:vcenter_host_ip],
          :vcenter_cluster  => params[:vcenter_cluster],
          :debug            => params[:debug],
        ) }
      end
    end
  end
end

View File

@ -1,38 +0,0 @@
# Specs for vmware::ssl, which writes a vCenter CA bundle to disk when
# certificate verification is enabled.
require 'spec_helper'

describe 'vmware::ssl' do
  on_supported_os.each do |os, facts|
    context "on #{os}" do
      let(:facts) { facts }

      context 'with default parameters' do
        # No CA material supplied: the class must still compile cleanly.
        it { is_expected.to compile.with_all_deps }
      end

      context 'with custom parameters' do
        let(:ca_path) { '/etc/nova/vmware-ca.pem' }

        let(:params) do
          {
            :vc_insecure    => false,
            :vc_ca_file     => { 'content' => 'RSA', 'name' => 'vcenter-ca.pem' },
            :vc_ca_filepath => ca_path,
          }
        end

        it { is_expected.to compile.with_all_deps }

        # The CA bundle should be written root-owned and world-readable.
        it { is_expected.to contain_file(ca_path).with(
          :ensure  => 'file',
          :content => 'RSA',
          :mode    => '0644',
          :owner   => 'root',
          :group   => 'root',
        ) }
      end
    end
  end
end

View File

@ -1,303 +0,0 @@
# Specs for the vmware::ceilometer::ha define: for each vCenter cluster it
# should render a per-cluster ceilometer-agent-compute config file and wire
# it up as a Pacemaker (pcmk_resource) controlled service.
# NOTE(review): the expectations below compare the generated config against
# a verbatim heredoc, so whitespace inside the heredocs is significant —
# do not reformat it.
require 'spec_helper'
describe 'vmware::ceilometer::ha', type: :define do
on_supported_os.each do |os, facts|
context "on #{os}" do
let(:facts) { facts }
# Scenario 1: vc_insecure => false with a CA bundle supplied, so the
# define must materialize the CA file and reference it via ca_file=.
context 'with custom ca file' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:service_name => 'srv_cluster1',
:target_node => 'controllers',
:default_log_levels => 'amqp=DEBUG,amqplib=DEBUG,boto=WARN',
:vc_cluster => 'Cluster1',
:vc_host => '172.16.0.254',
:vc_password => 'Qwer!1234',
:vc_user => 'administrator@vsphere.local',
:vc_insecure => false,
:vc_ca_file => {
'content' => 'RSA',
'name' => 'vcenter-ca.pem' },
}
end
# Title of the define instance under test (an index, not a name).
let(:title) { '0' }
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__ceilometer__ha('0') }
# The drop-in config directory must exist before the per-cluster file.
it { is_expected.to contain_file('/etc/ceilometer/ceilometer-compute.d').with(
:ensure => 'directory',
:owner => 'ceilometer',
:group => 'ceilometer',
:mode => '0750',
).that_comes_before('File[/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster1.conf]') }
# CA bundle written from the vc_ca_file 'content' value.
it { is_expected.to contain_file('/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster1-ca.pem').with(
:ensure => 'file',
:content => 'RSA',
:mode => '0644',
:owner => 'root',
:group => 'root',
) }
# Full, verbatim comparison of the rendered agent config (including the
# ca_file= line pointing at the bundle above).
it do
content = <<-eof
[DEFAULT]
# Name of this node, which must be valid in an AMQP key. Can be an opaque
# identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
#host=localhost
host=vcenter-srv_cluster1
# Inspector to use for inspecting the hypervisor layer. Known inspectors are
# libvirt, hyperv, vmware, xenapi and powervm.
#hypervisor_inspector=libvirt
hypervisor_inspector=vmware
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set.
#default_log_levels=amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN,
#sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN,
#requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN,
#websocket=WARN, requests.packages.urllib3.util.retry=WARN,
#urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN,
#stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO,
#dogpile.core.dogpile=INFO
default_log_levels=amqp=DEBUG,amqplib=DEBUG,boto=WARN
# Name of log file to send logging output to. If no default is set, logging will
# go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set.
#log_file=None
log_file=ceilometer-agent-compute-vmware-vcenter-srv_cluster1.log
[vmware]
# Number of times a VMware vSphere API may be retried.
#api_retry_count=10
api_retry_count=5
# CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
ca_file=/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster1-ca.pem
# IP address of the VMware vSphere host.
#host_ip=
host_ip=172.16.0.254
# Password of VMware vSphere.
#host_password=
host_password=Qwer!1234
# Port of the VMware vSphere host.
#host_port=443
# Username of VMware vSphere.
#host_username=
host_username=administrator@vsphere.local
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure=False
insecure=false
# Sleep time in seconds for polling an ongoing async task.
#task_poll_interval=0.5
task_poll_interval=5.0
# Optional vim service WSDL location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'ceilometer',
:group => 'ceilometer',
:content => content,
}
is_expected.to contain_file('/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster1.conf') \
.with(parameters).that_comes_before('Pcmk_resource[p_ceilometer_agent_compute_vmware_vcenter_srv_cluster1]')
end
# The agent runs under Pacemaker: an OCF resource is declared with the
# per-cluster config as an additional --config-file, then the matching
# service is started.
it { is_expected.to contain_pcmk_resource('p_ceilometer_agent_compute_vmware_vcenter_srv_cluster1').with(
:primitive_class => 'ocf',
:primitive_provider => 'fuel',
:primitive_type => 'ceilometer-agent-compute',
:metadata => {
'target-role' => 'stopped',
'resource-stickiness' => '1' },
:parameters => {
'amqp_server_port' => '5673',
'config' => '/etc/ceilometer/ceilometer.conf',
'pid' => '/var/run/ceilometer/ceilometer-agent-compute-vcenter_srv_cluster1.pid',
'user' => 'ceilometer',
'additional_parameters' => '--config-file=/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster1.conf', },
:operations => {
'monitor' => {
'timeout' => '20',
'interval' => '30', },
'start' => {
'timeout' => '360', },
'stop' => {
'timeout' => '360', } },
).that_comes_before('Service[p_ceilometer_agent_compute_vmware_vcenter_srv_cluster1]') }
it { is_expected.to contain_service('p_ceilometer_agent_compute_vmware_vcenter_srv_cluster1').with(
:ensure => 'running',
:enable => true,
) }
end
# Scenario 2: vc_insecure => true and no CA bundle — the rendered config
# must set insecure=true and omit the ca_file= line entirely.
context 'without custom ca file' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:service_name => 'srv_cluster2',
:target_node => 'controllers',
:default_log_levels => 'amqp=DEBUG,amqplib=DEBUG,boto=WARN',
:vc_cluster => 'Cluster2',
:vc_host => '172.16.0.254',
:vc_password => 'Qwer!1234',
:vc_user => 'administrator@vsphere.local',
:vc_insecure => true,
:vc_ca_file => '',
}
end
let(:title) { '1' }
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__ceilometer__ha('1') }
it { is_expected.to contain_file('/etc/ceilometer/ceilometer-compute.d').with(
:ensure => 'directory',
:owner => 'ceilometer',
:group => 'ceilometer',
:mode => '0750',
).that_comes_before('File[/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster2.conf]') }
it do
content = <<-eof
[DEFAULT]
# Name of this node, which must be valid in an AMQP key. Can be an opaque
# identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
#host=localhost
host=vcenter-srv_cluster2
# Inspector to use for inspecting the hypervisor layer. Known inspectors are
# libvirt, hyperv, vmware, xenapi and powervm.
#hypervisor_inspector=libvirt
hypervisor_inspector=vmware
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set.
#default_log_levels=amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN,
#sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN,
#requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN,
#websocket=WARN, requests.packages.urllib3.util.retry=WARN,
#urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN,
#stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO,
#dogpile.core.dogpile=INFO
default_log_levels=amqp=DEBUG,amqplib=DEBUG,boto=WARN
# Name of log file to send logging output to. If no default is set, logging will
# go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set.
#log_file=None
log_file=ceilometer-agent-compute-vmware-vcenter-srv_cluster2.log
[vmware]
# Number of times a VMware vSphere API may be retried.
#api_retry_count=10
api_retry_count=5
# CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
# IP address of the VMware vSphere host.
#host_ip=
host_ip=172.16.0.254
# Password of VMware vSphere.
#host_password=
host_password=Qwer!1234
# Port of the VMware vSphere host.
#host_port=443
# Username of VMware vSphere.
#host_username=
host_username=administrator@vsphere.local
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure=False
insecure=true
# Sleep time in seconds for polling an ongoing async task.
#task_poll_interval=0.5
task_poll_interval=5.0
# Optional vim service WSDL location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'ceilometer',
:group => 'ceilometer',
:content => content,
}
is_expected.to contain_file('/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster2.conf') \
.with(parameters).that_comes_before('Pcmk_resource[p_ceilometer_agent_compute_vmware_vcenter_srv_cluster2]')
end
it { is_expected.to contain_pcmk_resource('p_ceilometer_agent_compute_vmware_vcenter_srv_cluster2').with(
:primitive_class => 'ocf',
:primitive_provider => 'fuel',
:primitive_type => 'ceilometer-agent-compute',
:metadata => {
'target-role' => 'stopped',
'resource-stickiness' => '1' },
:parameters => {
'amqp_server_port' => '5673',
'config' => '/etc/ceilometer/ceilometer.conf',
'pid' => '/var/run/ceilometer/ceilometer-agent-compute-vcenter_srv_cluster2.pid',
'user' => 'ceilometer',
'additional_parameters' => '--config-file=/etc/ceilometer/ceilometer-compute.d/vmware-vcenter_srv_cluster2.conf', },
:operations => {
'monitor' => {
'timeout' => '20',
'interval' => '30', },
'start' => {
'timeout' => '360', },
'stop' => {
'timeout' => '360', } },
).that_comes_before('Service[p_ceilometer_agent_compute_vmware_vcenter_srv_cluster2]') }
it { is_expected.to contain_service('p_ceilometer_agent_compute_vmware_vcenter_srv_cluster2').with(
:ensure => 'running',
:enable => true,
) }
end
end
end
end

View File

@ -1,397 +0,0 @@
# Specs for the vmware::cinder::vmdk define: per availability zone it should
# render a cinder VMwareVcVmdk backend config under /etc/cinder/cinder.d and
# manage a dedicated cinder-volume-vmware service instance for it.
# NOTE(review): the expectations compare the rendered config against a
# verbatim heredoc, so whitespace inside the heredocs is significant —
# do not reformat it.
require 'spec_helper'
describe 'vmware::cinder::vmdk', type: :define do
on_supported_os.each do |os, facts|
context "on #{os}" do
let(:facts) { facts }
# Distro-specific service/init/defaults names: Debian uses upstart-style
# /etc/init + /etc/default, RedHat uses /etc/init.d + /etc/sysconfig.
let(:p_param) do
case facts[:osfamily]
when 'Debian'
{
:service => 'cinder-volume',
:file_perm => '0644',
:src_init => 'cinder-volume-vmware.conf',
:dst_init => '/etc/init',
:volume_def => '/etc/default/cinder-volume-vmware',
:opts => "CINDER_VOLUME_OPTS='--config-file=/etc/cinder/cinder.d/vmware",
:conf => '.conf'
}
when 'RedHat'
{
:service => 'openstack-cinder-volume',
:file_perm => '0755',
:src_init => 'openstack-cinder-volume-vmware',
:dst_init => '/etc/init.d',
:volume_def => '/etc/sysconfig/openstack-cinder-volume-vmware',
:opts => "OPTIONS='--config-file=/etc/cinder/cinder.d/vmware",
:conf => ''
}
end
end
# Scenario 1: defaults only — title 'non-nova' becomes the AZ/backend
# host and the config uses the define's default vCenter credentials with
# vmware_insecure=true and no CA bundle.
context 'with default parameters' do
let(:title) do
'non-nova'
end
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__cinder__vmdk('non-nova') }
it { is_expected.to contain_class('cinder::params') }
# Drop-in config directory must precede the per-AZ config file.
it { is_expected.to contain_file('/etc/cinder/cinder.d').with(
:ensure => 'directory',
:owner => 'cinder',
:group => 'cinder',
:mode => '0750',
).that_comes_before('File[/etc/cinder/cinder.d/vmware-non-nova.conf]') }
# Full, verbatim comparison of the rendered backend config.
it do
content = <<-eof
[DEFAULT]
# A list of backend names to use. These backend names should be backed by a
# unique [CONFIG] group with its options (list value)
#enabled_backends = <None>
enabled_backends=VMwareVcVmdk-backend
# Availability zone of this node (string value)
#storage_availability_zone = nova
storage_availability_zone=non-nova-cinder
# Default availability zone for new volumes. If not set, the
# storage_availability_zone option value is used as the default for new volumes.
# (string value)
#default_availability_zone = <None>
default_availability_zone=non-nova-cinder
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
#debug = false
debug=false
[VMwareVcVmdk-backend]
# Backend override of host value. (string value)
# Deprecated group/name - [DEFAULT]/host
#backend_host = <None>
backend_host=non-nova
# The backend name for a given driver implementation (string value)
#volume_backend_name = <None>
volume_backend_name=VMwareVcVmdk-backend
# Driver to use for volume creation (string value)
#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
# Number of times VMware vCenter server API must be retried upon connection
# related issues. (integer value)
#vmware_api_retry_count = 10
vmware_api_retry_count=10
# CA bundle file to use in verifying the vCenter server certificate. (string
# value)
#vmware_ca_file = <None>
# Name of a vCenter compute cluster where volumes should be created. (multi
# valued)
#vmware_cluster_name =
# IP address for connecting to VMware vCenter server. (string value)
#vmware_host_ip = <None>
vmware_host_ip=1.2.3.4
# Password for authenticating with VMware vCenter server. (string value)
#vmware_host_password = <None>
vmware_host_password=password
# Username for authenticating with VMware vCenter server. (string value)
#vmware_host_username = <None>
vmware_host_username=user
# Optional string specifying the VMware vCenter server version. The driver
# attempts to retrieve the version from VMware vCenter server. Set this
# configuration only if you want to override the vCenter server version. (string
# value)
#vmware_host_version = <None>
# Timeout in seconds for VMDK volume transfer between Cinder and Glance.
# (integer value)
#vmware_image_transfer_timeout_secs = 7200
vmware_image_transfer_timeout_secs=7200
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "vmware_ca_file" is set. (boolean value)
#vmware_insecure = false
vmware_insecure=true
# Max number of objects to be retrieved per batch. Query results will be
# obtained in batches from the server and not in one shot. Server may still
# limit the count to something less than the configured value. (integer value)
#vmware_max_objects_retrieval = 100
vmware_max_objects_retrieval=100
# The interval (in seconds) for polling remote tasks invoked on VMware vCenter
# server. (floating point value)
#vmware_task_poll_interval = 0.5
vmware_task_poll_interval=5
# Directory where virtual disks are stored during volume backup and restore.
# (string value)
#vmware_tmp_dir = /tmp
vmware_tmp_dir=/tmp
# Name of the vCenter inventory folder that will contain Cinder volumes. This
# folder will be created under "OpenStack/<project_folder>", where
# project_folder is of format "Project (<volume_project_id>)". (string value)
#vmware_volume_folder = Volumes
vmware_volume_folder=cinder-volumes
# Optional VIM service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds. (string value)
#vmware_wsdl_location = <None>
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'cinder',
:group => 'cinder',
:content => content,
}
is_expected.to contain_file('/etc/cinder/cinder.d/vmware-non-nova.conf').with(parameters)
end
# The generic (unconfigured) vmware volume service is stopped/disabled;
# the per-AZ instance service is the one that runs.
it { is_expected.to contain_service('cinder_volume_vmware').with(
:ensure => 'stopped',
:enable => false,
:name => "#{p_param[:service]}-vmware",
:hasstatus => true,
) }
it { is_expected.to contain_service('cinder_volume_vmware_non-nova').with(
:ensure => 'running',
:name => "#{p_param[:service]}-vmware-non-nova",
:enable => true,
) }
# Init script shipped from the module, then symlinked per AZ, plus the
# defaults/sysconfig file carrying the --config-file option.
it { is_expected.to contain_file("#{p_param[:src_init]}").with(
:source => "puppet:///modules/vmware/#{p_param[:src_init]}",
:path => "#{p_param[:dst_init]}/#{p_param[:src_init]}",
:owner => 'root',
:group => 'root',
:mode => p_param[:file_perm],
).that_comes_before("File[#{p_param[:dst_init]}/#{p_param[:service]}-vmware-non-nova#{p_param[:conf]}]") }
it { is_expected.to contain_file("#{p_param[:volume_def]}-non-nova").with(
:ensure => 'present',
:content => "#{p_param[:opts]}-non-nova.conf'",
) }
it { is_expected.to contain_file("#{p_param[:dst_init]}/#{p_param[:service]}-vmware-non-nova#{p_param[:conf]}").with(
:ensure => 'link',
:target => "#{p_param[:dst_init]}/#{p_param[:service]}-vmware#{p_param[:conf]}",
).that_comes_before("File[#{p_param[:volume_def]}-non-nova]") }
end
# Scenario 2: explicit vCenter credentials with verification enabled —
# the CA bundle is written out and vmware_ca_file/vmware_insecure=false
# appear in the rendered config; debug=true is propagated.
context 'with custom parameters' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:vc_insecure => false,
:vc_ca_file => {
'content' => 'RSA',
'name' => 'vcenter-ca.pem' },
:vc_host => '172.16.0.254',
:vc_password => 'Qwer!1234',
:vc_user => 'administrator@vsphere.local',
:debug => true,
}
end
let(:title) do
params[:availability_zone_name]
end
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__cinder__vmdk('vcenter') }
it { is_expected.to contain_class('cinder::params') }
it { is_expected.to contain_file('/etc/cinder/cinder.d').with(
:ensure => 'directory',
:owner => 'cinder',
:group => 'cinder',
:mode => '0750',
).that_comes_before('File[/etc/cinder/cinder.d/vmware-vcenter.conf]') }
it { is_expected.to contain_file('/etc/cinder/cinder.d/vmware-vcenter-ca.pem').with(
:ensure => 'file',
:content => 'RSA',
:mode => '0644',
:owner => 'root',
:group => 'root',
) }
it do
content = <<-eof
[DEFAULT]
# A list of backend names to use. These backend names should be backed by a
# unique [CONFIG] group with its options (list value)
#enabled_backends = <None>
enabled_backends=VMwareVcVmdk-backend
# Availability zone of this node (string value)
#storage_availability_zone = nova
storage_availability_zone=vcenter-cinder
# Default availability zone for new volumes. If not set, the
# storage_availability_zone option value is used as the default for new volumes.
# (string value)
#default_availability_zone = <None>
default_availability_zone=vcenter-cinder
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
#debug = false
debug=true
[VMwareVcVmdk-backend]
# Backend override of host value. (string value)
# Deprecated group/name - [DEFAULT]/host
#backend_host = <None>
backend_host=vcenter
# The backend name for a given driver implementation (string value)
#volume_backend_name = <None>
volume_backend_name=VMwareVcVmdk-backend
# Driver to use for volume creation (string value)
#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
# Number of times VMware vCenter server API must be retried upon connection
# related issues. (integer value)
#vmware_api_retry_count = 10
vmware_api_retry_count=10
# CA bundle file to use in verifying the vCenter server certificate. (string
# value)
#vmware_ca_file = <None>
vmware_ca_file=/etc/cinder/cinder.d/vmware-vcenter-ca.pem
# Name of a vCenter compute cluster where volumes should be created. (multi
# valued)
#vmware_cluster_name =
# IP address for connecting to VMware vCenter server. (string value)
#vmware_host_ip = <None>
vmware_host_ip=172.16.0.254
# Password for authenticating with VMware vCenter server. (string value)
#vmware_host_password = <None>
vmware_host_password=Qwer!1234
# Username for authenticating with VMware vCenter server. (string value)
#vmware_host_username = <None>
vmware_host_username=administrator@vsphere.local
# Optional string specifying the VMware vCenter server version. The driver
# attempts to retrieve the version from VMware vCenter server. Set this
# configuration only if you want to override the vCenter server version. (string
# value)
#vmware_host_version = <None>
# Timeout in seconds for VMDK volume transfer between Cinder and Glance.
# (integer value)
#vmware_image_transfer_timeout_secs = 7200
vmware_image_transfer_timeout_secs=7200
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "vmware_ca_file" is set. (boolean value)
#vmware_insecure = false
vmware_insecure=false
# Max number of objects to be retrieved per batch. Query results will be
# obtained in batches from the server and not in one shot. Server may still
# limit the count to something less than the configured value. (integer value)
#vmware_max_objects_retrieval = 100
vmware_max_objects_retrieval=100
# The interval (in seconds) for polling remote tasks invoked on VMware vCenter
# server. (floating point value)
#vmware_task_poll_interval = 0.5
vmware_task_poll_interval=5
# Directory where virtual disks are stored during volume backup and restore.
# (string value)
#vmware_tmp_dir = /tmp
vmware_tmp_dir=/tmp
# Name of the vCenter inventory folder that will contain Cinder volumes. This
# folder will be created under "OpenStack/<project_folder>", where
# project_folder is of format "Project (<volume_project_id>)". (string value)
#vmware_volume_folder = Volumes
vmware_volume_folder=cinder-volumes
# Optional VIM service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds. (string value)
#vmware_wsdl_location = <None>
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'cinder',
:group => 'cinder',
:content => content,
}
is_expected.to contain_file('/etc/cinder/cinder.d/vmware-vcenter.conf').with(parameters)
end
it { is_expected.to contain_service('cinder_volume_vmware').with(
:ensure => 'stopped',
:enable => false,
:name => "#{p_param[:service]}-vmware",
:hasstatus => true,
) }
it { is_expected.to contain_service('cinder_volume_vmware_vcenter').with(
:ensure => 'running',
:name => "#{p_param[:service]}-vmware-vcenter",
:enable => true,
) }
it { is_expected.to contain_file("#{p_param[:src_init]}").with(
:source => "puppet:///modules/vmware/#{p_param[:src_init]}",
:path => "#{p_param[:dst_init]}/#{p_param[:src_init]}",
:owner => 'root',
:group => 'root',
:mode => p_param[:file_perm],
).that_comes_before("File[#{p_param[:dst_init]}/#{p_param[:service]}-vmware-vcenter#{p_param[:conf]}]") }
it { is_expected.to contain_file("#{p_param[:volume_def]}-vcenter").with(
:ensure => 'present',
:content => "#{p_param[:opts]}-vcenter.conf'",
) }
it { is_expected.to contain_file("#{p_param[:dst_init]}/#{p_param[:service]}-vmware-vcenter#{p_param[:conf]}").with(
:ensure => 'link',
:target => "#{p_param[:dst_init]}/#{p_param[:service]}-vmware#{p_param[:conf]}",
).that_comes_before("File[#{p_param[:volume_def]}-vcenter]") }
end
end
end
end

View File

@ -1,425 +0,0 @@
require 'spec_helper'
describe 'vmware::compute::ha', type: :define do
on_supported_os.each do |os, facts|
context "on #{os}" do
let(:facts) { facts }
context 'with custom ca file' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:vc_cluster => 'Cluster1',
:vc_host => '172.16.0.254',
:vc_user => 'administrator@vsphere.local',
:vc_password => 'Qwer!1234',
:service_name => 'srv_cluster1',
:target_node => 'controllers',
:vc_insecure => false,
:vc_ca_file => {
'content' => 'RSA',
'name' => 'vcenter-ca.pem' },
:datastore_regex => '.*',
}
end
let(:title) { '0' }
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__compute__ha('0') }
it { is_expected.to contain_file('/etc/nova/nova-compute.d').with(
:ensure => 'directory',
:owner => 'nova',
:group => 'nova',
:mode => '0750',
).that_comes_before('File[/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster1.conf]') }
it { is_expected.to contain_file('/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster1-ca.pem').with(
:ensure => 'file',
:content => 'RSA',
:mode => '0644',
:owner => 'root',
:group => 'root',
) }
it do
content = <<-eof
[DEFAULT]
# Driver to use for controlling virtualization.
# Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver,
# ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver
#compute_driver=None
compute_driver=vmwareapi.VMwareVCDriver
# Name of log file to output to. If no default is set, logging will go to
# stdout. This option is ignored if log_config_append is set.
#log_file=None
log_file=nova-compute-vmware-vcenter-srv_cluster1.log
# Name of this node. This can be an opaque identifier. It is not necessarily a
# hostname, FQDN, or IP address. However, the node name must be valid within an
# AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
#host=localhost
host=vcenter-srv_cluster1
# Amount of memory in MB to reserve for the host.
#reserved_host_memory_mb=512
reserved_host_memory_mb = 0
# Force injection to take place on a config drive.
#force_config_drive = False
force_config_drive=False
[vmware]
# The number of times we retry on failures, e.g., socket error, etc.
#api_retry_count=10
api_retry_count=5
# Specify a CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
ca_file=/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster1-ca.pem
# The prefix for where cached images are stored. This is NOT the full path -
# just a folder prefix. This should only be used when a datastore cache should
# be shared between compute nodes. Note: this should only be used when the
# compute nodes have a shared file system.
#cache_prefix=None
cache_prefix=$host
# Name of a VMware Cluster ComputeResource.
#cluster_name=None
cluster_name=Cluster1
# Set this value if affected by an increased network latency causing repeated
# characters when typing in a remote console.
#console_delay_seconds=None
# Regex to match the name of a datastore.
#datastore_regex=None
datastore_regex=.*
# Hostname or IP address for connection to VMware vCenter host.
#host_ip=None
host_ip=172.16.0.254
# Password for connection to VMware vCenter host.
#host_password=None
host_password=Qwer!1234
# Port for connection to VMware vCenter host.
#host_port = 443
# Username for connection to VMware vCenter host.
#host_username=None
host_username=administrator@vsphere.local
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure = False
insecure=false
# This option should be configured only when using the NSX-MH Neutron plugin.
# This is the name of the integration bridge on the ESXi. This should not be set
# for any other Neutron plugin. Hence the default value is not set.
#integration_bridge=None
# The maximum number of ObjectContent data objects that should be returned in a
# single result. A positive value will cause the operation to suspend the
# retrieval when the count of objects reaches the specified maximum. The server
# may still limit the count to something less than the configured value. Any
# remaining objects may be retrieved with additional requests.
#maximum_objects = 100
maximum_objects=100
# The PBM default policy. If pbm_wsdl_location is set and there is no defined
# storage policy for the specific request then this policy will be used.
#pbm_default_policy=None
# The PBM status.
#pbm_enabled=False
# PBM service WSDL file location URL.
# e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable
# storage policy based placement of instances.
#pbm_wsdl_location=None
# Identifies a proxy service that provides network access to the
# serial_port_service_uri. This option is ignored if serial_port_service_uri is
# not specified.
#serial_port_proxy_uri=None
# Identifies the remote system that serial port traffic will be sent to. If this
# is not set, no serial ports will be added to the created VMs.
#serial_port_service_uri=None
# The interval used for polling of remote tasks.
#task_poll_interval=0.5
task_poll_interval=5.0
# Whether to use linked clone
#use_linked_clone=True
use_linked_clone=true
# Optional VIM Service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
# Physical ethernet adapter name for vlan networking
#vlan_interface=vmnic0
# VNC starting port.
#vnc_port=5900
# Total number of VNC ports
#vnc_port_total=10000
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'nova',
:group => 'nova',
:content => content,
}
is_expected.to contain_file('/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster1.conf') \
.with(parameters).that_comes_before('Pcmk_resource[p_nova_compute_vmware_vcenter-srv_cluster1]')
end
it { is_expected.to contain_pcmk_resource('p_nova_compute_vmware_vcenter-srv_cluster1').with(
:primitive_class => 'ocf',
:primitive_provider => 'fuel',
:primitive_type => 'nova-compute',
:metadata => {
'resource-stickiness' => '1' },
:parameters => {
'amqp_server_port' => '5673',
'config' => '/etc/nova/nova.conf',
'pid' => '/var/run/nova/nova-compute-vcenter-srv_cluster1.pid',
'additional_parameters' => '--config-file=/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster1.conf', },
:operations => {
'monitor' => {
'timeout' => '10',
'interval' => '20', },
'start' => {
'timeout' => '30', },
'stop' => {
'timeout' => '30', } },
).that_comes_before('Service[p_nova_compute_vmware_vcenter-srv_cluster1]') }
it { is_expected.to contain_service('p_nova_compute_vmware_vcenter-srv_cluster1').with(
:ensure => 'running',
:enable => true,
) }
end
# Exercises vmware::compute::ha when no custom CA bundle is supplied:
# vc_insecure is true and vc_ca_file is empty, so the rendered nova-compute
# config must set insecure=true and must NOT emit a ca_file option (and no
# *-ca.pem file resource is expected, unlike the custom-CA context).
context 'without custom ca file' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:vc_cluster => 'Cluster2',
:vc_host => '172.16.0.254',
:vc_user => 'administrator@vsphere.local',
:vc_password => 'Qwer!1234',
:service_name => 'srv_cluster2',
:target_node => 'controllers',
:vc_insecure => true,
:vc_ca_file => '',
:datastore_regex => '.*',
}
end
let(:title) { '1' }
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__compute__ha('1') }
# The drop-in config directory must exist before the per-cluster config file.
it { is_expected.to contain_file('/etc/nova/nova-compute.d').with(
:ensure => 'directory',
:owner => 'nova',
:group => 'nova',
:mode => '0750',
).that_comes_before('File[/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster2.conf]') }
# The per-cluster nova-compute config must match this template byte-for-byte;
# note insecure=true and the absence of any ca_file= line.
it do
content = <<-eof
[DEFAULT]
# Driver to use for controlling virtualization.
# Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver,
# ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver
#compute_driver=None
compute_driver=vmwareapi.VMwareVCDriver
# Name of log file to output to. If no default is set, logging will go to
# stdout. This option is ignored if log_config_append is set.
#log_file=None
log_file=nova-compute-vmware-vcenter-srv_cluster2.log
# Name of this node. This can be an opaque identifier. It is not necessarily a
# hostname, FQDN, or IP address. However, the node name must be valid within an
# AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
#host=localhost
host=vcenter-srv_cluster2
# Amount of memory in MB to reserve for the host.
#reserved_host_memory_mb=512
reserved_host_memory_mb = 0
# Force injection to take place on a config drive.
#force_config_drive = False
force_config_drive=False
[vmware]
# The number of times we retry on failures, e.g., socket error, etc.
#api_retry_count=10
api_retry_count=5
# Specify a CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
# The prefix for where cached images are stored. This is NOT the full path -
# just a folder prefix. This should only be used when a datastore cache should
# be shared between compute nodes. Note: this should only be used when the
# compute nodes have a shared file system.
#cache_prefix=None
cache_prefix=$host
# Name of a VMware Cluster ComputeResource.
#cluster_name=None
cluster_name=Cluster2
# Set this value if affected by an increased network latency causing repeated
# characters when typing in a remote console.
#console_delay_seconds=None
# Regex to match the name of a datastore.
#datastore_regex=None
datastore_regex=.*
# Hostname or IP address for connection to VMware vCenter host.
#host_ip=None
host_ip=172.16.0.254
# Password for connection to VMware vCenter host.
#host_password=None
host_password=Qwer!1234
# Port for connection to VMware vCenter host.
#host_port = 443
# Username for connection to VMware vCenter host.
#host_username=None
host_username=administrator@vsphere.local
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure = False
insecure=true
# This option should be configured only when using the NSX-MH Neutron plugin.
# This is the name of the integration bridge on the ESXi. This should not be set
# for any other Neutron plugin. Hence the default value is not set.
#integration_bridge=None
# The maximum number of ObjectContent data objects that should be returned in a
# single result. A positive value will cause the operation to suspend the
# retrieval when the count of objects reaches the specified maximum. The server
# may still limit the count to something less than the configured value. Any
# remaining objects may be retrieved with additional requests.
#maximum_objects = 100
maximum_objects=100
# The PBM default policy. If pbm_wsdl_location is set and there is no defined
# storage policy for the specific request then this policy will be used.
#pbm_default_policy=None
# The PBM status.
#pbm_enabled=False
# PBM service WSDL file location URL.
# e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable
# storage policy based placement of instances.
#pbm_wsdl_location=None
# Identifies a proxy service that provides network access to the
# serial_port_service_uri. This option is ignored if serial_port_service_uri is
# not specified.
#serial_port_proxy_uri=None
# Identifies the remote system that serial port traffic will be sent to. If this
# is not set, no serial ports will be added to the created VMs.
#serial_port_service_uri=None
# The interval used for polling of remote tasks.
#task_poll_interval=0.5
task_poll_interval=5.0
# Whether to use linked clone
#use_linked_clone=True
use_linked_clone=true
# Optional VIM Service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
# Physical ethernet adapter name for vlan networking
#vlan_interface=vmnic0
# VNC starting port.
#vnc_port=5900
# Total number of VNC ports
#vnc_port_total=10000
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'nova',
:group => 'nova',
:content => content,
}
is_expected.to contain_file('/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster2.conf') \
.with(parameters).that_comes_before('Pcmk_resource[p_nova_compute_vmware_vcenter-srv_cluster2]')
end
# Pacemaker resource: ocf:fuel:nova-compute pointed at the drop-in config;
# the config file must be in place before the resource is defined.
it { is_expected.to contain_pcmk_resource('p_nova_compute_vmware_vcenter-srv_cluster2').with(
:primitive_class => 'ocf',
:primitive_provider => 'fuel',
:primitive_type => 'nova-compute',
:metadata => {
'resource-stickiness' => '1' },
:parameters => {
'amqp_server_port' => '5673',
'config' => '/etc/nova/nova.conf',
'pid' => '/var/run/nova/nova-compute-vcenter-srv_cluster2.pid',
'additional_parameters' => '--config-file=/etc/nova/nova-compute.d/vmware-vcenter_srv_cluster2.conf', },
:operations => {
'monitor' => {
'timeout' => '10',
'interval' => '20', },
'start' => {
'timeout' => '30', },
'stop' => {
'timeout' => '30', } },
).that_comes_before('Service[p_nova_compute_vmware_vcenter-srv_cluster2]') }
# The pacemaker-managed service wrapper must end up running and enabled.
it { is_expected.to contain_service('p_nova_compute_vmware_vcenter-srv_cluster2').with(
:ensure => 'running',
:enable => true,
) }
end
end
end
end

View File

@ -1,398 +0,0 @@
require 'spec_helper'
describe 'vmware::compute_vmware', type: :define do
on_supported_os.each do |os, facts|
context "on #{os}" do
let(:facts) { facts }
# Exercises vmware::compute_vmware with a supplied CA bundle:
# vc_insecure is false and vc_ca_file carries content, so the define must
# install /etc/nova/vmware-ca.pem and point ca_file at it in the rendered
# nova-compute.conf (insecure=false).
context 'with custom ca file' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:vc_cluster => 'Cluster1',
:vc_host => '172.16.0.254',
:vc_user => 'administrator@vsphere.local',
:vc_password => 'Qwer!1234',
:service_name => 'srv_cluster1',
:current_node => 'node-2',
:target_node => 'node-2',
:vlan_interface => 'vmnic0',
:vc_insecure => false,
:vc_ca_file => {
'content' => 'RSA',
'name' => 'vcenter-ca.pem' },
:datastore_regex => '.*',
}
end
let(:title) { '0' }
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__compute_vmware('0') }
it { is_expected.to contain_class('nova::params') }
# CA bundle is written from the vc_ca_file 'content' value.
it { is_expected.to contain_file('/etc/nova/vmware-ca.pem').with(
:ensure => 'file',
:content => 'RSA',
:mode => '0644',
:owner => 'root',
:group => 'root',
) }
# nova-compute.conf must match this template byte-for-byte; note
# ca_file=/etc/nova/vmware-ca.pem, insecure=false and vlan_interface=vmnic0.
it do
content = <<-eof
[DEFAULT]
# Driver to use for controlling virtualization.
# Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver,
# ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver
#compute_driver=None
compute_driver=vmwareapi.VMwareVCDriver
# Name of log file to output to. If no default is set, logging will go to
# stdout. This option is ignored if log_config_append is set.
#log_file=None
log_file=nova-compute-vmware-vcenter-srv_cluster1.log
# Name of this node. This can be an opaque identifier. It is not necessarily a
# hostname, FQDN, or IP address. However, the node name must be valid within an
# AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
#host=localhost
host=vcenter-srv_cluster1
# Amount of memory in MB to reserve for the host.
#reserved_host_memory_mb=512
reserved_host_memory_mb = 0
# Force injection to take place on a config drive.
#force_config_drive = False
force_config_drive=False
[vmware]
# The number of times we retry on failures, e.g., socket error, etc.
#api_retry_count=10
api_retry_count=5
# Specify a CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
ca_file=/etc/nova/vmware-ca.pem
# The prefix for where cached images are stored. This is NOT the full path -
# just a folder prefix. This should only be used when a datastore cache should
# be shared between compute nodes. Note: this should only be used when the
# compute nodes have a shared file system.
#cache_prefix=None
cache_prefix=$host
# Name of a VMware Cluster ComputeResource.
#cluster_name=None
cluster_name=Cluster1
# Set this value if affected by an increased network latency causing repeated
# characters when typing in a remote console.
#console_delay_seconds=None
# Regex to match the name of a datastore.
#datastore_regex=None
datastore_regex=.*
# Hostname or IP address for connection to VMware vCenter host.
#host_ip=None
host_ip=172.16.0.254
# Password for connection to VMware vCenter host.
#host_password=None
host_password=Qwer!1234
# Port for connection to VMware vCenter host.
#host_port = 443
# Username for connection to VMware vCenter host.
#host_username=None
host_username=administrator@vsphere.local
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure = False
insecure=false
# This option should be configured only when using the NSX-MH Neutron plugin.
# This is the name of the integration bridge on the ESXi. This should not be set
# for any other Neutron plugin. Hence the default value is not set.
#integration_bridge=None
# The maximum number of ObjectContent data objects that should be returned in a
# single result. A positive value will cause the operation to suspend the
# retrieval when the count of objects reaches the specified maximum. The server
# may still limit the count to something less than the configured value. Any
# remaining objects may be retrieved with additional requests.
#maximum_objects = 100
maximum_objects=100
# The PBM default policy. If pbm_wsdl_location is set and there is no defined
# storage policy for the specific request then this policy will be used.
#pbm_default_policy=None
# The PBM status.
#pbm_enabled=False
# PBM service WSDL file location URL.
# e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable
# storage policy based placement of instances.
#pbm_wsdl_location=None
# Identifies a proxy service that provides network access to the
# serial_port_service_uri. This option is ignored if serial_port_service_uri is
# not specified.
#serial_port_proxy_uri=None
# Identifies the remote system that serial port traffic will be sent to. If this
# is not set, no serial ports will be added to the created VMs.
#serial_port_service_uri=None
# The interval used for polling of remote tasks.
#task_poll_interval=0.5
task_poll_interval=5.0
# Whether to use linked clone
#use_linked_clone=True
use_linked_clone=true
# Optional VIM Service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
# Physical ethernet adapter name for vlan networking
#vlan_interface=vmnic0
vlan_interface=vmnic0
# VNC starting port.
#vnc_port=5900
# Total number of VNC ports
#vnc_port_total=10000
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'nova',
:group => 'nova',
:content => content,
}
is_expected.to contain_file('/etc/nova/nova-compute.conf') \
.with(parameters).that_comes_before('Service[nova-compute]')
end
# Install order: python-oslo.vmware -> nova-compute package -> config file.
it { is_expected.to contain_package('nova-compute').with(
:ensure => 'installed',
:name => 'nova-compute',
).that_comes_before('File[/etc/nova/nova-compute.conf]') }
it { is_expected.to contain_package('python-oslo.vmware').with(
:ensure => 'installed',
).that_comes_before('Package[nova-compute]') }
# NOTE(review): the init-managed service is expected stopped/disabled here —
# presumably the process is supervised elsewhere (see the pcmk HA specs);
# confirm against the manifest under test.
it { is_expected.to contain_service('nova-compute').with(
:ensure => 'stopped',
:name => 'nova-compute',
:enable => false,
) }
end
# Exercises vmware::compute_vmware without a CA bundle:
# vc_insecure is true and vc_ca_file is empty, so the rendered
# nova-compute.conf must set insecure=true and omit ca_file entirely
# (and no /etc/nova/vmware-ca.pem file is expected).
context 'without custom ca file' do
let(:params) do
{
:availability_zone_name => 'vcenter',
:vc_cluster => 'Cluster2',
:vc_host => '172.16.0.254',
:vc_user => 'administrator@vsphere.local',
:vc_password => 'Qwer!1234',
:service_name => 'srv_cluster2',
:current_node => 'node-3',
:target_node => 'node-3',
:vlan_interface => '',
:vc_insecure => true,
:vc_ca_file => '',
:datastore_regex => '.*',
}
end
let(:title) { '1' }
it { is_expected.to compile.with_all_deps }
it { is_expected.to contain_vmware__compute_vmware('1') }
it { is_expected.to contain_class('nova::params') }
# nova-compute.conf must match this template byte-for-byte; note the empty
# vlan_interface param leaves the vlan_interface option commented out.
it do
content = <<-eof
[DEFAULT]
# Driver to use for controlling virtualization.
# Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver,
# ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver
#compute_driver=None
compute_driver=vmwareapi.VMwareVCDriver
# Name of log file to output to. If no default is set, logging will go to
# stdout. This option is ignored if log_config_append is set.
#log_file=None
log_file=nova-compute-vmware-vcenter-srv_cluster2.log
# Name of this node. This can be an opaque identifier. It is not necessarily a
# hostname, FQDN, or IP address. However, the node name must be valid within an
# AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
#host=localhost
host=vcenter-srv_cluster2
# Amount of memory in MB to reserve for the host.
#reserved_host_memory_mb=512
reserved_host_memory_mb = 0
# Force injection to take place on a config drive.
#force_config_drive = False
force_config_drive=False
[vmware]
# The number of times we retry on failures, e.g., socket error, etc.
#api_retry_count=10
api_retry_count=5
# Specify a CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
# The prefix for where cached images are stored. This is NOT the full path -
# just a folder prefix. This should only be used when a datastore cache should
# be shared between compute nodes. Note: this should only be used when the
# compute nodes have a shared file system.
#cache_prefix=None
cache_prefix=$host
# Name of a VMware Cluster ComputeResource.
#cluster_name=None
cluster_name=Cluster2
# Set this value if affected by an increased network latency causing repeated
# characters when typing in a remote console.
#console_delay_seconds=None
# Regex to match the name of a datastore.
#datastore_regex=None
datastore_regex=.*
# Hostname or IP address for connection to VMware vCenter host.
#host_ip=None
host_ip=172.16.0.254
# Password for connection to VMware vCenter host.
#host_password=None
host_password=Qwer!1234
# Port for connection to VMware vCenter host.
#host_port = 443
# Username for connection to VMware vCenter host.
#host_username=None
host_username=administrator@vsphere.local
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure = False
insecure=true
# This option should be configured only when using the NSX-MH Neutron plugin.
# This is the name of the integration bridge on the ESXi. This should not be set
# for any other Neutron plugin. Hence the default value is not set.
#integration_bridge=None
# The maximum number of ObjectContent data objects that should be returned in a
# single result. A positive value will cause the operation to suspend the
# retrieval when the count of objects reaches the specified maximum. The server
# may still limit the count to something less than the configured value. Any
# remaining objects may be retrieved with additional requests.
#maximum_objects = 100
maximum_objects=100
# The PBM default policy. If pbm_wsdl_location is set and there is no defined
# storage policy for the specific request then this policy will be used.
#pbm_default_policy=None
# The PBM status.
#pbm_enabled=False
# PBM service WSDL file location URL.
# e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable
# storage policy based placement of instances.
#pbm_wsdl_location=None
# Identifies a proxy service that provides network access to the
# serial_port_service_uri. This option is ignored if serial_port_service_uri is
# not specified.
#serial_port_proxy_uri=None
# Identifies the remote system that serial port traffic will be sent to. If this
# is not set, no serial ports will be added to the created VMs.
#serial_port_service_uri=None
# The interval used for polling of remote tasks.
#task_poll_interval=0.5
task_poll_interval=5.0
# Whether to use linked clone
#use_linked_clone=True
use_linked_clone=true
# Optional VIM Service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
# Physical ethernet adapter name for vlan networking
#vlan_interface=vmnic0
# VNC starting port.
#vnc_port=5900
# Total number of VNC ports
#vnc_port_total=10000
eof
parameters = {
:ensure => 'present',
:mode => '0600',
:owner => 'nova',
:group => 'nova',
:content => content,
}
is_expected.to contain_file('/etc/nova/nova-compute.conf') \
.with(parameters).that_comes_before('Service[nova-compute]')
end
# Install order: python-oslo.vmware -> nova-compute package -> config file.
it { is_expected.to contain_package('nova-compute').with(
:ensure => 'installed',
:name => 'nova-compute',
).that_comes_before('File[/etc/nova/nova-compute.conf]') }
it { is_expected.to contain_package('python-oslo.vmware').with(
:ensure => 'installed',
).that_comes_before('Package[nova-compute]') }
# NOTE(review): the init-managed service is expected stopped/disabled here —
# presumably the process is supervised elsewhere (see the pcmk HA specs);
# confirm against the manifest under test.
it { is_expected.to contain_service('nova-compute').with(
:ensure => 'stopped',
:name => 'nova-compute',
:enable => false,
) }
end
end
end
end

View File

@ -1,43 +0,0 @@
require 'spec_helper'

# Specs for the parse_vcenter_settings parser function. The function
# normalizes user-supplied vCenter settings (nothing, a scalar, one hash,
# or an array of hashes) into a hash keyed by stringified indices.
describe 'parse_vcenter_settings', :type => :puppet_function do
  # Calling with no argument at all is a usage error.
  it { is_expected.to run.with_params().and_raise_error(ArgumentError) }

  # Inputs that carry no settings (empty array/string, a number, nil)
  # all normalize to an empty hash.
  [[], '', 1, nil].each do |degenerate_input|
    it { is_expected.to run.with_params(degenerate_input).and_return({}) }
  end

  # A bare hash is treated as the settings of a single entry at index '0'.
  it do
    single = { 'a' => '1' }
    is_expected.to run.with_params(single).and_return({ '0' => single })
  end

  # Each element of an array becomes its own indexed entry, in order.
  it do
    settings = [
      { 'a' => '1' },
      { 'a' => '2', 'b' => '3' },
    ]
    expected = { '0' => settings[0], '1' => settings[1] }
    is_expected.to run.with_params(settings).and_return(expected)
  end
end

View File

@ -1 +0,0 @@
--color

View File

@ -1,9 +0,0 @@
# Shared RSpec bootstrap for this module's spec suite.
require 'rubygems'
require 'puppetlabs_spec_helper/module_spec_helper'
require 'rspec-puppet-facts'

# Expose on_supported_os and friends to every example group.
include RspecPuppetFacts

RSpec.configure do |config|
  # Readable aliases for including shared example groups.
  {
    :it_configures => 'configures',
    :it_raises     => 'raises',
  }.each do |alias_name, label|
    config.alias_it_should_behave_like_to(alias_name, label)
  end
end

View File

@ -1,74 +0,0 @@
[DEFAULT]
# Name of this node, which must be valid in an AMQP key. Can be an opaque
# identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
#host=localhost
host=<%= @availability_zone_name %>-<%= @service_name %>
# Inspector to use for inspecting the hypervisor layer. Known inspectors are
# libvirt, hyperv, vmware, xenapi and powervm.
#hypervisor_inspector=libvirt
hypervisor_inspector=<%= @hypervisor_inspector %>
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set.
#default_log_levels=amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN,
#sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, iso8601=WARN,
#requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN,
#websocket=WARN, requests.packages.urllib3.util.retry=WARN,
#urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN,
#stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO,
#dogpile.core.dogpile=INFO
default_log_levels=<%= @default_log_levels %>
# Name of log file to send logging output to. If no default is set, logging will
# go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set.
#log_file=None
log_file=ceilometer-agent-compute-vmware-<%= @availability_zone_name %>-<%= @service_name %>.log
[vmware]
# Number of times a VMware vSphere API may be retried.
#api_retry_count=10
api_retry_count=<%= @api_retry_count %>
# CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
<% if @ceilometer_vcenter_ca_filepath and @ceilometer_vcenter_ca_filepath \
!= "<SERVICE DEFAULT>" and !@ceilometer_vcenter_ca_filepath.empty? -%>
ca_file=<%= @ceilometer_vcenter_ca_filepath %>
<% end -%>
# IP address of the VMware vSphere host.
#host_ip=
host_ip=<%= @vc_host %>
# Password of VMware vSphere.
#host_password=
host_password=<%= @vc_password %>
# Port of the VMware vSphere host.
#host_port=443
# Username of VMware vSphere.
#host_username=
host_username=<%= @vc_user %>
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure=False
insecure=<%= @ceilometer_vcenter_insecure_real %>
# Sleep time in seconds for polling an ongoing async task.
#task_poll_interval=0.5
task_poll_interval=<%= @task_poll_interval %>
# Optional vim service WSDL location, e.g. http://<server>/vimService.wsdl.
# Optional override of the default location for bug work-arounds.
#wsdl_location=None
<% if @wsdl_location -%>
wsdl_location=<%= @wsdl_location %>
<% end -%>

View File

@ -1,113 +0,0 @@
[DEFAULT]
# A list of backend names to use. These backend names should be backed by a
# unique [CONFIG] group with its options (list value)
#enabled_backends = <None>
enabled_backends=VMwareVcVmdk-backend
# Availability zone of this node (string value)
#storage_availability_zone = nova
storage_availability_zone=<%= @availability_zone_name %>-cinder
# Default availability zone for new volumes. If not set, the
# storage_availability_zone option value is used as the default for new volumes.
# (string value)
#default_availability_zone = <None>
default_availability_zone=<%= @availability_zone_name %>-cinder
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
#debug = false
debug=<%= @debug %>
[VMwareVcVmdk-backend]
# Backend override of host value. (string value)
# Deprecated group/name - [DEFAULT]/host
#backend_host = <None>
backend_host=<%= @az_name %>
# The backend name for a given driver implementation (string value)
#volume_backend_name = <None>
volume_backend_name=VMwareVcVmdk-backend
# Driver to use for volume creation (string value)
#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
# Number of times VMware vCenter server API must be retried upon connection
# related issues. (integer value)
#vmware_api_retry_count = 10
vmware_api_retry_count=<%= @vc_api_retry_count %>
# CA bundle file to use in verifying the vCenter server certificate. (string
# value)
#vmware_ca_file = <None>
<% if @cinder_vcenter_ca_filepath and @cinder_vcenter_ca_filepath \
!= "<SERVICE DEFAULT>" and !@cinder_vcenter_ca_filepath.empty? -%>
vmware_ca_file=<%= @cinder_vcenter_ca_filepath %>
<% end -%>
# Name of a vCenter compute cluster where volumes should be created. (multi
# valued)
#vmware_cluster_name =
# IP address for connecting to VMware vCenter server. (string value)
#vmware_host_ip = <None>
vmware_host_ip=<%= @vc_host %>
# Password for authenticating with VMware vCenter server. (string value)
#vmware_host_password = <None>
vmware_host_password=<%= @vc_password %>
# Username for authenticating with VMware vCenter server. (string value)
#vmware_host_username = <None>
vmware_host_username=<%= @vc_user %>
# Optional string specifying the VMware vCenter server version. The driver
# attempts to retrieve the version from VMware vCenter server. Set this
# configuration only if you want to override the vCenter server version. (string
# value)
#vmware_host_version = <None>
<% if !@vc_host_version.empty? %>
vmware_host_version=<%= @vc_host_version %>
<% end %>
# Timeout in seconds for VMDK volume transfer between Cinder and Glance.
# (integer value)
#vmware_image_transfer_timeout_secs = 7200
vmware_image_transfer_timeout_secs=<%= @vc_image_transfer_timeout_secs %>
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "vmware_ca_file" is set. (boolean value)
#vmware_insecure = false
vmware_insecure=<%= @cinder_vcenter_insecure_real %>
# Max number of objects to be retrieved per batch. Query results will be
# obtained in batches from the server and not in one shot. Server may still
# limit the count to something less than the configured value. (integer value)
#vmware_max_objects_retrieval = 100
vmware_max_objects_retrieval=<%= @vc_max_objects_retrieval %>
# The interval (in seconds) for polling remote tasks invoked on VMware vCenter
# server. (floating point value)
#vmware_task_poll_interval = 0.5
vmware_task_poll_interval=<%= @vc_task_poll_interval %>
# Directory where virtual disks are stored during volume backup and restore.
# (string value)
#vmware_tmp_dir = /tmp
vmware_tmp_dir=<%= @vc_tmp_dir %>
# Name of the vCenter inventory folder that will contain Cinder volumes. This
# folder will be created under "OpenStack/<project_folder>", where
# project_folder is of format "Project (<volume_project_id>)". (string value)
#vmware_volume_folder = Volumes
vmware_volume_folder=<%= @vc_volume_folder %>
# Optional VIM service WSDL location, e.g. http://<server>/vimService.wsdl.
# Optional override of the default location for bug work-arounds. (string value)
#vmware_wsdl_location = <None>
<% if !@vc_wsdl_location.empty? %>
vmware_wsdl_location=<%= @vc_wsdl_location %>
<% end %>

View File

@ -1,143 +0,0 @@
[DEFAULT]
# Driver to use for controlling virtualization.
# Options include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver,
# ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver
#compute_driver=None
compute_driver=vmwareapi.VMwareVCDriver
# Name of log file to output to. If no default is set, logging will go to
# stdout. This option is ignored if log_config_append is set.
#log_file=None
log_file=nova-compute-vmware-<%= @availability_zone_name %>-<%= @service_name %>.log
# Name of this node. This can be an opaque identifier. It is not necessarily a
# hostname, FQDN, or IP address. However, the node name must be valid within an
# AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
#host=localhost
host=<%= @availability_zone_name %>-<%= @service_name %>
# Amount of memory in MB to reserve for the host.
#reserved_host_memory_mb=512
reserved_host_memory_mb = 0
# Force injection to take place on a config drive.
#force_config_drive = False
force_config_drive=False
[vmware]
# The number of times we retry on failures, e.g., socket error, etc.
#api_retry_count=10
api_retry_count=<%= @api_retry_count %>
# Specify a CA bundle file to use in verifying the vCenter server certificate.
#ca_file=None
<% if @compute_vcenter_ca_filepath and @compute_vcenter_ca_filepath \
!= "<SERVICE DEFAULT>" and !@compute_vcenter_ca_filepath.empty? -%>
ca_file=<%= @compute_vcenter_ca_filepath %>
<% end -%>
# The prefix for where cached images are stored. This is NOT the full path -
# just a folder prefix. This should only be used when a datastore cache should
# be shared between compute nodes. Note: this should only be used when the
# compute nodes have a shared file system.
#cache_prefix=None
cache_prefix=$host
# Name of a VMware Cluster ComputeResource.
#cluster_name=None
cluster_name=<%= @vc_cluster %>
# Set this value if affected by an increased network latency causing repeated
# characters when typing in a remote console.
#console_delay_seconds=None
# Regex to match the name of a datastore.
#datastore_regex=None
<% if @datastore_regex and !@datastore_regex.empty? -%>
datastore_regex=<%= @datastore_regex %>
<% end -%>
# Hostname or IP address for connection to VMware vCenter host.
#host_ip=None
host_ip=<%= @vc_host %>
# Password for connection to VMware vCenter host.
#host_password=None
host_password=<%= @vc_password %>
# Port for connection to VMware vCenter host.
#host_port = 443
# Username for connection to VMware vCenter host.
#host_username=None
host_username=<%= @vc_user %>
# If true, the vCenter server certificate is not verified. If false, then the
# default CA truststore is used for verification. This option is ignored if
# "ca_file" is set.
#insecure = False
insecure=<%= @compute_vcenter_insecure_real %>
# This option should be configured only when using the NSX-MH Neutron plugin.
# This is the name of the integration bridge on the ESXi. This should not be set
# for any other Neutron plugin. Hence the default value is not set.
#integration_bridge=None
# The maximum number of ObjectContent data objects that should be returned in a
# single result. A positive value will cause the operation to suspend the
# retrieval when the count of objects reaches the specified maximum. The server
# may still limit the count to something less than the configured value. Any
# remaining objects may be retrieved with additional requests.
#maximum_objects = 100
maximum_objects=<%= @maximum_objects %>
# The PBM default policy. If pbm_wsdl_location is set and there is no defined
# storage policy for the specific request then this policy will be used.
#pbm_default_policy=None
# The PBM status.
#pbm_enabled=False
# PBM service WSDL file location URL.
# e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable
# storage policy based placement of instances.
#pbm_wsdl_location=None
# Identifies a proxy service that provides network access to the
# serial_port_service_uri. This option is ignored if serial_port_service_uri is
# not specified.
#serial_port_proxy_uri=None
# Identifies the remote system that serial port traffic will be sent to. If this
# is not set, no serial ports will be added to the created VMs.
#serial_port_service_uri=None
# The interval used for polling of remote tasks.
#task_poll_interval=0.5
task_poll_interval=<%= @task_poll_interval %>
# Whether to use linked clone
#use_linked_clone=True
use_linked_clone=<%= @use_linked_clone %>
# Optional VIM Service WSDL Location e.g http://<server>/vimService.wsdl.
# Optional over-ride to default location for bug work-arounds.
#wsdl_location=None
<% if @wsdl_location -%>
wsdl_location=<%= @wsdl_location %>
<% end -%>
# Physical ethernet adapter name for vlan networking
#vlan_interface=vmnic0
<% if @vlan_interface and !@vlan_interface.empty? -%>
vlan_interface=<%= @vlan_interface %>
<% end -%>
# VNC starting port.
#vnc_port=5900
# Total number of VNC ports
#vnc_port_total=10000

View File

@ -1,7 +0,0 @@
[DEFAULT]
# Name of this node. This can be an opaque identifier. It is not necessarily a
# hostname, FQDN, or IP address. However, the node name must be valid within an
# AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address.
#host=localhost
host=nova-network-ha

View File

@ -1,4 +1,3 @@
# ROLE: compute-vmware
# ROLE: compute
require 'spec_helper'
@ -96,4 +95,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,9 +4,7 @@
# ROLE: mongo
# ROLE: ironic
# ROLE: controller
# ROLE: compute-vmware
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd

View File

@ -3,7 +3,6 @@
# ROLE: mongo
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder
# ROLE: ceph-osd
require 'spec_helper'
@ -13,4 +12,3 @@ manifest = 'dns/dns-client.pp'
describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,7 +4,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -317,4 +316,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,7 +4,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -38,4 +37,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,9 +4,7 @@
# ROLE: mongo
# ROLE: ironic
# ROLE: controller
# ROLE: compute-vmware
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd

View File

@ -43,16 +43,6 @@ describe manifest do
glance_db_password = Noop.hiera_structure 'glance/db_password', 'glance'
glance_db_user = Noop.hiera_structure 'glance/db_user', 'glance'
glance_db_name = Noop.hiera_structure 'glance/db_name', 'glance'
#vCenter
glance_vc_host = Noop.hiera_structure 'glance/vc_host', '172.16.0.254'
glance_vc_user = Noop.hiera_structure 'glance/vc_user', 'administrator@vsphere.local'
glance_vc_password = Noop.hiera_structure 'glance/vc_password', 'Qwer!1234'
glance_vc_datacenter = Noop.hiera_structure 'glance/vc_datacenter', 'Datacenter'
glance_vc_datastore = Noop.hiera_structure 'glance/vc_datastore', 'nfs'
glance_vc_image_dir = Noop.hiera_structure 'glance/vc_image_dir'
glance_vc_insecure = Noop.hiera_structure 'glance/vc_insecure', 'false'
glance_vc_ca_file = Noop.hiera_structure 'glance/vc_ca_file', {'content' => 'RSA', 'name' => 'vcenter-ca.pem'}
glance_password = glance_config.fetch('user_password')
glance_username = glance_config.fetch('user', 'glance')
glance_project_name = glance_config.fetch('tenant', 'services')
@ -274,77 +264,6 @@ describe manifest do
it 'should configure show_multiple_locations' do
should contain_glance_api_config('DEFAULT/show_multiple_locations').with_value(show_multiple_locations)
end
elsif storage_config && storage_config.has_key?('images_vcenter') && storage_config['images_vcenter']
if glance_config
if glance_config.has_key?('show_image_direct_url')
show_image_direct_url = glance_config['show_image_direct_url']
else
show_image_direct_url = true
end
if glance_config.has_key?('show_multiple_locations')
show_multiple_locations = glance_config['show_multiple_locations']
else
show_multiple_locations = true
end
end
let :params do { :glance_backend => 'vmware', } end
it 'should declare vmware backend' do
should contain_class('glance::backend::vsphere').with(:vcenter_host => glance_vc_host)
should contain_class('glance::backend::vsphere').with(:vcenter_user => glance_vc_user)
should contain_class('glance::backend::vsphere').with(:vcenter_password => glance_vc_password)
should contain_class('glance::backend::vsphere').with(:vcenter_datastores => "#{glance_vc_datacenter}:#{glance_vc_datastore}")
should contain_class('glance::backend::vsphere').with(:vcenter_insecure => glance_vc_insecure)
should contain_class('glance::backend::vsphere').with(:vcenter_image_dir => glance_vc_image_dir)
should contain_class('glance::backend::vsphere').with(:vcenter_api_retry_count => '20')
should contain_class('glance::backend::vsphere').with(:vcenter_ca_file => '/etc/glance/vcenter-ca.pem')
should contain_class('glance::backend::vsphere').with(:glare_enabled => true)
end
it 'should configure vmware_server_host setting' do
should contain_glance_api_config('glance_store/vmware_server_host').with_value(glance_vc_host)
should contain_glance_glare_config('glance_store/vmware_server_host').with_value(glance_vc_host)
end
it 'should configure vmware_server_username setting' do
should contain_glance_api_config('glance_store/vmware_server_username').with_value(glance_vc_user)
should contain_glance_glare_config('glance_store/vmware_server_username').with_value(glance_vc_user)
end
it 'should configure vmware_server_password setting' do
should contain_glance_api_config('glance_store/vmware_server_password').with_value(glance_vc_password)
should contain_glance_glare_config('glance_store/vmware_server_password').with_value(glance_vc_password)
end
it 'should configure vmware_datastores setting' do
should contain_glance_api_config('glance_store/vmware_datastores').with_value("#{glance_vc_datacenter}:#{glance_vc_datastore}")
should contain_glance_glare_config('glance_store/vmware_datastores').with_value("#{glance_vc_datacenter}:#{glance_vc_datastore}")
end
it 'should configure vmware_insecure setting' do
should contain_glance_api_config('glance_store/vmware_insecure').with_value(glance_vc_insecure)
should contain_glance_glare_config('glance_store/vmware_insecure').with_value(glance_vc_insecure)
end
it 'should configure vmware_store_image_dir setting' do
should contain_glance_api_config('glance_store/vmware_store_image_dir').with_value(glance_vc_image_dir)
should contain_glance_glare_config('glance_store/vmware_store_image_dir').with_value(glance_vc_image_dir)
end
it 'should configure vmware_api_retry_count setting' do
should contain_glance_api_config('glance_store/vmware_api_retry_count').with_value('20')
should contain_glance_glare_config('glance_store/vmware_api_retry_count').with_value('20')
end
it 'should configure vmware_ca_file setting' do
should contain_glance_api_config('glance_store/vmware_ca_file').with_value('/etc/glance/vcenter-ca.pem')
should contain_glance_glare_config('glance_store/vmware_ca_file').with_value('/etc/glance/vcenter-ca.pem')
end
it 'should configure default_store setting' do
should contain_glance_api_config('glance_store/default_store').with_value('vsphere')
should contain_glance_glare_config('glance_store/default_store').with_value('vsphere')
end
it 'should configure stores setting' do
should contain_glance_api_config('glance_store/stores').with_value('glance.store.vmware_datastore.Store,glance.store.http.Store')
should contain_glance_glare_config('glance_store/stores').with_value('glance.store.vmware_datastore.Store,glance.store.http.Store')
end
it 'should configure show_image_direct_url' do
should contain_glance_api_config('DEFAULT/show_image_direct_url').with_value(show_image_direct_url)
end
it 'should configure show_multiple_locations' do
should contain_glance_api_config('DEFAULT/show_multiple_locations').with_value(show_multiple_locations)
end
else
if glance_config
if glance_config.has_key?('show_image_direct_url')

View File

@ -5,8 +5,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: compute-vmware
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -29,6 +27,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,9 +4,7 @@
# ROLE: mongo
# ROLE: ironic
# ROLE: controller
# ROLE: compute-vmware
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -53,4 +51,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,9 +4,7 @@
# ROLE: mongo
# ROLE: ironic
# ROLE: controller
# ROLE: compute-vmware
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -31,4 +29,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -4,9 +4,7 @@
# ROLE: mongo
# ROLE: ironic
# ROLE: controller
# ROLE: compute-vmware
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -58,4 +56,3 @@ describe manifest do
end
test_ubuntu_and_centos manifest
end

View File

@ -4,7 +4,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd

View File

@ -5,7 +5,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -75,4 +74,3 @@ describe manifest do
end
test_ubuntu_and_centos manifest
end

View File

@ -5,7 +5,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd

View File

@ -5,8 +5,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: compute-vmware
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -47,4 +45,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -5,7 +5,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -76,4 +75,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -5,7 +5,6 @@
# ROLE: ironic
# ROLE: controller
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder-block-device
# ROLE: cinder
# ROLE: ceph-osd
@ -20,4 +19,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

View File

@ -1,7 +1,6 @@
# ROLE: primary-mongo
# ROLE: mongo
# ROLE: compute
# ROLE: cinder-vmware
# ROLE: cinder
# ROLE: ceph-osd
@ -48,4 +47,3 @@ describe manifest do
test_ubuntu_and_centos manifest
end

Some files were not shown because too many files have changed in this diff Show More