Merge "Remove provision using Cobbler profiles (used before 6.1)"

This commit is contained in:
Jenkins 2016-08-08 17:05:25 +00:00 committed by Gerrit Code Review
commit fa254cf7ca
11 changed files with 38 additions and 1201 deletions

View File

@ -14,7 +14,6 @@ Gem::Specification.new do |s|
s.add_dependency 'mcollective-client', '>= 2.4.1'
s.add_dependency 'symboltable', '>= 1.0.2'
s.add_dependency 'rest-client', '>= 1.6.7'
s.add_dependency 'net-ssh-multi', '>= 1.1'
# Astute as service
s.add_dependency 'bunny', '>= 2.0'

View File

@ -69,11 +69,6 @@ require 'fuel_deployment'
Dir[File.dirname(__FILE__) + path].each{ |f| require f }
end
require 'astute/ssh'
require 'astute/ssh_actions/ssh_erase_nodes'
require 'astute/ssh_actions/ssh_hard_reboot'
# Server
require 'astute/server/worker'
require 'astute/server/server'

View File

@ -85,7 +85,6 @@ module Astute
conf[:keys_dst_dir] = '/var/lib/astute' # folder where keys will be uploaded. Warning!
conf[:max_nodes_per_call] = 50 # how many nodes to deploy simultaneously
conf[:max_nodes_to_provision] = 50 # how many nodes to provision simultaneously
conf[:ssh_retries] = 5 # SSH tries to call ssh client before failure
conf[:ssh_retry_timeout] = 30 # SSH sleeps for ## sec between retries
conf[:max_nodes_per_remove_call] = 10 # how many nodes to remove in one call

View File

@ -26,218 +26,7 @@ module Astute
return @default_patterns.keys
end
ha_controller_pattern = {'type' => 'components-list',
'chunk_size' => 40000,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Galera', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Galera/File[/etc/mysql]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Galera/Package[galera]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Galera/Package[MySQL-client]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Galera/Package[MySQL-server]/ensure) created', 'progress' => 0.6},
{'pattern' => "/Stage[main]/Galera/Service[mysql-galera]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.8},
{'pattern' => '/Stage[main]/Galera/Exec[wait-for-synced-state]/returns) executed successfully', 'progress' => 0.9},
{'pattern' => '/Stage[main]/Galera::Galera_master_final_config/Exec'\
'[first-galera-node-final-config]/returns) executed successfully', 'progress' => 1},
]
},
{'name' => 'Glance', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Glance::Db::Mysql/Mysql::Db[glance]/Database[glance]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Glance::Db::Mysql/Glance::Db::Mysql::Host_access[]/'\
'Database_user[glance@]/ensure) created', 'progress' => 0.7},
{'pattern' => '/Stage[main]/Glance::Registry/Glance_registry_config[keystone_authtoken/'\
'admin_user]/value) value changed', 'progress' => 0.71},
{'pattern' => '/Stage[main]/Glance::Keystone::Auth/Keystone_endpoint[glance]/ensure) created', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Glance::Registry/Service[glance-registry]/ensure)"\
" ensure changed 'stopped' to 'running'", 'progress' => 0.95},
{'pattern' => "/Stage[main]/Glance::Api/Service[glance-api]/ensure) ensure changed"\
" 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Haproxy', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Haproxy/Concat[/etc/haproxy/haproxy.cfg]/File[/var/lib/puppet/'\
'concat/_etc_haproxy_haproxy.cfg]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Haproxy/Concat[/etc/haproxy/haproxy.cfg]/File[/var/lib/puppet/'\
'concat/_etc_haproxy_haproxy.cfg/fragments.concat.out]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Haproxy/Concat[/etc/haproxy/haproxy.cfg]/Exec[concat_/etc/haproxy/'\
'haproxy.cfg]/returns) executed successfully', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Haproxy/Service[haproxy]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Horizon', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Horizon/Package[mod_wsgi]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Horizon/Package[openstack-dashboard]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Horizon/File[/etc/openstack-dashboard/'\
'local_settings]/content) content changed', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Horizon/Service[\$::horizon::params::http_service]/"\
"ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Keepalived', 'weight' => 1, 'patterns' => [
{'pattern' => '/Stage[main]/Keepalived::Install/Package[keepalived]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Keepalived::Config/Concat[/etc/keepalived/keepalived.conf]/'\
'File[/etc/keepalived/keepalived.conf]/content) content changed', 'progress' => 0.6},
{'pattern' => "/Stage[main]/Keepalived::Service/Service[keepalived]/ensure) ensure"\
" changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Keystone', 'weight' => 1, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Keystone::Db::Mysql/Mysql::Db[keystone]/Database[keystone]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Keystone/Package[keystone]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Keystone/Keystone_config[DEFAULT/admin_port]/ensure) created', 'progress' => 0.7},
{'pattern' => "/Stage[main]/Keystone/Service[keystone]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.8},
{'pattern' => '/Stage[main]/Keystone::Roles::Admin/Keystone_user_role[admin@admin]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Memcached', 'weight' => 1, 'patterns' => [
{'pattern' => '/Stage[main]/Memcached/User[memcached]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Memcached/Package[memcached]/ensure) created', 'progress' => 0.4},
{'pattern' => "/Stage[main]/Memcached/Service[memcached]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Rabbitmq', 'weight' => 1, 'patterns' => [
{'pattern' => '/Stage[main]/Rabbitmq::Server/Package[rabbitmq-server]/ensure) created', 'progress' => 0.3},
{'pattern' => "/Stage[main]/Rabbitmq::Service/Service[rabbitmq-server]/ensure) ensure changed 'stopped' to 'running", 'progress' => 0.7},
{'pattern' => '/Stage[main]/Rabbitmq::Server/Rabbitmq_user[guest]/ensure) removed', 'progress' => 1},
]
},
{'name' => 'Rsync/Xinetd', 'weight' => 1, 'patterns' => [
{'pattern' => '/Stage[main]/Xinetd/Package[xinetd]/ensure) created', 'progress' => 0.2},
{'pattern' => '(/Stage[main]/Xinetd/File[/etc/xinetd.conf]/content) content changed', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Rsync::Server/File[/etc/rsync.d]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Rsync::Server/Xinetd::Service[rsync]/File[/etc/xinetd.d/rsync]/content) content changed', 'progress' => 1},
]
},
{'name' => 'Swift', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Swift::Xfs/Package[xfsprogs]/ensure) created', 'progress' => 0.01},
{'pattern' => '/Stage[main]/Swift/File[/etc/swift/swift.conf]/content) content changed', 'progress' => 0.05},
{'pattern' => '/Stage[main]/Swift/File[/home/swift]/ensure) created', 'progress' => 0.07},
{'pattern' => '/Stage[main]/Swift::Storage::All/File[/srv/node]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Swift::Storage::Account/Swift::Storage::Generic[account]/File'\
'[/etc/swift/account-server/]/ensure) created', 'progress' => 0.12},
{'pattern' => '/Stage[main]/Swift::Storage::Object/Swift::Storage::Generic[object]/Package'\
'[swift-object]/ensure) created', 'progress' => 0.15},
{'pattern' => "/Stage[main]/Swift::Storage::Account/Swift::Storage::Generic[account]/Service"\
"[swift-account]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.18},
{'pattern' => "/Stage[main]/Swift::Storage::Object/Swift::Storage::Generic[object]/Service"\
"[swift-object]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.2},
{'pattern' => '/Stage[main]/Swift::Keystone::Auth/Keystone_service[swift]/ensure) created', 'progress' => 0.23},
{'pattern' => '/Stage[main]/Swift::Keystone::Auth/Keystone_user_role[swift@services]/ensure) created', 'progress' => 0.28},
{'pattern' => '/Stage\[main\]/Swift::Storage::Container/Ring_container_device\[[0-9.:]+\]/ensure\) created',
'regexp' => true, 'progress' => 0.33},
{'pattern' => "/Stage[main]/Swift::Storage::Account/Swift::Storage::Generic[account]/File[/etc/swift/"\
"account-server/]/group) group changed 'root' to 'swift'", 'progress' => 0.36},
{'pattern' => '/Stage[main]/Swift::Ringbuilder/Swift::Ringbuilder::Rebalance[object]/Exec'\
'[hours_passed_object]/returns) executed successfully', 'progress' => 0.39},
{'pattern' => '/Stage[main]/Swift::Ringbuilder/Swift::Ringbuilder::Rebalance[account]/Exec'\
'[hours_passed_account]/returns) executed successfully', 'progress' => 0.42},
{'pattern' => '/Stage[main]/Swift::Ringbuilder/Swift::Ringbuilder::Rebalance[account]/Exec'\
'[rebalance_account]/returns) executed successfully', 'progress' => 0.44},
{'pattern' => '/Stage[main]/Swift::Ringbuilder/Swift::Ringbuilder::Rebalance[container]/Exec'\
'[hours_passed_container]/returns) executed successfully', 'progress' => 0.49},
{'pattern' => '/Stage[main]/Swift::Ringbuilder/Swift::Ringbuilder::Rebalance[container]/Exec'\
'[rebalance_container]/returns) executed successfully', 'progress' => 0.52},
{'pattern' => '/Stage[main]/Swift::Proxy/Package[swift-proxy]/ensure) created', 'progress' => 0.55},
{'pattern' => '/Service[swift-container-replicator]/ensure) ensure changed \'stopped\'', 'progress' => 0.9},
{'pattern' => '/Service[swift-accaunt-replicator]/ensure) ensure changed \'stopped\'', 'progress' => 0.95},
{'pattern' => '/Service[swift-object-replicator]/ensure) ensure changed \'stopped\'', 'progress' => 1},
]
},
{'name' => 'Nova', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Utilities/Package[euca2ools]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[parted]/ensure) created', 'progress' => 0.11},
{'pattern' => '/Stage[main]/Nova::Api/Nova::Generic_service[api]/Package[nova-api]/ensure) created', 'progress' => 0.13},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[unzip]/ensure) created', 'progress' => 0.15},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Package[python-numpy]/ensure) created', 'progress' => 0.2},
{'pattern' => '(/Stage[main]/Nova::Utilities/Package[libguestfs-tools-c]/ensure) created', 'progress' => 0.25},
{'pattern' => '/Stage[main]/Nova::Rabbitmq/Rabbitmq_user_permissions[nova@/]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Nova::Db::Mysql/Mysql::Db[nova]/Database[nova]/ensure) created', 'progress' => 0.35},
{'pattern' => "/Stage[main]/Nova::Db::Mysql/Mysql::Db[nova]/Database_grant"\
"[nova@127.0.0.1/nova]/privileges) privileges changed '' to 'all'", 'progress' => 0.4},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Nova::Generic_service[vncproxy]/Package'\
'[nova-vncproxy]/ensure) created', 'progress' => 0.45},
{'pattern' => '/Stage[main]/Nova::Keystone::Auth/Keystone_service[nova_volume]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Nova::Keystone::Auth/Keystone_user_role[nova@services]/ensure) created', 'progress' => 0.55},
{'pattern' => '/Stage[main]/Nova/Exec[post-nova_config]/returns) Nova config has changed', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Nova::Api/Exec[nova-db-sync]/returns) executed successfully', 'progress' => 0.7},
{'pattern' => "/Stage[main]/Nova::Consoleauth/Nova::Generic_service[consoleauth]/Service"\
"[nova-consoleauth]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.85},
{'pattern' => '/Stage[main]/Nova::Network/Nova::Manage::Network[nova-vm-net]/Nova_network'\
'nova-vm-net]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Openstack', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Openstack::Firewall/File[iptables]/ensure) defined content as', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Openstack::Glance/Package[swift]/ensure) created', 'progress' => 0.15},
{'pattern' => '/Stage[main]/Openstack::Auth_file/File[/root/openrc]/ensure) defined content as', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Openstack::Controller_ha/Package[socat]/ensure) created', 'progress' => 0.25},
{'pattern' => '/Stage[main]/Openstack::Swift::Storage-node/Swift::Storage::Loopback[1]/File[/srv/loopback-device]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Openstack::Controller_ha/Exec[wait-for-haproxy-mysql-backend]/returns) executed successfully', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Openstack::Controller/Nova_config[DEFAULT/memcached_servers]/ensure) created', 'progress' => 0.45},
{'pattern' => '/Stage[main]/Openstack::Nova::Controller/Nova_config[DEFAULT/multi_host]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Openstack::Firewall/Exec[startup-firewall]/returns) executed successfully', 'progress' => 0.65},
{'pattern' => '/Stage[main]/Openstack::Swift::Proxy/Ring_object_device\[[0-9.:]+\]/ensure\) created',
'regexp' => true, 'progress' => 0.75},
{'pattern' => '/Stage[main]/Openstack::Swift::Proxy/Ring_container_device\[[0-9.:]+\]/ensure\) created',
'regexp' => true, 'progress' => 0.8},
{'pattern' => '/Stage[main]/Openstack::Img::Cirros/Package[cirros-testvm]/ensure) created', 'progress' => 1},
]
},
]
}
@default_patterns = {
'centos-anaconda-log-supposed-time-baremetal' => # key for default baremetal provision pattern
{'type' => 'supposed-time',
'chunk_size' => 10000,
'date_format' => '%Y-%m-%dT%H:%M:%S',
'date_regexp' => '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}',
'pattern_list' => [
{'pattern' => 'Running anaconda script', 'supposed_time' => 60},
{'pattern' => 'moving (1) to step enablefilesystems', 'supposed_time' => 3},
{'pattern' => "notifying kernel of 'change' event on device", 'supposed_time' => 6},
{'pattern' => 'Preparing to install packages', 'supposed_time' => 9},
{'pattern' => 'Installing glibc-common-2.12', 'supposed_time' => 9},
{'pattern' => 'Installing bash-4.1.2', 'supposed_time' => 11},
{'pattern' => 'Installing coreutils-8.4-19', 'supposed_time' => 20},
{'pattern' => 'Installing centos-release-6-3', 'supposed_time' => 21},
{'pattern' => 'Installing attr-2.4.44', 'supposed_time' => 23},
{'pattern' => 'leaving (1) step installpackages', 'supposed_time' => 60},
{'pattern' => 'moving (1) to step postscripts', 'supposed_time' => 4},
{'pattern' => 'leaving (1) step postscripts', 'supposed_time' => 130},
{'pattern' => 'wait while node rebooting', 'supposed_time' => 20},
].reverse,
'filename' => 'install/anaconda.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['hostname'] %>/<%= @pattern_spec['filename'] %>",
},
'centos-anaconda-log-supposed-time-kvm' => # key for default kvm provision pattern
{'type' => 'supposed-time',
'chunk_size' => 10000,
'date_format' => '%Y-%m-%dT%H:%M:%S',
'date_regexp' => '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}',
'pattern_list' => [
{'pattern' => 'Running anaconda script', 'supposed_time' => 60},
{'pattern' => 'moving (1) to step enablefilesystems', 'supposed_time' => 3},
{'pattern' => "notifying kernel of 'change' event on device", 'supposed_time' => 30},
{'pattern' => 'Preparing to install packages', 'supposed_time' => 12},
{'pattern' => 'Installing glibc-common-2.12', 'supposed_time' => 15},
{'pattern' => 'Installing bash-4.1.2', 'supposed_time' => 15},
{'pattern' => 'Installing coreutils-8.4-19', 'supposed_time' => 33},
{'pattern' => 'Installing centos-release-6-3', 'supposed_time' => 21},
{'pattern' => 'Installing attr-2.4.44', 'supposed_time' => 48},
{'pattern' => 'leaving (1) step installpackages', 'supposed_time' => 100},
{'pattern' => 'moving (1) to step postscripts', 'supposed_time' => 4},
{'pattern' => 'leaving (1) step postscripts', 'supposed_time' => 200},
{'pattern' => 'wait while node rebooting', 'supposed_time' => 20},
].reverse,
'filename' => 'install/anaconda.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['hostname'] %>/<%= @pattern_spec['filename'] %>",
},
'provisioning-image-building' =>
{'type' => 'supposed-time',
'chunk_size' => 10000,
@ -273,326 +62,6 @@ module Astute
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['hostname'] %>/<%= @pattern_spec['filename'] %>",
},
'ubuntu-provisioning' =>
{'type' => 'supposed-time',
'chunk_size' => 10000,
'date_format' => '%Y-%m-%dT%H:%M:%S',
'date_regexp' => '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}',
'pattern_list' => [
{'pattern' => "Menu item 'choose-mirror' selected", 'supposed_time' => 120},
{'pattern' => "Menu item 'user-setup-udeb' selected", 'supposed_time' => 60},
{'pattern' => "Menu item 'apt-setup-udeb' selected", 'supposed_time' => 60},
{'pattern' => "Menu item 'finish-install' selected", 'supposed_time' => 60},
{'pattern' => 'Processing next logs (fake pattern)', 'supposed_time' => 90},
].reverse,
'filename' => 'main-menu.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['hostname'] %>/<%= @pattern_spec['filename'] %>",
},
'puppet-log-components-list-ha_compact-primary-controller' => ha_controller_pattern,
'puppet-log-components-list-ha_compact-controller' => ha_controller_pattern, # key for default HA deploy pattern
'puppet-log-components-list-ha_compact-compute' =>
{'type' => 'components-list',
'chunk_size' => 40000,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Mysql', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Mysql::Python/Package[python-mysqldb]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Nova', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Utilities/Package[euca2ools]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[parted]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Nova::Api/Nova::Generic_service[api]/Package[nova-api]/ensure) created', 'progress' => 0.28},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[unzip]/ensure) created', 'progress' => 0.32},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Package[python-numpy]/ensure) created', 'progress' => 0.35},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[libguestfs-tools-c]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Nova::Rabbitmq/Rabbitmq_user_permissions[nova@/]/ensure) created', 'progress' => 0.43},
{'pattern' => '/Stage[main]/Nova/Exec[post-nova_config]/returns) Nova config has changed', 'progress' => 0.8},
{'pattern' => '/Stage[main]/Nova::Api/Exec[nova-db-sync]/returns) executed successfully', 'progress' => 0.85},
{'pattern' => '/Stage[main]/Nova::Network/Nova::Manage::Network[nova-vm-net]/Nova_network'\
'nova-vm-net]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Nova::Compute', 'weight' => 15, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Compute/Package[bridge-utils]/ensure) created', 'progress' => 0.15},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Exec[symlink-qemu-kvm]/returns) executed successfully', 'progress' => 0.25},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Package[libvirt]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Package[dnsmasq-utils]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Nova_config[DEFAULT/vncserver_listen]/ensure) created', 'progress' => 0.55},
{'pattern' => '/Stage[main]/Nova::Compute/Nova::Generic_service[compute]/Package[nova-compute]/ensure) created', 'progress' => 0.88},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Package[avahi]/ensure) created', 'progress' => 0.9},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Service[messagebus]/ensure) ensure changed', 'progress' => 0.93},
{'pattern' => '/Stage[main]/Nova::Compute/Nova::Generic_service[compute]/Service[nova-compute]/ensure) ensure changed', 'progress' => 0.97},
{'pattern' => '/Stage[main]/Nova::Compute/Nova::Generic_service[compute]/Service[nova-compute]) Triggered', 'progress' => 1},
]
},
{'name' => 'Openstack', 'weight' => 2, 'patterns' => [
{'pattern' => '/Stage[main]/Openstack::Compute/Nova_config[DEFAULT/metadata_host]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Openstack::Compute/Nova_config[DEFAULT/memcached_servers]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Openstack::Compute/Augeas[sysconfig-libvirt]/returns) executed successfully', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Openstack::Compute/Nova_config[DEFAULT/multi_host]/ensure) created', 'progress' => 0.8},
{'pattern' => '/Stage[main]/Openstack::Compute/Augeas[libvirt-conf]/returns) executed successfully', 'progress' => 1},
]
},
]
},
'puppet-log-components-list-singlenode-controller' =>
{'type' => 'components-list',
'chunk_size' => 40000,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Glance', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Glance::Db::Mysql/Mysql::Db[glance]/Database[glance]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Glance::Db::Mysql/Glance::Db::Mysql::Host_access[]/'\
'Database_user[glance@]/ensure) created', 'progress' => 0.7},
{'pattern' => '/Stage[main]/Glance::Registry/Glance_registry_config[keystone_authtoken/'\
'admin_user]/value) value changed', 'progress' => 0.71},
{'pattern' => '/Stage[main]/Glance::Keystone::Auth/Keystone_endpoint[glance]/ensure) created', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Glance::Registry/Service[glance-registry]/ensure)"\
" ensure changed 'stopped' to 'running'", 'progress' => 0.95},
{'pattern' => "/Stage[main]/Glance::Api/Service[glance-api]/ensure) ensure changed"\
" 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Horizon', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Horizon/Package[mod_wsgi]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Horizon/Package[openstack-dashboard]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Horizon/File[/etc/openstack-dashboard/'\
'local_settings]/content) content changed', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Horizon/Service[\$::horizon::params::http_service]/"\
"ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Keystone::Db::Mysql/Mysql::Db[keystone]/Database[keystone]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Keystone/Package[keystone]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Keystone/Keystone_config[DEFAULT/admin_port]/ensure) created', 'progress' => 0.7},
{'pattern' => "/Stage[main]/Keystone/Service[keystone]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.8},
{'pattern' => '/Stage[main]/Keystone::Roles::Admin/Keystone_user_role[admin@admin]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Memcached', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Memcached/User[memcached]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Memcached/Package[memcached]/ensure) created', 'progress' => 0.6},
{'pattern' => "/Stage[main]/Memcached/Service[memcached]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Rabbitmq', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Rabbitmq::Server/Package[rabbitmq-server]/ensure) created', 'progress' => 0.3},
{'pattern' => "/Stage[main]/Rabbitmq::Service/Service[rabbitmq-server]/ensure) ensure changed 'stopped' to 'running", 'progress' => 0.7},
{'pattern' => '/Stage[main]/Rabbitmq::Server/Rabbitmq_user[guest]/ensure) removed', 'progress' => 1},
]
},
{'name' => 'Nova', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Utilities/Package[euca2ools]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[parted]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Nova::Api/Nova::Generic_service[api]/Package[nova-api]/ensure) created', 'progress' => 0.28},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[unzip]/ensure) created', 'progress' => 0.32},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Package[python-numpy]/ensure) created', 'progress' => 0.35},
{'pattern' => '(/Stage[main]/Nova::Utilities/Package[libguestfs-tools-c]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Nova::Rabbitmq/Rabbitmq_user_permissions[nova@/]/ensure) created', 'progress' => 0.43},
{'pattern' => '/Stage[main]/Nova::Db::Mysql/Mysql::Db[nova]/Database[nova]/ensure) created', 'progress' => 0.48},
{'pattern' => "/Stage[main]/Nova::Db::Mysql/Mysql::Db[nova]/Database_grant"\
"[nova@127.0.0.1/nova]/privileges) privileges changed '' to 'all'", 'progress' => 0.51},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Nova::Generic_service[vncproxy]/Package'\
'[nova-vncproxy]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Nova::Keystone::Auth/Keystone_service[nova_volume]/ensure) created', 'progress' => 0.68},
{'pattern' => '/Stage[main]/Nova::Keystone::Auth/Keystone_user_role[nova@services]/ensure) created', 'progress' => 0.75},
{'pattern' => '/Stage[main]/Nova/Exec[post-nova_config]/returns) Nova config has changed', 'progress' => 0.8},
{'pattern' => '/Stage[main]/Nova::Api/Exec[nova-db-sync]/returns) executed successfully', 'progress' => 0.85},
{'pattern' => "/Stage[main]/Nova::Consoleauth/Nova::Generic_service[consoleauth]/Service"\
"[nova-consoleauth]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.9},
{'pattern' => '/Stage[main]/Nova::Network/Nova::Manage::Network[nova-vm-net]/Nova_network'\
'nova-vm-net]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Openstack', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Openstack::Firewall/File[iptables]/ensure) defined content as', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Openstack::Glance/Package[swift]/ensure) created', 'progress' => 0.15},
{'pattern' => '/Stage[main]/Openstack::Auth_file/File[/root/openrc]/ensure) defined content as', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Openstack::Controller/Nova_config[DEFAULT/memcached_servers]/ensure) created', 'progress' => 0.45},
{'pattern' => '/Stage[main]/Openstack::Nova::Controller/Nova_config[DEFAULT/multi_host]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Openstack::Firewall/Exec[startup-firewall]/returns) executed successfully', 'progress' => 0.65},
{'pattern' => '/Stage[main]/Openstack::Img::Cirros/Package[cirros-testvm]/ensure) created', 'progress' => 1},
]
},
]
},
'puppet-log-components-list-multinode-controller' =>
{'type' => 'components-list',
'chunk_size' => 40000,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Glance', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Glance/Package[glance]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Glance::Db::Mysql/Mysql::Db[glance]/Database[glance]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Glance::Db::Mysql/Glance::Db::Mysql::Host_access[]/'\
'Database_user[glance@]/ensure) created', 'progress' => 0.7},
{'pattern' => '/Stage[main]/Glance::Registry/Glance_registry_config[keystone_authtoken/'\
'admin_user]/value) value changed', 'progress' => 0.71},
{'pattern' => '/Stage[main]/Glance::Keystone::Auth/Keystone_endpoint[glance]/ensure) created', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Glance::Registry/Service[glance-registry]/ensure)"\
" ensure changed 'stopped' to 'running'", 'progress' => 0.95},
{'pattern' => "/Stage[main]/Glance::Api/Service[glance-api]/ensure) ensure changed"\
" 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Horizon', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Horizon/Package[mod_wsgi]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Horizon/Package[openstack-dashboard]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Horizon/File[/etc/openstack-dashboard/'\
'local_settings]/content) content changed', 'progress' => 0.8},
{'pattern' => "/Stage[main]/Horizon/Service[\$::horizon::params::http_service]/"\
"ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Keystone::Db::Mysql/Mysql::Db[keystone]/Database[keystone]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Keystone/Package[keystone]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Keystone/Keystone_config[DEFAULT/admin_port]/ensure) created', 'progress' => 0.7},
{'pattern' => "/Stage[main]/Keystone/Service[keystone]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.8},
{'pattern' => '/Stage[main]/Keystone::Roles::Admin/Keystone_user_role[admin@admin]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Memcached', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Memcached/User[memcached]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Memcached/Package[memcached]/ensure) created', 'progress' => 0.6},
{'pattern' => "/Stage[main]/Memcached/Service[memcached]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 1},
]
},
{'name' => 'Rabbitmq', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Rabbitmq::Server/Package[rabbitmq-server]/ensure) created', 'progress' => 0.3},
{'pattern' => "/Stage[main]/Rabbitmq::Service/Service[rabbitmq-server]/ensure) ensure changed 'stopped' to 'running", 'progress' => 0.7},
{'pattern' => '/Stage[main]/Rabbitmq::Server/Rabbitmq_user[guest]/ensure) removed', 'progress' => 1},
]
},
{'name' => 'Nova', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Utilities/Package[euca2ools]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[parted]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Nova::Api/Nova::Generic_service[api]/Package[nova-api]/ensure) created', 'progress' => 0.28},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[unzip]/ensure) created', 'progress' => 0.32},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Package[python-numpy]/ensure) created', 'progress' => 0.35},
{'pattern' => '(/Stage[main]/Nova::Utilities/Package[libguestfs-tools-c]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Nova::Rabbitmq/Rabbitmq_user_permissions[nova@/]/ensure) created', 'progress' => 0.43},
{'pattern' => '/Stage[main]/Nova::Db::Mysql/Mysql::Db[nova]/Database[nova]/ensure) created', 'progress' => 0.48},
{'pattern' => "/Stage[main]/Nova::Db::Mysql/Mysql::Db[nova]/Database_grant"\
"[nova@127.0.0.1/nova]/privileges) privileges changed '' to 'all'", 'progress' => 0.51},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Nova::Generic_service[vncproxy]/Package'\
'[nova-vncproxy]/ensure) created', 'progress' => 0.6},
{'pattern' => '/Stage[main]/Nova::Keystone::Auth/Keystone_service[nova_volume]/ensure) created', 'progress' => 0.68},
{'pattern' => '/Stage[main]/Nova::Keystone::Auth/Keystone_user_role[nova@services]/ensure) created', 'progress' => 0.75},
{'pattern' => '/Stage[main]/Nova/Exec[post-nova_config]/returns) Nova config has changed', 'progress' => 0.8},
{'pattern' => '/Stage[main]/Nova::Api/Exec[nova-db-sync]/returns) executed successfully', 'progress' => 0.85},
{'pattern' => "/Stage[main]/Nova::Consoleauth/Nova::Generic_service[consoleauth]/Service"\
"[nova-consoleauth]/ensure) ensure changed 'stopped' to 'running'", 'progress' => 0.9},
{'pattern' => '/Stage[main]/Nova::Network/Nova::Manage::Network[nova-vm-net]/Nova_network'\
'nova-vm-net]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Openstack', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Openstack::Firewall/File[iptables]/ensure) defined content as', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Openstack::Glance/Package[swift]/ensure) created', 'progress' => 0.15},
{'pattern' => '/Stage[main]/Openstack::Auth_file/File[/root/openrc]/ensure) defined content as', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Openstack::Controller/Nova_config[DEFAULT/memcached_servers]/ensure) created', 'progress' => 0.45},
{'pattern' => '/Stage[main]/Openstack::Nova::Controller/Nova_config[DEFAULT/multi_host]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Openstack::Firewall/Exec[startup-firewall]/returns) executed successfully', 'progress' => 0.65},
{'pattern' => '/Stage[main]/Openstack::Img::Cirros/Package[cirros-testvm]/ensure) created', 'progress' => 1},
]
},
]
},
'puppet-log-components-list-multinode-compute' =>
{'type' => 'components-list',
'chunk_size' => 40000,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>",
'components_list' => [
{'name' => 'Keystone', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Keystone::Python/Package[python-keystone]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Mysql', 'weight' => 10, 'patterns' => [
{'pattern' => '/Stage[main]/Mysql::Python/Package[python-mysqldb]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Nova', 'weight' => 5, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Utilities/Package[euca2ools]/ensure) created', 'progress' => 0.1},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[parted]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Nova::Api/Nova::Generic_service[api]/Package[nova-api]/ensure) created', 'progress' => 0.28},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[unzip]/ensure) created', 'progress' => 0.32},
{'pattern' => '/Stage[main]/Nova::Vncproxy/Package[python-numpy]/ensure) created', 'progress' => 0.35},
{'pattern' => '/Stage[main]/Nova::Utilities/Package[libguestfs-tools-c]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Nova::Rabbitmq/Rabbitmq_user_permissions[nova@/]/ensure) created', 'progress' => 0.43},
{'pattern' => '/Stage[main]/Nova/Exec[post-nova_config]/returns) Nova config has changed', 'progress' => 0.8},
{'pattern' => '/Stage[main]/Nova::Api/Exec[nova-db-sync]/returns) executed successfully', 'progress' => 0.85},
{'pattern' => '/Stage[main]/Nova::Network/Nova::Manage::Network[nova-vm-net]/Nova_network'\
'nova-vm-net]/ensure) created', 'progress' => 1},
]
},
{'name' => 'Nova::Compute', 'weight' => 15, 'patterns' => [
{'pattern' => '/Stage[main]/Nova::Compute/Package[bridge-utils]/ensure) created', 'progress' => 0.15},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Exec[symlink-qemu-kvm]/returns) executed successfully', 'progress' => 0.25},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Package[libvirt]/ensure) created', 'progress' => 0.3},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Package[dnsmasq-utils]/ensure) created', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Nova_config[DEFAULT/vncserver_listen]/ensure) created', 'progress' => 0.55},
{'pattern' => '/Stage[main]/Nova::Compute/Nova::Generic_service[compute]/Package[nova-compute]/ensure) created', 'progress' => 0.88},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Package[avahi]/ensure) created', 'progress' => 0.9},
{'pattern' => '/Stage[main]/Nova::Compute::Libvirt/Service[messagebus]/ensure) ensure changed', 'progress' => 0.93},
{'pattern' => '/Stage[main]/Nova::Compute/Nova::Generic_service[compute]/Service[nova-compute]/ensure) ensure changed', 'progress' => 0.97},
{'pattern' => '/Stage[main]/Nova::Compute/Nova::Generic_service[compute]/Service[nova-compute]) Triggered', 'progress' => 1},
]
},
{'name' => 'Openstack', 'weight' => 2, 'patterns' => [
{'pattern' => '/Stage[main]/Openstack::Compute/Nova_config[DEFAULT/metadata_host]/ensure) created', 'progress' => 0.2},
{'pattern' => '/Stage[main]/Openstack::Compute/Nova_config[DEFAULT/memcached_servers]/ensure) created', 'progress' => 0.4},
{'pattern' => '/Stage[main]/Openstack::Compute/Augeas[sysconfig-libvirt]/returns) executed successfully', 'progress' => 0.5},
{'pattern' => '/Stage[main]/Openstack::Compute/Nova_config[DEFAULT/multi_host]/ensure) created', 'progress' => 0.8},
{'pattern' => '/Stage[main]/Openstack::Compute/Augeas[libvirt-conf]/returns) executed successfully', 'progress' => 1},
]
},
]
},
'puppet-log-components-list-ha_compact-cinder' => {
'type' => 'count-lines',
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'expected_line_number' => 345,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>"
},
'puppet-log-components-list-multinode-cinder' => {
'type' => 'count-lines',
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'expected_line_number' => 345,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>"
},
'puppet-log-components-list-singlenode-cinder' => {
'type' => 'count-lines',
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'expected_line_number' => 345,
'filename' => 'puppet-apply.log',
'path_format' => "<%= @pattern_spec['path_prefix'] %><%= node['fqdn'] %>/<%= @pattern_spec['filename'] %>"
},
'default' => {
'type' => 'count-lines',
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],

View File

@ -21,7 +21,13 @@ module Astute
def node_type(reporter, task_id, nodes_uids, timeout=nil)
context = Context.new(task_id, reporter)
systemtype = MClient.new(context, "systemtype", nodes_uids, check_result=false, timeout)
systemtype = MClient.new(
context,
"systemtype",
nodes_uids,
_check_result=false,
timeout
)
systems = systemtype.get_type
systems.map do |n|
{
@ -136,7 +142,7 @@ module Astute
if reject_uids.present?
ctx ||= Context.new(task_id, reporter)
reject_nodes = reject_uids.map { |uid| {'uid' => uid } }
NodesRemover.new(ctx, reject_nodes, reboot=true).remove
NodesRemover.new(ctx, reject_nodes, _reboot=true).remove
end
# check timed-out nodes
@ -197,18 +203,8 @@ module Astute
def stop_provision(reporter, task_id, engine_attrs, nodes)
ctx = Context.new(task_id, reporter)
_provisioned_nodes, result = stop_provision_via_mcollective(ctx, nodes)
ssh_result = stop_provision_via_ssh(ctx, nodes, engine_attrs)
# Remove already provisioned node. Possible erasing nodes twice
provisioned_nodes, mco_result = stop_provision_via_mcollective(ctx, nodes)
# For nodes responded via mcollective use mcollective result instead of ssh
['nodes', 'error_nodes', 'inaccessible_nodes'].each do |node_status|
ssh_result[node_status] = ssh_result.fetch(node_status, []) - provisioned_nodes
end
result = merge_rm_nodes_result(ssh_result, mco_result)
result['status'] = 'error' if result['error_nodes'].present?
Rsyslogd.send_sighup(
@ -222,55 +218,32 @@ module Astute
def provision_piece(reporter, task_id, engine_attrs, nodes, provision_method)
cobbler = CobblerManager.new(engine_attrs, reporter)
failed_uids = []
# if provision_method is 'image', we do not need to immediately
# reboot nodes. instead, we need to run image based provisioning
# process and then reboot nodes
# TODO(kozhukalov): do not forget about execute_shell_command timeout which is 3600
# provision_and_watch_progress has provisioning_timeout + 3600 is much longer than provisioning_timeout
if provision_method == 'image'
# IBP is implemented in terms of Fuel Agent installed into bootstrap ramdisk
# we don't want nodes to be rebooted into OS installer ramdisk
cobbler.edit_nodes(nodes, {'profile' => Astute.config.bootstrap_profile})
# change node type to prevent unexpected erase
change_nodes_type(reporter, task_id, nodes)
# Run parallel reporter
report_image_provision(reporter, task_id, nodes) do
failed_uids |= image_provision(reporter, task_id, nodes)
end
provisioned_nodes = nodes.reject { |n| failed_uids.include? n['uid'] }
# IBP is implemented in terms of Fuel Agent installed into bootstrap ramdisk
# we don't want nodes to be rebooted into OS installer ramdisk
cobbler.edit_nodes(nodes, {'profile' => Astute.config.bootstrap_profile})
# disabling pxe boot (chain loader) for nodes which succeeded
cobbler.netboot_nodes(provisioned_nodes, false)
# in case of IBP we reboot only those nodes which we managed to provision
soft_reboot(
reporter,
task_id,
provisioned_nodes.map{ |n| n['uid'] },
'reboot_provisioned_nodes'
)
else
reboot_events = cobbler.reboot_nodes(nodes)
not_rebooted = cobbler.check_reboot_nodes(reboot_events)
not_rebooted = nodes.select { |n| not_rebooted.include?(n['slave_name'])}
failed_uids |= not_rebooted.map { |n| n['uid']}
# change node type to prevent unexpected erase
change_nodes_type(reporter, task_id, nodes)
# Run parallel reporter
report_image_provision(reporter, task_id, nodes) do
failed_uids |= image_provision(reporter, task_id, nodes)
end
provisioned_nodes = nodes.reject { |n| failed_uids.include? n['uid'] }
# disabling pxe boot (chain loader) for nodes which succeeded
cobbler.netboot_nodes(provisioned_nodes, false)
# in case of IBP we reboot only those nodes which we managed to provision
soft_reboot(
reporter,
task_id,
provisioned_nodes.map{ |n| n['uid'] },
'reboot_provisioned_nodes'
)
# control reboot for nodes which still in bootstrap state
# Note: if the image based provisioning is used nodes are already
# provisioned and rebooting is not necessary. In fact the forced
# reboot can corrupt a node if it manages to reboot fast enough
# (see LP #1394599)
# XXX: actually there's a tiny probability to reboot a node being
# provisioned in a traditional way (by Debian installer or anaconda),
# however such a double reboot is not dangerous since cobbler will
# boot such a node into installer once again.
if provision_method != 'image'
control_reboot_using_ssh(reporter, task_id, nodes)
end
return failed_uids
end
@ -370,7 +343,7 @@ module Astute
provisioned = nodes_types.select{ |n| ['target', 'bootstrap', 'image'].include? n['node_type'] }
.map{ |n| {'uid' => n['uid']} }
current_mco_result = NodesRemover.new(ctx, provisioned, reboot=true).remove
current_mco_result = NodesRemover.new(ctx, provisioned, _reboot=true).remove
Astute.logger.debug "Retry result #{i}: "\
"mco success nodes: #{current_mco_result['nodes']}, "\
"mco error nodes: #{current_mco_result['error_nodes']}, "\
@ -393,25 +366,14 @@ module Astute
return provisioned_nodes, mco_result
end
def stop_provision_via_ssh(ctx, nodes, engine_attrs)
ssh_result = Ssh.execute(ctx, nodes, SshEraseNodes.command)
CobblerManager.new(engine_attrs, ctx.reporter).remove_nodes(nodes)
Ssh.execute(ctx,
nodes,
SshHardReboot.command,
timeout=5,
retries=1)
ssh_result
end
def unlock_nodes_discovery(reporter, task_id="", failed_uids, nodes)
nodes_uids = nodes.select{ |n| failed_uids.include?(n['uid']) }
.map{ |n| n['uid'] }
shell = MClient.new(Context.new(task_id, reporter),
'execute_shell_command',
nodes_uids,
check_result=false,
timeout=2)
_check_result=false,
_timeout=2)
mco_result = shell.execute(:cmd => "rm -f #{Astute.config.agent_nodiscover_file}")
result = mco_result.map do |n|
{
@ -422,17 +384,6 @@ module Astute
Astute.logger.debug "Unlock discovery for failed nodes. Result: #{result}"
end
def control_reboot_using_ssh(reporter, task_id="", nodes)
ctx = Context.new(task_id, reporter)
nodes.each { |n| n['admin_ip'] = n['power_address'] }
Ssh.execute(ctx,
nodes,
SshHardReboot.command,
timeout=5,
retries=1)
end
def merge_rm_nodes_result(res1, res2)
['nodes', 'error_nodes', 'inaccessible_nodes'].inject({}) do |result, node_status|
result[node_status] = (res1.fetch(node_status, []) + res2.fetch(node_status, [])).uniq
@ -445,8 +396,8 @@ module Astute
shell = MClient.new(Context.new(task_id, reporter),
'execute_shell_command',
nodes_uids,
check_result=false,
timeout=5)
_check_result=false,
_timeout=5)
mco_result = shell.execute(:cmd => "echo '#{type}' > /etc/nailgun_systemtype")
result = mco_result.map do |n|
{

View File

@ -1,141 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'net/ssh/multi'
require 'timeout'
module Astute
# Runs a single shell command on many nodes in parallel over SSH
# (root login with /root/.ssh/id_rsa), retrying nodes that stay
# inaccessible. Returns a report hash compatible with NodesRemover
# style results: 'nodes', optional 'error_nodes' / 'inaccessible_nodes'
# and 'status' => 'error'.
class Ssh
# Execute +cmd+ on every node of +nodes+.
#
# ctx     - Context carrying task_id for log prefixes.
# nodes   - array of node hashes; only 'admin_ip' (connection target)
#           and 'uid' (for the report) are read here.
# timeout - per-attempt SSH session timeout in seconds.
# retries - number of attempts for nodes that did not respond yet.
#
# Returns {'nodes' => [...]} plus 'inaccessible_nodes' for nodes that
# never answered and 'error_nodes'/'status' on command failures.
# NOTE(review): detect_status never fills the error list, so
# 'error_nodes' is effectively always empty — confirm before relying
# on 'status' => 'error' from this path.
def self.execute(ctx, nodes, cmd, timeout=60, retries=Astute.config.ssh_retries)
# Work queue of addresses still waiting for a successful run.
nodes_to_process = nodes.map { |n| n['admin_ip'] }
Astute.logger.debug "Run shell command '#{cmd}' using ssh"
ready_nodes = []
error_nodes = []
retries.times do |i|
Astute.logger.debug "Run shell command using ssh. Retry #{i}"
Astute.logger.debug "Affected nodes: #{nodes_to_process}"
# Each attempt reassigns nodes_to_process to the still-inaccessible set.
new_ready_nodes, new_error_nodes, nodes_to_process = run_remote_command(nodes_to_process, cmd, timeout)
Astute.logger.debug "Retry result: "\
"success nodes: #{new_ready_nodes}, "\
"error nodes: #{new_error_nodes}, "\
"inaccessible nodes: #{nodes_to_process}"
ready_nodes += new_ready_nodes
error_nodes += new_error_nodes
break if nodes_to_process.empty?
sleep Astute.config.ssh_retry_timeout
end
# Whatever is left after all retries is reported as inaccessible.
inaccessible_nodes = nodes_to_process
nodes_uids = nodes.map { |n| n['uid'] }
answer = {'nodes' => to_report_format(ready_nodes, nodes)}
if inaccessible_nodes.present?
answer.merge!({'inaccessible_nodes' => to_report_format(inaccessible_nodes, nodes)})
Astute.logger.warn "#{ctx.task_id}: Running shell command on nodes\n#{nodes_uids.pretty_inspect}\nfinished " \
"with errors. Nodes\n#{answer['inaccessible_nodes'].pretty_inspect}\nare inaccessible"
end
if error_nodes.present?
answer.merge!({'status' => 'error', 'error_nodes' => to_report_format(error_nodes, nodes)})
Astute.logger.error "#{ctx.task_id}: Running shell command on nodes\n#{nodes_uids.pretty_inspect}\nfinished " \
"with errors:\n#{answer['error_nodes'].pretty_inspect}"
end
Astute.logger.info "#{ctx.task_id}: Finished running shell command:\n#{nodes_uids.pretty_inspect}"
answer
end
private
# Map a list of admin IPs back to [{'uid' => ...}] entries using the
# original node hashes; nodes without a 'uid' are dropped.
def self.to_report_format(slave_names, nodes)
result_nodes = nodes.select { |n| slave_names.include?(n['admin_ip']) }
result_nodes.inject([]) do |result, node|
result << {'uid' => node['uid']} if node['uid']
result
end
end
# One parallel SSH pass over +nodes+ (list of admin IPs).
# Returns [executed_nodes, error_nodes, inaccessible_nodes] — the
# triple consumed destructively by .execute. Timeouts and remote
# disconnects are translated into per-server failure state via
# exception_process rather than raised to the caller.
def self.run_remote_command(nodes, cmd, timeout)
servers = []
channel = nil
Net::SSH::Multi.start(:concurrent_connections => Astute.config.max_nodes_per_call,
:on_error => :warn) do |session|
nodes.each do |name|
session.use name,
:user => 'root',
:host_key => 'ssh-rsa',
:keys => ['/root/.ssh/id_rsa']
end
servers = session.servers_for
# execute commands on all servers
# FIXME: debug not show a messages if command contain a several
# strings
channel = session.exec cmd do |ch, success|
ch.on_data do |ichannel, data|
Astute.logger.debug "[#{ch[:host]} : #{ichannel}] #{data}"
end
ch.on_request "exit-status" do |_ichannel, data|
# NOTE(review): exit_status is assigned but never used — the remote
# exit code is effectively discarded (see TODO on detect_status).
exit_status = data.read_long
end
end
# Bound the whole multi-session event loop, not each host individually.
Timeout::timeout(timeout) { session.loop }
end
detect_status(servers)
rescue Timeout::Error
Astute.logger.debug "SSH session is closed due to the achievement of a timeout"
# Timeout fired before session setup populated servers: report all
# requested nodes as inaccessible for this attempt.
return [[], [], nodes] unless servers
exception_process(servers)
rescue Net::SSH::Disconnect
Astute.logger.debug "SSH connection closed by remote host"
exception_process(servers)
end
# Mark every still-busy server as failed (shutting down its session
# when channels remain) so detect_status classifies it as inaccessible.
def self.exception_process(servers)
servers.each do |s|
if s.busy?
# Pending connection could not be shutdown, but always return busy as true
s.session.shutdown! if s.session.channels.present?
s.fail!
end
end
detect_status(servers)
end
# TODO: support exit code from shell command
# Split servers into reachable and failed hosts; the middle element of
# the returned triple (error nodes) is always empty because remote
# exit codes are not tracked yet.
def self.detect_status(servers)
executed_nodes = []
inaccessible_nodes = []
servers.each do |s|
s.failed? ? inaccessible_nodes << s.host : executed_nodes << s.host
end
[executed_nodes, [], inaccessible_nodes]
end
end
end

View File

@ -1,73 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Astute
# Holds the shell script pushed over SSH (see Astute::Ssh.execute) to
# wipe boot records and partition headers on a node being removed.
class SshEraseNodes
# Returns the erase script as a single heredoc string. The script:
# pauses OS installers (anaconda/debootstrap), tunes kernel panic
# behaviour, then zeroes the first/last sectors of each whitelisted
# block device and its partitions — but only when '/' is an init
# ramdisk (bootstrap/provisioning node), never on a provisioned node
# whose cmdline carries a root= parameter.
# NOTE(review): the heredoc body is a runtime string executed on the
# target node; it must not be edited as if it were Ruby comments.
def self.command
<<-ERASE_COMMAND
killall -STOP anaconda
killall -STOP debootstrap dpkg
echo "5" > /proc/sys/kernel/panic
echo "1" > /proc/sys/kernel/sysrq
echo "1" > /proc/sys/kernel/panic_on_oops
STORAGE_DEVICE_NUMBERS="3, 8, 65, 66, 67, 68, 69, 70, 71, 104, 105, 106, 107, 108, 109, 110, 111, 202, 252, 253"
BLOCK_DEVICES=$(sed -nr 's#^.*[0-9]\s+([a-z]+|cciss\/c[0-9]+d[0-9]+)$#\\1#p' /proc/partitions)
erase_data() {
echo "Run erase_data with dev= /dev/$1 length = $2 offset = $3 bs = $4"
dd if=/dev/zero of="/dev/$1" count="$2" seek="$3" bs="$4"
blockdev --flushbufs "/dev/$1"
}
erase_partitions() {
for PART in $(sed -nr 's#^.*[0-9]\s+('"$1"'p?[0-9]+)$#\\1#p' /proc/partitions)
do
erase_data "$PART" "$2" "$3" "$4"
done
}
erase_boot_devices() {
for DEVICE in $BLOCK_DEVICES
do
MAJOR=$(sed -nr 's#^\s+([0-9]+)\s.*\s'"$DEVICE"'$#\\1#p' /proc/partitions)
SIZE=$(($(sed -nr 's#^(\s+[0-9]+){2}\s+([0-9]+)\s+'"$DEVICE"'$#\\2#p' /proc/partitions) * 2))
echo "$STORAGE_DEVICE_NUMBERS" | grep -wq "$MAJOR" || continue
grep -wq 0 "/sys/block/$(echo $DEVICE | sed 's#/#!#')/removable" || continue
erase_data "$DEVICE" 1 0 512
erase_data "$DEVICE" 1 $(($SIZE-1)) 512
erase_partitions "$DEVICE" 1 0 512
done
}
if [ -r /etc/nailgun_systemtype ]; then
NODE_TYPE=$(cat /etc/nailgun_systemtype)
else
NODE_TYPE="provisioning"
fi
# Check what was mounted to '/': drive (provisioned node)
# or init ramdisk (bootsrapped/provisioning node)
if grep -Eq 'root=[^[:blank:]]+' /proc/cmdline; then
echo "Do not erase $NODE_TYPE node using shell"
else
echo "Run erase command on ${NODE_TYPE} node"
erase_boot_devices
fi
ERASE_COMMAND
end
end
end

View File

@ -1,39 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
module Astute
# Holds the shell script pushed over SSH (see Astute::Ssh.execute) to
# force an immediate reboot of a bootstrap/provisioning node.
class SshHardReboot
# Returns the reboot script as a single heredoc string. It reboots via
# the sysrq trigger ('b' = immediate reboot, no sync/unmount) only when
# '/' is an init ramdisk; provisioned nodes (root= in /proc/cmdline)
# are left untouched.
# NOTE(review): the heredoc body is a runtime string executed on the
# target node; it must not be edited as if it were Ruby comments.
def self.command
<<-REBOOT_COMMAND
if [ -r /etc/nailgun_systemtype ]; then
NODE_TYPE=$(cat /etc/nailgun_systemtype)
else
NODE_TYPE="provisioning"
fi
# Check what was mounted to '/': drive (provisioned node)
# or init ramdisk (bootsrapped/provisioning node)
if grep -Eq 'root=[^[:blank:]]+' /proc/cmdline; then
echo "Do not reboot $NODE_TYPE node using shell"
else
echo "Run node rebooting command using 'SB' to sysrq-trigger"
echo "1" > /proc/sys/kernel/panic_on_oops
echo "10" > /proc/sys/kernel/panic
echo "b" > /proc/sysrq-trigger
fi
REBOOT_COMMAND
end
end
end

View File

@ -161,32 +161,6 @@ describe LogParser do
return node
end
it "should be greather than 0.96 for CentOS" do
node = {
'uid' => '1',
'ip' => '1.0.0.1',
'hostname' => 'slave-1.domain.tld',
'role' => 'controller',
'src_filename' => 'anaconda.log_',
'profile' => 'centos-x86_64'}
calculated_node = provision_parser_wrapper(node)
calculated_node['statistics']['pcc'].should > 0.96
end
it "should be greather than 0.96 for Ubuntu" do
node = {
'uid' => '1',
'ip' => '1.0.0.1',
'hostname' => 'slave-1.domain.tld',
'role' => 'controller',
'src_filename' => 'main-menu.log_',
'profile' => 'ubuntu_1404_x86_64'}
calculated_node = provision_parser_wrapper(node)
calculated_node['statistics']['pcc'].should > 0.96
end
it "should be greather than 0.98 for Image Based Provisioning building" do
node = {
'uid' => '1',
@ -223,98 +197,6 @@ describe LogParser do
end
context "Correlation coeff. (PCC) of Deploying progress bar calculation" do
def deployment_parser_wrapper(cluster_type, nodes)
uids = nodes.map{|n| n['uid']}
date_regexp = '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}'
date_format = '%Y-%m-%dT%H:%M:%S'
Dir.mktmpdir do |dir|
Astute::LogParser::PATH_PREFIX.replace("#{dir}/")
deploy_parser = Astute::LogParser::ParseDeployLogs.new
deploy_parser.deploy_type = cluster_type
# Create temp log files and structures.
nodes.each do |node|
pattern_spec = deploy_parser.get_pattern_for_node(node)
path = "#{pattern_spec['path_prefix']}#{node['fqdn']}/#{pattern_spec['filename']}"
Dir.mkdir(File.dirname(path)) unless File.exists?(File.dirname(path))
node['file'] = File.open(path, 'w')
src_filename = File.join(File.dirname(__FILE__), "..", "example-logs", node['src_filename'])
node['src'] = File.open(src_filename)
node['progress_table'] ||= []
end
# End 'while' cycle if reach EOF at all src files.
while nodes.index{|n| not n['src'].eof?}
# Copy logs line by line from example logfile to tempfile and collect progress for each step.
nodes.each do |node|
unless node['src'].eof?
line = node['src'].readline
node['file'].write(line)
node['file'].flush
node['last_line'] = line
else
node['last_line'] = ''
end
end
nodes_progress = deploy_parser.progress_calculate(uids, nodes)
nodes_progress.each do |progress|
node = nodes.at(nodes.index{|n| n['uid'] == progress['uid']})
date_string = node['last_line'].match(date_regexp)
if date_string
date = DateTime.strptime(date_string[0], date_format)
node['progress_table'] << {:date => date, :progress => progress['progress']}
end
end
end
nodes.each do |node|
node['statistics'] = get_statistics_variables(node['progress_table'])
end
# Clear temp files.
nodes.each do |n|
n['file'].close
File.unlink(n['file'].path)
Dir.unlink(File.dirname(n['file'].path))
end
end
return nodes
end
it "should be greather than 0.85 for HA deployment" do
nodes = [
{'uid' => '1', 'ip' => '1.0.0.1', 'fqdn' => 'slave-1.domain.tld', 'role' => 'controller', 'src_filename' => 'puppet-agent.log.ha.contr.2'},
{'uid' => '2', 'ip' => '1.0.0.2', 'fqdn' => 'slave-2.domain.tld', 'role' => 'compute', 'src_filename' => 'puppet-agent.log.ha.compute'},
]
calculated_nodes = deployment_parser_wrapper('ha_compact', nodes)
calculated_nodes.each {|node| node['statistics']['pcc'].should > 0.85}
end
it "should be greather than 0.97 for singlenode deployment" do
nodes = [
{'uid' => '1', 'ip' => '1.0.0.1', 'fqdn' => 'slave-1.domain.tld', 'role' => 'controller', 'src_filename' => 'puppet-agent.log.singlenode'},
]
calculated_nodes = deployment_parser_wrapper('singlenode', nodes)
calculated_nodes.each {|node| node['statistics']['pcc'].should > 0.97}
end
it "should be greather than 0.94 for multinode deployment" do
nodes = [
{'uid' => '1', 'ip' => '1.0.0.1', 'fqdn' => 'slave-1.domain.tld', 'role' => 'controller', 'src_filename' => 'puppet-agent.log.multi.contr'},
{'uid' => '2', 'ip' => '1.0.0.2', 'fqdn' => 'slave-2.domain.tld', 'role' => 'compute', 'src_filename' => 'puppet-agent.log.multi.compute'},
]
calculated_nodes = deployment_parser_wrapper('multinode', nodes)
calculated_nodes.each {|node| node['statistics']['pcc'].should > 0.94}
end
end
context "Dirsize-based progress calculation" do
def create_dir_with_size(size, given_opts={})
raise "The required size should be a non-negative number" if size < 0
@ -509,102 +391,4 @@ describe LogParser do
end
end
context "Correct profile for logparsing" do
let(:deploy_parser) { Astute::LogParser::ParseProvisionLogs.new }
it 'should not raise error if system is CentOS' do
node = {
'uid' => '1',
'profile' => 'centos-x86_64'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['filename']).to be_eql "install/anaconda.log"
end
it 'should not raise error if system is Ubuntu' do
node = {
'uid' => '1',
'profile' => 'ubuntu_1404_x86_64'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['filename']).to be_eql "main-menu.log"
end
it 'should raise error if system unknown' do
node = {
'uid' => '1',
'profile' => 'unknown'
}
expect { deploy_parser.get_pattern_for_node(node) }.to raise_error(Astute::ParseProvisionLogsError)
end
end
context "Correct profile for logparsing" do
let(:deploy_parser) { Astute::LogParser::ParseDeployLogs.new }
context 'HA' do
before(:each) do
deploy_parser.deploy_type = 'ha_compact'
end
it 'should set correct patterns for role - controller' do
node = {
'uid' => '1',
'role' => 'controller'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['type']).to be_eql 'components-list'
end
it 'should set correct patterns for role - primary-controller' do
node = {
'uid' => '1',
'role' => 'primary-controller'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['type']).to be_eql 'components-list'
end
it 'should set correct patterns for role - compute' do
node = {
'uid' => '1',
'role' => 'compute'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['type']).to be_eql 'components-list'
end
end # HA
context 'multinode' do
before(:each) do
deploy_parser.deploy_type = 'multinode'
end
it 'should set correct patterns for role - controller' do
node = {
'uid' => '1',
'role' => 'controller'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['type']).to be_eql 'components-list'
end
it 'should set correct patterns for role - compute' do
node = {
'uid' => '1',
'role' => 'compute'
}
pattern_spec = deploy_parser.get_pattern_for_node(node)
expect(pattern_spec['type']).to be_eql 'components-list'
end
end # multinode
end
end

View File

@ -70,7 +70,7 @@ describe Astute::Provisioner do
Astute::Rsyslogd.expects(:send_sighup).once
@provisioner.remove_nodes(
@reporter,
task_id="task_id",
_task_id="task_id",
engine_attrs,
nodes,
{:reboot => true}
@ -82,7 +82,7 @@ describe Astute::Provisioner do
Astute::Rsyslogd.stubs(:send_sighup).once
expect(@provisioner.remove_nodes(
@reporter,
task_id="task_id",
_task_id="task_id",
engine_attrs,
nodes,
{:reboot => true}
@ -98,7 +98,7 @@ describe Astute::Provisioner do
Astute::Rsyslogd.stubs(:send_sighup).never
expect {@provisioner.remove_nodes(
@reporter,
task_id="task_id",
_task_id="task_id",
engine_attrs,
nodes,
{
@ -115,7 +115,7 @@ describe Astute::Provisioner do
Astute::Rsyslogd.stubs(:send_sighup).never
expect {@provisioner.remove_nodes(
@reporter,
task_id="task_id",
_task_id="task_id",
engine_attrs,
nodes,
{
@ -338,20 +338,6 @@ describe Astute::Provisioner do
@provisioner.expects(:unlock_nodes_discovery).never
@provisioner.provision(@reporter, data['task_uuid'], data, 'image')
end
it 'should try to reboot nodes using ssh(insurance for cobbler)' do
Astute::Provision::Cobbler.any_instance.stubs(:event_status)
.returns([Time.now.to_f, 'controller-1', 'complete'])
@provisioner.expects(:control_reboot_using_ssh)
.with(@reporter, data['task_uuid'], data['nodes']).once
@provisioner.provision_piece(
@reporter,
data['task_uuid'],
data['engine'],
data['nodes'],
'native'
)
end
end
context 'node reboot fail' do
@ -854,82 +840,18 @@ describe Astute::Provisioner do
describe '#stop_provision' do
around(:each) do |example|
old_ssh_retries = Astute.config.ssh_retries
old_mc_retries = Astute.config.mc_retries
old_nodes_rm_interal = Astute.config.nodes_remove_interval
example.run
Astute.config.ssh_retries = old_ssh_retries
Astute.config.mc_retries = old_mc_retries
Astute.config.nodes_remove_interval = old_nodes_rm_interal
end
before(:each) do
Astute.config.ssh_retries = 1
Astute.config.mc_retries = 1
Astute.config.nodes_remove_interval = 0
end
it 'erase nodes using ssh' do
Astute::CobblerManager.any_instance.stubs(:remove_nodes).returns([])
Astute::Rsyslogd.stubs(:send_sighup).once
@provisioner.stubs(:stop_provision_via_mcollective).returns([[], {}])
Astute::Ssh.stubs(:execute).returns({'inaccessible_nodes' => [{'uid' => '1'}]}).once
Astute::Ssh.expects(:execute).with(instance_of(Astute::Context),
data['nodes'],
Astute::SshEraseNodes.command)
.returns({'nodes' => [{'uid' => '1'}]})
expect(@provisioner.stop_provision(@reporter,
data['task_uuid'],
data['engine'],
data['nodes']))
.to eql({
"error_nodes" => [],
"inaccessible_nodes" => [],
"nodes" => [{"uid"=>'1'}]
})
end
it 'always remove nodes from Cobbler' do
Astute::Rsyslogd.stubs(:send_sighup).once
Astute::Ssh.stubs(:execute).twice.returns({'inaccessible_nodes' => [{'uid' => '1'}]})
@provisioner.stubs(:stop_provision_via_mcollective).returns([[], {}])
Astute::CobblerManager.any_instance.expects(:remove_nodes)
.with(data['nodes'])
.returns([])
@provisioner.stop_provision(@reporter,
data['task_uuid'],
data['engine'],
data['nodes'])
end
it 'reboot nodes using using ssh' do
Astute::CobblerManager.any_instance.stubs(:remove_nodes).returns([])
Astute::Rsyslogd.stubs(:send_sighup).once
@provisioner.stubs(:stop_provision_via_mcollective).returns([[], {}])
Astute::Ssh.stubs(:execute).returns({'nodes' => [{'uid' => '1'}]}).once
Astute::Ssh.expects(:execute).with(instance_of(Astute::Context),
data['nodes'],
Astute::SshHardReboot.command,
timeout=5,
retries=1)
.returns({'inaccessible_nodes' => [{'uid' => '1'}]})
expect(@provisioner.stop_provision(@reporter,
data['task_uuid'],
data['engine'],
data['nodes']))
.to eql({
"error_nodes" => [],
"inaccessible_nodes" => [],
"nodes" => [{"uid"=>'1'}]
})
end
it 'stop provision if provision operation stop immediately' do
Astute::Rsyslogd.stubs(:send_sighup).once
@provisioner.stubs(:stop_provision_via_ssh)
@ -970,29 +892,8 @@ describe Astute::Provisioner do
})
end
it 'inform about inaccessible nodes' do
Astute::Rsyslogd.stubs(:send_sighup).once
Astute::Ssh.stubs(:execute).returns({'inaccessible_nodes' => [{'uid' => '1'}]}).twice
Astute::CobblerManager.any_instance.stubs(:remove_nodes).returns([])
@provisioner.stubs(:node_type).returns([])
Astute::NodesRemover.any_instance.expects(:remove).never
expect(@provisioner.stop_provision(@reporter,
data['task_uuid'],
data['engine'],
data['nodes']))
.to eql({
"error_nodes" => [],
"inaccessible_nodes" => [{"uid"=>'1'}],
"nodes" => []
})
end
it 'sleep between attempts to find and erase nodes using mcollective' do
Astute::Rsyslogd.stubs(:send_sighup).once
@provisioner.stubs(:stop_provision_via_ssh)
.returns({'inaccessible_nodes' => [{'uid' => '1'}]})
@provisioner.stubs(:node_type).returns([{'uid' => '1', 'node_type' => 'bootstrap'}])
Astute::NodesRemover.any_instance.stubs(:remove)
.once.returns({"nodes"=>[{"uid"=>"1", }]})
@ -1010,10 +911,6 @@ describe Astute::Provisioner do
Astute.config.mc_retries = 2
Astute.config.nodes_remove_interval = 0
@provisioner.stubs(:stop_provision_via_ssh)
.returns({'nodes' => [{'uid' => "1"}],
'inaccessible_nodes' => [{'uid' => '2'}]})
@provisioner.stubs(:node_type).twice
.returns([{'uid' => '1', 'node_type' => 'bootstrap'}])
.then.returns([{'uid' => '2', 'node_type' => 'target'}])
@ -1043,8 +940,6 @@ describe Astute::Provisioner do
it 'should send sighup for Rsyslogd' do
Astute::Rsyslogd.expects(:send_sighup).once
Astute::Ssh.stubs(:execute).twice.returns({'inaccessible_nodes' => [{'uid' => '1'}]})
@provisioner.stubs(:stop_provision_via_mcollective).returns([[], {}])
Astute::CobblerManager.any_instance.stubs(:remove_nodes)

View File

@ -37,7 +37,6 @@ Requires: ruby21-rubygem-bunny
Requires: ruby21-rubygem-raemon = 0.3.0
Requires: ruby21-rubygem-net-ssh = 2.8.0
Requires: ruby21-rubygem-net-ssh-gateway = 1.2.0
Requires: ruby21-rubygem-net-ssh-multi = 1.2.0
BuildRequires: ruby21 >= 2.1
BuildRequires: rubygems21
%else
@ -50,7 +49,6 @@ Requires: rubygem-bunny
Requires: rubygem-raemon
Requires: rubygem-net-ssh
Requires: rubygem-net-ssh-gateway
Requires: rubygem-net-ssh-multi
BuildRequires: ruby
BuildRequires: rubygems-devel
%endif