Removed HPCloud and baremetal configs.

This commit is contained in:
Tim Kuhlman 2014-07-08 17:14:49 -06:00
parent 705bfa97c1
commit 12ed340ebc
24 changed files with 5 additions and 696 deletions

View File

@ -1,9 +0,0 @@
# Mini-mon in the cloud
Run vagrant commands from this subdir to have Mini-mon run in the HP Public Cloud rather than as a vm on your machine.
You must have a valid HPCloud account with a defined security group that allows ssh access and an ssh key pair must be defined.
To set up:
- Install the plugin `vagrant plugin install vagrant-hp`
- Copy Vagrantfile.hpcloud to ~/.vagrant.d/Vagrantfile then edit and enter your credentials and other access information.

26
HPCloud/Vagrantfile vendored
View File

@ -1,26 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile for running Mini-mon in the HP Public Cloud through the
# vagrant-hp provider. Account credentials live in ~/.vagrant.d/Vagrantfile
# (see the README in this subdir).

# All paths below are relative to the repository root, so move there first.
Dir.chdir ".."

VAGRANTFILE_API_VERSION = "2" # Vagrantfile API/syntax version. Don't touch unless you know what you're doing!

Vagrant.configure(VAGRANTFILE_API_VERSION) do |cfg|
  # Settings applied to every vm defined here
  cfg.berkshelf.enabled = true

  ENV['VAGRANT_DEFAULT_PROVIDER'] = 'hp'

  # The dummy box carries no image; the hp provider supplies the real one.
  cfg.vm.box = "dummy_hp"
  cfg.vm.box_url = "https://github.com/mohitsethi/vagrant-hp/raw/master/dummy_hp.box"

  cfg.vm.hostname = 'mini-mon'
  cfg.vm.synced_folder ".", "/vagrant" # needed to get vertica packages but otherwise unneeded

  # Bootstrap chef-solo first, then converge the Mini-Mon role.
  cfg.vm.provision :shell, path: "./chef-solo-bootstrap.sh"
  cfg.vm.provision :chef_solo do |solo|
    solo.roles_path = "../roles"
    solo.data_bags_path = "../data_bags"
    solo.add_role "Mini-Mon"
  end
end

View File

@ -1,19 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# HP Cloud access settings for the vagrant-hp provider
# (https://github.com/mohitsethi/vagrant-hp). Copy this file to
# ~/.vagrant.d/Vagrantfile and fill in your own credentials.
Vagrant.configure("2") do |cfg|
  cfg.vm.provider :hp do |provider|
    # Account credentials — fill these in; never commit real values.
    provider.access_key = ""
    provider.secret_key = ""
    provider.flavor = "standard.large"
    provider.tenant_id = ""
    provider.server_name = "mini-mon"
    # SSH access: a keypair must already exist in the HP Cloud account.
    provider.keypair_name = ""
    provider.ssh_private_key_path = "~/.ssh/id_rsa"
    provider.ssh_username = "ubuntu"
    provider.security_groups = ["default"]
    provider.image = "Ubuntu Server 12.04 LTS (amd64 20140606) - Partner Image"
  end
end

View File

@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Bootstrap chef-solo on Ubuntu 12.04

# Point apt at the in-cloud Ubuntu mirrors, which are much faster.
sed -i -e 's,^archive.ubuntu.com/ubuntu,nova.clouds.archive.ubuntu.com/ubuntu,g' /etc/apt/sources.list
apt-get -y update

# Install chef via the omnibus installer unless it is already present.
if [ ! -e /usr/bin/chef-solo ]; then
    curl -L https://www.opscode.com/chef/install.sh | bash
fi

# An alternative to omnibus is to install ruby via apt and then the chef gem
#apt-get -y install ruby1.9.3
#gem install --no-ri --no-rdoc chef

View File

@ -132,9 +132,10 @@ vagrant plugin install vagrant-proxyconf
```
# Alternate Vagrant Configurations
To run any of these alternate configs, simply run the Vagrant commands from within the subdir. Note that the Vertica debs must be _copied_ (not symlinked) into
the subdir as well. See the README.md in the subdir for more details.
To run any of these alternate configs, simply run the Vagrant commands from within the subdir. Note that the Vertica debs must be _copied_
(not symlinked) into the subdir as well. See the README.md in the subdir for more details.
- `HPCloud` subdir - Runs a VM in the HP Public Cloud rather than using VirtualBox
- `split` subdir - The various monitoring components split into their own VMs
- `Baremetal` - Actually not using Vagrant at all; see the baremetal fabric task in the `utils` directory
In the past, other alternative setups worked, including running mini-mon in the HP Public Cloud and scripts for deploying it on bare metal. These are
no longer supported.

View File

@ -1,4 +0,0 @@
Various utilities for leveraging mini-mon. These are written as [fabric](http://docs.fabfile.org/) scripts.
To deploy mini-mon to bare metal, run the following, substituting the correct IP address:
- `fab baremetal -H 10.0.0.0`

View File

@ -1,150 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fabric Tasks for installing mini-mon on baremetal
These tasks were developed for hLinux but will likely work on any decently up to date debian based distro
"""
from fabric.api import *
@task
def chef_solo(chef_dir='/vagrant', run_list='role[Mini-Mon]', proxy=None):
    """Runs chef-solo on the remote host.

    This assumes chef solo and other dependencies are setup (see
    install_deps). Writes solo.rb and node.json under chef_dir, then
    converges the given run list.

    :param chef_dir: remote directory holding cookbooks/roles/data bags
    :param run_list: chef run list written into node.json
    :param proxy: optional proxy host:port exported for the chef-solo run
    """
    # NOTE(review): indentation of the original was lost in extraction;
    # nesting below is reconstructed — confirm against repo history.
    with settings(hide('running', 'output', 'warnings'), warn_only=True):
        # The hLinux version messes with the lsb codename which some things
        # depend on, this fixes it
        debian_version = run('cat /etc/debian_version')
        if debian_version == 'cattleprod':
            sudo('echo "7.0" > /etc/debian_version')
            # Also generate the UTF8 locale
            sudo('localedef -v -c -i en_US -f UTF-8 en_US.UTF-8')

    # Setup solo.rb
    solo_content = '''cookbook_path "{dir}/berks-cookbooks"
role_path "{dir}/roles"
data_bag_path "{dir}/data_bags"'''.format(dir=chef_dir)
    sudo("echo '%s' > %s/solo.rb" % (solo_content, chef_dir))

    # Setup node.json
    node_json = '{ "run_list": "%s" }' % run_list
    sudo("echo '%s' > %s/node.json" % (node_json, chef_dir))

    # Run chef-solo
    # todo - proxy hell defeats chef at this point because some components
    # like pip need it but others like apt choke
    with prefix(proxy_string(proxy)):
        sudo('chef-solo -c {dir}/solo.rb -j {dir}/node.json'.format(dir=chef_dir))
@task
def git_mini_mon(install_dir, branch=None, proxy=None):
    """Download mini-mon from git.

    :param install_dir: remote directory to clone/update mini-mon into
    :param branch: optional branch to check out after cloning/updating
    :param proxy: optional proxy host:port used for the git commands
    """
    with prefix(proxy_string(proxy)):
        # Update the install dir if it already has code, otherwise check out
        with settings(hide('running', 'output', 'warnings'), warn_only=True):
            install_dir_check = run('ls %s' % install_dir)
        if install_dir_check.succeeded:
            with cd(install_dir):
                sudo('git checkout master; git pull -f origin master')
        else:
            sudo('git clone https://github.com/hpcloud-mon/mon-vagrant.git %s' % install_dir)
        if branch is not None:
            with cd(install_dir):
                # Create a local tracking branch only if it does not exist yet.
                with settings(hide('everything'), warn_only=True):
                    branch_exists = sudo('git branch| grep %s' % branch)
                if branch_exists.failed:
                    sudo('git branch {branch} origin/{branch}'.format(branch=branch))
                sudo('git checkout %s' % branch)
@task(default=True)
def install(install_dir='/vagrant', proxy=None):
    """Installs the latest mini-mon and bits necessary to run chef-solo and
    runs chef-solo on the box.

    proxy is an optional proxy url used for http and https, it is not used
    for apt as that is assumed to be correctly setup.
    install_dir defaults to /vagrant to match assumptions from mini-mon even
    though vagrant is not used here.
    """
    if proxy is not None:
        abort('Proxy support is incomplete.')
    execute(install_deps, proxy)
    # Clone mini-mon
    execute(git_mini_mon, install_dir, proxy)
    with prefix(proxy_string(proxy)):
        # download cookbooks
        with cd(install_dir):
            sudo('berks vendor')
    # the vertica packages from my.vertica.com are needed, this assumes they
    # are one level up from cwd
    put('../vertica*.deb', install_dir, use_sudo=True)
    execute(chef_solo, proxy=proxy)
@task
def install_berkshelf(proxy=None):
    """Installs berkshelf.

    Ruby >= 1.9.2 is required by berkshelf; install ruby via apt when it is
    missing, abort when the installed ruby is too old.

    :param proxy: optional proxy host:port used for the gem install
    """
    # check for ruby 1.9.2 or greater and if needed install
    with settings(hide('running', 'output', 'warnings'), warn_only=True):
        ruby_check = run('ruby -v')
    if ruby_check.failed:
        # Install both ruby and tools needed to build gems
        sudo('apt-get install -y ruby ruby-dev ruby-hitimes build-essential')
    else:
        # Compare (major, minor) as a tuple. The previous check
        # (major < 2 and minor < 9) wrongly accepted versions such as 0.8,
        # since only both conditions together triggered the abort.
        version_parts = ruby_check.split()[1].split('.')
        if tuple(int(p) for p in version_parts[:2]) < (1, 9):
            abort('Ruby reports version %s, > 1.9.2 is needed' % ruby_check)
    with prefix(proxy_string(proxy)):
        sudo('gem install berkshelf')
@task
def install_chef(proxy=None):
    """Installs chef via omnibus.

    A no-op when chef-solo is already on the path.

    :param proxy: optional proxy host:port used for the download
    """
    sudo('apt-get install -y curl')
    # If chef is already installed continue
    with settings(hide('running', 'output', 'warnings'), warn_only=True):
        chef_check = run('chef-solo -v')
        if chef_check.succeeded:
            return
    # Run the omnibus installer
    with prefix(proxy_string(proxy)):
        sudo('curl -s -S -L https://www.opscode.com/chef/install.sh | bash')
@task
def install_deps(proxy=None):
    """Install all dependencies needed for running chef-solo.

    :param proxy: optional proxy host:port forwarded to the sub-tasks
    """
    sudo('apt-get install -y git')
    execute(install_chef, proxy)
    execute(install_berkshelf, proxy)
def proxy_string(proxy):
    """Return a shell export string using the given proxy url.

    An example proxy to pass in: myproxy.me.com:8080

    :param proxy: proxy host:port, or None when no proxy is in use
    :return: a shell command suitable for fabric's prefix() context manager
    """
    if proxy is None:
        # A harmless no-op assignment: the prefix context manager
        # unconditionally prepends this string to every command.
        return "no_proxy="
    return "export http_proxy='http://{proxy}' && export https_proxy='http://{proxy}'".format(proxy=proxy)

View File

@ -1,34 +0,0 @@
# Setup of a test cluster
The goal of this fabric script is to setup a test cluster on baremetal leveraging some tools from mini-mon.
## Steps
- Before running first setup the following settings for your test cluster:
- keystone host and project_id in data_bags/mon_agent/mon_agent.json
- keystone host(serverVip) in data_bags/mon_api/mon_credentials.json
- wsrep address in the Mon-Node role
- servers in data_bags/zookeeper/mon.json
- servers in data_bags/kafka/mon.json
- vertica data bags in data_bags/vertica
- ssh_key.json with two fields, public/private corresponding to public/private ssh keys
- nodes data bag
- From the utils directory (or specifying that fabfile) start the install script
- `fab cluster.setup -H host1,host2,host3`
- 3 hosts is the minimum needed for a fully HA cluster.
- All 3 machines will run the Mon-Node role, host1 will also run the Thresh-Nimbus role and remaining hosts the Thresh-Supervisor role
- After the cluster is up and running some additional configuration steps are needed to setup kafka topics and databases.
- On one of the machines run the recipes:
- kafka::create_topics
- mini-mon::mysql_schema
- vertica::create_db
## Optional Configuration
- Add in the Vertica Console to one of the machines. This can be done with vertica::console recipe
## Known Issues
- runit used for the storm cookbook needs a newer version for chef 11 and even the newer version has errors on hLinux.
- If a client connects to kafka before the topics are made in some situations the topics are automatically created and can end up
as the wrong size or without replicas.
- The percona cluster can be fickle about starting up properly; this at times interferes with the cookbook. A workaround is
  to initially set the wsrep_cluster_address to just gcomm://
- Various cookbooks were pinned to older versions to accommodate the old chef used in the precise vagrant image; these can be unpinned.

View File

@ -1,21 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setup import SetupCluster

# Module-level instance so fabric picks the task up from this fabfile.
setup = SetupCluster()

View File

@ -1,8 +0,0 @@
{
"id" : "mon",
"brokers": {
"mon-ae1test-vertica0001.useast.hpcloud.net" : { "id": 0, "ip": "10.22.156.11" },
"mon-ae1test-api0001.useast.hpcloud.net" : { "id": 1, "ip": "10.22.156.17" },
"mon-ae1test-api0002.useast.hpcloud.net" : { "id": 2, "ip": "10.22.156.18" }
}
}

View File

@ -1,9 +0,0 @@
{
"id": "mon_agent",
"keystone_url": "http://10.22.156.20:35357/v3",
"username": "admin",
"password": "password",
"project_name": "demo",
"mon_api_url": "http://localhost:8080/v2.0",
"service": "demo"
}

View File

@ -1,21 +0,0 @@
{
"id" : "mon_api",
"api_region": "useast",
"database-configuration": {
"database-type": "vertica"
},
"vertica" : {
"dbname" : "mon",
"hostname" : "localhost"
},
"zookeeper" : {
"hostname" : "10.22.156.11,10.22.156.17,10.22.156.18"
},
"mysql": {
"hostname":"localhost",
"schema": "mon"
},
"kafka": {
"hostname": "10.22.156.11,10.22.156.17,10.22.156.18"
}
}

View File

@ -1,19 +0,0 @@
{
"id": "mon_credentials",
"middleware": {
"serverVip": "10.22.156.19",
"truststore_password": "changeit"
},
"mysql": {
"hostname": "localhost",
"username": "monapi",
"password": "password",
"schema": "mon"
},
"vertica": {
"hostname": "localhost",
"username": "mon_api",
"password": "password",
"schema": "mon"
}
}

View File

@ -1,24 +0,0 @@
{
"id" : "hosts",
"kafka": {
"url": "10.22.156.14,10.22.156.15,10.22.156.16",
"alarm_topic": "alarm-state-transitions",
"notification_topic": "alarm-notifications"
},
"mysql": {
"url": "localhost",
"user": "notification",
"password": "password",
"database": "mon"
},
"smtp": {
"url": "localhost",
"user": "",
"password": "",
"timeout": 60,
"from_addr": "hpcs.mon@hp.com"
},
"zookeeper": {
"url": "10.22.156.14,10.22.156.15,10.22.156.16"
}
}

View File

@ -1,7 +0,0 @@
{
"id" : "mon_credentials",
"vertica" : {
"user" : "dbadmin",
"password" : "password"
}
}

View File

@ -1,41 +0,0 @@
{
"id": "mon_persister",
"alarm_history": {
"topic": "alarm-state-transitions"
},
"metrics": {
"topic": "metrics"
},
"kafka": {
"num_threads": "1",
"group_id": "1",
"consumer_id": {
"mon-ae1test-vertica0001.useast.hpcloud.net": 0,
"mon-ae1test-api0001.useast.hpcloud.net": 1,
"mon-ae1test-api0002.useast.hpcloud.net": 2
}
},
"disruptor": {
"buffer_size": "1048576",
"num_processors": "1"
},
"output_processor": {
"batch_size": "10000"
},
"deduper_config": {
"dedupe_run_frequencey_seconds": "30"
},
"database_configuration": {
"database_type": "vertica"
},
"vertica_metric_repository_config": {
"max_cache_size": "2000000"
},
"vertica": {
"dbname": "mon",
"hostname": "localhost"
},
"zookeeper": {
"hostname": "localhost"
}
}

View File

@ -1,22 +0,0 @@
{
"id" : "mon_thresh",
"kafka": {
"metric": {
"group": "thresh-metric",
"topic": "metrics"
},
"event": {
"group": "thresh-event",
"host": "localhost:9092",
"consumer_topic": "events",
"producer_topic": "alarm-state-transitions"
}
},
"mysql": {
"db": "mon",
"host": "localhost:3306"
},
"zookeeper": {
"host": "localhost:2181"
}
}

View File

@ -1,23 +0,0 @@
{
"id": "nodes",
"nodes": {
"mon-ae1test-vertica0001.useast.hpcloud.net" : {
"ip": "10.22.156.11",
"broadcast": "10.22.156.255",
"network": "10.22.156.0",
"netmask": "255.255.255.0"
},
"mon-ae1test-api0001.useast.hpcloud.net" : {
"ip": "10.22.156.17",
"broadcast": "10.22.156.255",
"network": "10.22.156.0",
"netmask": "255.255.255.0"
},
"mon-ae1test-api0002.useast.hpcloud.net" : {
"ip": "10.22.156.18",
"broadcast": "10.22.156.255",
"network": "10.22.156.0",
"netmask": "255.255.255.0"
}
}
}

View File

@ -1,8 +0,0 @@
{
"id" : "mon",
"servers": {
"mon-ae1test-vertica0001.useast.hpcloud.net" : { "id": 0, "ip": "10.22.156.11" },
"mon-ae1test-api0001.useast.hpcloud.net" : { "id": 1, "ip": "10.22.156.17" },
"mon-ae1test-api0002.useast.hpcloud.net" : { "id": 2, "ip": "10.22.156.18" }
}
}

View File

@ -1,67 +0,0 @@
{
"name": "Mon-Node",
"description": "Sets up one box in a Monitoring Cluster",
"json_class": "Chef::Role",
"default_attributes": {
"apt": {
"periodic_update_min_delay": 60
},
"kafka": {
"cluster": "mon",
"listen_interface": "eth2",
"topics": {
"metrics": { "replicas": 3, "partitions": 64 },
"events": { "replicas": 3, "partitions": 12 },
"alarm-state-transitions": { "replicas": 3, "partitions": 12 },
"alarm-notifications": { "replicas": 3, "partitions": 12 }
}
},
"percona": {
"backup": {
"password": "password"
},
"cluster": {
"package": "percona-xtradb-cluster-56",
"wsrep_cluster_address": "gcomm://10.22.156.11,10.22.156.17,10.22.156.18",
"wsrep_cluster_name": "mon",
"wsrep_sst_method": "rsync",
"wsrep_provider": "/usr/lib/libgalera_smm.so"
},
"main_config_file": "/etc/mysql/my.cnf",
"server": {
"bind_address": "0.0.0.0",
"replication": {
"password": "password"
},
"role": "cluster",
"root_password": "password",
"skip_name_resolve": true
}
},
"vertica": {
"cluster": true
},
"zookeeper": {
"cluster": "mon"
}
},
"override_attributes": {
},
"chef_type": "role",
"run_list": [
"recipe[mini-mon]",
"recipe[percona::cluster]",
"recipe[zookeeper]",
"recipe[kafka]",
"recipe[mini-mon::postfix]",
"recipe[mon_notification]",
"recipe[vertica]",
"recipe[vertica::backup]",
"recipe[sysctl]",
"recipe[mon_api]",
"recipe[mon_persister]",
"recipe[mon_agent]"
],
"env_run_lists": {
}
}

View File

@ -1,37 +0,0 @@
{
"name": "Thresh-Nimbus",
"description": "Sets up storm Nimbus and mon-thresh",
"json_class": "Chef::Role",
"default_attributes": {
"java": {
"install_flavor": "openjdk",
"jdk_version": "7"
},
"storm": {
"nimbus": {
"host": {
"fqdn": "10.22.156.11"
}
},
"ui": {
"port": "8088"
},
"zookeeper": {
"quorum": [
"10.22.156.11",
"10.22.156.17",
"10.22.156.18"
]
}
}
},
"override_attributes": {
},
"chef_type": "role",
"run_list": [
"recipe[storm::nimbus]",
"recipe[mon_thresh]"
],
"env_run_lists": {
}
}

View File

@ -1,33 +0,0 @@
{
"name": "Thresh-Supervisor",
"description": "Sets up storm supervisor",
"json_class": "Chef::Role",
"default_attributes": {
"java": {
"install_flavor": "openjdk",
"jdk_version": "7"
},
"storm": {
"nimbus": {
"host": {
"fqdn": "10.22.156.11"
}
},
"zookeeper": {
"quorum": [
"10.22.156.11",
"10.22.156.17",
"10.22.156.18"
]
}
}
},
"override_attributes": {
},
"chef_type": "role",
"run_list": [
"recipe[storm::supervisor]"
],
"env_run_lists": {
}
}

View File

@ -1,93 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fabric Tasks for installing a cluster monitoring stack on baremetal
These tasks were developed for hLinux but will likely work on any decently up to date debian based distro
"""
from fabric.api import *
from fabric.tasks import Task
import os
from baremetal import chef_solo, git_mini_mon, install_deps
__all__ = ['setup']
class SetupCluster(Task):
    """Fabric Task that sets up a cluster running monitoring."""

    def __init__(self):
        """Setup a cluster running monitoring.
        """
        self.cluster_dir = '/var/tmp/chef-Mon-Node'
        # mini_mon_dir is /vagrant to match assumptions in mini-mon
        self.mini_mon_dir = '/vagrant'

    def run(self):
        """Installs the latest cookbooks and dependencies to run chef-solo
        and runs chef-solo on each box.

        The data bags in the cluster subdir should be properly setup for the
        environment before running.
        """
        execute(install_deps)
        execute(git_mini_mon, self.mini_mon_dir)
        execute(prep_chef, self.cluster_dir, self.mini_mon_dir)
        execute(copy_vertica, self.mini_mon_dir)
        # Every host gets the base node role.
        execute(chef_solo, self.cluster_dir, "role[Mon-Node]")
        if len(env.hosts) > 1:
            # First host runs Nimbus; the rest run Supervisors.
            execute(chef_solo, self.cluster_dir, "role[Thresh-Nimbus]", host=env.hosts[0])
            execute(chef_solo, self.cluster_dir, "role[Thresh-Supervisor]", hosts=env.hosts[1:])
        else:
            puts('Only one host specified, the Thresh roles will not be run as they require at least two hosts')
@task
def prep_chef(cluster_dir, berks_dir):
    """Pull down cookbooks with berkshelf and put roles/data bags in place.

    :param cluster_dir: remote directory rebuilt with cookbooks/roles/data bags
    :param berks_dir: remote directory containing the Berksfile
    """
    # download cookbooks
    with settings(hide('running', 'output', 'warnings'), warn_only=True):
        sudo('rm -r %s' % cluster_dir)
    sudo('mkdir %s' % cluster_dir)
    with cd(berks_dir):
        # Update an existing lock file, otherwise do a fresh install.
        with settings(hide('running', 'output', 'warnings'), warn_only=True):
            berks_check = sudo('ls Berksfile.lock')
        if berks_check.succeeded:
            sudo('berks update')
        else:
            sudo('berks install')
        sudo('berks vendor %s/berks-cookbooks' % cluster_dir)
    # Copy roles and data bags - assumes you are running from the utils directory
    put('%s/cluster/data_bags' % os.path.dirname(env.real_fabfile), cluster_dir, use_sudo=True)
    put('%s/cluster/roles' % os.path.dirname(env.real_fabfile), cluster_dir, use_sudo=True)
@task
def copy_vertica(dest_dir):
    """Copies vertica debs to the remote box.

    :param dest_dir: remote directory to upload the debs into
    """
    vertica_packages = ['vertica_7.0.1-0_amd64.deb', 'vertica-r-lang_7.0.1-0_amd64.deb']
    # the vertica packages from my.vertica.com are needed, this assumes they
    # are one level up from cwd
    for deb in vertica_packages:
        with settings(hide('running', 'output', 'warnings'), warn_only=True):
            if run('ls %s/%s' % (dest_dir, deb)).failed:
                puts('Uploading %s' % deb)
                # NOTE(review): the glob uploads every local vertica deb, so
                # one missing package re-uploads all of them — confirm intent.
                put('../../vertica*.deb', dest_dir, use_sudo=True)
# Module-level instance so fabric exposes the task (see __all__ = ['setup']).
setup = SetupCluster()

2
utils/fabfile.py vendored
View File

@ -1,2 +0,0 @@
import baremetal
import cluster