Initial pass at Swift rewrite.
This commit is contained in:
parent
651779b65f
commit
a1638d1993
41
config.yaml
41
config.yaml
|
@ -1,8 +1,25 @@
|
|||
options:
|
||||
swift-release:
|
||||
openstack-origin:
|
||||
default: distro
|
||||
type: string
|
||||
description: Swift PPA to configure (trunk, milestone, distro)
|
||||
description: |
|
||||
Repository from which to install. May be one of the following:
|
||||
distro (default), ppa:somecustom/ppa, a deb url sources entry,
|
||||
or a supported Cloud Archive release pocket.
|
||||
.
|
||||
Supported Cloud Archive sources include:
|
||||
- cloud:precise-folsom,
|
||||
- cloud:precise-folsom/updates
|
||||
- cloud:precise-folsom/staging
|
||||
- cloud:precise-folsom/proposed
|
||||
.
|
||||
Note that updating this setting to a source that is known to
|
||||
provide a later version of OpenStack will trigger a software
|
||||
upgrade.
|
||||
region:
|
||||
default: RegionOne
|
||||
type: string
|
||||
description: OpenStack region that this swift-proxy supports.
|
||||
# Ring configuration
|
||||
partition-power:
|
||||
default: 8
|
||||
|
@ -16,10 +33,21 @@ options:
|
|||
default: 1
|
||||
type: int
|
||||
description: Minimum hours between balances
|
||||
storage-zone-distribution:
|
||||
default: "service-unit"
|
||||
type: string
|
||||
description: |
|
||||
Storage zone distribution policy that the charm will use when
|
||||
configuring and initializing the storage ring upon new swift-storage
|
||||
relations (see README). Options include:
|
||||
.
|
||||
service-unit - Storage zones configured per swift-storage service unit.
|
||||
machine-unit - Storage zones configured per swift-storage machine-unit.
|
||||
manual - Storage zones configured manually per swift-storage service.
|
||||
# CA Cert info
|
||||
use-https:
|
||||
default: 1
|
||||
type: int
|
||||
default: "yes"
|
||||
type: string
|
||||
description: Whether to listen on HTTPS
|
||||
country:
|
||||
default: US
|
||||
|
@ -45,10 +73,15 @@ options:
|
|||
default: 0
|
||||
type: int
|
||||
description: Number of TCP workers to launch (0 for the number of system cores)
|
||||
operator-roles:
|
||||
default: "Member,Admin"
|
||||
type: string
|
||||
description: Comma-separated list of Swift operator roles.
|
||||
auth-type:
|
||||
default: tempauth
|
||||
type: string
|
||||
description: Auth method to use, tempauth or keystone
|
||||
# Manual Keystone configuration.
|
||||
keystone-auth-host:
|
||||
type: string
|
||||
description: Keystone authentication host
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
swift-hooks.py
|
|
@ -0,0 +1 @@
|
|||
swift-hooks.py
|
|
@ -0,0 +1 @@
|
|||
swift-hooks.py
|
|
@ -1 +1 @@
|
|||
swift-proxy-relations
|
||||
swift-hooks.py
|
|
@ -0,0 +1,206 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Common python helper functions used for OpenStack charms.
|
||||
|
||||
import subprocess
|
||||
|
||||
# Ubuntu Cloud Archive repository and its signing key id.
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

# Ubuntu series -> OpenStack release shipped in its main archive.
ubuntu_openstack_release = {
    'oneiric': 'diablo',
    'precise': 'essex',
    'quantal': 'folsom',
    'raring' : 'grizzly'
}


# OpenStack version number -> release codename.
openstack_codenames = {
    '2011.2': 'diablo',
    '2012.1': 'essex',
    '2012.2': 'folsom',
    '2013.1': 'grizzly'
}

# The ugly duckling
# Swift versions independently of OpenStack, so it needs its own map.
swift_codenames = {
    '1.4.3': 'diablo',
    '1.4.8': 'essex',
    '1.7.4': 'folsom'
}
|
||||
|
||||
def juju_log(msg):
    '''Log a message through the juju-log hook tool.

    Bug fix: an unconditional `print`/`return` (debug leftover) made the
    juju-log call unreachable. Falls back to stdout when the tool is not
    available, e.g. when run outside a hook context.
    '''
    try:
        subprocess.check_call(['juju-log', msg])
    except OSError:
        # juju-log not on PATH; keep the message visible for debugging.
        print(msg)
|
||||
|
||||
|
||||
def error_out(msg):
    '''Log a fatal error and terminate the hook with exit status 1.'''
    juju_log("FATAL ERROR: %s" % msg)
    raise SystemExit(1)
|
||||
|
||||
|
||||
def lsb_release():
    '''Return /etc/lsb-release in a dict.

    e.g. {'DISTRIB_CODENAME': 'precise', ...}. Bug fix: the file handle
    was previously never closed; values are now split on the first '='
    only, so values containing '=' no longer raise ValueError.
    '''
    release = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            key, value = line.split('=', 1)
            release[key.strip()] = value.strip()
    return release
|
||||
|
||||
|
||||
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.

    src may be 'distro', a 'cloud:<series>-<release>[/pocket]' archive
    pocket, or a 'deb ...' sources entry. Returns the codename string,
    or None when a deb source cannot be matched.
    '''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']

    if src == 'distro':
        try:
            return ubuntu_openstack_release[ubuntu_rel]
        except KeyError:
            # Bug fix: report the Ubuntu codename; the original message
            # interpolated the (empty) release variable and read
            # 'Code not derive'.
            e = 'Could not derive openstack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
            error_out(e)

    if src.startswith('cloud:'):
        # 'cloud:precise-folsom/updates' -> 'folsom'
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
    if src.startswith('deb'):
        for k, v in openstack_codenames.iteritems():
            if v in src:
                return v
|
||||
|
||||
def get_os_codename_version(vers):
    '''Map an OpenStack version number (e.g. '2012.2') to its codename.'''
    if vers in openstack_codenames:
        return openstack_codenames[vers]
    error_out('Could not determine OpenStack codename for version %s' % vers)
|
||||
|
||||
|
||||
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.

    Reverse lookup of openstack_codenames; calls error_out() when the
    codename is unknown.
    '''
    for version, name in openstack_codenames.iteritems():
        if name == codename:
            return version
    # Bug fix: corrected garbled error text ('Code not derive').
    e = 'Could not derive OpenStack version for '\
        'codename: %s' % codename
    error_out(e)
|
||||
|
||||
|
||||
def get_os_codename_package(pkg):
    '''Derive OpenStack release codename from an installed package.

    Parses 'dpkg -l' output for pkg and maps its version prefix to a
    codename. Swift packages use the swift_codenames table (swift is
    versioned independently of OpenStack). Calls error_out() when the
    package is missing or its version is unknown.
    '''
    cmd = ['dpkg', '-l', pkg]
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        e = 'Could not derive OpenStack version from package that is not '\
            'installed; %s' % pkg
        error_out(e)

    vers = None
    for line in output.split('\n'):
        # 'ii' rows are installed packages: 'ii  <name>  <version>  ...'
        if line.startswith('ii'):
            fields = line.split()
            if fields[1] == pkg:
                vers = fields[2]

    if not vers:
        e = 'Could not determine version of installed package: %s' % pkg
        error_out(e)

    # Fix: removed a redundant pre-truncation of vers that was
    # immediately overwritten in both branches below.
    try:
        if 'swift' in pkg:
            vers = vers[:5]
            return swift_codenames[vers]
        else:
            vers = vers[:6]
            return openstack_codenames[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
|
||||
|
||||
|
||||
def configure_installation_source(rel):
    '''Configure apt installation source.

    rel is the charm's openstack-origin setting: 'distro', 'ppa:...',
    a 'deb ...' line (optionally '|<keyid>'), or 'cloud:<series>-<release>'.
    Writes sources files / adds repositories as a side effect.
    '''

    def _import_key(id):
        # Fetch a repository signing key from the Ubuntu keyserver.
        cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
              "--recv-keys %s" % id
        try:
            subprocess.check_call(cmd.split(' '))
        except:
            error_out("Error importing repo key %s" % id)

    if rel == 'distro':
        # Nothing to configure; install from the main archive.
        return
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        # Optional '|<keyid>' suffix carries the repo signing key.
        l = len(rel.split('|'))
        if l == 2:
            src, key = rel.split('|')
            juju_log("Importing PPA key from keyserver for %s" % src)
            _import_key(key)
        elif l == 1:
            src = rel
        else:
            error_out("Invalid openstack-release: %s" % rel)

        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        # Cloud Archive pocket, e.g. 'cloud:precise-folsom/updates'.
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        # The pocket's series component must match the running release.
        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if ca_rel == 'folsom/staging':
            # staging is just a regular PPA.
            cmd = 'add-apt-repository -y ppa:ubuntu-cloud-archive/folsom-staging'
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom'
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        _import_key(CLOUD_ARCHIVE_KEY_ID)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
|
||||
|
|
@ -1 +0,0 @@
|
|||
swift-proxy-relations
|
|
@ -1 +0,0 @@
|
|||
swift-proxy-relations
|
|
@ -1 +0,0 @@
|
|||
swift-proxy-relations
|
|
@ -1 +0,0 @@
|
|||
swift-proxy-relations
|
|
@ -0,0 +1,167 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
import os
|
||||
import utils
|
||||
import sys
|
||||
import shutil
|
||||
import uuid
|
||||
from subprocess import check_call
|
||||
|
||||
import lib.openstack_common as openstack
|
||||
import swift_utils as swift
|
||||
|
||||
def install():
    '''install hook: configure apt, install packages, and lay down all
    initial swift-proxy configuration, rings and the ring-distribution
    webserver directory.'''
    # Configure the requested installation source, then resolve which
    # OpenStack release it provides.
    src = utils.config_get('openstack-origin')
    if src != 'distro':
        openstack.configure_installation_source(src)
        check_call(['apt-get', 'update'])
    rel = openstack.get_os_codename_install_source(src)

    pkgs = swift.determine_packages(rel)
    utils.install(*pkgs)

    # /etc/swift must exist and be owned by the swift user before any
    # configuration is written into it.
    uid, gid = swift.swift_user()
    conf_dir = os.path.dirname(swift.SWIFT_CONF)
    if not os.path.isdir(conf_dir):
        os.mkdir(conf_dir, 0750)
    os.chown(conf_dir, uid, gid)

    swift.ensure_swift_dir()

    # initialize swift configs.
    # swift.conf hash
    ctxt = {
        'swift_hash': swift.get_swift_hash()
    }
    with open(swift.SWIFT_CONF, 'w') as conf:
        conf.write(swift.render_config(swift.SWIFT_CONF, ctxt))

    # swift-proxy.conf
    swift.write_proxy_config()

    # memcached.conf
    ctxt = { 'proxy_ip': utils.get_host_ip() }
    with open(swift.MEMCACHED_CONF, 'w') as conf:
        conf.write(swift.render_config(swift.MEMCACHED_CONF, ctxt))

    # generate or setup SSL certificate
    swift.configure_ssl()

    # initialize new storage rings.
    # each item is a (ring name, builder path) pair; the builder file is
    # created with the configured ring parameters.
    for ring in swift.SWIFT_RINGS.iteritems():
        swift.initialize_ring(ring[1],
                              utils.config_get('partition-power'),
                              utils.config_get('replicas'),
                              utils.config_get('min-hours'))

    # configure a directory on webserver for distributing rings.
    if not os.path.isdir(swift.WWW_DIR):
        os.mkdir(swift.WWW_DIR, 0755)
    os.chown(swift.WWW_DIR, uid, gid)
    swift.write_apache_config()
|
||||
|
||||
|
||||
def keystone_joined(relid=None):
    '''Advertise this proxy's endpoint URLs and requested operator roles
    on the identity-service relation.'''
    hostname = utils.unit_get('private-address')
    port = utils.config_get('bind-port')
    proto = 'https' if utils.config_get('use-https') == 'yes' else 'http'
    admin_url = '%s://%s:%s' % (proto, hostname, port)
    # Public/internal endpoints carry the tenant id template for swift.
    internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
    utils.relation_set(service='swift',
                       region=utils.config_get('region'),
                       public_url=public_url,
                       internal_url=internal_url,
                       admin_url=admin_url,
                       requested_roles=utils.config_get('operator-roles'),
                       rid=relid)
|
||||
|
||||
|
||||
def keystone_changed():
    '''Re-render the proxy configuration when identity-service data changes.'''
    swift.write_proxy_config()
|
||||
|
||||
|
||||
def balance_rings():
    '''handle doing ring balancing and distribution.

    Rebalances each ring, publishes the resulting .ring.gz files via the
    webserver directory, and notifies storage peers that new rings are
    available before restarting the proxy.
    '''
    new_ring = False
    for ring in swift.SWIFT_RINGS.itervalues():
        if swift.balance_ring(ring):
            utils.juju_log('INFO', 'Balanced ring %s' % ring)
            new_ring = True
    if not new_ring:
        # Nothing changed; avoid a needless broadcast/restart.
        return

    # Publish the freshly balanced ring files for storage nodes to fetch.
    for ring in swift.SWIFT_RINGS.keys():
        f = '%s.ring.gz' % ring
        shutil.copyfile(os.path.join(swift.SWIFT_CONF_DIR, f),
                        os.path.join(swift.WWW_DIR, f))

    msg = 'Broadcasting notification to all storage nodes that new '\
          'ring is ready for consumption.'
    utils.juju_log('INFO', msg)

    # Relative path under the webserver docroot.
    www_dir = swift.WWW_DIR.split('/var/www/')[1]
    # Fresh uuid per broadcast so peers see the relation data change.
    trigger = uuid.uuid4()
    swift_hash = swift.get_swift_hash()
    # notify storage nodes that there is a new ring to fetch.
    for relid in utils.relation_ids('swift-proxy'):
        utils.relation_set(rid=relid, swift_hash=swift_hash,
                           www_dir=www_dir, trigger=trigger)
    swift.proxy_control('restart')
|
||||
|
||||
def proxy_changed():
    '''swift-proxy relation changed: register a storage node's devices.

    Waits until the remote unit has published all required settings,
    adds each advertised device to every ring, then rebalances when
    enough nodes have joined.
    '''
    node_settings = {
        'ip': utils.get_host_ip(utils.relation_get('private-address')),
        'zone': utils.relation_get('zone'),
        'account_port': utils.relation_get('account_port'),
        'object_port': utils.relation_get('object_port'),
        'container_port': utils.relation_get('container_port'),
    }
    # allow for multiple devs per unit, passed along as a : separated list
    # Bug fix: 'device' was read after the readiness check and split
    # unconditionally, crashing with AttributeError when the remote unit
    # had not yet set it. It now participates in the readiness check.
    # (Also removed three unused account/object/container-ring-port
    # config reads.)
    devices = utils.relation_get('device')
    if devices is None or None in node_settings.itervalues():
        utils.juju_log('INFO', 'proxy_changed: Relation not ready.')
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    # Grant new node access to rings via apache.
    swift.write_apache_config()

    for dev in devices.split(':'):
        node_settings['device'] = dev
        for ring in swift.SWIFT_RINGS.itervalues():
            if not swift.exists_in_ring(ring, node_settings):
                swift.add_to_ring(ring, node_settings)

    if swift.should_balance([r for r in swift.SWIFT_RINGS.itervalues()]):
        balance_rings()
|
||||
|
||||
def proxy_broken():
    '''Refresh apache ring-access config when a storage node departs.'''
    swift.write_apache_config()
|
||||
|
||||
def config_changed():
    '''Propagate charm config changes: refresh keystone endpoint data on
    any existing identity-service relations and re-render proxy config.'''
    for relid in utils.relation_ids('identity-service') or []:
        keystone_joined(relid)
    swift.write_proxy_config()
|
||||
|
||||
# Map Juju hook names to their python handlers.
hooks = {
    'install': install,
    'config-changed': config_changed,
    'identity-service-relation-joined': keystone_joined,
    'identity-service-relation-changed': keystone_changed,
    'swift-proxy-relation-changed': proxy_changed,
    'swift-proxy-relation-broken': proxy_broken,
}

# Dispatch to the handler for the hook this script was invoked as.
utils.do_hooks(hooks)

sys.exit(0)
|
Binary file not shown.
|
@ -1,215 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -u
|
||||
# For openssl cert generation
|
||||
USE_HTTPS=$(config-get use-https)
|
||||
COUNTRY=$(config-get country)
|
||||
STATE=$(config-get state)
|
||||
LOCALE=$(config-get locale)
|
||||
COMMON_NAME=$(config-get common-name)
|
||||
PPA=$(config-get swift-release)
|
||||
BINDPORT=$(config-get bind-port)
|
||||
WORKERS=$(config-get workers)
|
||||
AUTHTYPE=$(config-get auth-type)
|
||||
KEYSTONE_AUTH_HOST=$(config-get keystone-auth-host)
|
||||
KEYSTONE_AUTH_PORT=$(config-get keystone-auth-port)
|
||||
KEYSTONE_AUTH_PROTOCOL=$(config-get keystone-auth-protocol)
|
||||
KEYSTONE_ADMIN_TENANT_NAME=$(config-get keystone-admin-tenant-name)
|
||||
KEYSTONE_ADMIN_USER=$(config-get keystone-admin-user)
|
||||
KEYSTONE_ADMIN_PASSWORD=$(config-get keystone-admin-password)
|
||||
|
||||
# Used in proxy-server.conf. Run one worker per cpu core by default.
|
||||
CORES=$(cat /proc/cpuinfo | grep processor | wc -l)
|
||||
[ "$WORKERS" = "0" ] && WORKERS="$CORES"
|
||||
|
||||
# TODO: Need to use different addresses for internal swift traffic
|
||||
# as this the only security measure in place is network isolation
|
||||
PROXY_LOCAL_NET_IP=`dig +short $(unit-get private-address)`
|
||||
|
||||
# Use apache2 to distribute ring config until there is support
|
||||
# for file xfer in juju
|
||||
PACKAGES="swift swift-proxy memcached apache2"
|
||||
if [ "$AUTHTYPE" = "keystone" ]; then
|
||||
PACKAGES="$PACKAGES python-keystone"
|
||||
fi
|
||||
WWW_DIR="/var/www/swift-rings"
|
||||
SWIFT_HASH_FILE="/var/lib/juju/swift-hash-path.conf"
|
||||
|
||||
# Ring configuration
|
||||
PARTITION_POWER=$(config-get partition-power)
|
||||
REPLICAS=$(config-get replicas)
|
||||
MIN_HOURS=$(config-get min-hours)
|
||||
|
||||
# generate the swift hash to be used for salting URLs of objects.
|
||||
# TODO: its important this is never lost, find out some way of getting
|
||||
# it off the server and into a sys admins INBOX?
|
||||
if [[ ! -e $SWIFT_HASH_FILE ]] ; then
|
||||
juju-log "swift-proxy: Generating a new SWIFT_HASH in $SWIFT_HASH_FILE"
|
||||
echo $(od -t x8 -N 8 -A n </dev/random) >$SWIFT_HASH_FILE
|
||||
fi
|
||||
|
||||
function set_swift_hash {
|
||||
# TODO: Do this with augeas and put in a utility function for use elsewhere
|
||||
cat >/etc/swift/swift.conf <<EOF
|
||||
[swift-hash]
|
||||
# random unique string that can never change (DO NOT LOSE)
|
||||
swift_hash_path_suffix = `cat $SWIFT_HASH_FILE`
|
||||
EOF
|
||||
}
|
||||
|
||||
function create_proxy_conf {
|
||||
SWIFT_DEB_VERSION="$(dpkg-query -W -f='${Version}' 'swift-proxy')"
|
||||
cat >/etc/swift/proxy-server.conf <<EOF
|
||||
[DEFAULT]
|
||||
EOF
|
||||
if [ "$USE_HTTPS" = "1" ]; then
|
||||
cat >>/etc/swift/proxy-server.conf <<EOF
|
||||
cert_file = /etc/swift/cert.crt
|
||||
key_file = /etc/swift/cert.key
|
||||
EOF
|
||||
fi
|
||||
cat >>/etc/swift/proxy-server.conf <<EOF
|
||||
bind_port = $BINDPORT
|
||||
workers = $WORKERS
|
||||
user = swift
|
||||
|
||||
EOF
|
||||
if [ "$AUTHTYPE" = "keystone" ]; then
|
||||
if [ "${SWIFT_DEB_VERSION:0:3}" = "1.7" ]; then
|
||||
SIGNING_DIR_LINE="signing_dir = /etc/swift"
|
||||
SWIFT3_LINE="use = egg:swift3#swift3"
|
||||
else
|
||||
SIGNING_DIR_LINE=""
|
||||
SWIFT3_LINE="use = egg:swift#swift3"
|
||||
fi
|
||||
cat >>/etc/swift/proxy-server.conf <<EOF
|
||||
[pipeline:main]
|
||||
pipeline = healthcheck cache swift3 s3token authtoken keystone proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
allow_account_management = true
|
||||
account_autocreate = true
|
||||
|
||||
[filter:keystone]
|
||||
paste.filter_factory = keystone.middleware.swift_auth:filter_factory
|
||||
operator_roles = admin, swiftaccess
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
# Delaying the auth decision is required to support token-less
|
||||
# usage for anonymous referrers ('.r:*') or for tempurl/formpost
|
||||
# middleware.
|
||||
delay_auth_decision = 1
|
||||
auth_port = $KEYSTONE_AUTH_PORT
|
||||
auth_host = $KEYSTONE_AUTH_HOST
|
||||
auth_protocol = $KEYSTONE_AUTH_PROTOCOL
|
||||
admin_tenant_name = $KEYSTONE_ADMIN_TENANT_NAME
|
||||
admin_user = $KEYSTONE_ADMIN_USER
|
||||
admin_password = $KEYSTONE_ADMIN_PASSWORD
|
||||
$SIGNING_DIR_LINE
|
||||
|
||||
[filter:swift3]
|
||||
$SWIFT3_LINE
|
||||
|
||||
[filter:s3token]
|
||||
paste.filter_factory = keystone.middleware.s3_token:filter_factory
|
||||
auth_port = $KEYSTONE_AUTH_PORT
|
||||
auth_host = $KEYSTONE_AUTH_HOST
|
||||
auth_protocol = $KEYSTONE_AUTH_PROTOCOL
|
||||
|
||||
EOF
|
||||
else
|
||||
cat >>/etc/swift/proxy-server.conf <<EOF
|
||||
[pipeline:main]
|
||||
pipeline = healthcheck cache tempauth proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
allow_account_management = true
|
||||
|
||||
[filter:tempauth]
|
||||
use = egg:swift#tempauth
|
||||
user_system_root = testpass .admin https://$PROXY_LOCAL_NET_IP:8080/v1/AUTH_system
|
||||
|
||||
EOF
|
||||
fi
|
||||
cat >>/etc/swift/proxy-server.conf <<EOF
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
memcache_servers = $PROXY_LOCAL_NET_IP:11211
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
function initialize_ring {
|
||||
# $1 is ring name.
|
||||
# $PARTITION_POWER, $REPLICAS, $MIN_HOURS from config above
|
||||
swift-ring-builder /etc/swift/$1.builder \
|
||||
create $PARTITION_POWER $REPLICAS $MIN_HOURS
|
||||
}
|
||||
|
||||
function get_zone {
|
||||
# a hack to assign units to zones until config is taken care of
|
||||
# in juju
|
||||
zone_file="/var/run/juju/swift-zone"
|
||||
checked_in="/var/run/juju/checked-in"
|
||||
if [[ -e $checked_in ]] ; then
|
||||
# changed relation seems to run twice? dont get new zone if
|
||||
# we just got one
|
||||
cat $checked_in | grep $JUJU_REMOTE_UNIT >/dev/null
|
||||
if [[ $? == 0 ]] ; then
|
||||
ZONE=$(cat $checked_in | grep $JUJU_REMOTE_UNIT | cut -d, -f2)
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
if [[ ! -e $zone_file ]] ; then
|
||||
echo 1 > $zone_file
|
||||
fi
|
||||
ZONE=$(cat $zone_file)
|
||||
echo "$JUJU_REMOTE_UNIT,$ZONE" >>$checked_in
|
||||
if [[ $ZONE == $REPLICAS ]] ; then
|
||||
echo 1 >$zone_file
|
||||
return 0
|
||||
fi
|
||||
echo $[$ZONE+1] >$zone_file
|
||||
}
|
||||
|
||||
function add_to_ring {
|
||||
juju-log "swift-proxy: Updating $1 ring. Adding $IP:$PORT, zone $ZONE, device $DEVICE"
|
||||
swift-ring-builder /etc/swift/$1.builder add \
|
||||
z$ZONE-$IP:$PORT/$DEVICE 100
|
||||
rc=$?
|
||||
if [[ "$rc" == "0" ]] ; then
|
||||
juju-log "Added to ring: $IP:$PORT, zone $ZONE, device $DEVICE"
|
||||
return 0
|
||||
fi
|
||||
juju-log "swift-proxy: Failed to add to ring."
|
||||
return 1
|
||||
}
|
||||
|
||||
function exists_in_ring {
|
||||
swift-ring-builder /etc/swift/$i.builder \
|
||||
search z$ZONE-$IP:$PORT/$DEVICE
|
||||
}
|
||||
|
||||
function rebalance_ring {
|
||||
juju-log "Rebalancing ring $1"
|
||||
swift-ring-builder /etc/swift/$i.builder rebalance
|
||||
return $?
|
||||
}
|
||||
|
||||
function add_ppa {
|
||||
# Don't configure PPA, install from archive.
|
||||
[[ $PPA == "distro" ]] && return 0
|
||||
if [ "${PPA:0:4}" = "deb " ]; then
|
||||
PPA_URL="$PPA"
|
||||
else
|
||||
. /etc/lsb-release
|
||||
[[ $PPA == "milestone" ]] && PPA="release"
|
||||
PPA_URL="deb http://ppa.launchpad.net/swift-core/$PPA/ubuntu $DISTRIB_CODENAME main"
|
||||
fi
|
||||
add-apt-repository "$PPA_URL" || exit 1
|
||||
}
|
|
@ -1 +1 @@
|
|||
swift-proxy-relations
|
||||
swift-hooks.py
|
|
@ -1 +1 @@
|
|||
swift-proxy-relations
|
||||
swift-hooks.py
|
|
@ -1 +0,0 @@
|
|||
swift-proxy-relations
|
|
@ -1,167 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -u
|
||||
|
||||
FORMULA_DIR=$(dirname $0)
|
||||
ARG0=${0##*/}
|
||||
|
||||
if [[ -e $FORMULA_DIR/swift-proxy-common ]] ; then
|
||||
. $FORMULA_DIR/swift-proxy-common
|
||||
else
|
||||
echo "ERROR: Could not load swift-proxy-common from $FORMULA_DIR"
|
||||
fi
|
||||
|
||||
function install_hook {
|
||||
### CANONICAL-SPECIFIC BEGIN ###
|
||||
# MAAS preseed is forcing apt to proxy through the deb caching proxy
|
||||
# on the MAAS server. This is preventing us from getting to
|
||||
# cloud-archive. Stop it.
|
||||
rm -f /etc/apt/apt.conf
|
||||
### CANONICAL-SPECIFIC END ###
|
||||
|
||||
apt-get -y --force-yes install python-software-properties || exit 1
|
||||
add_ppa
|
||||
apt-get update
|
||||
for i in $PACKAGES ; do
|
||||
DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes install $i
|
||||
done
|
||||
|
||||
SWIFT_DEB_VERSION="$(dpkg-query -W -f='${Version}' 'swift-proxy')"
|
||||
# We are shipping swift-plugin-s3 here until it becomes available in
|
||||
# ubuntu-cloud.archive precise-updates/folsom
|
||||
if [ "${SWIFT_DEB_VERSION:0:3}" = "1.7" ]; then
|
||||
dpkg -i "$(dirname $0)/swift-plugin-s3_1.0.0~git201200618-0ubuntu1_all.deb"
|
||||
fi
|
||||
|
||||
mkdir -p /etc/swift
|
||||
set_swift_hash || exit 1
|
||||
create_proxy_conf
|
||||
mkdir $WWW_DIR
|
||||
chown www-data:www-data $WWW_DIR
|
||||
if [ "$USE_HTTPS" = "1" ]; then
|
||||
if [[ ! -e /etc/swift/cert.crt ]] ; then
|
||||
openssl req -new -x509 -nodes \
|
||||
-out /etc/swift/cert.crt \
|
||||
-keyout /etc/swift/cert.key \
|
||||
-subj "/C=$COUNTRY/ST=$STATE/L=$LOCALE/CN=$COMMON_NAME"
|
||||
fi
|
||||
fi
|
||||
perl -pi -e "s/-l 127.0.0.1/-l $PROXY_LOCAL_NET_IP/" /etc/memcached.conf
|
||||
service memcached restart
|
||||
echo "swift-proxy-node - install: Initializing rings"
|
||||
for i in account container object ; do initialize_ring $i ; done
|
||||
}
|
||||
|
||||
function proxy_joined {
|
||||
exit 0
|
||||
}
|
||||
|
||||
function proxy_changed {
|
||||
HOST=`relation-get hostname`
|
||||
DEVICES=`relation-get device`
|
||||
get_zone
|
||||
[[ -z $ZONE ]] || [[ -z $HOST ]] || [[ -z $DEVICES ]] && \
|
||||
echo "ZONE|HOST|DEVICES not set. Peer not ready? Exit 0 and wait." && exit 0
|
||||
|
||||
if [[ $ZONE -gt $REPLICAS ]] ; then
|
||||
echo "ERROR: Peer $JUJU_REMOTE_UNIT attempting to join a non-existent zone!"
|
||||
exit 1
|
||||
fi
|
||||
PORT=6000
|
||||
RINGS="object container account"
|
||||
IP=$(dig +short $HOST)
|
||||
for i in $RINGS ; do
|
||||
if [[ ! -e /etc/swift/$i.builder ]] ; then
|
||||
echo "Ring $i missing, initializing"
|
||||
initialize_ring $i
|
||||
fi
|
||||
done
|
||||
|
||||
for i in $RINGS ; do
|
||||
for DEVICE in $(echo $DEVICES | sed 's/:/ /g'); do
|
||||
if ! exists_in_ring ; then
|
||||
add_to_ring $i $ZONE $IP $PORT $DEVICE || exit 1
|
||||
else
|
||||
juju-log "swift-proxy: $IP:$PORT/$DEVICE already exists in $ZONE"
|
||||
fi
|
||||
done
|
||||
PORT=$[$PORT+1]
|
||||
done
|
||||
echo "Current peers:"
|
||||
relation-list
|
||||
current_peers=$(relation-list | wc -l)
|
||||
|
||||
# checks to find out if we should rebalance rings
|
||||
balance_file="/var/run/juju/swift-balanced"
|
||||
|
||||
if [[ $current_peers -lt $REPLICAS ]] ; then
|
||||
echo "Not enough peers to maitain minimum $REPLICAS replicas ($current_peers/$REPLICAS), skipping rebalance."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -e $balance_file ]] ; then
|
||||
[[ $(cat $balance_file | cut -d, -f1) == $current_peers ]] && \
|
||||
echo "Ring already balanced since $current_peers present."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Balancing rings"
|
||||
for i in $RINGS ; do
|
||||
rebalance_ring $i || exit 1
|
||||
done
|
||||
|
||||
chown -R swift:swift /etc/swift
|
||||
|
||||
stamp=`date +%Y%M%d-%H%M%S`
|
||||
export_dir="$WWW_DIR/$stamp"
|
||||
echo "$current_peers,$stamp" > $balance_file
|
||||
|
||||
# rings have been balanced, push out new rings to nodes via webserver
|
||||
mkdir $export_dir
|
||||
echo "Copying rings to $export_dir for client consumption"
|
||||
for i in $RINGS ; do
|
||||
cp /etc/swift/$i.ring.gz $export_dir
|
||||
done
|
||||
|
||||
chown -R swift:swift /etc/swift
|
||||
chown -R www-data $WWW_DIR
|
||||
|
||||
relation-set update_url="http://$(unit-get private-address)/swift-rings/$stamp"
|
||||
relation-set swift_hash=$(cat $SWIFT_HASH_FILE)
|
||||
|
||||
swift-init proxy status || swift-init proxy start
|
||||
}
|
||||
|
||||
function proxy_broken {
|
||||
# remove all ring configuration on broken
|
||||
rm -rf /etc/swift/*.ring.gz
|
||||
rm -rf /etc/swift/*.builder
|
||||
rm -rf /etc/swift/backups
|
||||
rm -rf /var/run/juju/swift-balanced
|
||||
rm -rf /var/run/juju/checked-in
|
||||
rm -rf /var/run/juju/swift-zone
|
||||
rm -rf /var/www/swift-rings
|
||||
}
|
||||
|
||||
function object-store_joined {
|
||||
# until we use keystone or another real auth system,
|
||||
# just return a tempauth user from config.
|
||||
USER=$(cat /etc/swift/proxy-server.conf | grep user_system_root | awk '{ print $1 }')
|
||||
USER=${USER##*_}
|
||||
PASSWORD=$(cat /etc/swift/proxy-server.conf | grep user_system_root | cut -d= -f2 | awk '{ print $1 }')
|
||||
URL=https://$(unit-get private-address):8080/auth/v1.0
|
||||
relation-set user=$USER password=$PASSWORD url=$URL
|
||||
}
|
||||
|
||||
[[ -d /etc/swift ]] && chown -R swift /etc/swift
|
||||
|
||||
juju-log "swift-proxy: Firing hook $ARG0"
|
||||
case $ARG0 in
|
||||
"install") install_hook ;;
|
||||
"start"|"stop") exit 0 ;;
|
||||
"swift-proxy-relation-joined") proxy_joined ;;
|
||||
"swift-proxy-relation-changed") proxy_changed ;;
|
||||
"swift-proxy-relation-broken") proxy_broken ;;
|
||||
"object-store-relation-joined") object-store_joined ;;
|
||||
"object-store-relation-changed") exit 0 ;;
|
||||
esac
|
||||
|
|
@ -0,0 +1,348 @@
|
|||
import os
|
||||
import pwd
|
||||
import subprocess
|
||||
import lib.openstack_common as openstack
|
||||
import utils
|
||||
|
||||
# Various config files that are managed via templating.
|
||||
SWIFT_HASH_FILE='/var/lib/juju/swift-hash-path.conf'
SWIFT_CONF = '/etc/swift/swift.conf'
SWIFT_PROXY_CONF = '/etc/swift/proxy-server.conf'
SWIFT_CONF_DIR = os.path.dirname(SWIFT_CONF)
MEMCACHED_CONF = '/etc/memcached.conf'
APACHE_CONF = '/etc/apache2/conf.d/swift-rings'

# Apache-served directory from which storage nodes fetch ring files.
WWW_DIR = '/var/www/swift-rings'

# Ring name -> swift-ring-builder file for each of the three rings.
SWIFT_RINGS = {
    'account': '/etc/swift/account.builder',
    'container': '/etc/swift/container.builder',
    'object': '/etc/swift/object.builder'
}

# SSL material used when use-https is enabled.
SSL_CERT = '/etc/swift/cert.crt'
SSL_KEY = '/etc/swift/cert.key'

# Essex packages
BASE_PACKAGES = [
    'swift',
    'swift-proxy',
    'memcached',
    'apache2',
    'python-keystone',
]

# Folsom-specific packages
FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
|
||||
|
||||
def proxy_control(action):
    '''utility to work around swift-init's bad RCs.

    action is one of 'start', 'stop', 'restart'. swift-init's status
    return code (0 == running, 1 == not running) is consulted first so
    each action is only issued when it makes sense, and start/restart
    are skipped entirely while rings are unbalanced.
    '''
    def _cmd(action):
        return ['swift-init', 'proxy-server', action]

    # Query current state; returncode 0 means the proxy is running.
    p = subprocess.Popen(_cmd('status'), stdout=subprocess.PIPE)
    p.communicate()
    status = p.returncode
    if action == 'stop':
        if status == 1:
            # already stopped
            return
        elif status == 0:
            return subprocess.check_call(_cmd('stop'))

    # the proxy will not start unless there are balanced rings, gzip'd in /etc/swift
    missing=False
    for k in SWIFT_RINGS.keys():
        if not os.path.exists(os.path.join(SWIFT_CONF_DIR, '%s.ring.gz' % k)):
            missing = True
    if missing:
        utils.juju_log('INFO', 'Rings not balanced, skipping %s.' % action)
        return

    if action == 'start':
        if status == 0:
            # already running
            return
        elif status == 1:
            return subprocess.check_call(_cmd('start'))
    elif action == 'restart':
        if status == 0:
            return subprocess.check_call(_cmd('restart'))
        elif status == 1:
            # not running; a plain start is the correct 'restart'.
            return subprocess.check_call(_cmd('start'))
|
||||
|
||||
def swift_user(username='swift'):
    '''Return the (uid, gid) of the given system user.

    Bug fix: the username parameter was previously ignored and the
    lookup hard-coded to 'swift'.
    '''
    user = pwd.getpwnam(username)
    return (user.pw_uid, user.pw_gid)
|
||||
|
||||
|
||||
def ensure_swift_dir(conf_dir=os.path.dirname(SWIFT_CONF)):
    '''Ensure conf_dir (default /etc/swift) exists, mode 0750, owned by
    the swift user.'''
    if not os.path.isdir(conf_dir):
        os.mkdir(conf_dir, 0750)
    uid, gid = swift_user()
    os.chown(conf_dir, uid, gid)
|
||||
|
||||
|
||||
def determine_packages(release):
    '''determine what packages are needed for a given OpenStack release

    Returns the package list for known releases, None otherwise.
    '''
    release_packages = {
        'essex': BASE_PACKAGES,
        'folsom': FOLSOM_PACKAGES,
    }
    return release_packages.get(release)
|
||||
|
||||
|
||||
def render_config(config_file, context):
    '''write out config using templates for a specific openstack release.'''
    # Templates live in per-release subdirectories of TEMPLATES_DIR,
    # keyed by the codename of the installed swift package.
    release = openstack.get_os_codename_package('python-swift')
    template_name = os.path.basename(config_file)
    release_dir = os.path.join(utils.TEMPLATES_DIR, release)
    return utils.render_template(template_name, context, release_dir)
|
||||
|
||||
|
||||
def get_swift_hash():
    '''Return the swift hash-path suffix, generating and persisting a
    new random value on first use.

    Bug fix: the /dev/random file handle was previously never closed.
    '''
    if os.path.isfile(SWIFT_HASH_FILE):
        with open(SWIFT_HASH_FILE, 'r') as hashfile:
            swift_hash = hashfile.read().strip()
    else:
        # 8 random bytes rendered as hex, same form the old bash charm used.
        cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n']
        with open('/dev/random', 'r') as rand:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand)
            swift_hash = p.communicate()[0].strip()
        # Persist so the hash survives restarts (it must never change).
        with open(SWIFT_HASH_FILE, 'w') as hashfile:
            hashfile.write(swift_hash)
    return swift_hash
|
||||
|
||||
|
||||
def get_keystone_auth():
    '''return standard keystone auth credentials, either from config or the
    identity-service relation. user-specified config is given priority
    over an existing relation. Returns None if no complete set of
    credentials can be found.
    '''
    auth_type = utils.config_get('auth-type')
    auth_host = utils.config_get('keystone-auth-host')
    admin_user = utils.config_get('keystone-admin-user')
    # BUG FIX: this previously re-read 'keystone-admin-user', so the
    # service password was silently set to the admin username.
    admin_password = utils.config_get('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host
        and admin_user and admin_password):
        utils.juju_log('INFO', 'Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': utils.config_get('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': utils.config_get('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': utils.config_get('keystone-admin-tenant-name')
        }
        return ks_auth

    for relid in utils.relation_ids('identity-service'):
        utils.juju_log('INFO',
                       'Using Keystone configuration from identity-service.')
        for unit in utils.relation_list(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': 'http',
                'keystone_host': utils.relation_get('private-address',
                                                    unit, relid),
                'auth_port': utils.relation_get('auth_port', unit, relid),
                'service_user': utils.relation_get('service_username',
                                                   unit, relid),
                'service_password': utils.relation_get('service_password',
                                                       unit, relid),
                'service_tenant': utils.relation_get('service_tenant',
                                                     unit, relid),
                'service_port': utils.relation_get('service_port',
                                                   unit, relid),
                'admin_token': utils.relation_get('admin_token', unit, relid),
            }
            # Only trust the relation once every setting is present.
            if None not in ks_auth.itervalues():
                return ks_auth
    return None
|
||||
|
||||
|
||||
def write_proxy_config():
    '''Render and install the swift proxy configuration, then restart
    the proxy service.'''
    workers = utils.config_get('workers')
    if workers == '0':
        # A configured value of 0 means one worker per CPU.
        import multiprocessing
        workers = multiprocessing.cpu_count()

    ctxt = {
        'proxy_ip': utils.get_host_ip(),
        'bind_port': utils.config_get('bind-port'),
        'workers': workers,
        'operator_roles': utils.config_get('operator-roles')
    }

    if utils.config_get('use-https') == 'no':
        ctxt['ssl'] = False
    else:
        ctxt['ssl'] = True
        ctxt['ssl_cert'] = SSL_CERT
        ctxt['ssl_key'] = SSL_KEY

    ks_auth = get_keystone_auth()
    if ks_auth:
        utils.juju_log('INFO', 'Enabling Keystone authentication.')
        # BUG FIX: previously 'ctxt = (ctxt.items() + ks_auth.items())'
        # rebound ctxt to a *list* of tuples, breaking template
        # rendering.  Merge the keystone settings into the dict instead.
        ctxt.update(ks_auth)

    with open(SWIFT_PROXY_CONF, 'w') as conf:
        conf.write(render_config(SWIFT_PROXY_CONF, ctxt))

    proxy_control('restart')
|
||||
|
||||
def configure_ssl():
    '''Generate a self-signed certificate/key pair when HTTPS is
    enabled and no certificate material exists yet.'''
    # TODO: expand to cover setting up user-specified certificates.
    if utils.config_get('use-https') != 'yes':
        return
    if os.path.isfile(SSL_CERT) or os.path.isfile(SSL_KEY):
        return
    subject = '/C=%s/ST=%s/L=%s/CN=%s' % (utils.config_get('country'),
                                          utils.config_get('state'),
                                          utils.config_get('locale'),
                                          utils.config_get('common-name'))
    subprocess.check_call(['openssl', 'req', '-new', '-x509', '-nodes',
                           '-out', SSL_CERT, '-keyout', SSL_KEY,
                           '-subj', subject])
|
||||
|
||||
|
||||
def _load_builder(path):
    '''Load a swift RingBuilder from a pickled builder file.

    Lifted from /usr/bin/swift-ring-builder.  BUG FIX: the fallback
    path for very old pickles referenced the undefined names 'modules'
    (should be sys.modules) and 'argv[1]' (should be the given path),
    so it raised NameError instead of loading the ring.
    '''
    from swift.common.ring import RingBuilder
    import cPickle as pickle
    try:
        builder = pickle.load(open(path, 'rb'))
        if not hasattr(builder, 'devs'):
            # Plain dict pickle: rebuild a RingBuilder around it.
            builder_dict = builder
            builder = RingBuilder(1, 1, 1)
            builder.copy_from(builder_dict)
    except ImportError:  # Happens with really old builder pickles
        sys.modules['swift.ring_builder'] = \
            sys.modules['swift.common.ring.builder']
        builder = RingBuilder(1, 1, 1)
        builder.copy_from(pickle.load(open(path, 'rb')))
    for dev in builder.devs:
        if dev and 'meta' not in dev:
            dev['meta'] = ''
    return builder
|
||||
|
||||
|
||||
def _write_ring(ring, ring_path):
|
||||
import cPickle as pickle
|
||||
pickle.dump(ring.to_dict(), open(ring_path, 'wb'), protocol=2)
|
||||
|
||||
|
||||
|
||||
|
||||
def ring_port(ring_path, node):
    '''determine correct port from relation settings for a given ring file.'''
    for ring_type in ('account', 'object', 'container'):
        if ring_type in ring_path:
            return node['%s_port' % ring_type]
|
||||
|
||||
|
||||
def initialize_ring(path, part_power, replicas, min_hours):
    '''Create and persist a brand-new swift ring with the given
    partition power, replica count and minimum hours between moves.'''
    from swift.common.ring import RingBuilder
    builder = RingBuilder(part_power, replicas, min_hours)
    _write_ring(builder, path)
|
||||
|
||||
def exists_in_ring(ring_path, node):
    '''Return True if the given node is already a device in the ring.

    NOTE: mutates *node* by setting its 'port' key from the relation
    settings appropriate to this ring file.
    '''
    from swift.common.ring import RingBuilder, Ring
    ring = _load_builder(ring_path).to_dict()
    node['port'] = ring_port(ring_path, node)

    for dev in ring['devs']:
        # Compare only the keys the ring device and the candidate node
        # have in common; a match on that shared subset means the node
        # is already registered.
        d = [(i, dev[i]) for i in dev if i in node]
        n = [(i, node[i]) for i in node if i in dev]
        if sorted(d) == sorted(n):

            msg = 'Node already exists in ring (%s).' % ring_path
            utils.juju_log('INFO', msg)
            return True

    return False
|
||||
|
||||
|
||||
def add_to_ring(ring_path, node):
    '''Register a new storage device in the given ring and persist it.'''
    from swift.common.ring import RingBuilder, Ring
    builder = _load_builder(ring_path)
    port = ring_port(ring_path, node)

    # New device id is one past the number of existing devices.
    existing = builder.to_dict()['devs']
    dev_id = 0
    if existing:
        dev_id = len([d['id'] for d in existing])

    new_dev = {
        'id': dev_id,
        'zone': node['zone'],
        'ip': node['ip'],
        'port': port,
        'device': node['device'],
        'weight': 100,
        'meta': '',
    }
    builder.add_dev(new_dev)
    _write_ring(builder, ring_path)
    utils.juju_log('INFO', 'Added new device to ring %s: %s' %
                   (ring_path, [k for k in new_dev.iteritems()]))
|
||||
|
||||
|
||||
def determine_zone(policy):
    '''Determine which storage zone a specific machine unit belongs to
    based on the configured storage-zone-distribution policy.

    Returns the zone, or None for unimplemented/unknown policies.

    BUG FIX: 'machine-unit' and unrecognized policies previously fell
    through to 'return zone' with zone unbound (UnboundLocalError).
    '''
    zone = None
    if policy == 'service-unit':
        # Zone is this relation's 1-based position among all
        # swift-proxy relations.
        this_relid = os.getenv('JUJU_RELATION_ID')
        relids = utils.relation_ids('swift-proxy')
        zone = (relids.index(this_relid) + 1)
    elif policy == 'machine-unit':
        # TODO: per-machine-unit zoning is not yet implemented.
        utils.juju_log('ERROR',
                       'machine-unit zone distribution not implemented.')
    elif policy == 'manual':
        zone = utils.relation_get('zone')
    else:
        utils.juju_log('ERROR',
                       'Unknown storage-zone-distribution policy: %s' % policy)
    return zone
|
||||
|
||||
|
||||
def balance_ring(ring_path):
    '''balance a ring. return True if it needs redistribution'''
    # shell out to swift-ring-builder instead, since the balancing code there
    # does a bunch of un-importable validation.
    cmd = ['swift-ring-builder', ring_path, 'rebalance']
    p = subprocess.Popen(cmd)
    p.communicate()
    rc = p.returncode
    if rc == 0:
        return True
    if rc == 1:
        # swift-ring-builder returns 1 on WARNING (ring didn't require balance)
        return False
    # BUG FIX: juju_log takes (severity, message); the severity argument
    # was missing, so the error path raised TypeError instead of logging.
    utils.juju_log('ERROR', 'balance_ring: %s returned %s' % (cmd, rc))
    sys.exit(1)
|
||||
|
||||
def should_balance(rings):
    '''Based on zones vs min. replicas, determine whether or not the rings
    should be balanced during initial configuration.'''
    for ring_path in rings:
        builder = _load_builder(ring_path).to_dict()
        # A ring can only be balanced once it spans at least as many
        # distinct zones as it has replicas.
        distinct_zones = set(d['zone'] for d in builder['devs'])
        if len(distinct_zones) < builder['replicas']:
            return False
    return True
|
||||
|
||||
|
||||
def write_apache_config():
    '''write out /etc/apache2/conf.d/swift-rings with a list of authenticated
    hosts, then reload apache.'''
    utils.juju_log('INFO', 'Updating %s.' % APACHE_CONF)

    # Every related swift-storage unit is allowed to fetch the rings.
    allowed_hosts = []
    for relid in utils.relation_ids('swift-proxy'):
        for unit in utils.relation_list(relid):
            host = utils.relation_get('private-address', unit, relid)
            allowed_hosts.append(host)

    # BUG FIX: removed a leftover '# testing' entry that hard-coded
    # 10.0.0.3 into allowed_hosts, granting that address ring access in
    # every deployment.

    ctxt = {'www_dir': WWW_DIR, 'allowed_hosts': allowed_hosts}
    with open(APACHE_CONF, 'w') as conf:
        conf.write(render_config(APACHE_CONF, ctxt))
    subprocess.check_call(['service', 'apache2', 'reload'])
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
#!/usr/bin/python
# Print the OpenStack release codename (e.g. 'essex', 'folsom')
# corresponding to the currently installed swift-proxy package.
import lib.openstack_common as openstack
pkg = 'swift-proxy'
print openstack.get_os_codename_package(pkg)
|
|
@ -0,0 +1,237 @@
|
|||
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Paul Collins <paul.collins@canonical.com>
|
||||
#
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import socket
|
||||
import sys
|
||||
|
||||
|
||||
def do_hooks(hooks):
    '''Dispatch to the hook function named after the calling script.

    BUG FIX: the try block previously wrapped the hook *call*, so a
    KeyError raised inside a hook was swallowed and misreported as an
    unknown hook.  Only the table lookup is guarded now.
    '''
    hook = os.path.basename(sys.argv[0])

    try:
        hook_func = hooks[hook]
    except KeyError:
        juju_log('INFO',
                 "This charm doesn't know how to handle '{}'.".format(hook))
    else:
        hook_func()
|
||||
|
||||
|
||||
def install(*pkgs):
    '''Install the given packages non-interactively via apt-get.'''
    cmd = ['apt-get', '-y', 'install'] + list(pkgs)
    subprocess.check_call(cmd)
|
||||
|
||||
# Directory (relative to the charm root) holding jinja2 templates.
TEMPLATES_DIR = 'templates'

# jinja2 and dnspython are runtime dependencies of this charm; install
# the distro packages on the fly if they are missing from the unit.
try:
    import jinja2
except ImportError:
    install('python-jinja2')
    import jinja2

try:
    import dns.resolver
    import dns.ipv4
except ImportError:
    install('python-dnspython')
    import dns.resolver
    import dns.ipv4
|
||||
|
||||
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    '''Render the named jinja2 template from template_dir with the
    given context dict and return the result as a string.'''
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir)
    )
    return env.get_template(template_name).render(context)
|
||||
|
||||
# sources.list stanza for the Ubuntu Cloud Archive; the placeholder is
# filled with a pocket value from CLOUD_ARCHIVE_POCKETS below.
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""

# Maps a 'cloud:' origin suffix to the corresponding archive pocket.
CLOUD_ARCHIVE_POCKETS = {
    'folsom': 'precise-updates/folsom',
    'folsom/updates': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom'
}
|
||||
|
||||
|
||||
def configure_source():
    '''Configure the apt source for OpenStack packages from the
    openstack-origin config option, then refresh the package index.

    Supports 'ppa:' sources, 'cloud:' archive pockets and raw deb lines
    (optionally suffixed with '|<key-id>' for a signing key).
    '''
    source = str(config_get('openstack-origin'))
    if not source:
        return
    if source.startswith('ppa:'):
        subprocess.check_call(['add-apt-repository', source])
    if source.startswith('cloud:'):
        install('ubuntu-cloud-keyring')
        # NOTE(review): assumes the text after 'cloud:' matches a
        # CLOUD_ARCHIVE_POCKETS key (e.g. 'folsom/updates'), but
        # config.yaml documents 'cloud:precise-folsom' -- confirm the
        # expected format, otherwise this raises KeyError.
        pocket = source.split(':')[1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
    if source.startswith('deb'):
        parts = source.split('|')
        if len(parts) == 2:
            (apt_line, key) = parts
            # BUG FIX: '--keyserver keyserver.ubuntu.com' was passed as
            # a single argv element; subprocess does not word-split, so
            # apt-key never received a valid --keyserver option.
            cmd = [
                'apt-key',
                'adv', '--keyserver', 'keyserver.ubuntu.com',
                '--recv-keys', key
            ]
            subprocess.check_call(cmd)
        elif len(parts) == 1:
            apt_line = source

        # NOTE(review): filename 'quantum.list' looks copy-pasted from
        # the quantum charm -- confirm the intended sources.list name.
        with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
            apt.write(apt_line + "\n")
    subprocess.check_call(['apt-get', 'update'])
|
||||
|
||||
# Protocol names accepted by expose()/open-port.
TCP = 'TCP'
UDP = 'UDP'
|
||||
|
||||
|
||||
def expose(port, protocol='TCP'):
    '''Open the given port on this unit via the open-port hook tool.'''
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
|
||||
|
||||
|
||||
def juju_log(severity, message):
    '''Write a message to the juju unit log at the given severity.'''
    subprocess.check_call(['juju-log', '--log-level', severity, message])
|
||||
|
||||
|
||||
def relation_ids(relation):
    '''Return the ids of all established relations of the given name.'''
    cmd = ['relation-ids', relation]
    return subprocess.check_output(cmd).split()  # IGNORE:E1103
|
||||
|
||||
|
||||
def relation_list(rid):
    '''Return the remote units participating in relation *rid*.'''
    cmd = ['relation-list', '-r', rid]
    return subprocess.check_output(cmd).split()  # IGNORE:E1103
|
||||
|
||||
|
||||
def relation_get(attribute, unit=None, rid=None):
    '''Read a relation setting via relation-get.

    Returns None when the value is unset (empty output).
    '''
    cmd = ['relation-get']
    if rid:
        cmd.extend(['-r', rid])
    cmd.append(attribute)
    if unit:
        cmd.append(unit)
    value = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    if value == "":
        return None
    return value
|
||||
|
||||
|
||||
def relation_set(**kwargs):
    '''Write relation settings via relation-set.

    Pass rid=<relation-id> to target a specific relation; all other
    keyword arguments become key=value settings.
    '''
    cmd = ['relation-set']
    settings = []
    for key, value in kwargs.items():
        if key == 'rid':
            if value:
                cmd.extend(['-r', value])
        else:
            settings.append('{}={}'.format(key, value))
    subprocess.check_call(cmd + settings)
|
||||
|
||||
|
||||
def unit_get(attribute):
    '''Read a unit setting via unit-get; returns None when unset.'''
    value = subprocess.check_output(['unit-get',
                                     attribute]).strip()  # IGNORE:E1103
    if value == "":
        return None
    return value
|
||||
|
||||
|
||||
def config_get(attribute):
    '''Return the charm config value for *attribute*, or None when the
    option does not exist.'''
    cmd = ['config-get', '--format', 'json']
    raw = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    cfg = json.loads(raw)

    try:
        return cfg[attribute]
    except KeyError:
        return None
|
||||
|
||||
def get_unit_hostname():
    '''Return this machine's hostname.'''
    hostname = socket.gethostname()
    return hostname
|
||||
|
||||
|
||||
def get_host_ip(hostname=None):
    '''Resolve *hostname* (default: this unit's private-address) to an
    IPv4 address.

    Returns the input unchanged if it is already a dotted-quad, the
    first DNS A record otherwise, or None on NXDOMAIN.

    BUG FIX: the default was previously unit_get('private-address')
    evaluated once at import time, which shelled out to a hook tool
    during module import and cached a stale address for the process
    lifetime.  It is now resolved per call.
    '''
    if hostname is None:
        hostname = unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        try:
            answers = dns.resolver.query(hostname, 'A')
            if answers:
                return answers[0].address
        except dns.resolver.NXDOMAIN:
            pass
    return None
|
||||
|
||||
|
||||
def restart(*services):
    '''Restart each of the given system services in turn.'''
    for svc in services:
        subprocess.check_call(['service', svc, 'restart'])
|
||||
|
||||
|
||||
def stop(*services):
    '''Stop each of the given system services in turn.'''
    for svc in services:
        subprocess.check_call(['service', svc, 'stop'])
|
||||
|
||||
|
||||
def start(*services):
    '''Start each of the given system services in turn.'''
    for svc in services:
        subprocess.check_call(['service', svc, 'start'])
|
|
@ -1,4 +1,3 @@
|
|||
ensemble: formula
|
||||
name: swift-proxy
|
||||
maintainer: Adam Gandelman <adamg@canonical.com>
|
||||
summary: "Swift proxy node"
|
||||
|
@ -13,3 +12,5 @@ provides:
|
|||
requires:
|
||||
swift-storage:
|
||||
interface: swift
|
||||
identity-service:
|
||||
interface: keystone
|
||||
|
|
Loading…
Reference in New Issue