Support the Ceph backend for Cinder
Change-Id: I12a04138c6c0df3df8fe93e55fecf9b6905ad57b
This commit is contained in:
parent
5e97eba19b
commit
215596a214
|
@ -19,9 +19,12 @@ import yaml
|
|||
import random
|
||||
import string
|
||||
import uuid
|
||||
import subprocess
|
||||
from oslo_log import log as logging
|
||||
from daisy import i18n
|
||||
from Crypto.PublicKey import RSA
|
||||
import daisy.registry.client.v1.api as registry
|
||||
import daisy.api.backends.common as daisy_cmn
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -102,6 +105,113 @@ def add_role_to_inventory(file_path, config_data):
|
|||
host_sequence = host_sequence + 1
|
||||
|
||||
|
||||
def update_kolla_globals_yml(date, path='/etc/kolla/globals.yml'):
    """Merge the given settings into kolla's globals.yml in place.

    :param date: dict of kolla settings to merge; existing keys are
                 overwritten. NOTE(review): the name is kept for
                 backward compatibility -- it is config data, not a date.
    :param path: YAML file to update; defaults to kolla's globals.yml
                 (parameterized so the merge logic is testable).
    """
    # safe_load forbids arbitrary-object construction on file content and
    # behaves identically to yaml.load for a plain key/value config file.
    # 'or {}' guards against an empty file, which safe_load returns as None.
    with open(path, 'r') as f:
        kolla_config = yaml.safe_load(f.read()) or {}
    kolla_config.update(date)
    # 'with' closes the file on exit; the original's explicit f.close()
    # calls inside the with-blocks were redundant and have been removed.
    with open(path, 'w') as f:
        f.write(yaml.dump(kolla_config, default_flow_style=False))
|
||||
|
||||
|
||||
def _del_general_params(param):
|
||||
del param['created_at']
|
||||
del param['updated_at']
|
||||
del param['deleted']
|
||||
del param['deleted_at']
|
||||
del param['id']
|
||||
|
||||
|
||||
def _get_services_disk(req, role):
    """Fetch service-disk records for *role* from the registry.

    Each record has its role_id translated to the role's name and its
    DB bookkeeping fields stripped before being returned.
    """
    query = {'filters': {'role_id': role['id']}}
    disks = registry.list_service_disk_metadata(req.context, **query)
    for disk in disks:
        if disk.get('role_id', None):
            disk['role_id'] = role['name']
        _del_general_params(disk)
    return disks
|
||||
|
||||
|
||||
def config_lvm_for_cinder(config_data):
    """Enable the cinder LVM backend in kolla and, for an all-in-one
    deployment, back a 'cinder-volumes' VG with a loop device on the
    single storage node.

    :param config_data: cluster config dict; 'Storage_ips' lists the
                        storage-network IPs of the storage nodes.
    """
    lvm_config = {'enable_cinder': 'yes',
                  'enable_cinder_backend_lvm': 'yes',
                  'cinder_volume_group': 'cinder-volumes'}
    update_kolla_globals_yml(lvm_config)
    storage_ip_list = config_data.get('Storage_ips')
    if len(storage_ip_list) == 1:
        # BUGFIX: log message previously said "ceph backend" on the
        # LVM path; this branch enables the LVM backend.
        LOG.info(_("this is all in one environment "
                   "to enable LVM backend"))
        storage_ip = storage_ip_list[0]
        fp = '/var/log/daisy/api.log'
        # NOTE(review): commands are built by string interpolation and run
        # through a shell; storage_ip comes from internal cluster config,
        # but keep it that way -- do not feed user input here.
        cmd = 'ssh -o StrictHostKeyChecking=no %s ' \
              '"dd if=/dev/zero of=/var/lib/cinder_data.img ' \
              'bs=1G count=20"' % storage_ip
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'ssh -o StrictHostKeyChecking=no %s ' \
              '"losetup --find --show /var/lib/cinder_data.img"' % storage_ip
        obj = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               shell=True)
        # BUGFIX: strip the trailing newline from losetup's output so the
        # device path substitutes cleanly into pvcreate/vgcreate below.
        dev_name = obj.stdout.read().decode('utf8').strip()
        # reap the child so it does not linger as a zombie
        obj.wait()
        cmd = 'ssh -o StrictHostKeyChecking=no %s ' \
              '"pvcreate %s"' % (storage_ip, dev_name)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'ssh -o StrictHostKeyChecking=no %s ' \
              '"vgcreate cinder-volumes %s"' % (storage_ip, dev_name)
        daisy_cmn.subprocess_call(cmd, fp)
        LOG.info(_("execute all four commands on "
                   "storage node %s ok!" % storage_ip))
|
||||
|
||||
|
||||
def config_ceph_for_cinder(config_data, disk):
    """Enable the cinder CEPH backend in kolla's globals.yml and label
    the OSD bootstrap partition on every storage node.

    :param config_data: cluster config dict; 'Storage_ips' lists the
                        storage-network IPs of the storage nodes.
    :param disk: service_disk record whose 'partition' field names the
                 block device to label (presumably e.g. /dev/sdb --
                 TODO confirm against the service_disk schema).
    """
    ceph_config = {'enable_cinder': 'yes',
                   'enable_ceph': 'yes'}
    update_kolla_globals_yml(ceph_config)
    disk_name = disk.get('partition', None)
    storage_ip_list = config_data.get('Storage_ips')
    # only label when there are more than two storage nodes
    # (NOTE(review): presumably because ceph wants >= 3 OSD hosts --
    # confirm; with 2 or fewer nodes this silently does nothing)
    if len(storage_ip_list) > 2:
        LOG.info(_("this is CEPH backend environment \
                   with %s nodes" % len(storage_ip_list)))
        for storage_ip in storage_ip_list:
            fp = '/var/log/daisy/api.log'
            # GPT-label the disk and create one partition named
            # KOLLA_CEPH_OSD_BOOTSTRAP, the marker kolla's ceph
            # bootstrap looks for when picking OSD devices
            cmd = 'ssh -o StrictHostKeyChecking=no %s \
                "parted %s -s -- mklabel gpt mkpart\
                KOLLA_CEPH_OSD_BOOTSTRAP 1 -1" ' % \
                (storage_ip, disk_name)
            daisy_cmn.subprocess_call(cmd, fp)
            # read the partition table back for logging; check_output
            # raises CalledProcessError if parted fails remotely
            exc_result = subprocess.check_output(
                'ssh -o StrictHostKeyChecking=no %s \
                "parted %s print" ' % (storage_ip, disk_name),
                shell=True, stderr=subprocess.STDOUT)
            LOG.info(_("parted label is %s" % exc_result))
            LOG.info(_("execute labeled command successfully\
                       on %s node" % storage_ip))
|
||||
|
||||
|
||||
def enable_cinder_backend(req, cluster_id, config_data):
    """Configure the cinder backend (LVM or CEPH) for the cluster.

    Collects cinder service disks attached to the CONTROLLER_LB role
    and dispatches on each disk's protocol_type.
    """
    query = {'filters': {'cluster_id': cluster_id}}
    roles = registry.get_roles_detail(req.context, **query)
    collected_disks = []
    for role in roles:
        if role['name'] == 'CONTROLLER_LB':
            collected_disks.extend(_get_services_disk(req, role))
    for disk in collected_disks:
        if disk.get('service', None) != 'cinder':
            continue
        protocol = disk.get('protocol_type', None)
        if protocol == 'LVM':
            config_lvm_for_cinder(config_data)
        elif protocol == 'CEPH':
            config_ceph_for_cinder(config_data, disk)
|
||||
|
||||
|
||||
# generate kolla's globals.yml file
|
||||
def update_globals_yml(config_data):
|
||||
LOG.info(_("begin to update kolla's globals.yml file..."))
|
||||
|
|
|
@ -198,7 +198,7 @@ def get_cluster_kolla_config(req, cluster_id):
|
|||
version_flag = True
|
||||
kolla_openstack_version = line.strip()
|
||||
openstack_version = kolla_openstack_version.split(
|
||||
": ")[1]
|
||||
": ")[1].strip('\"')
|
||||
LOG.info(_("openstack version is %s" % openstack_version))
|
||||
docker_registry_ip = _get_local_ip()
|
||||
docker_registry = docker_registry_ip + ':4000'
|
||||
|
@ -279,12 +279,15 @@ def get_cluster_kolla_config(req, cluster_id):
|
|||
return (kolla_config, mgt_ip_list, host_name_ip_list)
|
||||
|
||||
|
||||
def generate_kolla_config_file(req, cluster_id, kolla_config):
    """Write kolla's config files and enable the cinder backend.

    Updates globals.yml and passwords.yml, builds the inventory, then
    configures the cluster's cinder backend. No-op when kolla_config
    is empty.
    """
    LOG.info(_("generate kolla config..."))
    if not kolla_config:
        return
    config.update_globals_yml(kolla_config)
    config.update_password_yml()
    config.add_role_to_inventory(kolla_file, kolla_config)
    config.enable_cinder_backend(req, cluster_id, kolla_config)
|
||||
|
||||
|
||||
def config_nodes_hosts(host_name_ip_list, host_ip):
|
||||
|
@ -489,7 +492,7 @@ class KOLLAInstallTask(Thread):
|
|||
api_cmn.config_network_new(ssh_host_info, 'kolla')
|
||||
time.sleep(20)
|
||||
LOG.info(_("begin to generate kolla config file ..."))
|
||||
generate_kolla_config_file(self.cluster_id, kolla_config)
|
||||
generate_kolla_config_file(self.req, self.cluster_id, kolla_config)
|
||||
LOG.info(_("generate kolla config file in /etc/kolla/ dir..."))
|
||||
(role_id_list, host_id_list, hosts_list) = \
|
||||
kolla_cmn.get_roles_and_hosts_list(self.req, self.cluster_id)
|
||||
|
|
|
@ -48,9 +48,9 @@ SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
|
|||
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
|
||||
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
|
||||
SERVICE_DISK_SERVICE = ('db', 'glance', 'db_backup', 'mongodb', 'nova',
|
||||
'provider')
|
||||
'provider', 'cinder')
|
||||
DISK_LOCATION = ('local', 'share', 'share_cluster')
|
||||
PROTOCOL_TYPE = ('FIBER', 'ISCSI', 'CEPH')
|
||||
PROTOCOL_TYPE = ('FIBER', 'ISCSI', 'CEPH', 'LVM')
|
||||
FC_DRIVER = ('brocade', 'cisco')
|
||||
FC_ZONEING_POLICY = ('initiator-target', 'initiator')
|
||||
CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips', 'pools',
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import MetaData, Table, Column, String
|
||||
|
||||
# Column added to service_disks by this migration; defined at module
# scope so upgrade() and downgrade() operate on the same Column object.
partition = Column('partition', String(255))
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Add the 'partition' column to the service_disks table."""
    print("036 upgrade")
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table('service_disks', meta, autoload=True)
    table.create_column(partition)
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
    """Drop the 'partition' column added by upgrade().

    :param migrate_engine: bound SQLAlchemy engine for the daisy DB.
    """
    print("036 downgrade")
    meta = MetaData()
    meta.bind = migrate_engine
    # CONSISTENCY FIX: the local was named 'roles' (copy-paste from a
    # roles migration) even though it loads service_disks; renamed to
    # match upgrade() above.
    service_disks = Table('service_disks', meta, autoload=True)
    service_disks.drop_column(partition)
|
|
@ -647,6 +647,7 @@ class ServiceDisk(BASE, DaisyBase):
|
|||
data_ips = Column(String(255))
|
||||
size = Column(Integer())
|
||||
protocol_type = Column(String(36))
|
||||
partition = Column(String(255))
|
||||
|
||||
|
||||
class CinderVolume(BASE, DaisyBase):
|
||||
|
|
|
@ -23,7 +23,7 @@ from daisyclient.openstack.common.apiclient import base
|
|||
|
||||
CREATE_SERVICE_DISK_PARAMS = ('service', 'data_ips', 'size',
|
||||
'disk_location', 'role_id', 'lun',
|
||||
'protocol_type')
|
||||
'protocol_type', 'partition')
|
||||
CREATE_CINDER_BACKEND_PARAMS = ('disk_array', 'role_id')
|
||||
CREATE_CINDER_BACKEND_INTER_PARAMS = ('management_ips', 'data_ips',
|
||||
'pools', 'volume_driver',
|
||||
|
|
|
@ -1832,6 +1832,8 @@ def do_discover_host(gc, args):
|
|||
help='mark which volume is used for glance sharing disk.')
|
||||
@utils.arg('--protocol-type', metavar='<PROTOCOL_TYPE>',
|
||||
help='protocol type of share disks')
|
||||
@utils.arg('--partition', metavar='<PARTITION>',
|
||||
help='partition name of local disks')
|
||||
def do_service_disk_add(dc, args):
|
||||
""" config services share disk. """
|
||||
|
||||
|
|
Loading…
Reference in New Issue