Import plugin's code
Change-Id: I6c9ea6894e44802a57e5dd6aedc64f230467b5c5
This commit is contained in:
parent
f3ac65ecb3
commit
aefabfa51a
|
@ -0,0 +1,176 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
Ceph Multibackend plugin for Fuel
|
||||
=================================
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
Ceph Multibackend plugin for Fuel extends Mirantis OpenStack functionality by adding
|
||||
support for Ceph backend in Glance using ceph second pool. It adds new role "Ceph Glance
|
||||
Backende" with volume partition assignment. This role can be used ONLY with standart
|
||||
"Ceph OSD" role on node.
|
||||
|
||||
|
||||
Compatible Fuel versions
|
||||
------------------------
|
||||
|
||||
9.0
|
||||
|
||||
|
||||
User Guide
|
||||
----------
|
||||
|
||||
1. Create an environment with the Ceph default image backend for Glance.
|
||||
2. Enable the plugin on the Settings/Storage tab of the Fuel web UI and fill in form
|
||||
fields:
|
||||
* Ceph pool name - name for new ceph pool
|
||||
3. Select new node with roles Ceph OSD *AND* Ceph Glance Backend
|
||||
4. Configure Disks on new node, chose at least one whole disk for role Ceph Glance Backend
|
||||
and one for role Ceph-OSD
|
||||
5. Deploy the environment.
|
||||
|
||||
|
||||
Installation Guide
|
||||
==================
|
||||
|
||||
Ceph Multibackend Plugin for Fuel installation
|
||||
----------------------------------------------
|
||||
|
||||
To install Ceph Multibackend plugin, follow these steps:
|
||||
|
||||
1. Download the plugin
|
||||
git clone https://github.com/openstack/fuel-plugin-ceph-multibackend
|
||||
|
||||
2. Copy the plugin on already installed Fuel Master node; ssh can be used for
|
||||
that. If you do not have the Fuel Master node yet, see
|
||||
[Quick Start Guide](https://software.mirantis.com/quick-start/):
|
||||
|
||||
# scp fuel-plugin-ceph_multibackend-1.7.1-1.noarch.rpm root@<Fuel_master_ip>:/tmp
|
||||
|
||||
3. Log into the Fuel Master node. Install the plugin:
|
||||
|
||||
# cd /tmp
|
||||
# fuel plugins --install fuel-plugin-ceph_multibackend-1.7.1-1.noarch.rpm
|
||||
|
||||
4. Check if the plugin was installed successfully:
|
||||
|
||||
# fuel plugins
|
||||
id | name | version | package_version
|
||||
---|---------------------------------|---------|----------------
|
||||
1 | fuel-plugin-ceph_multibackend | 1.7.1 | 4.0.0
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
| Requirement | Version/Comment |
|
||||
|:---------------------------------|:----------------|
|
||||
| Mirantis OpenStack compatibility | 9.0 |
|
||||
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
Role Ceph Glance Backend can be used only on nodes with active Ceph OSD role.
|
|
@ -0,0 +1,12 @@
|
|||
- name: 'storage:image:ceph2'
|
||||
label: 'Ceph SSD'
|
||||
description: 'Separate osd pool for Ceph Images. Requires Ceph Image Storage option.'
|
||||
requires:
|
||||
- name: 'storage:image:ceph'
|
||||
compatible:
|
||||
- name: 'hypervisor:libvirt:*'
|
||||
- name: 'storage:block:ceph'
|
||||
- name: 'storage:object:ceph'
|
||||
- name: 'storage:image:ceph'
|
||||
- name: 'storage:ephemeral:ceph'
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
include fuel-plugin-ceph_multibackend::ceph_auth
|
||||
notice('MODULAR: fuel-plugin-ceph_multibackend/ceph_auth.pp')
|
|
@ -0,0 +1,2 @@
|
|||
include fuel-plugin-ceph_multibackend::ceph_primary
|
||||
notice('MODULAR: fuel-plugin-ceph_multibackend/ceph_primary.pp')
|
|
@ -0,0 +1,3 @@
|
|||
include fuel-plugin-ceph_multibackend::controller
|
||||
notice('MODULAR: fuel-plugin-ceph_multibackend/controller.pp')
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
notice('MODULAR: fuel-plugin-ceph_multibackend/keys.pp')
|
||||
|
||||
exec { "gather keys":
|
||||
command => "ceph-deploy --ceph-conf ~/ceph.conf gatherkeys localhost",
|
||||
path => "/usr/bin",
|
||||
}
|
|
@ -0,0 +1,2 @@
|
|||
include fuel-plugin-ceph_multibackend::osd
|
||||
notice('MODULAR: fuel-plugin-ceph_multibackend/osd.pp')
|
|
@ -0,0 +1,2 @@
|
|||
include fuel-plugin-ceph_multibackend::osd_id
|
||||
notice('MODULAR: fuel-plugin-ceph_multibackend/osd_id.pp')
|
|
@ -0,0 +1,2 @@
|
|||
include fuel-plugin-ceph_multibackend::regions
|
||||
notice('MODULAR: fuel-plugin-ceph_multibackend/regions.pp')
|
|
@ -0,0 +1,127 @@
|
|||
#
|
||||
# partitions.rb
|
||||
# BY kwilczynski (https://github.com/kwilczynski/facter-facts/blob/master/partitions.rb)
|
||||
# This fact provides an alphabetic list of blocks per disk and/or partition,
|
||||
# partitions per disk and disks.
|
||||
#
|
||||
# We support most of generic SATA and PATA disks, plus Hewlett-Packard
|
||||
# Smart Array naming format ... This also should work for systems running
|
||||
# as Virtual Machine guest at least for Xen and KVM ...
|
||||
#
|
||||
|
||||
if Facter.value(:kernel) == 'Linux'
|
||||
# We store a list of disks (or block devices if you wish) here ...
|
||||
disks = []
|
||||
|
||||
# We store number of blocks per disk and/or partition here ...
|
||||
blocks = {}
|
||||
|
||||
# We store a list of partitions on per-disk basis here ...
|
||||
partitions = Hash.new { |k,v| k[v] = [] }
|
||||
|
||||
#
|
||||
# Support for the following might not be of interest ...
|
||||
#
|
||||
# MMC is Multi Media Card which can be either SD or microSD, etc ...
|
||||
# MTD is Memory Technology Device also known as Flash Memory
|
||||
#
|
||||
exclude = %w(backdev.* dm-\d loop mmcblk mtdblock ram ramzswap)
|
||||
|
||||
#
|
||||
# Modern Linux kernels provide "/proc/partitions" in the following format:
|
||||
#
|
||||
# major minor #blocks name
|
||||
#
|
||||
# 8 0 244198584 sda
|
||||
# 8 1 3148708 sda1
|
||||
# 8 2 123804922 sda2
|
||||
# 8 3 116214210 sda3
|
||||
# 8 4 1028160 sda4
|
||||
#
|
||||
|
||||
# Make regular expression form our patterns ...
|
||||
exclude = Regexp.union(*exclude.collect { |i| Regexp.new(i) })
|
||||
|
||||
#
|
||||
# We utilise rely on "cat" for reading values from entries under "/proc".
|
||||
# This is due to some problems with IO#read in Ruby and reading content of
|
||||
# the "proc" file system that was reported more than once in the past ...
|
||||
#
|
||||
Facter::Util::Resolution.exec('cat /proc/partitions 2> /dev/null').each_line do |line|
|
||||
# Remove bloat ...
|
||||
line.strip!
|
||||
|
||||
# Line of interest should start with a number ...
|
||||
next if line.empty? or line.match(/^[a-zA-Z]+/)
|
||||
|
||||
# We have something, so let us apply our device type filter ...
|
||||
next if line.match(exclude)
|
||||
|
||||
# Only blocks and partitions matter ...
|
||||
block = line.split(/\s+/)[2]
|
||||
partition = line.split(/\s+/)[3]
|
||||
|
||||
if partition.match(/^cciss/)
|
||||
#
|
||||
# Special case for Hewlett-Packard Smart Array which probably
|
||||
# nobody is using any more nowadays anyway ...
|
||||
#
|
||||
partition = partition.split('/')[1]
|
||||
|
||||
if match = partition.match(/^([a-zA-Z0-9]+)[pP][0-9]+/)
|
||||
# Handle the case when "cciss/c0d0p1" is given ...
|
||||
disk = match[1]
|
||||
elsif partition.match(/^[a-zA-Z0-9]+/)
|
||||
# Handle the case when "cciss/c0d0" is given ...
|
||||
disk = partition
|
||||
end
|
||||
else
|
||||
# Take care of any partitions create atop of the
|
||||
# Linux Software RAID decies like e.g. /dev/md0, etc.
|
||||
if match = partition.match(/^(md\d+)/)
|
||||
disk = match[1]
|
||||
else
|
||||
# Everything else ...
|
||||
disk = partition.scan(/^[a-zA-Z]+/)
|
||||
end
|
||||
end
|
||||
|
||||
# Convert back into a string value ...
|
||||
disk = Array(disk).first.to_s
|
||||
|
||||
# We have something rather odd that did not parse at all, so ignore ...
|
||||
next if disk.empty?
|
||||
|
||||
# All disks ... This might even be sda, sdaa, sdab, sdac, etc ...
|
||||
disks << disk
|
||||
|
||||
# Store details about number of blocks per disk and/or partition ...
|
||||
blocks[partition] = block
|
||||
|
||||
# A disk is not a partition, therefore we ignore ...
|
||||
partitions[disk] << partition unless partition == disk
|
||||
end
|
||||
|
||||
Facter.add('disks') do
|
||||
confine :kernel => :linux
|
||||
setcode { disks.sort.uniq.join(',') }
|
||||
end
|
||||
|
||||
blocks.each do |k,v|
|
||||
Facter.add("blocks_#{k}") do
|
||||
confine :kernel => :linux
|
||||
setcode { v }
|
||||
end
|
||||
end
|
||||
|
||||
partitions.each do |k,v|
|
||||
Facter.add("partitions_#{k}") do
|
||||
confine :kernel => :linux
|
||||
|
||||
# To ensure proper sorting order by the interface name ...
|
||||
v = v.sort_by { |i| i.scan(/\d+/).shift.to_i }
|
||||
|
||||
setcode { v.sort.join(',') }
|
||||
end
|
||||
end
|
||||
end
|
|
@ -0,0 +1,10 @@
|
|||
Facter.add(:volume_rbd_pool) do
|
||||
setcode do
|
||||
volume_rbd_pool = Facter::Util::Resolution.exec('rados lspools | grep volume; echo $?')
|
||||
end
|
||||
end
|
||||
Facter.add(:image_rbd_pool) do
|
||||
setcode do
|
||||
image_rbd_pool = Facter::Util::Resolution.exec('rados lspools | grep image; echo $?')
|
||||
end
|
||||
end
|
|
@ -0,0 +1,5 @@
|
|||
Facter.add(:rbd_secret) do
|
||||
setcode do
|
||||
rbd_secret = Facter::Util::Resolution.exec('grep ^rbd_secret_uuid /etc/cinder/cinder.conf | cut -d"=" -f2 | head -1')
|
||||
end
|
||||
end
|
|
@ -0,0 +1,6 @@
|
|||
module Puppet::Parser::Functions
|
||||
newfunction(:get_all_osd_id, :type => :rvalue) do |args|
|
||||
all_osd_id = `find /var/log/lost+found/ -maxdepth 1 | cut -d"/" -f5`
|
||||
return all_osd_id
|
||||
end
|
||||
end
|
|
@ -0,0 +1,21 @@
|
|||
module Puppet::Parser::Functions
|
||||
newfunction(:get_disks, :type => :rvalue, :doc => <<-EOS
|
||||
Return a list of disks (node roles are keys) that have the given node role.
|
||||
example:
|
||||
get_disks_list_by_role($node_volumes, 'cinder')
|
||||
EOS
|
||||
) do |args|
|
||||
disks_metadata, role = args
|
||||
disks = Array.new
|
||||
disks_metadata.each do |disk|
|
||||
disk['volumes'].each do |volume|
|
||||
if volume['name'] == role and volume['size'] != 0 then
|
||||
disks << disk['name']
|
||||
end
|
||||
end
|
||||
end
|
||||
return disks
|
||||
end
|
||||
|
||||
end
|
||||
# vim: set ts=2 sw=2 et :
|
|
@ -0,0 +1,10 @@
|
|||
module Puppet::Parser::Functions
|
||||
newfunction(:get_osd_id, :type => :rvalue) do |args|
|
||||
device = args
|
||||
dev = device.join("")
|
||||
osd_id = `mount | grep #{dev} | cut -d" " -f3 | cut -d"-" -f2`
|
||||
osd_id.delete!("\n")
|
||||
return osd_id
|
||||
end
|
||||
end
|
||||
# vim: set ts=2 sw=2 et :
|
|
@ -0,0 +1,18 @@
|
|||
module Puppet::Parser::Functions
|
||||
newfunction(:get_primary_node, :type => :rvalue, :doc => <<-EOS
|
||||
Return a primary node fqdn that have specific node role.
|
||||
example:
|
||||
get_target_disk($nodes_hash, 'primaray-controller')
|
||||
EOS
|
||||
) do |args|
|
||||
node_hash, role = args
|
||||
noda = Array.new
|
||||
node_hash.each do |node|
|
||||
if node['role'] == role then
|
||||
noda << node['fqdn']
|
||||
end
|
||||
end
|
||||
return noda
|
||||
end
|
||||
end
|
||||
# vim: set ts=2 sw=2 et
|
|
@ -0,0 +1,21 @@
|
|||
module Puppet::Parser::Functions
|
||||
newfunction(:get_target_disk, :type => :rvalue, :doc => <<-EOS
|
||||
Return a list of disks (node roles are keys) that have the given node role.
|
||||
example:
|
||||
get_target_disk($node_volumes, 'cinder')
|
||||
EOS
|
||||
) do |args|
|
||||
disks_metadata, role = args
|
||||
disks = Array.new
|
||||
disks_metadata.each do |disk|
|
||||
disk['volumes'].each do |volume|
|
||||
if volume['name'] == role and volume['size'] != 0 then
|
||||
disks << disk['name']
|
||||
end
|
||||
end
|
||||
end
|
||||
return disks
|
||||
end
|
||||
|
||||
end
|
||||
# vim: set ts=2 sw=2 et :
|
|
@ -0,0 +1,23 @@
|
|||
class fuel-plugin-ceph_multibackend::ceph_auth {
|
||||
|
||||
$plugin_settings = hiera('fuel-plugin-ceph_multibackend')
|
||||
$ssd_pool=$plugin_settings['ceph_pool']
|
||||
Exec { path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' }
|
||||
|
||||
#image_rbd_pool and volume_rbd_pool from facter, in case of fuel > 9.0
|
||||
if $image_rbd_pool == '0' {
|
||||
exec { "Update auth caps for ssd pool":
|
||||
command => "ceph auth caps client.images osd \"allow class-read object_prefix rbd_children, allow rwx pool=$ssd_pool\" mon \"allow r\"",
|
||||
}
|
||||
}
|
||||
if $volume_rbd_pool == '0' {
|
||||
exec { "Update auth caps for volumes client":
|
||||
command => "ceph auth caps client.volumes osd \"allow class-read object_prefix rbd_children, allow rwx pool=$ssd_pool, allow rwx pool=volumes, allow rwx pool=images, allow rwx pool=compute\" mon \"allow r\"",
|
||||
}
|
||||
}
|
||||
|
||||
exec { "Update auth caps for compute client":
|
||||
command => "ceph auth caps client.compute osd \"allow class-read object_prefix rbd_children, allow rwx pool=$ssd_pool, allow rwx pool=volumes, allow rwx pool=images, allow rwx pool=compute\" mon \"allow r\"",
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,96 @@
|
|||
class fuel-plugin-ceph_multibackend::ceph_primary {
|
||||
include ::cinder::params
|
||||
Exec { path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' }
|
||||
|
||||
$access_hash = hiera(access)
|
||||
$pass = $access_hash['password']
|
||||
$user = $access_hash['user']
|
||||
$tenant = $access_hash['tenant']
|
||||
$service_endpoint = hiera(service_endpoint)
|
||||
$management_vip = hiera(management_vip)
|
||||
$auth_uri = "http://$service_endpoint:5000/"
|
||||
$plugin_settings = hiera('fuel-plugin-ceph_multibackend')
|
||||
$ssd_pool=$plugin_settings['ceph_pool']
|
||||
$ssd_pg_num = $plugin_settings['ceph_ssd_pg_num']
|
||||
$HDD_pg_num = $plugin_settings['ceph_HDD_pg_num']
|
||||
$file_ma_crush_new_map_exists = inline_template("<% if File.exist?('/var/log/lost+found/ma-crush-new-map') -%>true<% end -%>")
|
||||
$file_cinder_type_lock_exists = inline_template("<% if File.exist?('/var/log/lost+found/cinder_ceph_type.lock') -%>true<% end -%>")
|
||||
|
||||
if $management_vip == $service_endpoint { #in case of local keystone
|
||||
$region='RegionOne'
|
||||
}
|
||||
else { #in case of detach keystone
|
||||
$region=hiera(region)
|
||||
}
|
||||
|
||||
if $file_ma_crush_new_map_exists == ''
|
||||
{
|
||||
exec { "add HDD region to root":
|
||||
command => "ceph osd crush move HDD root=default",
|
||||
}
|
||||
->
|
||||
exec { "add SSD region to root":
|
||||
command => "ceph osd crush move SSD root=default",
|
||||
}
|
||||
->
|
||||
exec { "Get Crushmap":
|
||||
command => "ceph osd getcrushmap -o /var/log/lost+found/ma-crush-map",
|
||||
}
|
||||
->
|
||||
exec { "Decompile crushmap":
|
||||
command => "crushtool -d /var/log/lost+found/ma-crush-map -o /var/log/lost+found/ma-crush-map.txt"
|
||||
}
|
||||
->
|
||||
file_line { "Adding ssd rule":
|
||||
path => "/var/log/lost+found/ma-crush-map.txt",
|
||||
line => "rule ssd {\n ruleset 255\n type replicated\n min_size 1\n max_size 10\n step take SSD\n step choose firstn 0 type osd\n step emit\n}",
|
||||
after => "# rules",
|
||||
}
|
||||
->
|
||||
file_line { "Adding HDD rule":
|
||||
path => "/var/log/lost+found/ma-crush-map.txt",
|
||||
line => "rule HDD {\n ruleset 254\n type replicated\n min_size 1\n max_size 10\n step take HDD\n step choose firstn 0 type osd\n step emit\n}",
|
||||
after => "# rules",
|
||||
}
|
||||
->
|
||||
exec { "Compile Crushmap":
|
||||
command => "crushtool -c /var/log/lost+found/ma-crush-map.txt -o /var/log/lost+found/ma-crush-new-map",
|
||||
}
|
||||
->
|
||||
exec { "Upload Crushmap":
|
||||
command => "ceph osd setcrushmap -i /var/log/lost+found/ma-crush-new-map",
|
||||
}
|
||||
->
|
||||
exec { "Make ssd pool":
|
||||
command => "ceph osd pool create $ssd_pool $ssd_pg_num",
|
||||
}
|
||||
->
|
||||
exec { "Make HDD pool":
|
||||
command => "ceph osd pool create HDD $HDD_pg_num",
|
||||
}
|
||||
->
|
||||
|
||||
exec { "Set crushmap ruleset for ssd":
|
||||
command => "ceph osd pool set $ssd_pool crush_ruleset 255",
|
||||
}
|
||||
->
|
||||
exec { "Set crushmap ruleset for HDD":
|
||||
command => "ceph osd pool set HDD crush_ruleset 254",
|
||||
}
|
||||
|
||||
if $file_cinder_type_lock_exists == '' {
|
||||
|
||||
exec {"cinder type":
|
||||
command => "cinder --os-username $user --os-password $pass --os-project-name $tenant --os-tenant-name $tenant --os-auth-url $auth_uri --os-region-name $region type-create SSD_volumes_ceph",
|
||||
}->
|
||||
exec{"cinder type-key":
|
||||
command => "cinder --os-username $user --os-password $pass --os-project-name $tenant --os-tenant-name $tenant --os-auth-url $auth_uri --os-region-name $region type-key SSD_volumes_ceph set volume_backend_name=RBD-fast",
|
||||
}
|
||||
file {"/var/log/lost+found/cinder_ceph_type.lock":
|
||||
path => "/var/log/lost+found/cinder_ceph_type.lock",
|
||||
ensure => "file",
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
class fuel-plugin-ceph_multibackend::controller {
|
||||
|
||||
include ::glance::params
|
||||
include ::cinder::params
|
||||
$plugin_settings = hiera('fuel-plugin-ceph_multibackend')
|
||||
$ssd_pool=$plugin_settings['ceph_pool']
|
||||
glance_api_config {
|
||||
"glance_store/rbd_store_pool": value => "$ssd_pool";
|
||||
}
|
||||
Glance_api_config<||> ~> Service['glance-api']
|
||||
|
||||
service { 'glance-api':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
}
|
||||
|
||||
|
||||
file_line{"RBD-fast backend":
|
||||
ensure => present,
|
||||
path => "/etc/cinder/cinder.conf",
|
||||
line => "[RBD-fast]",
|
||||
}
|
||||
->
|
||||
file_line {"enable second volume backend":
|
||||
path => "/etc/cinder/cinder.conf",
|
||||
match => "^enabled_backends = RBD-backend$",
|
||||
line => "enabled_backends = RBD-backend, RBD-fast",
|
||||
}
|
||||
->
|
||||
cinder_config {
|
||||
"RBD-fast/volume_backend_name": value => "RBD-fast";
|
||||
"RBD-fast/rbd_pool": value => "$ssd_pool";
|
||||
"RBD-fast/rbd_user": value => "volumes";
|
||||
"RBD-fast/rbd_secret_uuid": value => "$rbd_secret";
|
||||
"RBD-fast/backend_host": value => "rbd:volumes";
|
||||
"RBD-fast/volume_driver": value => "cinder.volume.drivers.rbd.RBDDriver";
|
||||
"RBD-fast/rbd_ceph_conf": value => "/etc/ceph/ceph.conf";
|
||||
"RBD-backend/rbd_pool": value => "volumes";
|
||||
}
|
||||
|
||||
Cinder_config<||> ~> Service['cinder-volume']
|
||||
Cinder_config<||> ~> Service['cinder-scheduler']
|
||||
service { 'cinder-volume':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
}
|
||||
service { 'cinder-scheduler':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
class fuel-plugin-ceph_multibackend {}
|
|
@ -0,0 +1,31 @@
|
|||
class fuel-plugin-ceph_multibackend::osd {
|
||||
|
||||
Exec { path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' }
|
||||
$node_volumes = hiera('node_volumes', [])
|
||||
$dev = get_target_disk($node_volumes, 'ceph-backend2')
|
||||
$plugin_settings = hiera('fuel-plugin-ceph_multibackend')
|
||||
$file_osd_id_exists = inline_template("<% if File.exist?('/var/log/lost+found/osd_create.lock') -%>true<% end -%>")
|
||||
|
||||
define process_osd {
|
||||
|
||||
$part = "partitions_${name}"
|
||||
$target_part = inline_template("<%= scope.lookupvar(part) %>")
|
||||
$target_part_arr = split($target_part,",")
|
||||
$arr_len=size($target_part_arr)
|
||||
|
||||
exec { "Prepare OSD $name":
|
||||
command => "ceph-deploy --ceph-conf /root/ceph.conf osd prepare localhost:$name$arr_len"
|
||||
}
|
||||
->
|
||||
exec { "Activate OSD $name":
|
||||
command => "ceph-deploy --ceph-conf /root/ceph.conf osd activate localhost:$name$arr_len",
|
||||
tries => 2,
|
||||
}
|
||||
}
|
||||
|
||||
if $file_osd_id_exists == ''
|
||||
{
|
||||
process_osd { $dev : }
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
class fuel-plugin-ceph_multibackend::osd_id {
|
||||
|
||||
Exec { path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' }
|
||||
$node_volumes = hiera('node_volumes', [])
|
||||
$dev = get_target_disk($node_volumes, 'ceph-backend2')
|
||||
$plugin_settings = hiera('fuel-plugin-ceph_multibackend')
|
||||
$file_osd_id_exists = inline_template("<% if File.exist?('/var/log/lost+found/osd_create.lock') -%>true<% end -%>")
|
||||
$node_name=hiera(node_name)
|
||||
|
||||
define crush_osd {
|
||||
|
||||
$osd_id = get_osd_id($name)
|
||||
$node_name=hiera(node_name)
|
||||
|
||||
exec { "Removing ssd $name osd from host HDD bucket":
|
||||
command => "ceph osd crush remove osd.$osd_id $node_name.HDD",
|
||||
}
|
||||
->
|
||||
exec { "Adding ssd $name item to SSD bucket":
|
||||
command => "ceph osd crush add osd.$osd_id 1.0 host=$node_name.SSD",
|
||||
}
|
||||
}
|
||||
|
||||
if $file_osd_id_exists == ''
|
||||
{
|
||||
exec { "Creating new SSD bucket":
|
||||
command => "ceph osd crush add-bucket $node_name.SSD host",
|
||||
}
|
||||
->
|
||||
exec { "Renaming old bucket":
|
||||
command => "ceph osd crush rename-bucket $node_name $node_name.HDD",
|
||||
}
|
||||
->
|
||||
exec { "Adding ssd host to SSD region":
|
||||
command => "ceph osd crush move $node_name.SSD region=SSD",
|
||||
}
|
||||
->
|
||||
exec { "Adding HDD host to HDD region":
|
||||
command => "ceph osd crush move $node_name.HDD region=HDD",
|
||||
}
|
||||
->
|
||||
crush_osd { $dev : }
|
||||
->
|
||||
file {"/var/log/lost+found":
|
||||
ensure => 'directory',
|
||||
}
|
||||
->
|
||||
file {"/var/log/lost+found/osd_create.lock":
|
||||
ensure => 'file',
|
||||
path => "/var/log/lost+found/osd_create.lock",
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
# Creates the top-level SSD and HDD CRUSH 'region' buckets; the per-node
# host buckets are moved into these regions later (osd_id manifest).
class fuel-plugin-ceph_multibackend::regions {

  # Default search path for every exec resource declared in this class.
  Exec { path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' }

  $plugin_settings = hiera('fuel-plugin-ceph_multibackend')

  exec { 'creating SSD region':
    command => 'ceph osd crush add-bucket SSD region',
  } ->
  exec { 'creating HDD region':
    command => 'ceph osd crush add-bucket HDD region',
  }

  # Directory used elsewhere for the plugin's run-once lock files.
  file { '/var/log/lost+found':
    ensure => 'directory',
  }
}
|
|
@ -0,0 +1,73 @@
|
|||
# Fuel deployment tasks for the Ceph-multibackend plugin.
# Ordering: the regions task (primary controller) and the backend-node chain
# (keys -> ceph-storage -> ceph-osd-id) all run in post-deployment; the
# ceph-primary task waits for osd_id on the backend nodes via cross-depends.
- id: fp-ceph_multibackend-regions
  role: ['primary-controller']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/regions.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    timeout: 360
  requires: [post_deployment_start]
  required_for: [ceph_ready_check, upload_cirros]
- id: fp-ceph_multibackend-keys
  role: ['ceph-backend2']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/keys.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    timeout: 300
  requires: [post_deployment_start, ceph_create_pools]
  required_for: [upload_cirros, fp-ceph_multibackend-ceph-storage]
- id: fp-ceph_multibackend-ceph-storage
  role: ['ceph-backend2']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/osd.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    # OSD activation is the slowest step; allow up to 25 minutes.
    timeout: 1500
  requires: [post_deployment_start, ceph_create_pools, fp-ceph_multibackend-keys]
  required_for: [upload_cirros, fp-ceph_multibackend-ceph-osd-id]
- id: fp-ceph_multibackend-ceph-osd-id
  role: ['ceph-backend2']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/osd_id.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    timeout: 360
  requires: [post_deployment_start, ceph_create_pools, fp-ceph_multibackend-ceph-storage, fp-ceph_multibackend-regions]
  required_for: [fp-ceph_multibackend-ceph-primary, upload_cirros]
- id: fp-ceph_multibackend-ceph-primary
  role: ['primary-controller']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/ceph_primary.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    timeout: 360
  requires: [post_deployment_start, ceph_create_pools, fp-ceph_multibackend-ceph-storage, fp-ceph_multibackend-ceph-osd-id]
  required_for: [ceph_ready_check, upload_cirros]
  # Runs on the controller but must wait for the osd-id task that executes
  # on the backend nodes.
  cross-depends:
    - name: fp-ceph_multibackend-ceph-osd-id
      role: ['ceph-backend2']
- id: fp-ceph_multibackend-ceph-controller
  role: ['primary-controller', 'controller']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/controller.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    timeout: 360
  requires: [post_deployment_start, ceph_create_pools]
  required_for: [upload_cirros]
- id: fp-ceph_multibackend-ceph-auth-update
  role: ['primary-controller']
  type: puppet
  version: 2.0.0
  parameters:
    puppet_manifest: puppet/manifests/ceph_auth.pp
    puppet_modules: puppet/modules:/etc/puppet/modules
    timeout: 360
  requires: [post_deployment_start, ceph-compute]
  required_for: [upload_cirros]
|
|
@ -0,0 +1,21 @@
|
|||
# Settings shown on the environment's Storage tab for this plugin.
attributes:
  metadata:
    group: 'storage'
  ceph_pool:
    value: "SSD"
    label: "Ceph pool name"
    description: "Ceph pool name"
    weight: 50
    type: "text"
  ceph_ssd_pg_num:
    # Values kept as strings: Fuel text inputs deliver strings to hiera.
    value: "1024"
    label: "Number of Ceph placement group for SSD pool"
    description: "osd_count*100/replica_count rounded to nearest power of two"
    weight: 51
    type: "text"
  ceph_hdd_pg_num:
    value: "8192"
    label: "Number of Ceph placement group for HDD pool"
    description: "osd_count*100/replica_count rounded to nearest power of two"
    weight: 52
    type: "text"
|
|
@ -0,0 +1,31 @@
|
|||
# Plugin name
name: fuel-plugin-ceph-multibackend
title: Ceph multibackend for Cinder
# Plugin version
version: 1.8.4
# Description
description: Enables Ceph Multibackend in Cinder
# Required fuel version
fuel_version: ['9.0']
# Groups
groups: ['storage::cinder']
# Licenses
licenses: ['Apache License, Version 2.0']
# Authors
authors:
  - Margarita Shakhova <margarita.shakhova@gmail.com>
homepage: https://github.com/openstack/fuel-plugin-ceph-multibackend
# Change `false` to `true` if the plugin can be installed in the environment
# after the deployment.
is_hotpluggable: true

# The plugin is compatible with releases in the list
releases:
  - os: ubuntu
    version: mitaka-9.0
    mode: ['ha']
    deployment_scripts_path: deployment_scripts/
    repository_path: repositories/ubuntu

# Version of plugin package
package_version: '4.0.0'
|
|
@ -0,0 +1,6 @@
|
|||
# Extra node role provided by the plugin: a second Ceph backend built
# from SSD-only OSD nodes.
ceph-backend2:
  name: "Ceph SSD"
  description: "Ceph SSD backend"
  has_primary: false
  public_ip_required: false
  weight: 100
|
|
@ -0,0 +1 @@
|
|||
[]
|
|
@ -0,0 +1,14 @@
|
|||
# Disk partition reserved on 'ceph-backend2' nodes for the SSD backend.
volumes:
  - id: "ceph-backend2"
    type: "partition"
    min_size:
      # At least 2 GB, expressed in MB by the generator.
      generator: "calc_gb_to_mb"
      generator_args: [2]
    label: "Ceph Glance Backend"
    mount: "none"
    file_system: "xfs"

volumes_roles_mapping:
  ceph-backend2:
    # Keep the OS allocation minimal; everything else goes to the backend.
    - {allocate_size: "min", id: "os"}
    - {allocate_size: "all", id: "ceph-backend2"}
|
Loading…
Reference in New Issue