Initial commit. Probably not working.

This commit is contained in:
Kiran Thyagaraja 2017-08-23 23:18:28 +00:00
parent 55e557ffe0
commit 908874894f
7 changed files with 658 additions and 0 deletions

15
defaults/main.yml Normal file
View File

@ -0,0 +1,15 @@
---
# Role defaults for deploying Cinder onto a Kubernetes COE.
coe_host: "https://127.0.0.1:8443"           # Kubernetes API endpoint
kube_context: "kubernetes-admin@kubernetes"  # kubeconfig context to use
config_file: "~/.kube/config"                # path to the kubeconfig file
# Task file included by tasks/main.yml ("<action>.yml", e.g. provision.yml).
# NOTE(review): `action` is also an Ansible task keyword; a more specific
# variable name would be safer.
action: provision
namespace: openstack
# Optional TripleO hieradata translated by tasks/hiera.yml.
hiera_data: {}
hiera_data_file: ''
# Overrides merged into the rendered cinder.conf.
# NOTE(review): hard-coded weak DB credential; override in real deployments
# (ideally sourced from a Secret).
cinder_conf:
  DEFAULT:
    public_bind_host: "0.0.0.0"
  database:
    connection: "mysql+pymysql://root:weakpassword@mariadb:3306/cinder"

88
tasks/hiera.yml Normal file
View File

@ -0,0 +1,88 @@
# Translate TripleO hieradata into oslo.config options via the
# ansible-role-k8s-tripleo role. `schema` maps puppet/hiera keys to
# "<section>.<option>" paths in the generated configuration.
- name: Translate hieradata
  include_role:
    name: 'ansible-role-k8s-tripleo'
  vars:
    hieradata: '{{ hiera_data }}'
    hieradata_file: '{{ hiera_data_file }}'
    schema:
      glance_log_file: DEFAULT.log_file
      glance::api::debug: DEFAULT.debug
      glance::api::workers: DEFAULT.workers
      # Fixed: a duplicate `cinder::api::bind_port` key whose value was a
      # literal address ("0.0.0.0") rather than a schema path was removed;
      # duplicate YAML keys are invalid and silently last-wins in most
      # parsers.
      cinder::api::bind_port: DEFAULT.bind_port
      glance::api::enable_v1_api: DEFAULT.enable_v1_api
      glance::api::enable_v2_api: DEFAULT.enable_v2_api
      glance::api::show_image_direct_url: DEFAULT.show_image_direct_url
      glance::api::show_multiple_locations: DEFAULT.show_multiple_locations
      glance::api::authtoken::auth_uri: keystone_authtoken.auth_uri
      glance::api::authtoken::auth_url: keystone_authtoken.auth_url
      glance::api::authtoken::password: keystone_authtoken.password
      glance::api::authtoken::username: keystone_authtoken.username
      glance::api::authtoken::project: keystone_authtoken.project
      glance::api::os_region_name: glance_store.os_region_name
      glance::api::enable_proxy_headers_parsing: oslo_middleware.enable_proxy_headers_parsing
      glance::api::pipeline: paste_deploy.pipeline
      cinder::notify::rabbitmq::rabbit_userid: oslo_messaging_rabbit.rabbit_userid
      cinder::notify::rabbitmq::rabbit_port: oslo_messaging_rabbit.rabbit_port
      cinder::notify::rabbitmq::rabbit_password: oslo_messaging_rabbit.rabbit_password
      cinder::notify::rabbitmq::rabbit_use_ssl: oslo_messaging_rabbit.rabbit_use_ssl
      cinder::notify::rabbitmq::rabbit_host: oslo_messaging_rabbit.rabbit_host
      # Fixed: was the Heat intrinsic `{get_param: NotificationDriver}`
      # leaked in from a TripleO template (parsed by YAML as a flow
      # mapping, not a schema path); mapped to the oslo notifications
      # driver option like the glance entry below.
      cinder::notify::rabbitmq::notification_driver: oslo_messaging_notifications.driver
      glance::notification_driver: oslo_messaging_notifications.driver
      glance::notification_topics: oslo_messaging_notifications.topics
  # NOTE(review): the lines that previously followed were raw hieradata
  # *values* (IP addresses, passwords, JSON fragments with trailing
  # commas and unbalanced quotes) pasted into the schema. They made this
  # file invalid YAML and do not belong in a key->path mapping, so they
  # were removed. Re-introduce any needed entries as proper
  # "<puppet key>: <section>.<option>" pairs.
1
tasks/main.yml Normal file
View File

@ -0,0 +1 @@
- include: "{{ action }}.yml"

367
tasks/provision.yml Normal file
View File

@ -0,0 +1,367 @@
# Render the role templates to /tmp on the target so they can be slurped
# into ConfigMaps by the tasks below.
- name: Upload config files
  template:
    src: "{{ item }}"
    dest: "/tmp/{{ item }}"
    backup: yes
    # Fixed: quoted -- a bare 0644 is read by YAML as the octal integer
    # 420, which Ansible then misinterprets as a decimal mode.
    mode: "0644"
  with_items:
    - cinder.conf
    - cinder-wsgi.conf
#- include: hiera.yml
#- name: Generate config files
# config_template:
# src: base.conf.j2
# dest: /tmp/glance-api.conf
# config_overrides: '{{glance_config}}'
# config_type: ini
# Slurp the rendered files; the registered results carry base64 content
# that the ConfigMap tasks below decode.
# Fixed: both tasks previously shared the name "Read configs into
# memory", making play output ambiguous.
- name: Read cinder.conf into memory
  slurp:
    src: "/tmp/cinder.conf"
  register: "cinder_conf"
- name: Read cinder-wsgi.conf into memory
  slurp:
    src: "/tmp/cinder-wsgi.conf"
  register: "cinder_wsgi_conf"
# ConfigMap consumed by the cinder-api pod: kolla's config.json (copy
# rules plus launch command) and the two files slurped above.
- name: Create cinder-api configmaps
  # Best effort: do not abort the play if the map already exists.
  ignore_errors: yes
  k8s_v1_config_map:
    name: cinder-api
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    state: present
    debug: yes
    labels:
      service: cinder
    data:
      config.json: |
        {
          "command": "httpd -DFOREGROUND",
          "config_files": [
            {
              "source": "/var/lib/kolla/config_files/cinder.conf",
              "dest": "/etc/cinder/cinder.conf",
              "owner": "cinder",
              "perm": "0600"
            },
            {
              "source": "/var/lib/kolla/config_files/cinder-wsgi.conf",
              "dest": "/etc/httpd/conf.d/cinder-wsgi.conf",
              "owner": "cinder",
              "perm": "0600"
            }
          ],
          "permissions": [
            {
              "path": "/var/lib/cinder",
              "owner": "cinder:cinder",
              "recurse": true
            },
            {
              "path": "/var/log/kolla/cinder",
              "owner": "cinder:cinder",
              "recurse": true
            }
          ]
        }
      cinder.conf: |
        {{ cinder_conf['content'] | b64decode }}
      cinder-wsgi.conf: |
        {{ cinder_wsgi_conf['content'] | b64decode }}
# ConfigMap for the cinder-scheduler pod: kolla config.json plus the
# shared cinder.conf.
- name: Create cinder-scheduler configmaps
  # Best effort: do not abort the play if the map already exists.
  ignore_errors: yes
  k8s_v1_config_map:
    name: cinder-scheduler
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    state: present
    debug: yes
    labels:
      service: cinder-scheduler
    data:
      config.json: |
        {
          "command": "cinder-scheduler --config-file /etc/cinder/cinder.conf",
          "config_files": [
            {
              "source": "/var/lib/kolla/config_files/cinder.conf",
              "dest": "/etc/cinder/cinder.conf",
              "owner": "cinder",
              "perm": "0600"
            }
          ],
          "permissions": [
            {
              "path": "/var/lib/cinder",
              "owner": "cinder:cinder",
              "recurse": true
            },
            {
              "path": "/var/log/kolla/cinder",
              "owner": "cinder:cinder",
              "recurse": true
            }
          ]
        }
      cinder.conf: |
        {{ cinder_conf['content'] | b64decode }}
# ConfigMap for the cinder-volume pod: kolla config.json plus the shared
# cinder.conf.
- name: Create cinder-volume configmaps
  # Best effort: do not abort the play if the map already exists.
  ignore_errors: yes
  k8s_v1_config_map:
    name: cinder-volume
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    state: present
    debug: yes
    labels:
      service: cinder-volume
    data:
      config.json: |
        {
          "command": "/usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf",
          "config_files": [
            {
              "source": "/var/lib/kolla/config_files/cinder.conf",
              "dest": "/etc/cinder/cinder.conf",
              "owner": "cinder",
              "perm": "0600"
            }
          ],
          "permissions": [
            {
              "path": "/var/lib/cinder",
              "owner": "cinder:cinder",
              "recurse": true
            },
            {
              "path": "/var/log/kolla/cinder",
              "owner": "cinder:cinder",
              "recurse": true
            }
          ]
        }
      cinder.conf: |
        {{ cinder_conf['content'] | b64decode }}
# One-shot Job that creates the `cinder` database using the Ansible
# mysql_db module bundled in kolla-toolbox.
- name: Create Cinder Database
  k8s_v1_job:
    name: cinder-create-db
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    restart_policy: OnFailure
    containers:
      - name: cinder-create-db
        image: tripleoupstream/centos-binary-kolla-toolbox:latest
        image_pull_policy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - ansible localhost -m mysql_db -a
            "login_host='mariadb'
            login_port='3306'
            login_user='root'
            login_password='$DATABASE_PASSWORD'
            name='cinder'"
        env:
          # NOTE(review): hard-coded credential; should be sourced from a
          # Secret or role variable.
          - name: DATABASE_PASSWORD
            value: "mysqlpass"
          - name: "TZ"
            value: "UTC"
# One-shot Job that creates the `cinder` DB user and grants it full
# privileges on the cinder database.
- name: Create Cinder User in the database
  k8s_v1_job:
    name: cinder-create-user
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    restart_policy: OnFailure
    containers:
      # Fixed: container was misnamed `cinder-create-db`, shadowing the
      # actual create-db Job's container and confusing pod logs.
      - name: cinder-create-user
        image: tripleoupstream/centos-binary-kolla-toolbox:latest
        image_pull_policy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - ansible localhost -m mysql_user -a
            "login_host='mariadb'
            login_port='3306'
            login_user='root'
            login_password='$DATABASE_PASSWORD'
            name='cinder'
            password='$CINDER_DATABASE_PASSWORD'
            host='%'
            priv='cinder.*:ALL'
            append_privs='yes'"
        env:
          # NOTE(review): hard-coded credentials; should come from Secrets.
          - name: CINDER_DATABASE_PASSWORD
            value: "cinderpass"
          - name: DATABASE_PASSWORD
            value: "mysqlpass"
          - name: "TZ"
            value: "UTC"
# Cinder Scheduler won't start without cinder tables present, so run
# `cinder-manage db sync` as a one-shot Job before the deployments.
- name: Create Cinder Tables
  k8s_v1_job:
    # Fixed: this Job was named `cinder-create-db`, colliding with the
    # "Create Cinder Database" Job above -- Kubernetes object names must
    # be unique per kind within a namespace, so the sync Job could never
    # be created alongside it.
    name: cinder-db-sync
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    restart_policy: OnFailure
    containers:
      - name: cinder-db-sync
        image: tripleoupstream/centos-binary-cinder-api:latest
        image_pull_policy: IfNotPresent
        command: ["sh", "-c"]
        args:
          - cinder-manage db sync
        env:
          - name: "TZ"
            value: "UTC"
        volume_mounts:
          - name: kolla-config
            mountPath: /var/lib/kolla/config_files/
    volumes:
      # Reuses the scheduler ConfigMap for its cinder.conf.
      - name: kolla-config
        config_map:
          name: cinder-scheduler
# ClusterIP Service exposing cinder-api inside the cluster on 8776.
- name: create cinder-api service
  k8s_v1_service:
    name: cinder-api
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    labels:
      app: cinder-api
      service: cinder-api
    selector:
      app: cinder-api
      service: cinder-api
    ports:
      - name: cinder-api
        port: 8776
        target_port: 8776
# Cinder API
- name: Create Cinder-api Deployment
  k8s_v1beta1_deployment:
    name: cinder-api
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    labels:
      app: cinder-api
      service: cinder-api
    replicas: 1
    spec_template_metadata_labels:
      app: cinder-api
      service: cinder-api
    containers:
      # Fixed: the container item previously opened with a stray empty
      # `env:` key, duplicating the real `env:` list below. Duplicate
      # mapping keys are invalid YAML; most parsers keep only one of the
      # two, silently dropping the other.
      - name: cinder-api
        image: tripleoupstream/centos-binary-cinder-api:latest
        ports:
          - container_port: 8776
            protocol: TCP
        env:
          - name: TZ
            value: UTC
          - name: KOLLA_CONFIG_STRATEGY
            value: COPY_ALWAYS
          - name: KOLLA_KUBERNETES
            value: ""
        volume_mounts:
          - name: kolla-config
            mountPath: /var/lib/kolla/config_files/
    volumes:
      - name: kolla-config
        config_map:
          name: cinder-api
# Cinder Scheduler
- name: create cinder-scheduler deployment
  k8s_v1beta1_deployment:
    name: cinder-scheduler
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    labels:
      app: cinder-scheduler
      service: cinder-scheduler
    replicas: 1
    spec_template_metadata_labels:
      app: cinder-scheduler
      service: cinder-scheduler
    containers:
      - image: tripleoupstream/centos-binary-cinder-scheduler:latest
        name: cinder-scheduler
        env:
          - name: TZ
            value: UTC
          - name: KOLLA_CONFIG_STRATEGY
            value: COPY_ALWAYS
          - name: KOLLA_KUBERNETES
            value: ""
        volume_mounts:
          - name: kolla-config
            mountPath: /var/lib/kolla/config_files/
    volumes:
      - name: kolla-config
        config_map:
          name: cinder-scheduler
# Cinder Volume (runs under the `openstack` service account).
- name: Create Cinder Volume Deployment
  k8s_v1beta1_deployment:
    name: cinder-volume
    host: "{{ coe_host }}"
    context: "{{ kube_context }}"
    kubeconfig: "{{ config_file }}"
    namespace: "{{ namespace }}"
    service_account_name: openstack
    labels:
      app: cinder-volume
      service: cinder-volume
    replicas: 1
    spec_template_metadata_labels:
      app: cinder-volume
      service: cinder-volume
    containers:
      - name: cinder-volume
        image: tripleoupstream/centos-binary-cinder-volume:latest
        image_pull_policy: IfNotPresent
        # Fixed: was camelCase `volumeMounts` -- the only task in this
        # file not using the module's snake_case `volume_mounts`
        # argument, so the mount was likely ignored.
        volume_mounts:
          - name: kolla-config
            mountPath: /var/lib/kolla/config_files/
        env:
          - name: TZ
            value: UTC
          - name: KOLLA_CONFIG_STRATEGY
            value: COPY_ALWAYS
          - name: KOLLA_KUBERNETES
            value: ""
    volumes:
      - name: kolla-config
        config_map:
          name: cinder-volume

17
templates/cinder-wsgi.conf Normal file
View File

@ -0,0 +1,17 @@
# Apache vhost serving cinder-api via WSGI on port 8776 (kolla layout:
# copied to /etc/httpd/conf.d/cinder-wsgi.conf by config.json).
Listen 0.0.0.0:8776
TraceEnable off
<VirtualHost *:8776>
# 5 worker processes, 1 thread each, running as the cinder user.
WSGIDaemonProcess cinder-api processes=5 threads=1 user=cinder group=cinder display-name=%{GROUP} python-path=/usr/lib/python2.7/site-packages
WSGIProcessGroup cinder-api
WSGIScriptAlias / /var/www/cgi-bin/cinder/cinder-wsgi
WSGIApplicationGroup %{GLOBAL}
# Forward the Authorization header to the WSGI app (keystone tokens).
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/kolla/cinder/cinder-api.log
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog /var/log/kolla/cinder/cinder-api-access.log logformat
</VirtualHost>

53
templates/cinder.conf Normal file
View File

@ -0,0 +1,53 @@
# Cinder configuration template, rendered to /tmp/cinder.conf and then
# slurped verbatim into the cinder-* ConfigMaps (tasks/provision.yml).
# Fixed: the file previously began with a stray `cinder.conf: |` YAML
# wrapper line; since the template is embedded as a raw file, that line
# would have ended up inside the rendered /etc/cinder/cinder.conf.
[DEFAULT]
glance_api_servers=none
glance_api_version=2
enable_v3_api=True
storage_availability_zone=nova
default_availability_zone=nova
auth_strategy=noauth
enabled_backends=tripleo_iscsi
nova_catalog_info=compute:nova:internalURL
nova_catalog_admin_info=compute:nova:adminURL
scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler
osapi_volume_listen=0.0.0.0
osapi_volume_workers=2
log_dir=/var/log/cinder
# NOTE(review): hard-coded rabbit/DB credentials below match the role's
# weak defaults; override for real deployments.
transport_url=rabbit://guest:rabbit_weakpassword@rabbitmq:5672/?ssl=0
rpc_backend=rabbit
control_exchange=openstack
api_paste_config=/etc/cinder/api-paste.ini
[database]
connection=mysql+pymysql://cinder:cinderpass@mariadb:3306/cinder
max_retries=-1
db_max_retries=-1
[oslo_concurrency]
lock_path=/var/lib/cinder/tmp
[oslo_messaging_notifications]
driver=messagingv2
transport_url=rabbit://guest:rabbit_weakpassword@rabbitmq:5672/?ssl=0
[oslo_messaging_rabbit]
ssl=False
rabbit_port=5672
rabbit_userid=guest
rabbit_password=rabbit_weakpassword
heartbeat_timeout_threshold=60
[oslo_middleware]
enable_proxy_headers_parsing=True
[oslo_policy]
policy_file=/etc/cinder/policy.json
# LVM/iSCSI backend matching `enabled_backends` above.
[tripleo_iscsi]
volume_backend_name=tripleo_iscsi
volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver
iscsi_ip_address=iscsid
iscsi_helper=lioadm
volumes_dir=/var/lib/cinder/cinder-volumes
iscsi_protocol=iscsi

117
templates/lvm.conf Normal file
View File

@ -0,0 +1,117 @@
# LVM configuration shipped to the cinder-volume container for the
# LVMVolumeDriver backend (see templates/cinder.conf).
config {
checks = 1
abort_on_errors = 0
profile_dir = "/etc/lvm/profile"
}
# Device scanning and detection behaviour.
devices {
dir = "/dev"
scan = [ "/dev" ]
obtain_device_list_from_udev = 1
external_device_info_source = "none"
preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
cache_dir = "/etc/lvm/cache"
cache_file_prefix = ""
write_cache_state = 1
sysfs_scan = 1
multipath_component_detection = 1
md_component_detection = 1
fw_raid_component_detection = 0
md_chunk_alignment = 1
data_alignment_detection = 1
data_alignment = 0
data_alignment_offset_detection = 1
ignore_suspended_devices = 0
ignore_lvm_mirrors = 1
disable_after_error_count = 0
require_restorefile_with_uuid = 1
pv_min_size = 2048
issue_discards = 0
allow_changes_with_duplicate_pvs = 1
}
# Extent allocation policy.
allocation {
maximise_cling = 1
use_blkid_wiping = 1
wipe_signatures_when_zeroing_new_lvs = 1
mirror_logs_require_separate_pvs = 0
cache_pool_metadata_require_separate_pvs = 0
thin_pool_metadata_require_separate_pvs = 0
}
# Logging verbosity and destinations.
log {
verbose = 0
silent = 0
syslog = 1
overwrite = 0
level = 0
indent = 1
command_names = 0
prefix = "  "
activation = 0
debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
}
# Metadata backup/archive locations.
backup {
backup = 1
backup_dir = "/etc/lvm/backup"
archive = 1
archive_dir = "/etc/lvm/archive"
retain_min = 10
}
shell {
history_size = 100
}
# Global daemon/locking behaviour.
global {
umask = 077
test = 0
units = "h"
si_unit_consistency = 1
suffix = 1
activation = 1
proc = "/proc"
etc = "/etc"
locking_type = 1
wait_for_locks = 1
fallback_to_clustered_locking = 1
fallback_to_local_locking = 1
locking_dir = "/run/lock/lvm"
prioritise_write_locks = 1
abort_on_internal_errors = 0
detect_internal_vg_cache_corruption = 0
metadata_read_only = 0
mirror_segtype_default = "raid1"
raid10_segtype_default = "raid10"
sparse_segtype_default = "thin"
use_lvmetad = 1
use_lvmlockd = 0
system_id_source = "none"
use_lvmpolld = 1
notify_dbus = 1
}
# Activation behaviour. NOTE(review): udev_sync/udev_rules disabled --
# presumably because this runs inside a container without udev; confirm
# against the cinder-volume image.
activation {
checks = 0
udev_sync = 0
udev_rules = 0
verify_udev_operations = 0
retry_deactivation = 1
missing_stripe_filler = "error"
use_linear_target = 1
reserved_stack = 64
reserved_memory = 8192
process_priority = -18
raid_region_size = 512
readahead = "auto"
raid_fault_policy = "warn"
mirror_image_fault_policy = "remove"
mirror_log_fault_policy = "allocate"
snapshot_autoextend_threshold = 100
snapshot_autoextend_percent = 20
thin_pool_autoextend_threshold = 100
thin_pool_autoextend_percent = 20
use_mlockall = 0
monitoring = 1
polling_interval = 15
activation_mode = "degraded"
}
dmeventd {
mirror_library = "libdevmapper-event-lvm2mirror.so"
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
thin_library = "libdevmapper-event-lvm2thin.so"
}