Import initial configs

This commit is contained in:
Logan V 2018-03-01 15:27:36 +00:00
parent 1f806c2e93
commit 341090f75e
4 changed files with 152 additions and 0 deletions

View File

@ -0,0 +1,6 @@
---
# env.d skeleton override: run the cinder-volumes service inside an LXC
# container rather than directly on the storage host (is_metal: false).
# NOTE(review): confirm this matches the storage backend in use — with
# LVM-backed cinder the service normally must run on metal; with the
# RBD/Ceph backend configured elsewhere in this commit, a container is fine.
container_skel:
  cinder_volumes_container:
    properties:
      is_metal: false

View File

@ -0,0 +1,19 @@
---
# Copyright 2016, Logan Vig <logan2211@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# env.d skeleton override: run haproxy inside an LXC container instead of
# on the metal host (the OSA default deploys haproxy on metal).
container_skel:
  haproxy_container:
    properties:
      is_metal: false

View File

@ -0,0 +1,83 @@
---
# Address pools for the deployment. Anchored so the identical mapping can be
# aliased under global_overrides.cidr_networks below without duplication.
cidr_networks: &cidr_networks
  container: 10.53.2.0/24
  external: 192.169.91.96/28

# Inclusive IP ranges reserved from the pools above (hosts, gateways, VIPs);
# the dynamic inventory will not hand these out to containers.
used_ips:
  - "10.53.2.0,10.53.2.10"
  - "192.169.91.96,192.169.91.100"

global_overrides:
  cidr_networks: *cidr_networks
  # Internal VIP lives on the container network (10.53.2.0/24) and is
  # reserved in used_ips above via the 10.53.2.0-10.53.2.10 range.
  internal_lb_vip_address: 10.53.2.2
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  # osa.continuous.pw => 192.169.91.101
  external_lb_vip_address: osa.continuous.pw
  # NOTE(review): tunnel_bridge and management_bridge are both br-mgmt, so
  # vxlan tunnel traffic shares the management bridge — confirm intentional.
  tunnel_bridge: "br-mgmt"
  management_bridge: "br-mgmt"
  provider_networks:
    # Management/container network: carries container addressing and SSH,
    # attached to every container and host.
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
    # Flat provider network for neutron, bound to bond0 on the hosts.
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "bond0"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent

###
### Infrastructure
###

# Three converged infra nodes; the anchor is reused for every host group so
# all control-plane services land on the same trio.
_infrastructure_hosts: &infrastructure_hosts
  lsn-mc1016:
    ip: 192.169.91.98
  lsn-mc1022:
    ip: 192.169.91.99
  lsn-mc1023:
    ip: 192.169.91.100

shared-infra_hosts: *infrastructure_hosts
ceph-mon_hosts: *infrastructure_hosts
repo-infra_hosts: *infrastructure_hosts
haproxy_hosts: *infrastructure_hosts
identity_hosts: *infrastructure_hosts
storage-infra_hosts: *infrastructure_hosts
storage_hosts: *infrastructure_hosts
image_hosts: *infrastructure_hosts
compute-infra_hosts: *infrastructure_hosts
orchestration_hosts: *infrastructure_hosts
dashboard_hosts: *infrastructure_hosts
network_hosts: *infrastructure_hosts
metering-infra_hosts: *infrastructure_hosts
metering-alarm_hosts: *infrastructure_hosts
metrics_hosts: *infrastructure_hosts

# nova hypervisors — not yet enrolled; template kept for future compute nodes.
#compute_hosts: &compute_hosts
#  compute1:
#    ip: 172.29.236.16
#  compute2:
#    ip: 172.29.236.17
#
#ceph-osd_hosts: *compute_hosts
#metering-compute_hosts: *compute_hosts

View File

@ -0,0 +1,44 @@
---
# Because we have three haproxy nodes, we need one active LB IP,
# and we use keepalived for that.

## Load Balancer Configuration (haproxy/keepalived)
haproxy_keepalived_external_vip_cidr: "192.169.91.101/32"
# FIX: was "10.52.2.2/32" — a 10.52/10.53 typo. The internal VIP must match
# internal_lb_vip_address (10.53.2.2) on the container network 10.53.2.0/24;
# otherwise keepalived advertises an address that is not on br-mgmt's subnet.
haproxy_keepalived_internal_vip_cidr: "10.53.2.2/32"
haproxy_keepalived_external_interface: br-mgmt
haproxy_keepalived_internal_interface: br-mgmt

## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: 98a1078e-7b72-4782-9cf7-ae7c68fd900f # Replace with your generated UUID

## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.container }}"
public_network: "{{ cidr_networks.container }}"
osd_scenario: collocated
osd_objectstore: bluestore

# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images

# Ceph OSD devices (consumed whole by the collocated scenario above)
devices:
  - /dev/sdc
  - /dev/sdd

# Cinder RBD backend pointed at the Ceph cluster defined above.
cinder_backends:
  RBD:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true