Refactor openshift services for composable roles

Introduce an openshift_node template that serves as the base for all
openshift services. This reworks the inventory files so that hosts are
defined once and made part of the appropriate groups.

The master node can now be split from the infra node, or bundled
together with the worker node in the all-in-one role.

Provide environment files to enable the Master, Worker, Infra or
all-in-one role individually.
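
The environment files follow the usual TripleO pattern of mapping a role's
services in the resource registry. As a sketch of the intent (the file name
and template path below are illustrative, not taken from this commit),
enabling the Master role on its own could look like:

    # environments/openshift-master.yaml (hypothetical name)
    resource_registry:
      # Map only the master service; the worker and infra services stay
      # unmapped and thus disabled for this deployment.
      OS::TripleO::Services::OpenShift::Master: ../extraconfig/services/openshift-master.yaml

    parameter_defaults:
      # TripleO generates a <RoleName>Count parameter per role.
      OpenShiftMasterCount: 1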

Change-Id: I9ad86185b01c88b609d320e2384c5644bd99bdae
Martin André 2018-09-03 18:34:43 +02:00
parent df04ed9315
commit 7373adc72e
7 changed files with 534 additions and 419 deletions

openshift-master.yaml

@@ -39,10 +39,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  OpenShiftGlobalVariables:
-    default: {}
-    description: Global Ansible variables for OpenShift-Ansible installer.
-    type: json
   OpenShiftAnsiblePlaybook:
     default: '/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml'
     description: Path to OpenShift-Ansible playbook.
@@ -51,18 +47,23 @@ parameters:
     default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-master/scaleup.yml'
     description: Path to OpenShift-Ansible playbook.
     type: string
+  OpenShiftWorkerScaleupPlaybook:
+    default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml'
+    description: Path to OpenShift-Ansible playbook.
+    type: string
   OpenShiftUpgradePlaybook:
     default: '/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml'
     description: Path to OpenShift-Ansible Upgrade playbook.
     type: string
+  OpenShiftGlobalVariables:
+    default: {}
+    description: Global Ansible variables for OpenShift-Ansible installer.
+    type: json
+  # TODO(mandre) Add as a inventory group var
   OpenShiftMasterNodeVars:
     default: {}
     description: OpenShift node vars specific for the master nodes
     type: json
-  OpenShiftWorkerNodeVars:
-    default: {}
-    description: OpenShift node vars specific for the worker nodes
-    type: json
   DockerInsecureRegistryAddress:
     description: Optional. The IP Address and Port of an insecure docker
                  namespace that will be configured in /etc/sysconfig/docker.
@@ -82,362 +83,353 @@ parameters:
     description: etcd container image for openshift
     type: string
 
+resources:
+
+  OpenShiftNode:
+    type: ./openshift-node.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
 outputs:
   role_data:
     description: Role data for the Openshift Service
     value:
       service_name: openshift_master
       config_settings:
-        tripleo.openshift_master.firewall_rules:
-          '200 openshift-master api':
-            dport: 6443
-            proto: tcp
-          '200 openshift-master etcd':
-            dport:
-              - 2379
-              - 2380
-            proto: tcp
+        map_merge:
+          - get_attr: [OpenShiftNode, role_data, config_settings]
+          - tripleo.openshift_master.firewall_rules:
+              '200 openshift-master api':
+                dport: 6443
+                proto: tcp
+              '200 openshift-master etcd':
+                dport:
+                  - 2379
+                  - 2380
+                proto: tcp
       upgrade_tasks: []
       step_config: ''
      external_deploy_tasks:
-        - name: openshift_master step 2
-          when: step == '2'
-          tags: openshift
-          block:
-            - name: create openshift temp dirs
-              file:
-                path: "{{item}}"
-                state: directory
-              with_items:
-                - "{{playbook_dir}}/openshift/inventory"
-
-            - name: set openshift global vars fact
-              set_fact:
-                openshift_global_vars:
-                  map_merge:
-                    - openshift_release: '3.10'
-                      openshift_version: '3.10'
-                      openshift_image_tag:
-                        yaql:
-                          expression:
-                            $.data.image.rightSplit(":", 1)[1]
-                          data:
-                            image: {get_param: DockerOpenShiftBaseImage}
-                      openshift_enable_excluders: false
-                      openshift_deployment_type: origin
-                      openshift_use_external_openvswitch: true
-                      openshift_docker_selinux_enabled: false
-                      # Disable services we're not using for now
-                      openshift_enable_service_catalog: false
-                      template_service_broker_install: false
-                      # Needed for containerized deployment
-                      skip_version: true
-                      # Fatal and Errors only
-                      debug_level: 0
-                      openshift_master_cluster_method: native
-                      openshift_master_cluster_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
-                      openshift_master_cluster_public_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
-                      # Local Registry
-                      openshift_examples_modify_imagestreams: true
-                      oreg_url:
-                        yaql:
-                          expression:
-                            $.data.image.rightSplit(":", 1).join("-${component}:")
-                          data:
-                            image: {get_param: DockerOpenShiftBaseImage}
-                      etcd_image: {get_param: DockerOpenShiftEtcdImage}
-                      osm_etcd_image: {get_param: DockerOpenShiftEtcdImage}
-                      osm_image: {get_param: DockerOpenShiftBaseImage}
-                      osn_image: {get_param: DockerOpenShiftNodeImage}
-                      openshift_cockpit_deployer_image: {get_param: DockerOpenShiftCockpitImage}
-                      openshift_web_console_prefix:
-                        yaql:
-                          expression:
-                            $.data.image.rightSplit(":", 1)[0] + "-"
-                          data:
-                            image: {get_param: DockerOpenShiftBaseImage}
-                      openshift_docker_additional_registries: {get_param: DockerInsecureRegistryAddress}
-                      openshift_master_bootstrap_auto_approve: true
-                      osm_controller_args: {"experimental-cluster-signing-duration": ["20m"]}
-                    - {get_param: OpenShiftGlobalVariables}
-                tripleo_role_name: {get_param: RoleName}
-                tripleo_stack_action: {get_param: StackAction}
-                openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
-                openshift_worker_node_vars: {get_param: OpenShiftWorkerNodeVars}
-                openshift_master_network: {get_param: [ServiceNetMap, OpenshiftMasterNetwork]}
+        list_concat:
+          - get_attr: [OpenShiftNode, role_data, external_deploy_tasks]
+          - - name: openshift_master step 2
+              when: step == '2'
+              tags: openshift
+              block:
+                - name: set openshift global vars fact
+                  set_fact:
+                    openshift_global_vars:
+                      map_merge:
+                        - openshift_release: '3.10'
+                          openshift_version: '3.10'
+                          openshift_image_tag:
+                            yaql:
+                              expression:
+                                $.data.image.rightSplit(":", 1)[1]
+                              data:
+                                image: {get_param: DockerOpenShiftBaseImage}
+                          openshift_enable_excluders: false
+                          openshift_deployment_type: origin
+                          openshift_use_external_openvswitch: true
+                          openshift_docker_selinux_enabled: false
+                          # Disable services we're not using for now
+                          openshift_enable_service_catalog: false
+                          template_service_broker_install: false
+                          # Needed for containerized deployment
+                          skip_version: true
+                          # Fatal and Errors only
+                          debug_level: 0
+                          openshift_master_cluster_method: native
+                          openshift_master_cluster_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
+                          openshift_master_cluster_public_hostname: {get_param: [EndpointMap, OpenshiftPublic, host]}
+                          # Local Registry
+                          openshift_examples_modify_imagestreams: true
+                          oreg_url:
+                            yaql:
+                              expression:
+                                $.data.image.rightSplit(":", 1).join("-${component}:")
+                              data:
+                                image: {get_param: DockerOpenShiftBaseImage}
+                          etcd_image: {get_param: DockerOpenShiftEtcdImage}
+                          osm_etcd_image: {get_param: DockerOpenShiftEtcdImage}
+                          osm_image: {get_param: DockerOpenShiftBaseImage}
+                          osn_image: {get_param: DockerOpenShiftNodeImage}
+                          openshift_cockpit_deployer_image: {get_param: DockerOpenShiftCockpitImage}
+                          openshift_docker_additional_registries: {get_param: DockerInsecureRegistryAddress}
+                          openshift_master_bootstrap_auto_approve: true
+                          osm_controller_args: {"experimental-cluster-signing-duration": ["20m"]}
+                        - {get_param: OpenShiftGlobalVariables}
+                    tripleo_stack_action: {get_param: StackAction}
+                    openshift_master_node_vars: {get_param: OpenShiftMasterNodeVars}
+
+                - name: set role facts for generating inventory
+                  set_fact:
+                    tripleo_role_name: {get_param: RoleName}
 
-            # NOTE(flaper87): Check if origin-node is running
-            # in the openshift nodes so we can flag the node
-            # as new later on.
-            # This task ignores errors because docker inspect
-            # exits with 1 if origin-node doesn't exist. Perhaps
-            # we could use failed_when instead of ignoring the
-            # errors. Future improvement.
-            - name: Check if origin-node is running
-              become: true
-              shell: >
-                docker inspect atomic-enterprise-master-api > /dev/null 2>&1
-                || docker inspect origin-master-api > /dev/null 2>&1
-                || echo "false"
-              register: origin_nodes
-              delegate_to: "{{item}}"
-              with_items: "{{ groups[tripleo_role_name] | default([]) }}"
+                # FIXME(mandre) This task always fails:
+                # - become:true doesn't work in that context (containerized undercloud issue?)
+                # - there is no origin-master-api docker container
+                # We should be checking for systemd service status instead.
+                # NOTE(flaper87): Check if origin-node is running in the openshift
+                # nodes so we can flag the node as new later on.
+                #
+                # This task ignores errors because docker inspect exits with 1 if
+                # origin-node doesn't exist. Perhaps we could use failed_when
+                # instead of ignoring the errors. Future improvement.
+                - name: Check if origin-node is running
+                  become: true
+                  shell: >
+                    docker inspect atomic-enterprise-master-api > /dev/null 2>&1
+                    || docker inspect origin-master-api > /dev/null 2>&1
+                    || echo "false"
+                  register: origin_nodes
+                  delegate_to: "{{item}}"
+                  with_items: "{{ groups[tripleo_role_name] | default([]) }}"
 
-            # NOTE(flaper87): Create all the nodes objects
-            # now, as yaml dicts, instead of formatting
-            # everything as part of a template.
-            # We consider new_node all the nodes that
-            # exited with 1 in the previous task.
-            #
-            # Future Improvement: Use hostvars[] syntax
-            # instead of raw_get to reduce verbosity.
-            - set_fact:
-                nodes:
-                  - new_node: "{{origin_nodes.results | selectattr('item', 'equalto', item) | selectattr('stdout', 'equalto', 'false') | list | count > 0}}"
-                    hostname: "{{item}}"
-                    ansible_user: "{{ hostvars[item]['ansible_user'] | default(hostvars[item]['ansible_ssh_user']) | default('root') }}"
-                    ansible_host: "{{ hostvars[item]['ansible_host'] | default(item) }}"
-                    ansible_become: true
-                    containerized: true
-                    openshift_node_group_name: 'node-config-master-infra'
-                    etcd_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_master_bind_addr: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_public_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_public_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
-              register: all_master_nodes
-              with_items: "{{groups[tripleo_role_name] | default([]) }}"
+                - set_fact:
+                    nodes:
+                      - new_node: "{{origin_nodes.results | selectattr('item', 'equalto', item) | selectattr('stdout', 'equalto', 'false') | list | count > 0}}"
+                        hostname: "{{item}}"
+                  register: all_master_nodes
+                  with_items: "{{groups[tripleo_role_name] | default([]) }}"
 
-            - set_fact:
-                master_nodes: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | list}}"
-                new_masters: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}}"
+                - set_fact:
+                    master_nodes: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | list}}"
+                    new_master_nodes: "{{all_master_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}}"
 
-            # NOTE(flaper87): Every master node will be in the masters group
-            # but only new master nodes will be in the new_masters section, which
-            # will be created only if there are nodes to add. We'll add `new_masters`
-            # to the OSEv3 group regardless to simplify the implementation. Ansible
-            # will ignore the section if it doesn't exist or if it's empty
-            - name: generate openshift inventory for openshift_master service
-              copy:
-                dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
-                content: |
-                  {% if master_nodes | count > 0%}
-                  masters:
-                    hosts:
-                  {% for host in master_nodes %}
-                    {{host.hostname}}:
-                      {{host | combine(openshift_master_node_vars) | to_nice_yaml() | indent(6)}}
-                  {% endfor %}
-
-                  nodes:
-                    hosts:
-                  {% for host in master_nodes %}
-                    {{host.hostname}}:
-                      {{host | combine(openshift_master_node_vars) | to_nice_yaml() | indent(6)}}
-                  {% endfor %}
-                  {% endif %}
-
-                  {% if new_masters | count > 0 %}
-                  new_masters:
-                    hosts:
-                  {% for host in new_masters %}
-                    {{host.hostname}}:
-                      {{host | combine(openshift_master_node_vars) | to_nice_yaml() | indent(6)}}
-                  {% endfor %}
-
-                  new_nodes:
-                    hosts:
-                  {% for host in master_nodes %}
-                    {{host.hostname}}:
-                      {{host | combine(openshift_master_node_vars) | to_nice_yaml() | indent(6)}}
-                  {% endfor %}
-
-                  new_etcd:
-                    children:
-                      new_masters: {}
-                  {% endif %}
-
-                  etcd:
-                    children:
-                      masters: {}
-
-                  OSEv3:
-                    children:
-                      masters: {}
-                      nodes: {}
-                      new_masters: {}
-                      new_nodes: {}
-                      {% if groups['openshift_glusterfs'] | default([]) %}glusterfs: {}{% endif %}
+                # NOTE(flaper87): Every master node will be in the masters group
+                # but only new master nodes will be in the new_masters section, which
+                # will be created only if there are nodes to add. We'll add `new_masters`
+                # to the OSEv3 group regardless to simplify the implementation. Ansible
+                # will ignore the section if it doesn't exist or if it's empty
+                - name: generate openshift inventory for openshift_master service
+                  copy:
+                    dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_master.yml"
+                    content: |
+                      {% if master_nodes | count > 0%}
+                      all:
+                        children:
+                          masters:
+                            hosts:
+                      {% for host in master_nodes -%}
+                              {{host.hostname}}:
+                      {% endfor %}
+                            vars:
+                              {{openshift_master_node_vars | to_nice_yaml() | indent(6)}}
+                      {% if new_master_nodes | count > 0 -%}
+                          new_masters:
+                            hosts:
+                      # FIXME(mandre)
+                      # patterns do not work in inventory files, so we
+                      # can't write something like
+                      # hosts:
+                      #   new_nodes:&masters: {}
+                      #
+                      # Also impossible to register var with templated
+                      # name, we can't re-use the all_role_nodes var
+                      # for the master role in openshift-node.yaml
+                      {% for host in new_master_nodes -%}
+                              {{host.hostname}}:
+                      {% endfor %}
+                      {% endif %}
+                      {% endif %}
+
+                - name: generate openshift inventory for groups
+                  copy:
+                    dest: "{{playbook_dir}}/openshift/inventory/groups.yml"
+                    content: |
+                      all:
+                        children:
+                          etcd:
+                            children:
+                              masters: {}
+
+                          new_etcd:
+                            children:
+                              new_masters: {}
+
+                          OSEv3:
+                            children:
+                              masters: {}
+                              nodes: {}
+                              new_masters: {}
+                              new_nodes: {}
+                              {% if groups['openshift_glusterfs'] | default([]) %}glusterfs: {}{% endif %}
 
-            - name: generate openshift global defaults
-              copy:
-                dest: "{{playbook_dir}}/openshift/global_defaults.yml"
-                content: |
-                  containerized: true
-                  openshift_master_cluster_method: native
-                  openshift_use_dnsmasq: true
-                  openshift_use_external_openvswitch: true
-
-            - name: generate openshift global vars
-              copy:
-                dest: "{{playbook_dir}}/openshift/global_vars.yml"
-                content: "{{openshift_global_vars|to_nice_yaml}}"
-
-            - name: set openshift ansible playbook paths
-              set_fact:
-                openshift_ansible_playbook_path: {get_param: OpenShiftAnsiblePlaybook}
-                openshift_master_scaleup_playbook_path: {get_param: OpenShiftMasterScaleupPlaybook}
-                openshift_upgrade_playbook_path: {get_param: OpenShiftUpgradePlaybook}
+                - name: generate openshift global defaults
+                  copy:
+                    dest: "{{playbook_dir}}/openshift/global_defaults.yml"
+                    content: |
+                      containerized: true
+                      openshift_master_cluster_method: native
+                      openshift_use_dnsmasq: true
+                      openshift_use_external_openvswitch: true
+
+                - name: generate openshift global vars
+                  copy:
+                    dest: "{{playbook_dir}}/openshift/global_vars.yml"
+                    content: "{{openshift_global_vars|to_nice_yaml}}"
+
+                - name: set openshift ansible playbook paths
+                  set_fact:
+                    openshift_ansible_playbook_path: {get_param: OpenShiftAnsiblePlaybook}
+                    openshift_master_scaleup_playbook_path: {get_param: OpenShiftMasterScaleupPlaybook}
+                    openshift_worker_scaleup_playbook_path: {get_param: OpenShiftWorkerScaleupPlaybook}
+                    openshift_upgrade_playbook_path: {get_param: OpenShiftUpgradePlaybook}
 
                 # NOTE(flaper87): We'll use openshift_ansible_scaleup_playbook_path
                 # if there are new master or new worker nodes and we are doing an
                 # UPDATE. For all the other cases, we shall use the deploy playbook.
                 - name: generate openshift playbook
                   copy:
                     dest: "{{playbook_dir}}/openshift/playbook.yml"
                     content: |
                       # NOTE(flaper87): The NetworkManager setup has been moved
                       # into openshift-ansible but it's not been released yet.
                       # This code will go away as soon as an rpm with the required
                       # roles hits the repo.
                       - name: OpenShift networking preparation
                         hosts: all
                         tasks:
                           - name: install NetworkManager
                             package:
                               name: NetworkManager
                               state: present
                           - name: generate nm dispatcher script
                             copy:
                               dest: "/etc/NetworkManager/dispatcher.d/99-os-net-config-origin-dns.sh"
                               owner: root
                               mode: 0755
                               content: >-
                                 #!/bin/bash -x

                                 DEVS=$(nmcli device | grep unmanaged | awk '{print $1}')

                                 for dev in $DEVS;
                                 do
                                   temp="${dev%\"}"
                                   temp="${temp#\"}"
                                   export DEVICE_IFACE=$temp

                                   /etc/NetworkManager/dispatcher.d/99-origin-dns.sh $DEVICE_IFACE up
                                 done
                           - name: Enable NetworkManager
                             service:
                               name: NetworkManager
                               state: restarted
                               enabled: yes

                       - include: "/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml"
                       {% if tripleo_stack_action == 'UPDATE' and new_masters | count > 0 %}
                       - include: "{{openshift_master_scaleup_playbook_path}}"
                       {% endif %}
                       {% if tripleo_stack_action == 'UPDATE' and new_nodes | count > 0 %}
                       - include: "{{openshift_worker_scaleup_playbook_path}}"
                       {% endif %}
                       {% if tripleo_stack_action == 'UPDATE' and openshift_upgrade %}
                       - include: "{{openshift_upgrade_playbook_path}}"
                       {% endif %}
                       {% if tripleo_stack_action == 'CREATE' or (tripleo_stack_action == 'UPDATE' and (new_masters + new_nodes) | count == 0) %}
                       - include: "{{openshift_ansible_playbook_path}}"
                       {% endif %}

                       - name: Simple validation OpenShift is actually deployed
                         hosts: masters
                         tasks:
                           - name: Check oc status
                             command: oc status --suggest
                             register: oc_status
                             become: true
                           - name: Register failure if oc status fails
                             command: echo true
                             register: oc_status_failed
                             when: '"fail" in oc_status.stdout'
                           - debug:
                               var: oc_status.stdout_lines
                           - name: Check oc get dc/router
                             command: "oc get dc/router -o jsonpath='{.status.readyReplicas}'"
                             register: oc_get_router
                             become: true
                           - name: Register failure if oc get dc/router fails
                             command: echo true
                             register: oc_get_router_failed
                             when: 'oc_get_router.stdout|int < 1'
                           - debug:
                               var: oc_get_router.stdout
                           - name: Check oc get dc/docker-registry
                             command: "oc get dc/docker-registry -o jsonpath='{.status.readyReplicas}'"
                             register: oc_get_registry
                             become: true
                           - name: Register failure if oc get dc/docker-registry fails
                             command: echo true
                             register: oc_get_registry_failed
                             when: 'oc_get_registry.stdout|int < 1'
                           - debug:
                               var: oc_get_registry.stdout
                           - name: Check oc get nodes
                             command: oc get nodes --all-namespaces
                             register: oc_get_nodes
                             become: true
                           - name: Register failure if oc get nodes fails
                             command: echo true
                             register: oc_get_nodes_failed
                             when: '"NotReady" in oc_get_nodes.stdout'
                           - debug:
                               var: oc_get_nodes.stdout_lines
                           - name: Fail the playbook if any validations failed
                             fail:
                             when: >
                               oc_status_failed.changed or
                               oc_get_nodes_failed.changed or
                               oc_get_router_failed.changed or
                               oc_get_registry_failed.changed

                - name: set openshift command
                  set_fact:
                    openshift_command: >-
                      {%- if openshift_command is defined -%}
                      {{openshift_command}}
                      {%- else -%}
                      ANSIBLE_HOST_KEY_CHECKING=False
                      ansible-playbook
                      -i '{{playbook_dir}}/openshift/inventory'
                      --extra-vars '@{{playbook_dir}}/openshift/global_defaults.yml'
                      --extra-vars '@{{playbook_dir}}/openshift/global_vars.yml'
                      '{{playbook_dir}}/openshift/playbook.yml'
                      {%- endif -%}
                - name: print openshift command
                  debug:
                    var: openshift_command
                - name: run openshift (immediate log at {{playbook_dir}}/openshift/playbook.log)
                  shell: |
                    {{openshift_command}} 2>&1 | tee {{playbook_dir}}/openshift/playbook.log
                    exit ${PIPESTATUS[0]}
      external_upgrade_tasks:
        - name: set OpenShift upgrade facts
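
Taken together, the tasks above assemble a static inventory directory that
the openshift-ansible playbooks then consume. As a rough sketch, for a
deployment without a glusterfs group the generated groups.yml comes out as
follows (the per-role host files produced by openshift-node.yaml and the
master template sit next to it in the same directory):

    # {{playbook_dir}}/openshift/inventory/groups.yml, as rendered by the
    # "generate openshift inventory for groups" task above
    all:
      children:
        etcd:
          children:
            masters: {}

        new_etcd:
          children:
            new_masters: {}

        OSEv3:
          children:
            masters: {}
            nodes: {}
            new_masters: {}
            new_nodes: {}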

openshift-node.yaml (new file)

@@ -0,0 +1,163 @@
+heat_template_version: rocky
+
+description: External tasks definition for OpenShift
+
+parameters:
+  StackAction:
+    type: string
+    description: >
+      Heat action on performed top-level stack. Note StackUpdateType is
+      set to UPGRADE when a major-version upgrade is in progress.
+    constraints:
+      - allowed_values: ['CREATE', 'UPDATE']
+  RoleNetIpMap:
+    default: {}
+    type: json
+  ServiceData:
+    default: {}
+    description: Dictionary packing service data
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry. This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  OpenShiftNodeGroupName:
+    default: node-config-all-in-one
+    description: The group the nodes belong to.
+    type: string
+    tags:
+      - role_specific
+
+resources:
+  RoleParametersValue:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_replace:
+          - map_replace:
+              - OpenShiftNodeGroupName: OpenShiftNodeGroupName
+              - values: {get_param: [RoleParameters]}
+          - values:
+              OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
+
+outputs:
+  role_data:
+    description: Role data for the Openshift Service
+    value:
+      service_name: openshift_node
+      config_settings: {}
+      upgrade_tasks: []
+      step_config: ''
+      external_deploy_tasks:
+        - name: openshift_node step 1
+          when: step == '1'
+          tags: openshift
+          block:
+            - name: create openshift temp dirs
+              file:
+                path: "{{item}}"
+                state: directory
+              with_items:
+                - "{{playbook_dir}}/openshift/inventory"
+
+            - name: set role facts for generating inventory
+              set_fact:
+                tripleo_role_name: {get_param: RoleName}
+                tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
+                openshift_master_network: {get_param: [ServiceNetMap, OpenshiftMasterNetwork]}
+
+            # FIXME(mandre) This task always fails:
+            # - become:true doesn't work in that context (containerized undercloud issue?)
+            # - there is no origin-master-api docker container
+            # We should be checking for systemd service status instead.
+            # NOTE(flaper87): Check if origin-node is running in the openshift
+            # nodes so we can flag the node as new later on.
+            #
+            # This task ignores errors because docker inspect exits with 1 if
+            # origin-node doesn't exist. Perhaps we could use failed_when
+            # instead of ignoring the errors. Future improvement.
+            - name: Check if origin-node is running
+              become: true
+              shell: >
+                docker inspect atomic-enterprise-master-api > /dev/null 2>&1
+                || docker inspect origin-master-api > /dev/null 2>&1
+                || echo "false"
+              register: origin_nodes
+              delegate_to: "{{item}}"
+              with_items: "{{ groups[tripleo_role_name] | default([]) }}"
+
+            # NOTE(flaper87): Create all the nodes objects
+            # now, as yaml dicts, instead of formatting
+            # everything as part of a template.
+            # We consider new_node all the nodes that
+            # exited with 1 in the previous task.
+            - set_fact:
+                nodes:
+                  - new_node: "{{origin_nodes.results | selectattr('item', 'equalto', item) | selectattr('stdout', 'equalto', 'false') | list | count > 0}}"
+                    hostname: "{{item}}"
+                    ansible_user: "{{ hostvars[item]['ansible_user'] | default(hostvars[item]['ansible_ssh_user']) | default('root') }}"
+                    ansible_host: "{{ hostvars[item]['ansible_host'] | default(item) }}"
+                    ansible_become: true
+                    containerized: true
+                    openshift_node_group_name: '{{tripleo_node_group_name}}'
+                    etcd_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
+                    openshift_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
+                    openshift_master_bind_addr: "{{hostvars[item][openshift_master_network + '_ip']}}"
+                    openshift_public_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
+                    openshift_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
+                    openshift_public_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
+              register: all_role_nodes
+              with_items: "{{groups[tripleo_role_name] | default([]) }}"
+
+            - set_fact:
+                role_nodes: "{{all_role_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | list}}"
+                new_role_nodes: "{{all_role_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}}"
+
+            - name: generate openshift inventory for {{tripleo_role_name}} role nodes
+              copy:
+                dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_nodes.yml"
+                content: |
+                  {% if role_nodes | count > 0%}
+                  all:
+                    hosts:
+                  {% for host in role_nodes -%}
+                      {{host.hostname}}:
+                        {{host | to_nice_yaml() | indent(6)}}
+                  {% endfor %}
+                    children:
+                      nodes:
+                        hosts:
+                  {% for host in role_nodes -%}
+                          {{host.hostname}}:
+                  {% endfor %}
+
+                  {% if new_role_nodes | count > 0 -%}
+                      new_nodes:
+                        hosts:
+                  {% for host in new_role_nodes -%}
+                          {{host.hostname}}:
+                  {% endfor %}
+                  {% endif %}
+                  {% endif %}
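
For reference, with a single host in a role named OpenShiftWorker (whose
role definition overrides the node group to node-config-compute), the
template above renders an inventory file along these lines; the hostname
and IP are illustrative and most of the generated host vars are omitted:

    # {{playbook_dir}}/openshift/inventory/OpenShiftWorker_nodes.yml (sketch)
    all:
      hosts:
        overcloud-openshiftworker-0:
          ansible_become: true
          containerized: true
          new_node: false
          openshift_node_group_name: node-config-compute
          openshift_ip: 192.168.24.12
      children:
        nodes:
          hosts:
            overcloud-openshiftworker-0: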

openshift-worker.yaml

@@ -32,117 +32,40 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  OpenShiftNodeGroupName:
-    default: node-config-compute
-    description: The group the nodes belong to.
-    type: string
-    tags:
-      - role_specific
-  OpenShiftWorkerScaleupPlaybook:
-    default: '/usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml'
-    description: Path to OpenShift-Ansible playbook.
-    type: string
+  # TODO(mandre) This is unused. Remove it or make it OpenShiftNodeVars
+  OpenShiftWorkerNodeVars:
+    default: {}
+    description: OpenShift node vars specific for the worker nodes
+    type: json
 
 resources:
-  RoleParametersValue:
-    type: OS::Heat::Value
-    properties:
-      type: json
-      value:
-        map_replace:
-          - map_replace:
-              - OpenShiftNodeGroupName: OpenShiftNodeGroupName
-              - values: {get_param: [RoleParameters]}
-          - values:
-              OpenShiftNodeGroupName: {get_param: OpenShiftNodeGroupName}
+
+  OpenShiftNode:
+    type: ./openshift-node.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
     description: Role data for the Openshift Service
     value:
-      # This service template essentially tags the nodes that we want
-      # as workers. The actual installation is performed in
-      # openshift-master service template.
       service_name: openshift_worker
       config_settings:
-        tripleo.openshift_worker.firewall_rules:
-          '200 openshift-worker kubelet':
-            dport:
-              - 10250
-              - 10255
-            proto: tcp
-          '200 openshift-worker external services':
-            dport: '30000-32767'
+        map_merge:
+          - get_attr: [OpenShiftNode, role_data, config_settings]
+          - tripleo.openshift_worker.firewall_rules:
+              '200 openshift-worker kubelet':
+                dport:
+                  - 10250
+                  - 10255
+                proto: tcp
+              '200 openshift-worker external services':
+                dport: '30000-32767'
       upgrade_tasks: []
      step_config: ''
      external_deploy_tasks:
-        - name: openshift_worker step 1
-          when: step == '1'
-          tags: openshift
-          block:
-            - name: create openshift temp dirs
-              file:
-                path: "{{item}}"
-                state: directory
-              with_items:
-                - "{{playbook_dir}}/openshift/inventory"
-            - name: set global vars facts
-              set_fact:
-                tripleo_role_name: {get_param: RoleName}
-                tripleo_node_group_name: {get_attr: [RoleParametersValue, value, OpenShiftNodeGroupName]}
-                openshift_master_network: {get_param: [ServiceNetMap, OpenshiftMasterNetwork]}
-                openshift_worker_scaleup_playbook_path: {get_param: OpenShiftWorkerScaleupPlaybook}
-            - name: Check if origin-node is running
-              become: true
-              shell: >
-                docker inspect atomic-enterprise-node > /dev/null 2>&1
-                || docker inspect origin-node > /dev/null 2>&1
-                || echo "false"
-              register: origin_nodes
-              delegate_to: "{{item}}"
-              with_items: "{{ groups[tripleo_role_name] | default([]) }}"
-            - set_fact:
-                nodes:
-                  - new_node: "{{origin_nodes.results | selectattr('item', 'equalto', item) | selectattr('stdout', 'equalto', 'false') | list | count > 0}}"
-                    hostname: "{{item}}"
-                    ansible_user: "{{ hostvars[item]['ansible_user'] | default(hostvars[item]['ansible_ssh_user']) | default('root') }}"
-                    ansible_host: "{{ hostvars[item]['ansible_host'] | default(item) }}"
-                    ansible_become: true
-                    containerized: true
-                    openshift_node_group_name: '{{tripleo_node_group_name }}'
-                    etcd_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_public_ip: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_public_hostname: "{{hostvars[item][openshift_master_network + '_ip']}}"
-                    openshift_schedulable: '{{tripleo_node_group_name != "node-config-infra"}}'
-              register: all_worker_nodes
-              with_items: "{{groups[tripleo_role_name] | default([]) }}"
-            - set_fact:
-                worker_nodes: "{{all_worker_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | list}}"
-                new_nodes: "{{all_worker_nodes.results | map(attribute='ansible_facts') | map(attribute='nodes') | flatten | selectattr('new_node', 'equalto', True) | list}}"
-            - copy:
-                dest: "{{playbook_dir}}/openshift/inventory/{{tripleo_role_name}}_openshift_worker.yml"
-                content: |
-                  {% if worker_nodes | count > 0 %}
-                  nodes:
-                    hosts:
-                  {% for host in worker_nodes %}
-                    {{host.hostname}}:
-                      {{host | to_nice_yaml() | indent(6)}}
-                  {% endfor %}
-                  {% endif %}
-                  {% if new_nodes | count > 0 %}
-                  new_nodes:
-                    hosts:
-                  {% for host in new_nodes %}
-                    {{host.hostname}}:
-                      {{host | to_nice_yaml() | indent(6)}}
-                  {% endfor %}
-                  {% endif %}
+        - get_attr: [OpenShiftNode, role_data, external_deploy_tasks]
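
With this, the worker template is reduced to a thin wrapper: it contributes
its firewall rules and delegates everything else to the base node template.
Any further OpenShift service could be composed the same way; a minimal
sketch of a hypothetical service reusing openshift-node.yaml (the service
name and the inherited-only settings are invented for illustration):

    heat_template_version: rocky
    description: Example service composed on top of openshift_node

    parameters:
      EndpointMap: {default: {}, type: json}
      ServiceNetMap: {default: {}, type: json}
      DefaultPasswords: {default: {}, type: json}
      RoleName: {default: '', type: string}
      RoleParameters: {default: {}, type: json}

    resources:
      OpenShiftNode:
        type: ./openshift-node.yaml
        properties:
          EndpointMap: {get_param: EndpointMap}
          ServiceNetMap: {get_param: ServiceNetMap}
          DefaultPasswords: {get_param: DefaultPasswords}
          RoleName: {get_param: RoleName}
          RoleParameters: {get_param: RoleParameters}

    outputs:
      role_data:
        description: Role data for the example service
        value:
          service_name: openshift_example
          # Inherit the base template's settings; service-specific settings
          # would be map_merge'd on top, as in the worker template above.
          config_settings:
            get_attr: [OpenShiftNode, role_data, config_settings]
          upgrade_tasks: []
          step_config: ''
          external_deploy_tasks:
            - get_attr: [OpenShiftNode, role_data, external_deploy_tasks]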

OpenShiftAllInOne.yaml (new role definition)

@@ -0,0 +1,33 @@
+###############################################################################
+# Role: OpenShiftAllInOne                                                     #
+###############################################################################
+- name: OpenShiftAllInOne
+  description: |
+    OpenShiftAllInOne role
+  CountDefault: 1
+  RoleParametersDefault:
+    OpenShiftNodeGroupName: 'node-config-all-in-one'
+  tags:
+    - primary
+    - controller
+    - openshift
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
+  # For systems with both IPv4 and IPv6, you may specify a gateway network for
+  # each, such as ['ControlPlane', 'External']
+  default_route_networks: ['External']
+  ServicesDefault:
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::OpenShift::Master
+    - OS::TripleO::Services::OpenShift::Worker
+    - OS::TripleO::Services::OpenShift::GlusterFS
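
Because OpenShiftNodeGroupName is tagged role_specific in openshift-node.yaml,
the default set here via RoleParametersDefault can also be overridden from an
environment file through the standard <RoleName>Parameters mechanism, for
example:

    parameter_defaults:
      OpenShiftAllInOneParameters:
        OpenShiftNodeGroupName: 'node-config-all-in-one'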

OpenShiftInfra.yaml

@@ -5,6 +5,8 @@
   description: |
     OpenShiftInfra role, a specialized worker that only runs infra pods.
   CountDefault: 1
+  RoleParametersDefault:
+    OpenShiftNodeGroupName: 'node-config-infra'
   tags:
     - openshift
   networks:
@@ -12,8 +14,6 @@
     - Storage
     - StorageMgmt
     - Tenant
-  RoleParametersDefault:
-    OpenShiftNodeGroupName: 'node-config-infra'
   # For systems with both IPv4 and IPv6, you may specify a gateway network for
   # each, such as ['ControlPlane', 'External']
   default_route_networks: ['ControlPlane']

OpenShiftMaster.yaml

@@ -5,6 +5,8 @@
   description: |
     OpenShiftMaster role
   CountDefault: 1
+  RoleParametersDefault:
+    OpenShiftNodeGroupName: 'node-config-master'
   tags:
     - primary
     - controller

OpenShiftWorker.yaml

@@ -5,6 +5,8 @@
   description: |
     OpenShiftWorker role
   CountDefault: 1
+  RoleParametersDefault:
+    OpenShiftNodeGroupName: 'node-config-compute'
   tags:
     - openshift
   networks: