Verify YAML syntax in gates

This patchset adds a yamllint check covering all *.yml
files.

Also fixes syntax errors to make the jobs pass.

Change-Id: I3186adf9835b4d0cada272d156b17d1bc9c2b799
Eduardo Gonzalez 2018-02-25 12:02:00 +01:00
parent 7851de5c3c
commit ea1a1dee0d
50 changed files with 160 additions and 158 deletions
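Most of the fixes below follow a handful of recurring patterns: list items and mapping keys re-indented consistently under their parent key, a space added after '#' in comments, a stray space before a colon removed, missing '---' document-start markers added, and trailing blank lines dropped. As a hypothetical before/after sketch of the most common case (the variable names appear in the diff below, the broken indentation is illustrative):

# flagged by yamllint: sibling keys at inconsistent indentation
docker_common_options:
    auth_email: "{{ docker_registry_email }}"
  auth_password: "{{ docker_registry_password }}"

# clean: consistent two-space indentation
docker_common_options:
  auth_email: "{{ docker_registry_email }}"
  auth_password: "{{ docker_registry_password }}"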

.yamllint (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
+extends: default
+ignore: |
+  .tox/
+
+rules:
+  line-length: disable
+  truthy: disable
+  braces:
+    max-spaces-inside: 1
+  comments:
+    # Ignore first space in comment because we set default options as:
+    #openstack_version: "pike"
+    require-starting-space: true
+    ignore: |
+      etc/kolla/globals.yml
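A rough sketch of what this configuration accepts and rejects, assuming yamllint's stock rule semantics (the keys below are hypothetical):

# accepted: 'braces' allows at most one space inside a flow mapping
ok_mapping: { key: value }
empty_ok: {}
# rejected: two spaces inside the braces
bad_mapping: {  key: value  }
# rejected by 'comments': no space after the '#' ...
#openstack_version: "pike"
# ... except in etc/kolla/globals.yml, which is ignored because its
# commented-out defaults are intentionally written that way.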

View File

@@ -1,3 +1,4 @@
+---
- project:
    check:
      jobs:
@@ -72,7 +73,7 @@
    vars:
      scenario: aio
    roles:
-      - zuul: openstack-infra/zuul-jobs
+      - zuul: openstack-infra/zuul-jobs

- job:
    name: kolla-ansible-centos-source

View File

@@ -62,7 +62,7 @@ container_proxy:
# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be an override.
-api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
+api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
################
# Chrony options
@@ -98,14 +98,14 @@ docker_restart_policy_retry: "10"
# Common options used throughout Docker
docker_common_options:
-  auth_email: "{{ docker_registry_email }}"
-  auth_password: "{{ docker_registry_password }}"
-  auth_registry: "{{ docker_registry }}"
-  auth_username: "{{ docker_registry_username }}"
-  environment:
-    KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-  restart_policy: "{{ docker_restart_policy }}"
-  restart_retries: "{{ docker_restart_policy_retry }}"
+  auth_email: "{{ docker_registry_email }}"
+  auth_password: "{{ docker_registry_password }}"
+  auth_registry: "{{ docker_registry }}"
+  auth_username: "{{ docker_registry_username }}"
+  environment:
+    KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+  restart_policy: "{{ docker_restart_policy }}"
+  restart_retries: "{{ docker_restart_policy_retry }}"
####################
@@ -336,7 +336,7 @@ supported_policy_format_list:
# In the context of multi-regions, list here the name of all your regions.
multiple_regions_names:
-  - "{{ openstack_region_name }}"
+  - "{{ openstack_region_name }}"
openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min }}"
openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min }}"
@@ -350,11 +350,11 @@ nova_console: "novnc"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
openstack_auth:
-  auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
-  username: "admin"
-  password: "{{ keystone_admin_password }}"
-  project_name: "admin"
-  domain_name: "default"
+  auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
+  username: "admin"
+  password: "{{ keystone_admin_password }}"
+  project_name: "admin"
+  domain_name: "default"
# Endpoint type used to connect with OpenStack services with ansible modules.
# Valid options are [ public, internal, admin ]

View File

@@ -37,4 +37,3 @@
  roles:
    - { role: baremetal,
        tags: baremetal }
-

View File

@@ -93,7 +93,7 @@
    - enable_host_ntp | bool

- name: Synchronizing time one-time
-  command: ntpd -gq
+  command: ntpd -gq
  become: True
  when: enable_host_ntp | bool

View File

@@ -31,7 +31,7 @@
    mode: "0660"
  become: true
  with_items:
-    - "rabbitmq-env.conf"
+    - "rabbitmq-env.conf"

- name: Template ssh keys
  template:

View File

@@ -11,7 +11,7 @@
      auth: "{{ '{{ openstack_ceilometer_auth }}' }}"
      endpoint_type: "{{ openstack_interface }}"
    module_extra_vars:
-      openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
+      openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
  run_once: True

- name: Associate the ResellerAdmin role and ceilometer user
@@ -25,6 +25,6 @@
      auth: "{{ '{{ openstack_ceilometer_auth }}' }}"
      endpoint_type: "{{ openstack_interface }}"
    module_extra_vars:
-      openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
+      openstack_ceilometer_auth: "{{ openstack_ceilometer_auth }}"
  when: enable_swift | bool
  run_once: True

View File

@@ -49,4 +49,3 @@
  with_dict: "{{ chrony_services }}"
  notify:
    - Restart chrony container
-

View File

@@ -28,7 +28,7 @@ common_services:
      - "/dev/:/dev/"
      - "/run/:/run/:shared"
      - "kolla_logs:/var/log/kolla/"
-  # DUMMY_ENVIRONMENT is needed because empty environment is not supported
+  # DUMMY_ENVIRONMENT is needed because empty environment is not supported
  cron:
    container_name: cron
    enabled: True

View File

@@ -79,7 +79,7 @@
  vars:
    service: "{{ designate_services['designate-worker'] }}"
  template:
-    src: "{{ item }}"
+    src: "{{ item }}"
    dest: "{{ node_config_directory }}/designate-worker/pools.yaml"
    mode: "0660"
  become: true

View File

@@ -1,7 +1,7 @@
---
- name: Destroying Kolla host configuration
  become: true
-  script: ../tools/cleanup-host
+  script: ../tools/cleanup-host
  environment:
    enable_haproxy: "{{ enable_haproxy }}"
    enable_swift: "{{ enable_swift }}"

View File

@@ -45,4 +45,3 @@
      or glance_conf.changed | bool
      or policy_overwriting.changed | bool
      or glance_registry_container.changed | bool
-

View File

@@ -125,4 +125,3 @@
  notify:
    - Restart glance-api container
    - Restart glance-registry container
-

View File

@@ -109,4 +109,3 @@
  with_dict: "{{ haproxy_services }}"
  notify:
    - "Restart {{ item.key }} container"
-

View File

@@ -13,7 +13,7 @@
  with_dict: "{{ horizon_services }}"

- set_fact:
-    custom_policy: []
+    custom_policy: []

- include: policy_item.yml
  vars:
@@ -131,4 +131,3 @@
    - horizon.enabled | bool
  notify:
    - Restart horizon container
-

View File

@@ -15,67 +15,67 @@
  run_once: true

- block:
-    - name: Stop MariaDB containers
-      kolla_docker:
-        name: "{{ mariadb_service.container_name }}"
-        action: "stop_container"
+    - name: Stop MariaDB containers
+      kolla_docker:
+        name: "{{ mariadb_service.container_name }}"
+        action: "stop_container"

-    - name: Run MariaDB wsrep recovery
-      kolla_docker:
-        action: "start_container"
-        common_options: "{{ docker_common_options }}"
-        environment:
-          KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
-          BOOTSTRAP_ARGS: "--wsrep-recover"
-        image: "{{ mariadb_service.image }}"
-        labels:
-          BOOTSTRAP:
-        name: "{{ mariadb_service.container_name }}"
-        restart_policy: "never"
-        volumes: "{{ mariadb_service.volumes }}"
+    - name: Run MariaDB wsrep recovery
+      kolla_docker:
+        action: "start_container"
+        common_options: "{{ docker_common_options }}"
+        environment:
+          KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
+          BOOTSTRAP_ARGS: "--wsrep-recover"
+        image: "{{ mariadb_service.image }}"
+        labels:
+          BOOTSTRAP:
+        name: "{{ mariadb_service.container_name }}"
+        restart_policy: "never"
+        volumes: "{{ mariadb_service.volumes }}"

-    - name: Stop MariaDB containers
-      kolla_docker:
-        name: "{{ mariadb_service.container_name }}"
-        action: "stop_container"
+    - name: Stop MariaDB containers
+      kolla_docker:
+        name: "{{ mariadb_service.container_name }}"
+        action: "stop_container"

-    - name: Copying MariaDB log file to /tmp
-      shell: "docker cp {{ mariadb_service.container_name }}:/var/log/kolla/mariadb/mariadb.log /tmp/mariadb_tmp.log"
+    - name: Copying MariaDB log file to /tmp
+      shell: "docker cp {{ mariadb_service.container_name }}:/var/log/kolla/mariadb/mariadb.log /tmp/mariadb_tmp.log"

-    - name: Get MariaDB wsrep recovery seqno
-      shell: "tail -n 200 /tmp/mariadb_tmp.log | grep Recovered | tail -1 | awk '{print $7}' | awk -F'\n' '{print $1}' | awk -F':' '{print $2}'"
-      register: wsrep_recovery_seqno
+    - name: Get MariaDB wsrep recovery seqno
+      shell: "tail -n 200 /tmp/mariadb_tmp.log | grep Recovered | tail -1 | awk '{print $7}' | awk -F'\n' '{print $1}' | awk -F':' '{print $2}'"
+      register: wsrep_recovery_seqno

-    - name: Removing MariaDB log file from /tmp
-      file: path=/tmp/mariadb_tmp.log state=absent
-      changed_when: false
-      check_mode: no
+    - name: Removing MariaDB log file from /tmp
+      file: path=/tmp/mariadb_tmp.log state=absent
+      changed_when: false
+      check_mode: no

-    - name: Registering MariaDB seqno variable
-      set_fact:
-        seqno: "{{ wsrep_recovery_seqno.stdout_lines[0] }}"
-      changed_when: false
+    - name: Registering MariaDB seqno variable
+      set_fact:
+        seqno: "{{ wsrep_recovery_seqno.stdout_lines[0] }}"
+      changed_when: false

-    - name: Comparing seqno value on all mariadb hosts
-      shell:
-        cmd: |
-          if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! -z {{ hostvars[item]['seqno'] }} &&
-          {{ hostvars[inventory_hostname]['seqno'] }} =~ ^[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^[0-9]+$ &&
-          {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi
-      with_items: "{{ groups['mariadb'] }}"
-      register: seqno_compare
-      args:
-        executable: /bin/bash
-      changed_when: false
+    - name: Comparing seqno value on all mariadb hosts
+      shell:
+        cmd: |
+          if [[ ! -z {{ hostvars[inventory_hostname]['seqno'] }} && ! -z {{ hostvars[item]['seqno'] }} &&
+          {{ hostvars[inventory_hostname]['seqno'] }} =~ ^[0-9]+$ && {{ hostvars[item]['seqno'] }} =~ ^[0-9]+$ &&
+          {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi
+      with_items: "{{ groups['mariadb'] }}"
+      register: seqno_compare
+      args:
+        executable: /bin/bash
+      changed_when: false

-    - name: Writing hostname of host with the largest seqno to temp file
-      local_action: copy content={{ inventory_hostname }} dest=/tmp/kolla_mariadb_recover_inventory_name mode=0644
-      changed_when: false
-      when: seqno_compare.results | map(attribute='stdout') | join('') == ""
+    - name: Writing hostname of host with the largest seqno to temp file
+      local_action: copy content={{ inventory_hostname }} dest=/tmp/kolla_mariadb_recover_inventory_name mode=0644
+      changed_when: false
+      when: seqno_compare.results | map(attribute='stdout') | join('') == ""

-    - name: Registering mariadb_recover_inventory_name from temp file
-      set_fact:
-        mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
+    - name: Registering mariadb_recover_inventory_name from temp file
+      set_fact:
+        mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
      when:
        - mariadb_recover_inventory_name is not defined
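A note on the recovery flow above: every MariaDB host extracts its recovered wsrep seqno from the log, registers it as a fact, and compares it with the seqno of every other host in the mariadb group. The comparison command only prints something when another host holds a larger seqno, so the host whose comparison output is empty is the most up to date; that host writes its inventory name to /tmp/kolla_mariadb_recover_inventory_name on the control node, and mariadb_recover_inventory_name is then read back from that file to pick the bootstrap node.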

View File

@@ -37,4 +37,3 @@
    - service.enabled | bool
    - action != "config"
  notify: Restart memcached container
-

View File

@@ -86,4 +86,3 @@
    group: "{{ config_owner_group }}"
  when: inventory_hostname in groups['compute']
  with_sequence: start=1 end={{ num_nova_fake_per_node }}
-

View File

@@ -436,4 +436,3 @@
  with_dict: "{{ neutron_services }}"
  notify:
    - "Restart {{ item.key }} container"
-

View File

@@ -111,7 +111,7 @@ nova_services:
    group: "compute"
    image: "{{ nova_compute_image_full }}"
    environment:
-      LIBGUESTFS_BACKEND: "direct"
+      LIBGUESTFS_BACKEND: "direct"
    privileged: True
    enabled: "{{ not enable_nova_fake | bool }}"
    ipc_mode: "host"

View File

@@ -1,3 +1,4 @@
+---
- name: Install package python-os-xenapi
  package:
    name: python-os-xenapi

View File

@@ -203,4 +203,3 @@
  with_dict: "{{ nova_services }}"
  notify:
    - "Restart {{ item.key }} container"
-

View File

@@ -70,4 +70,3 @@
  with_dict: "{{ openvswitch_services }}"
  notify:
    - "Restart {{ item.key }} container"
-

View File

@@ -50,7 +50,7 @@ ovsdpdk_services:
####################
ovs_bridge_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
ovs_port_mappings: "{% for bridge in neutron_bridge_name.split(',') %} {{ neutron_external_interface.split(',')[loop.index0] }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
-dpdk_tunnel_interface : "{{neutron_external_interface}}"
+dpdk_tunnel_interface: "{{neutron_external_interface}}"
dpdk_tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['address'] }}"
tunnel_interface_network: "{{ hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['network']}}/{{hostvars[inventory_hostname]['ansible_' + dpdk_tunnel_interface]['ipv4']['netmask']}}"
tunnel_interface_cidr: "{{dpdk_tunnel_interface_address}}/{{ tunnel_interface_network | ipaddr('prefix') }}"

View File

@@ -50,7 +50,7 @@
       or inventory_hostname in groups['neutron-l3-agent']
       or inventory_hostname in groups['neutron-metadata-agent']
       or inventory_hostname in groups['neutron-vpnaas-agent'])
-    - ovs_physical_port_policy == 'indexed'
+    - ovs_physical_port_policy == 'indexed'

- name: Restart ovsdpdk-vswitchd container
  vars:
@@ -86,7 +86,7 @@
       or inventory_hostname in groups['neutron-l3-agent']
       or inventory_hostname in groups['neutron-metadata-agent']
       or inventory_hostname in groups['neutron-vpnaas-agent'])
-    - ovs_physical_port_policy == 'named'
+    - ovs_physical_port_policy == 'named'

- name: wait for dpdk tunnel ip
  wait_for:

View File

@@ -19,7 +19,7 @@
    - item.value.host_in_groups | bool
  with_dict: "{{ ovsdpdk_services }}"
  notify:
-    - "Restart {{ item.key }} container"
+    - "Restart {{ item.key }} container"

- name: Copying ovs-dpdkctl tool
  copy:

View File

@@ -8,4 +8,3 @@
    - item.value.enabled | bool
    - item.value.host_in_groups | bool
  with_dict: "{{ ovsdpdk_services }}"
-

View File

@@ -6,9 +6,9 @@
    port: "{{ database_port }}"
  with_items: "{{ groups['mariadb'] }}"
  when:
-    - not enable_mariadb | bool
-    - enable_external_mariadb_load_balancer | bool
-    - inventory_hostname in groups['haproxy']
+    - not enable_mariadb | bool
+    - enable_external_mariadb_load_balancer | bool
+    - inventory_hostname in groups['haproxy']

- name: "Check if external database address is reachable from all hosts"
  wait_for:
@@ -16,5 +16,5 @@
    host: "{{ database_address }}"
    port: "{{ database_port }}"
  when:
-    - not enable_mariadb | bool
-    - not enable_external_mariadb_load_balancer | bool
+    - not enable_mariadb | bool
+    - not enable_external_mariadb_load_balancer | bool

View File

@@ -10,10 +10,8 @@
    key: "{{ config_owner_group }}"
  register: getent_group

-#(duonghq) it's only a basic check, should be refined later
+# NOTE(duonghq): it's only a basic check, should be refined later
- name: Check if ansible user can do passwordless sudo
  shell: sudo -n true
  register: result
  failed_when: result | failed
-

View File

@@ -39,10 +39,10 @@
    - inventory_hostname in groups[service.group]
    - service.enabled | bool
  with_items:
-    - "rabbitmq-env.conf"
-    - "rabbitmq.config"
-    - "rabbitmq-clusterer.config"
-    - "definitions.json"
+    - "rabbitmq-env.conf"
+    - "rabbitmq.config"
+    - "rabbitmq-clusterer.config"
+    - "definitions.json"
  notify:
    - Restart rabbitmq container

View File

@@ -34,4 +34,3 @@ skydive_analyzer_image_full: "{{ skydive_analyzer_image }}:{{ skydive_analyzer_t
skydive_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-skydive-agent"
skydive_agent_tag: "{{ openstack_release }}"
skydive_agent_image_full: "{{ skydive_agent_image }}:{{ skydive_agent_tag }}"
-

View File

@@ -41,4 +41,3 @@
    - config_json.changed | bool
      or skydive_conf.changed |bool
      or skydive_agent_container.changed | bool
-

View File

@@ -31,4 +31,3 @@
    - container_facts['skydive_agent'] is not defined
    - inventory_hostname in groups[skydive_agent.group]
    - skydive_agent.enabled | bool
-

View File

@@ -1,4 +1,3 @@
---
- name: Stopping Kolla containers
  command: /tmp/kolla-stop/tools/stop-containers
-

View File

@@ -121,8 +121,8 @@ vitrage_datasource:
    enabled: "{{ enable_cinder | bool }}"
  - name: "neutron.network,neutron.port"
    enabled: "{{ enable_neutron | bool }}"
-  #TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
-  # Document process to deploy vitrage+heat.
+  # TODO(egonzalez) Heat cannot be used with default policy.json due stacks:global_index=rule:deny_everybody.
+  # Document process to deploy vitrage+heat.
  - name: "heat.stack"
    enabled: "no"

View File

@@ -4,7 +4,7 @@ kind: ReplicationController
metadata:
  name: redis
spec:
-  replicas: 2
+  replicas: 2
  selector:
    name: redis
  template:
@@ -13,16 +13,16 @@ spec:
        name: redis
    spec:
      containers:
-        - name: redis
-          image: kubernetes/redis:v1
-          ports:
-            - containerPort: 6379
-          resources:
-            limits:
-              cpu: "1"
-          volumeMounts:
-            - mountPath: /redis-master-data
-              name: data
-      volumes:
-        - name: data
-          emptyDir: {}
+        - name: redis
+          image: kubernetes/redis:v1
+          ports:
+            - containerPort: 6379
+          resources:
+            limits:
+              cpu: "1"
+          volumeMounts:
+            - mountPath: /redis-master-data
+              name: data
+      volumes:
+        - name: data
+          emptyDir: {}

View File

@@ -8,8 +8,8 @@ metadata:
  name: redis-proxy
spec:
  containers:
-    - name: proxy
-      image: kubernetes/redis-proxy:v1
-      ports:
-        - containerPort: 6379
-          name: api
+    - name: proxy
+      image: kubernetes/redis-proxy:v1
+      ports:
+        - containerPort: 6379
+          name: api

View File

@@ -4,7 +4,7 @@ kind: ReplicationController
metadata:
  name: redis-sentinel
spec:
-  replicas: 2
+  replicas: 2
  selector:
    redis-sentinel: "true"
  template:
@@ -15,10 +15,10 @@ spec:
        role: sentinel
    spec:
      containers:
-        - name: sentinel
-          image: kubernetes/redis:v1
-          env:
-            - name: SENTINEL
-              value: "true"
-          ports:
-            - containerPort: 26379
+        - name: sentinel
+          image: kubernetes/redis:v1
+          env:
+            - name: SENTINEL
+              value: "true"
+          ports:
+            - containerPort: 26379

View File

@@ -179,7 +179,7 @@ vitrage_keystone_password:
memcache_secret_key:

-#HMAC secret key
+# HMAC secret key
osprofiler_secret:

nova_ssh_key:

View File

@@ -1,5 +1,5 @@
---
features:
-  - Add ansible role for openstack congress project which provide
-    policy as a service across any collection of cloud services in
-    order to offer governance and compliance for dynamic infrastructures.
+  - Add ansible role for openstack congress project which provide
+    policy as a service across any collection of cloud services in
+    order to offer governance and compliance for dynamic infrastructures.

View File

@@ -1,3 +1,3 @@
---
features:
-  - Add designate-producer ansible role. Orchestrates periodic tasks that are run by designate.
+  - Add designate-producer ansible role. Orchestrates periodic tasks that are run by designate.

View File

@@ -1,6 +1,6 @@
---
-features:
-  - Introduce OpenStack Infrastructure Optimization
-    service, also known as Watcher. This project makes
-    use of Ceilometer data to rebalance the cloud to
-    meet declared goals and strategies.
+features:
+  - Introduce OpenStack Infrastructure Optimization
+    service, also known as Watcher. This project makes
+    use of Ceilometer data to rebalance the cloud to
+    meet declared goals and strategies.

View File

@@ -1,4 +1,4 @@
---
other:
  - Congress doesn't work correctly out of the box and will
-    not deploy. See Bug #1634641.
+    not deploy. See Bug https://bugs.launchpad.net/kolla-ansible/+bug/1634641.

View File

@@ -6,4 +6,3 @@ deprecations:
    * /etc/kolla/config/database.conf
    * /etc/kolla/config/messaging.conf
-

View File

@@ -8,4 +8,4 @@ upgrade:
  - |
    On upgrade NFS Cinder snapshots will be activated. One can
    prohibit this by setting nfs_snapshot_support = False in
-    /etc/kolla/config/cinder/cinder-volume.conf, section '[nfs-1]'.
+    /etc/kolla/config/cinder/cinder-volume.conf, section '[nfs-1]'.

View File

@@ -1,3 +1,3 @@
---
features:
-  - Add OpenDaylight role
+  - Add OpenDaylight role

View File

@@ -2,4 +2,3 @@
deprecations:
  - The nova-network was deprecated, we remove it from the nova ansible
    role.
-

View File

@@ -1,3 +1,4 @@
+---
- hosts: all
  vars:
    logs_dir: "/tmp/logs"

View File

@@ -32,8 +32,8 @@
  become: true
  tasks:
-    - name: Create log directory for node
-      file:
-        state: directory
-        path: /tmp/{{ inventory_hostname }}
-      become: false
+    - name: Create log directory for node
+      file:
+        state: directory
+        path: /tmp/{{ inventory_hostname }}
+      become: false

View File

@@ -30,11 +30,15 @@ setenv = VIRTUAL_ENV={envdir}
commands = python setup.py testr --coverage --testr-args='{posargs}'

[testenv:pep8]
+deps =
+    {[testenv]deps}
+    yamllint
commands =
    {toxinidir}/tools/run-bashate.sh
    flake8 {posargs}
    python {toxinidir}/tools/validate-all-file.py
    bandit -r ansible kolla_ansible tests tools
+    yamllint .

[testenv:bandit]
commands = bandit -r ansible kolla_ansible tests tools
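With this in place the pep8 environment, which the gate jobs already run, pulls in yamllint through deps and executes "yamllint ." alongside bashate, flake8, validate-all-file.py and bandit. The same check should be reproducible locally with "tox -e pep8", or with a plain "yamllint ." from the repository root once yamllint is installed.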