From ce7e2103b2ae7741298507da12feaaece15b5905 Mon Sep 17 00:00:00 2001 From: Xicheng Chang Date: Wed, 18 May 2016 09:46:58 -0500 Subject: [PATCH] Syncing opnfv compass-adapters With openstack/compass-adapters Change-Id: Ibbadf313fe0759500ae9b0528aba8ceca259ec9a --- ansible/openstack/HA-ansible-multinodes.yml | 239 +++ .../allinone.yml | 2 +- .../{openstack_juno => openstack}/compute.yml | 0 .../controller.yml | 0 .../group_vars/all | 4 + .../multinodes.yml | 6 + .../{openstack_juno => openstack}/network.yml | 0 .../single-controller.yml | 8 +- .../{openstack_juno => openstack}/storage.yml | 0 .../templates/dnsmasq-neutron.conf | 0 .../templates/ml2_conf.ini | 17 +- .../templates/neutron.conf | 30 +- .../templates/nova.conf | 40 +- ansible/openstack_juno/.gitkeep | 0 .../single-controller.yml | 35 - ansible/openstack_kilo/.gitkeep | 0 ansible/openstack_liberty/.gitkeep | 0 ansible/openstack_mitaka/.gitkeep | 0 .../openstack_mitaka/aodh/handlers/main.yml | 12 + ansible/openstack_mitaka/aodh/tasks/main.yml | 29 + .../aodh/templates/aodh.conf.j2 | 41 + .../aodh/templates/api_paste.ini.j2 | 22 + .../aodh/templates/policy.json.j2 | 20 + ansible/openstack_mitaka/aodh/vars/Debian.yml | 22 + ansible/openstack_mitaka/aodh/vars/RedHat.yml | 22 + ansible/openstack_mitaka/aodh/vars/main.yml | 12 + .../ceilometer_controller/vars/Debian.yml | 37 + .../ceilometer_controller/vars/RedHat.yml | 36 + .../openstack_mitaka/common/vars/Debian.yml | 30 + ansible/roles/apache/handlers/main.yml | 12 + ansible/roles/apache/tasks/main.yml | 30 + .../templates/openstack-dashboard.conf.j2 | 15 + ansible/roles/apache/templates/ports.conf.j2 | 1 + ansible/roles/apache/vars/Debian.yml | 17 + ansible/roles/apache/vars/RedHat.yml | 17 + ansible/roles/apache/vars/main.yml | 12 + .../ceilometer_compute/handlers/main.yml | 12 + .../roles/ceilometer_compute/tasks/main.yml | 44 + .../templates/ceilometer.j2 | 31 + .../ceilometer_compute/templates/nova.j2 | 5 + .../roles/ceilometer_compute/vars/Debian.yml 
| 23 + .../roles/ceilometer_compute/vars/RedHat.yml | 25 + .../roles/ceilometer_compute/vars/main.yml | 11 + .../ceilometer_controller/handlers/main.yml | 12 + .../ceilometer_controller/tasks/main.yml | 51 + .../templates/ceilometer.j2 | 37 + .../ceilometer_controller/templates/cinder.j2 | 2 + .../ceilometer_controller/templates/glance.j2 | 8 + .../ceilometer_controller/vars/Debian.yml | 33 + .../ceilometer_controller/vars/RedHat.yml | 33 + .../roles/ceilometer_controller/vars/main.yml | 11 + ansible/roles/ceph-config/files/create_osd.sh | 39 + .../roles/ceph-config/tasks/create_config.yml | 67 + ansible/roles/ceph-config/tasks/main.yml | 13 + ansible/roles/ceph-config/templates/ceph.j2 | 25 + .../ceph-config/templates/create_monmap.j2 | 5 + .../roles/ceph-config/templates/dump_var.j2 | 8 + ansible/roles/ceph-mon/tasks/install_mon.yml | 32 + ansible/roles/ceph-mon/tasks/main.yml | 13 + ansible/roles/ceph-mon/vars/Debian.yml | 12 + ansible/roles/ceph-mon/vars/RedHat.yml | 12 + ansible/roles/ceph-mon/vars/main.yml | 10 + .../tasks/ceph_openstack_conf.yml | 40 + .../tasks/ceph_openstack_pre.yml | 77 + ansible/roles/ceph-openstack/tasks/main.yml | 26 + .../roles/ceph-openstack/templates/secret.j2 | 6 + ansible/roles/ceph-openstack/vars/Debian.yml | 30 + ansible/roles/ceph-openstack/vars/RedHat.yml | 20 + ansible/roles/ceph-openstack/vars/main.yml | 13 + ansible/roles/ceph-osd/files/create_osd.sh | 39 + ansible/roles/ceph-osd/tasks/install_osd.yml | 33 + ansible/roles/ceph-osd/tasks/main.yml | 13 + ansible/roles/ceph-purge/tasks/main.yml | 35 + .../roles/cinder-controller/handlers/main.yml | 15 +- .../cinder-controller/tasks/cinder_config.yml | 19 + .../tasks/cinder_install.yml | 34 + .../roles/cinder-controller/tasks/main.yml | 44 +- .../cinder-controller/templates/api-paste.ini | 4 +- .../cinder-controller/templates/cinder.conf | 15 +- .../templates/cinder_init.sh | 6 - .../roles/cinder-controller/vars/Debian.yml | 17 + .../roles/cinder-controller/vars/RedHat.yml | 16 
+ ansible/roles/cinder-controller/vars/main.yml | 14 + ansible/roles/cinder-volume/files/loop.yml | 1 - ansible/roles/cinder-volume/handlers/main.yml | 16 +- ansible/roles/cinder-volume/tasks/main.yml | 72 +- .../roles/cinder-volume/templates/cinder.conf | 14 +- ansible/roles/cinder-volume/vars/Debian.yml | 14 + ansible/roles/cinder-volume/vars/RedHat.yml | 19 + ansible/roles/cinder-volume/vars/main.yml | 14 + .../sources.list.d/cloudarchive-juno.list | 1 - ansible/roles/common/handlers/main.yml | 4 - ansible/roles/common/tasks/main.yml | 110 +- ansible/roles/common/templates/hosts | 31 +- ansible/roles/common/templates/ntp.conf | 16 +- ansible/roles/common/templates/pip.conf | 5 + ansible/roles/common/vars/Debian.yml | 30 + ansible/roles/common/vars/RedHat.yml | 26 + ansible/roles/common/vars/main.yml | 14 + ansible/roles/dashboard/handlers/main.yml | 12 + ansible/roles/dashboard/tasks/main.yml | 122 +- .../dashboard/templates/local_settings.py | 511 ------- .../templates/openstack-dashboard.conf.j2 | 15 + ansible/roles/dashboard/templates/ports.j2 | 15 + ansible/roles/dashboard/vars/Debian.yml | 16 + ansible/roles/dashboard/vars/RedHat.yml | 17 + ansible/roles/dashboard/vars/main.yml | 13 + ansible/roles/database/files/my.cnf | 131 -- ansible/roles/database/files/remove_user.sh | 5 + ansible/roles/database/handlers/main.yml | 3 - ansible/roles/database/tasks/main.yml | 142 +- .../roles/database/tasks/mariadb_cluster.yml | 14 + .../database/tasks/mariadb_cluster_debian.yml | 60 + .../database/tasks/mariadb_cluster_redhat.yml | 59 + .../roles/database/tasks/mariadb_config.yml | 67 + .../roles/database/tasks/mariadb_install.yml | 69 + .../roles/database/tasks/mongodb_config.yml | 55 + .../roles/database/tasks/mongodb_install.yml | 39 + ansible/roles/database/templates/data.j2 | 45 + ansible/roles/database/templates/mongodb.conf | 6 + ansible/roles/database/templates/my.cnf | 59 + ansible/roles/database/templates/replica.js | 8 + 
ansible/roles/database/templates/wsrep.cnf | 105 ++ ansible/roles/database/vars/Debian.yml | 45 + ansible/roles/database/vars/RedHat.yml | 46 + ansible/roles/database/vars/main.yml | 34 + ansible/roles/ext-network/handlers/main.yml | 29 + ansible/roles/ext-network/tasks/main.yml | 43 + ansible/roles/glance/handlers/main.yml | 16 +- ansible/roles/glance/tasks/glance_config.yml | 17 + ansible/roles/glance/tasks/glance_install.yml | 26 + ansible/roles/glance/tasks/main.yml | 64 +- ansible/roles/glance/tasks/nfs.yml | 57 + .../roles/glance/templates/glance-api.conf | 714 +-------- .../glance/templates/glance-registry.conf | 214 +-- .../roles/glance/templates/image_upload.sh | 10 +- ansible/roles/glance/vars/Debian.yml | 18 + ansible/roles/glance/vars/RedHat.yml | 19 + ansible/roles/glance/vars/main.yml | 13 + ansible/roles/ha/files/galera_chk | 10 + ansible/roles/ha/files/mysqlchk | 15 + ansible/roles/ha/handlers/main.yml | 17 + ansible/roles/ha/tasks/main.yml | 96 ++ ansible/roles/ha/templates/haproxy.cfg | 206 +++ ansible/roles/ha/templates/keepalived.conf | 47 + ansible/roles/ha/vars/Debian.yml | 11 + ansible/roles/ha/vars/RedHat.yml | 11 + ansible/roles/ha/vars/main.yml | 18 + ansible/roles/heat/handlers/main.yml | 16 + ansible/roles/heat/tasks/heat_config.yml | 17 + ansible/roles/heat/tasks/heat_install.yml | 27 + ansible/roles/heat/tasks/main.yml | 23 + ansible/roles/heat/templates/heat.j2 | 25 + ansible/roles/heat/vars/Debian.yml | 20 + ansible/roles/heat/vars/RedHat.yml | 19 + ansible/roles/heat/vars/main.yml | 13 + ansible/roles/keystone/handlers/main.yml | 13 +- .../roles/keystone/tasks/keystone_config.yml | 61 + .../roles/keystone/tasks/keystone_install.yml | 87 ++ ansible/roles/keystone/tasks/main.yml | 53 +- .../roles/keystone/templates/admin-openrc.sh | 13 +- .../roles/keystone/templates/demo-openrc.sh | 10 +- .../roles/keystone/templates/keystone.conf | 1345 +---------------- .../roles/keystone/templates/keystone_init | 43 - 
.../keystone/templates/wsgi-keystone.conf.j2 | 46 + ansible/roles/keystone/vars/Debian.yml | 20 + ansible/roles/keystone/vars/RedHat.yml | 19 + ansible/roles/keystone/vars/main.yml | 164 ++ ansible/roles/memcached/handlers/main.yml | 12 + ansible/roles/memcached/tasks/main.yml | 35 + ansible/roles/memcached/vars/Debian.yml | 15 + ansible/roles/memcached/vars/RedHat.yml | 15 + ansible/roles/memcached/vars/main.yml | 14 + .../monitor/files/check_Debian_service.sh | 15 + .../monitor/files/check_RedHat_service.sh | 16 + ansible/roles/monitor/files/check_service.sh | 15 + ansible/roles/monitor/files/root | 1 + ansible/roles/monitor/tasks/main.yml | 22 + ansible/roles/monitor/vars/Debian.yml | 12 + ansible/roles/monitor/vars/RedHat.yml | 11 + ansible/roles/mq/tasks/main.yml | 17 +- ansible/roles/mq/tasks/rabbitmq.yml | 8 - ansible/roles/mq/tasks/rabbitmq_cluster.yml | 36 + ansible/roles/mq/tasks/rabbitmq_config.yml | 23 + ansible/roles/mq/tasks/rabbitmq_install.yml | 91 ++ ansible/roles/mq/templates/.erlang.cookie | 1 + ansible/roles/mq/templates/rabbitmq-env.conf | 1 + ansible/roles/mq/vars/Debian.yml | 12 + ansible/roles/mq/vars/RedHat.yml | 12 + ansible/roles/mq/vars/main.yml | 14 + .../roles/neutron-common/handlers/main.yml | 20 +- .../roles/neutron-compute/defaults/main.yml | 9 +- .../roles/neutron-compute/handlers/main.yml | 23 +- ansible/roles/neutron-compute/tasks/main.yml | 67 +- .../neutron-compute/templates/l3_agent.ini | 2 +- .../templates/metadata_agent.ini | 6 +- .../neutron-compute/templates/ml2_conf.ini | 108 -- .../templates/neutron-network.conf | 466 ------ .../neutron-compute/templates/neutron.conf | 467 ------ .../neutron-compute/templates/neutron_init.sh | 4 - .../roles/neutron-compute/templates/nova.conf | 68 - ansible/roles/neutron-compute/vars/Debian.yml | 19 + ansible/roles/neutron-compute/vars/RedHat.yml | 18 + ansible/roles/neutron-compute/vars/main.yml | 12 + .../neutron-controller/handlers/main.yml | 35 +- 
.../roles/neutron-controller/tasks/main.yml | 61 +- .../tasks/neutron_config.yml | 33 + .../tasks/neutron_install.yml | 44 + .../templates/dnsmasq-neutron.conf | 2 - .../neutron-controller/templates/l3_agent.ini | 2 +- .../templates/metadata_agent.ini | 6 +- .../neutron-controller/templates/ml2_conf.ini | 108 -- .../templates/neutron-network.conf | 466 ------ .../templates/neutron_init.sh | 4 - .../roles/neutron-controller/vars/Debian.yml | 14 + .../roles/neutron-controller/vars/RedHat.yml | 14 + .../roles/neutron-controller/vars/main.yml | 14 + .../neutron-network/files/vpnaas.filters | 7 + ansible/roles/neutron-network/files/xorp | 23 + .../roles/neutron-network/handlers/main.yml | 34 +- .../roles/neutron-network/tasks/firewall.yml | 30 + .../neutron-network/tasks/igmp-router.yml | 16 +- ansible/roles/neutron-network/tasks/main.yml | 159 +- ansible/roles/neutron-network/tasks/odl.yml | 10 +- ansible/roles/neutron-network/tasks/vpn.yml | 47 + .../templates/dnsmasq-neutron.conf | 2 - .../templates/etc/xorp/config.boot | 6 +- .../neutron-network/templates/l3_agent.ini | 2 +- .../templates/metadata_agent.ini | 6 +- .../templates/neutron-network.conf | 466 ------ .../neutron-network/templates/neutron.conf | 467 ------ .../neutron-network/templates/neutron_init.sh | 4 - .../roles/neutron-network/templates/nova.conf | 68 - ansible/roles/neutron-network/vars/Debian.yml | 25 + ansible/roles/neutron-network/vars/RedHat.yml | 29 + ansible/roles/neutron-network/vars/main.yml | 15 + ansible/roles/nova-compute/handlers/main.yml | 13 +- ansible/roles/nova-compute/tasks/main.yml | 42 +- .../nova-compute/templates/nova-compute.conf | 7 + .../roles/nova-compute/templates/nova.conf | 68 - ansible/roles/nova-compute/vars/Debian.yml | 15 + ansible/roles/nova-compute/vars/RedHat.yml | 16 + ansible/roles/nova-compute/vars/main.yml | 12 + .../roles/nova-controller/handlers/main.yml | 31 +- ansible/roles/nova-controller/tasks/main.yml | 52 +- .../nova-controller/tasks/nova_config.yml | 
15 + .../nova-controller/tasks/nova_install.yml | 39 + .../templates/dnsmasq-neutron.conf | 2 - .../nova-controller/templates/l3_agent.ini | 2 +- .../templates/metadata_agent.ini | 6 +- .../nova-controller/templates/ml2_conf.ini | 108 -- .../templates/neutron-network.conf | 466 ------ .../nova-controller/templates/neutron.conf | 467 ------ .../nova-controller/templates/neutron_init.sh | 8 + .../roles/nova-controller/templates/nova.conf | 68 - ansible/roles/nova-controller/vars/Debian.yml | 25 + ansible/roles/nova-controller/vars/RedHat.yml | 24 + ansible/roles/nova-controller/vars/main.yml | 12 + .../roles/odl_cluster/files/install_jdk8.tar | Bin 0 -> 4608 bytes .../odl_cluster/files/recover_network.py | 65 + .../files/recover_network_odl_l3.py | 30 + .../files/setup_networks_odl_l3.py | 91 ++ ansible/roles/odl_cluster/handlers/main.yml | 12 + ansible/roles/odl_cluster/tasks/main.yml | 21 + .../odl_cluster/tasks/odl_controller.yml | 250 +++ .../roles/odl_cluster/tasks/openvswitch.yml | 148 ++ ansible/roles/odl_cluster/templates/akka.conf | 105 ++ .../odl_cluster/templates/custom.properties | 135 ++ .../odl_cluster/templates/haproxy-odl.cfg | 24 + ansible/roles/odl_cluster/templates/jetty.xml | 106 ++ .../odl_cluster/templates/keepalived.conf | 47 + .../roles/odl_cluster/templates/ml2_conf.sh | 14 + .../odl_cluster/templates/module-shards.conf | 101 ++ .../roles/odl_cluster/templates/opendaylight | 31 + .../odl_cluster/templates/opendaylight.conf | 42 + .../templates/org.apache.karaf.features.cfg | 57 + .../odl_cluster/templates/tomcat-server.xml | 61 + ansible/roles/odl_cluster/vars/Debian.yml | 23 + ansible/roles/odl_cluster/vars/RedHat.yml | 23 + ansible/roles/odl_cluster/vars/main.yml | 30 + .../roles/odl_cluster_neutron/tasks/main.yml | 22 + ansible/roles/odl_cluster_post/tasks/main.yml | 8 + .../roles/onos_cluster/files/install_jdk8.tar | Bin 0 -> 4608 bytes .../onos_cluster/files/networking-onos.tar | Bin 0 -> 153600 bytes 
ansible/roles/onos_cluster/handlers/main.yml | 11 + ansible/roles/onos_cluster/tasks/main.yml | 53 + .../onos_cluster/tasks/onos_controller.yml | 155 ++ .../roles/onos_cluster/tasks/openvswitch.yml | 103 ++ .../roles/onos_cluster/templates/cluster.json | 10 + .../roles/onos_cluster/templates/ml2_conf.sh | 15 + .../roles/onos_cluster/templates/tablets.json | 63 + ansible/roles/onos_cluster/vars/Debian.yml | 14 + ansible/roles/onos_cluster/vars/RedHat.yml | 14 + ansible/roles/onos_cluster/vars/main.yml | 14 + .../open-contrail/files/provision/cacert.pem | 70 + .../files/provision/compute.filters.patch | 14 + .../files/provision/model.py.patch | 12 + .../files/provision/test_vif.py.patch | 70 + .../files/provision/vif.py.patch | 91 ++ .../files/provision/vtep-cert.pem | 70 + .../files/provision/vtep-privkey.pem | 27 + .../files/recover_network_opencontrail.py | 33 + .../files/setup_networks_opencontrail.py | 107 ++ ansible/roles/open-contrail/tasks/ext-net.yml | 47 + .../tasks/install/install-collector.yml | 24 + .../tasks/install/install-common.yml | 104 ++ .../tasks/install/install-compute.yml | 55 + .../tasks/install/install-config.yml | 51 + .../tasks/install/install-control.yml | 32 + .../tasks/install/install-database.yml | 25 + .../tasks/install/install-interface.yml | 34 + .../tasks/install/install-kernel.yml | 60 + .../tasks/install/install-webui.yml | 26 + ansible/roles/open-contrail/tasks/main.yml | 151 ++ .../tasks/provision/-node-common.yml | 28 + .../tasks/provision/-rabbitmq-stop.yml | 30 + .../tasks/provision/-redis-setup.yml | 34 + .../provision/-vrouter-compute-setup.yml | 115 ++ .../tasks/provision/provision-add-nodes.yml | 86 ++ .../tasks/provision/provision-collector.yml | 106 ++ .../tasks/provision/provision-compute.yml | 262 ++++ .../tasks/provision/provision-config.yml | 343 +++++ .../tasks/provision/provision-control.yml | 69 + .../tasks/provision/provision-database.yml | 209 +++ .../provision/provision-increase-limits.yml | 60 + 
.../tasks/provision/provision-rabbitmq.yml | 87 ++ .../tasks/provision/provision-route.yml | 50 + .../tasks/provision/provision-toragent.yml | 85 ++ .../tasks/provision/provision-tsn.yml | 104 ++ .../tasks/provision/provision-webui.yml | 74 + .../tasks/uninstall-openvswitch.yml | 46 + .../templates/install/override.j2 | 1 + .../templates/neutron_plugin_contrail.tar.gz | Bin 0 -> 73771 bytes .../templates/nova_contrail_vif.tar.gz | Bin 0 -> 3552 bytes .../provision/contrail-analytics-api-conf.j2 | 31 + .../templates/provision/contrail-api-conf.j2 | 29 + .../contrail-api-supervisord-conf.j2 | 12 + .../provision/contrail-collector-conf.j2 | 86 ++ .../provision/contrail-control-conf.j2 | 15 + .../provision/contrail-device-manager-conf.j2 | 16 + .../provision/contrail-discovery-conf.j2 | 43 + .../contrail-discovery-supervisord-conf.j2 | 12 + .../templates/provision/contrail-dns-conf.j2 | 15 + .../provision/contrail-keystone-auth-conf.j2 | 9 + .../provision/contrail-query-engine-conf.j2 | 18 + .../provision/contrail-schema-conf.j2 | 22 + .../templates/provision/contrail-sudoers.j2 | 5 + .../provision/contrail-svc-monitor-conf.j2 | 31 + .../provision/contrail-tor-agent-conf.j2 | 111 ++ .../provision/contrail-tor-agent-ini.j2 | 12 + .../provision/contrail-vnc-api-lib-ini.j2 | 11 + .../provision/contrail-vrouter-agent-conf.j2 | 177 +++ .../templates/provision/default-pmac.j2 | 1 + .../provision/haproxy-contrail-cfg.j2 | 78 + .../ifmap-authorization-properties.j2 | 2 + .../ifmap-basicauthusers-properties.j2 | 30 + .../provision/ifmap-log4j-properties.j2 | 26 + .../provision/ifmap-publisher-properties.j2 | 16 + .../templates/provision/keepalived-conf.j2 | 29 + .../provision/neutron-contrail-plugin-ini.j2 | 15 + .../open-contrail/templates/provision/nova.j2 | 58 + .../provision/qemu-device-acl-conf.j2 | 6 + .../provision/rabbitmq-conf-single.j2 | 6 + .../templates/provision/rabbitmq-conf.j2 | 25 + .../templates/provision/rabbitmq-cookie.j2 | 1 + 
.../templates/provision/rabbitmq-env-conf.j2 | 2 + .../provision/vrouter-nodemgr-param.j2 | 1 + .../provision/zookeeper-unique-id.j2 | 1 + .../templates/vrouter-functions.sh | 223 +++ ansible/roles/open-contrail/vars/Debian.yml | 48 + ansible/roles/open-contrail/vars/RedHat.yml | 9 + ansible/roles/open-contrail/vars/main.yml | 86 ++ ansible/roles/plumgrid-plugin/tasks/main.yml | 148 -- .../plumgrid-plugin/templates/plumgrid.ini | 14 - .../templates/plumgrid_plugin.py | 811 ---------- .../plumgrid-plugin/templates/plumlib.filters | 23 - .../plumgrid-plugin/templates/plumlib.py | 118 -- ansible/roles/plumgrid/tasks/main.yml | 156 -- ansible/roles/plumgrid/templates/default.conf | 143 -- .../roles/plumgrid/templates/keepalived.conf | 30 - .../roles/plumgrid/templates/plumgrid.conf | 10 - ansible/roles/plumgrid/templates/qemu.conf | 27 - ansible/roles/secgroup/handlers/main.yml | 18 + ansible/roles/secgroup/tasks/main.yml | 20 + ansible/roles/secgroup/tasks/secgroup.yml | 35 + ansible/roles/secgroup/templates/neutron.j2 | 4 + ansible/roles/secgroup/templates/nova.j2 | 3 + ansible/roles/secgroup/vars/Debian.yml | 35 + ansible/roles/secgroup/vars/RedHat.yml | 35 + ansible/roles/secgroup/vars/main.yml | 11 + .../setup-network/files/setup_networks/log.py | 41 + .../files/setup_networks/net_init | 20 + .../files/setup_networks/setup_networks.py | 73 + ansible/roles/setup-network/tasks/main.yml | 62 + .../setup-network/templates/my_configs.debian | 14 + .../roles/setup-network/templates/network.cfg | 5 + ansible/roles/storage/files/create_img.sh | 12 + ansible/roles/storage/files/get_var_size.sh | 14 + ansible/roles/storage/files/loop.yml | 9 + ansible/roles/storage/files/losetup.sh | 15 + ansible/roles/storage/files/storage | 2 + ansible/roles/storage/files/storage.service | 15 + ansible/roles/storage/tasks/loop.yml | 31 + ansible/roles/storage/tasks/main.yml | 57 + ansible/roles/storage/tasks/real.yml | 16 + ansible/roles/tacker/tasks/main.yml | 14 + 
.../roles/tacker/tasks/tacker_controller.yml | 128 ++ .../tacker/templates/haproxy-tacker-cfg.j2 | 10 + ansible/roles/tacker/templates/ml2_conf.j2 | 2 + ansible/roles/tacker/templates/tacker.j2 | 29 + ansible/roles/tacker/vars/Debian.yml | 14 + ansible/roles/tacker/vars/RedHat.yml | 14 + ansible/roles/tacker/vars/main.yml | 19 + cobbler/conf/modules.conf | 8 +- cobbler/conf/settings | 32 +- cobbler/conf/tftpd.template | 2 +- cobbler/kickstarts/default.ks | 7 +- cobbler/kickstarts/default.seed | 7 + cobbler/snippets/hosts.xml | 2 +- cobbler/snippets/kdump.xml | 12 +- cobbler/snippets/keep_cfengine_keys | 20 +- cobbler/snippets/keep_files | 30 +- cobbler/snippets/keep_rhn_keys | 4 +- cobbler/snippets/keep_ssh_host_keys | 30 +- cobbler/snippets/kickstart_chef_run.sh | 6 +- cobbler/snippets/kickstart_client.rb | 4 +- cobbler/snippets/kickstart_knife.rb | 2 +- cobbler/snippets/kickstart_limits.conf | 2 +- cobbler/snippets/kickstart_network_config | 2 +- cobbler/snippets/kickstart_ntp | 24 +- cobbler/snippets/kickstart_post_anamon | 2 + .../kickstart_post_install_network_config | 98 +- .../kickstart_pre_install_network_config | 10 +- .../snippets/kickstart_pre_partition_disks | 170 +-- cobbler/snippets/kickstart_rsyslog.conf | 2 +- cobbler/snippets/kickstart_ssh | 2 +- cobbler/snippets/kickstart_sysctl.conf | 10 +- cobbler/snippets/kickstart_yum | 2 +- cobbler/snippets/limits_conf.xml | 2 +- cobbler/snippets/networking.xml | 8 +- cobbler/snippets/preseed_apt_repo_config | 2 +- cobbler/snippets/preseed_chef_run.sh | 4 +- cobbler/snippets/preseed_client.rb | 4 +- cobbler/snippets/preseed_knife.rb | 2 +- cobbler/snippets/preseed_limits.conf | 2 +- cobbler/snippets/preseed_ntp | 24 +- cobbler/snippets/preseed_post_anamon | 2 + cobbler/snippets/preseed_post_apt_repo_config | 7 +- .../preseed_post_install_network_config | 142 +- cobbler/snippets/preseed_post_partition_disks | 2 +- .../preseed_pre_install_network_config | 2 +- cobbler/snippets/preseed_pre_partition_disks | 188 
+-- cobbler/snippets/preseed_rsyslog.conf | 2 +- cobbler/snippets/preseed_sysctl.conf | 10 +- cobbler/snippets/puppet_register_if_enabled | 2 +- cobbler/snippets/redhat_register | 6 +- cobbler/snippets/repo_config.xml | 2 +- cobbler/snippets/rsyslog.xml | 2 +- cobbler/snippets/ssh.xml | 2 +- cobbler/snippets/sshd.xml | 4 +- cobbler/snippets/sysctl.xml | 10 +- .../centos/6.5/kickstart_centos_base_repo | 4 +- .../centos/6.5/kickstart_centos_vault_repo | 2 +- .../centos/6.6/kickstart_centos_base_repo | 4 +- .../centos/6.6/kickstart_centos_vault_repo | 2 +- .../centos/7.0/kickstart_centos_base_repo | 4 +- 466 files changed, 12984 insertions(+), 9385 deletions(-) create mode 100644 ansible/openstack/HA-ansible-multinodes.yml rename ansible/{openstack_juno => openstack}/allinone.yml (97%) rename ansible/{openstack_juno => openstack}/compute.yml (100%) rename ansible/{openstack_juno => openstack}/controller.yml (100%) rename ansible/{openstack_juno => openstack}/group_vars/all (93%) rename ansible/{openstack_juno => openstack}/multinodes.yml (92%) rename ansible/{openstack_juno => openstack}/network.yml (100%) rename ansible/{openstack_juno => openstack}/single-controller.yml (85%) rename ansible/{openstack_juno => openstack}/storage.yml (100%) rename ansible/{roles/neutron-compute => openstack}/templates/dnsmasq-neutron.conf (100%) rename ansible/{roles/neutron-network => openstack}/templates/ml2_conf.ini (90%) rename ansible/{roles/neutron-controller => openstack}/templates/neutron.conf (96%) rename ansible/{roles/neutron-controller => openstack}/templates/nova.conf (60%) create mode 100644 ansible/openstack_juno/.gitkeep delete mode 100644 ansible/openstack_juno_plumgrid/single-controller.yml create mode 100644 ansible/openstack_kilo/.gitkeep create mode 100644 ansible/openstack_liberty/.gitkeep create mode 100644 ansible/openstack_mitaka/.gitkeep create mode 100644 ansible/openstack_mitaka/aodh/handlers/main.yml create mode 100644 
ansible/openstack_mitaka/aodh/tasks/main.yml create mode 100644 ansible/openstack_mitaka/aodh/templates/aodh.conf.j2 create mode 100644 ansible/openstack_mitaka/aodh/templates/api_paste.ini.j2 create mode 100644 ansible/openstack_mitaka/aodh/templates/policy.json.j2 create mode 100644 ansible/openstack_mitaka/aodh/vars/Debian.yml create mode 100644 ansible/openstack_mitaka/aodh/vars/RedHat.yml create mode 100644 ansible/openstack_mitaka/aodh/vars/main.yml create mode 100644 ansible/openstack_mitaka/ceilometer_controller/vars/Debian.yml create mode 100644 ansible/openstack_mitaka/ceilometer_controller/vars/RedHat.yml create mode 100644 ansible/openstack_mitaka/common/vars/Debian.yml create mode 100755 ansible/roles/apache/handlers/main.yml create mode 100755 ansible/roles/apache/tasks/main.yml create mode 100755 ansible/roles/apache/templates/openstack-dashboard.conf.j2 create mode 100644 ansible/roles/apache/templates/ports.conf.j2 create mode 100755 ansible/roles/apache/vars/Debian.yml create mode 100755 ansible/roles/apache/vars/RedHat.yml create mode 100755 ansible/roles/apache/vars/main.yml create mode 100644 ansible/roles/ceilometer_compute/handlers/main.yml create mode 100644 ansible/roles/ceilometer_compute/tasks/main.yml create mode 100644 ansible/roles/ceilometer_compute/templates/ceilometer.j2 create mode 100644 ansible/roles/ceilometer_compute/templates/nova.j2 create mode 100644 ansible/roles/ceilometer_compute/vars/Debian.yml create mode 100644 ansible/roles/ceilometer_compute/vars/RedHat.yml create mode 100644 ansible/roles/ceilometer_compute/vars/main.yml create mode 100644 ansible/roles/ceilometer_controller/handlers/main.yml create mode 100644 ansible/roles/ceilometer_controller/tasks/main.yml create mode 100644 ansible/roles/ceilometer_controller/templates/ceilometer.j2 create mode 100644 ansible/roles/ceilometer_controller/templates/cinder.j2 create mode 100644 ansible/roles/ceilometer_controller/templates/glance.j2 create mode 100644 
ansible/roles/ceilometer_controller/vars/Debian.yml create mode 100644 ansible/roles/ceilometer_controller/vars/RedHat.yml create mode 100644 ansible/roles/ceilometer_controller/vars/main.yml create mode 100755 ansible/roles/ceph-config/files/create_osd.sh create mode 100755 ansible/roles/ceph-config/tasks/create_config.yml create mode 100755 ansible/roles/ceph-config/tasks/main.yml create mode 100755 ansible/roles/ceph-config/templates/ceph.j2 create mode 100644 ansible/roles/ceph-config/templates/create_monmap.j2 create mode 100755 ansible/roles/ceph-config/templates/dump_var.j2 create mode 100644 ansible/roles/ceph-mon/tasks/install_mon.yml create mode 100644 ansible/roles/ceph-mon/tasks/main.yml create mode 100644 ansible/roles/ceph-mon/vars/Debian.yml create mode 100644 ansible/roles/ceph-mon/vars/RedHat.yml create mode 100644 ansible/roles/ceph-mon/vars/main.yml create mode 100755 ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml create mode 100755 ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml create mode 100644 ansible/roles/ceph-openstack/tasks/main.yml create mode 100644 ansible/roles/ceph-openstack/templates/secret.j2 create mode 100755 ansible/roles/ceph-openstack/vars/Debian.yml create mode 100755 ansible/roles/ceph-openstack/vars/RedHat.yml create mode 100755 ansible/roles/ceph-openstack/vars/main.yml create mode 100755 ansible/roles/ceph-osd/files/create_osd.sh create mode 100644 ansible/roles/ceph-osd/tasks/install_osd.yml create mode 100644 ansible/roles/ceph-osd/tasks/main.yml create mode 100644 ansible/roles/ceph-purge/tasks/main.yml create mode 100644 ansible/roles/cinder-controller/tasks/cinder_config.yml create mode 100644 ansible/roles/cinder-controller/tasks/cinder_install.yml delete mode 100644 ansible/roles/cinder-controller/templates/cinder_init.sh create mode 100644 ansible/roles/cinder-controller/vars/Debian.yml create mode 100644 ansible/roles/cinder-controller/vars/RedHat.yml create mode 100644 
ansible/roles/cinder-controller/vars/main.yml delete mode 100644 ansible/roles/cinder-volume/files/loop.yml create mode 100644 ansible/roles/cinder-volume/vars/Debian.yml create mode 100644 ansible/roles/cinder-volume/vars/RedHat.yml create mode 100644 ansible/roles/cinder-volume/vars/main.yml delete mode 100644 ansible/roles/common/files/sources.list.d/cloudarchive-juno.list delete mode 100644 ansible/roles/common/handlers/main.yml create mode 100644 ansible/roles/common/templates/pip.conf create mode 100644 ansible/roles/common/vars/Debian.yml create mode 100644 ansible/roles/common/vars/RedHat.yml create mode 100644 ansible/roles/common/vars/main.yml create mode 100755 ansible/roles/dashboard/handlers/main.yml delete mode 100644 ansible/roles/dashboard/templates/local_settings.py create mode 100755 ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 create mode 100755 ansible/roles/dashboard/templates/ports.j2 create mode 100644 ansible/roles/dashboard/vars/Debian.yml create mode 100644 ansible/roles/dashboard/vars/RedHat.yml create mode 100644 ansible/roles/dashboard/vars/main.yml delete mode 100644 ansible/roles/database/files/my.cnf create mode 100755 ansible/roles/database/files/remove_user.sh delete mode 100644 ansible/roles/database/handlers/main.yml create mode 100644 ansible/roles/database/tasks/mariadb_cluster.yml create mode 100644 ansible/roles/database/tasks/mariadb_cluster_debian.yml create mode 100644 ansible/roles/database/tasks/mariadb_cluster_redhat.yml create mode 100644 ansible/roles/database/tasks/mariadb_config.yml create mode 100644 ansible/roles/database/tasks/mariadb_install.yml create mode 100755 ansible/roles/database/tasks/mongodb_config.yml create mode 100755 ansible/roles/database/tasks/mongodb_install.yml create mode 100644 ansible/roles/database/templates/data.j2 create mode 100644 ansible/roles/database/templates/mongodb.conf create mode 100644 ansible/roles/database/templates/my.cnf create mode 100644 
ansible/roles/database/templates/replica.js create mode 100644 ansible/roles/database/templates/wsrep.cnf create mode 100644 ansible/roles/database/vars/Debian.yml create mode 100644 ansible/roles/database/vars/RedHat.yml create mode 100644 ansible/roles/database/vars/main.yml create mode 100644 ansible/roles/ext-network/handlers/main.yml create mode 100644 ansible/roles/ext-network/tasks/main.yml create mode 100644 ansible/roles/glance/tasks/glance_config.yml create mode 100644 ansible/roles/glance/tasks/glance_install.yml create mode 100644 ansible/roles/glance/tasks/nfs.yml create mode 100644 ansible/roles/glance/vars/Debian.yml create mode 100644 ansible/roles/glance/vars/RedHat.yml create mode 100644 ansible/roles/glance/vars/main.yml create mode 100644 ansible/roles/ha/files/galera_chk create mode 100644 ansible/roles/ha/files/mysqlchk create mode 100644 ansible/roles/ha/handlers/main.yml create mode 100644 ansible/roles/ha/tasks/main.yml create mode 100644 ansible/roles/ha/templates/haproxy.cfg create mode 100644 ansible/roles/ha/templates/keepalived.conf create mode 100644 ansible/roles/ha/vars/Debian.yml create mode 100644 ansible/roles/ha/vars/RedHat.yml create mode 100644 ansible/roles/ha/vars/main.yml create mode 100644 ansible/roles/heat/handlers/main.yml create mode 100644 ansible/roles/heat/tasks/heat_config.yml create mode 100644 ansible/roles/heat/tasks/heat_install.yml create mode 100644 ansible/roles/heat/tasks/main.yml create mode 100644 ansible/roles/heat/templates/heat.j2 create mode 100644 ansible/roles/heat/vars/Debian.yml create mode 100644 ansible/roles/heat/vars/RedHat.yml create mode 100644 ansible/roles/heat/vars/main.yml mode change 100644 => 100755 ansible/roles/keystone/handlers/main.yml create mode 100644 ansible/roles/keystone/tasks/keystone_config.yml create mode 100644 ansible/roles/keystone/tasks/keystone_install.yml delete mode 100644 ansible/roles/keystone/templates/keystone_init create mode 100644 
ansible/roles/keystone/templates/wsgi-keystone.conf.j2 create mode 100644 ansible/roles/keystone/vars/Debian.yml create mode 100644 ansible/roles/keystone/vars/RedHat.yml create mode 100644 ansible/roles/keystone/vars/main.yml create mode 100755 ansible/roles/memcached/handlers/main.yml create mode 100644 ansible/roles/memcached/tasks/main.yml create mode 100644 ansible/roles/memcached/vars/Debian.yml create mode 100644 ansible/roles/memcached/vars/RedHat.yml create mode 100644 ansible/roles/memcached/vars/main.yml create mode 100644 ansible/roles/monitor/files/check_Debian_service.sh create mode 100644 ansible/roles/monitor/files/check_RedHat_service.sh create mode 100644 ansible/roles/monitor/files/check_service.sh create mode 100644 ansible/roles/monitor/files/root create mode 100644 ansible/roles/monitor/tasks/main.yml create mode 100644 ansible/roles/monitor/vars/Debian.yml create mode 100644 ansible/roles/monitor/vars/RedHat.yml delete mode 100644 ansible/roles/mq/tasks/rabbitmq.yml create mode 100644 ansible/roles/mq/tasks/rabbitmq_cluster.yml create mode 100644 ansible/roles/mq/tasks/rabbitmq_config.yml create mode 100755 ansible/roles/mq/tasks/rabbitmq_install.yml create mode 100644 ansible/roles/mq/templates/.erlang.cookie create mode 100644 ansible/roles/mq/templates/rabbitmq-env.conf create mode 100644 ansible/roles/mq/vars/Debian.yml create mode 100644 ansible/roles/mq/vars/RedHat.yml create mode 100644 ansible/roles/mq/vars/main.yml delete mode 100644 ansible/roles/neutron-compute/templates/ml2_conf.ini delete mode 100644 ansible/roles/neutron-compute/templates/neutron-network.conf delete mode 100644 ansible/roles/neutron-compute/templates/neutron.conf delete mode 100644 ansible/roles/neutron-compute/templates/neutron_init.sh delete mode 100644 ansible/roles/neutron-compute/templates/nova.conf create mode 100644 ansible/roles/neutron-compute/vars/Debian.yml create mode 100644 ansible/roles/neutron-compute/vars/RedHat.yml create mode 100644 
ansible/roles/neutron-compute/vars/main.yml create mode 100644 ansible/roles/neutron-controller/tasks/neutron_config.yml create mode 100644 ansible/roles/neutron-controller/tasks/neutron_install.yml delete mode 100644 ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf delete mode 100644 ansible/roles/neutron-controller/templates/ml2_conf.ini delete mode 100644 ansible/roles/neutron-controller/templates/neutron-network.conf delete mode 100644 ansible/roles/neutron-controller/templates/neutron_init.sh create mode 100644 ansible/roles/neutron-controller/vars/Debian.yml create mode 100644 ansible/roles/neutron-controller/vars/RedHat.yml create mode 100644 ansible/roles/neutron-controller/vars/main.yml create mode 100644 ansible/roles/neutron-network/files/vpnaas.filters create mode 100644 ansible/roles/neutron-network/files/xorp create mode 100755 ansible/roles/neutron-network/tasks/firewall.yml create mode 100755 ansible/roles/neutron-network/tasks/vpn.yml delete mode 100644 ansible/roles/neutron-network/templates/dnsmasq-neutron.conf delete mode 100644 ansible/roles/neutron-network/templates/neutron-network.conf delete mode 100644 ansible/roles/neutron-network/templates/neutron.conf delete mode 100644 ansible/roles/neutron-network/templates/neutron_init.sh delete mode 100644 ansible/roles/neutron-network/templates/nova.conf create mode 100644 ansible/roles/neutron-network/vars/Debian.yml create mode 100644 ansible/roles/neutron-network/vars/RedHat.yml create mode 100644 ansible/roles/neutron-network/vars/main.yml delete mode 100644 ansible/roles/nova-compute/templates/nova.conf create mode 100644 ansible/roles/nova-compute/vars/Debian.yml create mode 100644 ansible/roles/nova-compute/vars/RedHat.yml create mode 100644 ansible/roles/nova-compute/vars/main.yml create mode 100644 ansible/roles/nova-controller/tasks/nova_config.yml create mode 100644 ansible/roles/nova-controller/tasks/nova_install.yml delete mode 100644 
ansible/roles/nova-controller/templates/dnsmasq-neutron.conf delete mode 100644 ansible/roles/nova-controller/templates/ml2_conf.ini delete mode 100644 ansible/roles/nova-controller/templates/neutron-network.conf delete mode 100644 ansible/roles/nova-controller/templates/neutron.conf delete mode 100644 ansible/roles/nova-controller/templates/nova.conf create mode 100644 ansible/roles/nova-controller/vars/Debian.yml create mode 100644 ansible/roles/nova-controller/vars/RedHat.yml create mode 100644 ansible/roles/nova-controller/vars/main.yml create mode 100755 ansible/roles/odl_cluster/files/install_jdk8.tar create mode 100755 ansible/roles/odl_cluster/files/recover_network.py create mode 100755 ansible/roles/odl_cluster/files/recover_network_odl_l3.py create mode 100644 ansible/roles/odl_cluster/files/setup_networks_odl_l3.py create mode 100755 ansible/roles/odl_cluster/handlers/main.yml create mode 100755 ansible/roles/odl_cluster/tasks/main.yml create mode 100755 ansible/roles/odl_cluster/tasks/odl_controller.yml create mode 100755 ansible/roles/odl_cluster/tasks/openvswitch.yml create mode 100755 ansible/roles/odl_cluster/templates/akka.conf create mode 100644 ansible/roles/odl_cluster/templates/custom.properties create mode 100755 ansible/roles/odl_cluster/templates/haproxy-odl.cfg create mode 100755 ansible/roles/odl_cluster/templates/jetty.xml create mode 100644 ansible/roles/odl_cluster/templates/keepalived.conf create mode 100755 ansible/roles/odl_cluster/templates/ml2_conf.sh create mode 100755 ansible/roles/odl_cluster/templates/module-shards.conf create mode 100755 ansible/roles/odl_cluster/templates/opendaylight create mode 100755 ansible/roles/odl_cluster/templates/opendaylight.conf create mode 100755 ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg create mode 100755 ansible/roles/odl_cluster/templates/tomcat-server.xml create mode 100755 ansible/roles/odl_cluster/vars/Debian.yml create mode 100755 
ansible/roles/odl_cluster/vars/RedHat.yml create mode 100755 ansible/roles/odl_cluster/vars/main.yml create mode 100755 ansible/roles/odl_cluster_neutron/tasks/main.yml create mode 100644 ansible/roles/odl_cluster_post/tasks/main.yml create mode 100755 ansible/roles/onos_cluster/files/install_jdk8.tar create mode 100644 ansible/roles/onos_cluster/files/networking-onos.tar create mode 100755 ansible/roles/onos_cluster/handlers/main.yml create mode 100755 ansible/roles/onos_cluster/tasks/main.yml create mode 100755 ansible/roles/onos_cluster/tasks/onos_controller.yml create mode 100755 ansible/roles/onos_cluster/tasks/openvswitch.yml create mode 100755 ansible/roles/onos_cluster/templates/cluster.json create mode 100755 ansible/roles/onos_cluster/templates/ml2_conf.sh create mode 100755 ansible/roles/onos_cluster/templates/tablets.json create mode 100755 ansible/roles/onos_cluster/vars/Debian.yml create mode 100755 ansible/roles/onos_cluster/vars/RedHat.yml create mode 100755 ansible/roles/onos_cluster/vars/main.yml create mode 100755 ansible/roles/open-contrail/files/provision/cacert.pem create mode 100755 ansible/roles/open-contrail/files/provision/compute.filters.patch create mode 100755 ansible/roles/open-contrail/files/provision/model.py.patch create mode 100755 ansible/roles/open-contrail/files/provision/test_vif.py.patch create mode 100755 ansible/roles/open-contrail/files/provision/vif.py.patch create mode 100755 ansible/roles/open-contrail/files/provision/vtep-cert.pem create mode 100755 ansible/roles/open-contrail/files/provision/vtep-privkey.pem create mode 100755 ansible/roles/open-contrail/files/recover_network_opencontrail.py create mode 100755 ansible/roles/open-contrail/files/setup_networks_opencontrail.py create mode 100644 ansible/roles/open-contrail/tasks/ext-net.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-collector.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-common.yml create mode 100755 
ansible/roles/open-contrail/tasks/install/install-compute.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-config.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-control.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-database.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-interface.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-kernel.yml create mode 100755 ansible/roles/open-contrail/tasks/install/install-webui.yml create mode 100755 ansible/roles/open-contrail/tasks/main.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/-node-common.yml create mode 100644 ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/-redis-setup.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-collector.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-compute.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-config.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-control.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-database.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml create mode 100644 ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-route.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-toragent.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-tsn.yml create mode 100755 ansible/roles/open-contrail/tasks/provision/provision-webui.yml create mode 100755 
ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml create mode 100755 ansible/roles/open-contrail/templates/install/override.j2 create mode 100644 ansible/roles/open-contrail/templates/neutron_plugin_contrail.tar.gz create mode 100644 ansible/roles/open-contrail/templates/nova_contrail_vif.tar.gz create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-api-supervisord-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-control-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-discovery-supervisord-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-keystone-auth-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-sudoers.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-tor-agent-ini.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/contrail-vnc-api-lib-ini.j2 create mode 100755 
ansible/roles/open-contrail/templates/provision/contrail-vrouter-agent-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/default-pmac.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/ifmap-authorization-properties.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/ifmap-basicauthusers-properties.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/ifmap-log4j-properties.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/ifmap-publisher-properties.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/keepalived-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/neutron-contrail-plugin-ini.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/nova.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/qemu-device-acl-conf.j2 create mode 100644 ansible/roles/open-contrail/templates/provision/rabbitmq-conf-single.j2 create mode 100644 ansible/roles/open-contrail/templates/provision/rabbitmq-conf.j2 create mode 100644 ansible/roles/open-contrail/templates/provision/rabbitmq-cookie.j2 create mode 100644 ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2 create mode 100755 ansible/roles/open-contrail/templates/provision/zookeeper-unique-id.j2 create mode 100755 ansible/roles/open-contrail/templates/vrouter-functions.sh create mode 100755 ansible/roles/open-contrail/vars/Debian.yml create mode 100755 ansible/roles/open-contrail/vars/RedHat.yml create mode 100755 ansible/roles/open-contrail/vars/main.yml delete mode 100644 ansible/roles/plumgrid-plugin/tasks/main.yml delete mode 100644 ansible/roles/plumgrid-plugin/templates/plumgrid.ini delete mode 100644 
ansible/roles/plumgrid-plugin/templates/plumgrid_plugin.py delete mode 100644 ansible/roles/plumgrid-plugin/templates/plumlib.filters delete mode 100644 ansible/roles/plumgrid-plugin/templates/plumlib.py delete mode 100644 ansible/roles/plumgrid/tasks/main.yml delete mode 100644 ansible/roles/plumgrid/templates/default.conf delete mode 100644 ansible/roles/plumgrid/templates/keepalived.conf delete mode 100644 ansible/roles/plumgrid/templates/plumgrid.conf delete mode 100644 ansible/roles/plumgrid/templates/qemu.conf create mode 100644 ansible/roles/secgroup/handlers/main.yml create mode 100644 ansible/roles/secgroup/tasks/main.yml create mode 100644 ansible/roles/secgroup/tasks/secgroup.yml create mode 100644 ansible/roles/secgroup/templates/neutron.j2 create mode 100644 ansible/roles/secgroup/templates/nova.j2 create mode 100644 ansible/roles/secgroup/vars/Debian.yml create mode 100644 ansible/roles/secgroup/vars/RedHat.yml create mode 100644 ansible/roles/secgroup/vars/main.yml create mode 100644 ansible/roles/setup-network/files/setup_networks/log.py create mode 100755 ansible/roles/setup-network/files/setup_networks/net_init create mode 100644 ansible/roles/setup-network/files/setup_networks/setup_networks.py create mode 100644 ansible/roles/setup-network/tasks/main.yml create mode 100644 ansible/roles/setup-network/templates/my_configs.debian create mode 100644 ansible/roles/setup-network/templates/network.cfg create mode 100755 ansible/roles/storage/files/create_img.sh create mode 100755 ansible/roles/storage/files/get_var_size.sh create mode 100755 ansible/roles/storage/files/loop.yml create mode 100755 ansible/roles/storage/files/losetup.sh create mode 100755 ansible/roles/storage/files/storage create mode 100644 ansible/roles/storage/files/storage.service create mode 100755 ansible/roles/storage/tasks/loop.yml create mode 100755 ansible/roles/storage/tasks/main.yml create mode 100755 ansible/roles/storage/tasks/real.yml create mode 100755 
ansible/roles/tacker/tasks/main.yml create mode 100755 ansible/roles/tacker/tasks/tacker_controller.yml create mode 100644 ansible/roles/tacker/templates/haproxy-tacker-cfg.j2 create mode 100644 ansible/roles/tacker/templates/ml2_conf.j2 create mode 100644 ansible/roles/tacker/templates/tacker.j2 create mode 100755 ansible/roles/tacker/vars/Debian.yml create mode 100755 ansible/roles/tacker/vars/RedHat.yml create mode 100755 ansible/roles/tacker/vars/main.yml diff --git a/ansible/openstack/HA-ansible-multinodes.yml b/ansible/openstack/HA-ansible-multinodes.yml new file mode 100644 index 0000000..d8b82c7 --- /dev/null +++ b/ansible/openstack/HA-ansible-multinodes.yml @@ -0,0 +1,239 @@ +--- +- hosts: all + remote_user: root + pre_tasks: + - name: make sure ssh dir exist + file: + path: '{{ item.path }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + state: directory + mode: 0755 + with_items: + - path: /root/.ssh + owner: root + group: root + + - name: write ssh config + copy: + content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no" + dest: '{{ item.dest }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + mode: 0600 + with_items: + - dest: /root/.ssh/config + owner: root + group: root + + - name: generate ssh keys + shell: if [ ! 
-f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi; + + - name: fetch ssh keys + fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes + + - authorized_key: + user: root + key: "{{ lookup('file', item) }}" + with_fileglob: + - /tmp/ssh-keys-* + max_fail_percentage: 0 + roles: + - common + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - setup-network + +- hosts: ha + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ha + +- hosts: controller + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - memcached + - apache + - database + - mq + - keystone + - nova-controller + - neutron-controller + - cinder-controller + - glance + - neutron-common + - neutron-network + - ceilometer_controller +# - ext-network + - dashboard + - heat +# - aodh + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - storage + +- hosts: compute + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - nova-compute + - neutron-compute + - cinder-volume + - ceilometer_compute + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - secgroup + +- hosts: ceph_adm + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: [] + # - ceph-deploy + +- hosts: ceph + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-purge + - ceph-config + +- hosts: ceph_mon + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-mon + +- hosts: ceph_osd + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-osd + +- hosts: ceph + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-openstack + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - monitor + + +- hosts: all + remote_user: root
+ accelerate: true + max_fail_percentage: 0 + tasks: + - name: set bash to nova + user: + name: nova + shell: /bin/bash + + - name: make sure ssh dir exist + file: + path: '{{ item.path }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + state: directory + mode: 0755 + with_items: + - path: /var/lib/nova/.ssh + owner: nova + group: nova + + - name: copy ssh keys for nova + shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh; + + - name: write ssh config + copy: + content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no" + dest: '{{ item.dest }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + mode: 0600 + with_items: + - dest: /var/lib/nova/.ssh/config + owner: nova + group: nova + + - authorized_key: + user: nova + key: "{{ lookup('file', item) }}" + with_fileglob: + - /tmp/ssh-keys-* + + - name: chown ssh file + shell: chown -R nova:nova /var/lib/nova/.ssh; + + +- hosts: all + remote_user: root + max_fail_percentage: 0 + roles: + - odl_cluster + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - onos_cluster + +- hosts: all + remote_user: root + sudo: True + max_fail_percentage: 0 + roles: + - open-contrail + +- hosts: all + remote_user: root + serial: 1 + max_fail_percentage: 0 + roles: + - odl_cluster_neutron + +- hosts: all + remote_user: root + max_fail_percentage: 0 + roles: + - odl_cluster_post + +- hosts: controller + remote_user: root + max_fail_percentage: 0 + roles: + - ext-network + +- hosts: controller + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - tacker diff --git a/ansible/openstack_juno/allinone.yml b/ansible/openstack/allinone.yml similarity index 97% rename from ansible/openstack_juno/allinone.yml rename to ansible/openstack/allinone.yml index d86af06..4539e5f 100644 --- a/ansible/openstack_juno/allinone.yml +++ b/ansible/openstack/allinone.yml @@ -1,7 +1,7 @@ --- - hosts: controller sudo: True - roles: + roles: - common - database - mq diff --git
a/ansible/openstack_juno/compute.yml b/ansible/openstack/compute.yml similarity index 100% rename from ansible/openstack_juno/compute.yml rename to ansible/openstack/compute.yml diff --git a/ansible/openstack_juno/controller.yml b/ansible/openstack/controller.yml similarity index 100% rename from ansible/openstack_juno/controller.yml rename to ansible/openstack/controller.yml diff --git a/ansible/openstack_juno/group_vars/all b/ansible/openstack/group_vars/all similarity index 93% rename from ansible/openstack_juno/group_vars/all rename to ansible/openstack/group_vars/all index 5643fcd..79859d0 100644 --- a/ansible/openstack_juno/group_vars/all +++ b/ansible/openstack/group_vars/all @@ -24,6 +24,10 @@ NOVA_PASS: nova_secret DASH_DBPASS: dash_db_secret CINDER_DBPASS: cinder_db_secret CINDER_PASS: cinder_secret +HEAT_DBPASS: heat_db_secret +HEAT_PASS: heat_secret +AODH_DBPASS: aodh_db_secret +AODH_PASS: aodh_secret NEUTRON_DBPASS: neutron_db_secret NEUTRON_PASS: netron_secret NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] diff --git a/ansible/openstack_juno/multinodes.yml b/ansible/openstack/multinodes.yml similarity index 92% rename from ansible/openstack_juno/multinodes.yml rename to ansible/openstack/multinodes.yml index ae7c0a8..5b43a69 100644 --- a/ansible/openstack_juno/multinodes.yml +++ b/ansible/openstack/multinodes.yml @@ -65,3 +65,9 @@ - common - nova-compute - neutron-compute + +- hosts: odl + remote_user: root + sudo: True + roles: + - odl diff --git a/ansible/openstack_juno/network.yml b/ansible/openstack/network.yml similarity index 100% rename from ansible/openstack_juno/network.yml rename to ansible/openstack/network.yml diff --git a/ansible/openstack_juno/single-controller.yml b/ansible/openstack/single-controller.yml similarity index 85% rename from ansible/openstack_juno/single-controller.yml rename to ansible/openstack/single-controller.yml index d86af06..96ec0a6 100644 --- a/ansible/openstack_juno/single-controller.yml +++ 
b/ansible/openstack/single-controller.yml @@ -1,7 +1,7 @@ --- - hosts: controller sudo: True - roles: + roles: - common - database - mq @@ -30,3 +30,9 @@ - common - nova-compute - neutron-compute + +- hosts: odl + remote_user: root + sudo: True + roles: + - odl diff --git a/ansible/openstack_juno/storage.yml b/ansible/openstack/storage.yml similarity index 100% rename from ansible/openstack_juno/storage.yml rename to ansible/openstack/storage.yml diff --git a/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf b/ansible/openstack/templates/dnsmasq-neutron.conf similarity index 100% rename from ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf rename to ansible/openstack/templates/dnsmasq-neutron.conf diff --git a/ansible/roles/neutron-network/templates/ml2_conf.ini b/ansible/openstack/templates/ml2_conf.ini similarity index 90% rename from ansible/roles/neutron-network/templates/ml2_conf.ini rename to ansible/openstack/templates/ml2_conf.ini index 9972842..7b3e76d 100644 --- a/ansible/roles/neutron-network/templates/ml2_conf.ini +++ b/ansible/openstack/templates/ml2_conf.ini @@ -29,7 +29,7 @@ mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} # can be created. Use * to allow flat networks with arbitrary # physical_network names. # -flat_networks = external +flat_networks = * # Example:flat_networks = physnet1,physnet2 # Example:flat_networks = * @@ -39,7 +39,7 @@ flat_networks = external # tenant networks, as well as ranges of VLAN tags on each # physical_network available for allocation as tenant networks. 
# -network_vlan_ranges = +network_vlan_ranges = {{ NEUTRON_VLAN_RANGES|join(",") }} # Example: network_vlan_ranges = physnet1:1000:2999,physnet2 [ml2_type_gre] @@ -67,16 +67,18 @@ firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewal enable_security_group = True [database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8 +connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 [ovs] local_ip = {{ internal_ip }} {% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} integration_bridge = br-int +{% if NEUTRON_TUNNEL_TYPES %} tunnel_bridge = br-tun tunnel_id_ranges = 1001:4095 tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} +{% endif %} +bridge_mappings = {{ NEUTRON_OVS_BRIDGE_MAPPINGS | join(",") }} {% endif %} [agent] @@ -90,11 +92,14 @@ l2_population = False [odl] {% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} network_vlan_ranges = 1001:4095 +{% if NEUTRON_TUNNEL_TYPES %} tunnel_id_ranges = 1001:4095 tun_peer_patch_port = patch-int int_peer_patch_port = patch-tun -tenant_network_type = vxlan tunnel_bridge = br-tun +{% endif %} + +tenant_network_type = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} integration_bridge = br-int controllers = 10.1.0.15:8080:admin:admin {% endif %} @@ -103,6 +108,6 @@ controllers = 10.1.0.15:8080:admin:admin {% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} username = {{ odl_username }} password = {{ odl_password }} -url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron +url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron {% endif %} diff --git a/ansible/roles/neutron-controller/templates/neutron.conf b/ansible/openstack/templates/neutron.conf similarity index 96% rename from ansible/roles/neutron-controller/templates/neutron.conf rename to ansible/openstack/templates/neutron.conf index 28bb2ba..ebc46f9 
100644 --- a/ansible/roles/neutron-controller/templates/neutron.conf +++ b/ansible/openstack/templates/neutron.conf @@ -142,7 +142,7 @@ rabbit_port = 5672 # rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' # rabbit_hosts = localhost:5672 # User ID used for RabbitMQ connections -rabbit_userid = guest +rabbit_userid = {{ RABBIT_USER }} # Location of a virtual RabbitMQ installation. # rabbit_virtual_host = / # Maximum retries with trying to connect to RabbitMQ @@ -153,7 +153,6 @@ rabbit_userid = guest # Use HA queues in RabbitMQ (x-ha-policy: all). You need to # wipe RabbitMQ database when changing this option. (boolean value) # rabbit_ha_queues = false - # QPID # rpc_backend=neutron.openstack.common.rpc.impl_qpid # Qpid broker hostname @@ -305,22 +304,23 @@ notify_nova_on_port_status_changes = True notify_nova_on_port_data_changes = True # URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 +nova_url = http://{{ internal_vip.ip }}:8774/v2 # Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne +nova_region_name = regionOne # Username for connection to nova in admin context nova_admin_username = nova # The uuid of the admin nova tenant +{% if NOVA_ADMIN_TENANT_ID|default('') %} nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - +{% endif %} # Password for connection to nova in admin context. nova_admin_password = {{ NOVA_PASS }} # Authorization URL for connection to nova in admin context. 
-nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 +nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0 # Number of seconds between sending events to nova if there are any events to send send_events_interval = 2 @@ -394,8 +394,8 @@ report_interval = 30 # =========== end of items for agent management extension ===== [keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 admin_tenant_name = service admin_user = neutron admin_password = {{ NEUTRON_PASS }} @@ -408,7 +408,7 @@ signing_dir = $state_path/keystone-signing # Replace 127.0.0.1 above with the IP address of the database used by the # main neutron server. (Leave it as is if the database runs on this host.) # connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron +connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron # The SQLAlchemy connection string used to connect to the slave database slave_connection = @@ -428,7 +428,8 @@ min_pool_size = 1 max_pool_size = 100 # Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 +idle_timeout = 30 +use_db_reconnect = True # If set, use this value for max_overflow with sqlalchemy max_overflow = 100 @@ -453,8 +454,7 @@ pool_timeout = 10 # example of non-default provider: # service_provider=FIREWALL:name2:firewall_driver_path # --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default # In order to activate Radware's lbaas driver you need to uncomment the next
line. # If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. # Otherwise comment the HA Proxy line @@ -465,3 +465,9 @@ service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVP # service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default # Uncomment the line below to use Embrane heleos as Load Balancer service provider. # service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default + +{% if enable_fwaas %} +[fwaas] +driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver +enabled = True +{% endif %} diff --git a/ansible/roles/neutron-controller/templates/nova.conf b/ansible/openstack/templates/nova.conf similarity index 60% rename from ansible/roles/neutron-controller/templates/nova.conf rename to ansible/openstack/templates/nova.conf index dfb4b93..5277359 100644 --- a/ansible/roles/neutron-controller/templates/nova.conf +++ b/ansible/openstack/templates/nova.conf @@ -1,9 +1,15 @@ +{% set memcached_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} + [DEFAULT] dhcpbridge_flagfile=/etc/nova/nova.conf dhcpbridge=/usr/bin/nova-dhcpbridge logdir=/var/log/nova state_path=/var/lib/nova -lock_path=/var/lock/nova +lock_path=/var/lib/nova/tmp force_dhcp_release=True iscsi_helper=tgtadm libvirt_use_virtio_for_bridges=True @@ -14,22 +20,24 @@ debug={{ DEBUG }} ec2_private_dns_show_ip=True api_paste_config=/etc/nova/api-paste.ini volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 +enabled_apis=osapi_compute,metadata +default_floating_pool={{ public_net_info.network }} auth_strategy = keystone rpc_backend = rabbit rabbit_host = {{ rabbit_host }} 
+rabbit_userid = {{ RABBIT_USER }} rabbit_password = {{ RABBIT_PASS }} +osapi_compute_listen={{ internal_ip }} +metadata_listen={{ internal_ip }} + my_ip = {{ internal_ip }} vnc_enabled = True -vncserver_listen = 0.0.0.0 +vncserver_listen = {{ internal_ip }} vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html +novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html novncproxy_host = {{ internal_ip }} novncproxy_port = 6080 @@ -45,24 +53,32 @@ notify_on_state_change = vm_and_task_state notification_driver = nova.openstack.common.notifier.rpc_notifier notification_driver = ceilometer.compute.nova_notifier +memcached_servers = {{ memcached_servers }} + [database] # The SQLAlchemy connection string used to connect to the database connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova +idle_timeout = 30 +use_db_reconnect = True +pool_timeout = 10 [keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/2.0 -identity_uri = http://{{ identity_host }}:35357 +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 admin_tenant_name = service admin_user = nova admin_password = {{ NOVA_PASS }} +memcached_servers = {{ memcached_servers }} [glance] -host = {{ image_host }} +host = {{ internal_vip.ip }} [neutron] -url = http://{{ network_server_host }}:9696 +url = http://{{ internal_vip.ip }}:9696 auth_strategy = keystone admin_tenant_name = service admin_username = neutron admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ identity_host }}:35357/v2.0 +admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0 +service_metadata_proxy = True +metadata_proxy_shared_secret = {{ METADATA_SECRET }} diff --git a/ansible/openstack_juno/.gitkeep b/ansible/openstack_juno/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ansible/openstack_juno_plumgrid/single-controller.yml
b/ansible/openstack_juno_plumgrid/single-controller.yml deleted file mode 100644 index 5551a19..0000000 --- a/ansible/openstack_juno_plumgrid/single-controller.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- hosts: controller - sudo: True - roles: - - common - - database - - mq - - keystone - - nova-controller - - neutron-controller - - dashboard - - cinder-controller - - glance - - plumgrid - - plumgrid-plugin - -- hosts: network - sudo: True - roles: - - common - - plumgrid - -- hosts: storage - sudo: True - roles: - - common - - cinder-volume - -- hosts: compute - sudo: True - roles: - - common - - nova-compute - - neutron-compute - - plumgrid diff --git a/ansible/openstack_kilo/.gitkeep b/ansible/openstack_kilo/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ansible/openstack_liberty/.gitkeep b/ansible/openstack_liberty/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ansible/openstack_mitaka/.gitkeep b/ansible/openstack_mitaka/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ansible/openstack_mitaka/aodh/handlers/main.yml b/ansible/openstack_mitaka/aodh/handlers/main.yml new file mode 100644 index 0000000..e1084c8 --- /dev/null +++ b/ansible/openstack_mitaka/aodh/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart aodh services + service: name={{ item }} state=restarted enabled=yes + with_items: services diff --git a/ansible/openstack_mitaka/aodh/tasks/main.yml b/ansible/openstack_mitaka/aodh/tasks/main.yml new file mode 100644 index 0000000..aa23b9e --- /dev/null +++ b/ansible/openstack_mitaka/aodh/tasks/main.yml @@ -0,0 +1,29 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install aodh packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: update aodh conf + template: src={{ item }} dest=/etc/aodh/{{ item }} + backup=yes + with_items: + - aodh.conf.j2 +# - api_paste.ini.j2 +# - policy.json.j2 + notify: restart aodh services + +- name: write services to monitor list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services + +- meta: flush_handlers diff --git a/ansible/openstack_mitaka/aodh/templates/aodh.conf.j2 b/ansible/openstack_mitaka/aodh/templates/aodh.conf.j2 new file mode 100644 index 0000000..eac6e5b --- /dev/null +++ b/ansible/openstack_mitaka/aodh/templates/aodh.conf.j2 @@ -0,0 +1,41 @@ +[DEFAULT] +bind_host = {{ internal_ip }} +bind_port = 8042 +rpc_backend = 
rabbit +auth_strategy = keystone +debug = True +verbose = True + +[oslo_messaging_rabbit] +rabbit_hosts = {{ internal_vip.ip }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +rabbit_use_ssl = false + +[database] +connection = mongodb://aodh:{{ AODH_DBPASS }}@{{ internal_vip.ip }}:27017/aodh + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +identity_uri = http://{{ internal_vip.ip }}:35357 +auth_plugin = password +project_domain_id = default +user_domain_id = default +project_name = service +username = aodh +password = {{ AODH_PASS }} +memcached_servers = {{ memcached_servers }} +token_cache_time = 300 +revocation_cache_time = 60 + +[service_credentials] +os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +os_username = aodh +os_tenant_name = service +os_password = {{ AODH_PASS }} +os_endpoint_type = internalURL +os_region_name = regionOne + +[api] +host = {{ internal_ip }} diff --git a/ansible/openstack_mitaka/aodh/templates/api_paste.ini.j2 b/ansible/openstack_mitaka/aodh/templates/api_paste.ini.j2 new file mode 100644 index 0000000..151789c --- /dev/null +++ b/ansible/openstack_mitaka/aodh/templates/api_paste.ini.j2 @@ -0,0 +1,22 @@ +# aodh API WSGI Pipeline +# Define the filters that make up the pipeline for processing WSGI requests +# Note: This pipeline is PasteDeploy's term rather than aodh's pipeline +# used for processing samples + +# Remove authtoken from the pipeline if you don't want to use keystone authentication +[pipeline:main] +pipeline = cors request_id authtoken api-server + +[app:api-server] +paste.app_factory = aodh.api.app:app_factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +oslo_config_project = aodh + +[filter:request_id] +paste.filter_factory = oslo_middleware:RequestId.factory + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = aodh diff --git 
a/ansible/openstack_mitaka/aodh/templates/policy.json.j2 b/ansible/openstack_mitaka/aodh/templates/policy.json.j2 new file mode 100644 index 0000000..4fd873e --- /dev/null +++ b/ansible/openstack_mitaka/aodh/templates/policy.json.j2 @@ -0,0 +1,20 @@ +{ + "context_is_admin": "role:admin", + "segregation": "rule:context_is_admin", + "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "telemetry:get_alarm": "rule:admin_or_owner", + "telemetry:get_alarms": "rule:admin_or_owner", + "telemetry:query_alarm": "rule:admin_or_owner", + + "telemetry:create_alarm": "", + "telemetry:change_alarm": "rule:admin_or_owner", + "telemetry:delete_alarm": "rule:admin_or_owner", + + "telemetry:get_alarm_state": "rule:admin_or_owner", + "telemetry:change_alarm_state": "rule:admin_or_owner", + + "telemetry:alarm_history": "rule:admin_or_owner", + "telemetry:query_alarm_history": "rule:admin_or_owner" +} diff --git a/ansible/openstack_mitaka/aodh/vars/Debian.yml b/ansible/openstack_mitaka/aodh/vars/Debian.yml new file mode 100644 index 0000000..bdf4655 --- /dev/null +++ b/ansible/openstack_mitaka/aodh/vars/Debian.yml @@ -0,0 +1,22 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################# +--- +packages: + - aodh-api + - aodh-evaluator + - aodh-notifier + - aodh-listener + - aodh-expirer + - python-ceilometerclient + +services: + - aodh-api + - aodh-notifier + - aodh-evaluator + - aodh-listener diff --git a/ansible/openstack_mitaka/aodh/vars/RedHat.yml b/ansible/openstack_mitaka/aodh/vars/RedHat.yml new file mode 100644 index 0000000..3d18288 --- /dev/null +++ b/ansible/openstack_mitaka/aodh/vars/RedHat.yml @@ -0,0 +1,22 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################# +--- +packages: + - openstack-aodh-api + - openstack-aodh-evaluator + - openstack-aodh-notifier + - openstack-aodh-listener + - openstack-aodh-expirer + - python-ceilometerclient + +services: + - aodh-api + - aodh-notifier + - aodh-evaluator + - aodh-listener diff --git a/ansible/openstack_mitaka/aodh/vars/main.yml b/ansible/openstack_mitaka/aodh/vars/main.yml new file mode 100644 index 0000000..b17f6ed --- /dev/null +++ b/ansible/openstack_mitaka/aodh/vars/main.yml @@ -0,0 +1,12 @@ +############################################################################## +## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +## +## All rights reserved. 
This program and the accompanying materials +## are made available under the terms of the Apache License, Version 2.0 +## which accompanies this distribution, and is available at +## http://www.apache.org/licenses/LICENSE-2.0 +############################################################################### +--- +packages_noarch: [] + +services_noarch: [] diff --git a/ansible/openstack_mitaka/ceilometer_controller/vars/Debian.yml b/ansible/openstack_mitaka/ceilometer_controller/vars/Debian.yml new file mode 100644 index 0000000..b749ffa --- /dev/null +++ b/ansible/openstack_mitaka/ceilometer_controller/vars/Debian.yml @@ -0,0 +1,37 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - ceilometer-api + - ceilometer-collector + - ceilometer-agent-central + - ceilometer-agent-notification +# - ceilometer-alarm-evaluator +# - ceilometer-alarm-notifier + - python-ceilometerclient + +ceilometer_services: + - ceilometer-agent-central + - ceilometer-agent-notification + - ceilometer-api + - ceilometer-collector +# - ceilometer-alarm-evaluator +# - ceilometer-alarm-notifier + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: cinder.j2 + dest: + - /etc/cinder/cinder.conf + - src: glance.j2 + dest: + - /etc/glance/glance-api.conf + - /etc/glance/glance-registry.conf diff --git a/ansible/openstack_mitaka/ceilometer_controller/vars/RedHat.yml b/ansible/openstack_mitaka/ceilometer_controller/vars/RedHat.yml new file mode 100644 index 0000000..6c5f53e --- /dev/null +++ 
b/ansible/openstack_mitaka/ceilometer_controller/vars/RedHat.yml @@ -0,0 +1,36 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - openstack-ceilometer-api + - openstack-ceilometer-collector + - openstack-ceilometer-central + - openstack-ceilometer-notification +# - openstack-ceilometer-alarm + - python-ceilometerclient + +ceilometer_services: + - openstack-ceilometer-central + - openstack-ceilometer-notification + - openstack-ceilometer-api + - openstack-ceilometer-collector +# - openstack-ceilometer-alarm-evaluator +# - openstack-ceilometer-alarm-notifier + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: cinder.j2 + dest: + - /etc/cinder/cinder.conf + - src: glance.j2 + dest: + - /etc/glance/glance-api.conf + - /etc/glance/glance-registry.conf diff --git a/ansible/openstack_mitaka/common/vars/Debian.yml b/ansible/openstack_mitaka/common/vars/Debian.yml new file mode 100644 index 0000000..980bcac --- /dev/null +++ b/ansible/openstack_mitaka/common/vars/Debian.yml @@ -0,0 +1,30 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - ubuntu-cloud-keyring + #- python-dev + - openvswitch-datapath-dkms + - openvswitch-switch + - python-memcache + - python-iniparse + - python-lxml +# - python-d* #TODO, need remove + +pip_packages: + - crudini + - python-keyczar + - yang2tosca + +pip_conf: pip.conf + +services: + - ntp + + diff --git a/ansible/roles/apache/handlers/main.yml b/ansible/roles/apache/handlers/main.yml new file mode 100755 index 0000000..0f28dc2 --- /dev/null +++ b/ansible/roles/apache/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart apache related services + service: name={{ item }} state=restarted enabled=yes + with_items: services| union(services_noarch) diff --git a/ansible/roles/apache/tasks/main.yml b/ansible/roles/apache/tasks/main.yml new file mode 100755 index 0000000..7053229 --- /dev/null +++ b/ansible/roles/apache/tasks/main.yml @@ -0,0 +1,30 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes" + with_items: packages | union(packages_noarch) + +- name: assure listen port exist + template: + dest: '{{ apache_config_dir }}/ports.conf' + src: ports.conf.j2 + notify: + - restart apache related services + +- name: remove default listen port on centos + lineinfile: + dest: /etc/httpd/conf/httpd.conf + state: absent + regexp: 'Listen 80' + when: ansible_os_family == 'RedHat' + +- meta: flush_handlers diff --git a/ansible/roles/apache/templates/openstack-dashboard.conf.j2 b/ansible/roles/apache/templates/openstack-dashboard.conf.j2 new file mode 100755 index 0000000..403fcc2 --- /dev/null +++ b/ansible/roles/apache/templates/openstack-dashboard.conf.j2 @@ -0,0 +1,15 @@ +{% set work_threads = (ansible_processor_vcpus + 1) // 2 %} + + + WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi + WSGIDaemonProcess horizon user=horizon group=horizon processes={{ work_threads }} threads={{ work_threads }} + WSGIProcessGroup horizon + Alias /static {{ horizon_dir }}/static/ + Alias /horizon/static {{ horizon_dir }}/static/ + + Order allow,deny + Allow from all + + + + diff --git a/ansible/roles/apache/templates/ports.conf.j2 b/ansible/roles/apache/templates/ports.conf.j2 new file mode 100644 index 0000000..be27d19 --- /dev/null +++ b/ansible/roles/apache/templates/ports.conf.j2 @@ -0,0 +1 @@ +Listen {{ internal_ip }}:80 diff --git a/ansible/roles/apache/vars/Debian.yml b/ansible/roles/apache/vars/Debian.yml new file mode 100755 index 0000000..95e941e --- /dev/null +++ b/ansible/roles/apache/vars/Debian.yml @@ 
-0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - apache2 + - libapache2-mod-wsgi + +services: + - apache2 + +apache_config_dir: /etc/apache2 diff --git a/ansible/roles/apache/vars/RedHat.yml b/ansible/roles/apache/vars/RedHat.yml new file mode 100755 index 0000000..5211a12 --- /dev/null +++ b/ansible/roles/apache/vars/RedHat.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - mod_wsgi + - httpd + +services: + - httpd + +apache_config_dir: /etc/httpd/conf.d diff --git a/ansible/roles/apache/vars/main.yml b/ansible/roles/apache/vars/main.yml new file mode 100755 index 0000000..f6fef74 --- /dev/null +++ b/ansible/roles/apache/vars/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] diff --git a/ansible/roles/ceilometer_compute/handlers/main.yml b/ansible/roles/ceilometer_compute/handlers/main.yml new file mode 100644 index 0000000..c973d7d --- /dev/null +++ b/ansible/roles/ceilometer_compute/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart ceilometer relation service + service: name={{ item }} state=restarted enabled=yes + with_items: ceilometer_services diff --git a/ansible/roles/ceilometer_compute/tasks/main.yml b/ansible/roles/ceilometer_compute/tasks/main.yml new file mode 100644 index 0000000..864ea97 --- /dev/null +++ b/ansible/roles/ceilometer_compute/tasks/main.yml @@ -0,0 +1,44 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install ceilometer packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: ceilometer_packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: copy ceilometer configs + template: src={{ item.src}} dest=/opt/os_templates + with_items: "{{ ceilometer_configs_templates }}" + +- name: update ceilometer configs + shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} + with_subelements: + - ceilometer_configs_templates + - dest + notify: restart ceilometer relation service + +- name: write services to monitor list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: ceilometer_services + +- meta: flush_handlers diff --git a/ansible/roles/ceilometer_compute/templates/ceilometer.j2 b/ansible/roles/ceilometer_compute/templates/ceilometer.j2 new file mode 100644 index 0000000..ee78de0 --- /dev/null +++ b/ansible/roles/ceilometer_compute/templates/ceilometer.j2 @@ -0,0 +1,31 @@ +[DEFAULT] +verbose = True +rpc_backend = rabbit +auth_strategy = keystone + +[oslo_messaging_rabbit] +rabbit_host = {{ internal_vip.ip }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} + +[publisher] +metering_secret = {{ metering_secret }} + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +auth_plugin = password 
+project_domain_id = default +user_domain_id = default +project_name = service +username = ceilometer +password = {{ CEILOMETER_PASS }} + +[service_credentials] +os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +os_username = ceilometer +os_tenant_name = service +os_password = {{ CEILOMETER_PASS }} +os_endpoint_type = internalURL +os_region_name = regionOne + diff --git a/ansible/roles/ceilometer_compute/templates/nova.j2 b/ansible/roles/ceilometer_compute/templates/nova.j2 new file mode 100644 index 0000000..e7532c8 --- /dev/null +++ b/ansible/roles/ceilometer_compute/templates/nova.j2 @@ -0,0 +1,5 @@ +[DEFAULT] +instance_usage_audit = True +instance_usage_audit_period = hour +notify_on_state_change = vm_and_task_state +notification_driver = messagingv2 diff --git a/ansible/roles/ceilometer_compute/vars/Debian.yml b/ansible/roles/ceilometer_compute/vars/Debian.yml new file mode 100644 index 0000000..550d14f --- /dev/null +++ b/ansible/roles/ceilometer_compute/vars/Debian.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - ceilometer-agent-compute + +ceilometer_services: + - ceilometer-agent-compute + - nova-compute + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: nova.j2 + dest: + - /etc/nova/nova.conf diff --git a/ansible/roles/ceilometer_compute/vars/RedHat.yml b/ansible/roles/ceilometer_compute/vars/RedHat.yml new file mode 100644 index 0000000..5a9128c --- /dev/null +++ b/ansible/roles/ceilometer_compute/vars/RedHat.yml @@ -0,0 +1,25 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - openstack-ceilometer-compute + - python-ceilometerclient + - python-pecan + +ceilometer_services: + - openstack-ceilometer-compute + - openstack-nova-compute + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: nova.j2 + dest: + - /etc/nova/nova.conf diff --git a/ansible/roles/ceilometer_compute/vars/main.yml b/ansible/roles/ceilometer_compute/vars/main.yml new file mode 100644 index 0000000..209e1e0 --- /dev/null +++ b/ansible/roles/ceilometer_compute/vars/main.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD 
and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] +metering_secret: 1c5df72079b31fb47747 diff --git a/ansible/roles/ceilometer_controller/handlers/main.yml b/ansible/roles/ceilometer_controller/handlers/main.yml new file mode 100644 index 0000000..c973d7d --- /dev/null +++ b/ansible/roles/ceilometer_controller/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart ceilometer relation service + service: name={{ item }} state=restarted enabled=yes + with_items: ceilometer_services diff --git a/ansible/roles/ceilometer_controller/tasks/main.yml b/ansible/roles/ceilometer_controller/tasks/main.yml new file mode 100644 index 0000000..6b1882c --- /dev/null +++ b/ansible/roles/ceilometer_controller/tasks/main.yml @@ -0,0 +1,51 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install ceilometer packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: ceilometer_packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: copy ceilometer configs + template: src={{ item.src}} dest=/opt/os_templates + with_items: "{{ ceilometer_configs_templates }}" + +- name: update ceilometer configs + shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} + with_subelements: + - ceilometer_configs_templates + - dest + notify: restart ceilometer relation service + +- name: change meter polling interval to 300s + replace: + dest: /etc/ceilometer/pipeline.yaml + regexp: 'interval: .+' + replace: 'interval: 300' + notify: restart ceilometer relation service + +- name: write services to monitor list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: ceilometer_services + +- meta: flush_handlers diff --git a/ansible/roles/ceilometer_controller/templates/ceilometer.j2 b/ansible/roles/ceilometer_controller/templates/ceilometer.j2 new file mode 100644 index 0000000..b262a26 --- /dev/null +++ b/ansible/roles/ceilometer_controller/templates/ceilometer.j2 @@ -0,0 +1,37 @@ +[DEFAULT] +rpc_backend = rabbit +auth_strategy = keystone +verbose = True + +[oslo_messaging_rabbit] +rabbit_host = {{ internal_vip.ip }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} + 
+[database] +connection = mongodb://ceilometer:{{ CEILOMETER_DBPASS }}@{{ internal_vip.ip }}:27017/ceilometer + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +identity_uri = http://{{ internal_vip.ip }}:35357 +auth_plugin = password +project_domain_id = default +user_domain_id = default +project_name = service +username = ceilometer +password = {{ CEILOMETER_PASS }} + +[service_credentials] +os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +os_username = ceilometer +os_tenant_name = service +os_password = {{ CEILOMETER_PASS }} +os_endpoint_type = internalURL +os_region_name = regionOne + +[publisher] +metering_secret = {{ metering_secret }} + +[api] +host = {{ internal_ip }} diff --git a/ansible/roles/ceilometer_controller/templates/cinder.j2 b/ansible/roles/ceilometer_controller/templates/cinder.j2 new file mode 100644 index 0000000..dfd0473 --- /dev/null +++ b/ansible/roles/ceilometer_controller/templates/cinder.j2 @@ -0,0 +1,2 @@ +[DEFAULT] +notification_driver = messagingv2 diff --git a/ansible/roles/ceilometer_controller/templates/glance.j2 b/ansible/roles/ceilometer_controller/templates/glance.j2 new file mode 100644 index 0000000..a513d2c --- /dev/null +++ b/ansible/roles/ceilometer_controller/templates/glance.j2 @@ -0,0 +1,8 @@ +[DEFAULT] +notification_driver = messagingv2 +rpc_backend = rabbit + +[oslo_messaging_rabbit] +rabbit_host = {{ internal_vip.ip }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} diff --git a/ansible/roles/ceilometer_controller/vars/Debian.yml b/ansible/roles/ceilometer_controller/vars/Debian.yml new file mode 100644 index 0000000..55f5aa1 --- /dev/null +++ b/ansible/roles/ceilometer_controller/vars/Debian.yml @@ -0,0 +1,33 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - ceilometer-api + - ceilometer-collector + - ceilometer-agent-central + - ceilometer-agent-notification + - python-ceilometerclient + +ceilometer_services: + - ceilometer-agent-central + - ceilometer-agent-notification + - ceilometer-api + - ceilometer-collector + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: cinder.j2 + dest: + - /etc/cinder/cinder.conf + - src: glance.j2 + dest: + - /etc/glance/glance-api.conf + - /etc/glance/glance-registry.conf diff --git a/ansible/roles/ceilometer_controller/vars/RedHat.yml b/ansible/roles/ceilometer_controller/vars/RedHat.yml new file mode 100644 index 0000000..86f464a --- /dev/null +++ b/ansible/roles/ceilometer_controller/vars/RedHat.yml @@ -0,0 +1,33 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - openstack-ceilometer-api + - openstack-ceilometer-collector + - openstack-ceilometer-central + - openstack-ceilometer-notification + - python-ceilometerclient + +ceilometer_services: + - openstack-ceilometer-central + - openstack-ceilometer-notification + - openstack-ceilometer-api + - openstack-ceilometer-collector + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: cinder.j2 + dest: + - /etc/cinder/cinder.conf + - src: glance.j2 + dest: + - /etc/glance/glance-api.conf + - /etc/glance/glance-registry.conf diff --git a/ansible/roles/ceilometer_controller/vars/main.yml b/ansible/roles/ceilometer_controller/vars/main.yml new file mode 100644 index 0000000..209e1e0 --- /dev/null +++ b/ansible/roles/ceilometer_controller/vars/main.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] +metering_secret: 1c5df72079b31fb47747 diff --git a/ansible/roles/ceph-config/files/create_osd.sh b/ansible/roles/ceph-config/files/create_osd.sh new file mode 100755 index 0000000..dd815c2 --- /dev/null +++ b/ansible/roles/ceph-config/files/create_osd.sh @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +if [ -d "/var/local/osd" ]; then +echo "clear /var/local/osd" +rm -r /var/local/osd/ +umount /var/local/osd +rm -r /var/local/osd +fi + + +#safe check +ps -ef |grep lvremove |awk '{print $2}' |xargs kill -9 +ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9 +ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9 +ps -ef |grep lvcreate |awk '{print $2}' |xargs kill -9 + +if [ -L "/dev/storage-volumes/ceph0" ]; then +echo "remove lv vg" +lvremove -f /dev/storage-volumes/ceph0 +fi + + +echo "lvcreate" +lvcreate -l 100%FREE -nceph0 storage-volumes +echo "mkfs" +mkfs.xfs -f /dev/storage-volumes/ceph0 + +if [ ! 
-d "/var/local/osd" ]; then +echo "mount osd" +mkdir -p /var/local/osd +mount /dev/storage-volumes/ceph0 /var/local/osd +fi + diff --git a/ansible/roles/ceph-config/tasks/create_config.yml b/ansible/roles/ceph-config/tasks/create_config.yml new file mode 100755 index 0000000..891e23e --- /dev/null +++ b/ansible/roles/ceph-config/tasks/create_config.yml @@ -0,0 +1,67 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- name: gen ceph fsid + shell: uuidgen + register: ceph_fsid + when: inventory_hostname in groups['ceph_adm'] + +- name: gen ceph conf + local_action: + module: "template" + src: "ceph.j2" + dest: "/tmp/ceph.conf" + when: inventory_hostname in groups['ceph_adm'] + +- name: "make directory for ceph config file" + file: path="/etc/ceph" state="directory" + +- name: copy ceph conf to dest mon node + copy: src="/tmp/ceph.conf" dest="/etc/ceph/ceph.conf" + +- name: install ceph-related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - ceph + +- name: gen create monmap script + local_action: template src="create_monmap.j2" dest="/tmp/create_monmap.sh" mode=0755 + when: inventory_hostname in groups['ceph_adm'] + +- name: create monmap + script: /tmp/create_monmap.sh + when: inventory_hostname in groups['ceph_mon'] + +- name: create mon.keyring + shell: "ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. 
--cap mon 'allow *'" + when: inventory_hostname in groups['ceph_adm'] + +- name: create admin.keyring + shell: "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'" + when: inventory_hostname in groups['ceph_adm'] + +- name: Add the client.admin key to the ceph.mon.keyring + shell: "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring" + when: inventory_hostname in groups['ceph_adm'] + +- name: fetch mon.keyring to local + fetch: src="/tmp/ceph.mon.keyring" dest="/tmp/ceph.mon.keyring" flat=yes + when: inventory_hostname in groups['ceph_adm'] + +- name: fetch client.admin.keyring to local + fetch: src="/etc/ceph/ceph.client.admin.keyring" dest="/tmp/ceph.client.admin.keyring" flat=yes + when: inventory_hostname in groups['ceph_adm'] + +- name: copy mon.keyring to remote nodes + copy: src="/tmp/ceph.mon.keyring" dest="/tmp/ceph.mon.keyring" + +- name: copy admin.keyring to remote nodes + copy: src="/tmp/ceph.client.admin.keyring" dest="/etc/ceph/ceph.client.admin.keyring" + + +- meta: flush_handlers diff --git a/ansible/roles/ceph-config/tasks/main.yml b/ansible/roles/ceph-config/tasks/main.yml new file mode 100755 index 0000000..dbe9fea --- /dev/null +++ b/ansible/roles/ceph-config/tasks/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- include: create_config.yml + tags: + - ceph_config + - ceph_deploy + - ceph_mon diff --git a/ansible/roles/ceph-config/templates/ceph.j2 b/ansible/roles/ceph-config/templates/ceph.j2 new file mode 100755 index 0000000..bd0e3f5 --- /dev/null +++ b/ansible/roles/ceph-config/templates/ceph.j2 @@ -0,0 +1,25 @@ +[global] +fsid = {{ ceph_fsid.stdout }} +mon initial members = {{ groups["ceph_mon"] | join(", ")}} +mon host = +{%- for host in groups["ceph_mon"] -%} +{{ ', ' if not loop.first else ''}}{{ ip_settings[host].mgmt.ip }} +{%- endfor %} + +public network = {{ mgmt_cidr }} +cluster network = {{ storage_cidr }} + +auth cluster required = cephx +auth service required = cephx +auth client required = cephx + +osd journal size = 1024 +filestore xattr use omap = true +osd pool default size = 1 +osd pool default min size = 1 +osd pool default pg num = 333 +osd pool default pgp num = 333 +osd crush chooseleaf type = 1 + +debug mon = 1 +debug ms = 0 diff --git a/ansible/roles/ceph-config/templates/create_monmap.j2 b/ansible/roles/ceph-config/templates/create_monmap.j2 new file mode 100644 index 0000000..7d1eb9d --- /dev/null +++ b/ansible/roles/ceph-config/templates/create_monmap.j2 @@ -0,0 +1,5 @@ +monmaptool --create --clobber --fsid {{ ceph_fsid.stdout }} +{%- for host in groups['ceph_mon']%} + --add {{host}} {{ ip_settings[host].mgmt.ip }}:6789 +{%- endfor %} + /tmp/monmap diff --git a/ansible/roles/ceph-config/templates/dump_var.j2 b/ansible/roles/ceph-config/templates/dump_var.j2 new file mode 100755 index 0000000..a4a9b15 --- /dev/null +++ b/ansible/roles/ceph-config/templates/dump_var.j2 @@ -0,0 +1,8 @@ +HOSTVARS (ANSIBLE GATHERED, group_vars, host_vars) : + +{{ 
hostvars[inventory_hostname] | to_yaml }} + +PLAYBOOK VARS: + +{{ vars | to_yaml }} + diff --git a/ansible/roles/ceph-mon/tasks/install_mon.yml b/ansible/roles/ceph-mon/tasks/install_mon.yml new file mode 100644 index 0000000..658d109 --- /dev/null +++ b/ansible/roles/ceph-mon/tasks/install_mon.yml @@ -0,0 +1,32 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +- include_vars: "{{ ansible_os_family }}.yml" + +- name: Create a default data directory + file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory" + +- name: Populate the monitor daemon + shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring" + +- name: Touch the done and auto start file + file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch" + with_items: + - "done" + - "{{ ceph_start_type }}" + +- name: start mon daemon + shell: "{{ ceph_start_script }}" + +- name: wait for creating osd keyring + wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring + +- name: fetch osd keyring + fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes + run_once: True diff --git a/ansible/roles/ceph-mon/tasks/main.yml b/ansible/roles/ceph-mon/tasks/main.yml new file mode 100644 index 0000000..3defa26 --- /dev/null +++ b/ansible/roles/ceph-mon/tasks/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- include: install_mon.yml + when: inventory_hostname in groups["ceph_mon"] + tags: + - ceph_mon + - ceph_deploy diff --git a/ansible/roles/ceph-mon/vars/Debian.yml b/ansible/roles/ceph-mon/vars/Debian.yml new file mode 100644 index 0000000..16b7989 --- /dev/null +++ b/ansible/roles/ceph-mon/vars/Debian.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +ceph_start_script: "start ceph-mon id={{ inventory_hostname }}" +ceph_start_type: "upstart" diff --git a/ansible/roles/ceph-mon/vars/RedHat.yml b/ansible/roles/ceph-mon/vars/RedHat.yml new file mode 100644 index 0000000..fa19fc0 --- /dev/null +++ b/ansible/roles/ceph-mon/vars/RedHat.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +ceph_start_script: "/etc/init.d/ceph start mon.{{ inventory_hostname }}" +ceph_start_type: "sysvinit" diff --git a/ansible/roles/ceph-mon/vars/main.yml b/ansible/roles/ceph-mon/vars/main.yml new file mode 100644 index 0000000..466ea6a --- /dev/null +++ b/ansible/roles/ceph-mon/vars/main.yml @@ -0,0 +1,10 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + diff --git a/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml b/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml new file mode 100755 index 0000000..d7c414e --- /dev/null +++ b/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml @@ -0,0 +1,40 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: chown of glance/api.log + shell: chown -R glance:glance /var/log/glance + when: inventory_hostname in groups['controller'] + tags: + - ceph_conf_glance + ignore_errors: True + +- name: modify glance-api.conf for ceph + shell: sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a rbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf + when: inventory_hostname in groups['controller'] + tags: + - ceph_conf_glance + +- name: restart glance + shell: rm -f /var/log/glance/api.log && chown -R glance:glance /var/log/glance && service {{ glance_service }} restart + when: inventory_hostname in groups['controller'] + tags: + - ceph_conf_glance + ignore_errors: True + +- name: modify cinder.conf for ceph + shell: sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/g' /etc/cinder/cinder.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart + when: inventory_hostname in groups['compute'] + tags: + - ceph_conf_cinder + +- name: modify nova.conf for ceph + shell: sed -i 's/^\(images_type\).*/\1 = rbd/g' /etc/nova/nova-compute.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ 
ceph_uuid.stdout_lines[0] }}/g' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart + when: inventory_hostname in groups['compute'] + tags: + - ceph_conf_nova diff --git a/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml b/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml new file mode 100755 index 0000000..78b71ec --- /dev/null +++ b/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml @@ -0,0 +1,77 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: gen ceph uuid + shell: uuidgen + register: ceph_uuid + run_once: true + tags: + - ceph_copy_secret + +- name: gen template secret.xml + local_action: + module: "template" + src: "secret.j2" + dest: "/tmp/secret.xml" + mode: "0777" + when: inventory_hostname in groups['ceph_adm'] + tags: + - ceph_copy_secret + +- name: create pool + shell: ceph osd pool create {{ item }} 50 + with_items: + - volumes + - images + - backups + - vms + when: inventory_hostname in groups['ceph_adm'] + +- name: create ceph users for openstack + shell: ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' 
&& ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' + when: inventory_hostname in groups['ceph_adm'] + +- name: send glance key to controller nodes + shell: ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring && chown glance:glance /etc/ceph/ceph.client.glance.keyring + when: inventory_hostname in groups['controller'] + +- name: send cinder key to compute nodes + shell: ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring && chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring + when: inventory_hostname in groups['compute'] + tags: + - ceph_send_key + +- name: copy cinder key to compute node + shell: ceph auth get-key client.cinder | tee client.cinder.key + when: inventory_hostname in groups['compute'] + tags: + - ceph_copy_secret + +- name: copy secret.xml to compute nodes + copy: src="/tmp/secret.xml" dest="~/secret.xml" + when: inventory_hostname in groups['compute'] + tags: + - ceph_copy_secret + +- name: undefine libvirt secret in case of repeatedly execute ceph_deploy + shell: "virsh secret-list | awk '$1 ~ /[0-9]+/ {print $1}' | xargs virsh secret-undefine" + when: inventory_hostname in groups['compute'] + tags: + - ceph_copy_secret + ignore_errors: True + + +- name: create key for libvirt on compute nodes + shell: "virsh secret-define --file ~/secret.xml && virsh secret-set-value --secret {{ ceph_uuid.stdout_lines[0] }} --base64 $(cat client.cinder.key)" + when: inventory_hostname in groups['compute'] + tags: + - ceph_copy_secret + ignore_errors: True + + diff --git a/ansible/roles/ceph-openstack/tasks/main.yml b/ansible/roles/ceph-openstack/tasks/main.yml new file mode 100644 index 0000000..8c9734d --- /dev/null +++ b/ansible/roles/ceph-openstack/tasks/main.yml @@ -0,0 +1,26 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. 
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- include_vars: "{{ ansible_os_family }}.yml"
+  tags:
+    - ceph_deploy
+    - ceph_openstack_pre
+    - ceph_openstack_conf
+    - ceph_openstack
+
+- include: ceph_openstack_pre.yml
+  tags:
+    - ceph_deploy
+    - ceph_openstack_pre
+    - ceph_openstack
+
+- include: ceph_openstack_conf.yml
+  tags:
+    - ceph_deploy
+    - ceph_openstack_conf
+    - ceph_openstack
diff --git a/ansible/roles/ceph-openstack/templates/secret.j2 b/ansible/roles/ceph-openstack/templates/secret.j2
new file mode 100644
index 0000000..a0ffc6e
--- /dev/null
+++ b/ansible/roles/ceph-openstack/templates/secret.j2
@@ -0,0 +1,6 @@
+<secret ephemeral='no' private='no'>
+  <uuid>{{ ceph_uuid.stdout_lines[0] }}</uuid>
+  <usage type='ceph'>
+    <name>client.cinder secret</name>
+  </usage>
+</secret>
diff --git a/ansible/roles/ceph-openstack/vars/Debian.yml b/ansible/roles/ceph-openstack/vars/Debian.yml
new file mode 100755
index 0000000..1da4232
--- /dev/null
+++ b/ansible/roles/ceph-openstack/vars/Debian.yml
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - ceph-deploy + - python-flask + - libgoogle-perftools4 + - libleveldb1 + - liblttng-ust0 + - libsnappy1 + - librbd1 + - librados2 + - python-ceph + - ceph + - ceph-mds + - ceph-common + - ceph-fs-common + - gdisk + +services: [] + +cinder_service: cinder-volume +nova_service: nova-compute +glance_service: glance-api diff --git a/ansible/roles/ceph-openstack/vars/RedHat.yml b/ansible/roles/ceph-openstack/vars/RedHat.yml new file mode 100755 index 0000000..d0310f8 --- /dev/null +++ b/ansible/roles/ceph-openstack/vars/RedHat.yml @@ -0,0 +1,20 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - ceph-radosgw + - fcgi + - ceph-deploy + - ceph + +services: [] + +cinder_service: openstack-cinder-volume +nova_service: openstack-nova-compute +glance_service: openstack-glance-api diff --git a/ansible/roles/ceph-openstack/vars/main.yml b/ansible/roles/ceph-openstack/vars/main.yml new file mode 100755 index 0000000..6de7e9f --- /dev/null +++ b/ansible/roles/ceph-openstack/vars/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +ceph_cluster_dir: + - /root/ceph-cluster diff --git a/ansible/roles/ceph-osd/files/create_osd.sh b/ansible/roles/ceph-osd/files/create_osd.sh new file mode 100755 index 0000000..dd815c2 --- /dev/null +++ b/ansible/roles/ceph-osd/files/create_osd.sh @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +if [ -d "/var/local/osd" ]; then +echo "clear /var/local/osd" +rm -r /var/local/osd/ +umount /var/local/osd +rm -r /var/local/osd +fi + + +#safe check +ps -ef |grep lvremove |awk '{print $2}' |xargs kill -9 +ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9 +ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9 +ps -ef |grep lvcreate |awk '{print $2}' |xargs kill -9 + +if [ -L "/dev/storage-volumes/ceph0" ]; then +echo "remove lv vg" +lvremove -f /dev/storage-volumes/ceph0 +fi + + +echo "lvcreate" +lvcreate -l 100%FREE -nceph0 storage-volumes +echo "mkfs" +mkfs.xfs -f /dev/storage-volumes/ceph0 + +if [ ! 
-d "/var/local/osd" ]; then +echo "mount osd" +mkdir -p /var/local/osd +mount /dev/storage-volumes/ceph0 /var/local/osd +fi + diff --git a/ansible/roles/ceph-osd/tasks/install_osd.yml b/ansible/roles/ceph-osd/tasks/install_osd.yml new file mode 100644 index 0000000..e7e4a24 --- /dev/null +++ b/ansible/roles/ceph-osd/tasks/install_osd.yml @@ -0,0 +1,33 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: create osd lv and mount it on /var/local/osd + script: create_osd.sh + +- name: copy osd keyring + copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring" + +- name: prepare osd disk + shell: ceph-disk prepare --fs-type xfs /var/local/osd + +- name: activate osd node + shell: ceph-disk activate /var/local/osd + +- name: enable ceph service + service: name=ceph enabled=yes + +- name: rebuild osd after reboot + lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script" + when: ansible_os_family == "Debian" + +- name: rebuild osd after reboot for centos + lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd" + when: ansible_os_family == "RedHat" + diff --git a/ansible/roles/ceph-osd/tasks/main.yml b/ansible/roles/ceph-osd/tasks/main.yml new file mode 100644 index 0000000..b2d10b1 --- /dev/null +++ b/ansible/roles/ceph-osd/tasks/main.yml @@ -0,0 +1,13 @@ 
+############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- include: install_osd.yml + when: inventory_hostname in groups["ceph_osd"] + tags: + - ceph_osd + - ceph_deploy diff --git a/ansible/roles/ceph-purge/tasks/main.yml b/ansible/roles/ceph-purge/tasks/main.yml new file mode 100644 index 0000000..a25572c --- /dev/null +++ b/ansible/roles/ceph-purge/tasks/main.yml @@ -0,0 +1,35 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- name: clear tmp files + local_action: shell rm -rf /tmp/ceph* + tags: + - ceph_purge + - ceph_deploy + +- name: install ceph-related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - ceph-deploy + tags: + - ceph_purge + - ceph_deploy + +- name: purge ceph + shell: "ceph-deploy purge {{ inventory_hostname }}; ceph-deploy purgedata {{ inventory_hostname }}; ceph-deploy forgetkeys" + tags: + - ceph_purge + - ceph_deploy + +- name: remove monmap + file: path="/tmp/monmap" state="absent" + tags: + - ceph_purge + - ceph_deploy + + diff --git a/ansible/roles/cinder-controller/handlers/main.yml b/ansible/roles/cinder-controller/handlers/main.yml index aeeda0d..93bffe7 100644 --- 
a/ansible/roles/cinder-controller/handlers/main.yml
+++ b/ansible/roles/cinder-controller/handlers/main.yml
@@ -1,6 +1,13 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
 ---
-- name: restart cinder-scheduler
-  service: name=cinder-scheduler state=restarted
-- name: restart cinder-api
-  service: name=cinder-api state=restarted
+- name: restart cinder control service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: services | union(services_noarch)
 
diff --git a/ansible/roles/cinder-controller/tasks/cinder_config.yml b/ansible/roles/cinder-controller/tasks/cinder_config.yml
new file mode 100644
index 0000000..e763a47
--- /dev/null
+++ b/ansible/roles/cinder-controller/tasks/cinder_config.yml
@@ -0,0 +1,19 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: upload cinder conf
+  template: src=cinder.conf dest=/etc/cinder/cinder.conf
+
+- name: sync cinder db
+  #cinder_manage: action=dbsync
+  shell: su -s /bin/sh -c 'cinder-manage db sync' cinder
+  ignore_errors: true
+  changed_when: true
+  notify:
+    - restart cinder control service
diff --git a/ansible/roles/cinder-controller/tasks/cinder_install.yml b/ansible/roles/cinder-controller/tasks/cinder_install.yml
new file mode 100644
index 0000000..d41094d
--- /dev/null
+++ b/ansible/roles/cinder-controller/tasks/cinder_install.yml
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: disable auto start
+  copy:
+    content: "#!/bin/sh\nexit 101"
+    dest: "/usr/sbin/policy-rc.d"
+    mode: 0755
+  when: ansible_os_family == "Debian"
+
+- name: install cinder packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+  file:
+    path=/usr/sbin/policy-rc.d
+    state=absent
+  when: ansible_os_family == "Debian"
+
+- name: generate common cinder service list
+  lineinfile: dest=/opt/service create=yes line='{{ item }}'
+  with_items: services | union(services_noarch)
+
+- name: upload cinder conf
+  template: src=cinder.conf dest=/etc/cinder/cinder.conf
+  notify:
+    - restart cinder control service
diff --git a/ansible/roles/cinder-controller/tasks/main.yml b/ansible/roles/cinder-controller/tasks/main.yml
index b814d93..c719ca2 100644
--- a/ansible/roles/cinder-controller/tasks/main.yml
+++ b/ansible/roles/cinder-controller/tasks/main.yml
@@ -1,29 +1,25 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: install cinder packages - apt: name={{ item }} state=present force=yes - with_items: - - cinder-api - - cinder-scheduler - - python-cinderclient +- include_vars: "{{ ansible_os_family }}.yml" -- name: upload cinder conf - template: src=cinder.conf dest=/etc/cinder/cinder.conf - notify: - - restart cinder-scheduler - - restart cinder-api +- include: cinder_install.yml + tags: + - install + - cinder-install + - cinder -- name: sync cinder db - shell: su -s /bin/sh -c "cinder-manage db sync" cinder && cinder - notify: - - restart cinder-scheduler - - restart cinder-api +- include: cinder_config.yml + when: inventory_hostname == groups['controller'][0] + tags: + - config + - cinder-config + - cinder - meta: flush_handlers - -- name: upload cinder keystone register script - template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744 - -- name: run cinder register script - shell: /opt/cinder_init.sh && touch cinder_init_complete - args: - creates: cinder_init_complete diff --git a/ansible/roles/cinder-controller/templates/api-paste.ini b/ansible/roles/cinder-controller/templates/api-paste.ini index 6ae63f1..0eb04e2 100644 --- a/ansible/roles/cinder-controller/templates/api-paste.ini +++ b/ansible/roles/cinder-controller/templates/api-paste.ini @@ -54,8 +54,8 @@ paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory # auth_host = 127.0.0.1 # auth_port = 35357 # auth_protocol = http -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 admin_tenant_name = service admin_user = cinder 
admin_password = {{ CINDER_PASS }} diff --git a/ansible/roles/cinder-controller/templates/cinder.conf b/ansible/roles/cinder-controller/templates/cinder.conf index 2fcd0b7..66d9948 100644 --- a/ansible/roles/cinder-controller/templates/cinder.conf +++ b/ansible/roles/cinder-controller/templates/cinder.conf @@ -3,7 +3,7 @@ rootwrap_config = /etc/cinder/rootwrap.conf api_paste_confg = /etc/cinder/api-paste.ini iscsi_helper = tgtadm volume_name_template = volume-%s -volume_group = cinder-volumes +volume_group = storage-volumes verbose = {{ VERBOSE }} debug = {{ DEBUG }} auth_strategy = keystone @@ -18,11 +18,11 @@ control_exchange = cinder rpc_backend = rabbit rabbit_host = {{ rabbit_host }} rabbit_port = 5672 -rabbit_userid = guest +rabbit_userid = {{ RABBIT_USER }} rabbit_password = {{ RABBIT_PASS }} my_ip = {{ storage_controller_host }} -glance_host = {{ image_host }} +glance_host = {{ internal_vip.ip }} glance_port = 9292 api_rate_limit = False storage_availability_zone = nova @@ -39,7 +39,6 @@ volume_name_template = volume-%s snapshot_name_template = snapshot-%s max_gigabytes=10000 -volume_group=cinder-volumes volume_clear=zero volume_clear_size=10 @@ -53,11 +52,15 @@ volumes_dir=/var/lib/cinder/volumes volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver [keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 +auth_uri = http://{{ internal_vip.ip }}:5000/v3 +identity_uri = http://{{ internal_vip.ip }}:35357 admin_tenant_name = service admin_user = cinder admin_password = {{ CINDER_PASS }} [database] connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder +idle_timeout = 30 + +[keymgr] +encryption_auth_url=http://{{ internal_vip.ip }}:5000/v3 diff --git a/ansible/roles/cinder-controller/templates/cinder_init.sh b/ansible/roles/cinder-controller/templates/cinder_init.sh deleted file mode 100644 index 86968bf..0000000 --- a/ansible/roles/cinder-controller/templates/cinder_init.sh +++ 
/dev/null @@ -1,6 +0,0 @@ -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin - -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ storage_controller_host }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ storage_controller_host }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ storage_controller_host }}:8776/v1/%\(tenant_id\)s - diff --git a/ansible/roles/cinder-controller/vars/Debian.yml b/ansible/roles/cinder-controller/vars/Debian.yml new file mode 100644 index 0000000..801296b --- /dev/null +++ b/ansible/roles/cinder-controller/vars/Debian.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +packages: + - cinder-api + - cinder-scheduler + +services: + - cinder-api + - cinder-scheduler diff --git a/ansible/roles/cinder-controller/vars/RedHat.yml b/ansible/roles/cinder-controller/vars/RedHat.yml new file mode 100644 index 0000000..e11bef9 --- /dev/null +++ b/ansible/roles/cinder-controller/vars/RedHat.yml @@ -0,0 +1,16 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-cinder + - python-oslo-db + +services: + - openstack-cinder-api + - openstack-cinder-scheduler diff --git a/ansible/roles/cinder-controller/vars/main.yml b/ansible/roles/cinder-controller/vars/main.yml new file mode 100644 index 0000000..483300e --- /dev/null +++ b/ansible/roles/cinder-controller/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - python-cinderclient + +services_noarch: [] + diff --git a/ansible/roles/cinder-volume/files/loop.yml b/ansible/roles/cinder-volume/files/loop.yml deleted file mode 100644 index e872652..0000000 --- a/ansible/roles/cinder-volume/files/loop.yml +++ /dev/null @@ -1 +0,0 @@ -physical_device: /dev/loop0 diff --git a/ansible/roles/cinder-volume/handlers/main.yml b/ansible/roles/cinder-volume/handlers/main.yml index 866eb83..f841a63 100644 --- a/ansible/roles/cinder-volume/handlers/main.yml +++ b/ansible/roles/cinder-volume/handlers/main.yml @@ -1,6 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart cinder-volume - service: name=cinder-volume state=restarted - -- name: restart tgt - shell: service tgt restart +- name: restart cinder-volume services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/ansible/roles/cinder-volume/tasks/main.yml b/ansible/roles/cinder-volume/tasks/main.yml index d29e94f..f3f40c1 100644 --- a/ansible/roles/cinder-volume/tasks/main.yml +++ b/ansible/roles/cinder-volume/tasks/main.yml @@ -1,51 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + - name: install cinder-volume and lvm2 packages - apt: name={{ item }} state=present force=yes - with_items: - - cinder-volume - - lvm2 + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) -- name: check if physical device exists - stat: path={{ physical_device }} - register: st +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" -- name: repace physical_device if st 
is false - local_action: copy src=loop.yml dest=/tmp/loop.yml - when: st.stat.exists == False - -- name: load loop.yml - include_vars: /tmp/loop.yml - when: st.stat.exists == False - -- name: check if cinder-volumes is mounted - shell: ls /mnt - register: cindervolumes - -- name: get available partition size - shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }' - register: partition_size - -- name: if not mounted, mount it - shell: dd if=/dev/zero of=/mnt/cinder-volumes - bs=1 count=0 seek={{ partition_size.stdout }} - when: cindervolumes.stdout != 'cinder-volumes' - -- name: get first lo device - shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p - register: first_lo - when: cindervolumes.stdout != 'cinder-volumes' - -- name: do a losetup on /mnt/cinder-volumes - shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes - when: cindervolumes.stdout != 'cinder-volumes' - -- name: create physical and group volumes - lvg: vg=cinder-volumes pvs={{ physical_device }} - vg_options=--force +- name: generate cinder volume service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) - name: upload cinder-volume configuration template: src=cinder.conf dest=/etc/cinder/cinder.conf backup=yes notify: - - restart cinder-volume - - restart tgt + - restart cinder-volume services + +- meta: flush_handlers diff --git a/ansible/roles/cinder-volume/templates/cinder.conf b/ansible/roles/cinder-volume/templates/cinder.conf index a674ca6..0660cba 100644 --- a/ansible/roles/cinder-volume/templates/cinder.conf +++ b/ansible/roles/cinder-volume/templates/cinder.conf @@ -3,11 +3,11 @@ rootwrap_config = /etc/cinder/rootwrap.conf api_paste_confg = /etc/cinder/api-paste.ini iscsi_helper = tgtadm volume_name_template = volume-%s -volume_group = cinder-volumes +volume_group = storage-volumes verbose = True auth_strategy = keystone state_path = /var/lib/cinder -lock_path = /var/lock/cinder +lock_path = /var/lib/cinder/tmp 
notification_driver=cinder.openstack.common.notifier.rpc_notifier volumes_dir = /var/lib/cinder/volumes @@ -17,11 +17,11 @@ control_exchange = cinder rpc_backend = rabbit rabbit_host = {{ rabbit_host }} rabbit_port = 5672 -rabbit_userid = guest +rabbit_userid = {{ RABBIT_USER }} rabbit_password = {{ RABBIT_PASS }} my_ip = {{ storage_controller_host }} -glance_host = {{ image_host }} +glance_host = {{ internal_vip.ip }} glance_port = 9292 api_rate_limit = False storage_availability_zone = nova @@ -38,7 +38,6 @@ volume_name_template = volume-%s snapshot_name_template = snapshot-%s max_gigabytes=10000 -volume_group=cinder-volumes volume_clear=zero volume_clear_size=10 @@ -52,11 +51,12 @@ volumes_dir=/var/lib/cinder/volumes volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver [keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 +auth_uri = http://{{ internal_vip.ip }}:5000/v3 +identity_uri = http://{{ internal_vip.ip }}:35357 admin_tenant_name = service admin_user = cinder admin_password = {{ CINDER_PASS }} [database] connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder +idle_timeout = 30 diff --git a/ansible/roles/cinder-volume/vars/Debian.yml b/ansible/roles/cinder-volume/vars/Debian.yml new file mode 100644 index 0000000..d95b779 --- /dev/null +++ b/ansible/roles/cinder-volume/vars/Debian.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - cinder-volume + +services: + - cinder-volume diff --git a/ansible/roles/cinder-volume/vars/RedHat.yml b/ansible/roles/cinder-volume/vars/RedHat.yml new file mode 100644 index 0000000..6d596f4 --- /dev/null +++ b/ansible/roles/cinder-volume/vars/RedHat.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-cinder + - targetcli + - python-oslo-db + - MySQL-python + - lvm2 + +services: + - openstack-cinder-volume + - lvm2-lvmetad diff --git a/ansible/roles/cinder-volume/vars/main.yml b/ansible/roles/cinder-volume/vars/main.yml new file mode 100644 index 0000000..9949450 --- /dev/null +++ b/ansible/roles/cinder-volume/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - lvm2 + +services_noarch: [] + diff --git a/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list b/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list deleted file mode 100644 index 920f3d2..0000000 --- a/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list +++ /dev/null @@ -1 +0,0 @@ -deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml deleted file mode 100644 index ba409f6..0000000 --- a/ansible/roles/common/handlers/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- name: restart ntp - command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc; service ntp start" - ignore_errors: True diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml index 3730a33..f004e98 100644 --- a/ansible/roles/common/tasks/main.yml +++ b/ansible/roles/common/tasks/main.yml @@ -1,38 +1,92 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: first update pkgs - apt: update_cache=yes +- include_vars: "{{ ansible_os_family }}.yml" -- name: install ubuntu-cloud-keyring(ubuntu) - apt: name={{ item }} state=latest - with_items: - - ubuntu-cloud-keyring - -- name: add juno cloudarchive - apt_repository: repo="{{ juno_cloud_archive }}" state=present - -- name: update packages once - apt: update_cache=yes +- name: speed up ansible by purging landscape-common + apt: pkg=landscape-common state=absent purge=yes + when: ansible_os_family == "Debian" - name: update hosts files to all hosts - template: src=hosts - dest=/etc/hosts - backup=yes + template: src=hosts dest=/etc/hosts backup=yes -- name: install common packages - apt: name={{ item }} state=latest - with_items: - - python-pip - - python-dev - - python-mysqldb - - ntp +- name: get compass-core hostname + local_action: shell hostname + register: name + +- name: get compass-core addr + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: COMPASS_SERVER + +- name: update compass-core name and ip to hosts files + shell: | + echo "# compass" >> /etc/hosts + echo {{ COMPASS_SERVER.stdout_lines[0] }} {{ name.stdout_lines[0] }} >> /etc/hosts + +- name: install python-crypto + yum: name=python-crypto state=present + register: python_crypto_result + ignore_errors: yes + when: ansible_os_family == "RedHat" + +- name: remove python crypt egg file to work-around https://bugs.centos.org/view.php?id=9896&nbn=2 + shell: rm -rf /usr/lib64/python2.7/site-packages/pycrypto-2.6.1-py2.7.egg-info + when: ansible_os_family == "RedHat" and python_crypto_result.msg == "Error unpacking rpm package python2-crypto-2.6.1-9.el7.x86_64\n" + +- name: 
install packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes" + with_items: packages | union(packages_noarch) + +- name: make config template dir exist + file: path=/opt/os_templates state=directory mode=0755 + +- name: create pip config directory + file: path=~/.pip state=directory + +- name: update pip.conf + template: src=pip.conf dest=~/.pip/{{ pip_conf }} + +- name: install pip packages + pip: name={{ item }} state=present extra_args='--pre' + with_items: pip_packages + +- name: install keyczar for accelerate + pip: name=python-keyczar state=present extra_args='--pre' + delegate_to: 127.0.0.1 + run_once: true - name: update ntp conf template: src=ntp.conf dest=/etc/ntp.conf backup=yes - notify: - - restart ntp -- name: update pip - pip: name={{ item }} state=latest - with_items: - - pip +- name: use ntpdate once for initial sync time + shell: ntpdate {{ ntp_server }} + ignore_errors: True +- name: sync sys clock to hard clock + shell: hwclock --systohc + ignore_errors: True + +- name: create fireball keys dir + file: path=~/.fireball.keys state=directory mode=0700 + delegate_to: 127.0.0.1 + run_once: true + +- name: restart services + service: name={{ item }} state=restarted enabled=yes + with_items: services| union(services_noarch) + +- name: write services to monitor list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services| union(services_noarch) + +- name: kill daemon for accelerate + shell: lsof -ni :5099|grep LISTEN|awk '{print $2}'|xargs kill -9 + ignore_errors: true + +- meta: flush_handlers diff --git a/ansible/roles/common/templates/hosts b/ansible/roles/common/templates/hosts index 9d27c0a..bb770d5 100644 --- a/ansible/roles/common/templates/hosts +++ b/ansible/roles/common/templates/hosts @@ -1,22 +1,9 @@ -# compute-controller -10.145.89.136 host-136 -# database -10.145.89.136 host-136 -# messaging -10.145.89.136 host-136 -# storage-controller -10.145.89.138 host-138 -# image 
-10.145.89.138 host-138 -# identity -10.145.89.136 host-136 -# network-server -10.145.89.138 host-138 -# dashboard -10.145.89.136 host-136 -# storage-volume -10.145.89.139 host-139 -# network-worker -10.145.89.139 host-139 -# compute-worker -10.145.89.137 host-137 +# localhost +127.0.0.1 localhost +# controller +10.1.0.50 host1 +10.1.0.51 host2 +10.1.0.52 host3 +# compute +10.1.0.53 host4 +10.1.0.54 host5 diff --git a/ansible/roles/common/templates/ntp.conf b/ansible/roles/common/templates/ntp.conf index c613809..2d560be 100644 --- a/ansible/roles/common/templates/ntp.conf +++ b/ansible/roles/common/templates/ntp.conf @@ -16,14 +16,12 @@ filegen clockstats file clockstats type day enable # Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board # on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for # more information. -server {{ NTP_SERVER_LOCAL }} -server 0.ubuntu.pool.ntp.org -server 1.ubuntu.pool.ntp.org -server 2.ubuntu.pool.ntp.org -server 3.ubuntu.pool.ntp.org +server {{ ntp_server }} +server {{ internal_vip.ip }} -# Use Ubuntu's ntp server as a fallback. -server ntp.ubuntu.com +# Use local server as a fallback. +server 127.127.1.0 # local clock +fudge 127.127.1.0 stratum 10 # Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for # details. The web page @@ -34,8 +32,8 @@ server ntp.ubuntu.com # up blocking replies from your own upstream servers. # By default, exchange time with everybody, but don't allow configuration. -restrict -4 default kod notrap nomodify nopeer noquery -restrict -6 default kod notrap nomodify nopeer noquery +restrict -4 default kod notrap nomodify +restrict -6 default kod notrap nomodify # Local users may interrogate the ntp server more closely. 
restrict 127.0.0.1 diff --git a/ansible/roles/common/templates/pip.conf b/ansible/roles/common/templates/pip.conf new file mode 100644 index 0000000..7bb3e43 --- /dev/null +++ b/ansible/roles/common/templates/pip.conf @@ -0,0 +1,5 @@ +[global] +find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip +no-index = true +[install] +trusted-host={{ COMPASS_SERVER.stdout_lines[0] }} diff --git a/ansible/roles/common/vars/Debian.yml b/ansible/roles/common/vars/Debian.yml new file mode 100644 index 0000000..1d7972e --- /dev/null +++ b/ansible/roles/common/vars/Debian.yml @@ -0,0 +1,30 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - ubuntu-cloud-keyring + - python-dev + - openvswitch-datapath-dkms + - openvswitch-switch + - python-memcache + - python-iniparse + - python-lxml + #- python-d* #TODO, need remove + +pip_packages: + - crudini + - python-keyczar + - yang2tosca + +pip_conf: pip.conf + +services: + - ntp + + diff --git a/ansible/roles/common/vars/RedHat.yml b/ansible/roles/common/vars/RedHat.yml new file mode 100644 index 0000000..8143e1c --- /dev/null +++ b/ansible/roles/common/vars/RedHat.yml @@ -0,0 +1,26 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openvswitch + - python-devel + - python-memcached + - gcc + - redhat-lsb-core + - python-crypto + +pip_packages: + - crudini + - python-keyczar + +pip_conf: pip.conf + +services: + - openvswitch + - ntpd diff --git a/ansible/roles/common/vars/main.yml b/ansible/roles/common/vars/main.yml new file mode 100644 index 0000000..713b6b5 --- /dev/null +++ b/ansible/roles/common/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - python-pip + - ntp + +services_noarch: [] diff --git a/ansible/roles/dashboard/handlers/main.yml b/ansible/roles/dashboard/handlers/main.yml new file mode 100755 index 0000000..62e0b8e --- /dev/null +++ b/ansible/roles/dashboard/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart dashboard services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/ansible/roles/dashboard/tasks/main.yml b/ansible/roles/dashboard/tasks/main.yml index 33e6ebf..ce4fd97 100644 --- a/ansible/roles/dashboard/tasks/main.yml +++ b/ansible/roles/dashboard/tasks/main.yml @@ -1,30 +1,108 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + - name: install dashboard packages - apt: name={{ item }} state=present force=yes - with_items: - - apache2 - - memcached - - libapache2-mod-wsgi - - openstack-dashboard + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" - name: remove ubuntu theme - apt: name=openstack-dashboard-ubuntu-theme - state=absent + action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent" + when: ansible_os_family == 'Debian' + notify: + - restart 
dashboard services -## horizon configuration is already enabled in apache2/conf-enabled -## by openstack-dashboard package deploy script. -#- name: update dashboard conf -# template: src=openstack-dashboard.conf -# dest=/etc/apache2/sites-available/openstack-dashboard.conf -# backup=yes +- name: remove default apache2 config + file: + path: '{{ item }}' + state: absent + when: ansible_os_family == 'Debian' + with_items: + - '{{ apache_config_dir }}/conf-available/openstack-dashboard.conf' + - '{{ apache_config_dir }}/conf-enabled/openstack-dashboard.conf' + - '{{ apache_config_dir }}/sites-available/000-default.conf' + - '{{ apache_config_dir }}/sites-enabled/000-default.conf' + notify: + - restart dashboard services -- name: update horizon settings - template: src=local_settings.py - dest=/etc/openstack-dashboard/local_settings.py - backup=yes +- name: update apache2 configs + template: + src: openstack-dashboard.conf.j2 + dest: '{{ apache_config_dir }}/sites-available/openstack-dashboard.conf' + when: ansible_os_family == 'Debian' + notify: + - restart dashboard services -- name: restart apache2 - service: name=apache2 state=restarted +- name: enable dashboard + file: + src: "/etc/apache2/sites-available/openstack-dashboard.conf" + dest: "/etc/apache2/sites-enabled/openstack-dashboard.conf" + state: "link" + when: ansible_os_family == 'Debian' + notify: + - restart dashboard services -- name: restart memcached - service: name=memcached state=restarted +- name: update ubuntu horizon settings + lineinfile: + dest: /etc/openstack-dashboard/local_settings.py + regexp: '{{ item.regexp }}' + line: '{{ item.line }}' + with_items: + - regexp: '^WEBROOT[ \t]*=.*' + line: 'WEBROOT = "/horizon"' + - regexp: '^COMPRESS_OFFLINE[ \t]*=.*' + line: 'COMPRESS_OFFLINE=True' + - regexp: '^ALLOWED_HOSTS[ \t]*=.*' + line: 'ALLOWED_HOSTS = ["*"]' + - regexp: '^OPENSTACK_HOST[ \t]*=.*' + line: 'OPENSTACK_HOST = "{{ internal_ip }}"' + when: ansible_os_family == 'Debian' + notify: + - 
restart dashboard services + +- name: precompile horizon css + shell: /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force + ignore_errors: True + when: ansible_os_family == 'Debian' + notify: + - restart dashboard services + +- name: update redhat version horizon settings + lineinfile: + dest: /etc/openstack-dashboard/local_settings + regexp: '{{ item.regexp }}' + line: '{{ item.line }}' + with_items: + - regexp: '^WEBROOT[ \t]*=.*' + line: 'WEBROOT = "/horizon"' + - regexp: '^COMPRESS_OFFLINE[ \t]*=.*' + line: 'COMPRESS_OFFLINE=False' + - regexp: '^ALLOWED_HOSTS[ \t]*=.*' + line: 'ALLOWED_HOSTS = ["*"]' + - regexp: '^OPENSTACK_HOST[ \t]*=.*' + line: 'OPENSTACK_HOST = "{{ internal_ip }}"' + when: ansible_os_family == 'RedHat' + notify: + - restart dashboard services + +- meta: flush_handlers diff --git a/ansible/roles/dashboard/templates/local_settings.py b/ansible/roles/dashboard/templates/local_settings.py deleted file mode 100644 index 35f94c5..0000000 --- a/ansible/roles/dashboard/templates/local_settings.py +++ /dev/null @@ -1,511 +0,0 @@ -import os - -from django.utils.translation import ugettext_lazy as _ - -from openstack_dashboard import exceptions - -DEBUG = True -TEMPLATE_DEBUG = DEBUG - -# Required for Django 1.5. -# If horizon is running in production (DEBUG is False), set this -# with the list of host/domain names that the application can serve. -# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts -#ALLOWED_HOSTS = ['horizon.example.com', ] - -# Set SSL proxy settings: -# For Django 1.4+ pass this header from the proxy after terminating the SSL, -# and don't forget to strip it from the client's request. 
-# For more information see: -# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header -# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') - -# If Horizon is being served through SSL, then uncomment the following two -# settings to better secure the cookies from security exploits -#CSRF_COOKIE_SECURE = True -#SESSION_COOKIE_SECURE = True - -# Overrides for OpenStack API versions. Use this setting to force the -# OpenStack dashboard to use a specific API version for a given service API. -# NOTE: The version should be formatted as it appears in the URL for the -# service API. For example, The identity service APIs have inconsistent -# use of the decimal point, so valid options would be "2.0" or "3". -# OPENSTACK_API_VERSIONS = { -# "identity": 3, -# "volume": 2 -# } - -# Set this to True if running on multi-domain model. When this is enabled, it -# will require user to enter the Domain name in addition to username for login. -# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False - -# Overrides the default domain used when running on single-domain model -# with Keystone V3. All entities will be created in the default domain. -# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' - -# Set Console type: -# valid options would be "AUTO", "VNC", "SPICE" or "RDP" -# CONSOLE_TYPE = "AUTO" - -# Default OpenStack Dashboard configuration. -HORIZON_CONFIG = { - 'dashboards': ('project', 'admin', 'settings',), - 'default_dashboard': 'project', - 'user_home': 'openstack_dashboard.views.get_user_home', - 'ajax_queue_limit': 10, - 'auto_fade_alerts': { - 'delay': 3000, - 'fade_duration': 1500, - 'types': ['alert-success', 'alert-info'] - }, - 'help_url': "http://docs.openstack.org", - 'exceptions': {'recoverable': exceptions.RECOVERABLE, - 'not_found': exceptions.NOT_FOUND, - 'unauthorized': exceptions.UNAUTHORIZED}, -} - -# Specify a regular expression to validate user passwords. 
-# HORIZON_CONFIG["password_validator"] = { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements.") -# } - -# Disable simplified floating IP address management for deployments with -# multiple floating IP pools or complex network requirements. -# HORIZON_CONFIG["simple_ip_management"] = False - -# Turn off browser autocompletion for the login form if so desired. -# HORIZON_CONFIG["password_autocomplete"] = "off" - -LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -# Set custom secret key: -# You can either set it to a specific value or you can let horizion generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there -# may be situations where you would want to set this explicitly, e.g. when -# multiple dashboard instances are distributed on different machines (usually -# behind a load-balancer). Either you have to make sure that a session gets all -# requests routed to the same dashboard instance or you set the same SECRET_KEY -# for all of them. -from horizon.utils import secret_key -SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA' -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. To use -# memcached set CACHES to something like -CACHES = { - 'default': { - 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION' : '127.0.0.1:11211', - } -} - -#CACHES = { -# 'default': { -# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache' -# } -#} - -# Enable the Ubuntu theme if it is present. -try: - from ubuntu_theme import * -except ImportError: - pass - -# Default Ubuntu apache configuration uses /horizon as the application root. -# Configure auth redirects here accordingly. 
-LOGIN_URL='/horizon/auth/login/' -LOGOUT_URL='/horizon/auth/logout/' -LOGIN_REDIRECT_URL='/horizon' - -# The Ubuntu package includes pre-compressed JS and compiled CSS to allow -# offline compression by default. To enable online compression, install -# the node-less package and enable the following option. -COMPRESS_OFFLINE = True - -# By default, validation of the HTTP Host header is disabled. Production -# installations should have this set accordingly. For more information -# see https://docs.djangoproject.com/en/dev/ref/settings/. -ALLOWED_HOSTS = ['{{ dashboard_host }}', '0.0.0.0'] - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# Configure these for your outgoing email host -# EMAIL_HOST = 'smtp.my-company.com' -# EMAIL_PORT = 25 -# EMAIL_HOST_USER = 'djangomail' -# EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). -# AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), -# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), -# ] - -OPENSTACK_HOST = "{{ identity_host }}" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" - -# Disable SSL certificate checks (useful for self-signed certificates): -# OPENSTACK_SSL_NO_VERIFY = True - -# The CA certificate to use to verify SSL connections -# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. -# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. 
-OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True, - 'can_edit_group': True, - 'can_edit_project': True, - 'can_edit_domain': True, - 'can_edit_role': True -} - -#Setting this to True, will add a new "Retrieve Password" action on instance, -#allowing Admin session password retrieval/decryption. -#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False - -# The Xen Hypervisor has the ability to set the mount point for volumes -# attached to instances (other Hypervisors currently do not). Setting -# can_set_mount_point to True will add the option to set the mount point -# from the UI. -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': False, - 'can_set_password': False, -} - -# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional -# services provided by neutron. Options currently available are load -# balancer service, security groups, quotas, VPN service. -OPENSTACK_NEUTRON_NETWORK = { - 'enable_lb': False, - 'enable_firewall': False, - 'enable_quotas': True, - 'enable_vpn': False, - # The profile_support option is used to detect if an external router can be - # configured via the dashboard. When using specific plugins the - # profile_support can be turned on if needed. - 'profile_support': None, - #'profile_support': 'cisco', -} - -# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features -# in the OpenStack Dashboard related to the Image service, such as the list -# of supported image formats. 
-# OPENSTACK_IMAGE_BACKEND = { -# 'image_formats': [ -# ('', ''), -# ('aki', _('AKI - Amazon Kernel Image')), -# ('ami', _('AMI - Amazon Machine Image')), -# ('ari', _('ARI - Amazon Ramdisk Image')), -# ('iso', _('ISO - Optical Disk Image')), -# ('qcow2', _('QCOW2 - QEMU Emulator')), -# ('raw', _('Raw')), -# ('vdi', _('VDI')), -# ('vhd', _('VHD')), -# ('vmdk', _('VMDK')) -# ] -# } - -# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for -# image custom property attributes that appear on image detail pages. -IMAGE_CUSTOM_PROPERTY_TITLES = { - "architecture": _("Architecture"), - "kernel_id": _("Kernel ID"), - "ramdisk_id": _("Ramdisk ID"), - "image_state": _("Euca2ools state"), - "project_id": _("Project ID"), - "image_type": _("Image Type") -} - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'publicURL'. -#OPENSTACK_ENDPOINT_TYPE = "publicURL" - -# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the -# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is None. This -# value should differ from OPENSTACK_ENDPOINT_TYPE if used. -#SECONDARY_ENDPOINT_TYPE = "publicURL" - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -# The timezone of the server. This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -# When launching an instance, the menu of available flavors is -# sorted by RAM usage, ascending. 
If you would like a different sort order, -# you can provide another flavor attribute as sorting key. Alternatively, you -# can provide a custom callback method to use for sorting. You can also provide -# a flag for reverse sort. For more info, see -# http://docs.python.org/2/library/functions.html#sorted -# CREATE_INSTANCE_FLAVOR_SORT = { -# 'key': 'name', -# # or -# 'key': my_awesome_callback_method, -# 'reverse': False, -# } - -# The Horizon Policy Enforcement engine uses these values to load per service -# policy rule files. The content of these files should match the files the -# OpenStack services are using to determine role based access control in the -# target installation. - -# Path to directory containing policy.json files -#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") -# Map of local copy of service policy files -#POLICY_FILES = { -# 'identity': 'keystone_policy.json', -# 'compute': 'nova_policy.json', -# 'volume': 'cinder_policy.json', -# 'image': 'glance_policy.json', -#} - -# Trove user and database extension support. By default support for -# creating users and databases on database instances is turned on. -# To disable these extensions set the permission here to something -# unusable such as ["!"]. -# TROVE_ADD_USER_PERMS = [] -# TROVE_ADD_DATABASE_PERMS = [] - -LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. - 'disable_existing_loggers': False, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'django.utils.log.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. - 'level': 'INFO', - 'class': 'logging.StreamHandler', - }, - }, - 'loggers': { - # Logging from django.db.backends is VERY verbose, send to null - # by default. 
- 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'requests': { - 'handlers': ['null'], - 'propagate': False, - }, - 'horizon': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_dashboard': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'cinderclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'glanceclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'neutronclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'heatclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'ceilometerclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'troveclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'swiftclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_auth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'nose.plugins.manager': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'django': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'iso8601': { - 'handlers': ['null'], - 'propagate': False, - }, - } -} - -# 'direction' should not be specified for all_tcp/udp/icmp. -# It is specified in the form. 
-SECURITY_GROUP_RULES = { - 'all_tcp': { - 'name': 'ALL TCP', - 'ip_protocol': 'tcp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_udp': { - 'name': 'ALL UDP', - 'ip_protocol': 'udp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_icmp': { - 'name': 'ALL ICMP', - 'ip_protocol': 'icmp', - 'from_port': '-1', - 'to_port': '-1', - }, - 'ssh': { - 'name': 'SSH', - 'ip_protocol': 'tcp', - 'from_port': '22', - 'to_port': '22', - }, - 'smtp': { - 'name': 'SMTP', - 'ip_protocol': 'tcp', - 'from_port': '25', - 'to_port': '25', - }, - 'dns': { - 'name': 'DNS', - 'ip_protocol': 'tcp', - 'from_port': '53', - 'to_port': '53', - }, - 'http': { - 'name': 'HTTP', - 'ip_protocol': 'tcp', - 'from_port': '80', - 'to_port': '80', - }, - 'pop3': { - 'name': 'POP3', - 'ip_protocol': 'tcp', - 'from_port': '110', - 'to_port': '110', - }, - 'imap': { - 'name': 'IMAP', - 'ip_protocol': 'tcp', - 'from_port': '143', - 'to_port': '143', - }, - 'ldap': { - 'name': 'LDAP', - 'ip_protocol': 'tcp', - 'from_port': '389', - 'to_port': '389', - }, - 'https': { - 'name': 'HTTPS', - 'ip_protocol': 'tcp', - 'from_port': '443', - 'to_port': '443', - }, - 'smtps': { - 'name': 'SMTPS', - 'ip_protocol': 'tcp', - 'from_port': '465', - 'to_port': '465', - }, - 'imaps': { - 'name': 'IMAPS', - 'ip_protocol': 'tcp', - 'from_port': '993', - 'to_port': '993', - }, - 'pop3s': { - 'name': 'POP3S', - 'ip_protocol': 'tcp', - 'from_port': '995', - 'to_port': '995', - }, - 'ms_sql': { - 'name': 'MS SQL', - 'ip_protocol': 'tcp', - 'from_port': '1433', - 'to_port': '1433', - }, - 'mysql': { - 'name': 'MYSQL', - 'ip_protocol': 'tcp', - 'from_port': '3306', - 'to_port': '3306', - }, - 'rdp': { - 'name': 'RDP', - 'ip_protocol': 'tcp', - 'from_port': '3389', - 'to_port': '3389', - }, -} - -FLAVOR_EXTRA_KEYS = { - 'flavor_keys': [ - ('quota:read_bytes_sec', _('Quota: Read bytes')), - ('quota:write_bytes_sec', _('Quota: Write bytes')), - ('quota:cpu_quota', _('Quota: CPU')), - ('quota:cpu_period', _('Quota: CPU 
period')), - ('quota:inbound_average', _('Quota: Inbound average')), - ('quota:outbound_average', _('Quota: Outbound average')), - ] -} - diff --git a/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 b/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 new file mode 100755 index 0000000..403fcc2 --- /dev/null +++ b/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 @@ -0,0 +1,15 @@ +{% set work_threads = (ansible_processor_vcpus + 1) // 2 %} + + + WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi + WSGIDaemonProcess horizon user=horizon group=horizon processes={{ work_threads }} threads={{ work_threads }} + WSGIProcessGroup horizon + Alias /static {{ horizon_dir }}/static/ + Alias /horizon/static {{ horizon_dir }}/static/ + + Order allow,deny + Allow from all + + + + diff --git a/ansible/roles/dashboard/templates/ports.j2 b/ansible/roles/dashboard/templates/ports.j2 new file mode 100755 index 0000000..0bfa042 --- /dev/null +++ b/ansible/roles/dashboard/templates/ports.j2 @@ -0,0 +1,15 @@ +# if you just change the port or add more ports here, you will likely also +# have to change the VirtualHost statement in +# /etc/apache2/sites-enabled/000-default.conf + +Listen {{ internal_ip }}:80 + + + Listen 443 + + + + Listen 443 + + +# vim: syntax=apache ts=4 sw=4 sts=4 sr noet diff --git a/ansible/roles/dashboard/vars/Debian.yml b/ansible/roles/dashboard/vars/Debian.yml new file mode 100644 index 0000000..5c9b032 --- /dev/null +++ b/ansible/roles/dashboard/vars/Debian.yml @@ -0,0 +1,16 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: [] + +services: + - memcached + +apache_config_dir: /etc/apache2 +horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard diff --git a/ansible/roles/dashboard/vars/RedHat.yml b/ansible/roles/dashboard/vars/RedHat.yml new file mode 100644 index 0000000..d213381 --- /dev/null +++ b/ansible/roles/dashboard/vars/RedHat.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - mod_wsgi + - httpd + +services: + - httpd + +http_config_file: "/etc/httpd/conf/httpd.conf" diff --git a/ansible/roles/dashboard/vars/main.yml b/ansible/roles/dashboard/vars/main.yml new file mode 100644 index 0000000..2c940ed --- /dev/null +++ b/ansible/roles/dashboard/vars/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - openstack-dashboard + +services_noarch: [] diff --git a/ansible/roles/database/files/my.cnf b/ansible/roles/database/files/my.cnf deleted file mode 100644 index d61f947..0000000 --- a/ansible/roles/database/files/my.cnf +++ /dev/null @@ -1,131 +0,0 @@ -# -# The MySQL database server configuration file. -# -# You can copy this to one of: -# - "/etc/mysql/my.cnf" to set global options, -# - "~/.my.cnf" to set user-specific options. -# -# One can use all long options that the program supports. -# Run program with --help to get a list of available options and with -# --print-defaults to see which it would actually understand and use. -# -# For explanations see -# http://dev.mysql.com/doc/mysql/en/server-system-variables.html - -# This will be passed to all mysql clients -# It has been reported that passwords should be enclosed with ticks/quotes -# escpecially if they contain "#" chars... -# Remember to edit /etc/mysql/debian.cnf when changing the socket location. -[client] -port = 3306 -socket = /var/run/mysqld/mysqld.sock - -# Here is entries for some specific programs -# The following values assume you have at least 32M ram - -# This was formally known as [safe_mysqld]. Both versions are currently parsed. 
-[mysqld_safe] -socket = /var/run/mysqld/mysqld.sock -nice = 0 - -[mysqld] -# -# * Basic Settings -# -user = mysql -pid-file = /var/run/mysqld/mysqld.pid -socket = /var/run/mysqld/mysqld.sock -port = 3306 -basedir = /usr -datadir = /var/lib/mysql -tmpdir = /tmp -lc-messages-dir = /usr/share/mysql -skip-external-locking -# -# Instead of skip-networking the default is now to listen only on -# localhost which is more compatible and is not less secure. -bind-address = 0.0.0.0 -# -# * Fine Tuning -# -key_buffer = 16M -max_allowed_packet = 16M -thread_stack = 192K -thread_cache_size = 8 -# This replaces the startup script and checks MyISAM tables if needed -# the first time they are touched -myisam-recover = BACKUP -#max_connections = 100 -#table_cache = 64 -#thread_concurrency = 10 -# -# * Query Cache Configuration -# -query_cache_limit = 1M -query_cache_size = 16M -# -# * Logging and Replication -# -# Both location gets rotated by the cronjob. -# Be aware that this log type is a performance killer. -# As of 5.1 you can enable the log at runtime! -#general_log_file = /var/log/mysql/mysql.log -#general_log = 1 -# -# Error log - should be very few entries. -# -log_error = /var/log/mysql/error.log -# -# Here you can see queries with especially long duration -#log_slow_queries = /var/log/mysql/mysql-slow.log -#long_query_time = 2 -#log-queries-not-using-indexes -# -# The following can be used as easy to replay backup logs or for replication. -# note: if you are setting up a replication slave, see README.Debian about -# other settings you may need to change. -#server-id = 1 -#log_bin = /var/log/mysql/mysql-bin.log -expire_logs_days = 10 -max_binlog_size = 100M -#binlog_do_db = include_database_name -#binlog_ignore_db = include_database_name -# -# * InnoDB -# -# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. -# Read the manual for more InnoDB related options. There are many! -# -# * Security Features -# -# Read the manual, too, if you want chroot! 
-# chroot = /var/lib/mysql/ -# -# For generating SSL certificates I recommend the OpenSSL GUI "tinyca". -# -# ssl-ca=/etc/mysql/cacert.pem -# ssl-cert=/etc/mysql/server-cert.pem -# ssl-key=/etc/mysql/server-key.pem -default-storage-engine = innodb -innodb_file_per_table -collation-server = utf8_general_ci -init-connect = 'SET NAMES utf8' -character-set-server = utf8 - -[mysqldump] -quick -quote-names -max_allowed_packet = 16M - -[mysql] -#no-auto-rehash # faster start of mysql but no tab completition - -[isamchk] -key_buffer = 16M - -# -# * IMPORTANT: Additional settings that can override those from this file! -# The files must end with '.cnf', otherwise they'll be ignored. -# -!includedir /etc/mysql/conf.d/ - diff --git a/ansible/roles/database/files/remove_user.sh b/ansible/roles/database/files/remove_user.sh new file mode 100755 index 0000000..88b1518 --- /dev/null +++ b/ansible/roles/database/files/remove_user.sh @@ -0,0 +1,5 @@ +#!/bin/sh +mysql -uroot -Dmysql < 1 + and not cluster_nodes.stdout | search( '{{ internal_ip }}' )) + + diff --git a/ansible/roles/database/tasks/mariadb_cluster_redhat.yml b/ansible/roles/database/tasks/mariadb_cluster_redhat.yml new file mode 100644 index 0000000..da1b863 --- /dev/null +++ b/ansible/roles/database/tasks/mariadb_cluster_redhat.yml @@ -0,0 +1,59 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: get cluster status + shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}' + register: cluster_status + when: + - inventory_hostname == haproxy_hosts.keys()[0] + +- name: start first node to create new cluster + shell: service mysql start --wsrep-new-cluster + when: | + inventory_hostname == haproxy_hosts.keys()[0] + and not cluster_status.stdout | search("OPERATIONAL") + +- name: wait for cluster ready + shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}' + register: cluster_status + until: cluster_status|success + failed_when: not cluster_status.stdout | search("OPERATIONAL") + retries: 10 + delay: 3 + when: | + inventory_hostname == haproxy_hosts.keys()[0] + and not cluster_status.stdout | search("OPERATIONAL") + +- name: if I in the cluster nodes + shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}' + register: cluster_nodes + changed_when: false + +- name: restart other nodes and join cluster + service: + name: mysql + state: restarted + enabled: yes + when: | + inventory_hostname != haproxy_hosts.keys()[0] + and not cluster_nodes.stdout | search( "{{ internal_ip }}") + +- name: remove unused user + script: remove_user.sh + when: ansible_os_family == "RedHat" + +- name: restart first nodes + service: + name: mysql + state: restarted + when: | + inventory_hostname == haproxy_hosts.keys()[0] + and haproxy_hosts|length > 1 + + diff --git a/ansible/roles/database/tasks/mariadb_config.yml b/ansible/roles/database/tasks/mariadb_config.yml new file mode 100644 index 0000000..b18ae8f --- /dev/null +++ 
b/ansible/roles/database/tasks/mariadb_config.yml @@ -0,0 +1,67 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: create all needed db + mysql_db: login_unix_socket=/var/run/mysqld/mysqld.sock name={{ item.db }} state=present + with_items: "{{ credentials }}" + tags: + - test_db + +- name: create service db user + mysql_user: + login_unix_socket=/var/run/mysqld/mysqld.sock + name={{ item[0].user }} + password={{ item[0].password }} + priv=*.*:ALL,GRANT + host={{ item[1] }} + state=present + with_nested: + - "{{ credentials }}" + - ['%', 'localhost'] + tags: + - test_user + +- name: create wsrep db user + mysql_user: + login_unix_socket=/var/run/mysqld/mysqld.sock + name={{ WSREP_SST_USER }} + password={{ WSREP_SST_PASS }} + priv=*.*:ALL,GRANT + host={{ item }} + state=present + with_items: ['%', 'localhost'] + +- name: remove unused user + script: remove_user.sh + when: ansible_os_family == "RedHat" + +- name: restart first nodes + service: + name: mysql + state: restarted + when: inventory_hostname == haproxy_hosts.keys()[0] and haproxy_hosts|length > 1 + +- name: wait for cluster ready + command: mysql -e"show status like 'wsrep%'" + register: cluster_status + until: cluster_status|success + failed_when: not cluster_status.stdout | search("ON") + retries: 10 + delay: 3 + when: + - inventory_hostname == haproxy_hosts.keys()[0] + +- name: restart other nodes + service: + name: mysql + state: restarted + enabled: yes + when: + - inventory_hostname != haproxy_hosts.keys()[0] + diff --git 
a/ansible/roles/database/tasks/mariadb_install.yml b/ansible/roles/database/tasks/mariadb_install.yml new file mode 100644 index 0000000..bf9f346 --- /dev/null +++ b/ansible/roles/database/tasks/mariadb_install.yml @@ -0,0 +1,69 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install python-mysqldb + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: maridb_packages | union(packages_noarch) + +- name: change open file limit + copy: + content: "* - nofile 65536 }}" + dest: "/etc/security/limits.conf" + mode: 0755 + +- name: create conf dir for wsrep + file: path=/etc/my.cnf.d state=directory mode=0755 + when: ansible_os_family == "RedHat" + +- name: update mariadb config file + template: + src: '{{ item.src }}' + dest: '{{ item.dest }}' + backup: yes + mode: 0644 + with_items: mysql_config + +- name: bugfix for rsync version 3.1 + lineinfile: + dest: /usr/bin/wsrep_sst_rsync + state: absent + regexp: '{{ item }}' + with_items: + - "\\s*uid = \\$MYUID$" + - "\\s*gid = \\$MYGID$" + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: set owner + file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory + +- name: get logfile stat + stat: path='{{ mysql_data_dir }}/ib_logfile0' + register: logfile_stat + +- debug: msg='{{ logfile_stat.stat.exists}}' +- debug: msg='{{ 
logfile_stat.stat.size }}' + when: logfile_stat.stat.exists + +- name: rm logfile if exist and size mismatch + shell: 'rm -rf {{ mysql_data_dir }}/ib_logfile*' + when: | + logfile_stat.stat.exists + and logfile_stat.stat.size != 1073741824 diff --git a/ansible/roles/database/tasks/mongodb_config.yml b/ansible/roles/database/tasks/mongodb_config.yml new file mode 100755 index 0000000..0a449f8 --- /dev/null +++ b/ansible/roles/database/tasks/mongodb_config.yml @@ -0,0 +1,55 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: copy mongo js + template: + src: '{{ item.src }}' + dest: '{{ item.dest }}' + with_items: + - src: replica.js + dest: /opt/replica.js + +- name: init replica servers + shell: mongo compass /opt/replica.js + +- name: wait replica servers are ready + shell: mongo compass --eval 'printjson(rs.status())'|grep -E 'PRIMARY|SECONDARY'|wc -l + register: servers + until: servers.stdout|int == {{ haproxy_hosts|length }} + retries: 60 + delay: 10 + +- debug: msg='{{ servers.stdout |int }}' + +- name: wait replica servers are ready + shell: mongo compass --eval 'printjson(rs.status())'|grep -E 'PRIMARY'|wc -l + register: servers + until: servers.stdout|int == 1 + retries: 60 + delay: 10 + +- debug: msg='{{ servers.stdout |int }}' + +- name: create mongodb user and db + mongodb_user: + login_host: "{{ internal_vip.ip }}" + database: ceilometer + name: ceilometer + password: "{{ CEILOMETER_DBPASS }}" + roles: 'readWrite,dbAdmin' + state: present + +- name: grant user privilege + mongodb_user: + login_host: "{{ internal_vip.ip }}" + 
database: ceilometer + name: ceilometer + password: "{{ CEILOMETER_DBPASS }}" + roles: 'readWrite,dbAdmin' + state: present diff --git a/ansible/roles/database/tasks/mongodb_install.yml b/ansible/roles/database/tasks/mongodb_install.yml new file mode 100755 index 0000000..dea15a8 --- /dev/null +++ b/ansible/roles/database/tasks/mongodb_install.yml @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: install mongodb packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: mongodb_packages | union(packages_noarch) + +- name: install pymongod packages + pip: name={{ item }} state=present extra_args='--pre' + with_items: pip_packages + +- name: copy ceilometer configs + template: src=mongodb.conf dest=/opt/os_templates backup=yes + +- name: update mongodb config file + shell: crudini --merge {{ mongodb_config.dest }} < /opt/os_templates/mongodb.conf + +- name: rm prealloc files + file: + dest: "{{ item }}" + state: absent + with_fileglob: + - "{{ mongodb_config.journal }}" + +- name: manually restart mongodb server + service: name={{ mongodb_service }} state=restarted enabled=yes + ignore_errors: true + +- name: write mongodb to monitor list + lineinfile: dest=/opt/service create=yes line={{ mongodb_service}} + +- name: wait for mongod ready + wait_for: host=0.0.0.0 port=27017 delay=10 diff --git a/ansible/roles/database/templates/data.j2 b/ansible/roles/database/templates/data.j2 new file mode 100644 index 0000000..109201a --- /dev/null +++ b/ansible/roles/database/templates/data.j2 @@ -0,0 +1,45 
@@ +#!/bin/sh +mysql -uroot -Dmysql <[:port] of the node. +# The values supplied will be used as defaults for state transfer receiving, +# listening ports and so on. Default: address of the first network interface. +wsrep_node_address={{ internal_ip }} + +# Address for incoming client connections. Autodetect by default. +#wsrep_node_incoming_address= + +# How many threads will process writesets from other nodes +wsrep_slave_threads={{ ansible_processor_vcpus }} + +# DBUG options for wsrep provider +#wsrep_dbug_option + +# Generate fake primary keys for non-PK tables (required for multi-master +# and parallel applying operation) +wsrep_certify_nonPK=1 + +# Maximum number of rows in write set +wsrep_max_ws_rows=131072 + +# Maximum size of write set +wsrep_max_ws_size=1073741824 + +# to enable debug level logging, set this to 1 +wsrep_debug=1 + +# convert locking sessions into transactions +wsrep_convert_LOCK_to_trx=0 + +# how many times to retry deadlocked autocommits +wsrep_retry_autocommit=3 + +# change auto_increment_increment and auto_increment_offset automatically +wsrep_auto_increment_control=1 + +# retry autoinc insert, which failed for duplicate key error +wsrep_drupal_282555_workaround=0 + +# enable "strictly synchronous" semantics for read operations +wsrep_causal_reads=0 + +# Command to call when node status or cluster membership changes. +# Will be passed all or some of the following options: +# --status - new status of this node +# --uuid - UUID of the cluster +# --primary - whether the component is primary or not ("yes"/"no") +# --members - comma-separated list of members +# --index - index of this node in the list +wsrep_notify_cmd= + +## +## WSREP State Transfer options +## + +# State Snapshot Transfer method +wsrep_sst_method=rsync + +# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!! +# (SST method dependent. Defaults to the first IP of the first interface) +#wsrep_sst_receive_address= + +# SST authentication string. 
This will be used to send SST to joining nodes. +# Depends on SST method. For mysqldump method it is root: +wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }} + +# Desired SST donor name. +#wsrep_sst_donor= + +# Protocol version to use +# wsrep_protocol_version= diff --git a/ansible/roles/database/vars/Debian.yml b/ansible/roles/database/vars/Debian.yml new file mode 100644 index 0000000..621dc49 --- /dev/null +++ b/ansible/roles/database/vars/Debian.yml @@ -0,0 +1,45 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +mongodb_packages: + - mongodb-server + - mongodb-clients + - python-pymongo + +mysql_packages: + - python-mysqldb + - mysql-server + +maridb_packages: + - libaio1 + - libssl0.9.8 + - python-mysqldb + - mysql-wsrep-server-5.5 + - galera-3 + +pip_packages: [] + +services: [] + +mongodb_service: mongodb +mysql_config: + - dest: /etc/mysql/my.cnf + src: my.cnf + - dest: /etc/mysql/conf.d/wsrep.cnf + src: wsrep.cnf + +mysql_config_dir: /etc/mysql/conf.d +mysql_data_dir: /var/lib/mysql + +mongodb_config: + dest: /etc/mongodb.conf + src: mongodb.conf + journal: /var/lib/mongodb/journal/* + +wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so" diff --git a/ansible/roles/database/vars/RedHat.yml b/ansible/roles/database/vars/RedHat.yml new file mode 100644 index 0000000..aed1ac9 --- /dev/null +++ b/ansible/roles/database/vars/RedHat.yml @@ -0,0 +1,46 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +mysql_packages: + - MySQL-python + - mysql-server + +maridb_packages: + - MySQL-python + - MariaDB-Galera-server + - MariaDB-client + - galera + - MySQL-python + +mongodb_packages: + - mongo-10gen-server + - mongo-10gen + +pip_packages: + - pymongo + +services: [] + +mongodb_service: mongod + +mysql_config: + - dest: /etc/my.cnf + src: my.cnf + - dest: /etc/my.cnf.d/wsrep.cnf + src: wsrep.cnf + +mysql_config_dir: /etc/my.cnf.d +mysql_data_dir: /var/lib/mysql + +mongodb_config: + dest: /etc/mongod.conf + src: mongodb.conf + journal: /var/lib/mongo/journal/* + +wsrep_provider_file: "/usr/lib64/galera/libgalera_smm.so" diff --git a/ansible/roles/database/vars/main.yml b/ansible/roles/database/vars/main.yml new file mode 100644 index 0000000..c053889 --- /dev/null +++ b/ansible/roles/database/vars/main.yml @@ -0,0 +1,34 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: + - mysql + +credentials: + - user: keystone + db: keystone + password: "{{ KEYSTONE_DBPASS }}" + - user: neutron + db: neutron + password: "{{ NEUTRON_DBPASS }}" + - user: glance + db: glance + password: "{{ GLANCE_DBPASS }}" + - user: nova + db: nova + password: "{{ NOVA_DBPASS }}" + - user: cinder + db: cinder + password: "{{ CINDER_DBPASS }}" + - user: heat + db: heat + password: "{{ HEAT_DBPASS }}" + diff --git a/ansible/roles/ext-network/handlers/main.yml b/ansible/roles/ext-network/handlers/main.yml new file mode 100644 index 0000000..a794586 --- /dev/null +++ b/ansible/roles/ext-network/handlers/main.yml @@ -0,0 +1,29 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart neutron-plugin-openvswitch-agent + service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes + when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: restart neutron-l3-agent + service: name=neutron-l3-agent state=restarted enabled=yes + +- name: kill dnsmasq + command: killall dnsmasq + ignore_errors: True + +- name: restart neutron-dhcp-agent + service: name=neutron-dhcp-agent state=restarted enabled=yes + +- name: restart neutron-metadata-agent + service: name=neutron-metadata-agent state=restarted enabled=yes + +- name: restart xorp + service: name=xorp state=restarted enabled=yes sleep=10 + ignore_errors: True diff --git a/ansible/roles/ext-network/tasks/main.yml b/ansible/roles/ext-network/tasks/main.yml new file mode 100644 index 0000000..809a8fa --- /dev/null +++ b/ansible/roles/ext-network/tasks/main.yml @@ -0,0 +1,43 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: create external net + neutron_network: + login_username: ADMIN + login_password: "{{ ADMIN_PASS }}" + login_tenant_name: admin + auth_url: "http://{{ internal_vip.ip }}:35357/v2.0" + name: "{{ public_net_info.network }}" + provider_network_type: "{{ public_net_info.type }}" + provider_physical_network: "{{ public_net_info.provider_network }}" + provider_segmentation_id: "{{ public_net_info.segment_id}}" + shared: true + router_external: yes + state: present + run_once: true + when: 'public_net_info.enable == True' + +- name: create external subnet + neutron_subnet: + login_username: ADMIN + login_password: "{{ ADMIN_PASS }}" + login_tenant_name: admin + auth_url: "http://{{ internal_vip.ip }}:35357/v2.0" + name: "{{ public_net_info.subnet }}" + network_name: "{{ public_net_info.network }}" + cidr: "{{ public_net_info.floating_ip_cidr }}" + enable_dhcp: "{{ public_net_info.enable_dhcp }}" + no_gateway: "{{ public_net_info.no_gateway }}" + gateway_ip: "{{ public_net_info.external_gw }}" + allocation_pool_start: "{{ public_net_info.floating_ip_start }}" + allocation_pool_end: "{{ public_net_info.floating_ip_end }}" + state: present + run_once: true + when: 'public_net_info.enable == True' + diff --git a/ansible/roles/glance/handlers/main.yml b/ansible/roles/glance/handlers/main.yml index 0c7b25d..53ee01c 100644 --- a/ansible/roles/glance/handlers/main.yml +++ b/ansible/roles/glance/handlers/main.yml @@ -1,6 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart glance-api - service: name=glance-api state=restarted - -- name: restart glance-registry - service: name=glance-registry state=restarted +- name: restart glance services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/ansible/roles/glance/tasks/glance_config.yml b/ansible/roles/glance/tasks/glance_config.yml new file mode 100644 index 0000000..2df75ca --- /dev/null +++ b/ansible/roles/glance/tasks/glance_config.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: sync glance db + #glance_manage: action=dbsync + shell: su -s /bin/sh -c 'glance-manage db sync' glance + ignore_errors: True + notify: + - restart glance services + +- meta: flush_handlers diff --git a/ansible/roles/glance/tasks/glance_install.yml b/ansible/roles/glance/tasks/glance_install.yml new file mode 100644 index 0000000..a478363 --- /dev/null +++ b/ansible/roles/glance/tasks/glance_install.yml @@ -0,0 +1,26 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: install glance packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest" + with_items: packages | union(packages_noarch) + +- name: generate glance service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: update glance conf + template: src={{ item }} dest=/etc/glance/{{ item }} + backup=yes + with_items: + - glance-api.conf + - glance-registry.conf + +- name: remove default sqlite db + shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed diff --git a/ansible/roles/glance/tasks/main.yml b/ansible/roles/glance/tasks/main.yml index 32d2ec5..a78ba77 100644 --- a/ansible/roles/glance/tasks/main.yml +++ b/ansible/roles/glance/tasks/main.yml @@ -1,47 +1,29 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: install glance packages - apt: name={{ item }} state=latest force=yes - with_items: +- include_vars: "{{ ansible_os_family }}.yml" + +- include: glance_install.yml + tags: + - install + - glance_install - glance - - python-glanceclient -- name: update glance conf - template: src={{ item }} dest=/etc/glance/{{ item }} - backup=yes - with_items: - - glance-api.conf - - glance-registry.conf - notify: - - restart glance-registry - - restart glance-api +- include: nfs.yml + tags: + - nfs -#- name: manually stop glance-api -# service: name=glance-api state=stopped - -#- name: manually stop glance-registry -# service: name=glance-registry state=stopped - -- name: remove default sqlite db - shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed - -- name: sync glance db - shell: su -s /bin/sh -c "glance-manage db_sync" glance - notify: - - restart glance-registry - - restart glance-api +- include: glance_config.yml + when: inventory_hostname == groups['controller'][0] + tags: + - config + - glance_config + - glance - meta: flush_handlers - -- name: place image upload script - template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744 - -- name: download cirros image file - get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }} - -- name: wait for 9292 port to become available - wait_for: port=9292 delay=5 - -- name: run image upload - shell: /opt/image_upload.sh && touch image_upload_completed - args: - creates: image_upload_completed diff --git a/ansible/roles/glance/tasks/nfs.yml b/ansible/roles/glance/tasks/nfs.yml new file mode 100644 index 0000000..7895c38 --- /dev/null +++ b/ansible/roles/glance/tasks/nfs.yml @@ 
-0,0 +1,57 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: install nfs + local_action: yum name={{ item }} state=present + with_items: + - rpcbind + - nfs-utils + run_once: True + +- name: create image directory + local_action: file path=/opt/images state=directory mode=0777 + run_once: True + +- name: remove nfs config item if exist + local_action: lineinfile dest=/etc/exports state=absent + regexp="^/opt/images" + run_once: True + +- name: update nfs config + local_action: lineinfile dest=/etc/exports state=present + line="/opt/images *(rw,insecure,sync,all_squash)" + run_once: True + +- name: restart compass nfs service + local_action: service name={{ item }} state=restarted enabled=yes + with_items: + - rpcbind + - nfs-server + run_once: True + +- name: get mount info + command: mount + register: mount_info + +- name: get nfs server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: ip_info + +- name: restart host nfs service + service: name={{ item }} state=restarted enabled=yes + with_items: '{{ nfs_services }}' + +- name: mount image directory + shell: | + mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images + sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab + echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab + when: mount_info.stdout.find('images') == -1 + retries: 5 + delay: 3 diff --git a/ansible/roles/glance/templates/glance-api.conf b/ansible/roles/glance/templates/glance-api.conf index a3bc222..9be29f4 
100644 --- a/ansible/roles/glance/templates/glance-api.conf +++ b/ansible/roles/glance/templates/glance-api.conf @@ -1,677 +1,79 @@ +{% set workers = ansible_processor_vcpus // 2 %} +{% set workers = workers if workers else 1 %} +{% set memcached_servers = [] %} +{% set rabbitmq_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% set _ = rabbitmq_servers.append('%s:5672'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} +{% set rabbitmq_servers = rabbitmq_servers|join(',') %} + [DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Which backend scheme should Glance use by default is not specified -# in a request to add a new image to Glance? Known schemes are determined -# by the known_stores option below. -# Default: 'file' -# "default_store" option has been moved to [glance_store] section in -# Juno release - -# List of which store classes and store class locations are -# currently known to glance at startup. -# Existing but disabled stores: -# glance.store.rbd.Store, -# glance.store.s3.Store, -# glance.store.swift.Store, -# glance.store.sheepdog.Store, -# glance.store.cinder.Store, -# glance.store.gridfs.Store, -# glance.store.vmware_datastore.Store, -#known_stores = glance.store.filesystem.Store, -# glance.store.http.Store - - -# Maximum image size (in bytes) that may be uploaded through the -# Glance API server. Defaults to 1 TB. -# WARNING: this value should only be increased after careful consideration -# and must be set to a value under 8 EB (9223372036854775808). -#image_size_cap = 1099511627776 - -# Address to bind the API server -bind_host = 0.0.0.0 - -# Port the bind the API server to -bind_port = 9292 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! 
-# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. +verbose = {{ VERBOSE }} +debug = {{ DEBUG }} log_file = /var/log/glance/api.log - -# Backlog requests when creating socket +bind_host = {{ image_host }} +bind_port = 9292 backlog = 4096 - -# TCP_KEEPIDLE value in seconds when creating socket. -# Not supported on OS X. -#tcp_keepidle = 600 - -# API to use for accessing data. Default value points to sqlalchemy -# package, it is also possible to use: glance.db.registry.api -# data_api = glance.db.sqlalchemy.api - -# Number of Glance API worker processes to start. -# On machines with more than one CPU increasing this value -# may improve performance (especially if using SSL with -# compression turned on). It is typically recommended to set -# this value to the number of CPUs present on your machine. -workers = 1 - -# Maximum line size of message headers to be accepted. -# max_header_line may need to be increased when using large tokens -# (typically those generated by the Keystone v3 API with big service -# catalogs) -# max_header_line = 16384 - -# Role used to identify an authenticated user as administrator -#admin_role = admin - -# Allow unauthenticated users to access the API with read-only -# privileges. This only applies when using ContextMiddleware. -#allow_anonymous_access = False - -# Allow access to version 1 of glance api -#enable_v1_api = True - -# Allow access to version 2 of glance api -#enable_v2_api = True - -# Return the URL that references where the data is stored on -# the backend storage system. For example, if using the -# file system store a URL of 'file:///path/to/image' will -# be returned to the user in the 'direct_url' meta-data field. -# The default value is false. -#show_image_direct_url = False - -# Send headers containing user and tenant information when making requests to -# the v1 glance registry. 
This allows the registry to function as if a user is -# authenticated without the need to authenticate a user itself using the -# auth_token middleware. -# The default value is false. -#send_identity_headers = False - -# Supported values for the 'container_format' image attribute -#container_formats=ami,ari,aki,bare,ovf,ova - -# Supported values for the 'disk_format' image attribute -#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso - -# Directory to use for lock files. Default to a temp directory -# (string value). This setting needs to be the same for both -# glance-scrubber and glance-api. -#lock_path= - -# Property Protections config file -# This file contains the rules for property protections and the roles/policies -# associated with it. -# If this config value is not specified, by default, property protections -# won't be enforced. -# If a value is specified and the file is not found, then the glance-api -# service will not start. -#property_protection_file = - -# Specify whether 'roles' or 'policies' are used in the -# property_protection_file. -# The default value for property_protection_rule_format is 'roles'. -#property_protection_rule_format = roles - -# Specifies how long (in hours) a task is supposed to live in the tasks DB -# after succeeding or failing before getting soft-deleted. -# The default value for task_time_to_live is 48 hours. -# task_time_to_live = 48 - -# This value sets what strategy will be used to determine the image location -# order. Currently two strategies are packaged with Glance 'location_order' -# and 'store_type'. -#location_strategy = location_order - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -#use_syslog = False - -# Facility to use. If unset defaults to LOG_USER. 
-#syslog_log_facility = LOG_LOCAL0 - -# ================= SSL Options =============================== - -# Certificate file to use when starting API server securely -#cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -#key_file = /path/to/keyfile - -# CA certificate file to use to verify connecting clients -#ca_file = /path/to/cafile - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -#metadata_encryption_key = <16, 24 or 32 char registry metadata key> - -# ============ Registry Options =============================== - -# Address to find the registry server -registry_host = 0.0.0.0 - -# Port the registry server is listening on +workers = {{ workers }} +registry_host = {{ internal_ip }} registry_port = 9191 - -# What protocol to use when connecting to the registry server? -# Set to https for secure HTTP communication registry_client_protocol = http +cinder_catalog_info = volume:cinder:internalURL -# The path to the key file to use in SSL connections to the -# registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file -#registry_client_key_file = /path/to/key/file +enable_v1_api = True +enable_v1_registry = True +enable_v2_api = True +enable_v2_registry = True -# The path to the cert file to use in SSL connections to the -# registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file -#registry_client_cert_file = /path/to/cert/file +notification_driver = messagingv2 +rpc_backend = rabbit -# The path to the certifying authority cert file to use in SSL connections -# to the registry server, if any. 
Alternately, you may set the -# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file -#registry_client_ca_file = /path/to/ca/file +delayed_delete = False +scrubber_datadir = /var/lib/glance/scrubber +scrub_time = 43200 +image_cache_dir = /var/lib/glance/image-cache/ +show_image_direct_url = True -# When using SSL in connections to the registry server, do not require -# validation via a certifying authority. This is the registry's equivalent of -# specifying --insecure on the command line using glanceclient for the API -# Default: False -#registry_client_insecure = False +[task] +task_executor = taskflow -# The period of time, in seconds, that the API server will wait for a registry -# request to complete. A value of '0' implies no timeout. -# Default: 600 -#registry_client_timeout = 600 +[database] +backend = sqlalchemy +connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8 +idle_timeout = 30 -# Whether to automatically create the database tables. -# Default: False -#db_auto_create = False +[glance_store] +default_store = file +stores = file,http,cinder,rbd +filesystem_store_datadir = /var/lib/glance/images/ -# Enable DEBUG log messages from sqlalchemy which prints every database -# query and response. -# Default: False -#sqlalchemy_debug = True +[profiler] +enabled = True -# Pass the user's token through for API requests to the registry. -# Default: True -#use_user_token = True +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 +admin_tenant_name = service +admin_user = glance +admin_password = {{ GLANCE_PASS }} +memcached_servers = {{ memcached_servers }} +token_cache_time = 300 +revocation_cache_time = 60 -# If 'use_user_token' is not in effect then admin credentials -# can be specified. Requests to the registry on behalf of -# the API will use these credentials. 
-# Admin user name -#admin_user = None -# Admin password -#admin_password = None -# Admin tenant name -#admin_tenant_name = None -# Keystone endpoint -#auth_url = None -# Keystone region -#auth_region = None -# Auth strategy -#auth_strategy = keystone +[paste_deploy] +flavor= keystone -# ============ Notification System Options ===================== +[oslo_messaging_amqp] +idle_timeout = 7200 -# Notifications can be sent when images are create, updated or deleted. -# There are three methods of sending notifications, logging (via the -# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid -# message queue), or noop (no notifications sent, the default) -# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver` -# notifier_strategy = default - -# Driver or drivers to handle sending notifications -# notification_driver = noop - -# Default publisher_id for outgoing notifications. -# default_publisher_id = image.localhost - -# Configuration options if sending notifications via rabbitmq (these are -# the defaults) -rabbit_host = localhost -rabbit_port = 5672 +[oslo_messaging_rabbit] +rabbit_hosts = {{ rabbitmq_servers }} rabbit_use_ssl = false -rabbit_userid = guest -rabbit_password = guest +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} rabbit_virtual_host = / rabbit_notification_exchange = glance rabbit_notification_topic = notifications rabbit_durable_queues = False - -# Configuration options if sending notifications via Qpid (these are -# the defaults) -qpid_notification_exchange = glance -qpid_notification_topic = notifications -qpid_hostname = localhost -qpid_port = 5672 -qpid_username = -qpid_password = -qpid_sasl_mechanisms = -qpid_reconnect_timeout = 0 -qpid_reconnect_limit = 0 -qpid_reconnect_interval_min = 0 -qpid_reconnect_interval_max = 0 -qpid_reconnect_interval = 0 -qpid_heartbeat = 5 -# Set to 'ssl' to enable SSL -qpid_protocol = tcp -qpid_tcp_nodelay = True - -# ============ Filesystem 
Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -# this option has been moved to [glance_store] for Juno release -# filesystem_store_datadir = /var/lib/glance/images/ - -# A list of directories where image data can be stored. -# This option may be specified multiple times for specifying multiple store -# directories. Either one of filesystem_store_datadirs or -# filesystem_store_datadir option is required. A priority number may be given -# after each directory entry, separated by a ":". -# When adding an image, the highest priority directory will be selected, unless -# there is not enough space available in cases where the image size is already -# known. If no priority is given, it is assumed to be zero and the directory -# will be considered for selection last. If multiple directories have the same -# priority, then the one with the most free space available is selected. -# If same store is specified multiple times then BadStoreConfiguration -# exception will be raised. -#filesystem_store_datadirs = /var/lib/glance/images/:1 - -# A path to a JSON file that contains metadata describing the storage -# system. When show_multiple_locations is True the information in this -# file will be returned with any location that is contained in this -# store. 
-#filesystem_store_metadata_file = None - -# ============ Swift Store Options ============================= - -# Version of the authentication service to use -# Valid versions are '2' for keystone and '1' for swauth and rackspace -swift_store_auth_version = 2 - -# Address where the Swift authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'https://' -# For swauth, use something like '127.0.0.1:8080/v1.0/' -swift_store_auth_address = 127.0.0.1:5000/v2.0/ - -# User to authenticate against the Swift authentication service -# If you use Swift authentication service, set it to 'account':'user' -# where 'account' is a Swift storage account and 'user' -# is a user in that account -swift_store_user = jdoe:jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. 
https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# If set to True enables multi-tenant storage mode which causes Glance images -# to be stored in tenant specific Swift accounts. -#swift_store_multi_tenant = False - -# A list of swift ACL strings that will be applied as both read and -# write ACLs to the containers created by Glance in multi-tenant -# mode. This grants the specified tenants/users read and write access -# to all newly created image objects. The standard swift ACL string -# formats are allowed, including: -# : -# : -# *: -# Multiple ACLs can be combined using a comma separated list, for -# example: swift_store_admin_tenants = service:glance,*:admin -#swift_store_admin_tenants = - -# The region of the swift endpoint to be used for single tenant. This setting -# is only necessary if the tenant has multiple swift endpoints. -#swift_store_region = - -# If set to False, disables SSL layer compression of https swift requests. -# Setting to 'False' may improve performance for images which are already -# in a compressed format, eg qcow2. If set to True, enables SSL layer -# compression (provided it is supported by the target swift proxy). -#swift_store_ssl_compression = True - -# The number of times a Swift download will be retried before the -# request fails -#swift_store_retry_get_count = 0 - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'http://' -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. 
Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# When sending images to S3, the data will first be written to a -# temporary buffer on disk. By default the platform's temporary directory -# will be used. If required, an alternative directory can be specified here. -#s3_store_object_buffer_dir = /path/to/dir - -# When forming a bucket url, boto will either set the bucket name as the -# subdomain or as the first token of the path. Amazon's S3 service will -# accept it as the subdomain, but Swift's S3 middleware requires it be -# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. -#s3_store_bucket_url_format = subdomain - -# ============ RBD Store Options ============================= - -# Ceph configuration file path -# If using cephx authentication, this file should -# include a reference to the right keyring -# in a client. section -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# RADOS user to authenticate as (only applicable if using cephx) -# If , a default will be chosen based on the client. section -# in rbd_store_ceph_conf -#rbd_store_user = - -# RADOS pool in which images are stored -#rbd_store_pool = images - -# RADOS images will be chunked into objects of this size (in megabytes). -# For best performance, this should be a power of two -#rbd_store_chunk_size = 8 - -# ============ Sheepdog Store Options ============================= - -sheepdog_store_address = localhost - -sheepdog_store_port = 7000 - -# Images will be chunked into objects of this size (in megabytes). 
-# For best performance, this should be a power of two -sheepdog_store_chunk_size = 64 - -# ============ Cinder Store Options =============================== - -# Info to match when looking for cinder in the service catalog -# Format is : separated values of the form: -# :: (string value) -#cinder_catalog_info = volume:cinder:publicURL - -# Override service catalog lookup with template for cinder endpoint -# e.g. http://localhost:8776/v1/%(project_id)s (string value) -#cinder_endpoint_template = - -# Region name of this node (string value) -#os_region_name = - -# Location of ca certicates file to use for cinder client requests -# (string value) -#cinder_ca_certificates_file = - -# Number of cinderclient retries on failed http calls (integer value) -#cinder_http_retries = 3 - -# Allow to perform insecure SSL requests to cinder (boolean value) -#cinder_api_insecure = False - -# ============ VMware Datastore Store Options ===================== - -# ESX/ESXi or vCenter Server target system. -# The server value can be an IP address or a DNS name -# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com -#vmware_server_host = - -# Server username (string value) -#vmware_server_username = - -# Server password (string value) -#vmware_server_password = - -# Inventory path to a datacenter (string value) -# Value optional when vmware_server_ip is an ESX/ESXi host: if specified -# should be `ha-datacenter`. 
-#vmware_datacenter_path = - -# Datastore associated with the datacenter (string value) -#vmware_datastore_name = - -# The number of times we retry on failures -# e.g., socket error, etc (integer value) -#vmware_api_retry_count = 10 - -# The interval used for polling remote tasks -# invoked on VMware ESX/VC server in seconds (integer value) -#vmware_task_poll_interval = 5 - -# Absolute path of the folder containing the images in the datastore -# (string value) -#vmware_store_image_dir = /openstack_glance - -# Allow to perform insecure SSL requests to the target system (boolean value) -#vmware_api_insecure = False - -# ============ Delayed Delete Options ============================= - -# Turn on/off delayed delete -delayed_delete = False - -# Delayed delete time in seconds -scrub_time = 43200 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-scrubber.conf -scrubber_datadir = /var/lib/glance/scrubber - -# =============== Quota Options ================================== - -# The maximum number of image members allowed per image -#image_member_quota = 128 - -# The maximum number of image properties allowed per image -#image_property_quota = 128 - -# The maximum number of tags allowed per image -#image_tag_quota = 128 - -# The maximum number of locations allowed per image -#image_location_quota = 10 - -# Set a system wide quota for every user. This value is the total number -# of bytes that a user can use across all storage systems. A value of -# 0 means unlimited. -#user_storage_quota = 0 - -# =============== Image Cache Options ============================= - -# Base directory that the Image Cache uses -image_cache_dir = /var/lib/glance/image-cache/ - -# =============== Manager Options ================================= - -# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE. -# Whether or not to enforce that all DB tables have charset utf8. 
-# If your database tables do not have charset utf8 you will -# need to convert before this option is removed. This option is -# only relevant if your database engine is MySQL. -#db_enforce_mysql_charset = True - -# =============== Glance Store ==================================== -[glance_store] -# Moved from [DEFAULT], for Juno release -default_store = file -filesystem_store_datadir = /var/lib/glance/images/ - -# =============== Database Options ================================= - -[database] -# The file name to use with SQLite (string value) -sqlite_db = /var/lib/glance/glance.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous = True - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -backend = sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = -connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. 
Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect = False - -# seconds between db connection retries (integer value) -#db_retry_interval = 1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval = True - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval = 10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries = 20 - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = glance -admin_password = {{ GLANCE_PASS }} - -[paste_deploy] -# Name of the paste configuration file that defines the available pipelines -#config_file = glance-api-paste.ini - -# Partial name of a pipeline in your paste configuration file with the -# service name removed. For example, if your paste section name is -# [pipeline:glance-api-keystone], you would configure the flavor below -# as 'keystone'. -flavor= keystone - -[store_type_location_strategy] -# The scheme list to use to get store preference order. The scheme must be -# registered by one of the stores defined by the 'known_stores' config option. -# This option will be applied when you using 'store_type' option as image -# location strategy defined by the 'location_strategy' config option. 
-#store_type_preference = diff --git a/ansible/roles/glance/templates/glance-registry.conf b/ansible/roles/glance/templates/glance-registry.conf index 1c1c651..8453b96 100644 --- a/ansible/roles/glance/templates/glance-registry.conf +++ b/ansible/roles/glance/templates/glance-registry.conf @@ -1,190 +1,56 @@ +{% set workers = ansible_processor_vcpus // 2 %} +{% set workers = workers if workers else 1 %} +{% set memcached_servers = [] %} +{% set rabbitmq_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% set _ = rabbitmq_servers.append('%s:5672'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} +{% set rabbitmq_servers = rabbitmq_servers|join(',') %} + [DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Address to bind the registry server -bind_host = 0.0.0.0 - -# Port the bind the registry server to +verbose = {{ VERBOSE }} +debug = {{ DEBUG }} +log_file = /var/log/glance/api.log +bind_host = {{ image_host }} bind_port = 9191 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -log_file = /var/log/glance/registry.log - -# Backlog requests when creating socket backlog = 4096 +workers = {{ workers }} -# TCP_KEEPIDLE value in seconds when creating socket. -# Not supported on OS X. -#tcp_keepidle = 600 - -# API to use for accessing data. Default value points to sqlalchemy -# package. -#data_api = glance.db.sqlalchemy.api - -# Enable Registry API versions individually or simultaneously -#enable_v1_registry = True -#enable_v2_registry = True - -# Limit the api to return `param_limit_max` items in a call to a container. 
If -# a larger `limit` query param is provided, it will be reduced to this value. -api_limit_max = 1000 - -# If a `limit` query param is not provided in an api request, it will -# default to `limit_param_default` -limit_param_default = 25 - -# Role used to identify an authenticated user as administrator -#admin_role = admin - -# Whether to automatically create the database tables. -# Default: False -#db_auto_create = False - -# Enable DEBUG log messages from sqlalchemy which prints every database -# query and response. -# Default: False -#sqlalchemy_debug = True - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -#use_syslog = False - -# Facility to use. If unset defaults to LOG_USER. -#syslog_log_facility = LOG_LOCAL1 - -# ================= SSL Options =============================== - -# Certificate file to use when starting registry server securely -#cert_file = /path/to/certfile - -# Private key file to use when starting registry server securely -#key_file = /path/to/keyfile - -# CA certificate file to use to verify connecting clients -#ca_file = /path/to/cafile - -# ================= Database Options ========================== +notification_driver = messagingv2 +rpc_backend = rabbit [database] -# The file name to use with SQLite (string value) -sqlite_db = /var/lib/glance/glance.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous = True - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend backend = sqlalchemy +connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8 +idle_timeout = 30 -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = -connection = mysql://glance:{{ 
GLANCE_DBPASS }}@{{ db_host }}/glance - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect = False - -# seconds between db connection retries (integer value) -#db_retry_interval = 1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval = True - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval = 10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries = 20 +[profiler] +enabled = True [keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 admin_tenant_name = service admin_user = glance admin_password = {{ GLANCE_PASS }} +memcached_servers = {{ memcached_servers }} +token_cache_time = 300 +revocation_cache_time = 60 [paste_deploy] -# Name of the paste configuration file that defines the available pipelines -#config_file = glance-registry-paste.ini - -# Partial name of a pipeline in your paste configuration file with the -# service name removed. For example, if your paste section name is -# [pipeline:glance-registry-keystone], you would configure the flavor below -# as 'keystone'. 
flavor= keystone + +[oslo_messaging_amqp] +idle_timeout = 7200 + +[oslo_messaging_rabbit] +rabbit_hosts = {{ rabbitmq_servers }} +rabbit_use_ssl = false +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False diff --git a/ansible/roles/glance/templates/image_upload.sh b/ansible/roles/glance/templates/image_upload.sh index 31b32b7..39cf927 100644 --- a/ansible/roles/glance/templates/image_upload.sh +++ b/ansible/roles/glance/templates/image_upload.sh @@ -1,2 +1,10 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## sleep 10 -glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed +glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ internal_vip.ip }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed diff --git a/ansible/roles/glance/vars/Debian.yml b/ansible/roles/glance/vars/Debian.yml new file mode 100644 index 0000000..b5b4b6c --- /dev/null +++ b/ansible/roles/glance/vars/Debian.yml @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2016 
HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - glance + - nfs-common + +nfs_services: [] + +services: + - glance-registry + - glance-api diff --git a/ansible/roles/glance/vars/RedHat.yml b/ansible/roles/glance/vars/RedHat.yml new file mode 100644 index 0000000..517f347 --- /dev/null +++ b/ansible/roles/glance/vars/RedHat.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-glance + - rpcbind + +nfs_services: + - rpcbind + +services: + - openstack-glance-api + - openstack-glance-registry diff --git a/ansible/roles/glance/vars/main.yml b/ansible/roles/glance/vars/main.yml new file mode 100644 index 0000000..d34d42f --- /dev/null +++ b/ansible/roles/glance/vars/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - python-glanceclient + +services_noarch: [] diff --git a/ansible/roles/ha/files/galera_chk b/ansible/roles/ha/files/galera_chk new file mode 100644 index 0000000..9fd165c --- /dev/null +++ b/ansible/roles/ha/files/galera_chk @@ -0,0 +1,10 @@ +#! /bin/sh + +code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'` + +if [ "$code"=="1" ] +then + echo "HTTP/1.1 200 OK\r\n" +else + echo "HTTP/1.1 503 Service Unavailable\r\n" +fi diff --git a/ansible/roles/ha/files/mysqlchk b/ansible/roles/ha/files/mysqlchk new file mode 100644 index 0000000..7c5eaad --- /dev/null +++ b/ansible/roles/ha/files/mysqlchk @@ -0,0 +1,15 @@ +# default: off +# description: An xinetd internal service which echo's characters back to +# clients. +# This is the tcp version. +service mysqlchk +{ + disable = no + flags = REUSE + socket_type = stream + protocol = tcp + user = root + wait = no + server = /usr/local/bin/galera_chk + port = 9200 +} diff --git a/ansible/roles/ha/handlers/main.yml b/ansible/roles/ha/handlers/main.yml new file mode 100644 index 0000000..34d7a57 --- /dev/null +++ b/ansible/roles/ha/handlers/main.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart haproxy + service: name=haproxy state=restarted enabled=yes + +- name: restart xinetd + service: name=xinetd state=restarted enabled=yes + +- name: restart keepalived + service: name=keepalived state=restarted enabled=yes diff --git a/ansible/roles/ha/tasks/main.yml b/ansible/roles/ha/tasks/main.yml new file mode 100644 index 0000000..1a4c8ba --- /dev/null +++ b/ansible/roles/ha/tasks/main.yml @@ -0,0 +1,96 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install keepalived xinet haproxy + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: generate ha service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: install pexpect + pip: name=pexpect state=present extra_args='--pre' + +- name: activate ip_nonlocal_bind + sysctl: name=net.ipv4.ip_nonlocal_bind value=1 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_intvl + sysctl: name=net.ipv4.tcp_keepalive_intvl value=1 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_probes + sysctl: name=net.ipv4.tcp_keepalive_probes value=5 + state=present 
reload=yes + +- name: set net.ipv4.tcp_keepalive_time + sysctl: name=net.ipv4.tcp_keepalive_time value=5 + state=present reload=yes + +- name: update haproxy cfg + template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg + notify: restart haproxy + +- name: set haproxy enable flag + lineinfile: dest=/etc/default/haproxy state=present + regexp="ENABLED=*" + line="ENABLED=1" + notify: restart haproxy + when: ansible_os_family == "Debian" + +- name: set haproxy log + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="local0.* /var/log/haproxy.log" + line="local0.* /var/log/haproxy.log" + +- name: set rsyslog udp module + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="^#$ModLoad imudp" + line="$ModLoad imudp" + +- name: set rsyslog udp port + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="^#$UDPServerRun 514" + line="$UDPServerRun 514" + +- name: copy galera_chk file + copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777 + +- name: add network service + lineinfile: dest=/etc/services state=present + line="mysqlchk 9200/tcp" + insertafter="Local services" + notify: restart xinetd + +- name: copy mysqlchk file + copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777 + notify: restart xinetd + +- name: set keepalived start param + lineinfile: dest=/etc/default/keepalived state=present + regexp="^DAEMON_ARGS=*" + line="DAEMON_ARGS=\"-D -d -S 1\"" + when: ansible_os_family == "Debian" + +- name: set keepalived log + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="local1.* /var/log/keepalived.log" + line="local1.* /var/log/keepalived.log" + +- name: update keepalived info + template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf + notify: restart keepalived + +- name: restart rsyslog + shell: service rsyslog restart + +- meta: flush_handlers diff --git a/ansible/roles/ha/templates/haproxy.cfg b/ansible/roles/ha/templates/haproxy.cfg new file mode 100644 index 0000000..222b556 --- /dev/null +++ 
b/ansible/roles/ha/templates/haproxy.cfg @@ -0,0 +1,206 @@ + +global + #chroot /var/run/haproxy + daemon + user haproxy + group haproxy + maxconn 4000 + pidfile /var/run/haproxy/haproxy.pid + #log 127.0.0.1 local0 + tune.bufsize 1000000 + stats socket /var/run/haproxy.sock + stats timeout 2m + +defaults + log global + maxconn 8000 + option redispatch + option dontlognull + option splice-auto + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 50s + timeout server 50s + timeout check 10s + retries 3 + +listen proxy-mysql + bind {{ internal_vip.ip }}:3306 + option tcpka + option tcplog + balance source +{% for host, ip in haproxy_hosts.items() %} +{% if loop.index == 1 %} + server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 +{% else %} + server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup +{% endif %} +{% endfor %} + +listen proxy-rabbit + bind {{ internal_vip.ip }}:5672 + bind {{ public_vip.ip }}:5672 + + option tcpka + option tcplog + timeout client 3h + timeout server 3h + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-glance_registry_cluster + bind {{ internal_vip.ip }}:9191 + bind {{ public_vip.ip }}:9191 + option tcpka + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:9191 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-glance_api_cluster + bind {{ internal_vip.ip }}:9292 + bind {{ public_vip.ip }}:9292 + option tcpka + option tcplog + option httpchk + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:9292 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-nova-novncproxy + bind {{ internal_vip.ip }}:6080 + bind {{ public_vip.ip }}:6080 + option tcpka + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server 
{{ host }} {{ ip }}:6080 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-network + bind {{ internal_vip.ip }}:9696 + bind {{ public_vip.ip }}:9696 + option tcpka + option tcplog + balance source + option httpchk +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:9696 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-volume + bind {{ internal_vip.ip }}:8776 + bind {{ public_vip.ip }}:8776 + option tcpka + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-keystone_admin_cluster + bind {{ internal_vip.ip }}:35357 + bind {{ public_vip.ip }}:35357 + option tcpka + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:35357 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-keystone_public_internal_cluster + bind {{ internal_vip.ip }}:5000 + bind {{ public_vip.ip }}:5000 + option tcpka + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:5000 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-nova_compute_api_cluster + bind {{ internal_vip.ip }}:8774 + bind {{ public_vip.ip }}:8774 + mode tcp + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8774 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-nova_metadata_api_cluster + bind {{ internal_vip.ip }}:8775 + bind {{ public_vip.ip }}:8775 + option tcpka + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8775 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-cinder_api_cluster + bind {{ internal_vip.ip }}:8776 + bind {{ public_vip.ip }}:8776 + mode tcp + option httpchk + option 
tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-ceilometer_api_cluster + bind {{ internal_vip.ip }}:8777 + bind {{ public_vip.ip }}:8777 + mode tcp + option tcp-check + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8777 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-aodh_api_cluster + bind {{ internal_vip.ip }}:8042 + bind {{ public_vip.ip }}:8042 + mode tcp + option tcp-check + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-dashboarad + bind {{ public_vip.ip }}:80 + mode http + balance source + capture cookie vgnvisitor= len 32 + cookie SERVERID insert indirect nocache + option forwardfor + option httpchk + option httpclose + rspidel ^Set-cookie:\ IP= +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen stats + mode http + bind 0.0.0.0:9999 + stats enable + stats refresh 30s + stats uri / + stats realm Global\ statistics + stats auth admin:admin + + diff --git a/ansible/roles/ha/templates/keepalived.conf b/ansible/roles/ha/templates/keepalived.conf new file mode 100644 index 0000000..c2af86b --- /dev/null +++ b/ansible/roles/ha/templates/keepalived.conf @@ -0,0 +1,47 @@ +global_defs { + router_id {{ inventory_hostname }} +} + +vrrp_sync_group VG1 { + group { + internal_vip + public_vip + } +} + +vrrp_instance internal_vip { + interface {{ internal_vip.interface }} + virtual_router_id {{ vrouter_id_internal }} + state BACKUP + nopreempt + advert_int 1 + priority {{ 50 + (host_index[inventory_hostname] * 50) }} + + authentication { + auth_type PASS + auth_pass 1234 + } + + virtual_ipaddress { + {{ internal_vip.ip }}/{{ 
internal_vip.netmask }} dev {{ internal_vip.interface }} + } +} + +vrrp_instance public_vip { + interface {{ network_cfg.public_vip.interface }} + virtual_router_id {{ vrouter_id_public }} + state BACKUP + nopreempt + advert_int 1 + priority {{ 50 + (host_index[inventory_hostname] * 50) }} + + authentication { + auth_type PASS + auth_pass 4321 + } + + virtual_ipaddress { + {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ network_cfg.public_vip.interface }} + } + +} diff --git a/ansible/roles/ha/vars/Debian.yml b/ansible/roles/ha/vars/Debian.yml new file mode 100644 index 0000000..b9f46bd --- /dev/null +++ b/ansible/roles/ha/vars/Debian.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: [] +packages: [] diff --git a/ansible/roles/ha/vars/RedHat.yml b/ansible/roles/ha/vars/RedHat.yml new file mode 100644 index 0000000..b9f46bd --- /dev/null +++ b/ansible/roles/ha/vars/RedHat.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: [] +packages: [] diff --git a/ansible/roles/ha/vars/main.yml b/ansible/roles/ha/vars/main.yml new file mode 100644 index 0000000..bd73969 --- /dev/null +++ b/ansible/roles/ha/vars/main.yml @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - keepalived + - xinetd + - haproxy + +services_noarch: + - keepalived + - xinetd + - haproxy diff --git a/ansible/roles/heat/handlers/main.yml b/ansible/roles/heat/handlers/main.yml new file mode 100644 index 0000000..6cc567f --- /dev/null +++ b/ansible/roles/heat/handlers/main.yml @@ -0,0 +1,16 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart heat service + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) + +- name: remove heat-sqlite-db + shell: rm /var/lib/heat/heat.sqlite || touch heat.sqlite.db.removed + diff --git a/ansible/roles/heat/tasks/heat_config.yml b/ansible/roles/heat/tasks/heat_config.yml new file mode 100644 index 0000000..a24e2f8 --- /dev/null +++ b/ansible/roles/heat/tasks/heat_config.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: heat db sync + shell: su -s /bin/sh -c "heat-manage db_sync" heat + ignore_errors: True + notify: + - restart heat service + +- meta: flush_handlers + diff --git a/ansible/roles/heat/tasks/heat_install.yml b/ansible/roles/heat/tasks/heat_install.yml new file mode 100644 index 0000000..1fbada8 --- /dev/null +++ b/ansible/roles/heat/tasks/heat_install.yml @@ -0,0 +1,27 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install heat related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: generate heat service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: update heat conf + template: src=heat.j2 + dest=/etc/heat/heat.conf + backup=yes + notify: + - restart heat service + - remove heat-sqlite-db + diff --git a/ansible/roles/heat/tasks/main.yml b/ansible/roles/heat/tasks/main.yml new file mode 100644 index 0000000..886907e --- /dev/null +++ b/ansible/roles/heat/tasks/main.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include: heat_install.yml + tags: + - install + - heat_install + - heat + +- include: heat_config.yml + when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' + tags: + - config + - heat_config + - heat + +- meta: flush_handlers diff --git a/ansible/roles/heat/templates/heat.j2 b/ansible/roles/heat/templates/heat.j2 new file mode 100644 index 0000000..aec6b2e --- /dev/null +++ b/ansible/roles/heat/templates/heat.j2 @@ -0,0 +1,25 @@ +[DEFAULT] +heat_metadata_server_url = http://{{ internal_vip.ip }}:8000 +heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +log_dir = /var/log/heat + +[database] +connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat +idle_timeout = 30 +use_db_reconnect = True +pool_timeout = 10 + +[ec2authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 +admin_tenant_name = service +admin_user = heat +admin_password = {{ HEAT_PASS }} + diff --git a/ansible/roles/heat/vars/Debian.yml b/ansible/roles/heat/vars/Debian.yml new file mode 100644 index 0000000..64608ca --- /dev/null +++ b/ansible/roles/heat/vars/Debian.yml @@ -0,0 +1,20 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - heat-api + - heat-api-cfn + - heat-engine + - python-heatclient + +services: + - heat-api + - heat-api-cfn + - heat-engine + diff --git a/ansible/roles/heat/vars/RedHat.yml b/ansible/roles/heat/vars/RedHat.yml new file mode 100644 index 0000000..680b161 --- /dev/null +++ b/ansible/roles/heat/vars/RedHat.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: + - openstack-heat-api + - openstack-heat-api-cfn + - openstack-heat-engine + +packages: + - openstack-heat-api + - openstack-heat-api-cfn + - openstack-heat-engine + - python-heatclient diff --git a/ansible/roles/heat/vars/main.yml b/ansible/roles/heat/vars/main.yml new file mode 100644 index 0000000..7f867d2 --- /dev/null +++ b/ansible/roles/heat/vars/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] + diff --git a/ansible/roles/keystone/handlers/main.yml b/ansible/roles/keystone/handlers/main.yml old mode 100644 new mode 100755 index ca8afc8..608a8a0 --- a/ansible/roles/keystone/handlers/main.yml +++ b/ansible/roles/keystone/handlers/main.yml @@ -1,3 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart keystone - service: name=keystone state=restarted +- name: restart keystone services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/ansible/roles/keystone/tasks/keystone_config.yml b/ansible/roles/keystone/tasks/keystone_config.yml new file mode 100644 index 0000000..574ebab --- /dev/null +++ b/ansible/roles/keystone/tasks/keystone_config.yml @@ -0,0 +1,61 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: keystone-manage db-sync + #keystone_manage: action=dbsync + shell: su -s /bin/sh -c 'keystone-manage db_sync' keystone + +- name: wait for keystone ready + wait_for: port=35357 delay=3 timeout=10 host={{ internal_vip.ip }} + +- name: cron job to purge expired tokens hourly + cron: + name: 'purge expired tokens' + special_time: hourly + job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' + +- name: add tenants + keystone_user: + token: "{{ ADMIN_TOKEN }}" + endpoint: "http://{{ internal_ip }}:35357/v2.0" + tenant: "{{ item.tenant }}" + tenant_description: "{{ item.tenant_description }}" + with_items: "{{ os_users }}" + +- name: add users + keystone_user: + token: "{{ ADMIN_TOKEN }}" + endpoint: "http://{{ internal_ip }}:35357/v2.0" + user: "{{ item.user }}" + tenant: "{{ item.tenant }}" + password: "{{ item.password }}" + email: "{{ item.email }}" + with_items: "{{ os_users }}" + +- name: grant roles + keystone_user: + token: "{{ ADMIN_TOKEN }}" + endpoint: "http://{{ internal_ip }}:35357/v2.0" + user: "{{ item.user }}" + role: "{{ item.role }}" + tenant: "{{ item.tenant }}" + with_items: "{{ os_users }}" + +- name: add endpoints + keystone_service: + token: "{{ ADMIN_TOKEN }}" + endpoint: "http://{{ internal_ip }}:35357/v2.0" + name: "{{ item.name }}" + type: "{{ item.type }}" + region: "{{ item.region}}" + description: "{{ item.description }}" + publicurl: "{{ item.publicurl }}" + internalurl: "{{ item.internalurl }}" + adminurl: "{{ item.adminurl }}" + with_items: "{{ os_services }}" diff --git a/ansible/roles/keystone/tasks/keystone_install.yml b/ansible/roles/keystone/tasks/keystone_install.yml new file mode 
100644 index 0000000..ffae8ff --- /dev/null +++ b/ansible/roles/keystone/tasks/keystone_install.yml @@ -0,0 +1,87 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install keystone packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: generate keystone service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: delete sqlite database + file: + path: /var/lib/keystone/keystone.db + state: absent + +- name: update keystone conf + template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes + notify: + - restart keystone services + +- name: assure listen port exist + lineinfile: + dest: '{{ apache_config_dir }}/ports.conf' + regexp: '{{ item.regexp }}' + line: '{{ item.line}}' + with_items: + - regexp: "^Listen {{ internal_ip }}:5000" + line: "Listen {{ internal_ip }}:5000" + - regexp: "^Listen {{ internal_ip }}:35357" + line: "Listen {{ internal_ip }}:35357" + notify: + - restart keystone services + +- name: update apache2 configs + template: + src: wsgi-keystone.conf.j2 + dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf' + when: 
ansible_os_family == 'Debian' + notify: + - restart keystone services + +- name: update apache2 configs + template: + src: wsgi-keystone.conf.j2 + dest: '{{ apache_config_dir }}/wsgi-keystone.conf' + when: ansible_os_family == 'RedHat' + notify: + - restart keystone services + +- name: enable keystone server + file: + src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf" + dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf" + state: "link" + when: ansible_os_family == 'Debian' + notify: + - restart keystone services + +- name: keystone source files + template: src={{ item }} dest=/opt/{{ item }} + with_items: + - admin-openrc.sh + - demo-openrc.sh + +- meta: flush_handlers diff --git a/ansible/roles/keystone/tasks/main.yml b/ansible/roles/keystone/tasks/main.yml index a1a02be..21939fa 100644 --- a/ansible/roles/keystone/tasks/main.yml +++ b/ansible/roles/keystone/tasks/main.yml @@ -1,36 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: install keystone packages - apt: name=keystone state=present force=yes +- include: keystone_install.yml + tags: + - install + - keystone_install + - keystone -- name: update keystone conf - template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes - notify: restart keystone +- include: keystone_config.yml + when: inventory_hostname == groups['controller'][0] + tags: + - config + - keystone_config + - keystone -- name: delete sqlite database - shell: rm /var/lib/keystone/keystone.db || echo sqllite database already removed - -- name: manually stop keystone once - service: name=keystone state=stopped - -- name: keystone-manage db-sync - shell: su -s /bin/sh -c "keystone-manage db_sync" - -- name: cron job to purge expired tokens hourly - shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone - -- name: manually start keystone - service: name=keystone state=started - -- name: place keystone init script under /opt/ - template: src=keystone_init dest=/opt/keystone_init mode=0744 - -- name: run keystone_init - shell: /opt/keystone_init && touch keystone_init_complete || keystone_init_failed - args: - creates: keystone_init_complete - -- name: keystone source files - template: src={{ item }} dest=/opt/{{ item }} - with_items: - - admin-openrc.sh - - demo-openrc.sh +- meta: flush_handlers diff --git a/ansible/roles/keystone/templates/admin-openrc.sh b/ansible/roles/keystone/templates/admin-openrc.sh index 2e692f6..6ba620f 100644 --- a/ansible/roles/keystone/templates/admin-openrc.sh +++ 
b/ansible/roles/keystone/templates/admin-openrc.sh @@ -1,6 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## # Verify the Identity Service installation export OS_PASSWORD={{ ADMIN_PASS }} export OS_TENANT_NAME=admin -export OS_AUTH_URL=http://{{ identity_host }}:35357/v2.0 -export OS_USERNAME=ADMIN +export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0 +export OS_USERNAME=admin +export OS_VOLUME_API_VERSION=2 diff --git a/ansible/roles/keystone/templates/demo-openrc.sh b/ansible/roles/keystone/templates/demo-openrc.sh index c66022d..5807e86 100644 --- a/ansible/roles/keystone/templates/demo-openrc.sh +++ b/ansible/roles/keystone/templates/demo-openrc.sh @@ -1,5 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## export OS_USERNAME=demo export OS_PASSWORD={{ DEMO_PASS }} export OS_TENANT_NAME=demo -export OS_AUTH_URL=http://{{ identity_host }}:35357/v2.0 +export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0 diff --git a/ansible/roles/keystone/templates/keystone.conf b/ansible/roles/keystone/templates/keystone.conf index 9d5ba88..649fc32 100644 --- a/ansible/roles/keystone/templates/keystone.conf +++ b/ansible/roles/keystone/templates/keystone.conf @@ -1,1318 +1,59 @@ +{% set memcached_servers = [] %} +{% set rabbitmq_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% set _ = rabbitmq_servers.append('%s:5672'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} +{% set rabbitmq_servers = rabbitmq_servers|join(',') %} [DEFAULT] - admin_token={{ ADMIN_TOKEN }} - -#public_bind_host=0.0.0.0 - -#admin_bind_host=0.0.0.0 - -#compute_port=8774 - -#admin_port=35357 - -#public_port=5000 - -# The base public endpoint URL for keystone that are -# advertised to clients (NOTE: this does NOT affect how -# keystone listens for connections) (string value). -# Defaults to the base host URL of the request. Eg a -# request to http://server:5000/v2.0/users will -# default to http://server:5000. You should only need -# to set this value if the base URL contains a path -# (eg /prefix/v2.0) or the endpoint should be found on -# a different server. -#public_endpoint=http://localhost:%(public_port)s/ - -# The base admin endpoint URL for keystone that are advertised -# to clients (NOTE: this does NOT affect how keystone listens -# for connections) (string value). 
-# Defaults to the base host URL of the request. Eg a -# request to http://server:35357/v2.0/users will -# default to http://server:35357. You should only need -# to set this value if the base URL contains a path -# (eg /prefix/v2.0) or the endpoint should be found on -# a different server. -#admin_endpoint=http://localhost:%(admin_port)s/ - -# onready allows you to send a notification when the process -# is ready to serve For example, to have it notify using -# systemd, one could set shell command: "onready = systemd- -# notify --ready" or a module with notify() method: "onready = -# keystone.common.systemd". (string value) -#onready= - -# enforced by optional sizelimit middleware -# (keystone.middleware:RequestBodySizeLimiter). (integer -# value) -#max_request_body_size=114688 - -# limit the sizes of user & tenant ID/names. (integer value) -#max_param_size=64 - -# similar to max_param_size, but provides an exception for -# token values. (integer value) -#max_token_size=8192 - -# During a SQL upgrade member_role_id will be used to create a -# new role that will replace records in the -# user_tenant_membership table with explicit role grants. -# After migration, the member_role_id will be used in the API -# add_user_to_project. (string value) -#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab - -# During a SQL upgrade member_role_id will be used to create a -# new role that will replace records in the -# user_tenant_membership table with explicit role grants. -# After migration, member_role_name will be ignored. (string -# value) -#member_role_name=_member_ - -# The value passed as the keyword "rounds" to passlib encrypt -# method. (integer value) -#crypt_strength=40000 - -# Set this to True if you want to enable TCP_KEEPALIVE on -# server sockets i.e. sockets used by the keystone wsgi server -# for client connections. (boolean value) -#tcp_keepalive=false - -# Sets the value of TCP_KEEPIDLE in seconds for each server -# socket. 
Only applies if tcp_keepalive is True. Not supported -# on OS X. (integer value) -#tcp_keepidle=600 - -# The maximum number of entities that will be returned in a -# collection can be set with list_limit, with no limit set by -# default. This global limit may be then overridden for a -# specific driver, by specifying a list_limit in the -# appropriate section (e.g. [assignment]). (integer value) -#list_limit= - -# Set this to false if you want to enable the ability for -# user, group and project entities to be moved between domains -# by updating their domain_id. Allowing such movement is not -# recommended if the scope of a domain admin is being -# restricted by use of an appropriate policy file (see -# policy.v3cloudsample as an example). (boolean value) -#domain_id_immutable=true - - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. (list value) -#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. 
(string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -#rabbit_host=localhost - -# The RabbitMQ broker port where a single node is used. -# (integer value) -#rabbit_port=5672 - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -#rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -#rabbit_userid=guest - -# The RabbitMQ password. (string value) -#rabbit_password=guest - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. 
Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=keystone - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -#notification_driver= - -# AMQP topic used for OpenStack notifications. 
(list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -#rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=openstack - - -# -# Options defined in keystone.notifications -# - -# Default publisher_id for outgoing notifications (string -# value) -#default_publisher_id= - - -# -# Options defined in keystone.middleware.ec2_token -# - -# URL to get token from ec2 request. (string value) -#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens - -# Required if EC2 server requires client certificate. (string -# value) -#keystone_ec2_keyfile= - -# Client certificate key filename. Required if EC2 server -# requires client certificate. (string value) -#keystone_ec2_certfile= - -# A PEM encoded certificate authority to use when verifying -# HTTPS connections. Defaults to the system CAs. (string -# value) -#keystone_ec2_cafile= - -# Disable SSL certificate verification. (boolean value) -#keystone_ec2_insecure=false - - -# -# Options defined in keystone.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. Acceptable values are 0, , -# and :, where 0 results in listening on a random -# tcp port number; results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and : results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. 
(string value) -#backdoor_port= - - -# -# Options defined in keystone.openstack.common.lockutils -# - -# Whether to disable inter-process locks (boolean value) -#disable_process_locking=false - -# Directory to use for lock files. (string value) -#lock_path= - - -# -# Options defined in keystone.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -#debug=false - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -#verbose=false - -# Log output to standard error (boolean value) -#use_stderr=true - -# Format string to use for log messages with context (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN - -# Publish error events (boolean value) -#publish_errors=false - -# Make deprecations fatal (boolean value) -#fatal_deprecations=false - -# If an instance is passed with the log message, format it -# like this (string value) -#instance_format="[instance: %(uuid)s] " - -# If an instance UUID is passed with the log message, format -# it like this (string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of logging 
configuration file. It does not disable -# existing loggers, but just appends specified logging -# configuration to any other existing logging options. Please -# see the Python logging module documentation for details on -# logging configuration files. (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format= - -# Format string for %%(asctime)s in log records. Default: -# %(default)s (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If no default is -# set, logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file= - -# (Optional) The base directory used for relative --log-file -# paths (string value) -# Deprecated group/name - [DEFAULT]/logdir +debug={{ DEBUG }} log_dir = /var/log/keystone -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and then will be changed in J to honor RFC5424 -# (boolean value) -#use_syslog=false - -# (Optional) Use syslog rfc5424 format for logging. If -# enabled, will add APP-NAME (RFC5424) before the MSG part of -# the syslog message. The old format without APP-NAME is -# deprecated in I, and will be removed in J. (boolean value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in keystone.openstack.common.policy -# - -# JSON file containing policy (string value) -#policy_file=policy.json - -# Rule enforced when requested rule is not found (string -# value) -#policy_default_rule=default - - -[assignment] - -# -# Options defined in keystone -# - -# Keystone Assignment backend driver. 
(string value) -#driver= - -# Toggle for assignment caching. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# TTL (in seconds) to cache assignment data. This has no -# effect unless global caching is enabled. (integer value) -#cache_time= - -# Maximum number of entities that will be returned in an -# assignment collection. (integer value) -#list_limit= - - -[auth] - -# -# Options defined in keystone -# - -# Default auth methods. (list value) -#methods=external,password,token - -# The password auth plugin module. (string value) -#password=keystone.auth.plugins.password.Password - -# The token auth plugin module. (string value) -#token=keystone.auth.plugins.token.Token - -# The external (REMOTE_USER) auth plugin module. (string -# value) -#external=keystone.auth.plugins.external.DefaultDomain - - [cache] +backend=keystone.cache.memcache_pool +memcache_servers={{ memcached_servers}} +enabled=true -# -# Options defined in keystone -# - -# Prefix for building the configuration dictionary for the -# cache region. This should not need to be changed unless -# there is another dogpile.cache region with the same -# configuration name. (string value) -#config_prefix=cache.keystone - -# Default TTL, in seconds, for any cached item in the -# dogpile.cache region. This applies to any cached method that -# doesn't have an explicit cache expiration time defined for -# it. (integer value) -#expiration_time=600 - -# Dogpile.cache backend module. It is recommended that -# Memcache (dogpile.cache.memcache) or Redis -# (dogpile.cache.redis) be used in production deployments. -# Small workloads (single process) like devstack can use the -# dogpile.cache.memory backend. (string value) -#backend=keystone.common.cache.noop - -# Use a key-mangling function (sha1) to ensure fixed length -# cache-keys. This is toggle-able for debugging purposes, it -# is highly recommended to always leave this set to True. 
-# (boolean value) -#use_key_mangler=true - -# Arguments supplied to the backend module. Specify this -# option once per argument to be passed to the dogpile.cache -# backend. Example format: ":". (multi valued) -#backend_argument= - -# Proxy Classes to import that will affect the way the -# dogpile.cache backend functions. See the dogpile.cache -# documentation on changing-backend-behavior. Comma delimited -# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2. -# (list value) -#proxies= - -# Global toggle for all caching using the should_cache_fn -# mechanism. (boolean value) -#enabled=false - -# Extra debugging from the cache backend (cache keys, -# get/set/delete/etc calls) This is only really useful if you -# need to see the specific cache-backend get/set/delete calls -# with the keys/values. Typically this should be left set to -# False. (boolean value) -#debug_cache_backend=false - - -[catalog] - -# -# Options defined in keystone -# - -# Catalog template file name for use with the template catalog -# backend. (string value) -#template_file=default_catalog.templates - -# Keystone catalog backend driver. (string value) -#driver=keystone.catalog.backends.sql.Catalog - -# Maximum number of entities that will be returned in a -# catalog collection. (integer value) -#list_limit= - - -[credential] - -# -# Options defined in keystone -# - -# Keystone Credential backend driver. 
(string value) -#driver=keystone.credential.backends.sql.Credential - +[revoke] +driver=sql +expiration_buffer=3600 +caching=true [database] - -# -# Options defined in keystone.openstack.common.db.options -# - -# The file name to use with SQLite (string value) -#sqlite_db=keystone.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous=true - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection= -connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode=TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum db connection retries during startup. 
(setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information. 0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect=false - -# seconds between db connection retries (integer value) -#db_retry_interval=1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval=true - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval=10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries=20 - - -[ec2] - -# -# Options defined in keystone -# - -# Keystone EC2Credential backend driver. 
(string value) -#driver=keystone.contrib.ec2.backends.kvs.Ec2 - - -[endpoint_filter] - -# -# Options defined in keystone -# - -# Keystone Endpoint Filter backend driver (string value) -#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter - -# Toggle to return all active endpoints if no filter exists. -# (boolean value) -#return_all_endpoints_if_no_filter=true - - -[federation] - -# -# Options defined in keystone -# - -# Keystone Federation backend driver. (string value) -#driver=keystone.contrib.federation.backends.sql.Federation - -# Value to be used when filtering assertion parameters from -# the environment. (string value) -#assertion_prefix= +connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8 +idle_timeout=30 +min_pool_size=5 +max_pool_size=120 +pool_timeout=30 [identity] +default_domain_id=default +driver=sql -# -# Options defined in keystone -# - -# This references the domain to use for all Identity API v2 -# requests (which are not aware of domains). A domain with -# this ID will be created for you by keystone-manage db_sync -# in migration 008. The domain referenced by this ID cannot -# be deleted on the v3 API, to prevent accidentally breaking -# the v2 API. There is nothing special about this domain, -# other than the fact that it must exist to order to maintain -# support for your v2 clients. (string value) -#default_domain_id=default - -# A subset (or all) of domains can have their own identity -# driver, each with their own partial configuration file in a -# domain configuration directory. Only values specific to the -# domain need to be placed in the domain specific -# configuration file. This feature is disabled by default; set -# to True to enable. (boolean value) -#domain_specific_drivers_enabled=false - -# Path for Keystone to locate the domain specificidentity -# configuration files if domain_specific_drivers_enabled is -# set to true. 
(string value) -#domain_config_dir=/etc/keystone/domains - -# Keystone Identity backend driver. (string value) -#driver=keystone.identity.backends.sql.Identity - -# Maximum supported length for user passwords; decrease to -# improve performance. (integer value) -#max_password_length=4096 - -# Maximum number of entities that will be returned in an -# identity collection. (integer value) -#list_limit= - - -[kvs] - -# -# Options defined in keystone -# - -# Extra dogpile.cache backend modules to register with the -# dogpile.cache library. (list value) -#backends= - -# Prefix for building the configuration dictionary for the KVS -# region. This should not need to be changed unless there is -# another dogpile.cache region with the same configuration -# name. (string value) -#config_prefix=keystone.kvs - -# Toggle to disable using a key-mangling function to ensure -# fixed length keys. This is toggle-able for debugging -# purposes, it is highly recommended to always leave this set -# to True. (boolean value) -#enable_key_mangler=true - -# Default lock timeout for distributed locking. (integer -# value) -#default_lock_timeout=5 - - -[ldap] - -# -# Options defined in keystone -# - -# URL for connecting to the LDAP server. (string value) -#url=ldap://localhost - -# User BindDN to query the LDAP server. (string value) -#user= - -# Password for the BindDN to query the LDAP server. (string -# value) -#password= - -# LDAP server suffix (string value) -#suffix=cn=example,cn=com - -# If true, will add a dummy member to groups. This is required -# if the objectclass for groups requires the "member" -# attribute. (boolean value) -#use_dumb_member=false - -# DN of the "dummy member" to use when "use_dumb_member" is -# enabled. (string value) -#dumb_member=cn=dumb,dc=nonexistent - -# allow deleting subtrees. (boolean value) -#allow_subtree_delete=false - -# The LDAP scope for queries, this can be either "one" -# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). 
-# (string value) -#query_scope=one - -# Maximum results per page; a value of zero ("0") disables -# paging. (integer value) -#page_size=0 - -# The LDAP dereferencing option for queries. This can be -# either "never", "searching", "always", "finding" or -# "default". The "default" option falls back to using default -# dereferencing configured by your ldap.conf. (string value) -#alias_dereferencing=default - -# Override the system's default referral chasing behavior for -# queries. (boolean value) -#chase_referrals= - -# Search base for users. (string value) -#user_tree_dn= - -# LDAP search filter for users. (string value) -#user_filter= - -# LDAP objectClass for users. (string value) -#user_objectclass=inetOrgPerson - -# LDAP attribute mapped to user id. (string value) -#user_id_attribute=cn - -# LDAP attribute mapped to user name. (string value) -#user_name_attribute=sn - -# LDAP attribute mapped to user email. (string value) -#user_mail_attribute=email - -# LDAP attribute mapped to password. (string value) -#user_pass_attribute=userPassword - -# LDAP attribute mapped to user enabled flag. (string value) -#user_enabled_attribute=enabled - -# Bitmask integer to indicate the bit that the enabled value -# is stored in if the LDAP server represents "enabled" as a -# bit on an integer rather than a boolean. A value of "0" -# indicates the mask is not used. If this is not set to "0" -# the typical value is "2". This is typically used when -# "user_enabled_attribute = userAccountControl". (integer -# value) -#user_enabled_mask=0 - -# Default value to enable users. This should match an -# appropriate int value if the LDAP server uses non-boolean -# (bitmask) values to indicate if a user is enabled or -# disabled. If this is not set to "True"the typical value is -# "512". This is typically used when "user_enabled_attribute = -# userAccountControl". (string value) -#user_enabled_default=True - -# List of attributes stripped off the user on update. 
(list -# value) -#user_attribute_ignore=default_project_id,tenants - -# LDAP attribute mapped to default_project_id for users. -# (string value) -#user_default_project_id_attribute= - -# Allow user creation in LDAP backend. (boolean value) -#user_allow_create=true - -# Allow user updates in LDAP backend. (boolean value) -#user_allow_update=true - -# Allow user deletion in LDAP backend. (boolean value) -#user_allow_delete=true - -# If True, Keystone uses an alternative method to determine if -# a user is enabled or not by checking if they are a member of -# the "user_enabled_emulation_dn" group. (boolean value) -#user_enabled_emulation=false - -# DN of the group entry to hold enabled users when using -# enabled emulation. (string value) -#user_enabled_emulation_dn= - -# List of additional LDAP attributes used for mapping -# Additional attribute mappings for users. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#user_additional_attribute_mapping= - -# Search base for projects (string value) -#tenant_tree_dn= - -# LDAP search filter for projects. (string value) -#tenant_filter= - -# LDAP objectClass for projects. (string value) -#tenant_objectclass=groupOfNames - -# LDAP attribute mapped to project id. (string value) -#tenant_id_attribute=cn - -# LDAP attribute mapped to project membership for user. -# (string value) -#tenant_member_attribute=member - -# LDAP attribute mapped to project name. (string value) -#tenant_name_attribute=ou - -# LDAP attribute mapped to project description. (string value) -#tenant_desc_attribute=description - -# LDAP attribute mapped to project enabled. (string value) -#tenant_enabled_attribute=enabled - -# LDAP attribute mapped to project domain_id. (string value) -#tenant_domain_id_attribute=businessCategory - -# List of attributes stripped off the project on update. 
(list -# value) -#tenant_attribute_ignore= - -# Allow tenant creation in LDAP backend. (boolean value) -#tenant_allow_create=true - -# Allow tenant update in LDAP backend. (boolean value) -#tenant_allow_update=true - -# Allow tenant deletion in LDAP backend. (boolean value) -#tenant_allow_delete=true - -# If True, Keystone uses an alternative method to determine if -# a project is enabled or not by checking if they are a member -# of the "tenant_enabled_emulation_dn" group. (boolean value) -#tenant_enabled_emulation=false - -# DN of the group entry to hold enabled projects when using -# enabled emulation. (string value) -#tenant_enabled_emulation_dn= - -# Additional attribute mappings for projects. Attribute -# mapping format is :, where ldap_attr -# is the attribute in the LDAP entry and user_attr is the -# Identity API attribute. (list value) -#tenant_additional_attribute_mapping= - -# Search base for roles. (string value) -#role_tree_dn= - -# LDAP search filter for roles. (string value) -#role_filter= - -# LDAP objectClass for roles. (string value) -#role_objectclass=organizationalRole - -# LDAP attribute mapped to role id. (string value) -#role_id_attribute=cn - -# LDAP attribute mapped to role name. (string value) -#role_name_attribute=ou - -# LDAP attribute mapped to role membership. (string value) -#role_member_attribute=roleOccupant - -# List of attributes stripped off the role on update. (list -# value) -#role_attribute_ignore= - -# Allow role creation in LDAP backend. (boolean value) -#role_allow_create=true - -# Allow role update in LDAP backend. (boolean value) -#role_allow_update=true - -# Allow role deletion in LDAP backend. (boolean value) -#role_allow_delete=true - -# Additional attribute mappings for roles. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#role_additional_attribute_mapping= - -# Search base for groups. 
(string value) -#group_tree_dn= - -# LDAP search filter for groups. (string value) -#group_filter= - -# LDAP objectClass for groups. (string value) -#group_objectclass=groupOfNames - -# LDAP attribute mapped to group id. (string value) -#group_id_attribute=cn - -# LDAP attribute mapped to group name. (string value) -#group_name_attribute=ou - -# LDAP attribute mapped to show group membership. (string -# value) -#group_member_attribute=member - -# LDAP attribute mapped to group description. (string value) -#group_desc_attribute=description - -# List of attributes stripped off the group on update. (list -# value) -#group_attribute_ignore= - -# Allow group creation in LDAP backend. (boolean value) -#group_allow_create=true - -# Allow group update in LDAP backend. (boolean value) -#group_allow_update=true - -# Allow group deletion in LDAP backend. (boolean value) -#group_allow_delete=true - -# Additional attribute mappings for groups. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#group_additional_attribute_mapping= - -# CA certificate file path for communicating with LDAP -# servers. (string value) -#tls_cacertfile= - -# CA certificate directory path for communicating with LDAP -# servers. (string value) -#tls_cacertdir= - -# Enable TLS for communicating with LDAP servers. (boolean -# value) -#use_tls=false - -# valid options for tls_req_cert are demand, never, and allow. -# (string value) -#tls_req_cert=demand - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). 
(string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[memcache] - -# -# Options defined in keystone -# - -# Memcache servers in the format of "host:port" (list value) -#servers=localhost:11211 - -# Number of compare-and-set attempts to make when using -# compare-and-set in the token memcache back end. (integer -# value) -#max_compare_and_set_retry=16 - - -[oauth1] - -# -# Options defined in keystone -# - -# Keystone Credential backend driver. (string value) -#driver=keystone.contrib.oauth1.backends.sql.OAuth1 - -# Duration (in seconds) for the OAuth Request Token. (integer -# value) -#request_token_duration=28800 - -# Duration (in seconds) for the OAuth Access Token. (integer -# value) -#access_token_duration=86400 - - -[os_inherit] - -# -# Options defined in keystone -# - -# role-assignment inheritance to projects from owning domain -# can be optionally enabled. (boolean value) -#enabled=false - - -[paste_deploy] - -# -# Options defined in keystone -# - -# Name of the paste configuration file that defines the -# available pipelines. (string value) -#config_file=keystone-paste.ini - - -[policy] - -# -# Options defined in keystone -# - -# Keystone Policy backend driver. (string value) -#driver=keystone.policy.backends.sql.Policy - -# Maximum number of entities that will be returned in a policy -# collection. (integer value) -#list_limit= - - -[revoke] - -# -# Options defined in keystone -# - -# An implementation of the backend for persisting revocation -# events. (string value) -#driver=keystone.contrib.revoke.backends.kvs.Revoke - -# This value (calculated in seconds) is added to token -# expiration before a revocation event may be removed from the -# backend. (integer value) -#expiration_buffer=1800 - -# Toggle for revocation event cacheing. This has no effect -# unless global caching is enabled. 
(boolean value) -#caching=true - - -[signing] - -# -# Options defined in keystone -# - -# Deprecated in favor of provider in the [token] section. -# (string value) -#token_format= - -# Path of the certfile for token signing. (string value) -#certfile=/etc/keystone/ssl/certs/signing_cert.pem - -# Path of the keyfile for token signing. (string value) -#keyfile=/etc/keystone/ssl/private/signing_key.pem - -# Path of the CA for token signing. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA Key for token signing. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Key Size (in bits) for token signing cert (auto generated -# certificate). (integer value) -#key_size=2048 - -# Day the token signing cert is valid for (auto generated -# certificate). (integer value) -#valid_days=3650 - -# Certificate Subject (auto generated certificate) for token -# signing. (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com - - -[ssl] - -# -# Options defined in keystone -# - -# Toggle for SSL support on the keystone eventlet servers. -# (boolean value) -#enable=false - -# Path of the certfile for SSL. (string value) -#certfile=/etc/keystone/ssl/certs/keystone.pem - -# Path of the keyfile for SSL. (string value) -#keyfile=/etc/keystone/ssl/private/keystonekey.pem - -# Path of the ca cert file for SSL. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA key file for SSL. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Require client certificate. (boolean value) -#cert_required=false - -# SSL Key Length (in bits) (auto generated certificate). -# (integer value) -#key_size=1024 - -# Days the certificate is valid for once signed (auto -# generated certificate). (integer value) -#valid_days=3650 - -# SSL Certificate Subject (auto generated certificate). 
-# (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost - - -[stats] - -# -# Options defined in keystone -# - -# Keystone stats backend driver. (string value) -#driver=keystone.contrib.stats.backends.kvs.Stats - +[assignment] +driver=sql +[resource] +driver=sql +caching=true +cache_time=3600 + [token] +enforce_token_bind=permissive +expiration=43200 +provider=uuid +driver=sql +caching=true +cache_time=3600 -# -# Options defined in keystone -# - -# External auth mechanisms that should add bind information to -# token e.g. kerberos, x509. (list value) -#bind= - -# Enforcement policy on tokens presented to keystone with bind -# information. One of disabled, permissive, strict, required -# or a specifically required bind mode e.g. kerberos or x509 -# to require binding to that authentication. (string value) -#enforce_token_bind=permissive - -# Amount of time a token should remain valid (in seconds). -# (integer value) -#expiration=3600 - -# Controls the token construction, validation, and revocation -# operations. Core providers are -# "keystone.token.providers.[pki|uuid].Provider". (string -# value) -provider=keystone.token.providers.uuid.Provider - -# Keystone Token persistence backend driver. (string value) -driver=keystone.token.persistence.backends.sql.Token - -# Toggle for token system cacheing. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# Time to cache the revocation list and the revocation events -# if revoke extension is enabled (in seconds). This has no -# effect unless global and token caching are enabled. (integer -# value) -revocation_cache_time=3600 - -# Time to cache tokens (in seconds). This has no effect unless -# global and token caching are enabled. (integer value) -#cache_time= - -# Revoke token by token identifier. Setting revoke_by_id to -# True enables various forms of enumerating tokens, e.g. `list -# tokens for user`. 
These enumerations are processed to -# determine the list of tokens to revoke. Only disable if -# you are switching to using the Revoke extension with a -# backend other than KVS, which stores events in memory. -# (boolean value) -#revoke_by_id=true - - -[trust] - -# -# Options defined in keystone -# - -# delegation and impersonation features can be optionally -# disabled. (boolean value) -#enabled=true - -# Keystone Trust backend driver. (string value) -#driver=keystone.trust.backends.sql.Trust - - -[extra_headers] -Distribution = Ubuntu +[eventlet_server] +public_bind_host= {{ identity_host }} +admin_bind_host= {{ identity_host }} +[oslo_messaging_rabbit] +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +rabbit_hosts = {{ rabbitmq_servers }} diff --git a/ansible/roles/keystone/templates/keystone_init b/ansible/roles/keystone/templates/keystone_init deleted file mode 100644 index ac81cb4..0000000 --- a/ansible/roles/keystone/templates/keystone_init +++ /dev/null @@ -1,43 +0,0 @@ -# create an administrative user - -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 role-create --name=admin -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin - -# create a normal user - -keystone --os-token={{ 
ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=DEMO_EMAIL -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo - -# create a service tenant -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-create --name=service --description="Service Tenant" - -# regist keystone -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ identity_host }}:5000/v2.0 --internalurl=http://{{ identity_host }}:5000/v2.0 --adminurl=http://{{ identity_host }}:35357/v2.0 - -# Create a glance user that the Image Service can use to authenticate with the Identity service -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin - -#Register the Image Service with the Identity service so that other OpenStack services can locate it -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 
endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ image_host }}:9292 --internalurl=http://{{ image_host }}:9292 --adminurl=http://{{ image_host }}:9292 - -#Create a nova user that Compute uses to authenticate with the Identity Service -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin - -# register Compute with the Identity Service so that other OpenStack services can locate it -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ identity_host }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ compute_controller_host }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ compute_controller_host }}:8774/v2/%\(tenant_id\)s - -# register netron user, role and service -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking" -keystone --os-token={{ ADMIN_TOKEN }} 
--os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ network_server_host }}:9696 --adminurl http://{{ network_server_host }}:9696 --internalurl http://{{ network_server_host }}:9696 diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 new file mode 100644 index 0000000..64d864a --- /dev/null +++ b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 @@ -0,0 +1,46 @@ + {% set work_threads = (ansible_processor_vcpus + 1) // 2 %} + + WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP} + WSGIProcessGroup keystone-public + WSGIScriptAlias / /usr/bin/keystone-wsgi-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/{{ http_service_name }}/keystone.log + CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + + + + WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP} + WSGIProcessGroup keystone-admin + WSGIScriptAlias / /usr/bin/keystone-wsgi-admin + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/{{ http_service_name }}/keystone.log + CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff --git a/ansible/roles/keystone/vars/Debian.yml b/ansible/roles/keystone/vars/Debian.yml new file mode 100644 index 0000000..6754727 --- /dev/null +++ b/ansible/roles/keystone/vars/Debian.yml @@ -0,0 +1,20 @@ 
+############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +cron_path: "/var/spool/cron/crontabs" + +packages: + - keystone + +services: + - apache2 + +apache_config_dir: /etc/apache2 +http_service_name: apache2 diff --git a/ansible/roles/keystone/vars/RedHat.yml b/ansible/roles/keystone/vars/RedHat.yml new file mode 100644 index 0000000..38f8d91 --- /dev/null +++ b/ansible/roles/keystone/vars/RedHat.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +cron_path: "/var/spool/cron" + +packages: + - openstack-keystone + +services: + - httpd + +apache_config_dir: /etc/httpd/conf.d +http_service_name: httpd diff --git a/ansible/roles/keystone/vars/main.yml b/ansible/roles/keystone/vars/main.yml new file mode 100644 index 0000000..58751df --- /dev/null +++ b/ansible/roles/keystone/vars/main.yml @@ -0,0 +1,164 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - python-keystoneclient + +services_noarch: [] +os_services: + - name: keystone + type: identity + region: regionOne + description: "OpenStack Identity" + publicurl: "http://{{ public_vip.ip }}:5000/v2.0" + internalurl: "http://{{ internal_vip.ip }}:5000/v2.0" + adminurl: "http://{{ internal_vip.ip }}:35357/v2.0" + + - name: glance + type: image + region: regionOne + description: "OpenStack Image Service" + publicurl: "http://{{ public_vip.ip }}:9292" + internalurl: "http://{{ internal_vip.ip }}:9292" + adminurl: "http://{{ internal_vip.ip }}:9292" + + - name: nova + type: compute + region: regionOne + description: "OpenStack Compute" + publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s" + + - name: neutron + type: network + region: regionOne + description: "OpenStack Networking" + publicurl: "http://{{ public_vip.ip }}:9696" + internalurl: "http://{{ internal_vip.ip }}:9696" + adminurl: "http://{{ internal_vip.ip }}:9696" + + - name: ceilometer + type: metering + region: regionOne + description: "OpenStack Telemetry" + publicurl: "http://{{ public_vip.ip }}:8777" + internalurl: "http://{{ internal_vip.ip }}:8777" + adminurl: "http://{{ internal_vip.ip }}:8777" + + - name: aodh + type: alarming + region: regionOne + description: "OpenStack Telemetry" + publicurl: "http://{{ public_vip.ip }}:8042" + internalurl: "http://{{ internal_vip.ip }}:8042" + adminurl: "http://{{ internal_vip.ip }}:8042" + + - name: cinder + type: volume + region: regionOne + description: "OpenStack Block Storage" + publicurl: 
"http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s" + + - name: cinderv2 + type: volumev2 + region: regionOne + description: "OpenStack Block Storage v2" + publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s" + + - name: heat + type: orchestration + region: regionOne + description: "OpenStack Orchestration" + publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s" + + - name: heat-cfn + type: cloudformation + region: regionOne + description: "OpenStack CloudFormation Orchestration" + publicurl: "http://{{ public_vip.ip }}:8000/v1" + internalurl: "http://{{ internal_vip.ip }}:8000/v1" + adminurl: "http://{{ internal_vip.ip }}:8000/v1" + +os_users: + - user: admin + password: "{{ ADMIN_PASS }}" + email: admin@admin.com + role: admin + tenant: admin + tenant_description: "Admin Tenant" + + - user: glance + password: "{{ GLANCE_PASS }}" + email: glance@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: nova + password: "{{ NOVA_PASS }}" + email: nova@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: keystone + password: "{{ KEYSTONE_PASS }}" + email: keystone@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: neutron + password: "{{ NEUTRON_PASS }}" + email: neutron@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: ceilometer + password: "{{ CEILOMETER_PASS }}" + email: ceilometer@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: cinder + password: "{{ 
CINDER_PASS }}" + email: cinder@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: aodh + password: "{{ AODH_PASS }}" + email: aodh@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: heat + password: "{{ HEAT_PASS }}" + email: heat@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: demo + password: "" + email: heat@demo.com + role: heat_stack_user + tenant: demo + tenant_description: "Demo Tenant" diff --git a/ansible/roles/memcached/handlers/main.yml b/ansible/roles/memcached/handlers/main.yml new file mode 100755 index 0000000..4c3230c --- /dev/null +++ b/ansible/roles/memcached/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart memcahed services + service: name={{ item }} state=restarted enabled=yes + with_items: services| union(services_noarch) diff --git a/ansible/roles/memcached/tasks/main.yml b/ansible/roles/memcached/tasks/main.yml new file mode 100644 index 0000000..99ee6e8 --- /dev/null +++ b/ansible/roles/memcached/tasks/main.yml @@ -0,0 +1,35 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: change memcache listen ip + lineinfile: dest=/etc/memcached.conf regexp="^-l " line="-l 0.0.0.0" + when: ansible_os_family == "Debian" + notify: + - restart memcahed services + +- meta: flush_handlers diff --git a/ansible/roles/memcached/vars/Debian.yml b/ansible/roles/memcached/vars/Debian.yml new file mode 100644 index 0000000..277bf3b --- /dev/null +++ b/ansible/roles/memcached/vars/Debian.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - python-memcache + +services: [] + + diff --git a/ansible/roles/memcached/vars/RedHat.yml b/ansible/roles/memcached/vars/RedHat.yml new file mode 100644 index 0000000..521ac3e --- /dev/null +++ b/ansible/roles/memcached/vars/RedHat.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - python-memcached + +pip_packages: [] + +services: [] diff --git a/ansible/roles/memcached/vars/main.yml b/ansible/roles/memcached/vars/main.yml new file mode 100644 index 0000000..908d267 --- /dev/null +++ b/ansible/roles/memcached/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - memcached + +services_noarch: + - memcached diff --git a/ansible/roles/monitor/files/check_Debian_service.sh b/ansible/roles/monitor/files/check_Debian_service.sh new file mode 100644 index 0000000..5dea3e6 --- /dev/null +++ b/ansible/roles/monitor/files/check_Debian_service.sh @@ -0,0 +1,15 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +services=`cat /opt/service | uniq` +for service in $services; do + if [ `/sbin/initctl list|awk '/stop\/waiting/{print $1}'|uniq | grep $service` ]; then + /sbin/start $service + fi +done diff --git a/ansible/roles/monitor/files/check_RedHat_service.sh b/ansible/roles/monitor/files/check_RedHat_service.sh new file mode 100644 index 0000000..1111f63 --- /dev/null +++ b/ansible/roles/monitor/files/check_RedHat_service.sh @@ -0,0 +1,16 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +services=`cat /opt/service | uniq` +for service in $services; do + /usr/sbin/service $service status >/dev/null 2>&1 + if [[ $? -ne 0 ]]; then + /usr/sbin/service $service start + fi +done diff --git a/ansible/roles/monitor/files/check_service.sh b/ansible/roles/monitor/files/check_service.sh new file mode 100644 index 0000000..5dea3e6 --- /dev/null +++ b/ansible/roles/monitor/files/check_service.sh @@ -0,0 +1,15 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +services=`cat /opt/service | uniq` +for service in $services; do + if [ `/sbin/initctl list|awk '/stop\/waiting/{print $1}'|uniq | grep $service` ]; then + /sbin/start $service + fi +done diff --git a/ansible/roles/monitor/files/root b/ansible/roles/monitor/files/root new file mode 100644 index 0000000..9c55c4f --- /dev/null +++ b/ansible/roles/monitor/files/root @@ -0,0 +1 @@ +* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log 2>&1 diff --git a/ansible/roles/monitor/tasks/main.yml b/ansible/roles/monitor/tasks/main.yml new file mode 100644 index 0000000..b31b91e --- /dev/null +++ b/ansible/roles/monitor/tasks/main.yml @@ -0,0 +1,22 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES 
CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: copy service check file + copy: src=check_{{ ansible_os_family }}_service.sh dest=/usr/local/bin/check_service.sh mode=0777 + +- name: copy cron file + copy: src=root dest={{ cron_path }}/root mode=0600 + +- name: restart cron + service: name={{ cron }} state=restarted + +- meta: flush_handlers + diff --git a/ansible/roles/monitor/vars/Debian.yml b/ansible/roles/monitor/vars/Debian.yml new file mode 100644 index 0000000..225a149 --- /dev/null +++ b/ansible/roles/monitor/vars/Debian.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +cron: cron +cron_path: "/var/spool/cron/crontabs" + diff --git a/ansible/roles/monitor/vars/RedHat.yml b/ansible/roles/monitor/vars/RedHat.yml new file mode 100644 index 0000000..58ce03f --- /dev/null +++ b/ansible/roles/monitor/vars/RedHat.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +cron: crond +cron_path: "/var/spool/cron" diff --git a/ansible/roles/mq/tasks/main.yml b/ansible/roles/mq/tasks/main.yml index d4ff1e3..74be9cc 100644 --- a/ansible/roles/mq/tasks/main.yml +++ b/ansible/roles/mq/tasks/main.yml @@ -1,2 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- include: rabbitmq.yml +- include_vars: "{{ ansible_os_family }}.yml" + +- include: rabbitmq_install.yml + +- include: rabbitmq_config.yml + when: inventory_hostname == haproxy_hosts.keys()[0] + +- meta: flush_handlers diff --git a/ansible/roles/mq/tasks/rabbitmq.yml b/ansible/roles/mq/tasks/rabbitmq.yml deleted file mode 100644 index 3354325..0000000 --- a/ansible/roles/mq/tasks/rabbitmq.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: install rabbitmq-server - apt: name=rabbitmq-server state=present - -- name: start and enable rabbitmq-server - service: name=rabbitmq-server - state=restarted - enabled=yes diff --git a/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/ansible/roles/mq/tasks/rabbitmq_cluster.yml new file mode 100644 index 0000000..50c062f --- /dev/null +++ b/ansible/roles/mq/tasks/rabbitmq_cluster.yml @@ -0,0 +1,36 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES 
CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: check if i in the node list + shell: | + rabbitmqctl -q cluster_status | grep '\[{nodes,'|grep {{ inventory_hostname }} + changed_when: is_member.rc != 0 + failed_when: false + register: is_member + delegate_to: '{{ haproxy_hosts.keys()[0] }}' + +- name: stop rabbitmq app + shell: | + rabbitmqctl stop_app; sleep 5 + failed_when: false + when: is_member.rc != 0 + +- name: join cluster + shell: rabbitmqctl join_cluster rabbit@{{ haproxy_hosts.keys()[0] }} + register: join_result + when: is_member.rc != 0 + until: join_result|success + failed_when: join_result|failed and not 'already_member' in join_result.stderr + changed_when: join_result|success + retries: 20 + delay: 3 + +- name: start rabbitmq app + shell: rabbitmqctl start_app + when: is_member.rc != 0 diff --git a/ansible/roles/mq/tasks/rabbitmq_config.yml b/ansible/roles/mq/tasks/rabbitmq_config.yml new file mode 100644 index 0000000..e26b81b --- /dev/null +++ b/ansible/roles/mq/tasks/rabbitmq_config.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: remove default guest user is removed + rabbitmq_user: + user: guest + state: absent + +- name: add rabbitmq user + rabbitmq_user: + user='{{ RABBIT_USER }}' + password='{{ RABBIT_PASS }}' + vhost=/ + configure_priv=.* + write_priv=.* + read_priv=.* + state=present diff --git a/ansible/roles/mq/tasks/rabbitmq_install.yml b/ansible/roles/mq/tasks/rabbitmq_install.yml new file mode 100755 index 0000000..1c8c2fe --- /dev/null +++ b/ansible/roles/mq/tasks/rabbitmq_install.yml @@ -0,0 +1,91 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: create rabbitmq directory + file: path=/etc/rabbitmq state=directory mode=0755 + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install rabbitmq-server + action: "{{ ansible_pkg_mgr }} name=rabbitmq-server state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: make sure rabbitmq-server stopped + service: + name: rabbitmq-server + state: stopped + enabled: yes + +- name: replace cookie + copy: + content: "{{ ERLANG_TOKEN }}" + dest: 
/var/lib/rabbitmq/.erlang.cookie + mode: 0400 + owner: rabbitmq + group: rabbitmq + +- name: replace config + copy: + content: "RABBITMQ_NODE_IP_ADDRESS={{ internal_ip }}" + dest: /etc/rabbitmq/rabbitmq-env.conf + mode: 0400 + owner: rabbitmq + group: rabbitmq + +- name: set open file limit for rabbitmq + copy: + content: "ulimit -n 65536" + dest: /etc/default/rabbitmq-server + mode: 0400 + owner: rabbitmq + group: rabbitmq + +- name: restart rabbitmq-server + service: + name: rabbitmq-server + state: restarted + +- name: enable queue mirroring + rabbitmq_policy: + name: "ha-all" + pattern: '^(?!amq\.).*' + tags: "ha-mode=all" + +- name: get cluster name + shell: | + rabbitmqctl cluster_status | grep -w '<<"compass">>' + register: cluster_status + failed_when: false + changed_when: cluster_status.rc != 0 + when: | + inventory_hostname == haproxy_hosts.keys()[0] + +- name: set cluster name + shell: rabbitmqctl set_cluster_name compass + when: | + inventory_hostname == haproxy_hosts.keys()[0] + and cluster_status.rc != 0 + +- include: rabbitmq_cluster.yml + when: inventory_hostname != haproxy_hosts.keys()[0] + +- name: generate mq service list + shell: echo {{ item }} >> /opt/service + with_items: services_noarch diff --git a/ansible/roles/mq/templates/.erlang.cookie b/ansible/roles/mq/templates/.erlang.cookie new file mode 100644 index 0000000..cadcfaf --- /dev/null +++ b/ansible/roles/mq/templates/.erlang.cookie @@ -0,0 +1 @@ +{{ ERLANG_TOKEN }} diff --git a/ansible/roles/mq/templates/rabbitmq-env.conf b/ansible/roles/mq/templates/rabbitmq-env.conf new file mode 100644 index 0000000..377c89d --- /dev/null +++ b/ansible/roles/mq/templates/rabbitmq-env.conf @@ -0,0 +1 @@ +RABBITMQ_NODE_IP_ADDRESS={{ internal_vip.ip }} diff --git a/ansible/roles/mq/vars/Debian.yml b/ansible/roles/mq/vars/Debian.yml new file mode 100644 index 0000000..a8f73e1 --- /dev/null +++ b/ansible/roles/mq/vars/Debian.yml @@ -0,0 +1,12 @@ 
+############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: [] +packages: [] + diff --git a/ansible/roles/mq/vars/RedHat.yml b/ansible/roles/mq/vars/RedHat.yml new file mode 100644 index 0000000..a8f73e1 --- /dev/null +++ b/ansible/roles/mq/vars/RedHat.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: [] +packages: [] + diff --git a/ansible/roles/mq/vars/main.yml b/ansible/roles/mq/vars/main.yml new file mode 100644 index 0000000..35c499d --- /dev/null +++ b/ansible/roles/mq/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - rabbitmq-server + +services_noarch: + - rabbitmq-server diff --git a/ansible/roles/neutron-common/handlers/main.yml b/ansible/roles/neutron-common/handlers/main.yml index d82c01b..a86a145 100644 --- a/ansible/roles/neutron-common/handlers/main.yml +++ b/ansible/roles/neutron-common/handlers/main.yml @@ -1,13 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- - name: restart neutron-plugin-openvswitch-agent - service: name=neutron-plugin-openvswitch-agent state=restarted - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: restart neutron-l3-agent - service: name=neutron-l3-agent state=restarted - -- name: restart neutron-dhcp-agent - service: name=neutron-dhcp-agent state=restarted - -- name: restart neutron-metadata-agent - service: name=neutron-metadata-agent state=restarted + service: name={{ neutron_plugin_openvswitch_agent_services }} state=restarted enabled=yes diff --git a/ansible/roles/neutron-compute/defaults/main.yml b/ansible/roles/neutron-compute/defaults/main.yml index 825178b..d760b4e 100644 --- a/ansible/roles/neutron-compute/defaults/main.yml +++ b/ansible/roles/neutron-compute/defaults/main.yml @@ -1,2 +1,9 @@ 
+############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -neutron_ovs_bridge_mappings: "" diff --git a/ansible/roles/neutron-compute/handlers/main.yml b/ansible/roles/neutron-compute/handlers/main.yml index d82c01b..d544494 100644 --- a/ansible/roles/neutron-compute/handlers/main.yml +++ b/ansible/roles/neutron-compute/handlers/main.yml @@ -1,13 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart neutron-plugin-openvswitch-agent - service: name=neutron-plugin-openvswitch-agent state=restarted - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: restart neutron-l3-agent - service: name=neutron-l3-agent state=restarted - -- name: restart neutron-dhcp-agent - service: name=neutron-dhcp-agent state=restarted - -- name: restart neutron-metadata-agent - service: name=neutron-metadata-agent state=restarted +- name: restart neutron compute service + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/ansible/roles/neutron-compute/tasks/main.yml b/ansible/roles/neutron-compute/tasks/main.yml index 39523a9..3e4b24b 100644 --- 
a/ansible/roles/neutron-compute/tasks/main.yml +++ b/ansible/roles/neutron-compute/tasks/main.yml @@ -1,4 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- +- include_vars: "{{ ansible_os_family }}.yml" - name: activate ipv4 forwarding sysctl: name=net.ipv4.ip_forward value=1 @@ -12,39 +21,49 @@ sysctl: name=net.ipv4.conf.default.rp_filter value=0 state=present reload=yes +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + - name: install compute-related neutron packages - apt: name={{ item }} state=present force=yes - with_items: - - neutron-common - - neutron-plugin-ml2 - - openvswitch-datapath-dkms - - openvswitch-switch + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) -- name: install neutron openvswitch agent - apt: name=neutron-plugin-openvswitch-agent - state=present force=yes - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" -- name: config neutron - template: src=neutron-network.conf - dest=/etc/neutron/neutron.conf backup=yes - notify: - - restart neutron-plugin-openvswitch-agent +- name: fix openstack neutron plugin config file + shell: | + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service + systemctl daemon-reload + when: ansible_os_family == 'RedHat' + +- name: generate 
neutron compute service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) - name: config ml2 plugin - template: src=ml2_conf.ini + template: src=templates/ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes - notify: - - restart neutron-plugin-openvswitch-agent -- name: add br-int - openvswitch_bridge: bridge=br-int state=present +- name: ln plugin.ini + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link + +- name: config neutron + template: src=templates/neutron.conf + dest=/etc/neutron/neutron.conf backup=yes notify: - - restart neutron-plugin-openvswitch-agent - - restart nova-compute + - restart neutron compute service + - restart nova-compute services + +- meta: flush_handlers - include: ../../neutron-network/tasks/odl.yml when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- meta: flush_handlers diff --git a/ansible/roles/neutron-compute/templates/l3_agent.ini b/ansible/roles/neutron-compute/templates/l3_agent.ini index b394c00..5f49934 100644 --- a/ansible/roles/neutron-compute/templates/l3_agent.ini +++ b/ansible/roles/neutron-compute/templates/l3_agent.ini @@ -45,7 +45,7 @@ handle_internal_only_routers = True # Name of bridge used for external network traffic. This should be set to # empty value for the linux bridge. when this parameter is set, each L3 agent # can be associated with no more than one external network. -external_network_bridge = br-ex +external_network_bridge = # TCP Port used by Neutron metadata server metadata_port = 9697 diff --git a/ansible/roles/neutron-compute/templates/metadata_agent.ini b/ansible/roles/neutron-compute/templates/metadata_agent.ini index edde22c..87937cc 100644 --- a/ansible/roles/neutron-compute/templates/metadata_agent.ini +++ b/ansible/roles/neutron-compute/templates/metadata_agent.ini @@ -3,8 +3,8 @@ debug = True # The Neutron user information for accessing the Neutron API. 
-auth_url = http://{{ identity_host }}:5000/v2.0 -auth_region = RegionOne +auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +auth_region = regionOne # Turn off verification of the certificate for ssl # auth_insecure = False # Certificate Authority public key (CA cert) file for ssl @@ -17,7 +17,7 @@ admin_password = {{ NEUTRON_PASS }} # endpoint_type = adminURL # IP address used by Nova metadata server -nova_metadata_ip = {{ compute_controller_host }} +nova_metadata_ip = {{ internal_vip.ip }} # TCP Port used by Nova metadata server nova_metadata_port = 8775 diff --git a/ansible/roles/neutron-compute/templates/ml2_conf.ini b/ansible/roles/neutron-compute/templates/ml2_conf.ini deleted file mode 100644 index 9972842..0000000 --- a/ansible/roles/neutron-compute/templates/ml2_conf.ini +++ /dev/null @@ -1,108 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. 
-# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -flat_networks = external -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 1001:4095 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -vxlan_group = 239.1.1.1 -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. 
-# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/ansible/roles/neutron-compute/templates/neutron-network.conf b/ansible/roles/neutron-compute/templates/neutron-network.conf deleted file mode 100644 index 318e4c0..0000000 --- a/ansible/roles/neutron-compute/templates/neutron-network.conf +++ /dev/null @@ -1,466 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. 
-state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. 
-# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. 
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. 
-# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/neutron-compute/templates/neutron.conf b/ansible/roles/neutron-compute/templates/neutron.conf deleted file mode 100644 index 28bb2ba..0000000 --- a/ansible/roles/neutron-compute/templates/neutron.conf +++ /dev/null @@ -1,467 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ VERBOSE }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. 
-state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. 
-# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. 
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. 
A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/neutron-compute/templates/neutron_init.sh b/ansible/roles/neutron-compute/templates/neutron_init.sh deleted file mode 100644 index b92e202..0000000 --- a/ansible/roles/neutron-compute/templates/neutron_init.sh +++ /dev/null @@ -1,4 +0,0 @@ -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True - -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} - diff --git a/ansible/roles/neutron-compute/templates/nova.conf b/ansible/roles/neutron-compute/templates/nova.conf deleted file mode 100644 index dfb4b93..0000000 --- a/ansible/roles/neutron-compute/templates/nova.conf +++ /dev/null @@ -1,68 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf 
-dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = 0.0.0.0 -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ image_host }} - -[neutron] -url = http://{{ network_server_host }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ identity_host }}:35357/v2.0 diff --git 
a/ansible/roles/neutron-compute/vars/Debian.yml b/ansible/roles/neutron-compute/vars/Debian.yml new file mode 100644 index 0000000..8319e42 --- /dev/null +++ b/ansible/roles/neutron-compute/vars/Debian.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +packages: + - neutron-common + - neutron-plugin-ml2 + - openvswitch-datapath-dkms + - openvswitch-switch + - neutron-plugin-openvswitch-agent + +services: + - neutron-plugin-openvswitch-agent diff --git a/ansible/roles/neutron-compute/vars/RedHat.yml b/ansible/roles/neutron-compute/vars/RedHat.yml new file mode 100644 index 0000000..65dda6b --- /dev/null +++ b/ansible/roles/neutron-compute/vars/RedHat.yml @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-neutron + - openstack-neutron-ml2 + - openstack-neutron-openvswitch + +services: + - openvswitch + - neutron-openvswitch-agent + - libvirtd diff --git a/ansible/roles/neutron-compute/vars/main.yml b/ansible/roles/neutron-compute/vars/main.yml new file mode 100644 index 0000000..f6fef74 --- /dev/null +++ b/ansible/roles/neutron-compute/vars/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] diff --git a/ansible/roles/neutron-controller/handlers/main.yml b/ansible/roles/neutron-controller/handlers/main.yml index c830296..98d2113 100644 --- a/ansible/roles/neutron-controller/handlers/main.yml +++ b/ansible/roles/neutron-controller/handlers/main.yml @@ -1,24 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart nova-api - service: name=nova-api state=restarted - -- name: restart nova-cert - service: name=nova-cert state=restarted - -- name: restart nova-consoleauth - service: name=nova-consoleauth state=restarted - -- name: restart nova-scheduler - service: name=nova-scheduler state=restarted - -- name: restart nova-conductor - service: name=nova-conductor state=restarted - -- name: restart nova-novncproxy - service: name=nova-novncproxy state=restarted - -- name: remove nova-sqlite-db - shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed - -- name: restart neutron-server - service: name=neutron-server state=restarted +- name: restart neutron control services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) + when: item != "neutron-server" diff --git a/ansible/roles/neutron-controller/tasks/main.yml b/ansible/roles/neutron-controller/tasks/main.yml index ed0c297..1aaf91a 100644 --- a/ansible/roles/neutron-controller/tasks/main.yml +++ b/ansible/roles/neutron-controller/tasks/main.yml @@ -1,46 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: install controller-related neutron packages - apt: name={{ item }} state=present force=yes - with_items: - - neutron-server - - neutron-plugin-ml2 +- include: neutron_install.yml + tags: + - install + - neutron_install + - neutron -- name: get tenant id to fill neutron.conf - shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}' - register: NOVA_ADMIN_TENANT_ID - -- name: update neutron conf - template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes - notify: - - restart neutron-server - -- name: update ml2 plugin conf - template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes - notify: - - restart neutron-server +- include: neutron_config.yml + when: inventory_hostname == groups['controller'][0] + tags: + - config + - neutron_config + - neutron - meta: flush_handlers -#- name: manually restart nova-api -# service: name=nova-api state=restarted - -#- name: manually restart nova-scheduler -# service: name=nova-scheduler state=restarted - -#- name: manually restart nova-conductor -# service: name=nova-conductor state=restarted - -#- name: manually restart neutron-server -# service: name=neutron-server state=restarted - -- name: place neutron_init.sh under /opt/ - template: src=neutron_init.sh dest=/opt/neutron_init.sh mode=0744 - -- name: init neutron - shell: /opt/neutron_init.sh && touch neutron_init_complete || touch neutron_init_failed - args: - creates: neutron_init_complete - -- name: neutron-db-manage upgrade to Juno - shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf 
--config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head - notify: - - restart neutron-server diff --git a/ansible/roles/neutron-controller/tasks/neutron_config.yml b/ansible/roles/neutron-controller/tasks/neutron_config.yml new file mode 100644 index 0000000..d07e187 --- /dev/null +++ b/ansible/roles/neutron-controller/tasks/neutron_config.yml @@ -0,0 +1,33 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: fix openstack neutron plugin config file + shell: | + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service + systemctl daemon-reload + when: ansible_os_family == 'RedHat' + +- name: neutron-db-manage upgrade to Juno + shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head + register: result + run_once: True + until: result.rc == 0 + retries: 5 + delay: 3 + notify: + - restart neutron control services + +- name: restart first neutron-server + service: name=neutron-server state=restarted enabled=yes + +- name: restart other neutron-server + service: name=neutron-server state=restarted enabled=yes + +- meta: flush_handlers diff --git a/ansible/roles/neutron-controller/tasks/neutron_install.yml b/ansible/roles/neutron-controller/tasks/neutron_install.yml new file mode 100644 index 0000000..be64c41 --- /dev/null +++ b/ansible/roles/neutron-controller/tasks/neutron_install.yml @@ -0,0 +1,44 @@ +############################################################################## +# Copyright (c) 
2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install controller-related neutron packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: generate neutron control service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: get tenant id to fill neutron.conf + shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}' + register: NOVA_ADMIN_TENANT_ID + +- name: update neutron conf + template: src=templates/neutron.conf dest=/etc/neutron/neutron.conf backup=yes + +- name: update ml2 plugin conf + template: src=templates/ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes + +- name: ln plugin.ini + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link diff --git a/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf b/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/ansible/roles/neutron-controller/templates/l3_agent.ini 
b/ansible/roles/neutron-controller/templates/l3_agent.ini index b394c00..5f49934 100644 --- a/ansible/roles/neutron-controller/templates/l3_agent.ini +++ b/ansible/roles/neutron-controller/templates/l3_agent.ini @@ -45,7 +45,7 @@ handle_internal_only_routers = True # Name of bridge used for external network traffic. This should be set to # empty value for the linux bridge. when this parameter is set, each L3 agent # can be associated with no more than one external network. -external_network_bridge = br-ex +external_network_bridge = # TCP Port used by Neutron metadata server metadata_port = 9697 diff --git a/ansible/roles/neutron-controller/templates/metadata_agent.ini b/ansible/roles/neutron-controller/templates/metadata_agent.ini index edde22c..87937cc 100644 --- a/ansible/roles/neutron-controller/templates/metadata_agent.ini +++ b/ansible/roles/neutron-controller/templates/metadata_agent.ini @@ -3,8 +3,8 @@ debug = True # The Neutron user information for accessing the Neutron API. -auth_url = http://{{ identity_host }}:5000/v2.0 -auth_region = RegionOne +auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +auth_region = regionOne # Turn off verification of the certificate for ssl # auth_insecure = False # Certificate Authority public key (CA cert) file for ssl @@ -17,7 +17,7 @@ admin_password = {{ NEUTRON_PASS }} # endpoint_type = adminURL # IP address used by Nova metadata server -nova_metadata_ip = {{ compute_controller_host }} +nova_metadata_ip = {{ internal_vip.ip }} # TCP Port used by Nova metadata server nova_metadata_port = 8775 diff --git a/ansible/roles/neutron-controller/templates/ml2_conf.ini b/ansible/roles/neutron-controller/templates/ml2_conf.ini deleted file mode 100644 index 9972842..0000000 --- a/ansible/roles/neutron-controller/templates/ml2_conf.ini +++ /dev/null @@ -1,108 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. 
-# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -flat_networks = external -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 1001:4095 - -# (StrOpt) Multicast group for the VXLAN interface. 
When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -vxlan_group = 239.1.1.1 -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/ansible/roles/neutron-controller/templates/neutron-network.conf b/ansible/roles/neutron-controller/templates/neutron-network.conf deleted file mode 100644 index 318e4c0..0000000 --- a/ansible/roles/neutron-controller/templates/neutron-network.conf +++ /dev/null @@ -1,466 +0,0 @@ 
-[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. 
See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. 
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. 
-# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/neutron-controller/templates/neutron_init.sh b/ansible/roles/neutron-controller/templates/neutron_init.sh deleted file mode 100644 index b92e202..0000000 --- a/ansible/roles/neutron-controller/templates/neutron_init.sh +++ /dev/null @@ -1,4 +0,0 @@ -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True - -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} - diff --git a/ansible/roles/neutron-controller/vars/Debian.yml b/ansible/roles/neutron-controller/vars/Debian.yml new file mode 100644 index 0000000..70d652c --- /dev/null +++ b/ansible/roles/neutron-controller/vars/Debian.yml @@ -0,0 +1,14 @@ 
+############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - neutron-server + - neutron-plugin-ml2 + +services: [] diff --git a/ansible/roles/neutron-controller/vars/RedHat.yml b/ansible/roles/neutron-controller/vars/RedHat.yml new file mode 100644 index 0000000..89f41de --- /dev/null +++ b/ansible/roles/neutron-controller/vars/RedHat.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-neutron + - openstack-neutron-ml2 + +services: [] diff --git a/ansible/roles/neutron-controller/vars/main.yml b/ansible/roles/neutron-controller/vars/main.yml new file mode 100644 index 0000000..928b0bd --- /dev/null +++ b/ansible/roles/neutron-controller/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: + - neutron-server + diff --git a/ansible/roles/neutron-network/files/vpnaas.filters b/ansible/roles/neutron-network/files/vpnaas.filters new file mode 100644 index 0000000..c5eaa80 --- /dev/null +++ b/ansible/roles/neutron-network/files/vpnaas.filters @@ -0,0 +1,7 @@ +[Filters] +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +ipsec: CommandFilter, ipsec, root +strongswan: CommandFilter, strongswan, root +neutron_netns_wrapper: CommandFilter, neutron-vpn-netns-wrapper, root +neutron_netns_wrapper_local: CommandFilter, /usr/local/bin/neutron-vpn-netns-wrapper, root diff --git a/ansible/roles/neutron-network/files/xorp b/ansible/roles/neutron-network/files/xorp new file mode 100644 index 0000000..5a48610 --- /dev/null +++ b/ansible/roles/neutron-network/files/xorp @@ -0,0 +1,23 @@ +# Defaults for xorp initscript +# sourced by /etc/init.d/xorp +# installed at /etc/default/xorp by the maintainer scripts + +# +# This is a POSIX shell fragment +# + +# Master system-wide xorp switch. The initscript +# will not run if it is not set to yes. + +RUN="yes" + + +# Additional options that are passed to the rtrmgr Daemon. +# e.g. 
: +# -a Host allowed by the finder +# -n Subnet allowed by the finder +# -v Print verbose information +# -b Specify boot file +# -d Run as a daemon, detach from tty + +DAEMON_OPTS="-b /etc/xorp/config.boot " diff --git a/ansible/roles/neutron-network/handlers/main.yml b/ansible/roles/neutron-network/handlers/main.yml index cbfc8e9..9a9c9b4 100644 --- a/ansible/roles/neutron-network/handlers/main.yml +++ b/ansible/roles/neutron-network/handlers/main.yml @@ -1,21 +1,31 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart neutron-plugin-openvswitch-agent - service: name=neutron-plugin-openvswitch-agent state=restarted - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" +- name: restart neutron network relation service + service: name={{ item }} state=restarted enabled=yes + with_flattened: + - services_noarch + - services -- name: restart neutron-l3-agent - service: name=neutron-l3-agent state=restarted +- name: restart openvswitch agent service + service: name=neutron-openvswitch-agent state=restarted enabled=yes + +- name: restart vpn agent service + service: name={{ item }} state=restarted enabled=yes + with_items: + - neutron-vpn-agent + - strongswan - name: kill dnsmasq command: killall dnsmasq ignore_errors: True -- name: restart neutron-dhcp-agent - service: name=neutron-dhcp-agent state=restarted - -- name: restart neutron-metadata-agent - service: name=neutron-metadata-agent state=restarted - - name: restart xorp - service: name=xorp state=restarted sleep=10 + service: name=xorp state=restarted enabled=yes 
sleep=10 ignore_errors: True diff --git a/ansible/roles/neutron-network/tasks/firewall.yml b/ansible/roles/neutron-network/tasks/firewall.yml new file mode 100755 index 0000000..aec714f --- /dev/null +++ b/ansible/roles/neutron-network/tasks/firewall.yml @@ -0,0 +1,30 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install firewall packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: firewall_packages + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: update firewall related conf + shell: crudini --set --list /etc/neutron/neutron.conf DEFAULT service_plugins firewall diff --git a/ansible/roles/neutron-network/tasks/igmp-router.yml b/ansible/roles/neutron-network/tasks/igmp-router.yml index d6f38a0..2ce1651 100644 --- a/ansible/roles/neutron-network/tasks/igmp-router.yml +++ b/ansible/roles/neutron-network/tasks/igmp-router.yml @@ -1,6 +1,18 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- - name: Install XORP to provide IGMP router functionality - apt: pkg=xorp + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: xorp_packages + +- name: create xorp directory + file: path=/etc/xorp state=directory - name: configure xorp template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot @@ -8,7 +20,7 @@ - restart xorp - name: set xorp defaults - lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes + copy: src=xorp dest=/etc/default/xorp notify: - restart xorp diff --git a/ansible/roles/neutron-network/tasks/main.yml b/ansible/roles/neutron-network/tasks/main.yml index 4a804ef..9b41ac1 100644 --- a/ansible/roles/neutron-network/tasks/main.yml +++ b/ansible/roles/neutron-network/tasks/main.yml @@ -1,4 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- +- include_vars: "{{ ansible_os_family }}.yml" + - name: activate ipv4 forwarding sysctl: name=net.ipv4.ip_forward value=1 state=present reload=yes @@ -11,80 +21,6 @@ sysctl: name=net.ipv4.conf.default.rp_filter value=0 state=present reload=yes -- name: install neutron network related packages - apt: name={{ item }} state=present force=yes - with_items: - - neutron-plugin-ml2 - - openvswitch-datapath-dkms - - openvswitch-switch - - neutron-l3-agent - - neutron-dhcp-agent - -- name: install neutron openvswitch agent - apt: name=neutron-plugin-openvswitch-agent - state=present force=yes - when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: config neutron - template: src=neutron-network.conf - dest=/etc/neutron/neutron.conf backup=yes - notify: - - restart neutron-plugin-openvswitch-agent - - restart neutron-l3-agent - - kill dnsmasq - - restart neutron-dhcp-agent - - restart neutron-metadata-agent - -- name: config l3 agent - template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini - backup=yes - notify: - - restart neutron-l3-agent - -- name: config dhcp agent - template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini - backup=yes - notify: - - kill dnsmasq - - restart neutron-dhcp-agent - -- name: update dnsmasq-neutron.conf - template: src=dnsmasq-neutron.conf - dest=/etc/neutron/dnsmasq-neutron.conf - notify: - - kill dnsmasq - - restart neutron-dhcp-agent - -- name: config metadata agent - template: src=metadata_agent.ini - dest=/etc/neutron/metadata_agent.ini backup=yes - notify: - - restart neutron-metadata-agent - -- name: config ml2 plugin - template: src=ml2_conf.ini - dest=/etc/neutron/plugins/ml2/ml2_conf.ini - backup=yes - notify: 
- - restart neutron-plugin-openvswitch-agent - -- meta: flush_handlers - -- name: add br-int - openvswitch_bridge: bridge=br-int state=present - -- name: add br-ex - openvswitch_bridge: bridge=br-ex state=present - when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: assign a port to br-ex for physical ext interface - openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }} - state=present - when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- include: igmp-router.yml - when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" - - name: assert kernel support for vxlan command: modinfo -F version vxlan when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" @@ -95,10 +31,81 @@ failed_when: iproute_out.rc == 255 when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install neutron network related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: generate neutron network service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: fix openstack neutron plugin config file + shell: | + sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service + systemctl daemon-reload + when: ansible_os_family == 'RedHat' + +- name: config l3 agent + template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini + backup=yes + +- name: config dhcp agent + template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini + backup=yes + +- name: update dnsmasq-neutron.conf + template: src=templates/dnsmasq-neutron.conf + dest=/etc/neutron/dnsmasq-neutron.conf + +- name: config metadata agent + template: 
src=metadata_agent.ini + dest=/etc/neutron/metadata_agent.ini backup=yes + +- name: config ml2 plugin + template: src=templates/ml2_conf.ini + dest=/etc/neutron/plugins/ml2/ml2_conf.ini + backup=yes + +- name: ln plugin.ini + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link + +- name: config neutron + template: src=templates/neutron.conf + dest=/etc/neutron/neutron.conf backup=yes + +- name: force mtu to 1450 for vxlan + lineinfile: + dest: /etc/neutron/dnsmasq-neutron.conf + regexp: '^dhcp-option-force' + line: 'dhcp-option-force=26,1450' + when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" + +- include: firewall.yml + when: enable_fwaas == True + +- include: vpn.yml + when: enable_vpnaas == True + - include: odl.yml when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" -- name: restart ovs service - service: name=openvswitch-switch state=restarted +- name: restart neutron network relation service + service: name={{ item }} state=restarted enabled=yes + with_flattened: + - services_noarch + - services - meta: flush_handlers diff --git a/ansible/roles/neutron-network/tasks/odl.yml b/ansible/roles/neutron-network/tasks/odl.yml index 5817a2f..dd1e478 100644 --- a/ansible/roles/neutron-network/tasks/odl.yml +++ b/ansible/roles/neutron-network/tasks/odl.yml @@ -1,6 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- - name: ovs set manager - command: ovs-vsctl set-manager tcp:{{ odl_controller }}:6640 + command: ovs-vsctl set-manager tcp:{{ controller }}:6640 - name: get ovs uuid shell: ovs-vsctl get Open_vSwitch . _uuid diff --git a/ansible/roles/neutron-network/tasks/vpn.yml b/ansible/roles/neutron-network/tasks/vpn.yml new file mode 100755 index 0000000..9722ab7 --- /dev/null +++ b/ansible/roles/neutron-network/tasks/vpn.yml @@ -0,0 +1,47 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install vpn packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: vpn_packages + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: update vpn related conf + shell: crudini --set /etc/neutron/l3_agent.ini vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver; + crudini --set --list /etc/neutron/neutron.conf DEFAULT service_plugins vpnaas + crudini --set /etc/neutron/neutron_vpnaas.conf service_providers 
service_provider 'VPN:strongswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default' + +- name: make sure rootwrap.d dir exist + file: path=/etc/neutron/rootwrap.d state=directory mode=0755 + +- name: update rootwrap + copy: src=vpnaas.filters dest=/etc/neutron/rootwrap.d/vpnaas.filters + +- name: enable vpn service + service: name={{ item }} state=started enabled=yes + with_items: + - neutron-vpn-agent + - strongswan + notify: + - restart vpn agent service + diff --git a/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/ansible/roles/neutron-network/templates/etc/xorp/config.boot b/ansible/roles/neutron-network/templates/etc/xorp/config.boot index 32caf96..426a8fd 100644 --- a/ansible/roles/neutron-network/templates/etc/xorp/config.boot +++ b/ansible/roles/neutron-network/templates/etc/xorp/config.boot @@ -1,6 +1,6 @@ interfaces { restore-original-config-on-shutdown: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + interface {{ internal_nic }} { description: "Internal pNodes interface" disable: false default-system-config @@ -10,8 +10,8 @@ interfaces { protocols { igmp { disable: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + interface {{ internal_nic }} { + vif {{ internal_nic }} { disable: false version: 3 } diff --git a/ansible/roles/neutron-network/templates/l3_agent.ini b/ansible/roles/neutron-network/templates/l3_agent.ini index b394c00..5f49934 100644 --- a/ansible/roles/neutron-network/templates/l3_agent.ini +++ 
b/ansible/roles/neutron-network/templates/l3_agent.ini @@ -45,7 +45,7 @@ handle_internal_only_routers = True # Name of bridge used for external network traffic. This should be set to # empty value for the linux bridge. when this parameter is set, each L3 agent # can be associated with no more than one external network. -external_network_bridge = br-ex +external_network_bridge = # TCP Port used by Neutron metadata server metadata_port = 9697 diff --git a/ansible/roles/neutron-network/templates/metadata_agent.ini b/ansible/roles/neutron-network/templates/metadata_agent.ini index edde22c..87937cc 100644 --- a/ansible/roles/neutron-network/templates/metadata_agent.ini +++ b/ansible/roles/neutron-network/templates/metadata_agent.ini @@ -3,8 +3,8 @@ debug = True # The Neutron user information for accessing the Neutron API. -auth_url = http://{{ identity_host }}:5000/v2.0 -auth_region = RegionOne +auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +auth_region = regionOne # Turn off verification of the certificate for ssl # auth_insecure = False # Certificate Authority public key (CA cert) file for ssl @@ -17,7 +17,7 @@ admin_password = {{ NEUTRON_PASS }} # endpoint_type = adminURL # IP address used by Nova metadata server -nova_metadata_ip = {{ compute_controller_host }} +nova_metadata_ip = {{ internal_vip.ip }} # TCP Port used by Nova metadata server nova_metadata_port = 8775 diff --git a/ansible/roles/neutron-network/templates/neutron-network.conf b/ansible/roles/neutron-network/templates/neutron-network.conf deleted file mode 100644 index 318e4c0..0000000 --- a/ansible/roles/neutron-network/templates/neutron-network.conf +++ /dev/null @@ -1,466 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. 
This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. 
-# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. 
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. 
-# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/neutron-network/templates/neutron.conf b/ansible/roles/neutron-network/templates/neutron.conf deleted file mode 100644 index 28bb2ba..0000000 --- a/ansible/roles/neutron-network/templates/neutron.conf +++ /dev/null @@ -1,467 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ VERBOSE }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. 
-state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. 
-# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. 
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. 
A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/neutron-network/templates/neutron_init.sh b/ansible/roles/neutron-network/templates/neutron_init.sh deleted file mode 100644 index b92e202..0000000 --- a/ansible/roles/neutron-network/templates/neutron_init.sh +++ /dev/null @@ -1,4 +0,0 @@ -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True - -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} - diff --git a/ansible/roles/neutron-network/templates/nova.conf b/ansible/roles/neutron-network/templates/nova.conf deleted file mode 100644 index dfb4b93..0000000 --- a/ansible/roles/neutron-network/templates/nova.conf +++ /dev/null @@ -1,68 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf 
-dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = 0.0.0.0 -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ image_host }} - -[neutron] -url = http://{{ network_server_host }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ identity_host }}:35357/v2.0 diff --git 
a/ansible/roles/neutron-network/vars/Debian.yml b/ansible/roles/neutron-network/vars/Debian.yml new file mode 100644 index 0000000..86d1af6 --- /dev/null +++ b/ansible/roles/neutron-network/vars/Debian.yml @@ -0,0 +1,25 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - neutron-plugin-ml2 + - openvswitch-datapath-dkms + - openvswitch-switch + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-plugin-openvswitch-agent + +services: + - openvswitch-switch + - neutron-plugin-openvswitch-agent + +openvswitch_agent: neutron-plugin-openvswitch-agent + +xorp_packages: + - xorp diff --git a/ansible/roles/neutron-network/vars/RedHat.yml b/ansible/roles/neutron-network/vars/RedHat.yml new file mode 100644 index 0000000..aa35dde --- /dev/null +++ b/ansible/roles/neutron-network/vars/RedHat.yml @@ -0,0 +1,29 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-neutron-ml2 + - openstack-neutron-openvswitch + +vpn_packages: + - openstack-neutron-vpn-agent + - strongswan + +firewall_packages: + - openstack-neutron-fwaas + +services: + - openvswitch + - neutron-openvswitch-agent + +openvswitch_agent: neutron-openvswitch-agent + +xorp_packages: + - openssl098e + #- xorp diff --git a/ansible/roles/neutron-network/vars/main.yml b/ansible/roles/neutron-network/vars/main.yml new file mode 100644 index 0000000..ddd983e --- /dev/null +++ b/ansible/roles/neutron-network/vars/main.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-metadata-agent diff --git a/ansible/roles/nova-compute/handlers/main.yml b/ansible/roles/nova-compute/handlers/main.yml index 71c8d46..a50ce3d 100644 --- a/ansible/roles/nova-compute/handlers/main.yml +++ b/ansible/roles/nova-compute/handlers/main.yml @@ -1,3 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart nova-compute - service: name=nova-compute state=restarted +- name: restart nova-compute services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/ansible/roles/nova-compute/tasks/main.yml b/ansible/roles/nova-compute/tasks/main.yml index 813864a..7ee60ba 100644 --- a/ansible/roles/nova-compute/tasks/main.yml +++ b/ansible/roles/nova-compute/tasks/main.yml @@ -1,16 +1,50 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + - name: install nova-compute related packages - apt: name=nova-compute-kvm state=present force=yes + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: update nova-compute conf + template: src=templates/{{ item }} dest=/etc/nova/{{ item }} + with_items: + - nova.conf + notify: + - restart nova-compute services - name: update nova-compute conf template: src={{ 
item }} dest=/etc/nova/{{ item }} with_items: - - nova.conf - nova-compute.conf notify: - - restart nova-compute + - restart nova-compute services -- meta: flush_handlers +- name: generate neutron control service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) - name: remove nova sqlite db shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed + +- meta: flush_handlers diff --git a/ansible/roles/nova-compute/templates/nova-compute.conf b/ansible/roles/nova-compute/templates/nova-compute.conf index 1ef5590..1ac775b 100644 --- a/ansible/roles/nova-compute/templates/nova-compute.conf +++ b/ansible/roles/nova-compute/templates/nova-compute.conf @@ -1,4 +1,11 @@ [DEFAULT] compute_driver=libvirt.LibvirtDriver +force_raw_images = true [libvirt] +{% if deploy_type == 'virtual' %} virt_type=qemu +{% else %} +virt_type=kvm +{% endif %} +images_type = raw +mem_stats_period_seconds=0 diff --git a/ansible/roles/nova-compute/templates/nova.conf b/ansible/roles/nova-compute/templates/nova.conf deleted file mode 100644 index dfb4b93..0000000 --- a/ansible/roles/nova-compute/templates/nova.conf +++ /dev/null @@ -1,68 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = 0.0.0.0 
-vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ image_host }} - -[neutron] -url = http://{{ network_server_host }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ identity_host }}:35357/v2.0 diff --git a/ansible/roles/nova-compute/vars/Debian.yml b/ansible/roles/nova-compute/vars/Debian.yml new file mode 100644 index 0000000..20b1141 --- /dev/null +++ b/ansible/roles/nova-compute/vars/Debian.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +packages: + - nova-compute-kvm + +services: + - nova-compute diff --git a/ansible/roles/nova-compute/vars/RedHat.yml b/ansible/roles/nova-compute/vars/RedHat.yml new file mode 100644 index 0000000..dab2cfd --- /dev/null +++ b/ansible/roles/nova-compute/vars/RedHat.yml @@ -0,0 +1,16 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-nova-compute + - sysfsutils + +services: + - libvirtd + - openstack-nova-compute diff --git a/ansible/roles/nova-compute/vars/main.yml b/ansible/roles/nova-compute/vars/main.yml new file mode 100644 index 0000000..f6fef74 --- /dev/null +++ b/ansible/roles/nova-compute/vars/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] diff --git a/ansible/roles/nova-controller/handlers/main.yml b/ansible/roles/nova-controller/handlers/main.yml index c830296..0e512a7 100644 --- a/ansible/roles/nova-controller/handlers/main.yml +++ b/ansible/roles/nova-controller/handlers/main.yml @@ -1,24 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: restart nova-api - service: name=nova-api state=restarted - -- name: restart nova-cert - service: name=nova-cert state=restarted - -- name: restart nova-consoleauth - service: name=nova-consoleauth state=restarted - -- name: restart nova-scheduler - service: name=nova-scheduler state=restarted - -- name: restart nova-conductor - service: name=nova-conductor state=restarted - -- name: restart nova-novncproxy - service: name=nova-novncproxy state=restarted +- name: restart nova service + service: name={{ item}} state=restarted enabled=yes + with_items: services | union(services_noarch) - name: remove nova-sqlite-db shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed - -- name: restart neutron-server - service: name=neutron-server state=restarted diff --git a/ansible/roles/nova-controller/tasks/main.yml b/ansible/roles/nova-controller/tasks/main.yml index 
85e3a8a..1ebe628 100644 --- a/ansible/roles/nova-controller/tasks/main.yml +++ b/ansible/roles/nova-controller/tasks/main.yml @@ -1,37 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## --- -- name: install nova related packages - apt: name={{ item }} state=present force=yes - with_items: - - nova-api - - nova-cert - - nova-conductor - - nova-consoleauth - - nova-novncproxy - - nova-scheduler - - python-novaclient - - python-oslo.rootwrap +- include: nova_install.yml + tags: + - install + - nova_install + - nova -- name: update nova conf - template: src=nova.conf - dest=/etc/nova/nova.conf - backup=yes - notify: - - restart nova-api - - restart nova-cert - - restart nova-consoleauth - - restart nova-scheduler - - restart nova-conductor - - restart nova-novncproxy - - remove nova-sqlite-db - -- name: nova db sync - command: su -s /bin/sh -c "nova-manage db sync" nova - notify: - - restart nova-api - - restart nova-cert - - restart nova-consoleauth - - restart nova-scheduler - - restart nova-conductor - - restart nova-novncproxy +- include: nova_config.yml + when: inventory_hostname == groups['controller'][0] + tags: + - config + - nova_config + - nova - meta: flush_handlers diff --git a/ansible/roles/nova-controller/tasks/nova_config.yml b/ansible/roles/nova-controller/tasks/nova_config.yml new file mode 100644 index 0000000..bf1b0f6 --- /dev/null +++ b/ansible/roles/nova-controller/tasks/nova_config.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES 
CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: nova db sync + nova_manage: action=dbsync + notify: + - restart nova service + +- meta: flush_handlers diff --git a/ansible/roles/nova-controller/tasks/nova_install.yml b/ansible/roles/nova-controller/tasks/nova_install.yml new file mode 100644 index 0000000..865ad2e --- /dev/null +++ b/ansible/roles/nova-controller/tasks/nova_install.yml @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install nova related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: generate nova control service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: update nova conf + template: src=templates/nova.conf + dest=/etc/nova/nova.conf + backup=yes + notify: + - restart nova service + - remove nova-sqlite-db diff 
--git a/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf b/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/ansible/roles/nova-controller/templates/l3_agent.ini b/ansible/roles/nova-controller/templates/l3_agent.ini index b394c00..5f49934 100644 --- a/ansible/roles/nova-controller/templates/l3_agent.ini +++ b/ansible/roles/nova-controller/templates/l3_agent.ini @@ -45,7 +45,7 @@ handle_internal_only_routers = True # Name of bridge used for external network traffic. This should be set to # empty value for the linux bridge. when this parameter is set, each L3 agent # can be associated with no more than one external network. -external_network_bridge = br-ex +external_network_bridge = # TCP Port used by Neutron metadata server metadata_port = 9697 diff --git a/ansible/roles/nova-controller/templates/metadata_agent.ini b/ansible/roles/nova-controller/templates/metadata_agent.ini index edde22c..87937cc 100644 --- a/ansible/roles/nova-controller/templates/metadata_agent.ini +++ b/ansible/roles/nova-controller/templates/metadata_agent.ini @@ -3,8 +3,8 @@ debug = True # The Neutron user information for accessing the Neutron API. 
-auth_url = http://{{ identity_host }}:5000/v2.0 -auth_region = RegionOne +auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +auth_region = regionOne # Turn off verification of the certificate for ssl # auth_insecure = False # Certificate Authority public key (CA cert) file for ssl @@ -17,7 +17,7 @@ admin_password = {{ NEUTRON_PASS }} # endpoint_type = adminURL # IP address used by Nova metadata server -nova_metadata_ip = {{ compute_controller_host }} +nova_metadata_ip = {{ internal_vip.ip }} # TCP Port used by Nova metadata server nova_metadata_port = 8775 diff --git a/ansible/roles/nova-controller/templates/ml2_conf.ini b/ansible/roles/nova-controller/templates/ml2_conf.ini deleted file mode 100644 index 9972842..0000000 --- a/ansible/roles/nova-controller/templates/ml2_conf.ini +++ /dev/null @@ -1,108 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. 
-# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -flat_networks = external -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 1001:4095 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -vxlan_group = 239.1.1.1 -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. 
-# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/ansible/roles/nova-controller/templates/neutron-network.conf b/ansible/roles/nova-controller/templates/neutron-network.conf deleted file mode 100644 index 318e4c0..0000000 --- a/ansible/roles/nova-controller/templates/neutron-network.conf +++ /dev/null @@ -1,466 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. 
-state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. 
-# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. 
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. 
-# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/nova-controller/templates/neutron.conf b/ansible/roles/nova-controller/templates/neutron.conf deleted file mode 100644 index 28bb2ba..0000000 --- a/ansible/roles/nova-controller/templates/neutron.conf +++ /dev/null @@ -1,467 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ VERBOSE }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. 
-state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. 
-# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. 
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = guest -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false - -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. 
-# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. 
-# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ compute_controller_host }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. 
A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/v2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. 
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/ansible/roles/nova-controller/templates/neutron_init.sh b/ansible/roles/nova-controller/templates/neutron_init.sh index b92e202..8ab4324 100644 --- a/ansible/roles/nova-controller/templates/neutron_init.sh +++ b/ansible/roles/nova-controller/templates/neutron_init.sh @@ -1,3 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## # neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True # neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} diff --git a/ansible/roles/nova-controller/templates/nova.conf b/ansible/roles/nova-controller/templates/nova.conf deleted file mode 100644 index dfb4b93..0000000 --- a/ansible/roles/nova-controller/templates/nova.conf +++ /dev/null @@ -1,68 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = 0.0.0.0 -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html - -novncproxy_host = {{ 
internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[keystone_authtoken] -auth_uri = http://{{ identity_host }}:5000/2.0 -identity_uri = http://{{ identity_host }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ image_host }} - -[neutron] -url = http://{{ network_server_host }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ identity_host }}:35357/v2.0 diff --git a/ansible/roles/nova-controller/vars/Debian.yml b/ansible/roles/nova-controller/vars/Debian.yml new file mode 100644 index 0000000..26178cf --- /dev/null +++ b/ansible/roles/nova-controller/vars/Debian.yml @@ -0,0 +1,25 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-novncproxy + - nova-scheduler + - python-oslo.rootwrap + +services: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-novncproxy + - nova-scheduler diff --git a/ansible/roles/nova-controller/vars/RedHat.yml b/ansible/roles/nova-controller/vars/RedHat.yml new file mode 100644 index 0000000..62913f9 --- /dev/null +++ b/ansible/roles/nova-controller/vars/RedHat.yml @@ -0,0 +1,24 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-nova-api + - openstack-nova-cert + - openstack-nova-conductor + - openstack-nova-console + - openstack-nova-novncproxy + - openstack-nova-scheduler + +services: + - openstack-nova-api + - openstack-nova-cert + - openstack-nova-conductor + - openstack-nova-consoleauth + - openstack-nova-novncproxy + - openstack-nova-scheduler diff --git a/ansible/roles/nova-controller/vars/main.yml b/ansible/roles/nova-controller/vars/main.yml new file mode 100644 index 0000000..f6fef74 --- /dev/null +++ b/ansible/roles/nova-controller/vars/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and 
others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] diff --git a/ansible/roles/odl_cluster/files/install_jdk8.tar b/ansible/roles/odl_cluster/files/install_jdk8.tar new file mode 100755 index 0000000000000000000000000000000000000000..faaaeb393d65b34479915b1a80e3881519d454e3 GIT binary patch literal 4608 zcmeH}&u@Y-6vsL9S9F=DCD0aHFiSkRY2sX#EpAz|lL5gAA`n1hw(NgjQR}Z|bqCl% z=z*r?+rHQL`g!1mX=Vk%(08UyD3#L?BFHpN39Z|4pU+BIqXyQAMhq$;j0n~wK+AFG zaMw9If5KckoUP2r+t2_0Kusq_1%c&Ln17Ak4W56BFw4KL>sSK+l9P((|MCLbUI=X~ zoyhWR>Uar&^E82hXG4E6gEFH7D9evAfKi-5Rwk6@oMI*L!ue8J8boCR_5A}Vw?_+S zi^B=F6st`Y4Jkf)VJ$4BsYHo23S4;<17%}u|2pQDuK;wj5sZ^)>;;<)EQJ;gtvbj`nJm_$d6e{2|f%|DM{nSJVJZnyw_?YVQ`xf_Vc+)4*su@d@; zD{s_K?Ux>CeCFNP^+9Fv_p#55km$8zpHeWJxrE#8w_n@C-s3~}4=3t$2RCo0#L%(N zvD*CLzj*xf|2g6JpHO|rKUwilv03w9Cii@AcNs@Xw&lO|JEHPdy5e7;T=BnA=is** k`BQ{ +# mkdir -p {{ odl_home }}/configuration/initial; + +#- name: create akka config +# template: +# src: akka.conf +# dest: "{{ odl_home }}/configuration/initial/akka.conf" +# notify: +# - restart odl service + + +#- name: create module-shards config +# template: +# src: module-shards.conf +# dest: "{{ odl_home }}/configuration/initial/module-shards.conf" +# notify: +# - restart odl service + +#- name: copy Jolokia-OSGi config +# shell: > +# cp -r jolokia {{ odl_home }}system/org/; + +#- name: copy Jolokia-OSGi config +# template: +# src: jolokia +# dest: "{{ odl_home }}/system/org/" +# notify: +# - restart odl service + + +#- name: mkdir Jolokia-OSGi directory +# shell: > +# mkdir -p {{ odl_home }}system/org/jolokia; +# mkdir -p {{ odl_home }}system/org/jolokia/jolokia-osgi; +# mkdir -p {{ odl_home 
}}system/org/jolokia/jolokia-osgi/1.1.5; + + +#- name: copy Jolokia-OSGi config +# template: src={{ item.src }} dest={{ item.dest }} +# with_items: +# - src: "jolokia-osgi-1.1.5-features.xml" +# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5-features.xml" +# - src: "jolokia-osgi-1.1.5.jar.sha1" +# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar.sha1" +# - src: "jolokia-osgi-1.1.5.jar" +# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar" + +#- name: copy Jolokia-OSGi jar config +# copy: src=roles/odl_cluster/templates/jolokia-osgi-1.1.5.jar dest="{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/" + +- name: remove karaf data directory + shell: rm -rf {{ odl_home }}/data/*; + +#- name: chown OpenDaylight Directory and Files +# shell: > +# chown -R odl:odl "{{ odl_home }}"; +# chown odl:odl "{{ service_file.dst }}"; + + +########################################################################################################## +################################ OpenDayLight connect with OpenStack ################################ +########################################################################################################## +- name: turn off neutron-server neutron-plugins-openvswitch-agent Daemon on control node + shell: > + sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ; + sed -i '/neutron-server/d' /opt/service; + sed -i '/keepalived/d' /opt/service; + +- name: turn off neutron-server on control node + service: name=neutron-server state=stopped + +- name: turn off keepalived on control node + service: name=keepalived state=stopped + when: ansible_os_family == "Debian" + +- name: chown opendaylight directory and files + shell: > + chown -R odl:odl "{{ odl_home }}"; + chown odl:odl "{{ service_file.dst }}"; + +- name: start opendaylight + service: name=opendaylight state=started + when: ansible_os_family == "Debian" + +- name: set opendaylight 
autostart + shell: chkconfig opendaylight on + when: ansible_os_family == "RedHat" + +- name: start opendaylight + shell: service opendaylight start + when: ansible_os_family == "RedHat" + +- name: check if opendaylight running + shell: netstat -lpen --tcp | grep java | grep 6653; while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep java | grep 6653; done + +- name: run openvswitch script + include: openvswitch.yml + +#- name: Configure Neutron1 +# shell: > +# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight; +# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan; + +#- name: Create ML2 Configuration File +# template: +# src: ml2_conf.sh +# dest: "/opt/ml2_conf.sh" +# mode: 0777 + +#- name: Execute ML2 Configuration File +# command: su -s /bin/sh -c "/opt/ml2_conf.sh;" + + +- name: configure l2 configuration + shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-prv; + when: odl_l3_agent == "Disable" + +- name: configure l3 configuration + shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-ex; + when: odl_l3_agent == "Enable" + +- name: configure odl l3 driver + shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin; + when: odl_l3_agent == "Enable" + + + +- name: drop and recreate neutron database + shell: mysql -e "drop database if exists neutron;"; + mysql -e "create database neutron character set utf8;"; + mysql -e "grant all on neutron.* to 'neutron'@'%' identified by '{{ NEUTRON_DBPASS }}';"; + su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron; + when: inventory_hostname == haproxy_hosts.keys()[0] + tags: + - test_odl + diff --git a/ansible/roles/odl_cluster/tasks/openvswitch.yml b/ansible/roles/odl_cluster/tasks/openvswitch.yml new file mode 100755 index 
0000000..9c476bf --- /dev/null +++ b/ansible/roles/odl_cluster/tasks/openvswitch.yml @@ -0,0 +1,148 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +#- name: Install Crudini +# apt: name={{ item }} state=present +# with_items: +# - crudini + +- name: install compute packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: compute_packages | union(compute_packages_noarch) + +- name: remove neutron-openvswitch-agent service daemon + shell: sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ; + +- name: shut down and disable Neutron's openvswitch agent services + service: name={{ service_ovs_agent_name }} state=stopped enabled=no + +- name: remove Neutron's openvswitch agent services + shell: > + update-rc.d -f {{ service_ovs_agent_name }} remove; + mv /etc/init.d/{{ service_ovs_agent_name }} /home/{{ service_ovs_agent_name }}; + mv /etc/init/{{ service_ovs_agent_name }}.conf /home/{{ service_ovs_agent_name }}.conf; + when: ansible_os_family == "Debian" + + +- name: Stop the Open vSwitch service and clear existing OVSDB + shell: > + service {{ service_ovs_name }} stop ; + rm -rf /var/log/openvswitch/* ; + rm -rf /etc/openvswitch/conf.db ; + service {{ service_ovs_name }} start ; + +- name: set opendaylight as the manager + command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;" + +- name: check br-int + shell: ovs-vsctl list-br | grep br-int; while [ $? 
-ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done + +- name: set local ip in openvswitch + shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '}; + +#' + +################################################################## +########### Recover External network for odl l3 ################# +################################################################## + +- name: check br-ex + shell: ovs-vsctl list-br | grep br-ex; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done + when: odl_l3_agent == "Enable" + +- name: add ovs uplink + openvswitch_port: bridge=br-ex port={{ item["interface"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Enable" + +- name: wait 10 seconds + shell: sleep 10 + when: odl_l3_agent == "Enable" + +- name: set external nic in openvswitch + shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config:provider_mappings=br-ex:{{ item["interface"] }} + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Enable" + +- name: copy recovery script + copy: src={{ item }} dest=/opt/setup_networks + with_items: + - recover_network_odl_l3.py + - setup_networks_odl_l3.py + when: odl_l3_agent == "Enable" + +- name: recover external script + shell: python /opt/setup_networks/recover_network_odl_l3.py + when: odl_l3_agent == "Enable" + +- name: update keepalived info + template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf + when: inventory_hostname in groups['odl'] and odl_l3_agent == "Enable" + +- name: modify net-init + shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' /etc/init.d/net_init + when: odl_l3_agent == "Enable" + +################################################################## +########### Recover External network for odl l2 ################# 
+################################################################## + +- name: add ovs bridge + openvswitch_bridge: bridge={{ item["name"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Disable" + +- name: add ovs uplink + openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Disable" + +- name: copy recovery script + copy: src={{ item }} dest=/opt/setup_networks + with_items: + - recover_network.py + when: odl_l3_agent == "Disable" + +- name: recover external script + shell: python /opt/setup_networks/recover_network.py + when: odl_l3_agent == "Disable" + +################################################################## + + +- name: restart keepalived to recover external IP + shell: service keepalived restart + when: inventory_hostname in groups['odl'] + ignore_errors: True + + + +################################################################## +################################################################## +################################################################## +- name: configure opendaylight -> ml2 + shell: > + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True; + +#- name: Adjust Service Daemon +# shell: > +# sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ; +# echo opendaylight >> /opt/service ; + +- name: copy ml2 configuration script + template: + src: ml2_conf.sh + dest: "/opt/ml2_conf.sh" + mode: 0777 + +- name: execute ml2 configuration script + command: su -s /bin/sh -c "/opt/ml2_conf.sh;" diff --git a/ansible/roles/odl_cluster/templates/akka.conf b/ansible/roles/odl_cluster/templates/akka.conf new file 
mode 100755 index 0000000..7779849 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/akka.conf @@ -0,0 +1,105 @@ + +odl-cluster-data { + bounded-mailbox { + mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" + mailbox-capacity = 1000 + mailbox-push-timeout-time = 100ms + } + + metric-capture-enabled = true + + akka { + loglevel = "INFO" + loggers = ["akka.event.slf4j.Slf4jLogger"] + + actor { + + provider = "akka.cluster.ClusterActorRefProvider" + serializers { + java = "akka.serialization.JavaSerializer" + proto = "akka.remote.serialization.ProtobufSerializer" + } + + serialization-bindings { + "com.google.protobuf.Message" = proto + + } + } + remote { + log-remote-lifecycle-events = off + netty.tcp { + hostname = "{{ hostvars[inventory_hostname]['ansible_' + internal_nic].ipv4.address }}" + port = 2550 + maximum-frame-size = 419430400 + send-buffer-size = 52428800 + receive-buffer-size = 52428800 + } + } + + cluster { + seed-nodes = [ +{% for host in groups['odl'] %} + {% if loop.last %} + "akka.tcp://opendaylight-cluster-data@{{ hostvars[host]['ansible_' + internal_nic].ipv4.address }}:2550" + {% else %} + "akka.tcp://opendaylight-cluster-data@{{ hostvars[host]['ansible_' + internal_nic].ipv4.address }}:2550", + {% endif %} +{% endfor %} + ] + + auto-down-unreachable-after = 10s + + roles = [ +{% set key = 0 %} +{% for host in groups['odl'] %} + {% set key = key + 1 %} + {% if hostvars[host]['ansible_' + internal_nic].ipv4.address == hostvars[inventory_hostname]['ansible_' + internal_nic].ipv4.address %} + "member-{{ key }}" + {% endif %} +{% endfor %} + ] + + } + } +} + +odl-cluster-rpc { + bounded-mailbox { + mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" + mailbox-capacity = 1000 + mailbox-push-timeout-time = 100ms + } + + metric-capture-enabled = true + + akka { + loglevel = "INFO" + loggers = ["akka.event.slf4j.Slf4jLogger"] + + actor { + provider = 
"akka.cluster.ClusterActorRefProvider" + + } + remote { + log-remote-lifecycle-events = off + netty.tcp { + hostname = "{{ hostvars[inventory_hostname]['ansible_' + internal_nic].ipv4.address }}" + port = 2551 + } + } + + cluster { + seed-nodes = [ +{% for host in groups['odl'] %} + {% if loop.last %} + "akka.tcp://odl-cluster-rpc@{{ hostvars[host]['ansible_' + internal_nic].ipv4.address }}:2551" + {% else %} + "akka.tcp://odl-cluster-rpc@{{ hostvars[host]['ansible_' + internal_nic].ipv4.address }}:2551", + {% endif %} +{% endfor %} + ] + + auto-down-unreachable-after = 10s + } + } +} diff --git a/ansible/roles/odl_cluster/templates/custom.properties b/ansible/roles/odl_cluster/templates/custom.properties new file mode 100644 index 0000000..4eb8618 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/custom.properties @@ -0,0 +1,135 @@ +# Extra packages to import from the boot class loader +org.osgi.framework.system.packages.extra=org.apache.karaf.branding,sun.reflect,sun.reflect.misc,sun.misc,sun.nio.ch + +# https://bugs.eclipse.org/bugs/show_bug.cgi?id=325578 +# Extend the framework to avoid the resources to be presented with +# a URL of type bundleresource: but to be presented as file: +osgi.hook.configurators.include=org.eclipse.virgo.kernel.equinox.extensions.hooks.ExtensionsHookConfigurator + +# Embedded Tomcat configuration File +org.eclipse.gemini.web.tomcat.config.path=configuration/tomcat-server.xml +org.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true + +# Use Equinox as default OSGi Framework Implementation +karaf.framework=equinox + +# Netconf startup configuration +netconf.tcp.address=127.0.0.1 +netconf.tcp.port=8383 + +netconf.tcp.client.address=127.0.0.1 +netconf.tcp.client.port=8383 + +netconf.ssh.address=0.0.0.0 +netconf.ssh.port=1830 +# Use Linux style path +netconf.ssh.pk.path = ./configuration/RSA.pk +# Set security provider to BouncyCastle +org.apache.karaf.security.providers = org.bouncycastle.jce.provider.BouncyCastleProvider + + 
+netconf.config.persister.active=1 + +netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter +netconf.config.persister.1.properties.fileStorage=etc/opendaylight/current/controller.currentconfig.xml +netconf.config.persister.1.properties.numberOfBackups=1 + +# logback configuration +logback.configurationFile=configuration/logback.xml + +# Container configuration +container.profile = Container + +# Connection manager configuration +connection.scheme = ANY_CONTROLLER_ONE_MASTER + +# Open Flow related system parameters +# TCP port on which the controller is listening (default 6633) +# of.listenPort=6633 +# IP address of the controller (default: wild card) +# of.address = 127.0.0.1 +# The time (in milliseconds) the controller will wait for a response after sending a Barrier Request or a Statistic Request message (default 2000 msec) +# of.messageResponseTimer=2000 +# The switch liveness timeout value (default 60500 msec) +# of.switchLivenessTimeout=60500 +# The size of the queue holding pending statistics requests (default 64). 
For large networks of n switches, it is recommended to set the queue size to n +# of.statsQueueSize = 64 +# The flow statistics polling interval in second (default 10 sec) +# of.flowStatsPollInterval=10 +# The port statistics polling interval in second (default 5 sec) +# of.portStatsPollInterval=5 +# The description statistics polling interval in second (default 60 sec) +# of.descStatsPollInterval=60 +# The table statistics polling interval in second (default 10 sec) +# of.tableStatsPollInterval=10 +# The maximum number of asynchronous messages can be sent before sending a Barrier Request (default 100) +# of.barrierMessagePriorCount=100 +# The interval which determines how often the discovery packets should be sent (default 300 sec) +# of.discoveryInterval=300 +# The timeout multiple of discovery interval +# of.discoveryTimeoutMultiple=2 +# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 30 sec) +# of.discoveryThreshold=30 +# The maximum number of ports handled in one discovery batch (default 512) +# of.discoveryBatchMaxPorts=512 + +# OVSDB configuration +# ovsdb plugin supports both active and passive connections. It listens on port 6640 by default for Active connections. +ovsdb.listenPort=6640 + +# ovsdb creates Openflow nodes/bridges. This configuration configures the bridge's Openflow version. +# default Openflow version = 1.0, we also support 1.3. +# ovsdb.of.version=1.3 + +# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is +# disabled by default. +ovsdb.l3.fwd.enabled=yes + +# ovsdb can be configured with ml2 to perform arp responder, enabled by default. +ovsdb.l3.arp.responder.disabled=no + +# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default +# gateway --on the external subnet-- is expected to be resolved from its inet address. 
The config below overrides that +# specific arp/neighDiscovery lookup. +# ovsdb.l3gateway.mac=00:00:5E:00:02:01 + +# TLS configuration +# To enable TLS, set secureChannelEnabled=true and specify the location of controller Java KeyStore and TrustStore files. +# The Java KeyStore contains controller's private key and certificate. The Java TrustStore contains the trusted certificate +# entries, including switches' Certification Authority (CA) certificates. For example, +# secureChannelEnabled=true +# controllerKeyStore=./configuration/ctlKeyStore +# controllerKeyStorePassword=xxxxxxxx (this password should match the password used for KeyStore generation and at least 6 characters) +# controllerTrustStore=./configuration/ctlTrustStore +# controllerTrustStorePassword=xxxxxxxx (this password should match the password used for TrustStore generation and at least 6 characters) + +secureChannelEnabled=false +controllerKeyStore= +controllerKeyStorePassword= +controllerTrustStore= +controllerTrustStorePassword= + +# User Manager configurations +enableStrongPasswordCheck = false + +#Jolokia configurations +#org.jolokia.listenForHttpService=false + +# Logging configuration for Tomcat-JUL logging +java.util.logging.config.file=configuration/tomcat-logging.properties + +#Hosttracker hostsdb key scheme setting +hosttracker.keyscheme=IP + +# LISP Flow Mapping configuration +# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings (default: true) +lisp.mappingOverwrite = true +# Enable the Solicit-Map-Request (SMR) mechanism (default: true) +lisp.smr = true +# Choose policy for Explicit Locator Path (ELP) handling +# There are three options: +# default: don't add or remove locator records, return mapping as-is +# both: keep the ELP, but add the next hop as a standalone non-LCAF locator with a lower priority +# replace: remove the ELP, add the next hop as a standalone non-LCAF locator +lisp.elpPolicy = default + diff --git 
a/ansible/roles/odl_cluster/templates/haproxy-odl.cfg b/ansible/roles/odl_cluster/templates/haproxy-odl.cfg new file mode 100755 index 0000000..1f3bc9e --- /dev/null +++ b/ansible/roles/odl_cluster/templates/haproxy-odl.cfg @@ -0,0 +1,24 @@ +listen odl-rest-api-1 + bind {{ internal_vip.ip }}:8080 + bind {{ public_vip.ip }}:8080 + mode http + balance source + option httplog + option nolinger + timeout client 3m + timeout server 3m +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 3 +{% endfor %} + +listen odl-rest-api-2 + bind {{ internal_vip.ip }}:8181 + bind {{ public_vip.ip }}:8181 + mode http + balance source + option httplog + timeout client 3m + timeout server 3m +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8181 weight 1 check inter 2000 rise 2 fall 3 +{% endfor %} diff --git a/ansible/roles/odl_cluster/templates/jetty.xml b/ansible/roles/odl_cluster/templates/jetty.xml new file mode 100755 index 0000000..3ee3750 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/jetty.xml @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + 300000 + 2 + false + 8543 + 20000 + 5000 + + + + + + + + + + + + + 300000 + 2 + false + 8443 + 20000 + 5000 + + + + + + + + + + + + + + + karaf + karaf + + + org.apache.karaf.jaas.boot.principal.RolePrincipal + + + + + + + + + + default + karaf + + + org.apache.karaf.jaas.boot.principal.RolePrincipal + + + + + + + + diff --git a/ansible/roles/odl_cluster/templates/keepalived.conf b/ansible/roles/odl_cluster/templates/keepalived.conf new file mode 100644 index 0000000..4ccf1c4 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/keepalived.conf @@ -0,0 +1,47 @@ +global_defs { + router_id {{ inventory_hostname }} +} + +vrrp_sync_group VG1 { + group { + internal_vip + public_vip + } +} + +vrrp_instance internal_vip { + interface {{ internal_vip.interface }} + virtual_router_id {{ vrouter_id_internal }} + state BACKUP + nopreempt + 
advert_int 1 + priority {{ 50 + (host_index[inventory_hostname] * 50) }} + + authentication { + auth_type PASS + auth_pass 1234 + } + + virtual_ipaddress { + {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }} + } +} + +vrrp_instance public_vip { + interface br-ex + virtual_router_id {{ vrouter_id_public }} + state BACKUP + nopreempt + advert_int 1 + priority {{ 50 + (host_index[inventory_hostname] * 50) }} + + authentication { + auth_type PASS + auth_pass 4321 + } + + virtual_ipaddress { + {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev br-ex + } + +} diff --git a/ansible/roles/odl_cluster/templates/ml2_conf.sh b/ansible/roles/odl_cluster/templates/ml2_conf.sh new file mode 100755 index 0000000..0d42e48 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/ml2_conf.sh @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +cat <> /etc/neutron/plugins/ml2/ml2_conf.ini +[ml2_odl] +password = admin +username = admin +url = http://{{ internal_vip.ip }}:8080/controller/nb/v2/neutron +EOT diff --git a/ansible/roles/odl_cluster/templates/module-shards.conf b/ansible/roles/odl_cluster/templates/module-shards.conf new file mode 100755 index 0000000..9a5d4c3 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/module-shards.conf @@ -0,0 +1,101 @@ +# This file describes which shards live on which members +# The format for a module-shards is as follows, +# { +# name = "" +# shards = [ +# { +# name="" +# replicas = [ +# "" +# ] +# ] +# } +# +# For Helium we support only one shard per module. 
Beyond Helium +# we will support more than 1 +# The replicas section is a collection of member names. This information +# will be used to decide on which members replicas of a particular shard will be +# located. Once replication is integrated with the distributed data store then +# this section can have multiple entries. +# +# + + +module-shards = [ + { + name = "default" + shards = [ + { + name="default" + replicas = [ +{% set key = 0 %} +{% for host in groups['odl'] %} + {% set key = key + 1 %} + {% if loop.last %} + "member-{{ key }}" + {% else %} + "member-{{ key }}", + {% endif %} +{% endfor %} + ] + } + ] + }, + { + name = "topology" + shards = [ + { + name="topology" + replicas = [ +{% set key = 0 %} +{% for host in groups['odl'] %} + {% set key = key + 1 %} + {% if loop.last %} + "member-{{ key }}" + {% else %} + "member-{{ key }}", + {% endif %} +{% endfor %} + ] + } + ] + }, + { + name = "inventory" + shards = [ + { + name="inventory" + replicas = [ +{% set key = 0 %} +{% for host in groups['odl'] %} + {% set key = key + 1 %} + {% if loop.last %} + "member-{{ key }}" + {% else %} + "member-{{ key }}", + {% endif %} +{% endfor %} + ] + } + ] + }, + { + name = "toaster" + shards = [ + { + name="toaster" + replicas = [ +{% set key = 0 %} +{% for host in groups['odl'] %} + {% set key = key + 1 %} + {% if loop.last %} + "member-{{ key }}" + {% else %} + "member-{{ key }}", + {% endif %} +{% endfor %} + ] + } + ] + } +] diff --git a/ansible/roles/odl_cluster/templates/opendaylight b/ansible/roles/odl_cluster/templates/opendaylight new file mode 100755 index 0000000..90a267d --- /dev/null +++ b/ansible/roles/odl_cluster/templates/opendaylight @@ -0,0 +1,31 @@ +#!/bin/bash +# chkconfig: 345 98 2 +# description: ODL controller +# OpenDaylight service controller script +export JAVA_HOME=/usr/lib/jvm/java-8-oracle +USER=odl + +cd /opt/opendaylight-0.3.0 +case "$1" in + start) + /bin/su -m $USER -s /bin/bash -c ./bin/start + ;; + stop) + /bin/su -m $USER -s 
/bin/bash -c ./bin/stop + ;; + status) + PID=`ps aux | grep java | grep karaf | awk '{print $2}'` + if test -z $PID + then + echo "ODL is down..." + exit 1 + else + echo "ODL is running... PID $PID" + exit 0 + fi + ;; + *) + echo "Usage: $0 {start|stop|status}" + exit 1 + ;; +esac diff --git a/ansible/roles/odl_cluster/templates/opendaylight.conf b/ansible/roles/odl_cluster/templates/opendaylight.conf new file mode 100755 index 0000000..105bb26 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/opendaylight.conf @@ -0,0 +1,42 @@ + +# vim:set ft=upstart ts=2 et: +description "OpenDaylight controller" +author "mskalski@miranits.com" + +start on runlevel [2345] +stop on runlevel [!2345] + +#setgid odl +#setuid odl + +env KARAF_HOME="/opt/opendaylight-0.3.0" +#env JAVA_HOME="/usr/lib/jvm/java-7-openjdk-amd64" +env JAVA_HOME="/usr/lib/jvm/java-8-oracle" +env JAVA_OPTS="-server -Xms128M -Xmx4096M -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:MaxPermSize=512M -Dcom.sun.management.jmxremote" +env OPTS="-Dkaraf.startLocalConsole=true -Dkaraf.startRemoteShell=true" +env MAIN="org.apache.karaf.main.Main" + + +chdir /opt/opendaylight-0.3.0 + +script + export KARAF_BASE="$KARAF_HOME" + export KARAF_DATA="$KARAF_BASE/data" + export KARAF_ETC="$KARAF_BASE/etc" + export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:$KARAF_BASE/lib" + export JAVA_ENDORSED_DIRS="${JAVA_HOME}/jre/lib/endorsed:${JAVA_HOME}/lib/endorsed:${KARAF_HOME}/lib/endorsed" + export JAVA_EXT_DIRS="${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext:${KARAF_HOME}/lib/ext" + export JAVA_SECURITY_PRO="${KARAF_HOME}/etc/odl.java.security" + + for file in "$KARAF_HOME"/lib/karaf*.jar + do + if [ -z "$CLASSPATH" ]; then + CLASSPATH="$file" + else + CLASSPATH="$CLASSPATH:$file" + fi + done + + exec $JAVA_HOME/bin/java -Djava.security.properties="${JAVA_SECURITY_PRO}" $JAVA_OPTS -Djava.endorsed.dirs="${JAVA_ENDORSED_DIRS}" -Djava.ext.dirs="${JAVA_EXT_DIRS}" -Dkaraf.instances="${KARAF_HOME}/instances" 
-Dkaraf.home="$KARAF_HOME" -Dkaraf.base="$KARAF_BASE" -Dkaraf.data="$KARAF_DATA" -Dkaraf.etc="$KARAF_ETC" -Djava.io.tmpdir="$KARAF_DATA/tmp" -Djava.util.logging.config.file="$KARAF_BASE/etc/java.util.logging.properties" $KARAF_OPTS $OPTS -classpath "$CLASSPATH" $MAIN + +end script diff --git a/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg b/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg new file mode 100755 index 0000000..df2035f --- /dev/null +++ b/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg @@ -0,0 +1,57 @@ +################################################################################ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +################################################################################ + +# +# Defines if the startlvl should be respected during feature startup. The default value is true. The default +# behavior for 2.x is false (!) for this property +# +# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to +# set this to false, please use this only as a temporary solution! +# +#respectStartLvlDuringFeatureStartup=true + + +# +# Defines if the startlvl should be respected during feature uninstall. 
The default value is true. +# If true, means stop bundles respecting the descend order of start level in a certain feature. +# +#respectStartLvlDuringFeatureUninstall=true + +# +# Comma separated list of features repositories to register by default +# +featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.0-Beryllium/xml/features +#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.0-Beryllium-RC1/xml/features +#mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.3-Lithium-SR3/xml/features +#mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.2-Lithium-SR2/xml/features + +# +# Comma separated list of features to install at startup +# +featuresBoot=config,standard,region,package,kar,ssh,management,odl-ovsdb-openstack + +#,odl-restconf-all,odl-aaa-authn,odl-dlux-all + +# odl-base-all,odl-restconf,odl-ovsdb-openstack,odl-dlux-all,odl-mdsal-apidocs +#,odl-mdsal-clustering,odl-openflowplugin-flow-services + +# +# Defines if the boot features are started in asynchronous 
mode (in a dedicated thread) +# +featuresBootAsynchronous=false diff --git a/ansible/roles/odl_cluster/templates/tomcat-server.xml b/ansible/roles/odl_cluster/templates/tomcat-server.xml new file mode 100755 index 0000000..bc7ab13 --- /dev/null +++ b/ansible/roles/odl_cluster/templates/tomcat-server.xml @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ansible/roles/odl_cluster/vars/Debian.yml b/ansible/roles/odl_cluster/vars/Debian.yml new file mode 100755 index 0000000..a94d36c --- /dev/null +++ b/ansible/roles/odl_cluster/vars/Debian.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +controller_packages: +# - openjdk-7-jdk + - crudini + +compute_packages: + - crudini + +service_ovs_name: openvswitch-switch +service_ovs_agent_name: neutron-plugin-openvswitch-agent + +service_file: + src: opendaylight.conf + dst: /etc/init/opendaylight.conf diff --git a/ansible/roles/odl_cluster/vars/RedHat.yml b/ansible/roles/odl_cluster/vars/RedHat.yml new file mode 100755 index 0000000..4446ebc --- /dev/null +++ b/ansible/roles/odl_cluster/vars/RedHat.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +controller_packages: +# - java-1.7.0-openjdk + - crudini + +compute_packages: + - crudini + +service_ovs_name: openvswitch +service_ovs_agent_name: neutron-openvswitch-agent + +service_file: + src: opendaylight + dst: /etc/init.d/opendaylight diff --git a/ansible/roles/odl_cluster/vars/main.yml b/ansible/roles/odl_cluster/vars/main.yml new file mode 100755 index 0000000..eb6f9dd --- /dev/null +++ b/ansible/roles/odl_cluster/vars/main.yml @@ -0,0 +1,30 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +odl_username: admin +odl_password: admin +odl_api_port: 8080 + +#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz +odl_pkg_url: karaf.tar.gz +odl_pkg_name: karaf.tar.gz +odl_home: "/opt/opendaylight-0.3.0/" +odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core'] +odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi'] +odl_features: 
"{{ odl_base_features + odl_extra_features }}" +odl_api_port: 8080 + +jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz + +controller_packages_noarch: [] +compute_packages_noarch: [] + +odl_pip: + - networking_odl + diff --git a/ansible/roles/odl_cluster_neutron/tasks/main.yml b/ansible/roles/odl_cluster_neutron/tasks/main.yml new file mode 100755 index 0000000..751a02d --- /dev/null +++ b/ansible/roles/odl_cluster_neutron/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: restart neutron-server + service: name=neutron-server state=restarted enabled=yes + when: inventory_hostname in groups['odl'] + +- name: add service daemon + shell: > + echo keepalived >> /opt/service ; + echo neutron-server >> /opt/service ; + when: inventory_hostname in groups['odl'] + +- name: restart neutron-l3-agent server + service: name=neutron-l3-agent state=restarted + when: inventory_hostname in groups['odl'] + +- name: restart neutron-dhcp-agent server + service: name=neutron-dhcp-agent state=restarted + when: inventory_hostname in groups['odl'] + +- name: restart neutron-metadata-agent server + service: name=neutron-metadata-agent state=restarted + when: inventory_hostname in groups['odl'] diff --git a/ansible/roles/odl_cluster_post/tasks/main.yml b/ansible/roles/odl_cluster_post/tasks/main.yml new file mode 100644 index 0000000..8432186 --- /dev/null +++ b/ansible/roles/odl_cluster_post/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: restart opendaylight + shell: sleep 60; service opendaylight restart; sleep 300; + when: inventory_hostname in groups['odl'] + +- name: add service daemon + shell: echo opendaylight >> /opt/service ; + when: inventory_hostname in groups['odl'] diff --git a/ansible/roles/onos_cluster/files/install_jdk8.tar b/ansible/roles/onos_cluster/files/install_jdk8.tar new file mode 100755 index 0000000000000000000000000000000000000000..faaaeb393d65b34479915b1a80e3881519d454e3 GIT binary patch literal 4608 zcmeH}&u@Y-6vsL9S9F=DCD0aHFiSkRY2sX#EpAz|lL5gAA`n1hw(NgjQR}Z|bqCl% 
z=z*r?+rHQL`g!1mX=Vk%(08UyD3#L?BFHpN39Z|4pU+BIqXyQAMhq$;j0n~wK+AFG zaMw9If5KckoUP2r+t2_0Kusq_1%c&Ln17Ak4W56BFw4KL>sSK+l9P((|MCLbUI=X~ zoyhWR>Uar&^E82hXG4E6gEFH7D9evAfKi-5Rwk6@oMI*L!ue8J8boCR_5A}Vw?_+S zi^B=F6st`Y4Jkf)VJ$4BsYHo23S4;<17%}u|2pQDuK;wj5sZ^)>;;<)EQJ;gtvbj`nJm_$d6e{2|f%|DM{nSJVJZnyw_?YVQ`xf_Vc+)4*su@d@; zD{s_K?Ux>CeCFNP^+9Fv_p#55km$8zpHeWJxrE#8w_n@C-s3~}4=3t$2RCo0#L%(N zvD*CLzj*xf|2g6JpHO|rKUwilv03w9Cii@AcNs@Xw&lO|JEHPdy5e7;T=BnA=is** k`BQ{v|hUk|x~0aTVE=H2~d2kpNFJyXhH9qHNAkq?V-Q_QTP_1)xB}0s%A%5XJU- zpS_UpyScrRecy=8s;t6+;6ZA;8|=17pt3R}BO)UsBly>2gF{ljPY zS;Nov=H?&x8$bEq@?E96Ro&j)z|Ynn)+*KQ_02z+&CdYKSeK)C7!Hj2!=Tq2jy?3g zxb3;u^!M~NHKK{UI{Lo@w;D!$G?~8p@~PWn`Ms z$EefmUq#)a>9x#pKkA+i!^Ve|0^h!Z5H+n{r_;MOY#k4yer%e(#)vOOO%o1x3I$`# z<#5>FSzX1GIOw4d?7fW-L2q!e+U~Xn;p$b`9fh4$J6be|gUjJn=ZQCZnc7MV0~HF# z-3H*h?8R=t%kX1l>QU6i1g`p>2qUkU#%0*Oh+`MwAKRhKuRzp%UDBKy>0+m(0U-ax5$5}f15e^zrD7xz0m(2 zi1xW&KYzLZYVYmg+55s1a|XHAX+xAl!e8~8qmGKrMz4QkT7%w|>BXI1x!rAy>fx|A z2rk;gf_<$XLJI3Ul(d&U>?RbG@-Dhd*b4kawkq(2-IV!iJbW&M%3LIS)J7=@^!(rE zp4{ownuGSoXb{Agk1!h7|E%->dZPci^Z(X5M8e|y_XxPZt&1mrT&=get5}Ygg~Ih^ zyK!l{(P%ip5`EIRj2a)@M`JmID#TUTxNLW$Lc3+o%~$_!{Pfibxk9I4; zTCi4VG!1sFG3{ivusVtdGBG{gnm8Jc`Z(Y^U>2grGDd#=f4}$QzaH`EC!^f5|5rDY z^`F>(Hmi&M|F)(3D9?{{v76K3O(!xhusMg_CQof~VR!0Jp-{#PQQuT{%t;jLR-c@% zCiOMz=$D-KRxfcH!~tv7bzimJLl_kjK>Jan-D)?ou$HI(5Z3df^~yKz&AYJEZiYP6 zUWP*)X>rUk9Ug2=r-!pr2vNin(QpuU;||(2_0{TH{?)_d@61&c#~}`uD<=BXi2AO- zUYB3VN!0bz?sa3^QzhTi&vCCi8n!#JQ=Zzf8O1oLQD_pg(4E-EzF&QBe!yY8Cvc*j z`Hygf6Yv1X+YmEocTLxX)g}gSU`cMMzstOH9kPH+Ltcd{lIp-{@%RAdATv< z(HZ)g!O551UblyrCXC}~FswD87eq~HYK^GI?+b+&gD4z=VCHQX1Dx@MT9`nH!YdrD zf#<;uB>%N(;ib_aS}`NOHPmx$a!zfqPar^ORPSMUwlLtzLaB{YeY?>fc5arFbGN9O zBa(u+io-Te@;X3ZVBE0hdjc7nemKPb#)E!XAn|34OJKR%OMzKILxS1IwE0>BrFWPP zn@yat*?(9_EePzg)&2y~pg0TO!}RSHp3Y;;n}opob`{}Xm|;JOl=5FLR(L|BzD+q!DwSEWN2^#mXz%P*;U`JoxRR=cq6M(fN@VqVX|+mQzxAE8=Rf$gAhy( z0>iW=q^6)jlN%8(4MGJVp^5xv^pE`h9}OFi?gP9{|Ht#(`G0*AUoHH9kFvwh?qm}E 
zY4VW+dC=~*XTLtz(_8evwGB9Xv;M!W%GTE6{5KbJAK2*03lO_~F&bba$F`5d6x9{k zBae@cPwjE6P&lW7hDM^&>2Pq|A1)htd=j0WVTTz;gH{N&i z(%z{#I9)Q&_f8K^SIiFwXWt*cJ=5-f*gHAdJ32eqKQ+fE2C+Ra56)mc$ER0j@90MZ zQ}0U{uwZlmnP6qdu{I{CVrZd-<1`}TEpQGD>S#n?>`ZYTgjkGdfw}16P~GME9Vhpz zHVi0KCVAxVv=L?@`dqA_rL1AQg*_v2GS*by5Dpst5srv(52&NHmnR4C0Mw4(oSm9o zb1q&2JPlMSEft-5SzM7vLQegs8MQ)KMRtqhk1nH5f45dDPSI$K%gfqZW+1C}j`udl zK(n;niK984^iHG25sVtc-Lt_6mK^iMnGNYo!E*W-c1Co)2qkI%%p=TUWtJulU<-&D zBx1l^uYy5z0S1l+HJl^jQl4TfW{IbWrR9GG#riRY^k?jA&i!9()c4%}CC+z7{O@{Y zeLKJZSGHjPS;YT-M#9^!7yCbC;bQ;)t?mEVklg0aKf{$u zRfWW%RnOO0hKJ}A9)?5-VkG#&j3G=77?!P!=6CH$&o*%#bk>3zRd|t}y zZod@KIi{8(zR3RZ4S(fZo1*rN$NI;LDY9pL=C5BBdteJ-!!ZXG7YVQ}sY7Zv9SI8M z?!-~jIpM$nPc6*2tm_E77N z6*2(QKpNDbhSw0RH5|pIgmc4IRBJ{IeXdcNt~J|@VTu2&m=DoSoHFKt#0m4u#MKL% zE1cqZm!uU{Jj8LMK}LdCOW2e%p!Uz#pgOWkWpzGa+o0d;m+?!F5_nep=NSDf+gt1F z3;XX}1bkqldFkI-zrPRQ&h)1d66=4F|NB7V&hh#k=x;;9@9qO2{imP*E7f)6hG*^n z)wSv(|8EZJJ-kWg{QqKJ`#(MT(;WcTpzB+(?`!^lMC1P)rhl8BIJe>dk<9+SO@GWR z&PWQs!EHwW$29prqQQS`UUU|DhxrKxe>sA<(893a4hH>3053y#2(CZ~Fiu>FjbOco zhc$UvLsRWI|1L5?n>cXi-paPM;jhl|0Qbc(L@+ch5PzdrB1S?2Q=wen+4cZ}ExUwf zR=Z)Z-)_X&7R|bwO|xEO3}sDquiD4zw1&|qMD!vJ6sNsreOjZ=y5c2<|BX3agLro1 zi)-X@jdy4jL7TtjKoyJ-L6(;3cA^1fS@c*U(;3n2{VjA5y9y-wiW{i2?gl}+FKj&B zi&`7uqwr7B6w_+==Y3re1oxYLQ$NDgBLTBwjX7L@BWiAG4D-Q}#>c;`Ym0Z)iKqGu zA%cC9<;0SucGdpW?oNCQlu3{s9In@n_Rc_>*)Ir-XGn=R_qZrd9erQcUr!xUaHkju zXonIg2NR)lXqI-bEC~+Yz=R-n2w1HSm}Jdd0kyy({F+`tF>f8N8|@k%#KA`zG8BJs z^3hZvA!3SutTgR}0b_WLu4J}XHb9SvpzRNO9}(WGE-HmyoQjSyz!&gbO60qG8xeR0 zL~+%|y+)hyoQW_Raagr@B0WmLnFOpzEA+2Y3;g!x@a=a8 zM_E9yw!3H}qA$>BYRbzlateUR5+drw6PcT?O6)|Q2ctd{G)hU=1n?Itc!Ry~_K(iQ z;8;6c-!WJ|8KLhM!V!0eo4q#&rDbdEV=sbTSY!LJtU{iHp!z=EI_WoFAPBsUj#^z^#HvS|$364sgT4NF)%jAgYU zvllQVt!b7fSp2c$mX;>3YraXdAa*Sz=AaEeAf};7L!yTBQz*diD;;v4mO*q$30&Y( z8nQpZQmNm1OGdvCV=vZIsjHQ!${8zGr01GOw5-asks?Bu;?={%HQ zYj#9-?9HQ%5)1m|9&^d|oXiD%Ci76*-1T1zQPPW#jqIS_jNu??&W90eeV&F94-?br 
znC-)xdkTUNb+$PH>8hQRI0oeKHHLG=9R@WXWDSdiZB#(FRkPO~_Fm^n+5}NruIzBq3gG0g`*s)BWj+5tP@O|owxpi%M{dj0d z;qYM}A3C$fkZ)xLAT~9d36KZQiW1H_X@l0So(HqGnJM5W4&~ra@IDtiPiT|^`QWZ{ zlT7-<_pKUd!QGuxrS?$ccAa@rQc%W_mGtA2XChKu2lts%7I*<71v+Js%)Xc|<3V(V zD8Mhp5z|IXFhx2$4zas&Z2}u6?K5ryM30z06aw?0gPD}gTLM6l1 z5%z$?5|U;01F~QfWFfRr@~l&FAf1&2wbECLZDi0U9i-xmM6>KA2O>pOu=nz1?cnI_ zRqgqYwbQrHkM^y~J4f?3$0uitu)}eCacZ!c9RIJ!@OTyeA4`PCw*RkftyR{??Ef3^ z|1IMG#_9Fg-`v~&&nWJNq5p32{k|Q4%pm^HZ|F~D%_QTnmcejVTKR;!0iMLC2*(~% z$$Y8X0KTqfEV?JOz79&g`p>XGLC2I0XQIh+vbl2HPRXFa*jRHgg0x`eP=V;e`?SO) ztP)wPViHiOttGBd_t`YaOykrtk$#?DEIXhEU?-7@qW{SQ2~P8DhZ3Wh?62Gz^FOo4 zO%=I`7LtsLz=$soPytFn^yXSbw!yH77(D$h9#3J&!uN}9h;TjI3rB$=Pv%5yTDY>d ziZQsLy7&hoXFUa{B#@XtG1e0Qw$(gLK%5;J?Y!aP_2}PtrdppQ6GQG?H+MK!o)_Cq z;t8f_I8>mc@`J|r6>Z2T0i1&55pnD$xq`4zg`QZnmhlNF%oc4K>#NEOpLPwTVQ<5bynSewWimI+W*hRiYv&|pU^vOU~QIECDSTgVFd@zIkxxunOm$+YR`7{-Hre5p-4zdV>kn89bD(iCw2nF%$K z&y+j~2j{H`w{V-!TI73LEIeV4Fmd;-+1)k%My-2ty1>Y&a#{q-V_~EnpJQ`CVJV2Z z)axRRahim(pv|)ka$ZcHc`q?ied>7m4(1rs2hksI%2NNu18T?Ak~gMeIGHzETX3eg z1$vUbaE7JQ+c5^j^+scXdtjdB()dMM~xj}Bs>mEHh-b;8qpgp z8Tf-x4LoR}#N0qS2ZlqY@Uygl ztuJf~GBy$4aY5=QrTu_X1UA@rEP|WJ-n(ESh8S^0AXSFY;JOa-x!4K&2c?0{8#5T8 z*g`x)_01_og_jJ1Z@8yUt?+81T1;UVQZa(rMH&u^zV;vHq_ z@3DDG##{G)7tnU+9I#vVe?HFJ|Erse{Qob)PIsWc3mHHd?%i167W%KRt>x~2tFEtA z7WDrj^k4XY=Be?87rI7lD{d9WldzyUb#c-zo-DnLJznzqj&_iQl6CP0;@<+Y9^Om*JRs>3`L! z-n#$*34Eve4^^0d7x#aE5eB#m{qI`ZzjGUn<-%)efh-`dG4@-R{;pz|Weo@ps~fmioY!K~aaxV- zvs3M}5KRogoTp}Dnn?c|6~n`BJHC1;*N2u^wITN%PBwCcLNMm7zQ@;xRiiygdZ`V* zwCDF1^l0ggmFha1?KVqq|8_x~0)}&)ji2eA#w#ab?>r_a#pG0uc$RE-wL7jQ=ft%A zebcnvaZ`Hw*lxMxA)n=fcL%RDHTwA7X?ASB(Tq?79sVB_jLf|CV((3jakaHq2ZzWa z-Z88e{c9SaXOsTge;-Rleu4^rCKQKe>f8c~@sP~W9%j?fJR*7#x4=o! 
zz8QNVVm4}$^wqrNpfSSDVcp@ZSkO5wH^!Pu5{x*^_`f|68P91!oDk4C7mBRuQWE!h_XWk*e#^^=@I7!bJp?nq!-ez^jM-)M znKAa8TS7c&N#OKU1rNuI(+HKLz>UTp%cL^593&WnCILv@-p4R0Y)$3MD{{Vvpu#KE z%|R$1{C@01dqZ(^bh^xHVlI}-JVkvYIgBmGW7W&q?JpAWjb$-Wahj;icnY6;$&TR^ zQPsNOCgTYFd1BsZwuwSY$o;7&?Ich!x?nID1HxSQv3?cThP@i!OC4$K9D2H(KTu_? z@u6nh0{zmBr&+Ttf6`uu(i@<0t$~W6zv#NiFqVd)^P^ zSd=a6l3>t*=dsHh;j|vy-w0l8G;onpYt#{LvpCu@Pr)2OqGD{UFOA60d_A7lz=08W*xOBzI);(s1Md!t}Qt2lHGkQUsW}S5Ei8dyOJtsLFGERJ)DEigr@DKHhx) zd;v4n{!`5zO${@td8oI#-GmJF+tGOZ-3Qi?jJxT~^j(>i<$ zHN$sKp+flR{3gx5b`>PL4ZIUB(E+_@j(MZvcD&y@QC zgFH}>SlA6|uyp0TG&OZC#Y@&YZ&_~DI-yo6^zonE#u>mBU|lz3@9RZIXepT^FCLtJ zvhlgoPZlXAZg0s6GcVUj(|UYXa2~O{MgQL?{;RsNxc|XX==1)*jsEwgxexU%`X4R; z$>0CEwz&V>N8o4t`-v%kUB=0baSJtG8XD9z3=A-|^w18Xo ze?HFT|5i3o5_D1jPPXB6k8}*+S`~Q6?dbfA(M}KOC)_6FFF^Ke^ ze*b56V;jz%?ERmc)kXb>yV3E!tuy&Qi~B!^eN@0&-2X{ORa)OY2_=a8QKQ|$6=u@NMrk2WDP1vy;D(cAv z>WE?25^JX)F;~<=t0nb2nW?b0@hl9#c^0lmjrEPqX5(4CvawO!Zak|*)$OfDt7u=c zRbNLVDY;s#ep792f3vk+Zfr)|<&Ev;v+}d*)@Hfcs8+Vtwzi+uw>OJAoPASEr(;O3 zlr*vs!Yf9)*y_@?u->>O6kmcrip1mqRn%%1xZUqMypj7r&=1Q&&k?{;44YTDFAu>Z zxaoA%$BRr;ndij>cX%g@%wLh^BHvi5XtCMi_Quvm7`E2R)$J%OZvflnZ?>DQ@+MQ# zTi--$KywyR$=rCeiQp^wCOg|qXjc*xhwYVEH`c`fq zOlIh6U!ZW){IhiI;Jqx9bD!HF!^9b({6+f{F44qbC=iTXkQe|JD{Dc8|C`1x8vP{% zaU&?WQ)ccsFh!Qf@zdUsFj;wH2IIM!>od7CQ*3t%W7*NTYPXy9&8@ZQoAUNnbECXb zZ+%n#rcr%X-dK-X;da;xv6L-W*@$>uwpgC^XKUM)a-+J@C~t(%qH=u$ADi{odaE97 zZa!OIv+r>9z|qum^kHQhAUR;$lu&ZL;EXNsWY zy-M*{6joMY;`6Rexk|)B@+@7}Zi?wQvmjOs&0xJbUY@izt9ETpU7Q|?GL^1{JmfN0 zl{+rJKVN&Vfyr|KV;R(8VKTpZgS$VIfkQa(tV>n7!~9 z5-P9VQAawFGP-Z%rf#AK7fXzbakkq(J$`%gV!yU`c6M^`9Jk}vUhlnG!F|EO;rgU^ z5GmBV#_cP}3zwU$<)UK5Im_2E*c-km{VucN*>a_m?u*_%clU4PL>4j+YJVH`hT){k zkfo3Mz#-sZpb%Ttn(C{)>ew2oshFc7*FR{Yw3KF1KBy zTfP&##y=&m>8|&8N+$PKFbGl4wuX3ed;4jo8^66=P3&It03Fg;kh!^L#qb^sd&3-y zKTAfl++X6c7BUW!XS`xSlV-C@2ZwX$qI%DKsuEJL88R)`Y!6WOmf6e6fXgY!QD=Mz zj!>jo_gg6DumFCCPQY;D*vzHai@=AfV`j6dicwtS@jH15WTqDva zqu8em>J2OlOvvIc;>OJe!c*Mu^ln?;N|UM>p3>zdq6qFCH)n5U&Tum*=Rut;uI%f) 
z98+7x;Or3+u>A#rF4mG4fku25REZFkD-L-C2>4h9~@ou}B z;Rv4Y@4?VnNXg8u#9@lnEH7t~aaUgUXD=WpI7^vu{4a%aHw$bIz!}_s{6QJt*($j5 zZ((nnOsOO06`e0gAJJ;kw0>Uem78}WTQeXBw~Cdy5I;@|{bn3}uB9-G4!-|41YV0E z3anZI6+D$mM?FLbLosC)4Jav-_qXJr)E-ke-6L8u)Z8*b(6~l89h@ge1=uUzr&x4N zY(GwL$Q0c(9xl8y+^sK9-YX{@xJX}@5=Ine*P7io926+Fo!jryCxU~7rrB<|ZKf@6 z-8#o#=4lL4EL9nOFjNe&iKQXu;f5pPd@<<0r^!ecq5CF>;<==ud$--c-)?Tp2zx-@ zWjjwtzw4Hs(21y;_LFwM>;CirO<7RLHADK%z4yCOKpQI{-8UQ;Nu#+XY8QmpC@&~y zts&VJ(^J1Jgaj|HPrD{3D-o^{p!-L!?F8^?#C)Ko#xZJY9CFJi{brV4r(h)}JkJ_C zer%pJg+z3c6MR$db~UJO!0uOty==N1%W=v>Jw0F?r+}ohY9W}W*=7?Q@T9z9_g~;t(O2WB!+Wm*gKBxK3fHq<6dBvX9vmA(+ zR$lBqU;}krJ43~E1M8+YnUT`n!M8Hx4zqXHEz_az$}W>wAq7$p_6SNNZ=tk@gv8pL zCr8#0+Q)7)wfGJS^W7dEI zlOy?~EQ3rZ+@%MSvVDS>5G_9 zZ!{3c73aoUF`Q0X5p>cNCufh6E(dQl{jB-lwEM@a`S|a<`rj@8 zr@FDRwaEXt6O!-Q_@49^cd$k&-<|+6d;O#O!`4{-$3p(!lb&~dEtCJTxc`L;qPb%U z3B2(*RV2{SV-}J?caK<00=+$6ISF(&qoM>|jxJ}ynzwNqM0x=XG;rJ_?y^E~{f76< zow6j5&o%iu?sesMle8fPn0w4vA=|%;Jj~vVf&p===nmzgnThwoxKxfQ?`2a`Tw`pc z4g?jY{v9uwq?GAzxYGzSM;gOVx2Bq4$b7B|zOt!&2ul^?z1+6+rxY=x$w>PUB2wksM8tajbLU*rv*(2itWSIjiSi$}H;oP~nN!J9wlN=O` zRhe!}LC|^cW4jp*c39=LjZmwa1rA=cnGUc!W}9Rz$@mZt%bImE4{Q!!wt{T2(*vM0 zUev?zjthmtR3z17s{OebUxtH-f$_JrOYLV+=G+s zn;s*z`yaPzgAjqQP39oYqG6G-yA6e@-|Ka3h5%SmiqjX7jl%y`r(@bT&odD*xv5o<=K$t>o&`+_PqyK#+>L;AD)5NLEh9o^Y14vWL-l7vYwwx$vK-I$8G z>`HSBK!P0{`)^kpn_G>IXXWUdI+A~yjaIq79&VS{o>kWC_4Ru5+4?tLI#11G{#1&- zJpSQm3U!LM-20BOo-FC&w5Af>j^IZDo>j{A#zt6PZ>=|~Ya5&O_2xD#rV}?ThY&@xU_((f4db^gt-EYH_gYzZT~`VH zybHu;(a#}Y);D%Gx6ID= zvz=;Xr@qD9A1}QoSG4#H`|~}rY%I%2noO!NNvce4fIDQ&3*`Auiz&`X0ejb7@v;@nRwX}%5qtp9AY za(EoG*Y0`jRPQx!fPHFV3Fb2CH1K7y?xVY%EGvTbayy0tx^c$tMnQyd|E9E@#)Og{ zcGToR7c@s#{kWvv!E4uT;?Lb`%BmTVrX&$*7)0@Dy!tdQnx{s*;wg`%xG;Y*MSmTP z-_PN9HD7O&H9zuD{^bu86YHJ zJ+{eAp3_zC5t1Nj$fE``jWhuyMj(P@f=t=Sh}}#?HDOq01qhEw*+nDZO-F{EeL5=+ zp;HnxCX3!S1e4e-CZ|q7+P@0DVb^(qL%iU+r{n-~uwxi#@L>foPJ?M}MqyP8W5<;T3k8W1DW?;Z749Ouz+AJnS^x*hXY{^WlljVJSktfTSIy5PfnS77bwe1yg9&) 
zjoxXAPyBlb#X_Kd*ok)#vVHFUN84@g+uRuW`yTB}`LAhLHTe#a%djX!a+_4<3Rr6*$lfUF*us@H#EqfwH@KVA z+h5bGa+6l}T4Jx^%l)&xgTvFbtwWnLWuZ7#(O99E2`ZSi!a`uoZwE~8jS2``+q&7kj~-Hv9`N8 zdiPwXN}=K8q^-!5lb$)>nw|Bo{cZ72Q0;2sk`3I_2^xj4rdx61$Zt?d0Dm96cq zwT1kj1GNus5(GFlHaeqbw6kM^U={yA+24EldY^PJJh=Ns2fuL)%vk@U7&m-8!t%d0 z|Fg2bo?ri4)rJ25ML1{I{uJJl+J)p};jCv6zUK@~?tCaVCWC<9pQFZb#})^-x47h3 zX(2qm2mv2BNRDW$)c>viM?=Y=T}R?n_u@ff;I{q0g3CU$_kXQbtH}Rdoc|t3+&Nx% z3!c9VGyyt_C!#=}Oi&YO4wUlx2S?v6l!ZA^{hMquWBp5|rA|kJI6v$9U&mr!tp9%u zdt}!C0sUf@;@S0I*c+4{5WpF6j@LJ?K9Mm%qnRXRiNd^yxth-y-Ce z{lBudR>`ja%GOqOQUB$OuoE`+919qzfTJro07dYdt1!mheTE5qa1H8}5~hAbpI6+! zYuWvXkH^?+~u2qDl2;ihheeF_x# z-qqUm9`-0I29PlHny(n>pQb%D@nvt+K@`EzbRs~+&ZwV;@Oi_lhigQT zYVy;t*BB0>=qHp>7~p(@$dsT^_#x_`dqghu@-r>Q*}48PBlCR8c-!9vQJsLZ7%~wk zwSYjL0nVMP@KZDD4=-`~nYho@u*OIJ7R%3a?%%`7{Z;&#b`zDUULF4kL!s3+nMV8rbc97iS&z=8Tw-@)BV`jI= z|BbEnT>YQQ*818){?CEghc|g*%3qfuN+BOEkUBPOm7nn&-2Q8;y)?Z_EqnbC@j~v& zig}0RFJyR`YOrQX6vIpQrKRORqX(*dSK*DM0n(O~%6nW<1c69nl1Tn`$Xm}OlZw6A zP6Bkn4TD|u>LThxwD02+qSn%O*c1v+cwO;ddQ9GrcB{Pyg=1$)o~ zjBuScZhOL?r;*0#Sv=n7`eyXfXr)SA2siJ@_YJVs3z~4Od*1 z){s4EzX|IxpOlK?W+>t!HZWh=d>5ghK-l4Sj;cPRfvO$ld;Ap8;U2De47)exRCE*E zg=KF+L}FWW1WiM#M3~EH0Br`jb}{T7-HSLdXP429s3vvZe4V_8syaPfJ$#|eX*(^k zlO|F@WsCrggRhIr2H|+3xik<`(7S>z6vaWo>j_bf+}bnmA@Q*Xeal9qKJmMh63IG* zeO&Ctou2)sfJnn9#=dT_;{%7xwq!U{XzI|I!ix(e55-V|L~jU9chrt`p)qc++2gd3 z!TIq99Z9fQ)WmCzn+Eg?vQhL2RTL2NxfFepu&dKI-yaqZBIxL6!aV9sz?(QQ{_Kl5ilyZu_yh5+b)Xt2TIgQT(|Ts7HR?8Y zOf+l+(42d5T!wlE&ZDp+!h-^fkFyb>MHW88?k-Wpgdw1;JWFIG!%*Vc&^(`+laX=B zZaOrhZBcT#F=+SMH~L`-S2Ju6L0lv_j~bU`f2gW%_??|)g+~{PmQ(TIAUR1nCw$-B z*l^ZTd;>lFleq-qIz%4uxr~K%gVhEkK=1?WwlS`dyP6fU`us=Zahi1RG_8*@1suN;KomIx-qZApE0hyfvepwamJ7X1NS3< zcdC*E#Bu1yDFs>Wt#H&C7G-VO*>E9q;am={IyLD|Dl1^R=S#uT`^*#KK-(1E_c%fg zQ8~kwHNcg+^0_Dr{aEoKhb5LPQ&J@Jy5w}~qjm@B*g=8g^P3u53e7Os)0p5PyWOq; zBI-Lr)zj*)rvUfe~t-_uJbI^NmA54wWC$4O1CSl+rFiUos z+qg$eB|t-OF809)Ps~gI^g5}{#08C2tZ)>US5&m_e+qdf&23A>xF?u*5cZ4}O+F_I 
z-`rrPU0%ZqK{23}tUd?_H{c2Ho!S*-1d+}g0&T?&%o&@cydCrg7pq9zXJLh3KmX}0 zum7LzpPpHR{==0^+rw=Azf!GMsyX}5#^xga@6pgdvm+LrAu}H7mCJd%0?fCl6!Wni zT^F1rE)>qd>bjlaW$Y%lXyPg}Nwicr>KH&=UPY*x$hFa6xfv;Kz?Xt~39zHUjE`Lrnqx>(jQT*&%jjd&>Gf$z=u?&J#qx1b`f?M-)&JUq#q$ptWJA*TbWL#3$0eh5K(gZ+ups z13d$C(2Rhs8yTWLx`+#`J!bcP!QZHkgAXAIJ8@5a7CKj862n8Emfj)oZz$?-q=V)k zjX9uU6FM{$F4!$2z~SH(_Hh8q&El3P3r?fc5mMT*$PgL5P=g<2yh;9_7V#D*lt_-~Lns%rC@FjDrZOb-_j)r5a~w-n}M4-h~$f zN{4#y)3RIDXrG*=d6Y>mSTEFXOad2mIOl9iz63D_Q2-Tvfb}XHH`EV)r{^IznkvKv za!B$jxXqjC<*1HIFITJe#&y_%7+C$NfY#guF*~ZwSSX1D3@SQJ9Zb^k zF;?U`4d0*-G;>M1qK@m1MZ%t#v7D!*2B;`L4ttz3#^-V}QWsXIi#lf~Di1wzJYkKA zr~)d;-NS3hy@+zI9bc)!ZDp!1=pDfR2^@+MQS%EZeXe|r+;gr9@J}Ix;*3f+vH}Mv z3JAH$>090nJJ;b&>_iUFA!Kd5632fAfzdn34YCw)^uD+NiJEXHkXYvDs0*8aY*?T% z-3EXbv_dsM888Av+41|M_)-M{gc!j9X?}pt&c1+rZr5nvV`b}*^w%NjY&)aa#`w=~KqxsGn6O2^i9;?CVYeX}mMFe4{iy%UU~;_y z#1M~{=tV)J4(`sBIM^ZWiMAa(!JkN)pL`)uDEw&WNWRnOH3uFJ$!(OvVR%9Maxg4E**1kC$(_zMYjyqrNxvUgkwUe~jp`!^CJdW#1OrC-lEBKi zJ|EIh(EJDqz%!Tylsh~0UQD3u(T7}1B*!eL)E(ju9c+78IE6t7g#cC~mAE+mpbD0t zj4Kd3Wx`*|&Lnbc{AfZ zTy|tp_Fm{>;|}8+gY`rN0g1HiP$%ibb|cnP&dG{%+2XVZUQY{s15#2pHp7}JOM>8G0R?qL^Mk3bb?X;+`WI>)6K7CHLB7C$6gqq8$k zYf^W1Sr)t{Xad6IMcuF_anf{L92ja#I0% zFL5pH?dh;o4YpQbGT&Y?>j4Z*Fc0DED|<9%9(XaCUh| zaDQ`i;sewz6u3)YQ91H$b-*t{cVgIp3T4_aupl^3G#$Y2I~YM##+x87w~a!o>2T@5 zpum;V#A4x6#In0$w|F1=i4||Ei#EC5RO30r_QVC_nui3-3L?dupl5}@=^q;54NAId+0@jxm2qB>`Ou?Qz<$P@K z(jsuI_Fwy01@+wOU|16v!IW`sxT$Ufp`#&GeptJ(Yy%#-p_p>76!#1)fTPf)h@A7Vo?jsdJIMH5w7oaikm3R$yrGTgx)~C(Xy$-pu?Xu z;ThB^HOYyS=S>8DB*q7@n#cw_Is}_4sh%2`@7z%Nq1js3ohgHpK%pTE%?IR(bAEI@ zPR@c2m-5V=p6&YdRY_P5*lRIf2G0Tp9zB6ur(HD*s!pZ~G_gF0L=N&elgPJ;z=|IB zl?hA$_FA2+yCI@Z&6o2|2MfCKLDV~Wn zY^{D9?b9OaL>W^()Z;ZduhvodAd0BoxED<+3SboC>Dw)UezVTa1O^ozn3r+d6*B|k zg0L%y2?inxXFo1otJDgZK)Rg@SlVuLLT~&{2N~U4gfG#EpV%V+whoa6mQbn|6C94f z9xM@j9l2aiy2J|E_q(?zC|z!eVM^w2WvCA|mQjJuKsvr~g9KDq97a28Q1K?=G)&A} zTtLp=MI9SjbAficYaF#;qcYec9rRr|t3)t?xdG>i^J4j7NU{f>Kk1K}_4nq@+n3KR 
zOX30!PTcLI6Zb2=^!ZKQGwq=Nst@z_e<_6#py2f*B4oDxe+}1YY>wIgH#ZjfACE|a zsa>6;!ZV^VhvEBz^%Ukfq7aV|@(nS)>mAyTwDt_-1MTn5%Ul-sw`c(v%=1(U*q3+ z0lWO*2F00R^8k=ebH+eHVhFU?T)B?iWe#Ae+#~|;zAbxQ*3R+K@oDbDhGZ1$ovPW_ zETGcAUf48ah|an{!r}Ux&Im>iOi0}MHI0*aU%;O3uxcs=c~-{38y&+avnro$M2+{g zY6wKmCz=F?sNx?p%@YXv5Z&;u1v=Sl)XOCR2u3gcqf;}T@b9=jML#3xMqJhc0p6&G z^{B&5qM&ga?)w}XSH+SYf%>pI4``5(IGdmkO{0JcZ&!VAR1H<)L#(m)h18BPMiARm zp$P>C>Wu$fx}1;8w5AqhMP3S;UU#ezL$gHm$-QQUlS~A*#aWG}A6Q&>L9VIN z_aFZtMSvhu&Zr1eLQFrdVyEUD3_pGD`VU+A`#&q&mF?}t{{JPoL!<2=39x74NS+9H zoMt?eINh0z5Qj}k^}G+T%j`V@B9`wQ>I2e5p=N6b3bg8?8gpMPo2e^Q) zFq-6y@Px#p2ZS7Y8%EgKY}A?^YK6vXIDQxktZ-bbh24%V9wS<$X~Vu$im?yk?D%dda6|MhPk z6TVy4e|2qbBe(x=;F9Tu{{NUXnAX=5^CC%lDK(ZQRoi?YT_YQNSbjqX58~Jn$CiY0 zzQC~^p-U^~FnEK&b}0PT(6vYuM81_eMR6*j7X_yt1}dqM4~R;F8_^1}UN~{_;N$Fs zIC1$6;2wh?aB1(<9Goth=X<9Irz_~=cbF&$74!RpQ}gQJa36Cq&@lGC+ka`E|A_DR z&3A{#&-V_o-~5la2PgZlQE2=WAKn}z2JGdrIXXTw`!5g9k{m;t{NyAgxlvu`v-}GK zYJv^;x5RLTsFzS&f$5Rf`~9{;c>ReVv|Uc(6o5s)`} z!=J67c7=?hwmtIl$c(dNxA(V@dnwap53U%Rx6g1veN6%_)I_SzXPHn*th#C(wa2-5 z_q=G;;%DwYV|?CalG*ZN2c9Mz{{_C!@BgXZ_jr@PW&cOD*D?R!#-je?mHDwo>|afP#Vz ziZP^yAd5ISl}wH!wPPePPnst&7J=z;fs4EP*oL9fm?i6SrAG_MVMdAMfh=*Lf;?rS zNGC)p^JPD52AGnwrYuP;?-(#ZnQmu{{(D2O9q>H?RVZQeqm67=U`XC%AlR$n_M8fU zV_I=-ERmXwK-0)&@(U*-K|&+Mz+BaF(Iy;>q1nG@z&M9m;Bh?9m0YuKw@; z`Tty8i|qFaTowVD@&E7t`Tqm+gN^DZ9Cc{d9Ci;wlFnN12iy!z5EgVqDzzCmiOk!G z5o-u$LWQNYA(De;E$lg;<(O~P9_ir=ALJ_gjL8d3Fhs3K=E$eLmR@_?n~DSFqa_rVZscxbUx+7$ct-?1lYX*Nue zf-?VxYH!d4V7J?suE&6Yy9^U_(Yu%!dT~$y4Zx(uub!mj5fdh4 zXpvq$Tu;xa7yF?jK%-0yQdaP-9*i|~-}Yo1YPi1iqXX(b#(DNf17?LvnuSyu66|Xx zrX#oQWl0E)aNG8>ps3%wj#!*PD#j5A2E4O+91;kTxhzgk3}JiGM}h(5Ffc(yyyV1( z1XL}WDb~AFv9DgqqD-`j}g!cqBMbhxAxTAtYSeMXrL@e=anRqpr#aM z$91X?EF1Y5yqKWv zBR5SOQ-e$R1UNfQTOA0P$V}KF&PqDQtB8~Wx~vrh!N~|X=beP>zMSxvm>7J#Bou(YBn$X4%XK3d)|22#-Gf(nS#hPa|aJNZ<)0S&AU;KZBt zSV;Uf&>9Y*1~XL@Gb3XWTH8el%xx2fsdib-KMDLs&>lhrQNaj~#NIX2wCvvT5?Nr# zdSr1WA|%dWgdD&zCgZ^9>}^jzV)u5}KqL~PzHDEb#={_d;zg!`G61X` 
zikie91aZ9Ck9@*_Rv=D^E+>o@QbxH+IMydOBuE_t3Qq@7ua#2$g{P%Yq<9t|-jcPz zbG!tBcERzT3>;*ijD#bpTwwusZK}))M-YJ?&!+*;Ge}6%~xA!?S8^J;Hv|0xCR2>t=RBoLY&gk;N7Wun)yr9xuI#}wgm zYE4r=)x=^~5EU!cR=6;9?R!WklQ<10nH;%*#J0K>kqsRe#qDI5ml3Z-UIJ3bzmmXX zXQ*CY&LcZl$B)=t<9@p_LY)>YUWAx`fULuqB9y2d*?!bWB(gvCt8jTw;6w_mr6Wj@ z5N@sXL>(+8f;;&-r(Y-2=ETS=x2ilo*FqR0oiID7WZ2b_rv(0JL?X!nv_FoWAQ;x> zwgV4fogQ>qNNHe$RxWS(sUWHbsM|%W2?+8c@!e^^h_^c(>DF(>-o(R%eYi=3l-En7GZ_t!%F>c8p-Q^F*2K-FSt zZMSRJk*19bjohd(oS_V=O~sQnzi?`q#4>l{6Qnp%|!Ql#O>!^a0|(nFVSrs;H$+q z3!EL|nxt_1H)QxHD zIE@C1lR%W{XRtrrtkEFgF_ClzN2IBstia5}?z3QzmNGc}R2 z(tw>7s{)W7j8vCRj8609spSc$GKm-i>3~+sF?Y~NC7>uiLU&pJdIY8bCH0(`0@js? z2AM8M9c4u`cq<%s(HqZ+tknXswh@>U=;es{m3x$s%AHg+P9$4i3PBFq0u#WcoxZ>W zT#iX1G@*)$Z9^uGvb60HlwwI_M|=K;yTtaAQ7UzjO_O94x8ei=x~M;(q>=;zJy>SvmK`eSAAV0JMPa-8R zU{eCUysj(+0D$#wB+;8_cU(FNtrSY2cGUE`v09r*2AzmZ0q1Xtl2pE$f>*Db$ru)% z%1=(14XaeH#$e%TuNy0@($xyZ2pigtQ!Ycs*Z|ocu2T&8)AA@VnYNy`nqe}xN0PO& z1}H||ByxsZrmeF*xqsA<_2y+qlag`N_2Al*8raF+JjQ*%64#$UCs8190#q>$WqZ&> zmgxXnqT_kQvR?7PY;#%yDQiutQ+|2iSTuX|7HhU1rMdWJ^M%q0EII zfHNEeF)BvuBf%ect*@x3NC!MeC+)4X!myvdp zY6O%nLeLB}JfeHuaMutFjuDdt;^~vtxnwCxe;nF&;dBk!Gi0+!g&@9h@=bSPti1tG z-&9*wl_nqAj<_(>-9CR3aLg(j@}?X`U4RBA`2vf^ZA+-pW$#`f(NOvm=tOC9siz60 zDpCF21UP^HeQXwYj?E>AWD3e=|AP0|nL!veD-Vm`c&gzkwhklmkA4Je06f~5=gn1rCamYTp3_Oz9 zfmFM21(y;QMFGo5y;L{UQNY}CB6D?iZ9fDjwN4=L5rMx1L)gy8z4ZlbK@>U#1mryP z+&hr+*v7e(MdkK9LPR|HQsjlGFLrQRYGF4^G#dQvf^n$esI?mGjQ}4lE}p24j)aa= z(qpQP+Ys*&H}n{mPnwOf&QZvnS2WN)O=y5+tzv*0Q5Z^i@&K{`M^^|0i2PjOG52os;NWBJ1&4gzR6?c2 zmZVKrPu^n7)2cC>%0;RAcNk2iZK(0|?@?wz1)%?U~^?CWmJvp-^c=~;j)qz~Lk zv1&^7ChA%8RG4KK#_aU^@W6l@U@;)2wYVbQfEUv#^p0&r+>%D9mWHZloUVmv(2n8y zz}G#E0Mg@}2zBdcg&2V5bHUq1fjRBVSATLw*Chgoa9ba@BSt+~pBzf=}E*#cAVS z8DZFqUJr&`802g>*L3nhR`wZaN9taoFaiL#B}-*Dg5$VV)fu}&dW?IKh;zjwD>{hmWT>;+&X0O5U zd9FJrc?tKPE!7QpyhZVY6h+N0SBr~8vf|}2J&c?VL(GaW68G2!>XLA$gEO5vqeCb$ zqZV=KZ7tNC1ok271VV{(l}Yp&=ntuAHwtnij{1mTO4cJ_P-_ZV*iIkDZ?*AJX>YEg ziCkvG;86%K5eI{l5Qnnh5MhGPUfOXwKZ$6;5ytj|T|)#E 
zk0sWb$EmunDM(W>YQuY}A)tXacm3!R(YbkSplYiWSi>oft5p^}@oRC@N^k>rAdV~g z1~g;c1$~}^0yMZ1NkrZ!!zNg?U|>0q;GsyoW!7>GnGCxJzP3hGx)ZYON`tQ|8|j2@ zcwSQy6Q>+cUUTFnH+pJg0t*=Uq2YbEzMqf(WF1m$Y`4C5mj<)rKi4YTo4NbnD;rgm z1X$eveitO)v+X$u{+?Bp>1;-^8rB_MVOjD5^cxqUfEqSPr;k6h`!UrRG}GIuq|PPY zl`>8EjR)@zPR`!$9oF`b-tGQ^udrSIT7Z)6%GcXhZHdJiHwDTG4D8(@U8cg6EkR?- zZ_O{g{xJU4{DMePsXO^AijVbK!Kgg=1=9fAa1%*6U;cpxcZ&;)^h%Zg?7wj}}WX8YZ@-H{<9U|(z|9-0jm)A3|MZ5B> zn`vQ-YzCjX2P;m*Zyxv(>GShnkrPoLbpJZBD z7+kKeR4*N%ps6d7f z8C!8Xw9C*kPd*uZis^kmqq=8x(N+z;)S1N;n>qM0C8mU#YII;GYft zv$^v9v$b^qq@?1lT>Qe2yZh8DkW;A9q*So6F@WE}^b+^Ls=!aQJN~o_tNT-37 z3~K?!(^%;G>y@veGOsEf@Bt+(R)h#E!a|exl9b~;% zMF2G#aS6!13jyA;{;OO0`oGn+t;PM{Uxqh%^=~{ZlfYvCN-0lG^NCyGnXh5MfNrKr zE11HR5rXjqX*E(?l)b>&scmrF#S;Wv^ieq7-h0LGPy&HUPH73(ly(s1#P~s&fLPgJ zX?fz)8m{ykZ&74yKuvVCPd9&OXha+ry$*2AYuY^@qIUvhVYJstK+TgGWgyd6pW;3c z0%x91f-l=fhxg&P&r5*4vYhn9kJV+E7*l{8?h-4P=)lb9BDC$emVQo9H)(FO9-5~H z0~ZdDzYFBDfKshg9P5L(aw zI$~|~rcu1~i7Y$>7D%T#={ax#ep!~=%3-#LwWySB2GdLzy#Ojf{)I4m4d|`WfL4PC z&Kn_*NDG4-1f9Sl#oK~u(Cb?Eo-p3@Czf_OB*`6>IXv+ggEI`A77RcH?>1epMf-=Y zxfNkPSq$23EWOH&#d0=+ovUu|+ym~;Cv@z! 
z{d9#SOoo9E9Hf7tZ2I;u`2VW>$4_?Iq`<^~>!iMY_vY#T;r@#=gPiV@MVjCc=M3l@>CL z4kCAjYbQ9hp%Vry7M7Nl3g73kEZ@S?2vhv)_`)mk@)@^+$@{-E&(OXgHX59K?_U#a z_Woa4-(1_y*8i%mZB`fh|7WCn+O=Q*ix=LfHS)dt?y(GBB$)=%>frfnd^Ny5udD!u zAL~bOUPSszvWep>=3SZ|Zc1#BWMD7<`>9uKTxU8m8q5Q@hy;uPP(cQueR~f2@Bo{M(AcIo8HyFr7 zHbuxEz01O;`Z`J@Y!RXxKqMIyvf(6#JQ!eU$di4bAdhOb)`%D0)oLUV9q$M*=%O^e z^|%&XVKkV=5x9t?Dch_UyT7mnBQAkC49Pb)F>2cCMsuJTeusILG--I*2601YF|``Y zCe&&r?2zzn(gwzix$Z~SK}Zes&bcW|N&k^!W2Oy`5;f~kn9v>|GMyTOyG}S{AQueo z%D~wN1Oi70{>nb^$3z4FW47TKglIkXUG|l9256gp$ThNcWza(Z()e_u6W5GbOj?D1 z#e7W@yEkhgZ^@kpp2DJhfs+D{IlExRARacMT`s31;yYSGN);%p#2kmwQS$})59 z?X(+LI7ZRA&KQas0vZraAVugVa0L5HesoHlDf(_kTDUId6-bej5tBT*?!=))FE%0@ zN5gWjRkr+(#R`=R>(6z2f-JTHUZZ4FU^J|N$5-tK6?+f?0gVl`Bh2(gP>wj>bn$$s zkpzv-^$XEDG8z* z*UjAoQn;#R)Ls3RvXGs)uxo>NU)$W*sI+{(_C6U2bIK6`2*#z3000+P5|2`%>W{l- zEeAPqMgA$iu(VlvihOxSkRVOwzq?Q4;)*GnV(>GnKoM{RgD6ZXj5+|4yM<$*SZ7C_ z%zd}=Tz*1%uCk*l3MX>)B!0e)l^v7tcMhY1aKSj2F=|BBWbf}ySiGek%1h#AL9Tw> z9Z)K+@)n&IuIL?vxW9v$08x>FQ3(OvxD#IlyCTgeeK98kEGAw__gY`lb;sX6laboG zL+H0Nw+U%7CUhD6iC$6x>%OpiL^7rEWnuw+37iai|9C(XS}LL8u7;@rk#dM<%+pk+ z?2&vKHW*QUYN=XM@)%w3lJp<42WH&XP_r$nIVX9ZsUfFcAV3937qK@%XOeYo2Z!Es zfHJ=c#_#&7nj)aFNa8@!&4f{>N>YguvdrE_{J}slCp?zpQr$a?0wYB>*)RwKh!f0CObbrBP#AcptK2isWDRV5%j8QtB9ZyTylM~NFhgUc z;XvQR2bRX+&GfwQnh|6|5n70;uHZ}rYnVjMU>i<|lBsDd`*S_r0R;O#k%&#XQO0A4 z@kZj^6Ee+ghI+#OjK=#Z4?r&E0Dn`!5=tJ7Z3P}tGJ4FnI{JEh~m*U+Rh-k#>pO~ElDM0;K7VE{RsayYsi1hD_r&=f67#d7x^D;{wy zczL&oSX!LsnAI%;^Zx#1DA$T`uc)|k3Z}D;ScO+P z-(nPE46AE2G${mg~PE?MWuCBg4SsJ@0`k?n@0 zs+d!#HgFiFmncJ5VyNvNgzE|F!Xr!Z1Vao^4pBW$Iq*_{z$c**D#_zXwpmYsri~3-D`~6 z)RLvDbGhS+f3hy#l_=$Un%AWIL=)7|WM#U}-GVT8-i3@R{RuQmoa;;I&#&tr5cQ zB%^TrzkKBQWc^=B&*V7dzCBnGy+!}8R;y$A9~;|?`(Gc7_jx+-^FMrjx%5Krg4{6g z_S7U{PH&Jwj_R5JiJLmQ=D&jf$|~5&3qS6?-g|v~c5KeV4`84+b}^JcxavV0fM1=t zaWHUT$233pzn!JxL#i!;$|lrHf*P{g+6_LTF*%l=n^Xf0oYw=acvU>&YLNgReP25sJ4Y@!Le^uaQ%A z-7w|Oa$*0kZx9xZ*EhLMo|~_Jao_xE%5au^^@|(eSM&bQRAsxO#Yl?Q+b+Td5d(u` 
zes^?*+AKts%xDy|W{PCLvNR5xsM0C#S)|dED9W*&A|_|PTGQ^C%T+0~+DJOXUu*yJ zyYf7e`*GS#-v9GScMltD_Wobp*hFq<&i+?jU+n)6L;9R=PqzO-9%W2t8+W-9I{)fv zcomNz4)>E=7bv=?n)V)V(|^1$@>^rb?-)P+IA*xS(*%h;whhu?<2Se^a{Lj)#w`I8 zb4;?|-8p7_Y*N@M>3(A>Fsv$8N-^j<3a_}I;I}LDhm8onNr`3Bn?)P21BHzn2?KDE z&^QB%*V}!TuxeI7a9^Zh)X)g5-3^DwB<%VMHT^O@SeRT}p5Vo>Qz*RL`>}Sq&w%_> zoWQr%kZvUZR5uE*_6`pD<@x)<$^PlvLtKHqYkon(mG!*C>^?w@U(#7{Dl>*j97$!c z=*cEZLtidl+lEo5Zg}-)X?M<_mAs<|Zt7IxFSFsffJD1VM?r+oV0ea^os3ZI9Lk?ywNuT^G7E;ZkjW;0A(+}t(fy&p;yO*4AWb3J zj=XuNe130?vK@G{PyYnu2@aCKQl5goNqKCinX)A5WS^zLWFHGDCB!&` zG5b~mBePi24>bL7uvHX?Zg>C8nrq;+-O;f1%vyrjt^4ub<7=PXYlzTI-YY-LMi;>D z#1f()>Zs0KzX1@9;{fez>`ugjwXh}Kmy*9y%n$~gC}^`57Q_1lT*L!G35 z8TfEKwsjgf69<|!hU3|%cOpZTFd&nVxpiaWzQt|^o_M|)NYefx8<&ubI z$r;19+{MVtkS7dktHrqe6ZJuRv5j?8EJFQx!F~)}a&+^M$a2x0s9R9@SPP&2GWNRv>2PS30|iq^CJUI93uuZd$&S&J$<(FtcuVs3H1RXK z!o`$u>p46v8=n?PuI4Z*vR0|d*+;m{ z3zzx8`lv0h+6~lQ*6(!xu?TU)S)`EpYbqW^>RJ6aw;3VWAN*W=pUzmxKhF+i$AzV~ zgig5YWZ)?tg-58g0K38)=EsVd(?qvb{6fkkH#VO-~~tEJ_?jd7t?DA z3m)f5mGf-csX+_uc0r81DE0=^GF80S!u4YX;LJ86szgzOlA0(?QcYh72eB$*y2_&e zEKAL*9z-h&$TFoDR#&|Q?HX5oB{TmSMeV>JOfr;SsA4G^v?wk!BOxaTkmw2k?)E?- zP0UmTdu9$~8z&4M8o+}kM6$DghLmd8c2yHNE*G+poWQLna$eNWpnm|B>?-y(k|(@` zX1^3S!;4Y0ydzM!(>*&=+cGX4!=Vuw)r~>;cj(=m7GgAD!B5H%Gm}D#FiIcXN^DG9 zr+2|y%65uHQ_}qdJ71%RYfq$~zm0m3{;7`T#32R`u%{x!wzyNwCq(LuvN}2siK0+) zGs&s2)7$HzBUPcmttFrFsKN z9fmisvy5m^rnma?5~H*AKZ%8qZdO3_|6PRO=peyVzCol%iZKp{ayhcUddmPbLp6M} z0k;RNHb`FSX<7?3h${rC(Es95Pdh=yY@F3H;E}DwoB#ymr|bwsB{@5Nc9)$Pzv0w4 zE%xV>GnVP3#RJX>Br9hGfUK*Ut$1!l08ZJkF0sx|MkLsr3~R)Z2|(P#o&ge&AI`B> z__3&aFZ@);FOO5U591F2s)g)L)MU86WDOs%aXbEZ=?OgHI>QE`lFVZ;m5{n|Cwlsr zV3wXNbB`;e3S+y8^Z{w;#5};y?t3zqbbn)e2csR^p5!blQnJ`D&b1i=$Z7Q=RdNpk zTbGF-+0Ic%N%T^=j8N7$cS3RRvA8&Mkg=j6(+~xR;_4ufgGnb0tTAyI$VEhu%UGU* z!7Q@bm)B72by)y81O8;Vt@!`iyV~9+vMcL~uSBwrQb>a$ENP@_cu9`w3r&i%nJ!iV`B6 zW_VYKy0E3$E1)f_ZgbQ*p>l~>8BOE5M^TL5)ftsc}RPKZxcJD0YYN71&}>?yz?_p?4j!QtaNLVOI^6(E|b zT-v;NWfr!-#SRu8mv^b83@xpw+@0T($NQc9N|;@*SA%Dya|hlcz{6_fg422hVJb{W 
zWnhY2as!AUWs-b60xJl!A~KG$<$68PFNt&IVv!;93xyqa#mw@btVE`imA>R#%=S1t z7=Z<>P%N|ZGq8w9{jBW7_t9QV(Yc^AC-T6Pd%Q_*inKQIUX;}H#~M`5Nz2ebh1-7W;>cSX3WDSDQcMJ8%+&W zFLL&Z1I@BA{40(X*bgO6#nWGG&ANq!$eb0vI3NlB$rA%SoS18$p!dj zAD&2~oJa1*Zon2*9JLLu?@u3DRDfO9b$@@YCcgfth@5N?Hg(n**~3_rH3^(6UbKt> zu#`_fu<(2f?+-|)vbQAqpp1R1^W=0QiO8gwTW157lq$Y%enOmbFr}UcXs5|#1@+*J z!M!kHOvQ@WyF^`DJHFyF#!#MlFS-Qf>3rTgxm=zR&Rk|6&btpeJ%#q7qYz6DBzwj~ z4KKCKNxfzze1%_a8Jw-PmPH`o0`(rU3oc=k%Y4$Vrp%^d*(yp}%L|5YiuqKfxM-6O z0_{KE?vVj|^>XL$n7+jzDYpy1L(L+5s6;C%+`$GN`OpXsei+2v!Hxltnip`P7W>yz zi4qgJr9Z#U&)F_4dJ^Aedr4Xl9*5t@=4=e-cV0`@oP z2W*ISe}48`ezi{6(OLZTf@)IS?8iN-ewT);OfWKMr)!b z&NBnYKnf&UsZtW=|K&K(%lW(0(Hz)t=2o;Yy!Btr$Jf7#Ev|6=VUi5kjSO5GM1vWo zE6MVcTp-vp6TQLzx|eiawF#QeRJ^!0@y?U>WHRsq(4@DV4Me-=>0?}vPa!i5o^h2; z;nF&QG{{#Y?O#H?AGGC>LVH%U@uX3jDJN)v1@`_wk8)i1>99yILjF|EI}Z?A2fle2U5_V zAPTK*Y)eX=RaMB;HsK7TVaD?*8KOG=DpwvB^LN|B@Xvs;9EVxxN8nlw&8tB2%1}f+ zNoI`FHv$(kD*zMJ%jk)Vgi1&lIlr@D0D!W7>d+ANRPH23CJ`;?Vo(ojEwuw-!>jB@ z(EiFB6ZA2&M?snV8o@=cA}Zqmn1a+Low3*DX9P}*QUEr;4z#WfstmW5p$DAxLNtLD zWs3E5fz_7S4@tr`ds5g7e|eYrsEXk&l@k`9oqyH<$yUAV zJd8#fzCb)RWR@2X_HQ6foY^%MjpHU|38Izu4u*)#B&qX~{8u!z>zIU1bi{dKz7wE9 z9&4w60J|=D4#0h43Xw26??PCVITU0J2Uk{ZbKH&I{hC~y4HOLKCSjW2EcIY8Bs8u} zkIl!7{lZZ5f(x|2Cl`}BQN^%;|4qx|`gEw^7lw7XE1xcr~u0fc-*`ivV0|0Q|e zYx2L}*}S`1ivMx{hs`Gc<3js)-Aai6ad0|XXBeMy(2qM&ML=y>UYx5&LD0nPIyw^* zBnSn?MP;ExkZ)eS+S%Ls)uM|cGSsiP-fq8qyLDR%*3)!df`;BwmacW1g@q`st?#~T zwQv8a{fAb!UiY}(c>6g07tfFzq5Yh2z5aJ&BeU`siqiNPMH-YEN6FM<*w1lAAK`!|JT9HU~YjpUeI!e*T3U$4mA4xgR*6 zIp_aAxV@WfDVp&h{8voaT4Q$R2^voA11OmR??7 zCLP#L4#%nlYhC{KRsB*?Y9*a2C3{@M{$d;^jvC=Di`?^~ig(FEs7s9ZF<(n>?P4nW zXYgA-pV3RoTRGQZhV5z587jdUDO~32&&=Yd!@9~IfVC+Km@`CfBLGmB1TsFL$>FLj z1*pFj~XnPC6}Zw@^~x1b0D=@o3vL)3UIwl%$3A`t9*WY0@hF~!Xe_x3-usTF29 zg(G+!n;b&puS_{l`JV_oGkYY)%4;1UqvIGv_Vg>hpbCykdMnA48Itck&@RJ;84-88 zl1d~ZIRnky7r=enqVG;?-$*pvGlm1m!cDNAXe6%|lv4AzXa{^7GBbBX} z*C8(G)T?9sOJ=at7&P~;83RJU;!0Eru+$KH(*unIT{F%AIkjP!0zZtaEMaokKVuH- 
zDo9}!*q^psn$Rb}Qf0(bHH5xOw-wQz#pQ`ns9_q(U>ykh0l2D1Yp`63?MJDY(m?4Z znGih|0wQvCTMyu2IBy~a)PRH17kI=lpqK0;-BX5Rb>j%eosTV#26Z5~{4_)m2u2#4 zP)SE{JfPY4!w2i~q25ZtfLtiNEbZn@+7H&#)rak_1YH7W;y}QX5k#qa#gl&yv9U1x zqH9liZ#jnijn_*!*gy8RcMvr@U48oc)eCBXhKORbg&NPRCZOF!5Wp`<6~iZX_d0Wh ziHqbJC$#gzaN!d}xrEdqKX(zL>!^bUVQE*dYh5u0ouTOY$U60;?c*!OZ^0P8C9S?M zbgcb$GxxG~&JRS!hbfKnMF`}e7JfRToD7js!CXY~9|D0Xz$B1YP9@>t^d3CwEMcYl zl%FJ>eVSQ^86CA^rJoU1LO?&CBO1JI1$7;a(i5Ib(sSI19@Cw9fe>ZLpa9cmS>U%o z_RpU=FZcnLmt02emHmUm!O`~d)01bT@#OMv?<>&!j zZHV6EhyBl|aBVHmF{3dW2nV!<42HOWB%Pjldr;IT4Z*nzNn#!NwGBUi=)-*?UJB24 zy5IR)Gzl_tOwl3CQQs!zbM;e`-FwclS|=SpWH~45Xqsc^Y#rrR4Xa9;Q{qEAb?JoA zsLA>iDe>ZNVL?@zSc2jlew + update-rc.d neutron-plugin-openvswitch-agent remove; + sed -i /neutron-plugin-openvswitch-agent/d /opt/service + when: groups['onos']|length !=0 + ignore_errors: True + +- name: shut down and disable Neutron's agent services + service: name=neutron-plugin-openvswitch-agent state=stopped + when: groups['onos']|length !=0 + ignore_errors: True + +- name: remove neutron-l3-agent auto start + shell: > + update-rc.d neutron-l3-agent remove; + sed -i /neutron-l3-agent/d /opt/service + when: inventory_hostname in groups['onos'] + ignore_errors: True + +- name: shut down and disable Neutron's l3 agent services + service: name=neutron-l3-agent state=stopped + when: inventory_hostname in groups['onos'] + ignore_errors: True + +- name: Stop the Open vSwitch service and clear existing OVSDB + shell: > + ovs-vsctl del-br br-int ; + ovs-vsctl del-br br-tun ; + ovs-vsctl del-manager ; + ip link delete onos_port1 type veth peer name onos_port2; + when: groups['onos']|length !=0 + ignore_errors: True + +- name: Install ONOS Cluster on Controller + include: onos_controller.yml + when: inventory_hostname in groups['onos'] + +- name: Install ONOS Cluster on Compute + include: openvswitch.yml + when: groups['onos']|length !=0 +# when: groups['onos']|length !=0 and inventory_hostname not in groups['onos'] + diff --git 
a/ansible/roles/onos_cluster/tasks/onos_controller.yml b/ansible/roles/onos_cluster/tasks/onos_controller.yml new file mode 100755 index 0000000..6d62a2e --- /dev/null +++ b/ansible/roles/onos_cluster/tasks/onos_controller.yml @@ -0,0 +1,155 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: upload onos driver package + unarchive: src=networking-onos.tar dest=/opt/ + +- name: install onos driver + command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh" + +- name: install onos required packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages + +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download oracle-jdk8 package file + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }} + +- name: upload install_jdk8 scripts + unarchive: src=install_jdk8.tar dest=/opt/ + +- name: install install_jdk8 package + command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh" + +- name: create JAVA_HOME environment variable + shell: > + export J2SDKDIR=/usr/lib/jvm/java-8-oracle; + export J2REDIR=/usr/lib/jvm/java-8-oracle/jre; + export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin; + export JAVA_HOME=/usr/lib/jvm/java-8-oracle; + export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db; + +- name: create onos group + group: name=onos system=yes state=present + +- name: create onos user + user: + 
name: onos + group: onos + home: "{{ onos_home }}" + createhome: "yes" + system: "yes" + shell: "/bin/false" + +- name: download onos package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }} + +- name: create new jar repository + command: su -s /bin/sh -c "mkdir ~/.m2" + ignore_errors: True + +- name: download jar repository + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/repository.tar" dest=~/.m2/ + +- name: extract jar repository + command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/" + +- name: extract onos package + command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos + +- name: configure onos service + shell: > + echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options; + echo 'export ONOS_USER=root' >> {{ onos_home }}/options; + mkdir {{ onos_home }}/var; + mkdir {{ onos_home }}/config; + sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf; + cp -rf {{ onos_home }}/init/onos.conf /etc/init/; + cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/; +# notify: +# - restart onos service + +- name: configure onos boot feature + shell: > + sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg; + +#- name: create cluster json +# template: +# src: cluster.json +# dest: "{{ onos_home }}/config/cluster.json" +# notify: +# - restart onos service + +#- name: create tablets json +# template: +# src: tablets.json +# dest: "{{ onos_home }}/config/tablets.json" +# notify: +# - restart onos service + +- name: wait for config time + shell: "sleep 10" + +- name: start onos service + service: name=onos state=started enabled=yes + +- name: wait for restart time + shell: "sleep 60" + +- name: start onos service + service: name=onos state=restarted enabled=yes + +- 
name: wait for onos start time + shell: "sleep 60" + +- name: start onos service + service: name=onos state=restarted enabled=yes + +- name: wait for onos start time + shell: "sleep 100" + +- name: add onos auto start + shell: > + echo "onos">>/opt/service + +########################################################################################################## +################################ ONOS connect with OpenStack ################################ +########################################################################################################## +#- name: Run OpenVSwitch Script +# include: openvswitch.yml + +- name: Configure Neutron1 + shell: > + crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins onos_router; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan + +- name: Create ML2 Configuration File + template: + src: ml2_conf.sh + dest: "/opt/ml2_conf.sh" + mode: 0777 + +- name: Configure Neutron2 + command: su -s /bin/sh -c "/opt/ml2_conf.sh;" + + +- name: Configure Neutron3 + shell: > + mysql -e "drop database if exists neutron_ml2;"; + mysql -e "create database neutron_ml2 character set utf8;"; + mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"; + su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron; + +- name: Restart neutron-server + service: name=neutron-server state=restarted diff --git a/ansible/roles/onos_cluster/tasks/openvswitch.yml b/ansible/roles/onos_cluster/tasks/openvswitch.yml new file mode 100755 index 0000000..47f0f6e --- /dev/null +++ b/ansible/roles/onos_cluster/tasks/openvswitch.yml @@ -0,0 +1,103 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI 
TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- name: remove neutron-plugin-openvswitch-agent auto start +# shell: > +# update-rc.d neutron-plugin-openvswitch-agent remove; +# sed -i /neutron-plugin-openvswitch-agent/d /opt/service + +#- name: shut down and disable Neutron's agent services +# service: name=neutron-plugin-openvswitch-agent state=stopped + +#- name: Stop the Open vSwitch service and clear existing OVSDB +# shell: > +# ovs-vsctl del-br br-int ; +# ovs-vsctl del-br br-tun ; +# ovs-vsctl del-manager ; + +#- name: get image http server +# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf +# register: http_server +# +#- name: download ovs +# get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/openvswitch.tar" dest=/opt/openvswitch.tar +# +#- name: extract ovs +# command: su -s /bin/sh -c "tar xvf /opt/openvswitch.tar -C /opt/" +# +#- name: update ovs +# shell: > +# cd /opt/openvswitch; +# dpkg -i openvswitch-common_2.3.0-1_amd64.deb; +# dpkg -i openvswitch-switch_2.3.0-1_amd64.deb; + +#- name: start up onos-external nic +# command: su -s /bin/sh -c "ifconfig eth2 0 up" +- name: set veth port + shell: > + ip link add onos_port1 type veth peer name onos_port2; + ifconfig onos_port1 up; + ifconfig onos_port2 up; + ignore_errors: True + +- name: set veth to ovs + shell: > + export externamMac=`ifconfig eth1 | grep -Eo '\<[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'`; + ifconfig onos_port2 hw ether $externamMac; + ovs-vsctl add-port br-prv onos_port1; + ignore_errors: True + +#- name: wait for onos start time +# shell: "sleep 200" + +- name: add ovsdatabase feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 
'feature:install onos-ovsdatabase'"; + when: inventory_hostname == groups['onos'][0] + +- name: add openflow-base feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'"; + when: inventory_hostname in groups['onos'] + +- name: add openflow feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'"; + when: inventory_hostname in groups['onos'] + +- name: add vtn feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'"; + when: inventory_hostname in groups['onos'] + +- name: set public eth card start + command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'" + when: inventory_hostname in groups['onos'] + +- name: Set ONOS as the manager + command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;" + +- name: create public network + shell: > + export OS_PASSWORD=console; + export OS_TENANT_NAME=admin; + export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0; + export OS_USERNAME=ADMIN; + neutron net-create ext-net --shared --router:external=True; + neutron subnet-create ext-net {{ public_net_info.floating_ip_cidr }} --name ext-subnet --allocation-pool start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }}; + when: inventory_hostname == groups['controller'][0] + +- name: set gateway mac address + shell: > + ping -c 1 {{ ansible_default_ipv4.gateway }}; + gatewayMac=`arp -a {{ ansible_default_ipv4.gateway }} | awk '{print $4}'`; + /opt/onos/bin/onos "externalgateway-update -m $gatewayMac"; + when: inventory_hostname in groups['onos'] + +- name: delete default gateway + shell: > + route delete default; + when: inventory_hostname not in groups['onos'] + ignore_errors: True diff --git a/ansible/roles/onos_cluster/templates/cluster.json b/ansible/roles/onos_cluster/templates/cluster.json new file mode 100755 index 0000000..5982c43 --- /dev/null +++ 
b/ansible/roles/onos_cluster/templates/cluster.json @@ -0,0 +1,10 @@ +{ "ipPrefix": "{{ ip_settings[groups['onos'][0]]['mgmt']['cidr'] }}", + "nodes":[ +{% for host in groups['onos'] %} + {% if loop.last %} + { "id": "{{ ip_settings[host]['mgmt']['ip'] }}", "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", "tcpPort": 9876 } + {% else %} + { "id": "{{ ip_settings[host]['mgmt']['ip'] }}", "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", "tcpPort": 9876 }, + {% endif %} +{% endfor %} +]} diff --git a/ansible/roles/onos_cluster/templates/ml2_conf.sh b/ansible/roles/onos_cluster/templates/ml2_conf.sh new file mode 100755 index 0000000..8af03df --- /dev/null +++ b/ansible/roles/onos_cluster/templates/ml2_conf.sh @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +cat <> /etc/neutron/plugins/ml2/ml2_conf.ini +[onos] +password = admin +username = admin +url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn +EOT + diff --git a/ansible/roles/onos_cluster/templates/tablets.json b/ansible/roles/onos_cluster/templates/tablets.json new file mode 100755 index 0000000..f71be71 --- /dev/null +++ b/ansible/roles/onos_cluster/templates/tablets.json @@ -0,0 +1,63 @@ +{ + "nodes": [ +{% for host in groups['onos'] %} + {% if loop.last %} + { + "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", + "id": "{{ ip_settings[host]['mgmt']['ip'] }}", + "tcpPort": 9876 + } + {% else %} + { + "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", + "id": "{{ ip_settings[host]['mgmt']['ip'] }}", + "tcpPort": 9876 + }, + {% endif %} +{% endfor %} + ], + 
"partitions": { + {% set key = 1 %} + {% for host in groups['onos'] %} + {% if loop.last %} + "p{{ key }}":[ + {% for host in groups['onos'] %} + {% if loop.last %} + { + "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", + "id": "{{ ip_settings[host]['mgmt']['ip'] }}", + "tcpPort": 9876 + } + {% else %} + { + "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", + "id": "{{ ip_settings[host]['mgmt']['ip'] }}", + "tcpPort": 9876 + }, + {% endif %} + {% endfor %} + ] + {% set key = key + 1 %} + {% else %} + "p{{ key }}":[ + {% for host in groups['onos'] %} + {% if loop.last %} + { + "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", + "id": "{{ ip_settings[host]['mgmt']['ip'] }}", + "tcpPort": 9876 + } + {% else %} + { + "ip": "{{ ip_settings[host]['mgmt']['ip'] }}", + "id": "{{ ip_settings[host]['mgmt']['ip'] }}", + "tcpPort": 9876 + }, + {% endif %} + {% endfor %} + ], + {% set key = key + 1 %} + {% endif %} + {% endfor %} +} +} diff --git a/ansible/roles/onos_cluster/vars/Debian.yml b/ansible/roles/onos_cluster/vars/Debian.yml new file mode 100755 index 0000000..59a4dbd --- /dev/null +++ b/ansible/roles/onos_cluster/vars/Debian.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - software-properties-common + - crudini + +services: [] diff --git a/ansible/roles/onos_cluster/vars/RedHat.yml b/ansible/roles/onos_cluster/vars/RedHat.yml new file mode 100755 index 0000000..59a4dbd --- /dev/null +++ b/ansible/roles/onos_cluster/vars/RedHat.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - software-properties-common + - crudini + +services: [] diff --git a/ansible/roles/onos_cluster/vars/main.yml b/ansible/roles/onos_cluster/vars/main.yml new file mode 100755 index 0000000..1cbc070 --- /dev/null +++ b/ansible/roles/onos_cluster/vars/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +onos_pkg_url: http://downloads.onosproject.org/release/onos-1.3.0.tar.gz +onos_pkg_name: onos-1.3.0.tar.gz +onos_home: /opt/onos/ +karaf_dist: apache-karaf-3.0.3 +jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz +onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base,onos-openflow,onos-ovsdatabase, onos-app-vtn-onosfw diff --git a/ansible/roles/open-contrail/files/provision/cacert.pem b/ansible/roles/open-contrail/files/provision/cacert.pem new file mode 100755 index 0000000..66f82c5 --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/cacert.pem @@ -0,0 +1,70 @@ +Certificate: + Data: + Version: 1 (0x0) + Serial Number: 1 (0x1) + Signature Algorithm: md5WithRSAEncryption + Issuer: C=US, ST=CA, O=Open vSwitch, OU=switchca, CN=OVS switchca CA Certificate (2015 Sep 15 13:35:47) + Validity + Not Before: Sep 15 04:35:47 2015 GMT + Not After : Sep 12 04:35:47 2025 GMT + Subject: C=US, ST=CA, O=Open vSwitch, OU=switchca, CN=OVS switchca CA Certificate (2015 Sep 15 13:35:47) + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:98:04:9b:9f:2e:e2:0b:4a:59:9d:00:74:dc:b4: + cb:fc:8d:c4:7d:32:35:e5:1c:ee:94:f0:13:e6:54: + 1c:2e:47:47:f0:bd:f2:7f:ae:cb:6a:2f:ec:74:5c: + 14:39:80:bf:7b:d1:83:90:ec:7a:7d:02:8c:fc:67: + de:99:53:69:1f:5c:61:d5:0a:7f:93:df:02:d4:16: + d3:55:b8:28:5c:fd:32:5b:6c:af:03:c1:23:92:00: + 0e:2b:eb:32:07:00:99:64:14:32:e4:f8:76:b3:06: + e1:d0:54:5a:fc:92:cd:5e:e5:b7:85:43:9e:b8:79: + e4:23:a6:3c:0c:42:78:f4:d3:7e:33:1c:f2:5a:24: + ac:24:61:2f:72:b3:b1:e7:99:4e:ef:2d:85:26:de: + 
b6:59:16:25:1a:65:ce:95:9c:fd:c7:3c:30:44:1d: + 4c:3b:34:dd:8d:ad:1f:ee:06:8e:b1:2d:b1:bb:a6: + 68:62:52:98:c2:2d:a3:14:75:a7:5f:24:10:4f:74: + 4f:94:0b:61:bd:c5:f1:6b:78:fa:48:89:27:3b:04: + 4d:25:50:d1:4f:63:3d:4b:3c:cc:fa:df:20:f1:0c: + 3f:1d:44:9d:c2:3e:d4:12:07:72:a4:6a:11:03:2f: + 1d:71:d5:b2:de:b4:a6:d8:ad:7a:ac:c9:c7:8e:12: + 4d:47 + Exponent: 65537 (0x10001) + Signature Algorithm: md5WithRSAEncryption + 28:3f:32:46:dd:a9:c0:30:46:9a:29:ec:90:36:14:aa:a7:0c: + dc:67:a0:ec:81:dc:f9:34:35:c5:e4:9b:48:dd:c6:5a:ed:30: + 78:99:6c:32:8c:60:59:ab:dc:7a:86:bb:94:8b:98:db:62:33: + bd:4f:16:40:50:12:db:e9:b6:0c:f2:0b:0d:90:9d:b7:7a:ae: + b4:36:46:33:c5:ea:6a:37:ec:fe:6e:12:f1:98:10:89:48:fe: + 8a:68:11:1c:96:37:92:d9:cc:8a:ef:93:c3:53:6c:61:f7:f0: + 0b:2c:78:49:8e:e3:19:46:2b:1d:1c:65:c5:d9:6d:5d:04:54: + e7:e0:c7:aa:49:78:7d:2d:35:11:7e:05:b1:47:e4:96:39:97: + b5:5b:2b:6e:06:51:86:32:85:6a:7b:5f:63:08:85:31:6e:c3: + 12:0e:a0:ad:3a:d0:3f:db:e2:1b:6d:24:3a:bb:e7:61:5b:ba: + 1f:34:eb:34:07:e5:09:fe:0b:ba:76:48:49:6e:57:d4:14:76: + 11:af:52:39:9e:73:a7:e3:2a:5a:5c:fa:79:d7:7f:81:fd:80: + a7:d4:92:07:ef:a6:05:60:f9:b4:81:cb:8e:cb:b5:9e:2c:5d: + 40:fb:dc:c1:63:95:82:0b:2f:aa:8c:38:1d:96:63:ed:c9:1b: + ce:d2:d2:e7 +-----BEGIN CERTIFICATE----- +MIIDeDCCAmACAQEwDQYJKoZIhvcNAQEEBQAwgYExCzAJBgNVBAYTAlVTMQswCQYD +VQQIEwJDQTEVMBMGA1UEChMMT3BlbiB2U3dpdGNoMREwDwYDVQQLEwhzd2l0Y2hj +YTE7MDkGA1UEAxMyT1ZTIHN3aXRjaGNhIENBIENlcnRpZmljYXRlICgyMDE1IFNl +cCAxNSAxMzozNTo0NykwHhcNMTUwOTE1MDQzNTQ3WhcNMjUwOTEyMDQzNTQ3WjCB +gTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUwEwYDVQQKEwxPcGVuIHZTd2l0 +Y2gxETAPBgNVBAsTCHN3aXRjaGNhMTswOQYDVQQDEzJPVlMgc3dpdGNoY2EgQ0Eg +Q2VydGlmaWNhdGUgKDIwMTUgU2VwIDE1IDEzOjM1OjQ3KTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAJgEm58u4gtKWZ0AdNy0y/yNxH0yNeUc7pTwE+ZU +HC5HR/C98n+uy2ov7HRcFDmAv3vRg5Dsen0CjPxn3plTaR9cYdUKf5PfAtQW01W4 +KFz9MltsrwPBI5IADivrMgcAmWQUMuT4drMG4dBUWvySzV7lt4VDnrh55COmPAxC +ePTTfjMc8lokrCRhL3KzseeZTu8thSbetlkWJRplzpWc/cc8MEQdTDs03Y2tH+4G 
+jrEtsbumaGJSmMItoxR1p18kEE90T5QLYb3F8Wt4+kiJJzsETSVQ0U9jPUs8zPrf +IPEMPx1EncI+1BIHcqRqEQMvHXHVst60ptiteqzJx44STUcCAwEAATANBgkqhkiG +9w0BAQQFAAOCAQEAKD8yRt2pwDBGminskDYUqqcM3Geg7IHc+TQ1xeSbSN3GWu0w +eJlsMoxgWavceoa7lIuY22IzvU8WQFAS2+m2DPILDZCdt3qutDZGM8Xqajfs/m4S +8ZgQiUj+imgRHJY3ktnMiu+Tw1NsYffwCyx4SY7jGUYrHRxlxdltXQRU5+DHqkl4 +fS01EX4FsUfkljmXtVsrbgZRhjKFantfYwiFMW7DEg6grTrQP9viG20kOrvnYVu6 +HzTrNAflCf4LunZISW5X1BR2Ea9SOZ5zp+MqWlz6edd/gf2Ap9SSB++mBWD5tIHL +jsu1nixdQPvcwWOVggsvqow4HZZj7ckbztLS5w== +-----END CERTIFICATE----- diff --git a/ansible/roles/open-contrail/files/provision/compute.filters.patch b/ansible/roles/open-contrail/files/provision/compute.filters.patch new file mode 100755 index 0000000..04bf42f --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/compute.filters.patch @@ -0,0 +1,14 @@ +*** a/compute.filters Mon Sep 28 15:13:48 2015 +--- b/compute.filters Mon Sep 28 15:16:06 2015 +*************** +*** 83,88 **** +--- 83,91 ---- + # nova/network/linux_net.py: 'ovs-vsctl', .... + ovs-vsctl: CommandFilter, ovs-vsctl, root + ++ # nova/virt/libvirt/vif.py: 'vrouter-port-control', ... ++ vrouter-port-control: CommandFilter, vrouter-port-control, root ++ + # nova/network/linux_net.py: 'ovs-ofctl', .... 
+ ovs-ofctl: CommandFilter, ovs-ofctl, root + diff --git a/ansible/roles/open-contrail/files/provision/model.py.patch b/ansible/roles/open-contrail/files/provision/model.py.patch new file mode 100755 index 0000000..7f7f7c6 --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/model.py.patch @@ -0,0 +1,12 @@ +*** a/model.py Mon Sep 28 15:05:29 2015 +--- b/model.py Mon Sep 28 15:17:32 2015 +*************** +*** 39,44 **** +--- 39,45 ---- + VIF_TYPE_HW_VEB = 'hw_veb' + VIF_TYPE_MLNX_DIRECT = 'mlnx_direct' + VIF_TYPE_MIDONET = 'midonet' ++ VIF_TYPE_VROUTER = 'vrouter' + VIF_TYPE_OTHER = 'other' + + # Constants for dictionary keys in the 'vif_details' field in the VIF diff --git a/ansible/roles/open-contrail/files/provision/test_vif.py.patch b/ansible/roles/open-contrail/files/provision/test_vif.py.patch new file mode 100755 index 0000000..3e12c72 --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/test_vif.py.patch @@ -0,0 +1,70 @@ +*** a/test_vif.py Mon Sep 28 15:12:56 2015 +--- b/test_vif.py Mon Sep 28 15:19:20 2015 +*************** +*** 235,240 **** +--- 235,253 ---- + subnets=[subnet_bridge_4], + interface='eth0') + ++ network_vrouter = network_model.Network(id='network-id-xxx-yyy-zzz', ++ label=None, ++ bridge=None, ++ subnets=[subnet_bridge_4, ++ subnet_bridge_6], ++ interface='eth0') ++ ++ vif_vrouter = network_model.VIF(id='vif-xxx-yyy-zzz', ++ address='ca:fe:de:ad:be:ef', ++ network=network_vrouter, ++ type=network_model.VIF_TYPE_VROUTER, ++ devname='tap-xxx-yyy-zzz') ++ + vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz', + address='ca:fe:de:ad:be:ef', + network=network_mlnx, +*************** +*** 796,801 **** +--- 809,851 ---- + self.vif_mlnx) + self.assertEqual(0, execute.call_count) + ++ def test_unplug_vrouter_with_details(self): ++ d = vif.LibvirtGenericVIFDriver() ++ with mock.patch.object(utils, 'execute') as execute: ++ d.unplug_vrouter(None, self.vif_vrouter) ++ execute.assert_called_once_with( ++ 'vrouter-port-control', ++ 
'--oper=delete --uuid=vif-xxx-yyy-zzz', ++ run_as_root=True) ++ ++ def test_plug_vrouter_with_details(self): ++ d = vif.LibvirtGenericVIFDriver() ++ instance = mock.Mock() ++ instance.name = 'instance-name' ++ instance.uuid = '46a4308b-e75a-4f90-a34a-650c86ca18b2' ++ instance.project_id = 'b168ea26fa0c49c1a84e1566d9565fa5' ++ instance.display_name = 'instance1' ++ with mock.patch.object(utils, 'execute') as execute: ++ d.plug_vrouter(instance, self.vif_vrouter) ++ execute.assert_has_calls([ ++ mock.call('ip', 'tuntap', 'add', 'tap-xxx-yyy-zzz', 'mode', ++ 'tap', run_as_root=True, check_exit_code=[0, 2, 254]), ++ mock.call('ip', 'link', 'set', 'tap-xxx-yyy-zzz', 'up', ++ run_as_root=True, check_exit_code=[0, 2, 254]), ++ mock.call('vrouter-port-control', ++ '--oper=add --uuid=vif-xxx-yyy-zzz ' ++ '--instance_uuid=46a4308b-e75a-4f90-a34a-650c86ca18b2 ' ++ '--vn_uuid=network-id-xxx-yyy-zzz ' ++ '--vm_project_uuid=b168ea26fa0c49c1a84e1566d9565fa5 ' ++ '--ip_address=0.0.0.0 ' ++ '--ipv6_address=None ' ++ '--vm_name=instance1 ' ++ '--mac=ca:fe:de:ad:be:ef ' ++ '--tap_name=tap-xxx-yyy-zzz ' ++ '--port_type=NovaVMPort ' ++ '--tx_vlan_id=-1 ' ++ '--rx_vlan_id=-1', run_as_root=True)]) ++ + def test_ivs_ethernet_driver(self): + d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) + self._check_ivs_ethernet_driver(d, diff --git a/ansible/roles/open-contrail/files/provision/vif.py.patch b/ansible/roles/open-contrail/files/provision/vif.py.patch new file mode 100755 index 0000000..103f084 --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/vif.py.patch @@ -0,0 +1,91 @@ +*** a/vif.py Mon Sep 28 15:13:30 2015 +--- b/vif.py Mon Sep 28 15:21:30 2015 +*************** +*** 332,337 **** +--- 332,347 ---- + + return conf + ++ def get_config_vrouter(self, instance, vif, image_meta, ++ inst_type, virt_type): ++ conf = self.get_base_config(instance, vif, image_meta, ++ inst_type, virt_type) ++ dev = self.get_vif_devname(vif) ++ 
designer.set_vif_host_backend_ethernet_config(conf, dev) ++ ++ designer.set_vif_bandwidth_config(conf, inst_type) ++ return conf ++ + def get_config(self, instance, vif, image_meta, + inst_type, virt_type): + vif_type = vif['type'] +*************** +*** 526,531 **** +--- 536,580 ---- + except processutils.ProcessExecutionError: + LOG.exception(_LE("Failed while plugging vif"), instance=instance) + ++ def plug_vrouter(self, instance, vif): ++ """Plug into Contrail's network port ++ Bind the vif to a Contrail virtual port. ++ """ ++ dev = self.get_vif_devname(vif) ++ ip_addr = '0.0.0.0' ++ ip6_addr = None ++ subnets = vif['network']['subnets'] ++ for subnet in subnets: ++ if not subnet['ips']: ++ continue ++ ips = subnet['ips'][0] ++ if not ips['address']: ++ continue ++ if (ips['version'] == 4): ++ if ips['address'] is not None: ++ ip_addr = ips['address'] ++ if (ips['version'] == 6): ++ if ips['address'] is not None: ++ ip6_addr = ips['address'] ++ ++ ptype = 'NovaVMPort' ++ if (cfg.CONF.libvirt.virt_type == 'lxc'): ++ ptype = 'NameSpacePort' ++ ++ cmd_args = ("--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s " ++ "--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s" ++ " --vm_name=%s --mac=%s --tap_name=%s --port_type=%s " ++ "--tx_vlan_id=%d --rx_vlan_id=%d" % (vif['id'], ++ instance.uuid, vif['network']['id'], ++ instance.project_id, ip_addr, ip6_addr, ++ instance.display_name, vif['address'], ++ vif['devname'], ptype, -1, -1)) ++ try: ++ linux_net.create_tap_dev(dev) ++ utils.execute('vrouter-port-control', cmd_args, run_as_root=True) ++ except processutils.ProcessExecutionError: ++ LOG.exception(_LE("Failed while plugging vif"), instance=instance) ++ + def plug(self, instance, vif): + vif_type = vif['type'] + +*************** +*** 679,684 **** +--- 728,746 ---- + LOG.exception(_LE("Failed while unplugging vif"), + instance=instance) + ++ def unplug_vrouter(self, instance, vif): ++ """Unplug Contrail's network port ++ Unbind the vif from a Contrail 
virtual port. ++ """ ++ dev = self.get_vif_devname(vif) ++ cmd_args = ("--oper=delete --uuid=%s" % (vif['id'])) ++ try: ++ utils.execute('vrouter-port-control', cmd_args, run_as_root=True) ++ linux_net.delete_net_dev(dev) ++ except processutils.ProcessExecutionError: ++ LOG.exception( ++ _LE("Failed while unplugging vif"), instance=instance) ++ + def unplug(self, instance, vif): + vif_type = vif['type'] + diff --git a/ansible/roles/open-contrail/files/provision/vtep-cert.pem b/ansible/roles/open-contrail/files/provision/vtep-cert.pem new file mode 100755 index 0000000..dc354d3 --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/vtep-cert.pem @@ -0,0 +1,70 @@ +Certificate: + Data: + Version: 1 (0x0) + Serial Number: 2 (0x2) + Signature Algorithm: md5WithRSAEncryption + Issuer: C=US, ST=CA, O=Open vSwitch, OU=switchca, CN=OVS switchca CA Certificate (2015 Sep 15 13:35:47) + Validity + Not Before: Sep 15 04:36:00 2015 GMT + Not After : Sep 12 04:36:00 2025 GMT + Subject: C=US, ST=CA, O=Open vSwitch, OU=Open vSwitch certifier, CN=vtep id:b55b8c06-9593-4406-8a85-f7edd09a1ea9 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ca:57:ec:4d:a3:79:6c:a4:cd:21:c7:52:a8:9f: + 61:85:ee:a5:91:79:4a:f3:80:ac:1b:ac:1a:6d:0b: + 96:b9:cf:1f:a6:23:1f:45:ff:62:de:35:8f:e8:8d: + 4a:63:23:70:d5:1e:78:72:86:04:08:e2:fd:66:04: + e0:1e:ce:57:03:98:f7:a5:92:5a:f1:cc:3c:24:37: + 22:4e:97:0d:65:4b:98:08:5b:cd:1c:eb:67:f5:9c: + c0:ba:86:94:2a:15:dc:5d:47:6e:45:49:03:62:a3: + 37:5f:54:58:42:49:6d:a3:4c:c6:21:f6:08:36:8c: + 69:20:6a:f8:7c:5d:82:30:14:1a:15:ad:b9:42:ba: + 5d:13:99:e2:6f:aa:10:e4:e1:25:58:90:66:a7:e7: + bc:c7:e4:5c:79:2a:1b:b2:b3:d1:7b:4d:78:a6:28: + 66:bc:ee:97:6b:b4:3d:a0:65:16:10:04:fb:e9:4e: + 82:ac:88:c2:6a:a4:0e:d6:e5:ad:ee:bc:50:a7:73: + 97:6d:12:96:46:cb:ee:4d:15:ad:d4:a3:b5:95:82: + 2e:e7:1b:69:70:1d:b5:c9:06:47:44:2b:55:84:23: + 5b:75:56:86:c4:a7:b9:1d:46:9e:fa:8a:a5:dc:f9: + 
70:16:6a:87:ee:20:1b:02:d1:2d:83:65:e0:7c:24: + 99:e9 + Exponent: 65537 (0x10001) + Signature Algorithm: md5WithRSAEncryption + 50:bf:af:aa:b5:a7:3c:67:2e:34:92:8a:b8:cc:b9:96:a8:b8: + 16:cd:d5:5d:d3:b6:1c:44:b4:08:c5:89:ea:17:97:88:a4:e4: + 89:b9:69:2b:71:36:77:05:dc:0a:50:fe:2d:8f:8c:72:a5:b9: + b1:45:23:0d:d3:7a:80:c8:9e:66:74:e2:42:ee:96:19:e5:88: + 3d:e3:ea:3c:d4:51:1e:e0:34:1f:0c:d3:9a:f7:99:9b:af:0b: + 23:57:87:f0:dc:8c:32:1c:e9:63:65:f3:cd:e5:22:ed:ea:fe: + 4f:be:0e:23:0d:8e:3e:09:aa:5e:20:2b:1a:4f:70:92:4a:a9: + 24:6e:a0:c6:86:b5:14:7d:52:71:cf:b8:5c:75:d4:6a:92:06: + 30:cf:71:72:ff:44:63:22:10:79:38:53:ec:6f:19:3d:63:92: + 69:3f:f2:f4:28:d4:ef:dd:af:32:84:c5:a0:c0:c9:5f:1f:02: + 47:76:bd:85:85:4e:7c:58:61:1a:ce:4c:03:45:d7:5c:dd:59: + 6c:22:e0:cb:2c:2d:b1:44:4c:03:dd:21:ff:58:6e:f7:09:4f: + 34:e0:24:3a:67:b1:33:ae:4a:bc:85:db:4b:12:ef:21:66:6a: + f0:b9:ea:90:72:b1:0b:34:9a:8d:be:f3:d1:02:56:0f:d7:bb: + 0a:eb:c2:f1 +-----BEGIN CERTIFICATE----- +MIIDgDCCAmgCAQIwDQYJKoZIhvcNAQEEBQAwgYExCzAJBgNVBAYTAlVTMQswCQYD +VQQIEwJDQTEVMBMGA1UEChMMT3BlbiB2U3dpdGNoMREwDwYDVQQLEwhzd2l0Y2hj +YTE7MDkGA1UEAxMyT1ZTIHN3aXRjaGNhIENBIENlcnRpZmljYXRlICgyMDE1IFNl +cCAxNSAxMzozNTo0NykwHhcNMTUwOTE1MDQzNjAwWhcNMjUwOTEyMDQzNjAwWjCB +iTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUwEwYDVQQKEwxPcGVuIHZTd2l0 +Y2gxHzAdBgNVBAsTFk9wZW4gdlN3aXRjaCBjZXJ0aWZpZXIxNTAzBgNVBAMTLHZ0 +ZXAgaWQ6YjU1YjhjMDYtOTU5My00NDA2LThhODUtZjdlZGQwOWExZWE5MIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAylfsTaN5bKTNIcdSqJ9hhe6lkXlK +84CsG6wabQuWuc8fpiMfRf9i3jWP6I1KYyNw1R54coYECOL9ZgTgHs5XA5j3pZJa +8cw8JDciTpcNZUuYCFvNHOtn9ZzAuoaUKhXcXUduRUkDYqM3X1RYQklto0zGIfYI +NoxpIGr4fF2CMBQaFa25QrpdE5nib6oQ5OElWJBmp+e8x+RceSobsrPRe014pihm +vO6Xa7Q9oGUWEAT76U6CrIjCaqQO1uWt7rxQp3OXbRKWRsvuTRWt1KO1lYIu5xtp +cB21yQZHRCtVhCNbdVaGxKe5HUae+oql3PlwFmqH7iAbAtEtg2XgfCSZ6QIDAQAB +MA0GCSqGSIb3DQEBBAUAA4IBAQBQv6+qtac8Zy40koq4zLmWqLgWzdVd07YcRLQI +xYnqF5eIpOSJuWkrcTZ3BdwKUP4tj4xypbmxRSMN03qAyJ5mdOJC7pYZ5Yg94+o8 
+1FEe4DQfDNOa95mbrwsjV4fw3IwyHOljZfPN5SLt6v5Pvg4jDY4+CapeICsaT3CS +SqkkbqDGhrUUfVJxz7hcddRqkgYwz3Fy/0RjIhB5OFPsbxk9Y5JpP/L0KNTv3a8y +hMWgwMlfHwJHdr2FhU58WGEazkwDRddc3VlsIuDLLC2xREwD3SH/WG73CU804CQ6 +Z7Ezrkq8hdtLEu8hZmrwueqQcrELNJqNvvPRAlYP17sK68Lx +-----END CERTIFICATE----- diff --git a/ansible/roles/open-contrail/files/provision/vtep-privkey.pem b/ansible/roles/open-contrail/files/provision/vtep-privkey.pem new file mode 100755 index 0000000..673f424 --- /dev/null +++ b/ansible/roles/open-contrail/files/provision/vtep-privkey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAylfsTaN5bKTNIcdSqJ9hhe6lkXlK84CsG6wabQuWuc8fpiMf +Rf9i3jWP6I1KYyNw1R54coYECOL9ZgTgHs5XA5j3pZJa8cw8JDciTpcNZUuYCFvN +HOtn9ZzAuoaUKhXcXUduRUkDYqM3X1RYQklto0zGIfYINoxpIGr4fF2CMBQaFa25 +QrpdE5nib6oQ5OElWJBmp+e8x+RceSobsrPRe014pihmvO6Xa7Q9oGUWEAT76U6C +rIjCaqQO1uWt7rxQp3OXbRKWRsvuTRWt1KO1lYIu5xtpcB21yQZHRCtVhCNbdVaG +xKe5HUae+oql3PlwFmqH7iAbAtEtg2XgfCSZ6QIDAQABAoIBAQCKDMya98J7PkD6 +H8ykYQEfaH+rrc5WLd6+joAFD9gI82hLaEEI98HTi0Wgyu0KkH6F2OEieY69JWjv +NrpWKj8xpCap3x2PROFvb/JHHkW0a4vRgBiD95QY/ZZ8bB8gS4PqXDa+rJ7TqDm6 +H4iLyR81P8caGorl9Iww4uqfpwiQlZ7A/dMexufQgMQXKqDXSKk+TJ36CBRJyLlk +U6GrHIF9obHZyGelNhkkMu/czT54U/gKiufL5tYpOVyjCr8H2a713ovEfYzEFxJq +Z8C0ySIskXsyhZ/pC0+pviMB2R20Nh8kRXiKCvNNbFShEMujB5gUVo7rqUZKFKMz +FCfbcXrRAoGBAPeRwU5zU5nbiSQlB7YQibtFC/sMDzbbOjulN46UeDvkcVh80j4r +FIPYLAPvA/e9OtRV89B6Tc7rZSWYZotszvJVlObs0/ll+L1pUX7PAEligoZCXufR +GUyT0gZunGO8+FEgYIu89S1xN77WIbqopjjEyGQJeN2UX9bPo9AGTU0VAoGBANE7 +5nwtdsR1hjgxBqzgAFEFqCggHR+D050OtQgkLjHkXRT1uHeJZZu4D0x6vEnJknYi +/OCujz196KLDGEQbREIdARtgemy07GoJuBXTwPuvbkw9vjoqDrIKVtMeTf4HSyzO +2ej2pm280A/VI6GyahDIFSUZmFBqMeTUzB5UXNaFAoGAe61RCMQMa7yE0o29QHMa +m3du+MeZgioa+VkcXBpHxoPlK/OPhIc5BHSl6IErVkQuc41M9EVlQY3PRezQra55 +5A5lCMgfTWRn0xgeIl9/ISoZUsEtcFnBbcQbFCOF9T2eP8kQ8j4/raf11VxcFUfT +YmDMS02AGBHbnxC0IWREkdECgYBDaXQyEAfS9jZ/RjRrYGRZtmPeQbKAY927HXDw +JZAInRXsWdrMEKV/DUdIkca2U05v54fn7/XQjw9z2T2pO8u7LVMc+fGXspb09xqr 
+VaU4seXshHwUi1ZewHwG2x2vubPbxO1qZIVsl8fFQhuPzkbkD0LYyC1Nw1k9692z +6+RZbQKBgH/6OqqsHLnpzQD0drcOjbXws53g3/eECPXCMNzzw0AiSkyrGWzSonMD ++uSMrG0f7DwvHxZ09bn4qqFqCE7yhoCWWUYSBZKEDYzpxTq9krahPmaJznaBXyFi +K2rfym1sYEZvDT9nS5TROtiIW0uANHOjI9yw+a8TQEyQu8CH2/C0 +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/open-contrail/files/recover_network_opencontrail.py b/ansible/roles/open-contrail/files/recover_network_opencontrail.py new file mode 100755 index 0000000..e829b65 --- /dev/null +++ b/ansible/roles/open-contrail/files/recover_network_opencontrail.py @@ -0,0 +1,33 @@ +import yaml +import netaddr +import os +import log as logging + +LOG = logging.getLogger("net-recover-opencontrail") +config_path = os.path.join(os.path.dirname(__file__), "network.cfg") + +def setup_bondings(bond_mappings): + print bond_mappings + +def setup_ips_new(config): + LOG.info("setup_ips_new enter") + network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"]) + intf_name = config["provider_net_mappings"][0]["interface"] + cmd = "ip addr add %s/%s brd %s dev %s;" \ + % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), intf_name) + #cmd = "ip link set br-ex up;" + #cmd += "ip addr add %s/%s brd %s dev %s;" \ + # % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), 'br-ex') + cmd += "route del default;" + cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], intf_name) + #cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], 'br-ex') + LOG.info("setup_ips_new: cmd=%s" % cmd) + os.system(cmd) + + +def main(config): + setup_ips_new(config) + +if __name__ == "__main__": + config = yaml.load(open(config_path)) + main(config) diff --git a/ansible/roles/open-contrail/files/setup_networks_opencontrail.py b/ansible/roles/open-contrail/files/setup_networks_opencontrail.py new file mode 100755 index 0000000..fd7795a 
--- /dev/null +++ b/ansible/roles/open-contrail/files/setup_networks_opencontrail.py @@ -0,0 +1,107 @@ +import yaml +import netaddr +import os +import log as logging + +LOG = logging.getLogger("net-init-opencontrail") +config_path = os.path.join(os.path.dirname(__file__), "network.cfg") + +def setup_bondings(bond_mappings): + print bond_mappings + +def add_vlan_link(interface, ifname, vlan_id): + LOG.info("add_vlan_link enter") + cmd = "ip link add link %s name %s type vlan id %s; " % (ifname, interface, vlan_id) + cmd += "ip link set %s up; ip link set %s up" % (interface, ifname) + LOG.info("add_vlan_link: cmd=%s" % cmd) + os.system(cmd) + +#def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None): +# LOG.info("add_ovs_port enter") +# cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname) +# if vlan_id: +# cmd += " tag=%s" % vlan_id +# cmd += " -- set Interface %s type=internal;" % ifname +# cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \ +# % (ifname, uplink) +# cmd += "ip link set %s up;" % ifname +# LOG.info("add_ovs_port: cmd=%s" % cmd) +# os.system(cmd) + +def setup_intfs(sys_intf_mappings, uplink_map): + LOG.info("setup_intfs enter") + for intf_name, intf_info in sys_intf_mappings.items(): + if intf_info["type"] == "vlan": + add_vlan_link(intf_name, intf_info["interface"], intf_info["vlan_tag"]) +# elif intf_info["type"] == "ovs": +# add_ovs_port( +# intf_info["interface"], +# intf_name, +# uplink_map[intf_info["interface"]], +# vlan_id=intf_info.get("vlan_tag")) + else: + pass + +def setup_ips(ip_settings, sys_intf_mappings): + LOG.info("setup_ips enter") + for intf_info in ip_settings.values(): + network = netaddr.IPNetwork(intf_info["cidr"]) + if sys_intf_mappings[intf_info["name"]]["type"] == "ovs": + intf_name = intf_info["name"] + else: + intf_name = intf_info["alias"] + if "gw" in intf_info: + continue + cmd = "ip addr add %s/%s brd %s dev %s;" \ + % (intf_info["ip"], intf_info["netmask"], 
str(network.broadcast),intf_name) +# if "gw" in intf_info: +# cmd += "route del default;" +# cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name) + LOG.info("setup_ips: cmd=%s" % cmd) + os.system(cmd) + +def setup_ips_new(config): + LOG.info("setup_ips_new enter") + network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"]) + intf_name = config["provider_net_mappings"][0]["interface"] + cmd = "ip addr add %s/%s brd %s dev %s;" \ + % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), intf_name) +# cmd = "ip link set br-ex up;" +# cmd += "ip addr add %s/%s brd %s dev %s;" \ +# % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), 'br-ex') + cmd += "route del default;" + cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], intf_name) +# cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], 'br-ex') + LOG.info("setup_ips_new: cmd=%s" % cmd) + os.system(cmd) + +def setup_default_router(config): + LOG.info("setup_ips_new enter") + network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"]) + intf_name = config["provider_net_mappings"][0]["interface"] + cmd = "route del default;" + cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], "vhost0") + LOG.info("setup_default_router: cmd=%s" % cmd) + os.system(cmd) + +def remove_ovs_kernel_mod(config): + LOG.info("remove_ovs_kernel_mod enter") + cmd = "rmmod vport_vxlan; rmmod openvswitch;" + LOG.info("remove_ovs_kernel_mod: cmd=%s" % cmd) + os.system(cmd) + +def main(config): + uplink_map = {} + setup_bondings(config["bond_mappings"]) + remove_ovs_kernel_mod(config) + for provider_net in config["provider_net_mappings"]: + uplink_map[provider_net['name']] = provider_net['interface'] + + setup_intfs(config["sys_intf_mappings"], uplink_map) + setup_ips(config["ip_settings"], 
config["sys_intf_mappings"]) +# setup_ips_new(config) + setup_default_router(config) + +if __name__ == "__main__": + config = yaml.load(open(config_path)) + main(config) diff --git a/ansible/roles/open-contrail/tasks/ext-net.yml b/ansible/roles/open-contrail/tasks/ext-net.yml new file mode 100644 index 0000000..3ef327e --- /dev/null +++ b/ansible/roles/open-contrail/tasks/ext-net.yml @@ -0,0 +1,47 @@ +--- +- name: add ext-network router of vgw on controller for open-contrail + shell: > + ip route add {{ public_net_info.floating_ip_cidr }} via {{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} dev {{ network_cfg.public_vip.interface }} ; + echo "ip route add {{ public_net_info.floating_ip_cidr }} via {{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} dev {{ network_cfg.public_vip.interface }}" >> /opt/contrail/bin/if-vhost0 ; + when: inventory_hostname in groups['opencontrail'] + + +- name: create vgw for open-contrail + shell: > + echo "lsof -ni :9090 ; while [ $? -ne 0 ]; do sleep 10; lsof -ni :9090; done" >> /etc/init.d/net_init; + echo "sleep 10" >> /etc/init.d/net_init; + echo "python /opt/contrail/utils/provision_vgw_interface.py --oper create --interface vgw1 --subnets {{ public_net_info.floating_ip_cidr }} --routes 0.0.0.0/0 --vrf default-domain:admin:{{ public_net_info.network }}:{{ public_net_info.network }}" >> /etc/init.d/net_init; + when: groups['opencontrail']|length !=0 and inventory_hostname == groups['compute'][0] + + +- name: add vgw router on compute(without vgw) for open-contrail + shell: echo "ip route add {{ public_net_info.floating_ip_cidr }} via {{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} dev vhost0" >> /etc/init.d/net_init + when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail'] and inventory_hostname != groups['compute'][0] + + + + + +# create a file with vgw ip on CompassCore, so that Jumper Host could access this to get vgw ip +- name: add vgw file on compass + local_action: file 
path=/home/opencontrail1.rc state=touch mode=0777 + run_once: True + when: groups['opencontrail']|length !=0 + +- name: update vgw file + local_action: lineinfile dest=/home/opencontrail1.rc line={{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} + run_once: True + when: groups['opencontrail']|length !=0 + +- name: add vgw file on compass + local_action: file path=/home/opencontrail2.rc state=touch mode=0777 + run_once: True + when: groups['opencontrail']|length !=0 + +- name: update vgw file + local_action: lineinfile dest=/home/opencontrail2.rc line={{ public_net_info.floating_ip_cidr }} + run_once: True + when: groups['opencontrail']|length !=0 + + + diff --git a/ansible/roles/open-contrail/tasks/install/install-collector.yml b/ansible/roles/open-contrail/tasks/install/install-collector.yml new file mode 100755 index 0000000..d302289 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-collector.yml @@ -0,0 +1,24 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: collector +# sudo: yes +# tasks: + +- name: "temporary disable supervisor analytics" + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-analytics.override" + +- name: "install contrail openstack analytics package" +# apt: +# name: "contrail-openstack-analytics" + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: collector_package diff --git a/ansible/roles/open-contrail/tasks/install/install-common.yml b/ansible/roles/open-contrail/tasks/install/install-common.yml new file mode 100755 index 0000000..e94621b --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-common.yml @@ -0,0 +1,104 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: all +# sudo: yes +# tasks: +#- name: "copy contrail install package temporary" +# sudo: True +# copy: +# src: "{{ package }}" +# dest: "/tmp/{{ package }}" + +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download OpenContrail package file + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ package }}" dest=/tmp/{{ package }} +#" + +- name: "install contrail install package" +# sudo: True + apt: + deb: "/tmp/{{ package }}" + force: yes + +- name: "delete temporary contrail install package" +# sudo: True + file: + dest: "/tmp/{{ package }}" + state: "absent" + +- name: "make directory for contrail binary files" +# sudo: True + file: + path: "/opt/contrail/bin" + state: "directory" + +- name: "make directory for contrail repository" +# sudo: True + file: + path: "/opt/contrail/contrail_install_repo" + state: "directory" + +- name: "unarchive contrail packages" +# sudo: True + unarchive: + src: "/opt/contrail/contrail_packages/contrail_debs.tgz" + dest: "/opt/contrail/contrail_install_repo" + copy: no + +- name: "find required packages in advance" +# sudo: True + shell: "find /opt/contrail/contrail_install_repo -name binutils_*.deb -or -name make_*.deb -or -name libdpkg-perl_*.deb -or -name dpkg-dev_*.deb -or -name patch_*.deb -type f" + register: required_packages + changed_when: no + +- name: "install required packages" +# sudo: True + apt: + deb: "{{ item }}" + force: yes + with_items: required_packages.stdout_lines + ignore_errors: True + +- name: modify source list +# sudo: True + lineinfile: + dest: "/etc/apt/sources.list" + line: 
"deb file:/opt/contrail/contrail_install_repo ./" + insertbefore: "BOF" + +- name: "modify apt configuration" +# sudo: True + lineinfile: + dest: "/etc/apt/apt.conf" + line: "APT::Get::AllowUnauthenticated \"true\";" + create: "yes" + +- name: "copy apt preferences file" +# sudo: True + shell: "cp /opt/contrail/contrail_packages/preferences /etc/apt/preferences" + args: + creates: "/etc/apt/preferences" + +- name: create contrail packages list +# sudo: True + shell: "dpkg-scanpackages . | gzip -9c > Packages.gz" + args: + chdir: "/opt/contrail/contrail_install_repo" + creates: "Packages.gz" + +- name: install contrail setup package + sudo: True + apt: + name: "contrail-setup" + update_cache: yes + force: yes diff --git a/ansible/roles/open-contrail/tasks/install/install-compute.yml b/ansible/roles/open-contrail/tasks/install/install-compute.yml new file mode 100755 index 0000000..4e4a5ad --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-compute.yml @@ -0,0 +1,55 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: [compute, tsn] +# sudo: yes +# tasks: +- name: "temporary disable supervisor vrouter" +# sudo: True + template: + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-vrouter.override" + +# - name: "install nova-compute for contrail package" +# apt: +# name: "nova-compute" +# when: install_nova + +- name: "install contrail vrouter 3.13.0-40 package" +# apt: +# name: "contrail-vrouter-3.13.0-40-generic" +# when: ansible_kernel == "3.13.0-40-generic" +# sudo: True + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: vrouter_package + when: ansible_kernel == kernel_required + +- name: "install contrail vrouter dkms package" +# apt: +# name: "contrail-vrouter-dkms" +# when: ansible_kernel != "3.13.0-40-generic" +# sudo: True + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: dkms_package + when: ansible_kernel != kernel_required + +# - name: "install contrail vrouter common package" +# apt: +# name: "contrail-vrouter-common" + +# - name: "install contrail nova vif package" +# apt: +# name: "contrail-nova-vif" + +- name: "install contrail vrouter common & nova vif package" +# sudo: True + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: compute_package | union(compute_package_noarch) + + diff --git a/ansible/roles/open-contrail/tasks/install/install-config.yml b/ansible/roles/open-contrail/tasks/install/install-config.yml new file mode 100755 index 0000000..b66e3e4 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-config.yml @@ -0,0 +1,51 @@ +############################################################################## 
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: config +# sudo: yes +# tasks: +- name: "temporary disable supervisor config" +# sudo: True + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-config.override" + +- name: "temporary disable neutron server" +# sudo: True + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/neutron-server.override" + +############################################### +################ workaround ################# +############################################### +- name: "backup keepalived conf" + shell: mv /etc/keepalived/keepalived.conf /home/keepalived.conf + +- name: "uninstall keepalived" + action: "{{ ansible_pkg_mgr }} name=keepalived state=absent" + +- name: "install iproute" + action: "{{ ansible_pkg_mgr }} name=iproute state=present" + +- name: "install iproute" + action: "{{ ansible_pkg_mgr }} name=keepalived state=present" + +- name: "restore keepalived conf" + shell: mv /home/keepalived.conf /etc/keepalived/keepalived.conf +############################################### + +- name: "install contrail openstack config package" +# sudo: True +# apt: +# name: "contrail-openstack-config" + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: config_package diff --git a/ansible/roles/open-contrail/tasks/install/install-control.yml b/ansible/roles/open-contrail/tasks/install/install-control.yml new file mode 100755 index 0000000..ab7d4ad --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-control.yml @@ -0,0 
+1,32 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: control +# sudo: yes +# tasks: +- name: "temporary disable supervisor control" +# sudo: True + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-control.override" + +- name: "temporary disable supervisor dns" +# sudo: True + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-dns.override" + +- name: "install contrail openstack control package" +# sudo: True +# apt: +# name: "contrail-openstack-control" + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: control_package diff --git a/ansible/roles/open-contrail/tasks/install/install-database.yml b/ansible/roles/open-contrail/tasks/install/install-database.yml new file mode 100755 index 0000000..5c89ede --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-database.yml @@ -0,0 +1,25 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: database +# sudo: yes +# tasks: +- name: "temporary disable supervisor database" +# sudo: True + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-database.override" + +- name: "install contrail openstack database package" +# sudo: True +# apt: +# name: "contrail-openstack-database" + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: database_package diff --git a/ansible/roles/open-contrail/tasks/install/install-interface.yml b/ansible/roles/open-contrail/tasks/install/install-interface.yml new file mode 100755 index 0000000..3f7b43c --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-interface.yml @@ -0,0 +1,34 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: all +# sudo: yes +# tasks: +#- name: get last ip address +# shell: expr substr `cat /etc/hostname` 5 1 +# register: lastip + +#- name: "configure interface" +## sudo: True +# lineinfile: +# dest: "/etc/network/interfaces" +# line: "{{ item }}" +# with_items: +# - "auto {{ contrail_vhost_device }}" +# - "iface {{ contrail_vhost_device }} inet static" +# - "\taddress {{ contrail_vhost_address }}" +# - "\tnetmask {{ contrail_vhost_netmask }}" + +- name: "set interface address" +# sudo: True + shell: "ifconfig {{ contrail_vhost_device }} {{ contrail_vhost_address }} netmask {{ contrail_vhost_netmask }}" + +- name: "up interface" +# sudo: True + shell: "ifconfig {{ contrail_vhost_device }} up" diff --git a/ansible/roles/open-contrail/tasks/install/install-kernel.yml b/ansible/roles/open-contrail/tasks/install/install-kernel.yml new file mode 100755 index 0000000..be9a8ac --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-kernel.yml @@ -0,0 +1,60 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: all +# sudo: yes +# tasks: + +- name: "install Ubuntu kernel" +# sudo: True +# apt: +# name: "linux-headers-3.13.0-40" +# name: "linux-headers-3.13.0-40-generic" +# name: "linux-image-3.13.0-40-generic" +# name: "linux-image-extra-3.13.0-40-generic" +# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic") + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: kernel_package | union(kernel_package_noarch) + when: (kernel_install) and (ansible_kernel != kernel_required) + +- name: "setup grub" +# sudo: True + lineinfile: + dest: "/etc/default/grub" + regexp: "GRUB_DEFAULT=.*" + line: "GRUB_DEFAULT='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-40-generic'" +# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic") + when: (kernel_install) and (ansible_kernel != kernel_required) + +- name: "reflect grub" +# sudo: True + shell: "update-grub2" +# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic") + when: (kernel_install) and (ansible_kernel != kernel_required) + +- name: "reboot Server" +# sudo: True + shell: "shutdown -r now" + async: 0 + poll: 0 + ignore_errors: true + notify: Wait for server to come back +# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic") + when: (kernel_install) and (ansible_kernel != kernel_required) + +# handlers: +- name: "Wait for server to come back" + local_action: + module: wait_for + host={{ inventory_hostname }} + port=22 + delay=30 + timeout=600 +# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic") + when: (kernel_install) and (ansible_kernel != kernel_required) diff --git 
a/ansible/roles/open-contrail/tasks/install/install-webui.yml b/ansible/roles/open-contrail/tasks/install/install-webui.yml new file mode 100755 index 0000000..6dbe1e7 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/install/install-webui.yml @@ -0,0 +1,26 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: webui +# sudo: yes +# tasks: + +- name: "temporary disable supervisor webui" +# sudo: True + template: +# src: "templates/override.j2" + src: "../../templates/install/override.j2" + dest: "/etc/init/supervisor-webui.override" + +- name: "install contrail openstack webui package" +# sudo: True +# apt: +# name: "contrail-openstack-webui" + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: webui_package diff --git a/ansible/roles/open-contrail/tasks/main.yml b/ansible/roles/open-contrail/tasks/main.yml new file mode 100755 index 0000000..7d0f1a9 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/main.yml @@ -0,0 +1,151 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: backup rabbitmq-server
+  shell: cp /etc/init.d/rabbitmq-server /home/rabbitmq-server
+  when: inventory_hostname in groups['opencontrail']
+
+- name: Disable Service Daemon
+  shell: if [ -f "/opt/service" ] ; then mv /opt/service /opt/service.bak ; fi
+  when: groups['opencontrail']|length !=0
+
+- name: Install common on all hosts for Open Contrail
+  include: install/install-common.yml
+  when: groups['opencontrail']|length !=0
+  # Compass installs OpenStack not only with OpenContrail but also with ODL or ONOS, and sometimes a user installs plain OpenStack; the 'opencontrail' group length therefore marks whether Compass is installing OpenContrail at all.
+ +#- name: Install kernel on all hosts for Open Contrail +# include: install/install-kernel.yml +# when: groups['opencontrail_control']|length !=0 + +- name: Install database for Open Contrail + include: install/install-database.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Install config for Open Contrail + include: install/install-config.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Install control for Open Contrail + include: install/install-control.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Install collector for Open Contrail + include: install/install-collector.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Install webui for Open Contrail + include: install/install-webui.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Install compute for Open Contrail + include: install/install-compute.yml + when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail'] +# or inventory_hostname in groups['opencontrail_tsn'] + + +# change vhost0 on eth1 +#- name: Install interface on all hosts for Open Contrail +# include: install/install-interface.yml +# when: groups['opencontrail']|length !=0 + +#- include: install/install-common.yml +#- include: install/install-kernel.yml +#- include: install/install-database.yml +#- include: install/install-config.yml +#- include: install/install-control.yml +#- include: install/install-collector.yml +#- include: install/install-webui.yml +#- include: install/install-compute.yml +#- include: install/install-interface.yml + + +#- name: Provision route on all hosts for Open Contrail +# include: provision/provision-route.yml +# when: groups['opencontrail_control']|length !=0 + + +- name: Provision RabbitMQ on OpenContrail config nodes + include: provision/provision-rabbitmq.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Provision increase limits for Open Contrail + include:
provision/provision-increase-limits.yml + when: inventory_hostname in groups['opencontrail'] +#or inventory_hostname in groups['opencontrail_config'] or inventory_hostname in groups['opencontrail_collector'] or inventory_hostname in groups['opencontrail_database'] + + +- name: Provision database for Open Contrail + include: provision/provision-database.yml + when: inventory_hostname in groups['opencontrail'] + + +- name: Provision config for Open Contrail + include: provision/provision-config.yml + when: inventory_hostname in groups['opencontrail'] + +- name: Provision control for Open Contrail + include: provision/provision-control.yml + when: inventory_hostname in groups['opencontrail'] + + +- name: Provision collector for Open Contrail + include: provision/provision-collector.yml + when: inventory_hostname in groups['opencontrail'] + + +- name: Provision add nodes for Open Contrail + include: provision/provision-add-nodes.yml + when: inventory_hostname in groups['opencontrail'] + + +- name: Provision webui for Open Contrail + include: provision/provision-webui.yml + when: inventory_hostname in groups['opencontrail'] + + +- name: Provision compute for Open Contrail + include: provision/provision-compute.yml + when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail'] + +- name: Remove openvswitch on compute + include: uninstall-openvswitch.yml + when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail'] + +- name: Config ext-net network + include: ext-net.yml + +- name: Enable Service Daemon + shell: if [ -f /opt/service.bak ] ; then mv /opt/service.bak /opt/service ; fi + when: groups['opencontrail']|length !=0 + +#- name: Provision tsn for Open Contrail +# include: provision/provision-tsn.yml +# when: inventory_hostname in groups['opencontrail_tsn'] + + +#- name: Provision toragent for Open Contrail +# include: provision/provision-toragent.yml +# when: inventory_hostname in
groups['opencontrail_tsn'] + +#- include: provision/provision-route.yml +#- include: provision/provision-rabbitmq.yml +#- include: provision/provision-increase-limits.yml +#- include: provision/provision-database.yml +#- include: provision/provision-config.yml +#- include: provision/provision-control.yml +#- include: provision/provision-collector.yml +#- include: provision/provision-add-nodes.yml +#- include: provision/provision-webui.yml +#- include: provision/provision-compute.yml +#- include: provision/provision-tsn.yml +#- include: provision/provision-toragent.yml diff --git a/ansible/roles/open-contrail/tasks/provision/-node-common.yml b/ansible/roles/open-contrail/tasks/provision/-node-common.yml new file mode 100755 index 0000000..759f940 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/-node-common.yml @@ -0,0 +1,28 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: "disable ufw" + ufw: + state: "disabled" + +- name: "change value of kernel.core_pattern" + sysctl: + name: "kernel.core_pattern" + value: "/var/crashes/core.%e.%p.%h.%t" + +- name: "change value of net.ipv4.ip_forward" + sysctl: + name: "net.ipv4.ip_forward" + value: "1" + +- name: "make crashes directory" + file: + path: "/var/crashes" + state: "directory" + mode: 0777 diff --git a/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml b/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml new file mode 100644 index 0000000..ec6b2fe --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml @@ -0,0 +1,30 @@ +--- +- name: 'stop rabbitmq server' + service: + name: 'rabbitmq-server' + state: 'stopped' + +- name: 'check beam process' + shell: 'ps ax | grep -v grep | grep beam' + register: beam_process + changed_when: no + ignore_errors: yes + +- name: 'kill beam processes' + shell: 'pkill -9 beam' + when: beam_process.stdout + +- name: 'check epmd process' + shell: 'ps ax | grep -v grep | grep epmd' + register: epmd_process + changed_when: no + ignore_errors: yes + +- name: 'kill epmd processes' + shell: 'pkill -9 epmd' + when: epmd_process.stdout + +- name: 'remove mnesia directory' + file: + name: '/var/lib/rabbitmq/mnesia' + state: 'absent' diff --git a/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml b/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml new file mode 100755 index 0000000..c4a6624 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml @@ -0,0 +1,34 @@ +############################################################################## +# Copyright (c) 
2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: "stop redis server" + service: + name: "redis-server" + state: "stopped" + +- name: "modify redis server configuration" + replace: + dest: "/etc/redis/redis.conf" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: "^\\s*bind", replace: "#bind" } + - { regexp: "^\\s*save", replace: "#save" } + - { regexp: "^\\s*dbfilename", replace: "#dbfilename" } + - { regexp: "^\\s*lua-time-limit\\s*\\d*", replace: "lua-time-limit 15000" } + +- name: "delete redis dump" + file: + dest: "/var/lib/redis/dump.rdb" + state: "absent" + +- name: "start redis server" + service: + name: "redis-server" + state: "started" diff --git a/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml b/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml new file mode 100755 index 0000000..be1879a --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml @@ -0,0 +1,115 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: "change owner nova log directory" + file: + dest: "/var/log/nova" + state: "directory" + owner: "nova" + group: "nova" + recurse: yes + +- name: "delete values from nova config" + ini_file: + dest: "/etc/nova/nova.conf" + section: "{{ item.section }}" + option: "{{ item.option }}" + with_items: + - { section: "DEFAULT", option: "sql_connection" } + - { section: "DEFAULT", option: "quantum_admin_tenant_name" } + - { section: "DEFAULT", option: "quantum_admin_username" } + - { section: "DEFAULT", option: "quantum_admin_password" } + - { section: "DEFAULT", option: "quantum_admin_auth_url" } + - { section: "DEFAULT", option: "quantum_auth_strategy" } + - { section: "DEFAULT", option: "quantum_url" } + +- name: "set values to nova config" + ini_file: + dest: "/etc/nova/nova.conf" + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: + - { section: "DEFAULT", option: "auth_strategy", value: "keystone" } + - { section: "DEFAULT", option: "libvirt_nonblocking", value: "True" } + - { section: "DEFAULT", option: "libvirt_inject_partition", value: "-1" } + - { section: "DEFAULT", option: "rabbit_host", value: "{{ contrail_haproxy_address }}" } + - { section: "DEFAULT", option: "rabbit_port", value: "5672" } + - { section: "DEFAULT", option: "glance_host", value: "{{ contrail_haproxy_address }}" } + - { section: "DEFAULT", option: "glance_port", value: "9292" } + - { section: "DEFAULT", option: "neutron_admin_tenant_name", value: "service" } + - { section: "DEFAULT", option: "neutron_admin_username", value: "neutron" } + - { section: "DEFAULT", option: "neutron_admin_password", value: "{{ contrail_admin_password }}" 
} + - { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ contrail_haproxy_address }}:35357/v2.0/" } + - { section: "DEFAULT", option: "neutron_url", value: "http://{{ contrail_haproxy_address }}:9696/" } + - { section: "DEFAULT", option: "neutron_url_timeout", value: "300" } + - { section: "DEFAULT", option: "network_api_class", value: "nova.network.neutronv2.api.API" } + - { section: "DEFAULT", option: "compute_driver", value: "libvirt.LibvirtDriver" } + - { section: "DEFAULT", option: "network_api_class", value: "nova_contrail_vif.contrailvif.ContrailNetworkAPI" } + - { section: "DEFAULT", option: "ec2_private_dns_show_ip", value: "False" } + - { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ contrail_haproxy_address }}:5999/vnc_auto.html" } + - { section: "DEFAULT", option: "vncserver_enabled", value: "True" } + - { section: "DEFAULT", option: "vncserver_listen", value: "{{ contrail_address }}" } + - { section: "DEFAULT", option: "vncserver_proxyclient_address", value: "{{ contrail_address }}" } + - { section: "DEFAULT", option: "security_group_api", value: "neutron" } + - { section: "DEFAULT", option: "heal_instance_info_cache_interval", value: "0" } + - { section: "DEFAULT", option: "image_cache_manager_interval", value: "0" } + - { section: "DEFAULT", option: "libvirt_cpu_mode", value: "none" } + - { section: "DEFAULT", option: "libvirt_vif_driver", value: "nova_contrail_vif.contrailvif.VRouterVIFDriver" } + - { section: "database", option: "connection", value: "mysql://nova:nova@{{ contrail_haproxy_address }}/nova?charset=utf8" } + - { section: "database", option: "idle_timeout", value: "180" } + - { section: "database", option: "max_retries", value: "-1" } + - { section: "keystone_authtoken", option: "admin_tenant_name", value: "service" } + - { section: "keystone_authtoken", option: "admin_user", value: "nova" } + - { section: "keystone_authtoken", option: "admin_password", value: "{{ contrail_admin_password
}}" } + - { section: "keystone_authtoken", option: "auth_protocol", value: "http" } + - { section: "keystone_authtoken", option: "auth_host", value: "{{ contrail_haproxy_address }}" } + - { section: "keystone_authtoken", option: "signing_dir", value: "/tmp/keystone-signing-nova" } + + + +#- { section: "DEFAULT", option: "rabbit_host", value: "{{ hostvars[groups['config'][0]]['contrail_address'] }}" } +#- { section: "DEFAULT", option: "glance_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" } +#- { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_address'] }}:35357/v2.0/" } +#- { section: "DEFAULT", option: "neutron_url", value: "http://{{ hostvars[groups['config'][0]]['contrail_address'] }}:9696/" } +#- { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_mgmt_address'] }}:5999/vnc_auto.html" } +#- { section: "database", option: "connection", value: "mysql://nova:nova@{{ hostvars[groups['openstack'][0]]['contrail_address'] }}/nova?charset=utf8" } +#- { section: "keystone_authtoken", option: "auth_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" } + + + +- name: "change database address if same node as first openstack node" + ini_file: + dest: "/etc/nova/nova.conf" + section: "database" + option: "connection" + value: "mysql://nova:nova@127.0.0.1/nova?charset=utf8" + when: groups['openstack'][0] == inventory_hostname + +- name: "add respawn to nova compute config" + lineinfile: + dest: "/etc/init/nova-compute.conf" + line: "respawn" + insertbefore: "pre-start script" + +- name: "add respawn limit to nova compute config" + lineinfile: + dest: "/etc/init/nova-compute.conf" + line: "respawn limit 10 90" + insertafter: "respawn" + +- name: "restart nova compute" + service: + name: "nova-compute" + state: "restarted" + +- name: "delete nova sqlite database" + file: + dest: 
"/var/lib/nova/nova.sqlite" + state: "absent" diff --git a/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml b/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml new file mode 100755 index 0000000..91517b3 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml @@ -0,0 +1,86 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: config +# sudo: yes +# tasks: + +#- name: "recover rabbitmq-server service script" +# shell: cp /home/rabbitmq-server /etc/init.d/rabbitmq-server + +#- name: "restart rabbitmq-server" +# service: +# name: "rabbitmq-server" +# state: "restarted" + +#- name: "wait rabbitmq-server start" +# shell: sleep 5 + +- name: "restart contrail-discovery" + service: + name: "contrail-discovery" + state: "restarted" + +- name: "wait contrail-discovery" + shell: sleep 5 + +- name: "restart contrail-api" + service: + name: "contrail-api" + state: "restarted" + +- name: "check contrail-api" + shell: lsof -ni :8082 ; while [ $? 
-ne 0 ]; do sleep 10; lsof -ni :8082; done; sleep 20; + +- name: "wait contrail-api" + shell: sleep 20 + +- name: "provision config node" + shell: "python /opt/contrail/utils/provision_config_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}" +# when: inventory_hostname in groups['opencontrail_config'] + +#- hosts: database +# sudo: yes +# tasks: +- name: "provision database node" + shell: "python /opt/contrail/utils/provision_database_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}" +# when: inventory_hostname in groups['opencontrail_database'] + + +#- hosts: collector +# sudo: yes +# tasks: +- name: "provision collector node" + shell: "python /opt/contrail/utils/provision_analytics_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}" +# when: inventory_hostname in groups['opencontrail_collector'] + +#- hosts: control +# sudo: yes +# tasks: +- name: "provision control node" + shell: "python /opt/contrail/utils/provision_control.py --api_server_ip {{ contrail_haproxy_address }} --api_server_port 8082 --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }} --router_asn {{ contrail_router_asn }}" +# when: inventory_hostname in groups['opencontrail_control'] + +#- hosts: config +# sudo: yes +# tasks: +- name: "provision metadata services" + shell: "python 
/opt/contrail/utils/provision_linklocal.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --ipfabric_service_ip 10.84.50.1 --ipfabric_service_port 8775 --linklocal_service_name metadata --linklocal_service_ip 169.254.169.254 --linklocal_service_port 80" + run_once: yes +# when: inventory_hostname in groups['opencontrail_config'] + + +#- hosts: config +# sudo: yes +# tasks: +- name: "provision encap" + shell: "python /opt/contrail/utils/provision_encap.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --oper add --encap_priority MPLSoUDP,MPLSoGRE,VXLAN" + run_once: yes +# when: inventory_hostname in groups['opencontrail_config'] + diff --git a/ansible/roles/open-contrail/tasks/provision/provision-collector.yml b/ansible/roles/open-contrail/tasks/provision/provision-collector.yml new file mode 100755 index 0000000..b09f83a --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-collector.yml @@ -0,0 +1,106 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: collector +# sudo: yes +# tasks: + +- name: "enable supervisor analytics" + file: + path: "/etc/init/supervisor-analytics.override" + state: "absent" + + +- name: "redis-setup" + include: -redis-setup.yml + + +- name: "node-common" + include: -node-common.yml + + +- name: "fix up contrail collector config" + template: + src: "../../templates/provision/contrail-collector-conf.j2" + dest: "/etc/contrail/contrail-collector.conf" + + +- name: "fix up contrail query engine config" + template: + src: "../../templates/provision/contrail-query-engine-conf.j2" + dest: "/etc/contrail/contrail-query-engine.conf" + + +- name: "fix up contrail analytics api config" + template: + src: "../../templates/provision/contrail-analytics-api-conf.j2" + dest: "/etc/contrail/contrail-analytics-api.conf" + + +- name: "modify contrail analytics nodemgr config" + ini_file: + dest: "/etc/contrail/contrail-analytics-nodemgr.conf" + section: "DISCOVERY" + option: "server" + value: "{{ contrail_haproxy_address }}" + + +- name: "fix up contrail keystone auth config" + template: + src: "../../templates/provision/contrail-keystone-auth-conf.j2" + dest: "/etc/contrail/contrail-keystone-auth.conf" + force: no + + +- name: "delete contrail alarm gen supervisord config file" + file: + dest: "/etc/contrail/supervisord_analytics_files/contrail-alarm-gen.ini" + state: "absent" + + +- name: "modify contrail snmp collector config file" + ini_file: + dest: "/etc/contrail/contrail-snmp-collector.conf" + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: + - { section: "DEFAULTS", option: "zookeeper", value: "{{ contrail_address }}:2181" } + - { 
section: "DISCOVERY", option: "disc_server_ip", value: "{{ contrail_haproxy_address }}" } + - { section: "DISCOVERY", option: "disc_server_port", value: "5998" } + + +- name: "modify contrail snmp collector ini file" + ini_file: + dest: "/etc/contrail/supervisord_analytics_files/contrail-snmp-collector.ini" + section: "program:contrail-snmp-collector" + option: "command" + value: "/usr/bin/contrail-snmp-collector --conf_file /etc/contrail/contrail-snmp-collector.conf --conf_file /etc/contrail/contrail-keystone-auth.conf" + + +- name: "modify contrail topology config file" + ini_file: + dest: "/etc/contrail/contrail-topology.conf" + section: "DEFAULTS" + option: "zookeeper" + value: "{{ contrail_address }}" + + +- name: "modify contrail topology ini file" + ini_file: + dest: "/etc/contrail/supervisord_analytics_files/contrail-topology.ini" + section: "program:contrail-topology" + option: "command" + value: "/usr/bin/contrail-topology --conf_file /etc/contrail/contrail-topology.conf" + + +- name: "restart supervisor analytics" + service: + name: "supervisor-analytics" + state: "restarted" diff --git a/ansible/roles/open-contrail/tasks/provision/provision-compute.yml b/ansible/roles/open-contrail/tasks/provision/provision-compute.yml new file mode 100755 index 0000000..d9258ef --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-compute.yml @@ -0,0 +1,262 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: compute +# sudo: yes +# tasks: +- name: "enable supervisor vrouter" + file: + path: "/etc/init/supervisor-vrouter.override" + state: "absent" + +- include: -node-common.yml + +- name: "check cgroup device acl in qemu conf" + shell: "grep -q '^\\s*cgroup_device_acl' /etc/libvirt/qemu.conf" + register: deviceacl + ignore_errors: yes + changed_when: no + +- name: "create cgroup device acl for qemu conf" + template: + src: "../../templates/provision/qemu-device-acl-conf.j2" + dest: "/tmp/qemu-device-acl.conf" + when: deviceacl | failed + +- name: "combination of the qemu configuration" + shell: "cat /tmp/qemu-device-acl.conf >> /etc/libvirt/qemu.conf" + when: deviceacl | failed + +- name: "delete temporary configuration file" + file: + dest: "/tmp/qemu-device-acl.conf" + state: "absent" + when: deviceacl | failed + +- name: "fix up vrouter nodemgr param" + template: + src: "../../templates/provision/vrouter-nodemgr-param.j2" + dest: "/etc/contrail/vrouter_nodemgr_param" + +- name: "set contrail device name for ansible" + set_fact: + contrail_ansible_device: "ansible_{{ contrail_vhost_device }}" + +- name: "fix up default pmac" + template: + src: "../../templates/provision/default-pmac.j2" + dest: "/etc/contrail/default_pmac" + +- name: "copy agent param config from template" + shell: "cp /etc/contrail/agent_param.tmpl /etc/contrail/agent_param" + +- name: "modify agent param config" + lineinfile: + dest: "/etc/contrail/agent_param" + regexp: "dev=__DEVICE__" + line: "dev={{ contrail_vhost_device }}" + +#- name: "get last ip address" +# shell: expr substr `cat /etc/hostname` 5 1 +# register: lastip + +- name: "fix up contrail vrouter agent config" + 
template: + src: "../../templates/provision/contrail-vrouter-agent-conf.j2" + dest: "/etc/contrail/contrail-vrouter-agent.conf" + +- name: "delete lines for contrail interface" + shell: "{{ item }}" + with_items: + - "sed -e '/auto {{ contrail_vhost_device }}/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top" + - "sed -n -e '/auto {{ contrail_vhost_device }}/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom" + - "sed -i -e '/auto {{ contrail_vhost_device }}/d' /tmp/contrail-interfaces-bottom" + - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom" + - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces" + +- name: "delete lines for vrouter interface" + shell: "{{ item }}" + with_items: + - "sed -e '/auto vhost0/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top" + - "sed -n -e '/auto vhost0/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom" + - "sed -i -e '/auto vhost0/d' /tmp/contrail-interfaces-bottom" + - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom" + - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces" + +#- name: get last ip address +# shell: expr substr `cat /etc/hostname` 5 1 +# register: lastip + +- name: "configure interface" + lineinfile: + dest: "/etc/network/interfaces" + line: "{{ item }}" + state: "present" + with_items: + - "auto {{ contrail_vhost_device }}" + - "iface {{ contrail_vhost_device }} inet manual" + - "\tpre-up ifconfig {{ contrail_vhost_device }} up" + - "\tpost-down ifconfig {{ contrail_vhost_device }} down" + - "auto vhost0" + - "iface vhost0 inet static" + - "\tpre-up /opt/contrail/bin/if-vhost0" + - "\tnetwork_name application" + - "\taddress {{ contrail_vhost_address }}" + - "\tnetmask {{ contrail_vhost_netmask }}" + +################################################################################## + +- name: "copy vrouter script to compute" + template: + src: 
"../../templates/vrouter-functions.sh" + dest: "/opt/contrail/bin/vrouter-functions.sh" + +- name: "load vrouter driver" + command: su -s /bin/sh -c "insmod /var/lib/dkms/vrouter/2.21/build/vrouter.ko" + ignore_errors: true + +- name: "run vhost0 script" + command: su -s /bin/sh -c "/opt/contrail/bin/if-vhost0" + ignore_errors: true + +################################################################################## + +- name: "delete temporary files" + file: + dest: "{{ item }}" + state: "absent" + with_items: + - "/tmp/contrail-interfaces-top" + - "/tmp/contrail-interfaces-bottom" + +################################################################################## + +- name: "fix up contrail vrouter nodemgr config" + ini_file: + dest: "/etc/contrail/contrail-vrouter-nodemgr.conf" + section: "DISCOVERY" + option: "server" + value: "{{ contrail_haproxy_address }}" + + +################################################################################## +########################### restart vrouter services ########################### + +- name: "restart supervisor service" + service: + name: "supervisor" + state: "restarted" + +- name: "restart vrouter nodemgr" + shell: ps aux | grep contrail-nodemgr | grep -v grep | awk '{print $2}' | xargs kill -9; + +- name: "restart vrouter agent" + service: + name: "contrail-vrouter-agent" + state: "restarted" + + +################################################################################## + + +- name: "restart libvirt bin" + service: + name: "libvirt-bin" + state: "restarted" + +#- name: "set value of nova to nova config" +# template: +# src: "provision/nova.j2" +# dest: "/etc/nova/nova.conf" +# when: install_nova + +#- name: "delete values from nova config" +# ini_file: +# dest: "/etc/nova/nova.conf" +# section: "{{ item.section }}" +# option: "{{ item.option }}" +# with_items: +# - { section: "DEFAULT", option: "quantum_auth_strategy" } +# - { section: "DEFAULT", option: "quantum_admin_auth_url" } +# - { section: 
"DEFAULT", option: "quantum_admin_tenant_name" } +# - { section: "DEFAULT", option: "quantum_admin_username" } +# - { section: "DEFAULT", option: "quantum_admin_password" } +# - { section: "DEFAULT", option: "quantum_url" } + +#- name: "set values of neutron to nova config" +# ini_file: +# dest: "/etc/nova/nova.conf" +# section: "{{ item.section }}" +# option: "{{ item.option }}" +# value: "{{ item.value }}" +# state: "present" +# with_items: +# - { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ contrail_keystone_address }}:5000/v2.0" } +# - { section: "DEFAULT", option: "neutron_admin_username", value: "neutron" } +# - { section: "DEFAULT", option: "neutron_admin_password", value: "{{ contrail_admin_password }}" } +# - { section: "DEFAULT", option: "neutron_admin_tenant_name", value: "service" } +# - { section: "DEFAULT", option: "neutron_url", value: "http://{{ contrail_haproxy_address }}:9696/" } +# - { section: "DEFAULT", option: "neutron_url_timeout", value: "300" } +# - { section: "DEFAULT", option: "network_api_class", value: "nova.network.neutronv2.api.API" } +# - { section: "DEFAULT", option: "libvirt_vif_driver", value: "nova_contrail_vif.contrailvif.VRouterVIFDriver" } + +- name: "set values to nova config" + ini_file: + dest: "/etc/nova/nova.conf" + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: + - { section: "DEFAULT", option: "network_api_class", value: "nova_contrail_vif.contrailvif.ContrailNetworkAPI" } + + + +####################################################################### +###################### nova plugin workaround ####################### +####################################################################### + +- name: "copy nova plugs on compute" + copy: + src: "../../templates/nova_contrail_vif.tar.gz" + dest: "/opt/nova_contrail_vif.tar.gz" + +- name: "unzip nova plugs" + command: su -s /bin/sh -c "tar xzf /opt/nova_contrail_vif.tar.gz -C /opt/" + +- 
name: "remove original nova plugs" + shell: rm -rf /usr/lib/python2.7/dist-packages/nova_contrail_vif/ + +- name: "use new nova plugs" + shell: mv /opt/nova_contrail_vif/ /usr/lib/python2.7/dist-packages/nova_contrail_vif/ + +################################################# + +- name: "restart nova compute" + service: + name: "nova-compute" + state: "restarted" + +- name: "add vrouter to contrail" + shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}" + +#- name: "reboot Server" +# shell: "shutdown -r now" +# async: 0 +# poll: 0 +# ignore_errors: true +# notify: Wait for server to come back +# +#handlers: +#- name: "Wait for server to come back" +# local_action: +# module: wait_for +# host={{ inventory_hostname }} +# port=22 +# delay=30 +# timeout=600 diff --git a/ansible/roles/open-contrail/tasks/provision/provision-config.yml b/ansible/roles/open-contrail/tasks/provision/provision-config.yml new file mode 100755 index 0000000..3214247 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-config.yml @@ -0,0 +1,343 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: config +# sudo: yes +# tasks: +- name: "enable supervisor config" + file: + path: "/etc/init/supervisor-config.override" + state: "absent" + +- name: "enable neutron server" + file: + path: "/etc/init/neutron-server.override" + state: "absent" + +# Compass is using this +#- name: "enable haproxy" +# replace: +# dest: "/etc/default/haproxy" +# regexp: "^ENABLED\\s*=.*$" +# replace: "ENABLED=1" + +# Compass is using this +#- name: "modify haproxy global configuration" +# lineinfile: +# dest: "/etc/haproxy/haproxy.cfg" +# regexp: "{{ item.regexp }}" +# line: "{{ item.line }}" +# insertafter: "^global" +# with_items: +# - { regexp: "^\\s*tune.bufsize", line: "\ttune.bufsize 16384" } +# - { regexp: "^\\s*tune.maxrewrite", line: "\ttune.maxrewrite 1024" } + +#chenshuai, add later +#- name: "delete haproxy configuration for contrail" +# shell: "sed -i -e '/^#contrail-marker-start/,/^#contrail-marker-end/d' /etc/haproxy/haproxy.cfg" + +#chenshuai, add later +#- name: "create haproxy configuration for contrail" +# template: +# src: "provision/haproxy-contrail-cfg.j2" +# src: "../../templates/provision/haproxy-contrail-cfg.j2" +# dest: "/tmp/haproxy-contrail.cfg" + +#chenshuai, add later +#- name: "combination of the haproxy configuration" +# shell: "cat /tmp/haproxy-contrail.cfg >> /etc/haproxy/haproxy.cfg" + +#chenshuai, add later +#- name: "delete temporary configuration file" +# file: +# dest: "/tmp/haproxy-contrail.cfg" +# state: "absent" + +#chenshuai, add later +#- name: "restart haproxy" +# service: +# name: "haproxy" +# state: "restarted" + +# Compass is using this +#- name: "create keepalived configuration" +# template: +# src: 
"../../templates/provision/keepalived-conf.j2" +# dest: "/etc/keepalived/keepalived.conf" +# with_indexed_items: groups['opencontrail_config'] +# when: contrail_keepalived and item.1 == inventory_hostname + +#- name: "restart keepalived" +# service: +# name: "keepalived" +# state: "restarted" +# when: contrail_keepalived + +- name: "node-common" + include: -node-common.yml + +- name: "fix up contrail keystone auth config" + template: + src: "../../templates/provision/contrail-keystone-auth-conf.j2" + dest: "/etc/contrail/contrail-keystone-auth.conf" + +- name: "fix up ifmap server log4j properties" + template: + src: "../../templates/provision/ifmap-log4j-properties.j2" + dest: "/etc/ifmap-server/log4j.properties" + +- name: "fix up ifmap server authorization properties" + template: + src: "../../templates/provision/ifmap-authorization-properties.j2" + dest: "/etc/ifmap-server/authorization.properties" + +- name: "fix up ifmap server basicauthusers properties" + template: + src: "../../templates/provision/ifmap-basicauthusers-properties.j2" + dest: "/etc/ifmap-server/basicauthusers.properties" + +- name: "fix up ifmap server publisher properties" + template: + src: "../../templates/provision/ifmap-publisher-properties.j2" + dest: "/etc/ifmap-server/publisher.properties" + +- name: "fix up contrail api config" + template: + src: "../../templates/provision/contrail-api-conf.j2" + dest: "/etc/contrail/contrail-api.conf" + +- name: "fix up contrail api supervisord config" + template: + src: "../../templates/provision/contrail-api-supervisord-conf.j2" + dest: "/etc/contrail/supervisord_config_files/contrail-api.ini" + +- name: "modify contrail api init script" + lineinfile: + dest: "/etc/init.d/contrail-api" + regexp: "supervisorctl -s unix:///tmp/supervisord_config.sock" + line: "supervisorctl -s unix:///tmp/supervisord_config.sock ${1} `basename ${0}:0`" + +- name: "fix up contrail schema config" + template: + src: "../../templates/provision/contrail-schema-conf.j2" + 
dest: "/etc/contrail/contrail-schema.conf" + +- name: "fix up contrail device manager config" + template: + src: "../../templates/provision/contrail-device-manager-conf.j2" + dest: "/etc/contrail/contrail-device-manager.conf" + +- name: "fix up contrail svc monitor config" + template: + src: "../../templates/provision/contrail-svc-monitor-conf.j2" + dest: "/etc/contrail/contrail-svc-monitor.conf" + +- name: "fix up contrail discovery supervisord config" + template: + src: "../../templates/provision/contrail-discovery-supervisord-conf.j2" + dest: "/etc/contrail/supervisord_config_files/contrail-discovery.ini" + +- name: "fix up contrail discovery config" + template: + src: "../../templates/provision/contrail-discovery-conf.j2" + dest: "/etc/contrail/contrail-discovery.conf" + +- name: "modify contrail discovery init script" + lineinfile: + dest: "/etc/init.d/contrail-discovery" + regexp: "supervisorctl -s unix:///tmp/supervisord_config.sock" + line: "supervisorctl -s unix:///tmp/supervisord_config.sock ${1} `basename ${0}:0`" + +- name: "fix up contrail vnc api library config" + template: + src: "../../templates/provision/contrail-vnc-api-lib-ini.j2" + dest: "/etc/contrail/vnc_api_lib.ini" + +- name: "fix up contrail config nodemgr config" + ini_file: + dest: "/etc/contrail/contrail-config-nodemgr.conf" + section: "DISCOVERY" + option: "server" + value: "{{ contrail_haproxy_address }}" + +- name: "fix up contrail sudoers" + template: + src: "../../templates/provision/contrail-sudoers.j2" + dest: "/etc/sudoers.d/contrail_sudoers" + mode: 0440 + +- name: "create directory for neutron plugins" + file: + dest: "/etc/neutron/plugins/opencontrail" + state: "directory" + +- name: "fix up contrail plugin for nuetron" + template: + src: "../../templates/provision/neutron-contrail-plugin-ini.j2" + dest: "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini" + +- name: "modify neutron server configuration" + lineinfile: + dest: "/etc/default/neutron-server" + regexp: 
"NEUTRON_PLUGIN_CONFIG=" + line: "NEUTRON_PLUGIN_CONFIG=\"/etc/neutron/plugins/opencontrail/ContrailPlugin.ini\"" + +#- name: "change owner neutron log directory" +# file: +# dest: "/var/log/neutron" +# state: "directory" +# owner: "neutron" +# group: "neutron" +# recurse: yes + +- name: "set values to neutron config" + ini_file: + dest: "/etc/neutron/neutron.conf" + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: +# - { section: "DEFAULT", option: "bind_port", value: "9697" } +# - { section: "DEFAULT", option: "auth_strategy", value: "keystone" } +# - { section: "DEFAULT", option: "allow_overlapping_ips", value: "True" } + - { section: "DEFAULT", option: "core_plugin", value: "neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2" } + - { section: "DEFAULT", option: "api_extensions_path", value: "/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions" } +# - { section: "DEFAULT", option: "rabbit_host", value: "{{ contrail_haproxy_address }}" } +# - { section: "DEFAULT", option: "rabbit_port", value: "5673" } +# - { section: "DEFAULT", option: "service_plugins", value: "neutron_plugin_contrail.plugins.opencontrail.loadbalancer.plugin.LoadBalancerPlugin" } + - { section: "DEFAULT", option: "service_plugins", value: " " } + - { section: "DEFAULT", option: "notify_nova_on_port_data_changes", value: "False" } + - { section: "service_providers", option: "service_provider", value: "LOADBALANCER:Opencontrail:neutron_plugin_contrail.plugins.opencontrail.loadbalancer.driver.OpencontrailLoadbalancerDriver:default" } + - { section: "quotas", option: "quota_driver", value: "neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver" } +# - { section: "quotas", option: "quota_network", value: "-1" } +# - { section: "quotas", option: "quota_subnet", value: "-1" } +# - { section: "quotas", option: "quota_port", value: "-1" } +# - { section: "keystone_authtoken", 
option: "admin_tenant_name", value: "admin" } +# - { section: "keystone_authtoken", option: "admin_user", value: "{{ contrail_admin_user }}" } +# - { section: "keystone_authtoken", option: "admin_password", value: "{{ contrail_admin_password }}" } +# - { section: "keystone_authtoken", option: "auth_host", value: "{{ contrail_keystone_address }}" } +# - { section: "keystone_authtoken", option: "auth_protocol", value: "http" } + +#- name: "add respawn to neutron server config" +# lineinfile: +# dest: "/etc/init/neutron-server.conf" +# line: "respawn" +# insertbefore: "pre-start script" + +#- name: "add respawn limit to neutron server config" +# lineinfile: +# dest: "/etc/init/neutron-server.conf" +# line: "respawn limit 10 90" +# insertafter: "respawn" + +- name: "restart supervisor config" + service: + name: "supervisor-config" + state: "restarted" + + + +########################################################### +############# neutron plugins workaround ################## +########################################################### + +- name: "copy neutron plugs on controller" + copy: + src: "../../templates/neutron_plugin_contrail.tar.gz" + dest: "/opt/neutron_plugin_contrail.tar.gz" + +- name: "unzip neutron plugs" + command: su -s /bin/sh -c "tar xzf /opt/neutron_plugin_contrail.tar.gz -C /opt/" + +- name: "remove original neutron plugs" + shell: rm -rf /usr/lib/python2.7/dist-packages/neutron_plugin_contrail/ + +- name: "use new neutron plugs" + shell: mv /opt/neutron_plugin_contrail/ /usr/lib/python2.7/dist-packages/neutron_plugin_contrail/ + +########################################################### + + + +- name: "restart neutron-server" + service: + name: "neutron-server" + state: "restarted" + +# Compass configured +#- name: "add neutron service" +# shell: "keystone service-get 'neutron' || keystone service-create --name 'neutron' --type 'network' --description 'Neutron Network Service'" +# environment: +# OS_AUTH_URL: "http://{{ 
contrail_keystone_address }}:35357/v2.0" +# OS_USERNAME: "{{ contrail_admin_user }}" +# OS_PASSWORD: "{{ contrail_admin_password }}" +# OS_TENANT_NAME: "admin" +# run_once: yes +# when: keystone_provision +# +# +# Compass configured +#- name: "add neutron endpoint" +# shell: "keystone endpoint-list | grep -q $(keystone service-get 'neutron' | grep '| *id *|' | awk '{print $4}') || keystone endpoint-create --region 'RegionOne' --service 'neutron' --publicurl 'http://{{ contrail_haproxy_address }}:9696' --internal 'http://{{ contrail_haproxy_address }}:9696' --adminurl 'http://{{ contrail_haproxy_address }}:9696'" +# environment: +# OS_AUTH_URL: "http://{{ contrail_keystone_address }}:35357/v2.0" +# OS_USERNAME: "{{ contrail_admin_user }}" +# OS_PASSWORD: "{{ contrail_admin_password }}" +# OS_TENANT_NAME: "admin" +# run_once: yes +# when: keystone_provision +# +#- name: "add neutron user" +# keystone_user: +# user: "neutron" +# password: "{{ contrail_admin_password }}" +# email: "neutron@example.com" +# tenant: "service" +# endpoint: "http://{{ contrail_keystone_address }}:35357/v2.0" +# login_user: "{{ contrail_admin_user }}" +# login_password: "{{ contrail_admin_password }}" +# login_tenant_name: "admin" +# run_once: yes +# when: keystone_provision +# +#- name: "apply role to user" +# keystone_user: +# tenant: "service" +# user: "neutron" +# role: "admin" +# endpoint: "http://{{ contrail_keystone_address }}:35357/v2.0" +# login_user: "{{ contrail_admin_user }}" +# login_password: "{{ contrail_admin_password }}" +# login_tenant_name: "admin" +# run_once: yes +# when: keystone_provision + + + +#- name: "set values to nova config" +# ini_file: +# dest: "/etc/nova/nova.conf" +# section: "{{ item.section }}" +# option: "{{ item.option }}" +# value: "{{ item.value }}" +# with_items: +# - { section: "DEFAULT", option: "network_api_class", value: "nova_contrail_vif.contrailvif.ContrailNetworkAPI" } + + +#- name: "restart nova-server" +# service: +# name: "{{ item }}" +# 
state: "restarted" +# with_items: +# - nova-api +# - nova-cert +# - nova-conductor +# - nova-consoleauth +# - nova-novncproxy +# - nova-scheduler diff --git a/ansible/roles/open-contrail/tasks/provision/provision-control.yml b/ansible/roles/open-contrail/tasks/provision/provision-control.yml new file mode 100755 index 0000000..e719a46 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-control.yml @@ -0,0 +1,69 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: control +# sudo: yes +# tasks: +- name: "enable supervisor control" + file: + path: "/etc/init/supervisor-control.override" + state: "absent" + +- name: "enable supervisor dns" + file: + path: "/etc/init/supervisor-dns.override" + state: "absent" + +- name: "modify ifmap server basicauthusers properties for control" + lineinfile: + dest: "/etc/ifmap-server/basicauthusers.properties" +# line: "{{ hostvars[item]['contrail_address' ] }}:{{ hostvars[item]['contrail_address' ] }}" + line: "{{ ip_settings[item]['br-prv']['ip'] }}:{{ ip_settings[item]['br-prv']['ip'] }}" + with_items: groups['opencontrail'] + +- name: "modify ifmap server basicauthusers properties for dns" + lineinfile: + dest: "/etc/ifmap-server/basicauthusers.properties" +# line: "{{ hostvars[item]['contrail_address' ] }}.dns:{{ hostvars[item]['contrail_address' ] }}.dns" + line: "{{ ip_settings[item]['br-prv']['ip'] }}.dns:{{ ip_settings[item]['br-prv']['ip'] }}.dns" + with_items: groups['opencontrail'] + +- name: "node-common" + include: -node-common.yml + +- name: "fix up contrail control 
config" + template: + src: "../../templates/provision/contrail-control-conf.j2" + dest: "/etc/contrail/contrail-control.conf" + +- name: "fix up contrail dns config" + template: + src: "../../templates/provision/contrail-dns-conf.j2" + dest: "/etc/contrail/contrail-dns.conf" + +- name: "fix up contrail control nodemgr config" + ini_file: + dest: "/etc/contrail/contrail-control-nodemgr.conf" + section: "DISCOVERY" + option: "server" + value: "{{ contrail_haproxy_address }}" + +- name: "modify dns configuration" + replace: + dest: "/etc/contrail/dns/{{ item }}" + regexp: "secret \"secret123\"" + replace: "secret \"xvysmOR8lnUQRBcunkC6vg==\"" + with_items: + - "contrail-rndc.conf" + - "contrail-named.conf" + +- name: "restart supervisor control" + service: + name: "supervisor-control" + state: "restarted" diff --git a/ansible/roles/open-contrail/tasks/provision/provision-database.yml b/ansible/roles/open-contrail/tasks/provision/provision-database.yml new file mode 100755 index 0000000..9c99270 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-database.yml @@ -0,0 +1,209 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: database +# sudo: yes +# tasks: + + +- name: "enable supervisor database" +# sudo: True + file: + path: "/etc/init/supervisor-database.override" + state: "absent" + + +- name: "-node-common" +# sudo: True + include: -node-common.yml + + +- name: "update hosts" +# sudo: True + lineinfile: + dest: "/etc/hosts" +# regexp: "^{{ contrail_address }}\t{{ ansible_hostname }}( .*)?$" +# line: "{{ contrail_address }}\t{{ ansible_hostname }}\\1" + regexp: "^{{ contrail_address }}\t{{ inventory_hostname }}( .*)?$" + line: "{{ contrail_address }}\t{{ inventory_hostname }}\\1" + backrefs: yes + + +- name: "make directory for contrail analytics" +# sudo: True + file: + path: "/var/lib/cassandra/data/ContrailAnalytics" + state: "directory" + + +- name: "modify cassandra conf" +# sudo: True + lineinfile: + dest: "/etc/cassandra/cassandra.yaml" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: "^(#(\\s*)?)?listen_address:", line: "listen_address: {{ contrail_address }}"} + - { regexp: "^(#(\\s*)?)?cluster_name:", line: "cluster_name: \"Contrail\"" } + - { regexp: "^(#(\\s*)?)?rpc_address:", line: "rpc_address: {{ contrail_address }}" } + - { regexp: "^(#(\\s*)?)?num_tokens:", line: "num_tokens: 256" } + - { regexp: "^(#(\\s*)?)?initial_token:", line: "# initial_token:" } + + + +- name: "set first database host seed" +# sudo: True + set_fact: +# dbseeds: "{{ hostvars[item.1][ contrail_address ] }}" + dbseeds: "{{ ip_settings[item.1]['br-prv']['ip'] }}" + with_indexed_items: groups['opencontrail'] + when: item.0 == 0 + + + + + +- name: "set second database host seed" +# sudo: True + set_fact: +# dbseeds: "{{ dbseeds }},{{ 
hostvars[item.1]['contrail_address'] }}" + dbseeds: "{{ dbseeds }},{{ ip_settings[item.1]['br-prv']['ip'] }}" + with_indexed_items: groups['opencontrail'] + when: item.0 == 1 + + +- name: "modify seeds list in cassandra conf" +# sudo: True + replace: + dest: "/etc/cassandra/cassandra.yaml" + regexp: "- seeds:.*$" + replace: "- seeds: {{ dbseeds }}" + + +- name: "modify cassandra env" +# sudo: True + replace: + dest: "/etc/cassandra/cassandra-env.sh" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintGCDetails\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintGCDetails\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -Xss\\d+k\"", replace: "JVM_OPTS=\"$JVM_OPTS -Xss512k\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintGCDateStamps\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintGCDateStamps\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintHeapAtGC\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintHeapAtGC\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintTenuringDistribution\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintTenuringDistribution\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintGCApplicationStoppedTime\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintGCApplicationStoppedTime\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintPromotionFailure\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintPromotionFailure\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:PrintFLSStatistics=1\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:PrintFLSStatistics=1\"" } + - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date \\+%s`\\.log\"", replace: "JVM_OPTS=\"$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log\"" } + + +- name: "modify zookeeper conf" +# sudo: True + lineinfile: + dest: "/etc/zookeeper/conf/zoo.cfg" + line: "{{ item }}" + with_items: + - "maxSessionTimeout=120000" + - 
"autopurge.purgeInterval=3" + + +- name: "modify zookeeper log4j properties" +# sudo: True + lineinfile: + dest: "/etc/zookeeper/conf/log4j.properties" + regexp: "(log4j.appender.ROLLINGFILE.MaxBackupIndex=.*)$" + line: "\\1" + backrefs: yes + + +- name: "add server addresses to zookeeper config" +# sudo: True + lineinfile: + dest: "/etc/zookeeper/conf/zoo.cfg" + regexp: "server.{{ item.0 + 1 }}=" +# line: "server.{{ item.0 + 1 }}={{ hostvars[item.1]['contrail_address'] }}:2888:3888" + line: "server.{{ item.0 + 1 }}={{ ip_settings[item.1]['br-prv']['ip'] }}:2888:3888" + with_indexed_items: groups['opencontrail'] + + +- name: "set zookeeper unique id" +# sudo: True + template: + src: "../../templates/provision/zookeeper-unique-id.j2" + dest: "/var/lib/zookeeper/myid" + with_indexed_items: groups['opencontrail'] + when: item.1 == inventory_hostname + + +- name: "remove kafka ini file" +# sudo: True + file: + path: "/etc/contrail/supervisord_database_files/kafka.ini" + state: "absent" + + +- name: "set first zookeeper host address" +# sudo: True + set_fact: +# zkaddrs: "{{ hostvars[item.1]['contrail_address'] }}:2181" + zkaddrs: "{{ ip_settings[item.1]['br-prv']['ip'] }}:2181" + with_indexed_items: groups['opencontrail'] + when: item.0 == 0 + + +- name: "set second or more zookeeper host addresses" +# sudo: True + set_fact: +# zkaddrs: "{{ zkaddrs }},{{ hostvars[item.1]['contrail_address'] }}:2181" + zkaddrs: "{{ zkaddrs }},{{ ip_settings[item.1]['br-prv']['ip'] }}:2181" + with_indexed_items: groups['opencontrail'] + when: item.0 > 0 + + +- name: "modify zookeeper host addresses in kafka properties" +# sudo: True + lineinfile: + dest: "/usr/share/kafka/config/server.properties" + regexp: "zookeeper.connect=" + line: "zookeeper.connect={{ zkaddrs }}" + + +- name: "modify kafka properties" +# sudo: True + lineinfile: + dest: "/usr/share/kafka/config/server.properties" + regexp: "default.replication.factor=" + line: "default.replication.factor=2" + + +- name: "fix up 
contrail database nodemgr config" +# sudo: True + ini_file: + dest: "/etc/contrail/contrail-database-nodemgr.conf" + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + with_items: + - { section: "DEFAULT", option: "hostip", value: "{{ contrail_address }}" } + - { section: "DISCOVERY", option: "server", value: "{{ contrail_haproxy_address }}" } + + +- name: "restart zookeeper" +# sudo: True + service: + name: "zookeeper" + state: "restarted" + + +- name: "restart supervisor database" +# sudo: True + service: + name: "supervisor-database" + state: "restarted" diff --git a/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml b/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml new file mode 100755 index 0000000..89a4966 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml @@ -0,0 +1,60 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: [database, config, control, collector] +# sudo: yes +# tasks: +- name: "delete line" +# sudo: True + lineinfile: + dest: "/etc/limits.conf" + regexp: "^root\\s*soft\\s*nproc\\s*.*" + state: "absent" + +- name: "check EOF" +# sudo: True + lineinfile: + dest: "/etc/security/limits.conf" + regexp: "^# End of file" + line: "# End of file" + +- name: "add lines" +# sudo: True + lineinfile: + dest: "/etc/security/limits.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertbefore: "^# End of file" + with_items: + - { regexp: "^root\\s*hard\\s*nofile\\s*.*", line: "root hard nofile 65535" } + - { regexp: "^root\\s*soft\\s*nofile\\s*.*", line: "root soft nofile 65535" } + - { regexp: "^\\*\\s*hard\\s*nofile\\s*.*", line: "* hard nofile 65535" } + - { regexp: "^\\*\\s*soft\\s*nofile\\s*.*", line: "* soft nofile 65535" } + - { regexp: "^\\*\\s*hard\\s*nproc\\s*.*", line: "* hard nproc 65535" } + - { regexp: "^\\*\\s*soft\\s*nproc\\s*.*", line: "* soft nproc 65535" } + +- name: change value of sysctl fs.file-max +# sudo: True + sysctl: + name: "fs.file-max" + value: "65535" + +- name: "find supervisord conf files" +# sudo: True + shell: "find /etc/contrail -name supervisor*.conf -type f" + register: supervisordconfs + changed_when: no + +- name: "modify supervisord conf" +# sudo: True + replace: + dest: "{{ item }}" + regexp: "^minfds=\\d*" + replace: "minfds=10240" + with_items: supervisordconfs.stdout_lines diff --git a/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml b/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml new file mode 100644 index 0000000..d342659 --- /dev/null +++ 
b/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml @@ -0,0 +1,87 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: config +# sudo: yes +# tasks: +- name: turn off rabbitmq server on control node + shell: sed -i '/rabbitmq-server/d' /opt/service ; + +- name: "start supervisor support service" + service: + name: "supervisor-support-service" + state: "started" + +- name: "stop rabbitmq server via supervisor" + supervisorctl: + name: "rabbitmq-server" + state: "stopped" + server_url: "unix:///tmp/supervisord_support_service.sock" + +- include: -rabbitmq-stop.yml + +- name: "update hosts" + lineinfile: + dest: "/etc/hosts" + line: "{{ ip_settings[item]['br-prv']['ip'] }}\t{{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}-ctrl" + with_items: groups['opencontrail'] + +- name: "fix up rabbitmq env" + template: + src: "../../templates/provision/rabbitmq-env-conf.j2" + dest: "/etc/rabbitmq/rabbitmq-env.conf" + +- name: "fix up rabbitmq config for single node" + template: + src: "../../templates/provision/rabbitmq-conf-single.j2" + dest: "/etc/rabbitmq/rabbitmq.config" + when: groups['opencontrail'][1] is not defined + +- name: fix up rabbitmq config for multi nodes + template: + src: "../../templates/provision/rabbitmq-conf.j2" + dest: "/etc/rabbitmq/rabbitmq.config" + when: groups['opencontrail'][1] is defined + +- include: -rabbitmq-stop.yml + +#- name: "create cookie uuid temporary" +# local_action: +# module: "template" +# src: "templates/rabbitmq-cookie.j2" +# dest: 
"/tmp/tmp-rabbitmq-cookie" +# run_once: yes +# +#- name: "update cookie uuid" +# copy: +# src: "/tmp/tmp-rabbitmq-cookie" +# dest: "/var/lib/rabbitmq/.erlang.cookie" +# owner: "rabbitmq" +# group: "rabbitmq" +# mode: 0400 +# +#- name: "delete temporary cookie uuid" +# local_action: +# module: "file" +# dest: "/tmp/tmp-rabbitmq-cookie" +# state: "absent" +# run_once: yes + +- name: "start rabbitmq server" + service: + name: "rabbitmq-server" + state: "started" + +- name: add rabbitmq user + shell: > + rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }} ; + rabbitmqctl set_permissions {{ RABBIT_USER }} ".*" ".*" ".*" ; + +- name: "check rabbitmq server" + shell: netstat -lpen --tcp | grep beam | grep 5672; while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep beam | grep 5672; done diff --git a/ansible/roles/open-contrail/tasks/provision/provision-route.yml b/ansible/roles/open-contrail/tasks/provision/provision-route.yml new file mode 100755 index 0000000..0168728 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-route.yml @@ -0,0 +1,50 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: all +# sudo: yes +# tasks: +- name: "delete existing route file" +# sudo: True + file: + path: "/etc/network/if-up.d/routes" + state: absent + when: contrail_route + +- name: "create route file" +# sudo: True + file: + path: "/etc/network/if-up.d/routes" + owner: "root" + mode: 0755 + state: touch + when: contrail_route + + +- name: "add template" +# sudo: True + lineinfile: + dest: "/etc/network/if-up.d/routes" + line: "{{ item }}" + with_items: + - "#!/bin/bash" + - "[ \"$IFACE\" != {{ contrail_route[0].device }} ] && exit 0" + when: contrail_route + + +- name: "add static route" +# sudo: True + lineinfile: + dest: "/etc/network/if-up.d/routes" + line: "ip route add {{ item.ip }} via {{ item.gw }} dev {{ item.device }}" + state: "present" + with_items: + - "{{ contrail_route }}" + when: contrail_route + diff --git a/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml b/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml new file mode 100755 index 0000000..3ae0bec --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml @@ -0,0 +1,85 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: tsn +# sudo: yes +# tasks: +- name: "create temporary directory for ssl files" + local_action: + module: "file" + dest: "/tmp/tmp-toragent-{{ item }}" + state: "directory" + with_items: + - "certs" + - "private" + run_once: yes + +- name: "create ssl files" + local_action: "shell openssl req -new -x509 -days 3650 -text -sha256 -newkey rsa:4096 -nodes -subj \"/C=US/ST=Global/O={{ item.1.vendor_name }}/CN={{ ansible_fqdn }}\" -keyout /tmp/tmp-toragent-private/tor.{{ item.0 }}.privkey.pem -out /tmp/tmp-toragent-certs/tor.{{ item.0 }}.cert.pem" + with_indexed_items: contrail_tor_agents + run_once: yes + +- name: "set tor agent list" + set_fact: + toragent_index: "{{ item.0 }}" + toragent_params: "{{ item.1 }}" + register: contrail_toragent_list + with_indexed_items: contrail_tor_agents + when: inventory_hostname in item.1.tsn_names + +- name: "fix up tor agent conf" + template: + src: "templates/contrail-tor-agent-conf.j2" + dest: "/etc/contrail/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}.conf" + with_items: contrail_toragent_list.results + +- name: "fix up tor agent ini" + template: + src: "provision/contrail-tor-agent-ini.j2" + dest: "/etc/contrail/supervisord_vrouter_files/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}.ini" + with_items: contrail_toragent_list.results + +- name: "copy init script" + shell: "cp /etc/init.d/contrail-vrouter-agent /etc/init.d/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}" + with_items: contrail_toragent_list.results + +- name: "copy ssl certs" + copy: + src: "/tmp/tmp-toragent-certs/tor.{{ item.ansible_facts.toragent_index }}.cert.pem" + dest: "/etc/contrail/ssl/certs/tor.{{ 
item.ansible_facts.toragent_index }}.cert.pem" + with_items: contrail_toragent_list.results + +- name: "copy ssl private" + copy: + src: "/tmp/tmp-toragent-private/tor.{{ item.ansible_facts.toragent_index }}.privkey.pem" + dest: "/etc/contrail/ssl/private/tor.{{ item.ansible_facts.toragent_index }}.privkey.pem" + with_items: contrail_toragent_list.results + +- name: "copy ca cert" + copy: + src: "files/cacert.pem" + dest: "/etc/contrail/ssl/certs/cacert.pem" + +- name: "delete temporary directory" + local_action: + module: "file" + dest: "/tmp/tmp-toragent-{{ item }}" + state: "absent" + with_items: + - "certs" + - "private" + run_once: yes + +- name: "add tor agent to contrail" + shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ inventory_hostname }}-{{ item.ansible_facts.toragent_index }} --host_ip {{ contrail_address }} --router_type tor-agent" + with_items: contrail_toragent_list.results + +- name: "add device to contrail" + shell: "python /opt/contrail/utils/provision_physical_device.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --device_name {{ item.ansible_facts.toragent_params.name }} --vendor_name {{ item.ansible_facts.toragent_params.vendor_name }} --product_name {{ item.ansible_facts.toragent_params.product_name }} --device_mgmt_ip {{ item.ansible_facts.toragent_params.address }} --device_tunnel_ip {{ item.ansible_facts.toragent_params.tunnel_address }} --device_tor_agent {{ inventory_hostname }}-{{ item.ansible_facts.toragent_index }} --device_tsn {{ inventory_hostname }}" + with_items: contrail_toragent_list.results diff --git 
a/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml b/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml new file mode 100755 index 0000000..8bd6dc0 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml @@ -0,0 +1,104 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: tsn +# sudo: yes +# tasks: + +- name: "enable supervisor vrouter" + file: + path: "/etc/init/supervisor-vrouter.override" + state: "absent" + +- include: -node-common.yml + +- name: "fix up vrouter nodemgr param" + template: + src: "provision/vrouter-nodemgr-param.j2" + dest: "/etc/contrail/vrouter_nodemgr_param" + +- name: "set contrail device name for ansible" + set_fact: + contrail_ansible_device: "ansible_{{ contrail_vhost_device }}" + +- name: "fix up default pmac" + template: + src: "provision/default-pmac.j2" + dest: "/etc/contrail/default_pmac" + +- name: "copy agent param config from template" + shell: "cp /etc/contrail/agent_param.tmpl /etc/contrail/agent_param" + +- name: "modify agent param config" + lineinfile: + dest: "/etc/contrail/agent_param" + regexp: "dev=__DEVICE__" + line: "dev={{ contrail_vhost_device }}" + +- name: "set vrouter agent mode" + set_fact: + contrail_vrouter_mode: "tsn" + +- name: "fix up contrail vrouter agent config" + template: + src: "../../templates/provision/contrail-vrouter-agent-conf.j2" + dest: "/etc/contrail/contrail-vrouter-agent.conf" + +- name: "delete lines for contrail interface" + shell: "{{ item }}" + with_items: + - "sed -e '/auto {{ contrail_vhost_device }}/,$d' 
/etc/network/interfaces > /tmp/contrail-interfaces-top" + - "sed -n -e '/auto {{ contrail_vhost_device }}/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom" + - "sed -i -e '/auto {{ contrail_vhost_device }}/d' /tmp/contrail-interfaces-bottom" + - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom" + - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces" + +- name: "delete lines for vrouter interface" + shell: "{{ item }}" + with_items: + - "sed -e '/auto vhost0/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top" + - "sed -n -e '/auto vhost0/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom" + - "sed -i -e '/auto vhost0/d' /tmp/contrail-interfaces-bottom" + - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom" + - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces" + +- name: "configure interface" + lineinfile: + dest: "/etc/network/interfaces" + line: "{{ item }}" + state: "present" + with_items: + - "auto {{ contrail_vhost_device }}" + - "iface {{ contrail_vhost_device }} inet manual" + - "\tpre-up ifconfig {{ contrail_vhost_device }} up" + - "\tpost-down ifconfig {{ contrail_vhost_device }} down" + - "auto vhost0" + - "iface vhost0 inet static" + - "\tpre-up /opt/contrail/bin/if-vhost0" + - "\tnetwork_name application" + - "\taddress {{ contrail_vhost_address }}" + - "\tnetmask {{ contrail_vhost_netmask }}" + +- name: "delete temporary files" + file: + dest: "{{ item }}" + state: "absent" + with_items: + - "/tmp/contrail-interfaces-top" + - "/tmp/contrail-interfaces-bottom" + +- name: "fix up contrail vrouter nodemgr config" + ini_file: + dest: "/etc/contrail/contrail-vrouter-nodemgr.conf" + section: "DISCOVERY" + option: "server" + value: "{{ contrail_haproxy_address }}" + +- name: "add tsn to contrail" + shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ 
contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }} --router_type tor-service-node" diff --git a/ansible/roles/open-contrail/tasks/provision/provision-webui.yml b/ansible/roles/open-contrail/tasks/provision/provision-webui.yml new file mode 100755 index 0000000..99441b6 --- /dev/null +++ b/ansible/roles/open-contrail/tasks/provision/provision-webui.yml @@ -0,0 +1,74 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#- hosts: webui +# sudo: yes +# tasks: + +- name: "enable supervisor webui" + file: + path: "/etc/init/supervisor-webui.override" + state: "absent" + +- name: "redis-setup" + include: -redis-setup.yml + +- name: "node-common" + include: -node-common.yml + +- name: "set first cassandra host address" + set_fact: +# cassandra_addrs: "'{{ hostvars[item.1]['contrail_address'] }}'" + cassandra_addrs: "'{{ ip_settings[item.1]['br-prv']['ip'] }}'" + with_indexed_items: groups['opencontrail'] + when: item.0 == 0 + +- name: "set second or more cassandra host addresses" + set_fact: +# cassandra_addrs: "{{ cassandra_addrs }}, '{{ hostvars[item.1]['contrail_address'] }}'" + cassandra_addrs: "{{ cassandra_addrs }}, '{{ ip_settings[item.1]['br-prv']['ip'] }}'" + with_indexed_items: groups['opencontrail'] + when: item.0 > 0 + +- name: "modify webui global js" + lineinfile: + dest: "/etc/contrail/config.global.js" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + 
with_items: + - { regexp: "^\\s*config.networkManager.ip", line: "config.networkManager.ip = '{{ contrail_haproxy_address }}';" } + - { regexp: "^\\s*config.imageManager.ip", line: "config.imageManager.ip = '{{ contrail_keystone_address }}';" } + - { regexp: "^\\s*config.computeManager.ip", line: "config.computeManager.ip = '{{ contrail_keystone_address }}';" } + - { regexp: "^\\s*config.identityManager.ip", line: "config.identityManager.ip = '{{ contrail_keystone_address }}';" } + - { regexp: "^\\s*config.storageManager.ip", line: "config.storageManager.ip = '{{ contrail_keystone_address }}';" } + - { regexp: "^\\s*config.cnfg.server_ip", line: "config.cnfg.server_ip = '{{ contrail_haproxy_address }}';" } + - { regexp: "^\\s*config.analytics.server_ip", line: "config.analytics.server_ip = '{{ contrail_haproxy_address }}';" } + - { regexp: "^\\s*config.cassandra.server_ips", line: "config.cassandra.server_ips = [{{ cassandra_addrs }}];" } + +- name: "modify webui userauth js" + lineinfile: + dest: "/etc/contrail/contrail-webui-userauth.js" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: "^\\s*auth.admin_user", line: "auth.admin_user = '{{ contrail_admin_user }}';" } + - { regexp: "^\\s*auth.admin_password", line: "auth.admin_password = '{{ contrail_admin_password }}';" } + - { regexp: "^\\s*auth.admin_tenant_name", line: "auth.admin_tenant_name = 'admin';" } + +- name: "create symbolic link from nodejs to node" + file: + src: "/usr/bin/node" + dest: "/usr/bin/nodejs" + state: "link" + +- name: "restart supervisor webui" + service: + name: "supervisor-webui" + state: "restarted" + diff --git a/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml b/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml new file mode 100755 index 0000000..0714d2e --- /dev/null +++ b/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml @@ -0,0 +1,46 @@ +--- +- name: del ovs bridge + shell: ovs-vsctl del-br br-int; ovs-vsctl del-br 
br-tun; ovs-vsctl del-br br-prv; + +- name: remove ovs and ovs-plugin daeman + shell: > + sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ; + sed -i '/openvswitch-switch/d' /opt/service ; + +- name: stop ovs and ovs-plugin + shell: service openvswitch-switch stop; service neutron-plugin-openvswitch-agent stop; + +- name: remove ovs and ovs-plugin files + shell: > + update-rc.d -f neutron-plugin-openvswitch-agent remove; + mv /etc/init.d/neutron-plugin-openvswitch-agent /home/neutron-plugin-openvswitch-agent; + mv /etc/init/neutron-plugin-openvswitch-agent.conf /home/neutron-plugin-openvswitch-agent.conf; + update-rc.d -f openvswitch-switch remove ; + mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ; + mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ; + update-rc.d -f neutron-ovs-cleanup remove ; + mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ; + mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ; + +- name: remove ovs kernel module + shell: rmmod vport_vxlan; rmmod openvswitch; + ignore_errors: True + +- name: copy recovery script + copy: src={{ item }} dest=/opt/setup_networks + with_items: +# - recover_network_opencontrail.py + - setup_networks_opencontrail.py + +#- name: recover external script +# shell: python /opt/setup_networks/recover_network_opencontrail.py + +- name: modify net-init + shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init + +- name: resolve dual NIC problem + shell: > + echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ; + /sbin/sysctl -p ; + echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ; + diff --git a/ansible/roles/open-contrail/templates/install/override.j2 b/ansible/roles/open-contrail/templates/install/override.j2 new file mode 100755 index 0000000..2905494 --- /dev/null +++ b/ansible/roles/open-contrail/templates/install/override.j2 @@ -0,0 +1 @@ +manual diff --git 
a/ansible/roles/open-contrail/templates/neutron_plugin_contrail.tar.gz b/ansible/roles/open-contrail/templates/neutron_plugin_contrail.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..080770497e45bf6dc26b071eda13d8473d365a32 GIT binary patch literal 73771 zcmV(?K-a$?iwFSA>X}vm1MEF(bK5wQ^I5+FrPP$9S%sG6N2YwGs&yP^MthF!vz_d2 z`CKfTf@F>Ya&*&m zLf^3Lou4*%4d8lnaKQe;mHx{$>Gwp{{G*^V)NufDiZg8_56gaQNmYGS~opmDg7Ox$>ujdkYg68Z0@D0j;yZ*oaRYU$u&-C3zNJHb^rmNV4^7X$9a^F|wkAJu8|3$7>WNFQh z2*@@FU2n8cf(L6FK3S9S0l&D@Fm7tKUXc^md-ko_LrA)2j|>O9LvrC!=Q=dZNAliX zI1?iTrO}}4v27F9H6T6>sK2BWxD5?(omnPm7o89oKz$NEP;%@UCj1dCee#L=sD5Oq z4M-O|v_(_9_h01MU68r)%z-&>NEQJFC{{qGmQ4x$Y*H^ImP1T;?%9^%n3OzO;RBmV zz|qLRITYgFhlb^FU`N{leVoLvuL=pF6sJUu;W218qO{n?JqI|n;w(keOdNk~#W77Z`@#n(wf$sZfVt9a=iEE13#uIUUV$6IBZJ~>pKlxT@IkP^K!IR-r z4rpQpp>N&8a+H}D;Ry_oI{^C(hqRBc$@scW-X33%ulwYm1K6d^eNfU51F-g6LgJSP&sIU0~Z zK~3T0pMS3f)0Bs>I`VmITCkSi#5UT z9mBTWC*5;{5Wc=QY_x09%|&uA9v2N?8ny+7lrkXUvqz7RNAT_3b?BFDe=E?JR$u`h z(C5ZqiYi{_@`XhZ7eIz&?$C%SB#tc#&gmY0Koe7;93mb=ZI(cKVWFj6q$V5+u zGLmInr+?~eqq`!;t0T=+C8&jSuKa3YEKdm5$8bVDPZ68(36U31E&cu(V8BD`xeU!* zOa*G<59k}9hE@a~%-f~2T{>k*TTTcrgJJ6q4cAxZI_T%lZh4!SywTj~Lwq~R=uK$>_YY;^rNe)8+Va2jT7_}Y~N%!g{ zSxquhEWEEzavq?YIJnwr^zj`HOyBZC*B3rmyWLhix_A&Yv{;Y7GuXmU#~Xk&nbD9L zs;+4V0uo+64HrJdGYpH@3}O`&usBArKA4Fh3@|SM_UZD3_z9&!LaJ7W(g4E9G`X)J z5}df1nMCk8am_&T#W4VdDR&YH1v?Nu>?|P&twLbop;ev~PzFK%-s{1e*Temr-NDfS z|Nol(TLl(kMR(!D13m?qDFp)_fQui(XVDK^;$HJ-%t`sMv|(l4cH!aMC-Beh(d*s2 z9*Z#H8wSexUb+UYLAQ4o!^7Z8yFB}Nb$oWMaf-VW%M828M7>_z8(7KaO~&&C8Qc?r zlXlMIp%3CKQq22w6EKKo#0Dm3MEF(?96Ei~8|vI0DJ#`5;#YcFfn0qR{9O9rD)7=I+L;KgD??SDRA+#FxS>@$d+s2|H3 zjVxQa3cOy5BnKD=Wch#+#?KUL0Weu|V+|ky1k08)V-R{R1qA&JtduNrw4`Foye>-l38+2olO@j=N_NJYMmNc51a=Gzp}MxjBn1!j z#1?>ekvs6O%U=6{W7Y4NWDiB8sDhk=P4C zEy@E}(I--qM7jsFltZ3So0dbiCXE1clUfdNb#&SDL`t`$VNWd^%uq1G=mEb`dm_FR zt1GUjXavhXiq8=2JyO4FG%RVfSg2ui#5~Y-2+HG?L`5UeLTTiuN*cV}wu;)6!EUHG z+yj!*0hGa-msAn-&_QtJ5SQqwYCE*_W$<=!^716 
z-@));Z@d5Z64!d06FIvbEnEP4lo|RvVJnaj!*1`Gn}H3u`G$lre}Ug>wOG_4nBn@tEl+!;S5fBAmn?RIX$L(X@+ zNVM__FecR+`=TDm_WG7r4gYWLQqacrpTk{r3Ag^=3tTniAKm*}^55S-7^daFJKWvM z|0S+fx3eOpI4ayzuG)-bV4Cb+vH?ZDn2J;sJb*qgoSePWI#aERVlzznj%1Ms-hg{` z`q$qOtc!lIxCi~vEzg**&gGR@+=Q4?f})xfmW}hjb2X2U4XC;BEr_OOfVTIeAkCDe z2-zQJ&7!vJ<+U_5rfVErvq25DzTVFMiJ)u)k!l2r#9L46Y|%$7Upq4G<{{SmzQfY} z*MnbQ?+#yd*FC%M{iE!}y&_3$8_7DO;6|GA5@#*2Uz{{P`% zcQ@t#4-VdJ5P7klJy@yj^F8*fY0gGd7X;um#wC*RS#XC88vA7 zK8smdBL!=3t-b%#i02pq=!?K|W^1#(!PLBTS6Q7KaCXmmYD55B$IQq}wdxYGsrR-*248<2^c|96e|8yx7zq?AM zVLm)8ty%#lTgQa}SFOYo)pA9ESH!S>gW;9vp-T)B(2%&( zc81MuB+n4!-P^ADxJ?$kc(Q<1RBTfP(zQ($@aua@6-Ya#Xkis|yK0^QH9r$8!@X5< z1i7##E5MtHBB!sA9mof}5_W5322zsTCINZ{MX^`?)LoGm$<6r zkIve}~7+U*>9Z{_iX0 z0AQ?8B>z}AmPg~=fM35rV-Wo=Kk@kIF$T{BxkQz@AT^X)THlK=2sscR&NC7N&YLBX6U&yKK(afha_O zaJ~F-aqHceOn{m2LDM;Ac%)Q>Wv^92Z9bW>pg_$Q0l=Kl$^$^yRn8HX*Oex?#XEUC z;F)HnC%c!^H%Uw@-J{z%BjhBrGA3!b`x1|`rfMf~ENv>!1QaGsnBwBXC{-)JSI$kA z=0f|HJHPnUxl591d9~guT7P|6S!q^1S4qzQoiG^qE$5)py{1RU=dvu-5T%sQ83j}4{N5w5NNit`D1Iv$(3TZDA9^r;;AoE31z+yQqT*P02T_ZLt z8kpj6nioavt;5&qQ@8|ctM&UM0~O@5*E9UkG8Z;@&Xt_5sr(g!i3s}&vLVV6$dgbG zSBr&IDWmG6*BB86VtL0#2da+e_LSKd&ADOI!3ky9<}yx9tG=y>y!$z5lb=bK5A7A& zh41~GH<{g66lo4_q_bd~7$7_yr&`Me8yd>1#0V*X(iSN|iHRvywHG&;A#9e09@yj}UUHQ51_@2}ti)^P#lDJpzGwQC)hP90I?)1^?54@}Q< z?bHrHN!j?~d6eVH^X5@IWad$u*Y)LAIPNgtn{()sqH<*qHSCvuswtBtYYlJLa_W@R zfY&h135xl)Igf9tr&^42;@YL86!%a~c%?Aba_%JWqf$^sY*!t<0-i^>Zp_TKGt4RE zc!t$E3nK2Fvn*Vm0T`$HOiS=>NuoXCENxvvRABh-~TD1d5Vq?e^haq z-$A}tU8L73|6w!6Byu;Jc@U~RHsC^(ERMtued~n!4|c7$|FgG&^IwO%Z?fmV4&H3{ ze_rHD?dTkAH*mHaIA7t18^!KQXwB(b?-Z21GXUT6I_FgTi;pN&8ldmm?3WI0cZTyt zn@ZL14&Z?Nu^eG=5u6Ylo~RS5lM_CGgFcbAvsBxQC6)+D`YNC;9aERylMr8}MiVq3 z*#Mhb9y$!Vh^!GN-V$GnMzJ%HQp4E2igCPnJv~y~tr)B9J*WWWraDmk41mzD8W_yd zgWyi(7=Mw)R@|)`%gP)}9I(7ZBFj5$2{N?P-b=S6L7v zZw@cO_;yQB>a$p&r^NsgM;Xb7$He>OH+zt?U6s*&(mB2yU!PumI=$-jQsEtTwXJ)= z83P}+5v-F#O?-4E;Rw5f=YHcr=12J5G)zfN0(DD|x5IM_qJ2e$A(W6fl|N02Z^mQ} 
zaspQaYtv&!;4eglcr-ay7KH92g@;kxF*O#8WS?ArQfqwFIR!&x??mZ-H5jRX0?D)V zz+d{h<;gH&p=qoGV*+%^4r9x+Y9MxshsKY_7#5((mRl7OQNLtMmp>?R(7AiPFdtwk zO3%x@3EI2EH`)OHx7%)2pyc^(=$fuw2d${@N~lQ_vG-c4F-tMM5G$GQjWFU9wD%77 z4&GFvv+!&22)eC=3)OORk^EW6+WeB zXzxe}y<%HQ&)_UWAZB3a*Ss@E35#S=%Qlxix8jzk2Mne#yo&p%2xMr+D{bY4gA`MB$L)x4OKtIsDkm4xfsQt`pL2w#h*$>M=N@yeCx;h zJc>S)z(5Tf#hbA(UQPMyW;oamyEM=!=OqFY)ZZEUM4Zdrb-@{ zScQht(mAHMveFMr@e3?DO0PIgdFL?7It-yC1iur)p1N!zjE?~r5z36Q=GEKd6KM?Z z78pz55;c7ADOt%7hLWD;QQMN-jsio%AdKV)3LQ*y;rsBt+tcDOYH9d^#=0v|ltrK+ zyChB}AA9K~AHSQOv)q#xc$f@weVUXIVl?A7$#h^lL^qRsdz9^;>^c|u$wihezjqAo z5yU#<`=2?pMriV%e<`3LZy6Q4^GAggNM(arck(tmxtwi1CIN`{Nfayp&cAny<=-d2 z{KCoYmCwXcV-uE>qQaWb1r3XV01)hyGn zSU^4N@0210u;n#SJdrc*0YxZq}q&_wHR*!Z<3)iDRaW=?@tk^?F( zSI`pSQ-nf=s`3|$VOBX?qm3_(`6iW=pL7PZsF@v&A3L;RB^C8CSytv`g|y_CI;$C3 zD$J$H)g~vKqFpb|vADN_m0?eb8LJm(+jVk5i@2424W9K8tq}j?SC&bM0j3KLM1?dE zg$|2ceTFr?MrcTLh@V^TN56FD#GP&<0C$=d(mC$)DU`;)jy4(<{ghYMqnS`UXM$V6 z;4ZD)c@9m%*100T1nu~}a?YSUL1knSNui&EW?G2V3{R>dGA{ybkUV_XzZq>3UJ^~| zO3YHSa&&3qgoz*XZcbad?9z1%C3JpG>NpyWsgo&DYXS=CNbmB0MWWcxDK;Z1zXn0% z02fXJ0vi(+@EPLNW~Cv)-m8(9;U_xT{O2%YJia8P8l4`;EZ*#U!`QBsn3o#SieMIjSKh75cJKRd_Gt!A# z@&Qppz&3-mx5h*S!0|82k0Q^sk28(nzn+3#M@-Ve$5a`f*f++mvbRLpiQym*IP%Zr z-VGlFnXiF1+!I9<;B)qDoz*F;qpA^mM=46!IWI9^B)=w>P5y|4JIc!`piHb|w7*jl zAT)?ea_WLp6KT2X-tc)HuLdmvmE0kC)m>tB)Vb_am;q z7tz34R{Ps&X_0cQs;C*|Q~~8FYW@13M)hCDj5=ZSQy`k3|1ca3_P6Ihyu`JF{F4-_ zHPt7+wFI3nEd#7L|6%`N{~#s*!~N~~4=;0JncL=y`Zib5S6$B6u$FH#g?yW=;@e;e z-{$J~N{aXR;gXkzkL3~K-eO^84hcphq1l$R_#79DMbE(Mo*q7Azj;Ak^@Oemy}^O7 zTV=-z^TxzriibL@c}zZ&gu(p$2=0{}dzMnrr=<$}eyLSS1$uRJbv!=PPu~BgzaO8S zvYI0rno-?RZ0i5uXSu}oeMkKfG|h6XPOH^oAvbQU=|Eu@{?k3C5?Fi0juPw>*W-Uh zRh95|l^wgKA$HcqXy)#v0B~PRNVBW%u&GXvteV=nd%=cR_q*ovv5vhoZUTAkb_!X$e{8`EtaioB;@1J{!Dd(NensiJc z<3XlFX-ouMz(7TRb>x*Q7C6z2Gw=7MoPEO5M@l_B zO?gREU^MjQLcRiFDJUVI-gl2h$?;CCh5SWAzku~x&eBX)WpNQh%rjARBdH!M zap4S57ErmGrBpl%ptv+92ZT`5EELh8mc0n0$!SUzOq5ngBm&MinS!-=_>H@z-WjBH{SfST@&{qB-rl#AE{%dLCYT(9`mo_SK5Jg 
z*R=3}>%=i;){dVcmAwDQ-%)1m%Xu>1n#$eL2}L(Nff|oz&<%O4jGLTdim+!#Rxd3- zTo|@K2PfNda7SbsbbHY}SPNXExNJGs(N|@K|)9%C1=8$cw6iAyRA z4IXsXWF#if7G((ym0>D=T#H@no=pKR*u}Y*NjQam$@87%I$c=ZiDrRO}ZwmDaJ?4C z`9&cisi6JCMKM01h%}kaq(Q%RaK$i8hEW5dxMV=>Iw|$|1s%GCnK&%0q0-EcYUbYH*z-O^dJ=^c? zT{qJn0NqEBdwtRd#)QrIEp|r)NoQY;EuWTZ8d%zuR9n0%&IpZ5U9YS%_Ncm-5VuR5 z^q`2K<%;igew&+P(`b_IQfA573fS6xQ{}*1s|=aMK_&swt7AR|JpUZ0N@4eGG=Yob zGSepvkUiTT3?ZCP8A|ZZ;R46VFxRzck$d)$9M@$=1U#{{O>=kDlDF z|4lr%5&dri!hGiu{rlP5^YLP&{;RI?e)e>EKFNNWov5xxaa^u3vdrCPp=GKP#(EIO zhb|<_VtH6hAwOd|jML=0g1?^6mZPEioq?sruOq*l^TD%2hLoE5n5g9Ws92PBRd`I! zy0Vi8btU-7>dG4DDD;gIGx`Qs70lG|V!NfOfd1!vSxm|Xb^=&Vs@k&E|0!ov4NL7N z_|c$kg=mY>r1)Q*%QFe+TboLhfIxESiYfM=?-cUMA;>NQ^g|pCm}jmHcVr!OTSYt4 zwiU-GXG4F8c7EU+E!Xh*i=Uo;c=>+#_jki*FW$Z%{`le5^OrAnJNK7o^ZO<$z)`OD z>p56)4(9n;$JKiJW>2W55R6uTCF^NT@K9mnS1+Eu-=%O7?DMk^Z(oR|AZ9|tI5|;- zoOjCw1<3LB>?X(!)3--lH*hw)U6ue=-C?9wgFipMdd3n^c3Eg0p;6MQ+h$R>o@nt6 zh6iid)^o*VVq<~>GHdX+I14qk_5%Hot@4)T>+ETVMM@C~jVdN~q%h*;vN$R$=Gqp4 z30UuRBSSfpXWMI5nDAtq$q8qz9oT%HTHWzd1WUN=fj6zJYYGK*Rms|_R3>b7RNp2Rkp%Ey zo%5+2LZ`}IKU5Gr3KzLyAq!`FO20Jd?7zeHfUk}B4q&!w(kC{rfSrQ9-giChyYBUE z-(o`%w%m?B+qE4-P5E6ZS4 zT1zSg11zhAh%97V6<3LOv8xnqUR6Hcxh#N8xM5ZKXt%oJ;U)#>qa^HLV;0BZuq9oD zqpmfDvRl1(Ds(#a@H{}lM8ot0aV4D|FrIMpipRmtoB2tH2Yyu!+wFA*v(Etb9kgh> zTga{VdcfNxHoG(zdZ#-=iR3V$d1XI?$XOWMo%CI&iup6a_&FH`QIzttITE9D2mlBX zqvpjueSXCKWA=G4m5P?0&p?2+>w;-ZJB8PFbZ(NvjR~UW%6y!qka}xg-OTK7UEn|zwS5o51axhl z&@Cv$y2$SpWqF=w>fkCic{DK~w0;HBAjJ#@JesQ4gHZEo{!(X{S&Q+)TCJ$<+F@9O z$!w(4Iq8w0$8%Iu;)y(H``yve5t^axcd-EcA=(V{9`ASm-@T_F-v901``3Se@k)nq z3R{W*ux(^QfJ9fF#jJg=y#||<{O#YKPr-{p%T&5g6%1g}^>)a98Wj_D=(~{}t?63j zJ*)@U_BQ^;Asm~AZEO&M(F2Tfm^=K_w01iKq32+zZ_%EK8=O{bxf)<>ky~v%vy@)* z&9=;BbZLOQeKMSnmZxm9_}&cfa&-eO@}saFeI*9~vu z;1uiKbDwXUNQ^6WL^wu#zXv-V1D2(+RB(#;Mcj99VbIJjjgVBJ8*2ghw=PN2btouZ zf^I?<5h`4zVEJeBd@<6?rM4qRDrT#0+yJ{6R|OMLDl81Ta<|&{oBGpeB)q3vltYwH zinZ-W*QG^ELGaS2K#2=mheBzJSaOM{`*hH6d3**-X;p0>%YfeZ33XT4~;yooHx3nvJ6 
z4PxJe0c&7^;ELfZ9a!#o=GYRhsvIKiZ|yFJIa2?+Kkgn|Qy5o8jFG3s$gVeZ&yn;4 zcjvwbBRm~&Ak&DZv=%vM>KDYvM+wAqT!Iis(S2Y(W~$rB28gWHj7AkL#g>IS2Q7j<;QJ??z6Wqp1yzKagAk=H&i;{(a|=YZ`mb_ z3rv?JLw%Ep_=lb?h3P`|Z=LY7>_{#D)-^RKJ|>OY)}z<>_t%&{x8K$ivRCbDkan`O zg(A<|)1{g$qguc*3uj0^LXEd8&;%^`a)|N1{f1Th_4zm3uHLNl8}44%FyF`4y=r7; zwWU5Fh_=^*ZnzApUF!~RzOcU&O#R`_^Y0&1(+>MSw)<)Ux?Q~0)Fzkr_?Ysc-`pM- zSAYf`uYfAwZt9UGF2-NKr$g}k#mg7oAqaSZKi2XB&1maYPlmU+-Nt{o ziKo^2qXYWQ#q*`qcHx@gC~&x z=XU+C|GAr86lXixa&%ZOXY<{MS$SF_h?h943K0#86#9G;-Wl%$`j=^Y;q5Ruq0HL9k9sH59Jz0?fp zSlbPwVt^5`HTKrw1O6|xKv_C;)Loa2;~9k()z;&#;5&LC1qS&kN$ z8HSlhcZyswJD?Q8Vdy|e6padC(154D-)uT%KwbZReeUC7_2Ahs3fOuSCHql^5iCbrL4o2ynwaayBW-?0jy^eLe-B zTJS(^kbK}u!5|s|kOD$PP#9V=DV-?IhbWJ!4*&tC*s?6AzOISBv1)x5dZPwJE9Z<= z9WvZ1tQe5O0>LyXD~6|VK+J|}y!AHj%HZw_=~vQ00+J~%qJstxU9?uX=&twyiE>|< zHy8#lK3U+fHYg`~KJPuyZQsqnE6BqYP9Bb-0Vy|2AOp9i_|AB`B$Lp<#_kzZr?_Ku zu_D}(SttE8ThPmU_Y@KQ>n%HZ{=TmfX z0Z`5$#9aNSBQW_Z^JO+Wwjk81&Swx7Qx6~;sirVa;&Q2DM#2q`9fZ*;Y7HAmm{)xU zT&M-@BTdA-r5XA#{WP6@o;unL9n~CGR#b(v>_!Yq6~H+87_4?pwiq*?*e5Qd$EvJj zd#Zt{4XYWBQ-PaZP_Rd};Ocf9>zM~*VGHB6DuUx+9mhc)=6akg6HciPT0TZwZfZ2@ zT}dknlf0bGveV+^bf8*+75-d62vPtxGwN5PKLtY4jc{i^T8z%j&fGyl%{aF? ztvN*bME6tL2!pV`+R(J4H3+-GG3plXVVs$$q8-*w_7q(K)bs$YNmIVaPt;uG3v{`o zk`WBO95Mh}D%*?E$p{SRy{QeJprbrn(B13>#-fK~Lo#8E8u~G4VNa~C2C!CkI{@i`|EDD ziGAlU{~e%sONmX^tfWO1JuGb&5Pz8U6RFjBQFLy^%Ee&8_d`08*fEo=1bnq1H=suo zZwyQ-6U;Lc&C~I(T8*~fC204?v$GL8!`U|kp&NzS*UjtuZa3@>?m_$(`y3LhK1gJ- z3L$%8>TC)(8fq}3Rg>9N$>_CTwW3l7YL~RK7hcnS+%hJ1B5)S5X+IrHbpl7!M0{^B zC_Avr(ao`+jc2*OYH3OdRDp-tMT477Czm)PwJib>fQ)~mjS4$r=;t8&@L}(H&+YP! 
zxV%m*QoExeH~>&FbT$U+4uWV$ z2!Lr1VI&g(re^>nMpXfPWkLh_SZVd) z1xI|fVWx-3oAjC*FfPj#qxTal4vd>(sL z-emp*C(LkKFt48=k5fxuOaoM4acB!-Bj`dqyQYfUUx>qHNn9|;`t57tyq5bcroaK+ zh^&V;dsFwG4xk{(Yj6bykbr19*7#Y9;i8LG96E9p;j{eV)7T*4+1Br;RlU&+KCR*H^m5Q}Axn2oS z6%kJ!WcpS@zBIKb-9hhb2KM)~NkPD7Wc4w+8Va7Y%n3+lP$ zu$%okAk4ffc3TQmD}xqh0W@o&_s)&%c-Y`)qfcnV3>oNQk($Z56|PoTzE6g{KI4g^ zVA2p}w#7-pCjnLXwdDxSu-u3vj_2D-%oOmAwWbliVsJoKh^+ z7B*57eRc?`wlIgXOuMmL21h@?9E|gP4uABpdB$a%(L41l%+??Zm)3=nT{aHZNL6qu zoPv#=tfP1p+A=-Y=!SaR`R{2b_RM!>+^hmmk($yWi_R2B+_isOWwpy{iApKFk;NLx z8S(;NMbi`Xi48FDsjr!!NPs0x3<(D+=cZ!ceV{Nz(!|V1i)C?iK2djldXDNhTuR=v1FpL%ke_$ zpNdoLf$F$v;bBks#xlV_0QFG}l#T~u^*v}Cr{EkFD^r6Xb^H;90T5hz4Z=tzbb0t8 zR8p%@jTBjgk(zlF7SqnGssNHr25`TtLyjECa~hUPFz~JTKU4Uu7>_485T}j2z-suv z?FWxS|G!6D4{!6o-^ddX$bPb3>wYv_%qFv=(WG52UpQpij@9lLv_UO3vm>XMu&F_L z)WKgo0FVI(jY3CD43iJ%n@De)WJ0mE+IwI=Dwp}7m=?$)-YI9=VwW{~qe0V;9eMfJ=%dD7pQa;=LURTel!AJ)C9;yYu1UnlNS^N1hElo^e{gLqMoFx zca9+e%j8lO2DY+vv!Ku6AbdeK!RpF^1tI}|_^dfF|6v0z*qU{-X3WV_upQv+#U*fRokBN4peQSDh_4+?^JZ+V?{|5pZ(+{3T); zKdFi4@ANSn;C~xc8^acqY6%RO-sZ843MDSuwMtHO<_RUVwxVP?#BG8 z+m3F#-C^HN?OXA6;<=Li7vs%bRsQ?%;ll@^|M#PZkG5{_|C@MjCAzl~-S1qY+t1#f zj~65LUqvGHv!~1RN%qU^

s`%HHe7NgDLM7uHptiG;wlLj7tMCIHPWjH`QXjDWz^ z-HMsje-4W&&=Do#%~V0HsaC%gWip-pI`YdYwo^?Y_R5TEGMRnO$D{EWR&+ip7G+%( zc(a&lf9|@nlLvJr_{i$Y8fR?u#r6uD62W~0Gc~-}ZW&2J|AVP-8-$U-a#GcntwP2p z4NGkp_|c$kg-wgm#5fuz0e$O88rnG!x?+k2iLBZ&h7ChF%;qQ-+t-BR@yX!ba`Aeu zNLCxGSObXkEE$vZp?HSX2#Y_%uzEr7YvN^oj zaBQoMTB9W0O&T)^!V_&Z8QQ4vz((zcb?4yDosqg6E|Xg1u^4}M%vQ#LZNcR+Ttu8XDGMUu)T zJEN$snH<{~-UgY`On<}nPDdqLQmS_Qws9$278->byGy@x?@xd7BdP|lCl6YH_6pZQ z_6UgXOvaTKw9gMjS_u&IaGVFz8+<5e5+N)Rk30q=3JY#L$6Xv262wJ^ES=a@`WxHc=+qcKJew`@9~J%9lWDqvR|yJ!+-|1N z@)l7IQ2+WFemP*xKt*Kv0Pw_*`VD~jrT9`vsV9YplhuP4?1!3g1VFZx`Pkj+LpAH8 z5`Db(J7!$>aA2K&*3of@#8~vv)4^!v1_soofx(|@p{L7+aJ0t>)G{Nwrv?GP4MJ=P zh_?Mb+V}Tx<2S6UnqDY2==5j~caUnlOD4>VE~RM$VsTkqxtd?6)6{z(Due!FG%f?>yKCLm5$Mca z==sjS*1?bWzNlNp`A&=_yqpa)??iR84%5`?!W-mFeUI%z3va?u$%ortt7f( zp3>4bpzYGY8sAuMriMczS5QVa9QMeqCdh{x_#nky!`*BN7Ra+<_VJH8l75E7Nk`bo zAdH?W1{>W%?DnSs{23k*ILAg8Ydeu3zp9BNafZw(*cH&huCP6|ChhRNn2a&7*Kn-n zQ0?m;*I)^sgF8q^wzL(vB9&`T&*%A~S4nI2u}2#N3&M3ZCUs0CA>^a9kDv*4#dVJc zf7{}~6!wgO2GV&L#NMr~<#U#hp!D_2moJ{Zf6ePsR^9O44ve;tq*=*o>#i4Ft3cV` zBtKK&6xKBc0;jV@@xLJNDisqjpSuVhCim~bnv(JcYk!6OqXDF=rh^Q?t(I;t0CMFN zX#u>|lUxbYPT@@>v&W=GuBT4-?PzVNRB_EB3+qJF^jb>cY|N`a>X@NMU*~6m@j4ip zLJNH~4f;o|x_8wk%PC7`W{}%_2)r{D-PnjsiW`hToa=@r#mz?$*$1Z=p}CcCylQk< zYlq!tXSoq^Q(uB5S^Jt#&I;B-0M=!CLUg%b%fwY;{c$fgHQ1rjFvDHc>jh*{x{`;?(L$)TOHT z$A&5H6{y}MpZ03tZDfBHH<_i|&PHEowuh`@Qq&2e#=og@Z?V{mFQ=pP5(4$vq++|& zA+c54OD#Ju#%j+DVFQ_!Q*5t%C{S3nTi0ubK=enwoyKnKhIgZhi5_h-`DmjH7Qxm| zSYA7v8;FbIW{&o-fsgM4N~P);R9wfpfE~0?v7|=q-K%W|AE2fc``nC#&|qpP2kJr9 z_(Hm&w$l>oX-G6keGXPAq82I_X_Xxs(+o93Zs7exgAgbJ`f~jQLB%|Y+j3ph06`Ns z;(lr&WU&;AA0@(}5`$JSQ3v9{uS_Y3L-v3t34~;y<2}sb3FLv6YmS*D!_WIbtr-JI?c~68Jx43+K+?mDV~PQ?jlEV zJV@Y2C;4J>sZWMh92m!eAhh+}?0;q$JR52v7~KnyChH0#i2A);jMNR7FD~8bFhkrA zv}7-yeR#X~{=a^H`})J1cNm#W&j8-I-j)hdW`N3 zt9j_5b$={}>g0&6b54(PoA%X^iW6|WfSNTlw<1NLRGe{nW<5Oi-jr@*KpXy%0I3rw zffN*sR9<3=i`mo`N8%KM>P)*!9aBdG9=o+&n3P5BWRxZ6TD<49On_k7>I16f zn_+(^kk~_cJu5m1&u6g|O(8f64oT_Rb>(4wlbejSzR#;;SVdjmf1gka@KFr({^_xI 
zvhs+=W6JBFa#6DdjoQeA9dmUEw{L4j{G4=D;a!TC!*fDs#!(1Ykb#8=1{?P;%2Xe( zOti*0BEp2&cB<8wT&?VCJg#R; z4bn*d?8etr(kN|pw=p6#nX&j1Uw=XOe{?(9fs$z^vi~Tt}y4Z7E@za?O`C1LJlqM`C&fWO5Z|18z8k!RFbI zJfO|kUEqO;wQ>VVt%c9JX~PTbVq!*o21hub`(1rvLNS7QP@|sWccPiCVQg@EuFTfx zN%n@x04G_RL|-F@hQSKh8AZ&X)fc*$>ZqNIGQ2mnVOSLZ58tNlPW_eb8(&2FQs=&- z)Qt^q1p%R^~LN?91A7PwC_jN~_p&W6na9$b=wCz1$M_J4pUj~&! zj88ftwz|%zFlKTc5@@rkcggu&5AI}>S+0gbQ=PBEqHGjb#-wbrgI*t7@JU=++4%^c zI)D+`XT{Ix$&f@&qF~#pIlQkaB;_~N22X`Km~G3=4tdbMKqFB-vXk(vvta5i&VK1h z`5PbqGhL0PgE?-6sj17GRS+?s z`&Ge;h!r7MV^=JaLb1WZK+)ClETyf#60+63tvR9cs88^9-t`z@EI|kN9 z0IP}r`S`(B82{_>=7Zb#UpMmH=6|@27WPjSE$q9?|L_KphFsyVkO@M!7KP@gPH;t?or&6f5V~w@+S-4-{#B)spjixJ^z;6vQ%Uerp~f@4 zHdjn2L07!ID(Ptst@wgDW_@ct-F1m58ceaEjg_X>8r9UyZsj3pXbfIC&y99}gH;hs zLe{bQ*-ZH7Kw2VyQrECb)?B4>l^U3>YXnuDwn7BLr&urxN$&m1%oT2eD0hXc7MYa5 zhc)M~K%hn5AcaN6ev9ll!l_s_jYZ2u7G{Rlkji3}9qPKUzI+yDeJa+Z$eU%gNG?kq zj$LPB3%|z|xh)a}|5f<)bw926e@uj{=>O^Rew#Lc$$#?bQHuY^zkUAy!RCYQKV**p zK|sF0u4~Zz`4{K^S6zQ{V1iH!q^L}bfhy%cTbobR{|D>;WcxP$`^`LoMCZ}10OwYK zvwi`Nk*J)X7h?)3eROq$(|X?fnw|@0hS695F?X$OT^FLsG9ZHZw`z%B4BtQf@#Tx*tEa!b z7`}P?;-|g;2XO;lz4-9{?dw;j;QOu>JiS9~jIk zkkDQ-pp$_aNHBN+{3*ZexmW0U=ks6l<1avw+9CsTgw_euhC1dM4_tNpWjMhcH#K#j zRF4s@CjQ8-S=hne8xmAQgthxWlwD-3j=%IxBQd?H3TCdXb+uL`D}f&r^9@(uQT-+Y zKrvnn;qXjG*2X}8+A@D^v&n(tuvJE9hvQLJPIs#N?orm|G%=O^)F(Y-Y|zmB7Kj+s zzqE>5{?~2)>vl9@2x6ceF8*DY-5y|!M!**Or}uRWzuply6hx38o+*s0&J9f-8T<4z zwb4SBsW?`j8^mR#78C@_8$oN6;&AZkVrcw)d!b7aT~CCW2Crs&V-`-|YXD(}VWe$T zyskd5uV%}i6#FucV^-HaF0951A~eJL?HUK{zyXuHT*DlBep~8y^9e4-mGi4$tvtc3 z0oVi;BY-Oh-WtrOF&sL6*%IEt^wCwry~5%Pn40h#Zajy%6p%rwF5qRan{_w#HzR*) z+BdX7fKSyX+%EYALyk6%UKrbEdKTJk7n)k! 
zC2k5J?Wn?mAhOc!4p{u8q5aJ7le^jb*Uw+~HnLKYnwnUesq=hB={syW_+S8FtajTL z`p}D$t;T?%QK{&D7i7gc9**veiBsVp7z`9)A)p79gC{yC>$$DIvs)n)yDZ?ToZ3>s zTDZ1?zyT04`Uq!pIG@cXCD&%*H(05%pSh4qeY`{1<_Y5G(G|v>s}_#p4*yL!{lHZO<#MkTWoK_gXA%6$u%AX3p3WC zhE=K<8=@5q;*A(1ZNsLZf`;1!0Mcp@8y4rDB1e&=M}6PLbik$b3m1hai0IMcb~07I z-%H1%1w#%Fua2k+aY2mHsbv=%SyndTA7k%iwoYb?{RR%rJ3RPS7y7JT!9QQ)jmKbA zGSf$=`4KFzVtSU433zy+#c4eVI7#NC3A7?h92SULz(z7*zDZ zbwHr7{7Wao1<>%ufa*gyL;BP2OhQ6a!AnJB;n{QgF|!kIl(X^#l1%9gC@eP-b+Eh9 zMEqd{>B|}HFSB2NO;X*z>QINcYc{TV4sd{YriwDl&*sZZVHO0gFqBHfi)se5uLpAA zNp0v_fG_rX6QnwD+hG}f3Q>x}=FAq{%{HwF!R{mV_Tlx;Uj0Pr3AQc^Dz%AY{UCE}yqd3~x;TF*g?=vda!A6bxEk%@GTP&zVA<1E~V zfjaHua{t~I2C&+G^a#GnM6K6rzusmd%GDay&#q(J+rm8Fng3ZYM+QuQp6U)iE+OrF zH+zZ}kQi7+k-C50(vjm}8Y@=cqp{7B;yKg;RbWs2Jr?0goldTpUpzLU#D);Auwny# zs~g$Qt+|e&pIRA61L_3lDJ1SI2BmprBOh?bDgtpJRD($k#0v0W5F6lRtW-f@wc!yZ zrZ|YXmMe%y7l7>(hjb7VsZ<{lrKD>EGbPj|Ihv`3W~j-U`m` z;lgy{kpuN?CjGCwG=bmuSi9l5hzHsYnCx(*0n(;z!`^=1GthYhr;LVAl!HC#*_=*#)TJCFpOSKqm>yd%e7n}tPO{I0Wl7y90??j@i7#&XBG5ZcIpBB(tNAe zSPTzkI>pl(XZ=buZ)U$6%zmM_V^>B=NGkTc>G*3f9o796W>QVq#zr+g;nl_oZrQ8= zfHi(|tMyv%P|$xqwTJu-UQIP_nU_?HM)1@d@O94-H$@L%)&<>Jupx;af3hn?hBzUFrO`7;pFMTyQ4C^q%)wvu0N zm6SD@Y8ldTG=2agPy2G7;E*ZArExQDD0m0dK;5%;isslH%0IWV?aTtblKn(`DW73=9`Qt`XXKFJ({E5IA5 zgb7bG0j;<|`wq0)Eb87brg)qHiStj>{LAumHt!xB$VZg(k^+Ir1`uu|3@Mr3L0Vw8 zo9?Dq3%Wa0{oNJE5N|Qozj%7%z2WyAtUYl6a#k7GJNXm}VannvaSrUwBTVY+F`m5k zK?Q#E>w6oxoSH23zK9uK^6ZLEbK^$2Hy@qpEfz16uCQy0vSOa)oWDx;UeNbU@}6Mh z-eqI!M<|eCI3WT>$hYq@>>iXEKvQtiXsh70ve$xWUKVQVZ-X4A=~Hr$$+ z3%w){_c5z|m>+|~eF>ReM%k$Xg+^@_tMgtr(Q6ykAC#`OwX3nGW_xBM+~>^RGG#;? 
zvm_=pTiAPTVe2}`v#7Df8k=Ki>?~=(3}|o}ROlBN;2>*W8eFrZAAs}E1}hg!fW5>6 zvVoekqz2uEHMBXl*e|kW6Z_>62r*J+pFIFWgZ(?3omr}sIjDqzU5^ft(FRhDewuq;FzRTZ06wIYUI zb^t`g88Lc*90hP9%TuxZKF<=YJp1S`dXk=wxg5?K=$DB#X8pw|Bxn@Id4U~P1iRP9 z{3pvSe-Obj+(d8VYgMnJB@25jE}Wmqb5{iD;ZDNwh1w3MilPYb3J*#*ArG2lTW z7;4C`^+vz#`Q300_%PUvFc?Hg0oT{F1L*SxMVAl;OxUgs+(P&S`KFEZvbV9nb8kzK zo3e`L=yiz>xJN|wJt!ooA^a)u{TTv_%F$SsP?!zGyRoY0O8ts7n|mE6pIjs~37bgT z$&JuLOUz$=p2U`G;ETcO&tPE#8#fY6@9nR>>qyaWO>Nk9~gycXZ>(zcNc5BNqh_qSW5jo|!gH@lq-@~d_ga0*=3#iT8m!W5I? zmKYQXV@D{GQ#x2fXb9#HOAwe6&Ree=p)M{X_b)wN&v<}v%=;F^OwRTMu>~=N>g=h# zXHu+Y$WoSg@J>Ew`SNJc;QG+solF7~RDoPK(s|*c-&p9|>iX1=PxvGx(a@WKy4Ug5 zs;GOfM3TCT0vA zOK|()O;v5GD!0?R%_xvQAU||#;-ehOV9a}l7l z9=iECUsmB7_c=r~9J&r$sNH-(VL~*PIiV$f@A_3>Hz#W#vgr!Q3@}6rlchp7-WXb2 z#XY~hbAH=x6h+PucTlzK#XpmCWkZjCi#nV6=o@Ii)({uTlbTdDTmH`O?^I4$>Wc3? zIs7#uK7RWu$wMmQY(|Hv^ynxL4fLYQVe@dYa4;KzNRs-EIAwGULQ{wzc4;K2)W-|5 zELbrjYBHX;?_bP6#0q=Zd346ruDg&_IZiZf^q~e+yp>?)is^Y2nMDhs85od5Z~5ZV z6^^JGLE~cz&()5qX|ly}#YU%1==LidtpuNFm}}j<*4(B_f+RR7&Ws~cwcOQWDv7ei zuolS-Z5=eyhz-o(0b_DHBHELQbQv)5?0x0wp>j?SqOQFPi*Aw^RjXveN8J48L7R9J zGPkZ>u5VEorUZ>wnNMwa-F^lOxl-V6e6(cY3ZrS$*Ohz8cHfnTO8ffzA1^yNR~m4{ z7E90fFkU$HR~spaCcgV|GTWxBomq{-(_Yr$Q#tm8-dAX|N&^hvWBo6Ggh(1xuR+8N zkHACXOs4NroRh-fXwX36Ja<_%&%wu6`n)j)(o+<=;fy8|iXaydRSu_78fA z{i`P{GK>IY4pK8>c!hqZr-k1WPg2fO6>wUi6TTz=>0Ix^Eg9MYSNY-9^*QW}eE-$;0I9r+?Z}NUE4Jcw zIqubLLUE4P>9Hs8%wWXdhr?dDGgJ3dJjt`kc({TUvt1+Bgkul%RsJ{i5~riAVGJsO z4ImqUx&Mpfks=1P_3si;syxTRjSfrc-S})o@^lAt(#{_;ICOma8aPI-aIAR?!HK?B zTMkodRi#H0X>6LaHR=qHwe_eyNik^Z@}S3wqhg2h>GNoDQX)q5`rl2}R2PXR136}G z7P5??;K1)DiGA5ZwbCfB6wL%<$mm%H3oYZ7M4cqLH*1?8@O3`aWcsEygNQS9bI6?r|d9S=Q-1F`S|XoA71SL$4EW+iFBQ6X|DcW z1zeVGkuT`|?T6~;U+~9v7J!3t+q@jrdMK28=#|s8wjadh9>?uIl-hmd!Fy~v7?#@> z#<3NaYd9@jWRbRb4l?#E7ir|yY;HkC|35cuH5 zrVy6Z3wyG^6I=_1v4PM}fH;w{RnX(%u{vM4*yc}$6o?~Ug2tyd@z5#GxxW3q% zQm_ydiTFzJz(&WQV_|)d>$!zvX-`I*1SmbKe0xSlx{Q_u081y+z zf1~M3TpfcUXXq?4Z!|r+b;Zbs9gQrDf|~W$OV}T>XXOD&vh%5V%bRv1pD$)d5JbX1 
zd7f|S=Tk`4C=~SPPllu&PQxX!XHD#${hl{1L4eI_-4)Om`>rkC3v7zji6RFe_xeLw{5bqy0KHnWMS<~zZKBkQdJ6m1O zEtcrGstVa16E=5+9Gpbn`X8Pp z)OOqL>F_=m=cufkqeV;@G+9$()1@{YAcOJna*)8()b~4L^w?*kqtjxVGl)jms+(h` z(Rej!rHCS_DlaLZWz^SsxuXA?X`F?6XfGz>PUG+?*gQf0o1{=_{o@+j%7RLGl4fPqBErxsHyP9lK{av<4ue%CI+QYExI$VfuYpP%b8EAU~Q-Dq0we(2Bsx?w0}KLY|3!Z6I3};PL^XOI~0Zf@x@$XJ)buB z8mh2QblsD&N#&e*jqFqF2aX&jGwA?15znWO#8){cob z)Z_*L0{2m&db_0$z*jE29bD7a!r5qd7x2gHPl&q5uvE5B;OZJq%K-A3ML`jC7z@i) z27{AgSmZ{PDKf_d6VQhNf&zTw&KmAp+_s4W?bAc`Ckk?J>Ee`NA=zNH<*`3TPm~}% zI$THiHZ6Z@03M}gwv-GeUf}s`-qQnYBM!Tp9nO|IhZWGNU<$0}O`G=uGpO!T>2mP4y%h<*V^$>ch%5K}fPBkPRNLFaCW z<0ubU{$+HuoOlB4h}61^K^9=PP4t`5WI`FlGBjC*ZI4kRh`wWFAepp=WVOzv2)CzT zH_Qx#``O{>2)uibL7w5cw7GS#MYMq_uqfTh?4&qS1T&6yF*HR$=iX(_^AA-=rh7b@ zjWA4zSLYx=YUJ3bhaH7H(&9AyNc@}Y2hkz1DBIz>nymp3!|PhWC0v}?K9A=RL9qnq zmGCb1!cLVp@H96gcpRG9rBNZZ@57d{1kn}I=BsKv6`TymKi)N6*zdv)WDd5$4y=kI zQ$ao@k^hQUu%oQpS_h)bT2uX9X;>vz6XExeZDZ_+yZKsvD(e=w~;i;9` znu&AMX3ABFq;TykqRyK60AsHG?E}hnX@}dAFHR?aljV371{W(!_`GJvyZ(YFG!kx{zm&BaHn&H@Dus=}VZ9K-})dq&%fkj0#wc?=O@L~+8f^2zsvixc|Nr-VfP&w4sncsn;=`q@YF&DtJpW<4m-p--KS|C3$L&J}Zaseu7Yd0yrwhTkMN zSQedJ&*qZ?c9#yUxO7bnvj5Uh6o039v8e_&QdQ=czyPwM*=T8cj?d|GIp4W||MTb1 zgAsOnFk775pXkEn{g->sUc7qu;-2cdsrF$yfdpdKT^Evy%`rqIC?_YQ&pk8|`u6S9SMT>;yvts{&7Qq}^?dLB-s@ND*H78gSO1m$eec!tewG&) zjWYi-$24fb2Ew$e1J{Ax;uEld3`=nG?wNJbP9qPs7npvD0QjS6_W0h6&56{ z1(z#Gm-q4osduWidFei*53)jg1u%4GMxji;SQcRx1XDcVBB7_cb9&a6iyqg{Wq;1P>VJPWN;_{l z!!0lkfOWgE=eH?-NAE63hFP_prBl&7@Lwe}6rQuth1(1B*NHX>a9bU}h965Xs-h)~ z9XiG!SW(*#r(1dC@t!!>zf zrpa$WT_}SbmZ;akmC}UN^a2oTgnAZ;B3+mi>SlpF zdtxu9i@U3mpGSvbk$kwL0_M?p~xSIIX25w>3XDkPsQ6 zI&}+qy+$jjOJaH{2B|7c43#mmtgEfJ6f~x8YS=NHFV3fVa0uD3hVJmepRuhzwqF)} z4@c#6OLPkXf0LgV5x``_;NnO=C9#qg( zbr$eIthOgyDPtESTwNDfZ{qb!QnaRkbEC$I@>w@FIA9K|`ALB^mzYbz>AE}`fv)GA zC(VARH_kiw^!i+g26Hz0;&`Clt$sE-S4)ndc7w13#6HK+&0db7Be3~wQXE|h`JE^E zam0b|b~=6`kx$>%Gs*ovfU6FzMD2|1+P|XV=x9>rb5Abl=jaZG?GIUJl`dQ~D!)rj zoEs`+9=p_QftSKwCOLQiJU35V=HrZjZ>_{R1<$S(bHF|pHmwKx#maODAi@jdXSKx$ 
zJX%B3Q?C>9z;@%22}|yvv~N@5X_~T&clz=kxEf9%wA=x&Cm9TwJz#2R%xN5mO2K!C zgk~iZnwiSIGDsyTw`G^o1U?A`juEY}RsTmG3zd35^VI179z1;V(AWQMKYp^cb*uloiRV`TcdP&Vr`7)%T)+2bbe7Va zAtDp?WhMh-=sDIvXJ)H5)tWKDR&U1Mp}n@2L~O`)td7g(D)rPdUfC;X+$?n2Yn!)n zc5=3q$Kg5JVX=PZOTU1}@U3%M0@g5Wt}Ph&>*Yx+Tz}>ubIW8Fpig|!V~ETBnshsVE8Cf{SVITpQ`&7uj_UPTu{;nO;@v)A%c50R5*~ zabcWDilCK_^av*v5PG(c#BHRS zjsv5V;<5%&x;g^PzpC!=3aY{@EAxV}GF{%3C0cK)$Fu6YYf_@Z7w>btilJV~Lax#*!OLVOL5{@OxUfi!WbrkIDnBd!Hy@ix z@`zMjH5m?;x`(BL1eWQPg{GHP84LMbFjA#$x{Ktc+KkW%Gh+CZl>RDd+~f$!mbTG^ zVk@#nqK6Z7dcTGex*??!5Xg+$q1G+A6VouJrW_Aj;8@$xI&2TETNCT;iMCqU7zuf! z64bGlpygv~sfn#sta@XLl&Uq#k!XAC&8yi>{gLH130dn_`d7np{mwP8oj?QINoim! zDF5~-!=MYp(h`dT2e)UJtj}j{qqXMB9l^vrely(_&Y!HaS`o2uCc4(}A-c^{g=#f7 zJ`4ZP<}S&JsM7w_>3<$=Z9aS)>VF0P`C)x z=SwV)_)7m=yU*BJ5jl;iSD|Lfm{A4VB}>W~^a}DbO(X=d!3-tH={b;GI_-h}%(K;n zT8=mh0eR-AEj6EvF1acKwL%WScnHfG1VS`#eSJz7^IHxf{xon6UDAZ;?gi`>0T}3< zaeN&6A(#WH<0bEf`6EcjVit2p8+w&h9nwXN4yE)v6an(PbQiJU68Abts}UVd^Usq@ zCTl_AL1G}LDe46B==%r#EDU{H=3wEnDaZwlHVMOkcq560D_Z0h(3bQ|*QMQH!NySu z35Ib(qYQ6amdD`7A{UrDFe3#_KT_Os0mA^wKm8SvjfXIqI=z~Wt0 zorFc8n~V+thvOdv9SVhN#~Uvh*kmD_nnhtsjFp)bq1vB+d$f4?k;PKpON=4HT~r;p z!&6M@u=F|(b&Y5}53I^DnBG&i9j~pNMK6nc&6j`vI4P10Q(s5!I<;QfYra^ zmryXDuz_$!7RoaE8R;67B8!UqvGe~p}0_E)Y66)H< zh~`0eT6<-KUnS58k$FrlxQXk)Qui84?fyp(J)uq zP)yHTx&p2uNB?fEMZ2f^&BkX!y9*XeV6+Jh{j!Xzy_pj zO@Y){Cso@HP%HLPJcYyKy*0r(Lz>2waX6GF%AU6r`S0m#(kD?me4(7ItxKyfV5`%s zc0Y zZH46UO1fYsENHF{zD`~6wfW(S+_c>p{6Z&N*K9TkPTWrvZhLS3Mt$tr@Z0x_`8_D!wfCNYV~og>$Lz*7 zsIR=uz;D%#*UYWJxCr~Jnt!SICfhdJUPKpiTZ_Ny3pq6Ci!NkcfL+L@ip3YSxPUfz zmP)tW0)YZTr2v7Lloi*yWmXV6t!tB+x-o(>DIWa|{5F9yDa?QJ1%UL-H=g#ueQyk%me z@ryHzio{kp9Y>vc$fZ`(We{L$?!m`jY|>3JP>8tX6fi^sgy8Mz^#z>nr-5B6A_@{5 zb%`G9;&h0u3JGK(How4zta+#GO>wE;;mW+rm$1DnkbwyvA_ze0wv>y!G6egEA#!mQ zY)f<|{d+yKS!Oi7zd+Y&x(p|6F=MJOQ7jsFubMEANys+zZdf}>HVS3krmMVW;L&qc zb?2pSyZFSWDCbZXbB#2#SISL80&Teo5K(1}BvWISY4*khxa>YKrOYDVmJkoNJX!59 zEk-S+E|3;78e#N%1G3!Y;t0v|-vg1`JUVbSdFwmohQzJ*&F%VCJWt_>90o~9mo}*E 
z%xjBc+bPe{zIG8J#YW0c2^0F6D*n+FiMchztA(dJX02{W(JC!t)>`Nh0UUHY(8rL^ zRiq&WoYcxuwhi^KS4ZcJOS)&pBf0KwdAaPOeo1HSf@J&TT`z6U)ZR6yemaH|jaxYB z`)l5k&RCCLJPG2;t10olj(_XgYofc8#UbI+PdB(tsrpy9Y@}u*`EVF%0g`1;Ud12m01rTqeJA(@Q8qW!ZeHenpwK_~g^tJD4dbr$(MI!o(iO?g4Pe=-_ zphgX+EQSg&b-6os9rY`;o4$^klF0UL8Wio5sx+WxIrjBhpFDhyjT*JVur{L7Y%!bUIuF`uzQ{A?FA?Ls24R@)nuWVppg@vB&xGi@ z7DeB6zMMvGlc@<&gD+>!p)0;b?`HpVuIO=)*kGUMv-vp$Iks1(P(ud?Qk^Y76^h&) zXNURG2s~cB8%%xA&*sa^wcIAsj=xL2h8xlh*+m-a>)~Km5A(_Fvv^{zJD*L17+F{< zh?6B#dGNH|%)V5~Jg}oGaNKnr>-dCJ9}Rg+yWQMs?_O!&w%fC9sh?()PaJNn8KD#R zCcdju^r1&&h$LI0(*_-jTRq8#^9i{E>(N{Szkc=tb?PF65wAnzBbc7&=(HslGO~9T zSM*5D#?e0rl%)IGz+Q@lOXh9r}(Kf4$qbIJ43$7RWSoV2Wnu_4A*olfHQG5otnS(YvF<;yEm8yB_=vWnv zr6Z$E{#7Q90;Ra$)H>&l?gwTJw8A9#&O_;$@Eu%QOh;5vZdvj;X&xjpaKC9sGWwO9 z&Y>vyTNOPUq20RBa|LQ{E@VrJ+pRp$U_244%NhrE%NbAuMg5;c8su1484322lKoiL zEd;Bck_YFvmvFCOR~2}lU6V`0t5nngH+M}!1FXwvqaSYNc%6AX)i6NvH;kzYG+aeY zRayN~6=Br^jP*s$O*y_BTXNz=t`$*5K@n6f6b;W;u zu)Vdl6~=#k@c1_W-;F%C@n3J_zurLnSBk2tL)m=zRQo! z)pB0`yqKNOQyBzHDl|NSQgSXqg)ZB8^X`M_kn4cwo6OejOe@Vp#p7IPi|(TlJ+$#(Jqu9d<+y z)(*f#yfSv=B%)T(}p4orvGY3uA0r zet2{Tyo0wP^*a&wI$jJ&W0_uwFyEzJ2@2prS>l6gOcWWos9RrT*up(U1OPPws|BSY zyrH5rfY1Qv?Tv6j7FGngT=d{iMC=a*)o6Jl>7y`Gqhar#s-LRubeLs-NV{e*9JhiI ze{UoEbJpGIs{fWJf|a8l)*8Bl!Npv>XZ(r9&BSd6^DvnmLDZadJHZIMSfLwDCascy zuNFrJJp2=VAD2t>-Swq`=8(Zn)C-_T-x1leu;CcS7Er!Czlf7zFYTI5)~D-N9kAW* zUv+$^{Gmh6p?hkNZntGic$nx$*(a_5>FbmVUe&PiBg%Gj++l!)3dQ}Vv~fK zB7&r*>4c^le51hx8BYT#c_{XGnNpkR`#d>%6s<29y(XVYztvVCo>@jTB$7)`38_UV zc4{~CZtkE?&sR;EYDB>i2th%CG=~1JLlOg%h0$ir=@5NyX+KiEAVsR@+E-0qS;~r& zZcTQ9ERR@6>hh3d@sgK&t+5yiQZM>~tfVHdd9bLI+1ws>T=$ckcj~ONNKyUyF3nNw zbs!HMyM67pDBcF(sY$5=+!V!M`(%doYSEf71y{9BsY}uRy~Wmm=Wh0XQJkEBAk$ty zXpg&`Wg5HK*X1SgjqyTP250ppWzF{xThRR~X;!6C2)I|n2R-M$w428{n*zGj9u8NbtI|V8GnFJn`Lu;rBYd zfz8tEc48|rjyP&gS$x9d!Nt2PX?MnEAWT_bo`fN0oQoc{8PZ5?UGw@=O#jZ%#)+*hm^IZyLSVW!}zhG!v*kXEa)c+ifyQ<7(A#X(nBCYv~?p< zYy^G6-|}V@4d5)`w$24?39214a5kNqC-%*tj|ta|>+U%hj5iuUm7c0?|A97TcE4?C zPQPdEXwMKvXaz~#sYrrZSgjRF5S1<;xVQ-C% 
zXfxhcI;d>}uh*a26%Tv8+$N0b9-eOfT{_)V)5&m1+RH3a#3-YlmcBvkR$2p4vj$c! z+^B?^lYBq2nrh{^bX)x@Xs0T$;O|84(_ka$O^H&$Ust)V8(h=!bqfv3r?_oCqjem| zFzZox9QF53to9ex{lS@Ol@drFu;$r-Tx#eBGN<+u%N4_sH%$v9kZj3qZHip8q7W89 z2k_=3^oFNVQSB!6gs2+i)-jEH5hxL9WgAzo$XetZxx_bsoIWSm#y5xGS4ZbDD)*E~ z;c0~ddi*uXb`3?SNIjrq$3RMw_L@jZk%HusFunsS)L|_|brqPwL27CO`Db-`vaYzlSy4qVBN0uWz@7D& z%xW}LFs%^^S!)zs3e7o$V!{4#)LLNvdv&AGaLFz1Fr8tm|9e~7#6n&Fnm+#htm(Cr zj;x`2%25>2)H?y;HEH&#nL8t^eQe!2gfX)WEM`6PZJKme+jUK7$lHFf6Fw24x{DT~Lr^)U&jJOD-9> z6*{R+nA-_Zu9Kv~uU|2k80zvHy9HXF%$kcJRKcD}hcCvj4Gfy7WKt!A{S{-@Hnt?H zi{D`9^i4%-xthmBtSEqN^*e1r!Fqz)(JXN?XCkwitRP~Y>>eI*S%(2pWF`$yyLH*3d!J1|FvSx@(+_0vX z`dv6Xiyp7C30+d2r%P?pZWOkpKt*oxv%OAjqe@2(D1$fx#O< zRY~$#f{{Bl!3HZEpfz5hbZcz)@IIP~8?iQx8f8=ty@^5(Iw}VRFS1oD9y=jm!Mbl~ z*Pxjw$~6=&m2oLhqbY)QGNHxpnV3+0@Ax^#|DAi=T#?2uomR09IB=(EgPrMZ4T1Xb zB!ooe#vj~)j9y;_yHIXxc#Zk34LHG_=L!T5flato!E-m;bGXzjdsfVWHv``0BdumJ zSDfC((LKeM6id~~&&6aS0GkxkPaYV>EOwB~?qqf@pebOu72A}3{H2t@jLxTuZACux zbaYRx>z7Mg#hgLK+$2p{h5a4+ zMT8cpl@QdfnK`*-c#_tBZkGpJAS!vNQ0=D`31XCpa+-stJJ46_qvazipQ{|aoShU$ zqsd!*o5EJ`BzOwe4vso*lb?&BE$V160ohuk8gs%uxjrxF$7->^#IpPnvh-7cMuajP zcSz7oBuCR_AA^f^?JQso`%5fq_%kR;f6M)eNJCX2jj=0l7Jf5aP0*L9&$vs|Zrh<6==Rv(bfOw&uW?qO9-kD=b1Xi$#d(b#ft zI?lfcX!0)m0ugv8V|Af{>2sNVKFv}7Ia)3u!Hl{pBVGqw&M)$86kQDsm6BRL1?)$0cUJBxAKWIiE2pa!0RcAoRU{ zMU}~6!Zx^y13R4v(ZnS3WltKKc>)T~5%HMFVt|5(lc_OK3bBV6p_lhLz(KlxWsQ`; zm_)^bf7@#AKau78YG91ynf{He{Spb|_2KPxk%ya(CRvUs1d)cCRKNv_wl=ufp@%`% zmAct|Gs%IJUQ#N{-Cr0!n}|NT$01Y3i$Rbt^Ndt78vOnCGyEDJSkh z&rC|fB(%RkeMdyWKW$`7vrdDt&HJA-Sn)xDl>qXMSaEPOQnnwTl*E&5jr~glOnX;jNke zD6N_7_Mit9-z`jSMYn9gl-%^KQIRiIw)>Be%T& zZ>1X|7HO@8xFZJlq~xHr#Dt`cy@mue*KlaBfN~qg`&cEM!rkmGMA!h2ct>HB+Ehgw zU9h6}P%zLdH^3NRG(9?<1z%0DkyL@HYTSKN6ozBMOjHFm``%%Zf<3T2uAa!2#1_$e z7YZN>qtqrW!1c!jz>tkr5>wqFH&zSRig4UvU{3c`ES!Nx5hH>r*l+Jsdf>@wOz*|R zo{C*!bW%(y8zw!KQ|Y#WO8^?EY*XMkQ%hM#Na1NJ#=B4w8CEX8PGDXmPo#Y6NxWq@ zjwrEg<9{AGeK$gDD{)(<>l`7rO3?P5Chp=lMs5182*+1t@CqPHRD~faBswF#ObH=q 
zlL{U`(4>M#yBhN{X0W}g{qIoCNwFLbt22M-r`rBk|LxoVKH7S4oB!iRo>kXBFmqhp z{(EbC^U;GRk^T3hN4M*L6VI*v_pSZ+_ig`eK3*uaqb@)5dzvpXfcVPh;eH+U&-A!B zu~m;x)@?){c+9wbG;uDPs0@aUkILz{q5b(ZGC!Y2*5}LuO2}&SAD~DsjvLK#bc9AEV(0KyF2O^qnUTI{qDAQPJIM1yy@@9-Gi@R z87yoDAvLSZs#4d02m7r{6Q?-7olIMQsCpjAa|iP4fzOu|%lr%@iD`%-iCTivcQ*r% zexLbKRX;&Js6Y6WUq%U(;ZZ~|7Sbby(*?!%KOGQDqCcqLGLK=K*V(>9c1eFuM(D&o zhlDVLl-L%=eB>-Yo2jj*75+V)W3ucUBSH1o#Tov=hHa|gnv#SbH^*0*w564+EMlze z$e5|JtTC9nM|)E3HgBydRVSFC>ETFLXl4-mZUhJWlw#9l+txo5)y$xxT7r>}cjPIZ z#NW6b&vZ!OXazO`wz_a^wON_!L3wj@uy1TWO&<%c2DS5cX6Nm!K@-uEshK$Q=kLsK zgf^!EQM*4WUC!yz99YoO8aJ^NC-e1}Yqgkur*&S*5J zD47kLNN=EcQQ^yeSIv)S0;ifBsCUv;2%n0%HZM2P4u&9^fk=DVgY87TS8OIQwCny* zc0o5UfebSd!6W-1RDr)=*}`fvyYvLaJr35Y=FE7dD-VquDaa!&>C~h5eY)HF78&yZ zZ2BrPf;r*JLpL0D?FES)s5Ph#xg9t-IlV)V*n+{{8!{&_9oisxSMj&He6C zFr11ygjtJ6bLm zdS-PSh9HPDdpihjdJ$ecT$l-eqr2h*{pTusz6MBYvxEMnYN5kLh2EzG^Zr7*o(%om zGlt9s(+cOGLN|ar1v=u3qJm+wbAMW0jj83>7wa{ozNmI!0ecp5!a+%=JqovuEueP~ zgDjmFXYIp^iU#qDk;|7yr-~iX29^tLmSkh7AAvCCK8%@yPAz=)M|9P&I%;pyM3NH; zL0{*mMMZzlFB3NSVv}C8HC|TaOcm!oV9hu11%SsYfBgBMn^b5_BPBXgN#~wfX*I>s zD=LosJsl3g`Cs`wv}ruusK0*e=q4=>G&cXrt!)k2oY!OxI( z26G2UTO%XEuaCN9J~Fo}p3(|f6V>69&!7*6!=-v^y}?XRdG12i;ZCRXLf6-U-bTy( z^XL)-#A~O~?D$s<{5EhC_2R@CL%Ra#w**FzBtTW@1J38(-p_+GOlK_%HZNE1t=aXXD>#BZtMal@P?Qb2Jf2su#WyKHEJ2KXI9ddLOuY zI!BEDjKS=SOp5`t*>rNLxAl=a%cI4mtz{;aLIGCp-3y{eOrkCC#9$TixZoEaY{A@7 z23~MIk+30hLzM59Sk|CP(6i9c2%WaH7(+@Hm+(ClZOy`u1ntFVmD*s(n6d%p<&7Y8A1)mKHgC=%aGdMu91k%}d`QKnk&M=M zjHYl!=w5TTRS@~P->e#@;81VjHUV88;+(|jHQGTt{?B=KI=aZY=a20$p&V?5>$$FJ|BMo}npSL=_#QN!#^b^XD99b`Y}`aU!} zHkPKr+2I}!!5u8$z7jub0Lcu1w=t+>RY9|#xgY2A7a#YYy%@g!$Ez1_hi_khc(4BX z`5zGlY@6zPz!);X=rhi?OdF?GAcEZnnS$bYm=uvJVL*!w@X!t6T@s&Gp0y?n)IyjB z(kGMa&zN#SE0l6kjI838S7Lo|(T}}-?}9S$y(r|btAxZmen*-CYpQQ*IMD+&-fKAHTo37SVJRn%99wq8)>0YJ9|uKR+FQHrVl!lawtI@s2t zz38-hMhELv5cYwiyqrSNmxu#cBLZy?)e28zpb@Fp9(qhI26GaTZlooRNNuJL)f*#X zqGaKXg(p+djtkdRG`S`>TL;qBcdQZihhqK}1*wrJRqjxf zA>CIPpv73U9ivR%9)r%LJ=0l+k?l)&5^PpQ@T^{}GcjY7sGU?3W(=|xkM)PLEHVay 
zC0bMk61f)dW~UIw0mfSxKXEo-^T=}f>cwCQ`s4^w{?-JypdqMM!M8CYFj*o)O*Zos z1R@>pog|5+H+yyEJ?f1k=6Sr~*~W2X_Lp~WdA(1v&JtLx!Xax;##E`H!y_Q^ecsTf zhXK;*GP_-YVio43qIb=M^)wuA1AxyJ?|3Ib{Q=kJH$ceU=y!~@>yC-`vLka#UsJst zkbAF#8;0hLV-fkBq8yBfl}Sb;Vr3XFjPXOyoP1HCDL7RjKWx7T8B#l)&bxf6rC+*) zwV_sZnbze!+PI%j(Ni;1obinEJ(fCY<7Au#8<2$Y?mjiaJjc!=UbT_+;G&#fZe*k5 zCD~n1^UonP3x-7eAT9|2^5l?QvvP6Pg!~CE9Ewe#>c$#xG7+>9{;IA5E)W80ci&qH zQJTaXHpp6o@i&_X37Yq-Mq)7*qm5mWAAivkU@pp3S`-o!JsjZzLKwsCv@ z4ozY@Azp5ew^ON%@bhVcI6#oct&~rcM>=8QOOTP*flDAFQNFRNu4JW7Jx4^o_x#mt z`C8%r@sF49wOel7#cHA7Zenp_j9#%!axmknJzK4Xa`zh{Wiep2e^#qngC}Wo!S=2c zLBtmk*IHXt<#jcW3&F_nd=b#6Wr*k1XCk0sfDLFUiqb>`DPXV1 z2{VyE_BtXVN6YCNr&>DTcQH4)@jCji;GW48WZfTxCFPAAI*oO#bR_BZ!9i;2@T6h zFeG_~%*GId@Vq-x)bd4s5AM$K;iVsY4aD7CRAwaI)z<0!Yb-woWu?kRn-MuMBi81r8T?$5vlk>BDy6gtAy6x_>q&|)Msora39sEYYNXhf# zu2gK3K5kF0u~I$Y`F-Pd0~xUG@lH695>K=w&FtAq4pcKNAbX!cV;mJQfhP-z6kABH z$ynKhn=aJZ0{pzQ{b-9yRI$a#Tdat}&Ee#gN!_ObhiWf*KJ1v0e4rY>D|x zHlfT&w$i3J5|A($JQOl`oZ{ZFa`NzYupj!Qj)@6iT4`T$X*OFd`oHo*>j*-+TRI>R zbx{;ESUYS}OlOB-m|o0&V(RG)g?|>_GzxAMvLW~}-$F%c)=4d#QS)0`u{#?fwCc5SZ$fpNVjrvJO2^PyHEbfyTZG=gYlbhrID}E z1^&zE=&8}(zK;`Ac-3$ytdZWn*z&zjqnV~ zs`ExTLu4FNf@dF($gKvQV~Y9I3|fP;9*53ML_k}Yn7w)rpJWOx3eYZ4M&fAQI5zD4 z9hq%X%{U1KBi=0KMb3s21*Wd#s(v+tms@zL)w=;VnI#<{be2_uzEv`*jolBiEE+b3r)f|=Qa1ko4#nmsTFhiurn{s zc5a$%TK1_FMNIJ>G;jxKn`P_}#ftSMX(BVzM2Faxx4}xsj-g4>a+R%`$Jpl2A-BRs zaRJ)ynZgraz`i>jU{#?8dHRwylKH;isdW`LkDs=SKa~#W17l^pyPG{sL${b2B#jC; z%YB_RZt8Kq8&abLKKXqjby5#K|&L++W0KcqEW!IrNaOqK3X*O!yW zt-=$-(=77_3M;j6RSihXEHM|9P4FnQ#lhh0N<}ABf9bMIU09R((0K5hS&9jVUPLj} zduLZZcHt}g7A!H7Ox1Wj{%yIOSJ@aq_;o$AWj0#mxch7-t*G=0`)9&T=kIa8F0Lby7xxQ|Q%V-6x=`sOcf*}({#0fcvqTre5(A%h!3ry4juqkK9l zkup9yJ{dfF{pzPaXhy%VP@VI9G1P;J?CXFBaKqZj{+bym0~toVxITJyz-lpTFt`*_ zy|U_2ECH8yXK(M#JKW9S%%;YxgV}bN_}b_ytcOZkHP+Tc#}Z(zBoV$;Y8s}wo;t%s zLU%0lO@&s>I$il7jZ+gbNs_OXkza`Q+_aH9R3ekhDqcj|m(}DG;X+taFIbocVe~Ur zDsV<+DN5=L=A?9)E>$LPmZ!)tT^K9kzL@MnwACOsCkjnS(iPApst+ZJDpzzqGL5M3 
z&;|fRNPOPph_p~+cb89HB9IoG(3SCg$BEq$%XdOu6m(#hbs~-H1q>dAJzjJA*(joN zZ35c#3dZSZVV&$rLJf_aW74$dVk~ntN&Rs z6=Oh0#Uvn~>Szu^wCPf5M*?G$<33QbPJ{c(;VH?NGe}HibR=R5rbqy`+|n>w&ACe%bSy{>g8B2|VE>j0hq zMNM{kZ?QV28(zPB4Jf(ilV^iPY&`>|Z&!E~`GJ|cU8g<^x?=JJr@HDXPKi>o1cp?$ z&h#2FSENPdzVp9IiX=1rv5(+zh&Rq~2m;g@53X+>Yi(?;^a#k$XbEh>t##a7`zVYy z(uuQF=JlHzEf2|)gjx$H3iUpMQqOd05XZ05<1#B4hB;|tFP>721$SzA2}XKL7Q_Ls zrEpoji_rTR+&e4LH!Y1IkuBjJ!ke0RALLzU>g{MbTU>Uy9pZdGxirxqPiN&)$2D?< z?WfP9#R*b2OWg0&z(0S_nzbwEUJ|*3PO;B+|m2 z>D|E1u3rn+bEnX>`8{JMt1u1aZOK*FDTM^(y#RL#ILyToGPnSL!Qd=sssp(V+^V&` zC3#{vXTuWi57wY}z+_t-(H}|T*k~#oqb3{hc%z>d%R<5W-(2@IaF=wqnwL)95Jv%7 zl&mIMuZ4DN)_#%oPH}l46`SUoI0co>>^Qr4i0Ap@F{s3X4r?jM@ZuqLH8W{E!UB&| z1)O?62r%4KRjw7`9#1MN%x_iYW@@b=sK0cu$Z1yBsrO6(BsAyu)QE{-)xsu;Qq94nW_&Qb9j$&*dcE8rn)Z?|y)Z?0&di)*F)MJ<_hBTq&ftwjr!QiA;L zW`xF7iHtiwpb$4i^w4*HaY|UBF9p@DKu2wHzn4o~#QhDkjnh+y`$^s31o!Kr67DB* zOUC`|0JcN5cIj|0_cMu_yG}IO;b^iGpvh`Q#HvBW-Od%lh`jL@3+agqv;H zp=9KAxlb_&FCRn@2|X40J!D5#y6=pYQn-o#OK5l0o0V5jd-hy^*xMbM2yLzN5`aO~~!Am8`kCu*T_kb>i2vsbhnv_1$Y(q5Fu!YZ|+`F`sXI_vJ>JQ`_zoy`j+|8LSfi`YBONwUk#s?zeo5}f?uoi#hG>DEZwDkv6II1!(?g4?P-dPe zk)ZB#zm~v+E2;^3sqOc=er2?UYe1R;U#P2E<37y|fYPNZqfF|iOr5?2N+PGRyF}xc z5PMV$i*@)7_XE?%J9(!0U}QFv`v^6#1&Q?BM4`o=@MXmy80XB<{l{ie_e$K{+3N#`yQV2h8NPSr99`>{W$|ZbN*aaCaZ~S~Ilqo8<);;xR#qG!qlxyrXyDGPvSK2$ z6-{+;K{Sl9=yzzB{p~Dl%W$HJ;K$h%JI5w^9X-Uyg59-k$6YjqwX2_^%WiHO6I(So ziFb7r{xis^JJU%NAZrXpUxp-IkPuAap+%w29RzpNI+S@Cs;}_g1}F1d%&~m#v7OJOVyQ;n+xQ2=yGx#`QKSHPG+VSV zXVeZ$;Cm3pbcrl6m1`s%hwo`E#B1 zs-cpC&jN~2D#sydD>#(qEV3Bs!A^_WSav{Zr3edhXpj;Mefg(9X^pTy_ik5u#8=Oj z7Bm(NC~aKAP#Ra3_Lr01kODhs<;2ppUBB|%myY3>HG^K%8RW)3N`WI(;0HxRujn9^kbSG-9tO%~_R*rp z)ovrHAHF@$>1&6oResVXkmht_E{Hh0lsi&~94P zEMMdxhM_oxGPUC0<1xu0B9-t)>GSKs&I(KJ6HhQ*0aQ2+M%Eyv_w|e<>XmGBQQv=C z#bEoWZ{`CfP7B8pO4NPV+u*89Opv{4f-7W2W%*s)jcQMxj+ilZ#Z6n`rwamFcdAV! 
zO5eqiTufZKreR{qbg ADxbh>4xg#1(@{1SyU&|;$8?Kq&7!6T3m*m(>V}a3MEBQ zm0;ssDxRQ6ulf{dwNHK5&Sio|NW6;c8WFLQYuO95YI@*j@@mnOz!1gEkjn;vSRQBv z7wDj2UjsWl-BE6$(jxe;l=cc9+-Oxfui|7^@QBnVNg^_-396c& z#uO5iPZ{&c?cSV4-)bg%jSY$5S+}KLopW=QNnWdtR~Z@Hpxu+-V`;ZhheDGmcm zhRy&lBZ<9~#P*fM>LuZYhZByoue$FK-EERC>$`Bo1M)k1==oH^B;mU$o@-1 z$!3RO)2D#}wwj~(ffdb0OVe|FPM6F1&i(tJKYv!VF?4${Tb$gV=)&dwmwV4%yn6TI zp6a@(_F*~!Q^5rVg8_*Jco`K(XrnY4eFh=x=wv~8tm9 zFWzOZ-)7HVzk0s+e(&`w_3NkX>8txuT>Bl&&aTj|Pt*riW=&3!1&lQHbL$ ztM$hsNAe>z!0E~P=p@fhW(xJA%w}%G{b^1LW2T{tbE$zvqZ{10gLlZ$@yVG&F=uCL zc&Z=3-B`L5^3fW~R+py07^WosymN(GNc^&T9Af>LN3nUT^_(ddzG{OC zyZpyd36xTeMO|n==b6BD{2P8W?g;HLyR4}3N=7m`kkyuayenG$+M$CBI#U}4p^;#r zQmha$`+6TIdHt0 zX)sb0n6YZ^O(_#Ht1d!WI+~bu1rHm5B#0dyXBsJvsk-9OmK@vsq3l3>36JGsb5qWJ z>|e*SvB;HdaxEOfJoof2H&~%Ylh{m&2r>_sg`KN%W|IjdgZCxhif0@exi_+sKn|+k zI)A5SCndF?5LQqW2Q=1YCG{K82bs_IN`UBosH4=fHquN#(@Z3Hv>dXQrqIe?9l*f+ z&zm@#-b|jeWM@c5QU+YD77NmsHrqtCBCR7=uFf_H+4PC zNzUVGwz8=PCwkU3-z-K$ZOD=|;zp8bcP`Ehv+%Cag#9XG0!(ddmi!536esv=?>3Ez_C)W173ujn)o>N{5OHRp=dT{jTleef}g&<2yQQe@Bboba0kdS zcM{03x%enTmMtUH)CHXJkeU7{Au~C|Ol^P}+4=n@gUE!~snZBMMYA4DbekyM6Ff+h z3Jq_(#J>1lsoTH1XI1_Gd^RbLE^Af)_|)kCA3oXI^!5MSPqrUDyw(5T#B=vn{eP?e z|7TbKzd>Y`QvDMu6Se;I&DZ?X%LdwiR=ueOfCrpK5@m0-797Q}8>xC$74N#>Ywm;c zSFj^sEr_-RK})K@q9Db-Ff8M5LZ3|O6}_2!9Rlb-c^D|xb18->iwehiI1kE>G!=Fc zz+?5iRHp_&+MSb#h#V4DFr9*7rCa6uIvGGb;1GZr;__3(m$u1ys|cFNq#>Z3+FCUj z;xHgsr4)h^*}Co|f=FUCNF!L3UeZ9jlI@bf8oF3E%plU&q$bscSsv5SU^{8@xsqwe z^%8x40$HN^5}9H{vd<%84Ff{fxq9dJ*xdE`G(@xOO^wK7iQH9VwFPZ@scKslexs?b z*4x)U@li4h^@7RkVSGrv`on zg_*#7Ygw*MWk^NKldzC_6RH*8Q3Cs)A%Feb^R#oZpXq*xm{U|Ysz%^4g4R`;cJ#2S z1UBeZ!cm2^o!0WVubhF!mgTUGhGwyqJ&Q(^(_qsL;*9EoKrh$tNB6$_gVazaDc6~40IY^KOHN_g$%0-#R{B1!kXuO*~6 z(sK+j^*k+Xs=!?5MYP4oLh*6AOcjT{f;pMBeJ3A6Pe&n|%#KErAlukj;m#v8Ggk%j z69C-x3Q#AoGfYGb@tUvLnnGF0=E|VCFpf@ZIZcETD)xom6L!uKE5Lg8Hd^cwjL&c8 z2h+`lxtsib#1f}R^;SuoPIFlSC(NCWi<(^1C~Q=E_5Y|wGn!s@D{_ceSJqoc5Et9? 
zr6w#1BC3_N#0I->3lSRL=??y9rfv$2y|__uFO0Y)cIzrg)I4cq%IuSOZIiO0vK%z4 zqQ6F4o-Rrl&8zE2zp#xrM(^MiWrRnc=_qmq4-4o(l-Z6bV7mb zS@Lx7=I!hEuZORm{_<)33}gF(K>X1bOc+16bb${cW$vq z#Q}pela@3z&zjiarjg|sOn^~k(`OE}EO7{|(883=jq*6Ov}6+kpfIHCEsuqkMp^>8 z3T}TqCcuIaP+54AY$lWgIRfu0>4Y*XLo#E&60lTIlt3hk0^Nq_7ut?46#$#8r=X;rJVKd@NrM zR_uc|U;_^X{vJ$bpVfxgu<8x*eBc-RA9#{t6lb>T&k(yV>@mN0PP`dm8M9eO2Yaz$v27TGF2&X_pHa zP7v$p>bm^BDRKjEjFKnbEoM%3X~poLayRJvK);q0>~&belq20|?;}7$t!F<2FJ}BR z=Dht46VImYQF3)W2>*QZ>Ts81_^jT^WPtFAn@*?m>@+|6lr2w3%gmLA1CBrjssumK zNQelUH9`n*PU3z?RZs;+RmALEAsIj*tY&hZ7A|JV-ja3XKZ@lkq?eQ8H8kDYpsWkJ z3_Eszr5gaB6t+Kz&Pl}*8UHwOndGIystW7LizuZ!<3ZNok?kw4Zmnicoa)kB=uyII ztuZ#5NQR{j95P8R{3j7fyB`vObPg-9%mw=sI-ZAp5I) zN3A;T$*JEvI-4W%?$V$&znUH!bZ=Kr5~8_X*XeYnrVB{(unLC4W}0sBtMA-j!18!{ z0@DZgU1H<18H&2K+E%aThs7`_s7z;*a2M*2&L{e^pgSzo3;eoPBa$}uBg_&elEg7{ z4)0rxpBR+Q6bLlcxFe0c0`Df9Nz|!d%0KFWQ*U*?7rfu=y_{cjjgq@VOmJ7nRPm#jS4&F z*k}~&i0cG)G(N6oYJVqptjlCDj+R^AKi;wBvkr&6AtF}KT$GvgM^^t}b^2G|(jSey z)qCu2z%IeMH^oO}EMBEHmpfpaiF;uB)~*8~gBOtvf@NS}AFHK82|$T!EH074Kt4ET z6fB#M{hBZwzQYR{8MWFZ@F!~QAFBO=!8cgc4Fsr#Lj)U5>pb-Wi^8D2YC#AQZ(D1V zuxmQdcc#j_>MtKpSQ$k$;eI*K6=xS$H7y7L2Gv9t$5uzkFu3Wwcw%(Jc?LU7}CF}LA1Sn#C2&hVmrt{DX5ncdeTUd zNPKJZ;tO`UYtn3LRrYzMvE~3j3np2F+cZ6=6YmWlB{BqoHf$ zbKD*^HL#j#u=JcMaOj1R%v2zO=j zK+u*Jft07J8Qn{r#z8Ek+BnzwS{T|Kyn69|`0oAF_aEL3pFVrP_wj{T`Q)?ab7(G* z31z%9ChqVJkn0g#xGy-E^fHPtU8kDug*1@gY~~tt6X$(eiAU+`WgC{$(Ne*K1L#|m zRvgz<{|6MB^tZ05yh8^)jf%#wAoAF`^sY8OnYR)nuxT3tI2&lImQZ-rz1 zp-V2i7?#2jHK(&rTObgjR@ql-phuH@v`B*8h_RK}3lC>ucgzJ5kioVDQk222@9|2Z zSdk9OMDrCs?(8|ZLXQ(e4Ex-;o7(SpKbCwp@1n#ct=|zQ=E3S5V7fuy_4h9KebF`a z_u#G){I?o;UO8SwPP}lHv)P3P6lPl{9ZNgGRJkg(rdTic3HpnJq@*ij#Cz*9lSSty@Aj}|97l!Mp5 zZbfBv=^#n(ItO_tN8_wmW{@-xq*eOlLBI%{;N)b50s74_qY(Hay%tky;YOm`2|iZh zkNTC|YQ?t%Zdt!`Z9Q%Yj>0gNn&(!inG0V> zy~>2pYDq(sPS+YUsL)e?y21o1;px)f`OS^Yfhe%_Lz6I!6`mjO>I0rjy0hd2EqVgw zHtqG#gk(%SwwAk%k6yUhnv#2xHSU>VG^rj(#O7LZB;Rs?&Y5!uM!}JrxNPLEs7>&oIUnlh&(W$R0{E_+*eI 
z^A_s?rp9Ivofy;Q2~BEMegy>3LS*p0Y%T2`i!BSIsCCIdN)S;B}q4cJT3+JwwH^ zTSOp>tc|c+D$s3cK@|b5*0OT7sey4fgNP`Omda3l43I+#fR2014bfisM)=TY`f<}( z84XjVVy^@Y0~OSapc5k#7&YM{l0C*|1SW%Kp4NaZ^?>c1p87YPxE1_gTPvOyhgVqBc(x|plx=e~loeucYA+Up$=F&$06V%0U4r)M z{m+%z*}GMN9vfY6cGgyM36wU7uS;zbk&;AqFi2Vkc4zl-1L!D#hxugoStO%ZPdCp& z?5;6rC5&B>dZnJ)omL$SW-F z>Q-I#yVKX*%Qsz2`#H;MuUu(??ABjZylMW_t>yjOW>fZUZ zfCwTz*G*$NA(IjfyBgVHv_~{yXX{2Aen@KK>xhPM#VQr>$+@T;ubZd_lf%W-+(fIl zkr7i2Un{1la+*vFtFyVRkLiK5V|UYVDxyEzzBJ;D;b~_a5TC0go9xtMiax9i70s3m z%L*Dccs-mc%JZ!F-+bJQqPC*V6jV)y{h)qgDbGr=^p>6B+g>t8EWv{0XBI$0bZNQI z^`&&kvUNz=vu`F1^=h;i<*V5)mcyYD5D?qW{JGBOYMLt-LCeKA1{PN2AWSrs6C|X? z3Qc4R{%hav9Cn`ff?|dwT$p_^6xD>dCB}tXzd^4q{U=~8Ha|%I;}`U)cV!~pLVq)w ziCqUeDf~4xpg*2u^a!HqA^AN9k1w*O(K!WU`x!TeW;Z6Ag-T9YH$j4cAf`Nxxuq~e zlvW_*FG*sDFT@unYk@Ya+lyhvLy}GFB6#{R%9k13dMNzcg~9Z>$Vgrgx{%aPp{S-1 zU;+#gu-75=a5vjqcFSx&E6W1x@%0!`tfKhdMGhV^=%@nJ8A)jR+2Q#zfHE$QkMjk_ zEXVaP2VQ|`{#kVcw)w7cAsOtwA?vjs-!eWGj-H$OuL>od&CvFj=I|cPVcFfue*0Rp zGwn>=E9T8A&=|1nSFbF|+5*X(S#r^8l$#!Lv}FWlx`+{p#Y91SLrvL)@3{DqkJT!a zmAz4$1t}{l?A6U*)&0|3NpXxc=c;pw&^#4*S@+(;NRx$Rn;3Th^SR75cTCeH6I~E6 zc)s7YSg{rxo97I(oc22U76fC{(FE4)5|T=0BOR-5X3)0!UTb{%gD39Qa!T6~VT^Vr zuhH5TEo#qFX-rk`^l1pDP$q9FzL*srdG{+q>a&fL;EIw+C&o7BIU6mHPPbSkV8Zcb zF+IWN%brDO{cIoJB$zU+wOtL$fMLT+vv|}uwOcjl^nO#M+6Yl5gWKKV*K_@uW^1(5 zh7jmdW6fK!O{hP<)}5hV=NascAH0jB1ea7gJPqu&ZDn;-RMm@oF&Qb;h(54K8ajb( z0L-=g1MSR>va_KMC@jMFauAy_mo9y8s9r6Q^nvQ=jUk(YYW<*gMAEhsO)d_*6d?<0 za*u|HHN2fXO-*>s*{cn@P2l`Raf-#MoCcYU^J2%R2Fuum@J}n6-8s|zRrHHy9q8ZI z^~d=|0raD!w}MB_3b5UF#xdLX)|#TxuHx1Kd!BS*`aV57FPG|my#SB*(a~~pnL&76 z>~OaCCX5(px1rhzc2KB@ z6K7J>SWF?ES(b&(>?kvyOE{y!?9eUV{h_@7L)itIg^f}F>8G)mqRK~44Gf6t+1rTh zK&u#EndSAk*+}yyqb8qR4|W6*s=(DjKWs}qVNbwgwjti+YJ3!X&RxgCR!PsFe4qRG zEe;Ce!~7QSBBztuY3FV0Pmbqw8~4_C@MI`iC~A+XD!Ffd2wKF@jaNH*3gs-$15eF> zx=xm!eV*n(?{K%>=Py3)J$o^H{f}2K-VWcs{_tM?v-j%#Pttg`V`#SC`V_K*_L->0q5~!HE%+L4jgm^-n3+Kbb z%1DM=u{QVO3<@VK!Hj%OFmZyI8(38~$@J`U!)3#zIr*%Y4PdCQUQH}gArz67=&X;v 
zx$cTGB2)dFh=vlQPoWEe!gq)R0z(=$#gHfo4h55;HD^$gtO+yGOImxG7L%ld3-~Jw zkZy*~d!eHjX$h27NFarG$kI-7aygC7J+@0`9lc&ua*!5li^Ox-8_!2JmfG{n+V4Y? zdpqT1n{JPMHzFL*$O_oENdR%pgmVaU=(XESt5JO#uFI*Kke+Ep+||-?T@#FC$1YAx zLe-I@gbH9RT$K&^DX;0YXKOp~rk0)6Fy@4#V#1t~HzDz0k$zSaY6QyM)gurv^HX_a zHh2PbAt;fWvG&0Xxk{KZAOQ47CiY*!9aA&czZ4}3a(|jFKupwr`mQSDsgz211;BR9 zn*m0j@yS{@JM8nrGZaD`2ddg8>S4)rp3Pu}Sx1hOn&M%f{%@7z29LL)b}P1_EYm+! z#M`F^G@{Wny)$|wlcRR3P`M7CxUedyq-@*img=-hb^E+BAau>sM6Asb>%*A^J>jQ8Kk|qt6~b@KEI!5)%ed+Y}Ql+=<4{-+YcXXJqhDK zKYH@;HvaQXJh$TM4f~;K}+XaK>A)P}sH6S!K;Akof!$(u4LvU)5LNik@ zs7>{O`qe0o^D}D2*jSs(edYyoPFp4Svp@YwI}CFBWKL62WG?kr@Lg9^pYy&qaa+ZM+dw?|*2QYOYMfwAAE(-`w-FcCciY9$FibH42w zhQh|-Nlc087b&_y#&$qd%u3AZ_kLHMG*s?}jI)VuU<>L^-aX(>zT?31ocrL_xLC>u zYixc0#7RwHv8pIA$;DxD9_qGyEc6hfYpA|gu3nn2rnrZ3_$KvSamZ|@MQYKhE8^^fVHRjtV64tFR)*ZH3EzyHuGxx#~4tG ze&Mk>j+9=!!ku_Rv>B0&@nL1!8F?g1)^oaC9Qh|9FiA{!G~b>q!t0-LSVYM*^~-on z3S&YqrSa@c(U&wd{lj18yBP#8>1|}oLJ`fn1YiJ7#cB~3UAM3p>gFhyT&nK;OZU5E zEptyp>GEJK<=l_29SpGa0wBqQW0PdzVU{Wc0On&v&xo;9dkOqd7cSLlgZ0%ThVAr2 z%Z4C~mfssW1uc`?t%#T>;zr}jq8jty^dh$hwcImseV%<*?JIhCG_gY`6WtM@E42zB zHOajE0PwVk!$%z(r1GAMg>f(aS=LI8-{d8-3Z>*Ds`ikZX z*MzTZ{N3}twV$;wo_gbW0e>D?xEpIDrGp5H41l_?Z886(>Z1EyJp_zRNwAl_9;d{q zKp&zvZDfCLe6aL-gj1!XKy7$RlCB=LO$4o5m%B?vZE_aqhefE>_uK!ccrgr9R$OPfIIPPl*+}FL%MiPei+t%ye!(ajZtlRZ=TCzouDRqt~ zvk?erLB)+9JN_U|c!|mA>~K8F=88)ZM=YG1YdX2gWJ^)Q~qd^IyJ#UO?iu`D{!hoO@Pv_d zG`G%2yi#oWVDV&I#b{pRyPX$tX_5@eDh{DDUL2*D6Xx@b^4ped2;;o?vm#C-5clS z(V{>B#cszW?apw0wESo~%dk@&zutKXYNY~Wq3i~6LhTZoZh6VT1A}tlz0vP;`_&H}ZQpynHMZGU zHw>??(6L1+gW$Y~1bVV``u?#?C!oS;TN}|y?V%8HGi;26Ydt8ja0&!tVZVbhR%nz? 
zEygkMB`9hIt)s#4yaF{vT{ZpkQBI-W&TJ+bQ8_5n zKjRG%5Et4B;vX3%))6Edv{TvKS)&y4<}hv@G}L-06*7bcij8Dhs_OCRte9Nxb}t@w zIcl9756|CjV^g>irn^TW&b;wjmC$g32cbP{pM|emG6^Bj}rF3`fuO< z_rZg$M}Np3UEi?X^Dl1yyZZX;jGa@EX2I5`%T||d+qP}nwr#V^wrzCT?6Tcu+x(`^ zf97Ilt|lTQBQNqQBlgafYrX4vk*TsBzEw8^e%yS#a5n-3Hq`SSuKFDQ0kc?G0PpZ@ z0MY!AECccsU`GngP*dwJX{1~N@M!gNg&&BRFnlQYd?)Jn4bVM&-p=4n!8l#) z1i?DmS-TW?MCPs!Da0n;(S}k)6f0>#PW(9v&ApKND#eLRGC4HC$noLKLM=uPgZjX; z#a6{Se=sib0g*0l5@B|9Z-2P1_{kbyl+OnEzTcl!Tl^;i?)JM|+yNY{e)zWVmSPGM z?pwG)3QvblG$H{wajj7wISa&~Wf+;np)&1XW^BSZV!t=Dk1umK*hiG|&d_jTEezeamGU>SR&-3raE-q$NM`*O-r?&_$GQ6T7sfB9?9zZ5I+yUfxZ3Y0nK4a~pSNhEj0n0E1e%PS@E>Ou(BI88J?P}^?6Yt-!c}2aq2!zUuX&+oTauO78gFwv0RR)hXH}MZaN>TcX zA&QHNoNJbIW!O2h1E7&(H~)bD5~^z=&Hjyo>%6_jCFaiTTjL0OO=~6zgEAsj91a+e z_Mb8O5c>6AJ=+vfmj-N<80wQ{g zRA@=<(=_3o})llts z4wklEzHzr5Y8C=zy~NgiS5w27ht!SKiaMrCGsDDhJ(hw%jxTrO1zX8$4@s@HOa!U7|@5U}m z?}9Bh|4ZHt(p(K~YYoM<8h$)t(2`#%!f0<5&=lg-qqF7dkPDT~*+v;8Xsbz-Nir&1 zw*E#~6p9_S73#ehgc9+zi?p!Ih#1{c!npy&if9h&nPf7tvPd7`3BjEk-?@us6&s(; zV(G3V2&Lw>9WotgUorfI#dV?xKWk^8hU{!=)_@KJNI#!8cK3I--s_Jy!NTnDHIVC$ z$tWZj$jm8srXNQcl$onSh)U;B7AWX$mv9z_^fUsmy=ZrUU*Etm^fcs}_JH+D z4w{56b91X+#E4tgJ3CIjU|i(bXk>^YRj%v-k(0h^3Kxy@vHIZa6a{OQ2I_oh1(&^X z{mc(dD{`dgpv9M%!h&nS*kZ}0JR!%SZLg9IzYx2(;c@)(ha}{T%SAOM66JGFRz426 zWipe?XK)`(1XA$QUs%Ni9^F@dHod$VT$`$^z{Tqi(T*`ELNN37L~xA?9Fy==E0&x}ap5>2REKCXR?;*6tYte)IDC*gu|`fj(IPE! 
z>*hQl^Xnm1(k#3!YoX)R{P-q>115YM9E>sWsuKqsRHU5!0X^jJn^5y|%{%*59z|oZ z3S+yFrCS1jtKRI!NlzpyzEJsjpSp}!K6>Y zRB%y{Y|=E+j_-Ek-q-`tC1y-9&E7NG4oO`TGd~k3l0zB2rRaaT2oAqyBE_+un)j46 z9qF2Fl##?c*aAV_(n@SalkaFq2t2EPAik=V7jt z73(XZcf1&q{^kG)^ywcLTcC;#S!Yx?t1Sb~=AQBP4lwbvh-&Myq1XmA0ploNvWFZ@ z2!UG;yu^u`n;4x~wnUe`qeY!nw08*u7FP`C(zi_ss?Vp%)v{mcwSH{VL-%jjO=Hpg ztpA<+nmJWRl}LKK-?cN0-4)=X=p?Ec4OSV0-Zg71sHc# zLf^tX_c`iqZ2)%qT}q!EEJ1WV00J8u2sZ!;9Yp&ZK(^YhvbuKUcI*l!dplvX zX!&~Viru{nV^b}ys~U6Bf2$YRWg}s;9io+A#SYM= zGuCUtZ`Ei6dp`Q}keeYtC5?V56Vfh@XQVLxsnm``;kCUr?xamiM<=+Y0rrq%!KVKk zaHU5Be_4RXUYC`4^Gi;DoBvObowG^TAKaM?5W1J1`}jqxkp9GXN9)xcwfmT* z3+|0bd`fR?^s#n+^%-DkA2>+S1X#Hku&J(K2)Ofn*sJ}%5zlMg41iM{Yqe8VCUhJr zS)rlc*r@zAT{e-1MJk8 zqa5lQy(I^`7`t_4O{_=Lv3s^&=Vrx=CFbX>nW{Cyepc7eb*{Q{EbF4bI_1SmSlhU- z4h|c{AI4YfGmXX1#Z|%%4iza91za~!M`pNdm^|OVzc%-(S8pP2cDg;=UKV#^PNrLu z;fzV9NhL_#>pI-#50O-egg(WBYC;wGY0M@yteCrfb;+4nsfa7TzmJ0o(fjbhVy0Mg zlBJ}T?N&6;TlKgT^By0Isyb_qOu04+6;s)n>fr7v^&E6lsu;M%jWa^-E&14zAP3+MP=ORb*>ly6KK>I_*?^_EbPTKH*fLU+r{sIw#sZ3y&~K zy?QaeD)t|B%ux1$)idCgWi`HKCI^wQ1xShLX>Hh*-+Lc!qknpn?SyyK|F+s{$=H0f z`UxlFQKf4)DZUf0`PVE^!rk~!XU)1yA_+c?vpD*)s z$;NQO$^RCtAE@QqamHwc|Kk<7%!x`DZYf+Pc7`rrRoz6mjle0a%$ewapRGRVo=i)k zjb=Q+D^AofS#(V3zFQ}_PAcPwsdBk`Aac;-mT=U_ zsan&%R9J~7DXQ!U_PSRH4}^v2)U;n(ePYX&Cy3-jokd^)r#}zOIuNMtV89@#GX}+6lnE`w-jk z`8wkO0VF?QUR?uy~R|WNR*FQNb;xd#Y0Zqy$uwtljY2}K;-_yM(2B=IB z2*ZY3@zdZAEA$)nS~1*I0&0bS>JwijeCm4y!skZHuz~ujr$aydb4P=}6)mqrtPb%H zHLM7c5vf~H5>~yDz%M@8%#}l)Nwnv?+EB>ZF2Tm^S-*k%c5;)S54A=jAfJf{up8NX zu6K9(0zhRz0KKq_wv$~QC&lRT;>4yu#NC8(;Q^H^2T=&xQip_i2RZRga-V^EBtzhv zVzwraf8Zfh=jb_k^$U>4yn@f#=y~&AQp?gNb@-X3g)ul>lbDfJpB!^}uVd%=iedZk zx8aYJ-zEA9%p(p5@4irvr5`c&V}FnpyEK%VmfG4lz6z>jS`*rT@@aI94sXI6VU*g78vATmk%7o@dQw6B`({%Q1IX)^ zQ!fKbUjq{Ic)Q~O?j*DChkYC1avM-=_q&jYOIZmE*)TsR{-is32g3n+70+Vs!#`Nu(U zX3nc}gR9Ys07*kr+friAg$*l?DjQwTO8jgpvrWA~r(I5*rtQ^4jm?lSxZ6ZDPIc*3 z$0nxJ0W;*XP1v3?SyzBBs7+UOFn2JJ-VVQ~W*zcC)ri#x`d#%TA-ByF?I~e(-U!^= 
z{5zo?EgGP|?PVPt%yjUwBpj>#lxKG`z7NtDsaJB$pOde8l&MzY94pcke?ri{ZCOa&syPJ}`smQva{2$59czKo403G@hNeoaSESwWJlY96IpxE#f zM8z6Ty-~~lqDEz%u{I&slH9h*8v#T6yct)tIYT4ml#*>5^52A=LT=k1DACZbc>7aZatsxD>@JpJ$*@B6TVznh3$9~@+vL<{5 z(Zbq7p&I}q@_}8VUt1(HJPzV-L{TLLX_0Qpl|^gxkb;jBX<_wwJi?dR<|DxjECy{w^1W)T{-p z$N!Mi391Dgo&-4VeEaSKsH1fTmD1hcGdDAYc76U53aHPyjX-(7y;~4kK8X&AJu81z z)@>J>b4B1dTc@CZ|FXJT7Gol4VkP~w*kw4X%=yIFWvo!NPSHT1ZOKwUf5B6woYVJm zSrYYNtGvHXp>6FmctVqxG*X;WY|JWV;b1t7m@YfnuVH);X?84ZOxb{sEW>Q@M7F)! zxRr^$HbA1VrFNm>35m11RUDl_;5Z(L(Z6ona4(V-avMdVR-F!xTIsw+(dF9_b+2H#ICZzLtY>7^fyX zF3cjums!NB%ujVRA9NV(4O9|cG(E@`&w-`068o$l+ITfUmY`sAxyI$(lXA7+jxy6b z+8ad34u$2>Wzt>S!yXF5gSiTAZ`Kj~P_H|r|F)c7MP;oK&#w)CpbHNBdY}Sz7%x5Y z&e5z>z<6rR(79u_y>`Hck9D)Xnk28YAg0P86QpH#B{Ch?+d~?vD^Co)<%ApXW?lAe z-)Rw_>tS6R^SfGy@7H#VAnFuhP=!v+VkDdva+hmaL`lpsSZ(7Gdzk{AJmXz127}xv z#wx|y`PZr_i22wD2`ZZ&ZB^zJPrS2k0VjSW3LWx)S5Z;kEAK;H9q}2qShV}(i*~L5 zM360G<{6^FPoUWdUng2>3LSdWwC1|z;|A<51~nplm6&%LhDfDIi}%cLH>kz0w~6b= ze%X#i->r~oN8u3|J$u4uF@T)L)#v4v=)Yp;RV%t$_rBu-4Y8wE(uCQ;dF&hH$zkHJ zLjMCRgm4Nfb7p*Uuhy!gJj(1Mp9yRC#Jh}ftM4`60WV80W7mSf_geh3nJxDczI$@e z@x!5iS1#tMJZHVg2qWaQA4)##iw}7i++>42bK?bT&>y8aW3GSK3i(VN+{EyvM2xp& z1Lx3;CD+MGG~FR@ALIXJmy$?o7prY5f)1xTDg7qI)|U`idI29d5%KaBJ_d(pT91m} z%#Ld=rd@rj-sF;95;#KTw*E);gP*0g44KZ{2fy#f)2kl1^j8$ed9ai9j<$&V%YB$A zzbxsti}F>D;ehP#nW5Kn#9~+Hgnz2>)sz2n;6a{tH2JIF*K)px$)a)%BjZT>mH*8S zQ;!Y^Afmi3g8@tfd!>LB>18InMV2X%E6O8b#B?>_AiOaRcCJE#@Y@g|CN)g2fQ7!Y zO}2%Ee#$E3l$707vC!V9#=+9m9G^RiOE_U5wZdM)*|(?9+Jv&~$2tIj!&l8LK*ugP z6k0J3@KQ{H*d`3P`FU)$KmUB~HOl~>!o~`=(ZGthUZe2V7%WR1Pn&rc-=e-sftm1L z@Yo_s^7N_wpMUFr4AW)NA8RA~p-M;NE%~g*=-a9<;64YpsE;K~!eh#>#*{Jxwt7{p zWFDNnA6@m=uoSv-qsUqkvR|QNX-W~F@hUTTr?C0}vxoxrFUmvAdH*D{m9~B>BGj`P zoRA&=^a}HOKEE$3gebh3{mG%SM!e5ru*knZ>qmn-50)0PTXQOYFLMM6$xt??yBKJ= zM_A~_OpA#bK{6vlA6?||xzX?bdfmSQg6#(mCU#=Wx$lftq?|?*m|wYudZOah*!htF);pL;$m>xzS=go6x`W za#$wW7CagFvFOQLazF8%w~SY9Oyw>f@L1yGU+Ye|o!p>T0-M|57y6UREe7uj_iqya zPco8hs;9PkW_d%{%oln)yGty0zATr|rz2nGMoh{-y4v 
z+TrE7?yZTpacgF|mEvA4Yv@vCS`kRMpcs?;x8Cd7i9tNC+f6s99ZdfGg$rihyQOC4j_B)+1 z+X5NtsL!6&UT@`wRH0pe9)@e1!+KP~R}sgL&o2*@1%}f+YrmFt4G)-W{K88eu8{Pu zBZGF?hIK>|5>aX$bkVoKRVcSDYU9~Z1Wg#@GG;J-1bL?VY4nHG3^FV^p}d|PI$FAs zYjCMiLb&&Ps4H?U6_^q^8W}U<as`7J6{E|?;ojU5Rf`GqrYN3CB zOEp!`YT46FnX(xcxw0V?E`Y6?z_88p2!8lN&rKjgsrzS(<1#2FLhHz0GVOkSU`9z^ zi-@!k2&vP{m``CR#(rJ<6v5Ri`TANFPR-UCBgKy*aB^{aA`b0NAW+k3Q>?aq=tX%- zC?oP5<+_z14EHw2rT)M)Il!9?Ut!vwaptk1Bp2rub+Ilr+j>c|c-tD3A6kZe>0-osXRW2 z!~INj>7~R7ai3+M@Rf(zNj?h~(Zcr%)F!^b1j@hQo5p|(7^7{!@`<~3|B2+OWZSFu zQjFO|XgUbq8!c}Q=&}`mjqpuqdZ1P>`7l;hKG4;5ND4~9UQQ#6UC*mNh(W;>m$z-i zW?Vp%yTfd!&0pke_xybq21~L4vTh3SN;m_6A|kSX>f^ZmL>UFxW6TYVXjCK|1s?53 z?|Q*ZY5(${**_rU`aU6aI-}trzLRuZZU4g$Mq18!H1M`v{tdGH;K!^GbtSjn7PjxG7gM6Dsp5Bvz`v0Wpa!zvMd!@ zd+xmM^!a~ts|q5ASL4#8GKuth9ZlHzFKm96T>qI?@XpR>1@h0%e`FUpyWtTyd&z=) zUz)?4{+M&>SSZksYl|u``sP-N)(V*P`bY9^BHVfvIH6 z;f31IyQT~LCnTL+hT3YkHJwMaIF54qCPR9cg{s}dUeP&>;j*VJDZU4?9JPAL4-L{z z2-%oH#WrvQ+i>lf+bvf-dEz#0r5Y5Ukd8So`1M_tmq0xq&yUzwfi^k$AckMG5ecv=4R=E)WP1z9 zx3!PB6CiNl_jRrY^hKTI(*kbV+xTAHCqnxe4uWsE37CW|;IF1NZqs!n-i;~el* z=2bI=)@p`?NB6ilAs-M9ZtsNO!D*y`Zv;~~sAgbjaeJ!UQ(oP<;!&ElEM%ROBwc&k z!?3MvQKmUs^AxxHOF++e-A*`;3ArrTR@!UYfRI>HFe6VXUtA8VilkCe8xUnk z9vMzRT4RE}AT~Q(0MypVu-v!?mcv@@1L=&Qy4api z7p)v1rBka|cX_f_!HDQ#Wg{uI(fdAvjl9=N3|xN%d`$J@ni=#?cl&00t@91!X?m=f z@p3F%r4JQ@YMLC?OkL)9qm$0`VP;i(P=_ninCXW{+sO35y(-^o#oyCw9O@!G&{4S1 zEj||6tqF&tAq%?N+_s-dD|uDIZfK;=!t{wEFA{Ba@N}#nG=UhE@u2x@D$t*6vv6Y0 zYZ$Z=?%W2&JI8Nk?(>ba^@fkrig62W6mU3%|G+=KKXzj31i`~J1SF}e$HV@i)1a{a zQbyx2c^_n)5{Q_N!I^t{18Od`}=zukwSKHn-&@pGv$Zh4!sVymoz)3{rZ$opx>iT@T31fVD2|1 zgN459k_M?;ePV}wwL^FGv~Lc4aN02k?wba==DYvbD~9f9&_t;n>%Zkgf)k;SxnBD| zixv{C`|6dh!LAP`I%3ShhBT76t&idpfba*~2kKjz0NOQ$?SD~5?Skd`e_Evhw6S0@30v3L{4kOrLZJBuPxF;VIp-x6m8F5eKi;g zCa#Z2Iu{o$H=Lo@!A~{U`VT!{YE2x43sgZIvPleBU7W}C!{88Ul=UEHzmP2#E@`3q zVA_`$wrw}19)#*K(ygMy+0F4(zEcbv*gH3VB|ElRkISXhJl z48%6wT(o}{SrK9U&SO=uiv+-SyAam(y$8$!YdP?I3~FbvH)!|Xj$108%Iv#3(O=Xc 
zcMmRpHnWh=VQNAqrTJ~6oP)JuLMXysfUA)0K<81}uWiqX3%z8~&|rJyg%mN}YBnDF%mk|UEyv;`WEPs=8ScGPxFsPu&HerW#Ny(FaVXjhak82AgIw3*;h;?Ee2qTzJ@>F#-1w8vS3AU1m2 zA(CWcAOF4TQ14_NaabLyAjeMT!krl_3rDJ*`a?8vOx2QPY}efwzUR9(wb`Eu2%gQ_ z7XW#d?*Z~a0;0G|IS{YbenWGInXB)wVT&GjD)gxf%{ zQ0~S5j`Ww%hUb<;jE?Y1yqIV)Fk*-%?`dPp9iDf3JU{M6Hi6NBctOSXtvUDH&C+K%pp+^-FF4buEIFTr?KOvHb!8x_H6Tq^q0@%4-FS$mY z`B-9u5)s9KZGo&U%4!q5hp z{qjp+Ym*EaeffDJnY8r`m2HkGkjCdQUV%|<|Cf$g-XJ>n3y*HuE4xOnGHPfMr?}#4 z9z7iKv`Yl^cmo%Kgb)gUJ~~Qn(nOjSVQa%ajH2nD-Mour3xVORe=Iak*k2>f`kR9$ ze@43Lek|Qa8^C!N%g=0$@HgjL??z+y`bzIX!sR!n;>7pzEPuk%bT~VNw_Da3(J$O= zqQN~PWehq0Oa7?P+7=V|CvHd8wZM{p41aM31jXk&LY$LM9(F<``NA)IoJh9eO>3Nh zzEopvyJ$q@kR1UCybt3EpVG;MpIeK|xl3j}_TRzv=Q?+bN9r606v?%65}EwNL0YV~ zC~}P&-VyPEqK8jE5H`L{Hk~Ro6fGJknzDL?P5bmtZOheFTy=@0!#D{DFCAq-2sLWqFxSyFZ!abgJfQ;)O(_c-{BkhZWW%@X^d;%kGv--d%Lmt+=6D zH9IBxA4=o=3bEdJn<@L(YS)WPmkYTXWcs%`qcs&840EG}25KZNA)jv(aY@j)FWdHvq-! zPepgZY5U3}i+Y1z-@D5%8!UOyVlYkp2#i8qnn4kO%BSuU2nF}Wu zZWeg^G-4$Bh0yGKT_dM+POVMS zQl@yOH7mcraJDc}p_HDq?Qh3b2fZEwch?Jp5gxPCv9_8SRI0)z`SN5wx~X-gAoUG$ zoz{tvbU*r~gv6EHd+FyJ_bB3_=4D}MhxfeB6YA~R^QRkh-_B-pdF@e9zRep5^FP(@ z-RzhZc9X))d*Dy;n5ZxmA(Di@Sshh;W)C-)l6ftmyQ^t4NCQ{HZ95LsP?93*E&uk1 zWVI-O;)!!44h=Kjpz3g6+XK^=t@MD=CRxTz;FCuANypg5R|unYRglsOHpLKfC#fnG0avFuF~%-FZ=$5Uiv35;)k$^3qm*x@X2GK zM+*nroZIiw-s~Qd`h1G@p{1G8{11|6a#@S)Y2e(*X$XpyQ+-(br@s7SLsc(t&NzHk-K;(fJb?9@8TebX&x@T9lWQlLAnux$~b2M5v_)|1@3ljbdeAmZw zS|$0&7CE`+Wbss4g(O-Hr)$+h=vKh|3~nNjZIVEZ4|q!tyf#I{Hm`KrCb-yy*ClMetj?`yzw6-0_)ISlNt-cLw5YXxDhl{iYa*h`oD;sQ7SQ&-wmtn_ z)0|Pk+R+JJn7wbpEo?TENT2n0q@v?!Nl6P6CwWSzHuZ#Pu$ zfa;64I<>S=$eqU0yn!{$7RY!Wg&Rk|QWQXG>v34{05J`r=N;qWXyyAD>%d!SmZ?lC4xfajGtl=>Rv&#(ba8&n5%$KeMNb&D2{kvV->kHq4Ru^HJfIy7{beG{0nIa3zButB8`?YU~tm9s>?Gz`T#{K~`JM;SaQmb1kg~r@75f2U#TiTMU~- z8}P382UIS2+#7FTao;;c0PXjmPG_q{-LJKAq77uvto>#^G}v-t+=`*9^)Ar|)Nye3 zNIo8f$hFL4U(Jk^Q#Ihe6@?d!)?y^UoXxgxL~E=Yo|{wxQj5ggWq(nEMUqZ$l0UalsKWL z5Sm`f!LzUxMVSWSF&ixtMg9`PnsX0P!X<;)Yq8nzLTEi(*sAjP(C4$ms`E!SX+r*K 
z{z`Bvvd8dIUd5reK>8h$Xh64UdhsUl(&eaR8M*DFhfS%2%Sk?iTJjy5iA1+YrNV-3 zhoG1-4mr_!Gm`oU5+LKcXlW~DEXs|XTksr8bBl!=i=cH$%q#GpZsG&2y3G73Tu-^y zwx}l!5t`dT5moH_Aju|{0bul=-$mp1JEW)wdImP*$0Q8|RSEgQ8i)Y{o(|47>&9&^ za5~*%N6a9>Ct}b-Wi^dRb$C=hHdv4-?A564w9EuO_~aGAkD562t>~YJ6>frO%|%8V z5^SZ`=i}{bG2@JL{l*7@w$te%bVeBBA;B+>oUcvHFNx)FEh3$PzRjHhP(?rFbNH}b z+76SeLS=9S#!*B;IeS(E7VHtIM#+tK+}~^9=r+h8W+td+-bb53>%o}lIsS02veHg% zx`MCevpY9_zC?t$@88EG)h9mn{`mX>clRS7823n?*jZ1p#db~fJmxECf9la0qWQvx z1Y9+KpV9T4*;*` zE7V`(u?s*QJ{IlVskp_aXKxE}MBZ{`WJ@|KP=I32Ohzcdw&tXH3{itc{rFWvF@RXV z>e~ltV=0f(B{mL3-Oa5Wn00~(^EgCPkx$2`SHNqKPuD^X%!S&tL^!AZF(+VJ(>(0> z5sz}IEt(dyM(mBT31rYvzpZ_003jul7V(<}C>BOK3iYq%?fM~6X7a7dAcpG`&vExe zxt=dmSV*GrNiz-zEw(8FhMtar1z76%8YA8V^C@a4NIU!dGGJ64L;K6sNql&XKL|QI z-fVSn%u{&MpebWKz=?TEg0%yCUj);)fYFH{V*g4=4V67pt1xp0hFH^l_W zL^`o|P>;x%;mQ(Lt_`w?g!TM|?KtrJ&h#6#!!5B!l1(nnauPd3SoqWNS2z5k$*Jq9>7U^E%1HNIVgWcm$9`^@>iG$Lw zo#yAtHo!u_$Fo5;VjKI9^xA%3suqAuOtfvFfYZ>gfX`>$`>m6JFR=bC@@&aBts&5U zVbJ(}@?+KFhtgsI7)v0q+phc37sIdYvHf*4@o^=zdTjJ z@h*7(4ruv0oDVVlBH;DD{rCL+lT)%s&khin%#$-eIhc*^>j8bWKJ%*si)v7wOoTj`k;Hj+J82$emk^so)iNGsnpw9c*|w zW@lNLrhw8y%4-uY>N4y#NwrLaVAHH0z-!ydt72f;Y`y@Z_}bNgt&?*=5p4R4!B4Z} z-T8KmPt`@)M#LFdQXkzoHpG%NVIM<0WBQWmBbMgeFUqMgL(m%UZ!aam=sAD&nfUbl zfhI8ExR@kmmb(d-ucb7qwtl!J9<7tJF54SDijTXqt{#uCO9G!8!1Mc%5BxQO!fvnM z`}x;SjK}&#`+Dk^5uElkpPb%7mAs7vCaJP;ThO zuJ3vLTt@uWvhvVJx%KSieTY+zBVPY=awAN{BIQ^+KcMErn=PI4<$Jmn0IPOR%6Yzk z-#c7@lic_sckvz-oPFzvDZ^Y_!kYR~-Rt*=SM*^|(FIg%ZI zmRvA5Fi3z~lg|t4sQ6ON?>kd>)`iU5%Y_5IV7?M4zkSB=JLOOawZx}lEdJ#D6*%AB z{`e7Vb1F1al1%cofwagGTgxyAsK895qs0dn)|ku8N$6Ix!~S4t-lU-R+C%H^sgO6@ zogsvh+WKHgNLd4;0!fJ?Vg=fpKy{2Julp<0e~I!#-!nZD-@*;ADid=m-g!AYdz_^R z<^x2EDiEuf+P%D&cEc!Uv-9-z)NY3G5i2%|pi*f%cY;HU--<)U587>f>uNYhkn9!{ z4|e!*8jQ1MuTtW`x16lM5eQZggvN7CH1|1r#N4~}7{ES~@~)lH)gNicdezzOXO!w6 zM9%73Cea*c5eF(;Up!Cr%k$mBbBCKM_*}TDt8a;u%FfOr$|0J6e9uC%M#cMyb$%&& z1Nv3e+2?1UrycTbL8mMvI@kM%&#xr?oJuTZLl#JJHgUi7h6CS|+oq!C70v@0X~-># 
zCkgRF`Oe(pzPbkUY83hx$ER$mZwDKwWLb_Mf?5`8nASFh@GeDS;}0gJ+_(|lyp;hS zogsisw8e%yN^9!YkuKj-H^aapGbF1!J;{}760i`v579z%A~f3Uct@#2@Yqp!x**|u zPgj1e9h})2)Q4TW;*Kk71V`;1n)2e(3#;?q`*y(*>&RA`V^n6=-}Ns%e0DdL3|78o zaE>@_JI+Qo!R?0O-d>K*NG&Hyu5}&N{KQaBkoI2u<@g)kS}9X2mNCNFXeLd@CA;QT zx23#==;<4shF%curG~#U1DVvJIx{YdyzyO8IZ5^7?i_NsowFe3RB9q^mk+xXxQktp}V!K)xfQZIt4ut!^Uo9Np7a1&}ep zl>Fx8KRFDUx&B z*%ZpdY7lG27}CFLaC~p52dSc%lLfJt5w#b9g-7{b#Ool6$8H$EfyDWmYhlCjM$^yo zHp~ymtJ{AeVWq_#xm`RH@a~aU+DRqIZg@vvq2M3=C^aRcFbo-` zs$zLDN(Hb$#VpW^rSAGpAz;sewY-yX5&A!{(#9d|_2%u7sI4|3m-@df$yy1prbc$2++7Ry@PrxjE8TB!(I4hB zPyHcgPobRS5`ir=r*>JWrkq^KjIq@Y zuJgVz+^JFdvq@p=p|gdxvbWEgjsRY6arcAgr43BFQn=fvgj?`^FNncWrVR&4O)2LOkn@OCy*o zQ*%Z<05z=G8p*P8iMC)Ot?6@P>*}g4OEjBPG{44S2UY?bO;!LuGGgn$zIADF-PeRn7zy@Jkk41gX%5q3 z=F|WR`U?3NUzSTTMikD3n5cu!k{fs2^WU!1-b>S50`~U4z|0{G$0nYnCx~#)?Gn9n3HCs(J`Dj(Xln1iLbCN}PPWH%Y;Q+zd#GAh zlD}#DSHf&$xoJiDcrTGCR-T4c_YL@L0-L)ay#*B&Kv!I}rI;J-2M_ldzL|+8YLsjC z*Hv=`|A-v*|HR6`qAVqZMB~wk>`6K!prN&!o;|N98EcX7%e9}k6_~}>7ZLa0h-MsC z^hc{jy$DP3xz%2@y{fx&QPM+F{Iy8P!n3F2iE1Z?H}E3`=Z!n%6L8#c;TCtDg%uK; ziN3T>3_DPfl+c2%m{4M&4(jfuC*nd-_1;zMe-rs#J&WpwXN#xeg0e!KqBLIZQ^BKw zsCB_c@Bmz!RZXE4mEGBP6xHeki`xAmuBp(mfJ+-=?D_Y1lnYDVw z3wyE^@rj#(@U4Dm75~ywH_{x2HQv>1-eevqpOxWGQBohku>T{S)J`h|&8!R`eY+l- z4zQU}o(r*mTRgI-#N118OGmGBd$GVV8rH6BcGEYz=o)=QBloik;47^wJHMb{;s-8N zowr#ATL|ZRS$@$>-S9J0ox_H25dx;c2cS#zO*A^VA98#=zq)fzXGYs+667*OA0721 zk6ilFX=qecd#mNmfuQ3&i$#fA!s}3jX$dss` z=g9WQs`)l~z~uWJ5`<7aJk56 z-R&w+wRVeDe8UKHKvXp<^qwwFEfKC?N}?4;B_hzuPK}IOV_}kpEqmX7lS2(5I4ZG> z6?#~EdjxqkM6&Y`S}}8+zBErDzN9WSwF1r2$&%7746>w5L^{PT9u}S>sj|v=SsiC% zS1DBPo3SatfsL6q zLfwS$d!XP6Dg+zPc|YJ%_B3Ms7=WI2iUX%vdR`#`b)`y+PYx9} zN)%tC#9Z27D#vDBNtRc$z^3Piy->SS$bz`Q?J9?!+gQy_x=~t z6#A*PZK=0)jsq8F?v3k`jksI{3~|*|nT;aj%8SnswZfGUzI- zXI6)SrTX7$d_pf55M_vQTGg`ACBYPw_#H`W4X;O=yI3(z=Bpw&#E0iHSB`h?7hGWp zGSVB$^xtd6GkA?IRnjI$2Td~ptRff-s`WU!ztm7Poix<6P1LXp3S`=3 zrI1lBp}pWq0^z_-D=VE680$Zp7d32P3yv?RHjMH!P!U<0$!}V`mXS=k&%(Xp>s34GJ{2lYZYCGrH1?XhoPy|XqZT}o!3-J 
zRG(Nw#Dl82S*hu53mR*vr9ThIOJ4()b;x-Vx19B1%t^s^5~HuO)01kup^c_j%?;|m zg%osm{gcz)r#s7xoMeG^0$1!znN+uvSiVA^rxL5BEFlF!mzh7t=|<4w#Pam{s};U3 zgPFCS?9f98kiIy>Mev_;;9*8hwUZ!3==xsY4xyCskrNJ9q5N`x~e3x0<4% z{a3FK-1VP~ic)HO2w(Q|cJ0s{1c$vlA2-8{!Fu*Ju`$GkhGHt>9bGU%g}+*g-Sl7jgaZQhkB04gdn4uebAdpS#(f8+8KR z?WGm!+xivyfQam$jt7_(z}?Eh=IboS{g&BYemh4yzB!S%~P7K}HaQ$am0nD#&c<6O0PpgNK)oaGaR zSWvGtkR^CnSYX-@s?BqTa7Po}f-B+7_ay!(lacB>mh<&2%ql| zCYg=}(&~Ir9&|IjW`jlSbTH^`Ovg(Z>E}s(@@NPdhcR*1uQiKGD2At&*o54FxW&7K-EVgJw|%&LA71h!9P)Yr*=#_kE!YNv zeQ*r;lx3r$fdOTUmv48oVQVM!de-eY(#e4@STkkGQ-NUQ%AO0Dv$l;^N?T1{g7d?m^QUMxo%&`fmvv1t`J48Aytsi31^oq z$=1f;k!J$BGyF%3iV(&%9d}v31vW%igudC~GkXn%m7?94IH&vIM!7OXdydjV~J9zUxa0r$!dI`ADkDHIOjMG)kL={2) z_F_;S8NvGIYv}&RvSGjezXC`ex8d_eaiK3|1-NuTU_P;`zK7L_@V-4?%x5L#Z#20Z zaq1N>xST0g4+ahNOxJxnouMI07yf0h@f_`Dj}*PFU&1^?91t|Fpu@w;pDzG-hlnKz zyF3AiI{@H5>j?$rH=RSMkjDUg(PGH>+3Xwfn_dx;KDZKQeg5;KnaekLa^L5m=PC0Z zySfIibgk!yy3EIIQ;L?-oO+{W?*34kI=Y5wld(`H({Ey%>E6-ZVfZ=$vo{$v-GS|hdN)VjG> zJD_%UKLVjC#hBQ6B%h-_pvM$Rr3TrM0liBAy4CZ7$-f7z{ zvs|ics)twU*7qTCYhl;q>WJ4qHd(#UbufS=(pM5^Pq-9d0FSRqK#*xJm zYS%Pf%r73Jx2NJ{#(CMcebxMEi8(Mz5FH)nY70&CCT*6WQdx|K3O!$c`(*A#?Zdh# zLZT>P-9Ll-#o^^}2C}9`&40`nYRYHlC#TRPGJv=M=)|l!!l(n-|#F2 zGh{}wjk2aAC!fI`M6;yq%J{b%oC_)loq)SbGxX}D%_i&ZkOQ6p@Vc1^`ck(p>+ir= z4Se+-@(f~+x{J0vot;m{c;A|4?`H3)hj6^L!pHEnrQ)bZQ+3P0>;F8T&YEHe*i9E& z^)!6gbjk`&zrjplhw4cXoE;zq{H40?W^ugg;839GwYTPP18nUGy)M)}P=fkvI6gg! 
z$;#^(C75TTcB}+E3C#|5G=%Yu4BIt-BcoLab2vNJr1WGmx{)CkARR9iK>@crMKE#W zzWFZb<-M65#@p3&jzW1C*MK(a1`Slz^-Z}?P2wR!I#O|@v!gD)=k_jiBie@R>XvovW#+JJoQtWQtWJrMqGcin53Y95w~Z z3KAV1WM&=B3HF~m(lV#u*DRYYKURe)VXL#S|I49F&a6aqP$H3654{WOzw6* zo-K%*(i=lFRR-$;{ZP{Z5K_FW6jHdsjnC0sWQjpg8x8CywSpitI?RtoU=%Y0SKkur z!v=s;9<>JnR%t)ibH(qBm518RB=HadlDexYo4snkT{FXnv!i7#+qHUYAS{9wwYK5f znjt)oCk$m_pxkQI+6S;}*1I+A5HCh6u1$WEQp+cCI&B7>Qv>>BtCYq^k zy1HdTBiQnd3gpr_#2#lOG+9AjuySY24L??71PV&NaVKu9b-o4Hz9N;aA?dOIA7u(}I#h4!v8l?~bpaaF@T(GIYVHK1dS zgrfn4`Wm+e5!X@1Hf)H99E1{JZHCvb z4%U0~y|8hDZ=`7od!l~;?J;fM14lMt6bjXkbhAb<VoK0-lzO96GS^CbY{#jkOr4*Yj!{6;jd@F=$kKNZ6vFt|t+So&>^+g@d^)>Id4>4`D#*CZ0z7`?oj&627QW!up#bBnhsJ$_+&?;S4H1TTE zk98lMw4=GU{u;WWkJ@#1g4UMxWjX_**B&~1!0R+vE4IuHpm5BNrJNm;_Gp=pz*tKc zy`D1{7)MRpS&p=A)L4fSRtF|yF&#B!oY#U@xW)qJ9n{3uXyuwNF^Kw6EbgUsvsOIX?zmWh5kDSWgTHP9SEA_B9hsR4%d37|&w>)7LdljB z2i(LNjys@MO@OOH?M_1}8(eDGi^JpYd$Y;WJL|4lq3*#se-jn1Nu zJS@B)t);yv6}QMhZxJIw{lD-|yTS8#3r_yX3gB&OIy0zLhlSytvkvNlGVDSp6pev- z#PlLjk-qAx+3ZtT8&p%|rjA9*;b?IZ!s>K75bSOB`5Lf!80=@*s%{Qblz` zG=Obc&nZ~nnK%}K*Rc(H0M>rTo15dqJU`yt+CF}`wRQNHe;Ysk%fD^^CC~rzWb;9u zcO}5awL64ty!Odwv+I4Nm+Jeyt?)w_StA70_i77)ERg`CTJVotzyYDZykhnTZP;DD zSD6m-X!(WQm+b_*Njjd*%(6gKYMbdTd*o8J7?)PDoHxJreLs7&kvRwoVYyw{av3p# z7M%9Me6&0|9q1qzJvP=_$$vLG0+mtB@gbQMx+?6tS=xYjH4!)#D*+xmQFJueVd$yV zXs)I9XTEv8eY+3`LKX#eEb_(j#jodzn>9z2jj(zw{#kA}oM=RiI=8=h;P;Q$iI#!@ zAS<0iNSh<=F4nrI$xg2d#XAJw1uONte5&a`i12+lnt&O`;{M8Q0Fn3P(W5r}--As> z|36yeIFrx6IQ`dR{TVaRjvuU~|F`_U?Pq(_ zqd_VXXk9c#zK#5{Kme-+P~^zb?CcEeiXG2!zAPp|@I^y6`-Q=SY6&vCx=4+q z#deFLPK3My6U2v3C9?mAReQhWXNQW86yCK0^JX@i^rJVmASiZtWtopUOW@wT@BXX>OeN_^R2RiCN4m)8 z{BUN2Z{}Z)?$}*Bzq})`-)2uz;>Ob7LYi(=blG94HrUaUL@+={2e0nj8NPY>;pe?q zERtHYJ=>)n+7&kMAl|QvyBL~S>E_rDOr$*gatBFviaEwk*1*2EmP)T#NBGjCFhD$% z18Y4RhzO3)kU7E>E{$xBs@kq}r5oZ~+Yet^K&#->xfW#&UcP?%{Kuy+pT2tb;%(o< zl!Lb~-o5_t_SuW!)A#S+?)~`T{fpr*Pv7`}yb8S2eYJdt5FN72HKnZ+$KMHhP8$N5 zK?R{)&gPtC^Jc6Yp1Y>BUZQy|>Gd><*OF>(rzs&`%x{m_WetayeoHN`$?c3R;c?LBOY 
z`S37Gt-ly1#^G*|fhHA8PTGdE7~@3i;FoEP&v+0y)3u?L9S3qqh)Hrfolf>v$08i* z&_ZU&Hd?z5c$$xOR;AJgGsQDy=f@=d*a*LwOdMh^8dJt4E!j7ML&{_W`SalBhyvrf zrs*&HonkAR*KTG!yhX8L8q5)GtdN%BLsdVk#|Z@}9}1Q8M&W14G6PVRLZ|p*&I2a0 zPC^mVlB+_!^k`TE@foOq5F8C6C^%wZwFhM}&WHK&v0fqfAv$2%;?6r@W;jSIBr`(O zG*l1t;tX6(a6vW?!Y^jUa7B88{EhX_6qlyaeoOY=ty)cYi+QTMYEB#PAKo`r&+nV@ z%rPJwR*Z@ayu4!JCE%yeGF3SC33h?kV=NF?myc)w=)Z-fk}Jjyuv~EAfYj}s{ZPW4 z-J>nhH+#cvj*DpyUGUk7p4E12#4ValV2luPKta<0HC;2c`75WT$>~WqX?JYUhPbQQ z#vd(5s1#BAPSFFubupjTP6ofXDijTB{>r08F$Xh2D2_jeb(O5uu&QEKEM1x$m;wCO z_LIS;`d=k<6Q>)ofU5PE%?5=5`rqEae^Ut(_tZd(r(`0Y_01QytQFhb7}Am;4=}Y&0 ztliU@t}FR7e3hRSaeCKdONWJTG_PO^73_!s4@3G7Ywrc4zH}*v!t?EWm)Dhy%1_7d zKAryl{-;ocTCw3NRbKoaVqw&zwxTMYV1p|CK-I02=eTezvkb-{mX)e;)w?xbz6k-Ut#YQHJOv zvaV`D!r%*9;a!zMbvL!wFxbwiP^@FD;>G=KLi*eJ=d+W^Z|A?9onAg^8e10;}QWM;Flt?rI)9VMWi(*Q3g9UAdak{`Fs2r)X`Fnf-Zjxi|)1RHWV%xiz zMI6fHh?N(|`DBBRGj!Zw1!VWCw!-iB7mYGfmtm7taC7QRoV`UY+AMZuA)sJ`Z3ReFq|*R541lG z+iFlXw=tHRm6_JwbFd=a;F7f%LLA`8K#;$oR^K7)Ek*jn#?%ZlhTMiHfW0z9#)FOh zaTHEj4n35UuO5dnhyqI*dkJCiqekys;BE+d0Zc{k)oX&2DVtz{2dp)pA$o%!spCym8Y2X1%d$mHAPwwNQ=nV*qBS?%E}AYIhHFZUyaL|qyZLZbdJ zQwWJ;EekBCDF#p|nuvMHntyA_o6gaZ93j`+Ub^SAA$7jSEPI+}bG*E9th{k*c^|t- zHv}wbO%%aeG+_v8FuUGs0MeAPQ4DM&g@+~sdA+o~BWO2#HzD&2KFuye;NatLH+$H2 z%CD~YM znq1c6%m5v1uz?M-RcK^|8ta@lx!TDrT*vS{(HsylNY2@ubHu|Q_6nWaY(2}~7{7E0 z0FG~rTK4;4$;C;*?G|>ih51Y!pZ%xG0ge(RNb4({-2gjRt02i|qWJhEp22~c7&>B1 zKOpMLtSxoT=!28uhCTR3G`{lV_PQkdgxP%0!H$xEf4C1|82Wil^}L35jw|P_$P`a?jPcm`cG zH*?&xhy85%j=N1jJ-g!K<<-V^Z&hhGh$yl9qJEF;)UBT5BbtTl*XN#dCAs&BV>8UZ zFV8{LXo@eUL%aeic>zvPkzOWz9pKN%e6YFCAYp zO6jHKmPZ;tCJcGDpgw7Fwk)r-EYC6OM=#F4DYfZI5n{5BVCc4U!Mf`*#FV)aMjgPO zl-sGx?_1GP7fT;wtPFR728(jZy+)erTE_x~w!U>l{a{icVxU0}JH~U_N69bf^8HH2 zZB1cNdCvtU4SD35UUa-TEwvl&T!E-es-Ihb;_hUijvNGvMvz)^{n`K=Q1-0EUwoch?jhcxVRf#N{Juz-gd@@5$3fzD-S-iSdmM` z{5#WD^&MKdNJFY+?%m%`=Jj7n&o6M%d$qBuRZ4JPlBE$_h=3#3Yem`#~zpy2Z0 zfvF&$%tgU`Sva4r11j4?Sf7{xb(Cda7K!?@U`JGD@x`!J$^70!Lx64BWL;ATB(7RVt>Ve{&1tq^NKh-GZ-b>+}=lLAi! 
zd^Lms(!fS!Koo+lCkeutq{FbNJg~l{iG&m|w$`-oI5Qs^xrYXBt+Dn%x2mCVqJYN~ zq9CqKUlYuYWER0tZ3nhAC{?|Rfx@ZidlYF-v*sHW>0q?o0YOs^<)}7$8ue_%Mj;QQ z#d1KPTD#@WlZEk^Q#5ti<57fXoJKC*l!{`oiotkliZH!RV=`W#qRpW%P^B@-hp8wR zf0!zb&wj6pa?AIs()jZcXT@6kRY&N1oGQ9ocp9uA2va-n>X7`Ik^%VOgNzY)8**2M zxt;oD`m(we32ucU%p1(XI+j!{t3;CuWD}IY2z_;g9O<=Z0glU`Dyv=Awv`W&+wAW2 zPWGhDzL%qwAlv8_T9}PhJFit66WXhU)Vh}t;mxpcot?sKw$>J=d3ZGBb#ZAh8KU@f zGirHSX6bl!hI)%~g!d*Wi6ZkFd3{e!?uMlS4$#0hL6{j)a2tS|E2>NAbA?X{qqaLd z1**16eJG47qH{T5Ria5#586fy-6nx8S5d2ha%XC70o1!08JUn%C6z5xV|&@6F@I}+ zSgH0{i1&j@lMIOb1q+iXeBO*mNy{u9PR|f5C`X=bNdeufVm7r_Z09M0^|mndWA;`P zu}*+f*Er^cGs%zUW7PGZyMQ@5!&TjT8_7NeHAmPU6gm$*x9|6Q3#~qjJ#^M>K2u&p zA3!o`{&>5BrVz58@cknnrdB`4fa>n9yzR03bLj{@8o4vUv;6-HebyOP1d*FSevLan z39>UlUH>xEeaKFK_9?nSPs zTpUH@uO44l=V|WNwzX)gsDiIvX7U7v-=(v?+%@}pmy1pwKYI1wj50uzxTf?;RWvy7Alb-u|o3=-ygwq%9fzb*J-cy`8UH z+~m(BgAJWA>UZWSbGu$HI%3WZm#Z0F)Hf6;S&&uVF-W(Yg1+6u=xH!)DssgClI&DS zgws`!488?3Kx5Mdvk;M~gESv`5JCTGwph)nM@)!TJBa=+Kyl3`eT3c7A!a@xI~M;nHO|N2C9lrZQ|yNoGN7I zw>xK8KqoA%{!bV)m2m*wfrVC6!M!iSVqq-QN4mV-{?)~CxkyNagzWM6otdiKhh*|S zEjQrTM#wz!5lBIbCcY7xxASJGWP?%I?e4hRY}P3eNfoUX*+cyd?G)#UF-y%=I<2Vm z0C~d8e1S|A4uTGXYyQ@7%7*#kzbKkWgEU<@PK^6D_{we~{#dxmKJ=Wo`Gp8F_u)5Y zB^X2N38L%HU5q12YMjZn~*T>MO=Xkn%?lUYJ5Gnpz_<7QcbSLjXh zKfekGQYEzrq)TTDIsZDKa dQ=8hJsZo7$9-{s*pBWGMj92LL|zR3-ob literal 0 HcmV?d00001 diff --git a/ansible/roles/open-contrail/templates/nova_contrail_vif.tar.gz b/ansible/roles/open-contrail/templates/nova_contrail_vif.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..78dac037bda3317cdc602c521ded32c6ad9739d5 GIT binary patch literal 3552 zcmV<64IlC!iwFQ)^_f-x1MOS=a@)8OPpAE(qiNp(<4lHBs?d(k_RXZ#%{h*9_0?y4 zY$w+n*TaEGNWz&Sc>pL|b8`>dXXp!b7XUv*Qj(K%uI*JwWKzIlvDjVwuvk)YGNXQw z#7a;e`7=J+e)8yL2g>Jrd-@8ce%1Fc`}@25{eJ&t|K*dNm!I|bcAt>F#}}npGO4H_ zzWVFgyDP))+k=aXgVU?y zqf2sjK@QJO-yB~ZpPjV(>FcBKy@$@yH5qq0z~DgRxl{JWT<$yU{*3p zj94;d0Uz-ICK!)1I%Z^?%$SH_UIZLz$|aguf{-DJ_>?QE@VjBwn4Q<@z_5u>M9yV? 
ztN4`V_hY6MyH`ciL@*k{Sb0Svy_A9zP)YDwuwye8tB9%2NF-CBiW0=h5g!vv5RAqh z(}*DI#m4{h)`-X1y$_Y0j+MaUFtoo)vg)*CjWS)|a+K8m8lCpJF=6 z7{!9i7g_CfiwWER9GS`fN|T19>6mdNdW$+C%$OAAQOVc!F)NTW<29U zc@ZDtH75<3MC^Suiu@Xkr(8ALczaZLZhw_EMi!J=8GM0OxwfqM63Iu z;NbJbcvL2|i=&0L1X$ZaWG?8y86?x*ablQ!#XuS!ygz>XMpJksjk57}7V8TbKX+D| za_uI-Py195Q7Idup)zx?^vD$ll&)4SiUO>%6!3)6DGPf97cb(HUh-8+jjiu55Zu@=nDf48E9+@TtY%eKuXSFLP3b8voKA;oPR#PX(qiDRRDBebsm#?&_H zb2A-mhsPhAxW~AWvLi5vLH3Y20J$9TiiD3{JbOaRA>GletE|th-s>Gmpv5KLH5!DQ(rMl57^#mJ3_yV4ZI&>Ra?U71T_9hf1;1`yJPOC7__Z@LCDV0qGb z?p`}pP7Xpm6=OYdyuw!qTRga(-Pn${Gj97_7ThcT=9#H@<*a&*8C7^`)2`|bUipuP zKh+tw3EkU&sc>0mcifX%tONFZBScp!)mt^{YG{G| z4S{6QbmJ%)(r9(hg%0cB>tSkWb)Pkw8?04UMX#3t0IX^klEZj1{2m+%^;$+2ZR#{A z4y@rkp^9p&uZp`?4e;?xvf>SJf{F}wEuh4b1qW)eGI}~(b-UO7o9mq$Zv?S31hrbB zu7iH0bkv%Ly9pJj7KI(6e8HxJYcJo z0OC`?o+df~0)VJpuY`pLAT(+W*QX4Qm3g;LfiG~l06oC07I4ad&2#Vt*kztSzrDk| z7Lc)iQQK>*>S%-~&N&9~zXpKUMl~S6&N}T%t*f&zOKFXeZS3(0?6FL(%Ppc;OOs)~ zZ8wCjpEs;pwzjsQDnG{|a@wBcZo*;$F{94@oSK;519IaF=VupJ{?&Kqn5J+}0c_uY za}J>Il%7(H3u+2_RRfZ`ybTzsAP+Qn#ra6GDNQH1V-1SLkXKZCm-*dN20_H37xZo( zsoXHJA`h-a#!x29ZLma$au7^(34OXMawOhce$T>V4DV+&Ix!80n0GSVZW&8`8o~5y zY&;$Oy4?00^t%-`m%TKbs%Bf}>4CC&67ry83g=ZlY)kJTP(O6`gdZ0scq#0cK?=raz_1Mk9}0B_^vz zL2&SKJii zEw^|DaE434!Aap1&t-s3N!)Tgt`$HOpGqGe+UP}94sTOD)!d(VZoV98Q80N?W`6Q= za`u(S@|=e2pB%YcZ($XKKiy66;R2@J#y0TZ>XtSeRE|0|SFn>SJ)q3mS?lM0d&m{O zP9Zw-VaZU_nMc(fBLDup-8y0PB>#w6qG|Fv%5EwrKvrG7qU_ zk0JOvHJ5^eU*}5+1<*S@o9WdA_a&W;E(#8d6Kq%sOF>&nC{=QQwsE1GRU=ZLCYM<- zIZIWGKG$(KjH|L&s9_rhEOqL~B1B-s*gHUE`>+a;v4ke5uRS|afu&h@CDWl21D4k^z-J$P8FP} z1E+pGQlKUxZyR2&!ohV5cm4XbOZp>x2TQsC*-amU7nGps^dGrhVR_6gYpEXoYjDaM zWgS*wsaU@jbS;KkHRkDT*Qa5qA9e%p9Z&yr>O}h$BK=&T+AgypSRI={>DP{Z*x}q< zJGRBSS)fN?H7z+K4eTNk`uY-sC=Ak*koR4(h$pl+p{O??0!%*L9@lwB&-Z5{E5q*ozy z((9}^$hQ#lbbnE=`|W!DX+0W2Cwz%w(0(Wdu0BfUmh;H6_8B~|XzTtg4v08EYxKg8 zKbp;A+NWCVCTFzTzPvtglX-xQE!$AFIX^74J!}57d!QY;7!4mC*FZ-)Ybl$}_pOOn1**sf5cwdOMbD7`WY4 z`p#*KiE3~y^Pgob74krJbvW4QAsZC8W}So#i{;3{ic6=Y5x8re-`GY^WbsDSpEBtot=JvulD 
$f/rps_cpus + done +} + +function insert_vrouter() { + if cat $CONFIG | grep '^\s*platform\s*=\s*dpdk\b' &>/dev/null; then + vrouter_dpdk_start + return $? + fi + + grep $kmod /proc/modules 1>/dev/null 2>&1 + if [ $? != 0 ]; then + insmod /var/lib/dkms/vrouter/2.21/build/vrouter.ko + if [ $? != 0 ] + then + echo "$(date) : Error inserting vrouter module" + return 1 + fi + + if [ -f /sys/class/net/pkt1/queues/rx-0/rps_cpus ]; then + pkt_setup pkt1 + fi + if [ -f /sys/class/net/pkt2/queues/rx-0/rps_cpus ]; then + pkt_setup pkt2 + fi + if [ -f /sys/class/net/pkt3/queues/rx-0/rps_cpus ]; then + pkt_setup pkt3 + fi + fi + + # check if vhost0 is not present, then create vhost0 and $dev + if [ ! -L /sys/class/net/vhost0 ]; then + echo "$(date): Creating vhost interface: $DEVICE." + # for bonding interfaces + loops=0 + while [ ! -f /sys/class/net/$dev/address ] + do + sleep 1 + loops=$(($loops + 1)) + if [ $loops -ge 60 ]; then + echo "Unable to look at /sys/class/net/$dev/address" + return 1 + fi + done + + DEV_MAC=$(cat /sys/class/net/$dev/address) + vif --create $DEVICE --mac $DEV_MAC + if [ $? != 0 ]; then + echo "$(date): Error creating interface: $DEVICE" + fi + + + echo "$(date): Adding $dev to vrouter" + DEV_MAC=$(cat /sys/class/net/$dev/address) + vif --add $dev --mac $DEV_MAC --vrf 0 --vhost-phys --type physical + if [ $? != 0 ]; then + echo "$(date): Error adding $dev to vrouter" + fi + + vif --add $DEVICE --mac $DEV_MAC --vrf 0 --type vhost --xconnect $dev + if [ $? != 0 ]; then + echo "$(date): Error adding $DEVICE to vrouter" + fi + fi + return 0 +} + +function vrouter_dpdk_start() { + # wait for vRouter/DPDK to start + echo "$(date): Waiting for vRouter/DPDK to start..." + service ${VROUTER_SERVICE} start + loops=0 + while ! is_vrouter_dpdk_running + do + sleep 1 + loops=$(($loops + 1)) + if [ $loops -ge 60 ]; then + echo "No vRouter/DPDK running." + echo "Please check if ${VROUTER_SERVICE} service is up and running." 
+ return 1 + fi + done + + # TODO: at the moment we have no interface deletion, so this loop might + # be unnecessary in the future + echo "$(date): Waiting for Agent to configure $DEVICE..." + loops=0 + while [ ! -L /sys/class/net/vhost0 ] + do + sleep 1 + loops=$(($loops + 1)) + if [ $loops -ge 10 ]; then + break + fi + done + + # check if vhost0 is not present, then create vhost0 and $dev + if [ ! -L /sys/class/net/vhost0 ]; then + echo "$(date): Creating vhost interface: $DEVICE." + agent_conf_read + + DEV_MAC=${physical_interface_mac} + DEV_PCI=${physical_interface_address} + + if [ -z "${DEV_MAC}" -o -z "${DEV_PCI}" ]; then + echo "No device configuration found in ${CONFIG}" + return 1 + fi + + # TODO: the vhost creation is happening later in vif --add +# vif --create $DEVICE --mac $DEV_MAC +# if [ $? != 0 ]; then +# echo "$(date): Error creating interface: $DEVICE" +# fi + + echo "$(date): Adding $dev to vrouter" + # add DPDK ethdev 0 as a physical interface + vif --add 0 --mac $DEV_MAC --vrf 0 --vhost-phys --type physical --pmd --id 0 + if [ $? != 0 ]; then + echo "$(date): Error adding $dev to vrouter" + fi + + # TODO: vif --xconnect seems does not work without --id parameter? + vif --add $DEVICE --mac $DEV_MAC --vrf 0 --type vhost --xconnect 0 --pmd --id 1 + if [ $? != 0 ]; then + echo "$(date): Error adding $DEVICE to vrouter" + fi + fi + return 0 +} + +DPDK_BIND=/opt/contrail/bin/dpdk_nic_bind.py +VROUTER_SERVICE="supervisor-vrouter" + +function is_vrouter_dpdk_running() { + # check for NetLink TCP socket + lsof -ni:20914 -sTCP:LISTEN > /dev/null + + return $? +} + +function agent_conf_read() { + eval `cat ${CONFIG} | grep -E '^\s*physical_\w+\s*='` +} + +function vrouter_dpdk_if_bind() { + if [ ! -s /sys/class/net/${dev}/address ]; then + echo "No ${dev} device found." 
+ ${DPDK_BIND} --status + return 1 + fi + + modprobe igb_uio + # multiple kthreads for port monitoring + modprobe rte_kni kthread_mode=multiple + + ${DPDK_BIND} --force --bind=igb_uio $dev + ${DPDK_BIND} --status +} + +function vrouter_dpdk_if_unbind() { + if [ -s /sys/class/net/${dev}/address ]; then + echo "Device ${dev} is already unbinded." + ${DPDK_BIND} --status + return 1 + fi + + agent_conf_read + + DEV_PCI=${physical_interface_address} + DEV_DRIVER=`lspci -vmmks ${DEV_PCI} | grep 'Module:' | cut -d $'\t' -f 2` + + if [ -z "${DEV_DRIVER}" -o -z "${DEV_PCI}" ]; then + echo "No device ${dev} configuration found in ${AGENT_DPDK_PARAMS_FILE}" + return 1 + fi + + # wait for vRouter/DPDK to stop + echo "$(date): Waiting for vRouter/DPDK to stop..." + loops=0 + while is_vrouter_dpdk_running + do + sleep 1 + loops=$(($loops + 1)) + if [ $loops -ge 60 ]; then + echo "vRouter/DPDK is still running." + echo "Please try to stop ${VROUTER_SERVICE} service." + return 1 + fi + done + + ${DPDK_BIND} --force --bind=${DEV_DRIVER} ${DEV_PCI} + ${DPDK_BIND} --status + + rmmod rte_kni + rmmod igb_uio +} diff --git a/ansible/roles/open-contrail/vars/Debian.yml b/ansible/roles/open-contrail/vars/Debian.yml new file mode 100755 index 0000000..845aa78 --- /dev/null +++ b/ansible/roles/open-contrail/vars/Debian.yml @@ -0,0 +1,48 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +package: "contrail-install-packages_2.21-102-ubuntu-14-04juno_all.deb" + +common_package: + - contrail-setup + +kernel_package: + - linux-headers-3.13.0-40 + - linux-headers-3.13.0-40-generic + - linux-image-3.13.0-40-generic + - linux-image-extra-3.13.0-40-generic + +kernel_required: "3.13.0-40-generic" + +database_package: + - contrail-openstack-database + +config_package: + - contrail-openstack-config + +control_package: + - contrail-openstack-control + +collector_package: + - contrail-openstack-analytics + +webui_package: + - contrail-openstack-webui + +vrouter_package: + - contrail-vrouter-3.13.0-40-generic + +dkms_package: + - contrail-vrouter-dkms + +compute_package: + - contrail-vrouter-common + - contrail-nova-vif + diff --git a/ansible/roles/open-contrail/vars/RedHat.yml b/ansible/roles/open-contrail/vars/RedHat.yml new file mode 100755 index 0000000..d760b4e --- /dev/null +++ b/ansible/roles/open-contrail/vars/RedHat.yml @@ -0,0 +1,9 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- diff --git a/ansible/roles/open-contrail/vars/main.yml b/ansible/roles/open-contrail/vars/main.yml new file mode 100755 index 0000000..6facb47 --- /dev/null +++ b/ansible/roles/open-contrail/vars/main.yml @@ -0,0 +1,86 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +#package: "contrail-install-packages_2.21-102~juno_all.deb" # mv to {os}.yml +kernel_install: no +#ansible_ssh_user: "root" +#ansible_ssh_pass: "root" + +#contrail_keystone_address: "{{ internal_vip.ip }}" +contrail_keystone_address: "{{ public_vip.ip }}" +contrail_admin_user: "admin" +contrail_admin_password: "console" + + +# network infor adapter for compass +# contrail_address: "{{ internal_ip }}" +contrail_address: "{{ ip_settings[inventory_hostname]['br-prv']['ip'] }}" +#contrail_device: # compass openstack device +contrail_netmask: "255.255.255.0" +#contrail_gateway: "10.84.50.254" +contrail_gateway: +#contrail_mgmt_address: "172.27.113.91" + + + +########################################################### +### we make an independent NIC for OpenContrail vRouter ### +########################################################### +contrail_vhost_device: "{{ network_cfg['provider_net_mappings'][0]['interface'] }}" +contrail_vhost_address: "{{ 
ip_settings[inventory_hostname]['br-prv']['ip'] }}" +contrail_vhost_gateway: "{{ ip_settings[inventory_hostname]['br-prv']['gw'] }}" +contrail_vhost_netmask: "{{ ip_settings[inventory_hostname]['br-prv']['netmask'] }}" +########################################################### +########################################################### +########################################################### + + + + +contrail_keepalived: no +#contrail_haproxy_address: "10.0.0.22" # 10.0.0.80 +#contrail_haproxy_address: "{{ internal_vip.ip }}" +contrail_haproxy_address: "{{ public_vip.ip }}" +contrail_netmask: "255.255.255.0" +contrail_prefixlen: "24" +contrail_gateway: "10.0.0.1" + +contrail_router_asn: "64512" + +### Modify when need openstack provisioning +keystone_provision: no +install_nova: no +#rabbit_password: {{ RABBIT_PASS }} + +contrail_tor_agents: + - name: "test01" + address: "10.0.0.81" + ovs_protocol: "pssl" + ovs_port: "9991" + tunnel_address: "10.0.0.81" + http_server_port: "9011" + vendor_name: "Juniper" + product_name: "QFX5100" + tsn_names: [ "system002" ] + - name: "test02" + address: "10.0.0.82" + ovs_protocol: "pssl" + ovs_port: "9992" + tunnel_address: "10.0.0.82" + http_server_port: "9012" + vendor_name: "Juniper" + product_name: "QFX5100" + tsn_names: [ "system002" ] + + +# adapter for compass +kernel_package_noarch: [] + +compute_package_noarch: [] + diff --git a/ansible/roles/plumgrid-plugin/tasks/main.yml b/ansible/roles/plumgrid-plugin/tasks/main.yml deleted file mode 100644 index 7784be0..0000000 --- a/ansible/roles/plumgrid-plugin/tasks/main.yml +++ /dev/null @@ -1,148 +0,0 @@ -# -# Copyright (c) 2012-2015, PLUMgrid, http://plumgrid.com -# - -# Create a PLUMgrid sources.list -- name: Create plumgrid sources.list - lineinfile: - dest: /etc/apt/sources.list.d/plumgrid.list - line: "deb {{ plumgrid_repo }}/plumgrid ./" - state: present - create: yes - -# Point to LCM repo create a PLUMgrid sources.list -- name: Add plumgrid-images to repo - 
lineinfile: - dest: /etc/apt/sources.list.d/plumgrid.list - line: "deb {{ plumgrid_repo }}/plumgrid-images ./" - state: present - -# Update repositories -- name: Running apt-update - apt: - update_cache: yes - -# Install package neutron-plugin-plumgrid -- name: Install neutron-plugin-plumgrid - apt: - name: neutron-plugin-plumgrid - state: present - -# Install package plumgrid-pythonlib -- name: Install plumgrid-pythonlib - apt: - name: plumgrid-pythonlib - state: present - -# Modify template fies -- name: Setup plumgrid.ini - template: > - src=plumgrid.ini - dest=/etc/neutron/plugins/plumgrid/plumgrid.ini - owner={{ system_group }} - group={{ system_user }} - -- name: Replace plugin.ini reference - replace: - dest: /etc/default/neutron-server - regexp: "^NEUTRON_PLUGIN_CONFIG.*" - replace: "NEUTRON_PLUGIN_CONFIG=\"/etc/neutron/plugins/plumgrid/plumgrid.ini\"" - -# Modify neutron configuration -- name: Add plumlib template - template: > - src=plumlib.py - dest=/usr/lib/python2.7/dist-packages/neutron/plugins/plumgrid/drivers/plumlib.py - owner={{ system_group }} - group={{ system_user }} - -- name: Replace plugin with Plumgrid - replace: - dest: /etc/neutron/neutron.conf - regexp: '^core_plugin.*' - replace: 'core_plugin = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2' - -- name: Replace mysql connection spec - replace: - dest: /etc/neutron/neutron.conf - regexp: '^connection.*' - replace: 'connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron' - -- name: Comment service_plugins - replace: - dest: /etc/neutron/neutron.conf - regexp: '^service_plugins' - replace: '#service_plugins' - -- name: Update nova.conf - lineinfile: - dest: "/etc/nova/nova.conf" - insertafter: "DEFAULT" - state: present - create: yes - line: "{{ item }}" - with_items: - - libvirt_cpu_mode=none - - libvirt_vif_type=ethernet - - scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler - -- name: Add plumgrid_plugin template - 
template: > - src=plumgrid_plugin.py - dest=/usr/lib/python2.7/dist-packages/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py - owner={{ system_group }} - group={{ system_user }} - -- name: Add plumlib filters - template: > - src=plumlib.filters - dest=/etc/neutron/rootwrap.d/plumlib.filters - owner={{ system_group }} - group={{ system_user }} - -- name: Update Plumlib authentication - replace: - dest: /etc/neutron/plugins/plumgrid/plumlib.ini - regexp: '#admin_user = admin_username' - replace: 'admin_user = neutron' - -- replace: - dest: /etc/neutron/plugins/plumgrid/plumlib.ini - regexp: '#admin_password = admin_password' - replace: 'admin_password = {{ neutron_service_password }}' - -- replace: - dest: /etc/neutron/plugins/plumgrid/plumlib.ini - regexp: '#auth_uri = http://127.0.0.1:35357/v2.0/' - replace: 'auth_uri = http://{{ internal_lb_vip_address }}:5000/v2.0' - -- replace: - dest: /etc/neutron/plugins/plumgrid/plumlib.ini - regexp: '#admin_tenant_name = admin_tenant_name' - replace: 'admin_tenant_name = service' - -# Enable Metadata -- name: Enable Metadata - replace: - dest: /etc/neutron/plugins/plumgrid/plumlib.ini - regexp: 'enable_pg_metadata = False' - replace: 'enable_pg_metadata = True' - when: enable_pg_metadata == True - -- name: Enable Metadata mode - replace: - dest: /etc/neutron/plugins/plumgrid/plumlib.ini - regexp: 'metadata_mode = tunnel' - replace: 'metadata_mode = local' - when: enable_pg_metadata == True - -- name: Replace plugin config file - replace: - dest: /etc/init/neutron-server.conf - regexp: '/etc/neutron/plugins/ml2/ml2_conf.ini' - replace: '/etc/neutron/plugins/plumgrid/plumgrid.ini' - -- name: Start neutron server - service: name=neutron-server state=restarted - register: service_started - failed_when: "'msg' in service_started and 'FAIL' in service_started.msg|upper" diff --git a/ansible/roles/plumgrid-plugin/templates/plumgrid.ini b/ansible/roles/plumgrid-plugin/templates/plumgrid.ini deleted file mode 100644 index 
49d6ce5..0000000 --- a/ansible/roles/plumgrid-plugin/templates/plumgrid.ini +++ /dev/null @@ -1,14 +0,0 @@ -# Config file for Neutron PLUMgrid Plugin - -[plumgriddirector] -# This line should be pointing to the PLUMgrid Director, -# for the PLUMgrid platform. -director_server={{ pg_vip }} -director_server_port=443 -# Authentification parameters for the Director. -# These are the admin credentials to manage and control -# the PLUMgrid Director server. -username=plumgrid -password=plumgrid -servertimeout=70 -connection = mysql://neutron:{{ neutron_container_mysql_password }}@{{ internal_lb_vip_address }}/neutron?charset=utf8 diff --git a/ansible/roles/plumgrid-plugin/templates/plumgrid_plugin.py b/ansible/roles/plumgrid-plugin/templates/plumgrid_plugin.py deleted file mode 100644 index dde32bb..0000000 --- a/ansible/roles/plumgrid-plugin/templates/plumgrid_plugin.py +++ /dev/null @@ -1,811 +0,0 @@ -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Fawad Khaliq, fawad@plumgrid.com, PLUMgrid, Inc. 
- -""" -Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) -This plugin will forward authenticated REST API calls -to the PLUMgrid Network Management System called Director -""" - -import netaddr -from oslo.config import cfg -from sqlalchemy.orm import exc as sa_exc - -from neutron.api.v2 import attributes -from neutron.common import constants -from neutron.common import utils -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_db -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_db -from neutron.extensions import portbindings -from neutron.extensions import extraroute -from neutron.extensions import securitygroup as sec_grp -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.plumgrid.common import exceptions as plum_excep -from neutron.plugins.plumgrid.plumgrid_plugin.plugin_ver import VERSION - -LOG = logging.getLogger(__name__) - -director_server_opts = [ - cfg.StrOpt('director_server', default='localhost', - help=_("PLUMgrid Director server to connect to")), - cfg.StrOpt('director_server_port', default='8080', - help=_("PLUMgrid Director server port to connect to")), - cfg.StrOpt('username', default='username', - help=_("PLUMgrid Director admin username")), - cfg.StrOpt('password', default='password', secret=True, - help=_("PLUMgrid Director admin password")), - cfg.IntOpt('servertimeout', default=5, - help=_("PLUMgrid Director server timeout")), - cfg.StrOpt('driver', - default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib", - help=_("PLUMgrid Driver")), ] - -cfg.CONF.register_opts(director_server_opts, "plumgriddirector") - - -class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_db.L3_NAT_db_mixin, - 
portbindings_db.PortBindingMixin, - securitygroups_db.SecurityGroupDbMixin): - - supported_extension_aliases = ["binding", "external-net", "provider", - "quotas", "router", "security-group", "extraroute"] - - binding_view = "extension:port_binding:view" - binding_set = "extension:port_binding:set" - - def __init__(self): - LOG.info(_('Neutron PLUMgrid Director: Starting Plugin')) - - super(NeutronPluginPLUMgridV2, self).__init__() - self.plumgrid_init() - - LOG.debug(_('Neutron PLUMgrid Director: Neutron server with ' - 'PLUMgrid Plugin has started')) - - def plumgrid_init(self): - """PLUMgrid initialization.""" - director_plumgrid = cfg.CONF.plumgriddirector.director_server - director_port = cfg.CONF.plumgriddirector.director_server_port - director_admin = cfg.CONF.plumgriddirector.username - director_password = cfg.CONF.plumgriddirector.password - timeout = cfg.CONF.plumgriddirector.servertimeout - plum_driver = cfg.CONF.plumgriddirector.driver - - # PLUMgrid Director info validation - LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid) - self._plumlib = importutils.import_object(plum_driver) - self._plumlib.director_conn(director_plumgrid, director_port, timeout, - director_admin, director_password) - - def create_network(self, context, network): - """Create Neutron network. - - Creates a PLUMgrid-based bridge. 
- """ - - LOG.debug(_('Neutron PLUMgrid Director: create_network() called')) - - # Plugin DB - Network Create and validation - tenant_id = self._get_tenant_id_for_create(context, - network["network"]) - self._network_admin_state(network) - - with context.session.begin(subtransactions=True): - net_db = super(NeutronPluginPLUMgridV2, - self).create_network(context, network) - # Propagate all L3 data into DB - self._process_l3_create(context, net_db, network['network']) - self._ensure_default_security_group(context, tenant_id) - - try: - LOG.debug(_('PLUMgrid Library: create_network() called')) - self._plumlib.create_network(tenant_id, net_db, network) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return created network - return net_db - - def update_network(self, context, net_id, network): - """Update Neutron network. - - Updates a PLUMgrid-based bridge. - """ - - LOG.debug(_("Neutron PLUMgrid Director: update_network() called")) - self._network_admin_state(network) - tenant_id = self._get_tenant_id_for_create(context, network["network"]) - - with context.session.begin(subtransactions=True): - # Plugin DB - Network Update - net_db = super( - NeutronPluginPLUMgridV2, self).update_network(context, - net_id, network) - self._process_l3_update(context, net_db, network['network']) - - try: - LOG.debug(_("PLUMgrid Library: update_network() called")) - self._plumlib.update_network(tenant_id, net_id, network) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return updated network - return net_db - - def delete_network(self, context, net_id): - """Delete Neutron network. - - Deletes a PLUMgrid-based bridge. 
- """ - - LOG.debug(_("Neutron PLUMgrid Director: delete_network() called")) - net_db = super(NeutronPluginPLUMgridV2, - self).get_network(context, net_id) - - with context.session.begin(subtransactions=True): - # Plugin DB - Network Delete - super(NeutronPluginPLUMgridV2, self).delete_network(context, - net_id) - - try: - LOG.debug(_("PLUMgrid Library: update_network() called")) - self._plumlib.delete_network(net_db, net_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - @utils.synchronized('plumlib', external=True) - def create_port(self, context, port): - """Create Neutron port. - - Creates a PLUMgrid-based port on the specific Virtual Network - Function (VNF). - """ - LOG.debug(_("Neutron PLUMgrid Director: create_port() called")) - - # Port operations on PLUMgrid Director is an automatic operation - # from the VIF driver operations in Nova. - # It requires admin_state_up to be True - - port["port"]["admin_state_up"] = True - port_data = port["port"] - - with context.session.begin(subtransactions=True): - # Plugin DB - Port Create and Return port - port_db = super(NeutronPluginPLUMgridV2, self).create_port(context, - port) - # Update port security - port_data.update(port_db) - - self._ensure_default_security_group_on_port(context, port) - - port_data[sec_grp.SECURITYGROUPS] = ( - self._get_security_groups_on_port(context, port)) - - self._process_port_create_security_group( - context, port_db, port_data[sec_grp.SECURITYGROUPS]) - - self._process_portbindings_create_and_update(context, - port_data, port_db) - - device_id = port_db["device_id"] - if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: - router_db = self._get_router(context, device_id) - else: - router_db = None - - try: - LOG.debug(_("PLUMgrid Library: create_port() called")) - self._plumlib.create_port(port_db, router_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Plugin DB - 
Port Create and Return port - return self._port_viftype_binding(context, port_db) - - @utils.synchronized('plumlib', external=True) - def update_port(self, context, port_id, port): - """Update Neutron port. - - Updates a PLUMgrid-based port on the specific Virtual Network - Function (VNF). - """ - LOG.debug(_("Neutron PLUMgrid Director: update_port() called")) - - with context.session.begin(subtransactions=True): - # Plugin DB - Port Create and Return port - port_db = super(NeutronPluginPLUMgridV2, self).update_port( - context, port_id, port) - device_id = port_db["device_id"] - if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: - router_db = self._get_router(context, device_id) - else: - router_db = None - - if (self._check_update_deletes_security_groups(port) or - self._check_update_has_security_groups(port)): - self._delete_port_security_group_bindings(context, - port_db["id"]) - sg_ids = self._get_security_groups_on_port(context, port) - self._process_port_create_security_group(context, - port_db, - sg_ids) - - self._process_portbindings_create_and_update(context, - port['port'], - port_db) - - try: - LOG.debug(_("PLUMgrid Library: create_port() called")) - self._plumlib.update_port(port_db, router_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Plugin DB - Port Update - return self._port_viftype_binding(context, port_db) - - @utils.synchronized('plumlib', external=True) - def delete_port(self, context, port_id, l3_port_check=True): - """Delete Neutron port. - - Deletes a PLUMgrid-based port on the specific Virtual Network - Function (VNF). 
- """ - - LOG.debug(_("Neutron PLUMgrid Director: delete_port() called")) - - with context.session.begin(subtransactions=True): - # Plugin DB - Port Create and Return port - port_db = super(NeutronPluginPLUMgridV2, - self).get_port(context, port_id) - router_ids = self.disassociate_floatingips( - context, port_id, do_notify=False) - super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id) - - if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: - device_id = port_db["device_id"] - router_db = self._get_router(context, device_id) - else: - router_db = None - try: - LOG.debug(_("PLUMgrid Library: delete_port() called")) - self._plumlib.delete_port(port_db, router_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # now that we've left db transaction, we are safe to notify - self.notify_routers_updated(context, router_ids) - - def get_port(self, context, id, fields=None): - with context.session.begin(subtransactions=True): - port_db = super(NeutronPluginPLUMgridV2, - self).get_port(context, id, fields) - - self._port_viftype_binding(context, port_db) - return self._fields(port_db, fields) - - def get_ports(self, context, filters=None, fields=None): - with context.session.begin(subtransactions=True): - ports_db = super(NeutronPluginPLUMgridV2, - self).get_ports(context, filters, fields) - for port_db in ports_db: - self._port_viftype_binding(context, port_db) - return [self._fields(port, fields) for port in ports_db] - - def create_subnet(self, context, subnet): - """Create Neutron subnet. - - Creates a PLUMgrid-based DHCP and NAT Virtual Network - Functions (VNFs). 
- """ - - LOG.debug(_("Neutron PLUMgrid Director: create_subnet() called")) - - with context.session.begin(subtransactions=True): - # Plugin DB - Subnet Create - net_db = super(NeutronPluginPLUMgridV2, self).get_network( - context, subnet['subnet']['network_id'], fields=None) - s = subnet['subnet'] - ipnet = netaddr.IPNetwork(s['cidr']) - - # PLUMgrid Director reserves the last IP address for GW - # when is not defined - if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED: - gw_ip = str(netaddr.IPAddress(ipnet.last - 1)) - subnet['subnet']['gateway_ip'] = gw_ip - - # PLUMgrid reserves the first IP - if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED: - allocation_pool = self._allocate_pools_for_subnet(context, s) - subnet['subnet']['allocation_pools'] = allocation_pool - - sub_db = super(NeutronPluginPLUMgridV2, self).create_subnet( - context, subnet) - - try: - LOG.debug(_("PLUMgrid Library: create_subnet() called")) - self._plumlib.create_subnet(sub_db, net_db, ipnet) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return sub_db - - def delete_subnet(self, context, subnet_id): - """Delete subnet core Neutron API.""" - - LOG.debug(_("Neutron PLUMgrid Director: delete_subnet() called")) - # Collecting subnet info - sub_db = self._get_subnet(context, subnet_id) - net_id = sub_db["network_id"] - net_db = self.get_network(context, net_id) - tenant_id = net_db["tenant_id"] - - with context.session.begin(subtransactions=True): - # Plugin DB - Subnet Delete - super(NeutronPluginPLUMgridV2, self).delete_subnet( - context, subnet_id) - try: - LOG.debug(_("PLUMgrid Library: delete_subnet() called")) - self._plumlib.delete_subnet(tenant_id, net_db, net_id) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def update_subnet(self, context, subnet_id, subnet): - """Update subnet core Neutron API.""" - - LOG.debug(_("update_subnet() called")) - # Collecting subnet info - 
orig_sub_db = self._get_subnet(context, subnet_id) - - with context.session.begin(subtransactions=True): - # Plugin DB - Subnet Update - new_sub_db = super(NeutronPluginPLUMgridV2, - self).update_subnet(context, subnet_id, subnet) - ipnet = netaddr.IPNetwork(new_sub_db['cidr']) - - try: - # PLUMgrid Server does not support updating resources yet - LOG.debug(_("PLUMgrid Library: update_network() called")) - self._plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return new_sub_db - - def create_router(self, context, router): - """ - Create router extension Neutron API - """ - LOG.debug(_("Neutron PLUMgrid Director: create_router() called")) - - tenant_id = self._get_tenant_id_for_create(context, router["router"]) - - with context.session.begin(subtransactions=True): - - # Create router in DB - router_db = super(NeutronPluginPLUMgridV2, - self).create_router(context, router) - # Create router on the network controller - try: - # Add Router to VND - LOG.debug(_("PLUMgrid Library: create_router() called")) - self._plumlib.create_router(tenant_id, router_db) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return created router - return router_db - - def update_router(self, context, router_id, router): - - LOG.debug(_("Neutron PLUMgrid Director: update_router() called")) - - with context.session.begin(subtransactions=True): - router_db = super(NeutronPluginPLUMgridV2, - self).update_router(context, router_id, router) - try: - LOG.debug(_("PLUMgrid Library: update_router() called")) - self._plumlib.update_router(router_db, router_id) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return updated router - return router_db - - def delete_router(self, context, router_id): - LOG.debug(_("Neutron PLUMgrid Director: delete_router() called")) - - with 
context.session.begin(subtransactions=True): - orig_router = self._get_router(context, router_id) - tenant_id = orig_router["tenant_id"] - - super(NeutronPluginPLUMgridV2, self).delete_router(context, - router_id) - - try: - LOG.debug(_("PLUMgrid Library: delete_router() called")) - self._plumlib.delete_router(tenant_id, router_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def add_router_interface(self, context, router_id, interface_info): - - LOG.debug(_("Neutron PLUMgrid Director: " - "add_router_interface() called")) - with context.session.begin(subtransactions=True): - # Validate args - router_db = self._get_router(context, router_id) - tenant_id = router_db['tenant_id'] - - # Create interface in DB - int_router = super(NeutronPluginPLUMgridV2, - self).add_router_interface(context, - router_id, - interface_info) - port_db = self._get_port(context, int_router['port_id']) - subnet_id = port_db["fixed_ips"][0]["subnet_id"] - subnet_db = super(NeutronPluginPLUMgridV2, - self)._get_subnet(context, subnet_id) - ipnet = netaddr.IPNetwork(subnet_db['cidr']) - - # Create interface on the network controller - try: - LOG.debug(_("PLUMgrid Library: add_router_interface() called")) - self._plumlib.add_router_interface(tenant_id, router_id, - port_db, ipnet) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return int_router - - def remove_router_interface(self, context, router_id, int_info): - - LOG.debug(_("Neutron PLUMgrid Director: " - "remove_router_interface() called")) - with context.session.begin(subtransactions=True): - # Validate args - router_db = self._get_router(context, router_id) - tenant_id = router_db['tenant_id'] - if 'port_id' in int_info: - port = self._get_port(context, int_info['port_id']) - net_id = port['network_id'] - - elif 'subnet_id' in int_info: - subnet_id = int_info['subnet_id'] - subnet = self._get_subnet(context, subnet_id) - net_id = 
subnet['network_id'] - - # Remove router in DB - del_int_router = super(NeutronPluginPLUMgridV2, - self).remove_router_interface(context, - router_id, - int_info) - - try: - LOG.debug(_("PLUMgrid Library: " - "remove_router_interface() called")) - self._plumlib.remove_router_interface(tenant_id, - net_id, router_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return del_int_router - - def create_floatingip(self, context, floatingip): - LOG.debug(_("Neutron PLUMgrid Director: create_floatingip() called")) - - try: - floating_ip = None - floating_ip = super(NeutronPluginPLUMgridV2, - self).create_floatingip(context, floatingip) - LOG.debug(_("PLUMgrid Library: create_floatingip() called")) - self._plumlib.create_floatingip(floating_ip) - - return floating_ip - except Exception as err_message: - if floating_ip is not None: - self.delete_floatingip(context, floating_ip["id"]) - raise plum_excep.PLUMgridException(err_msg=err_message) - - def update_floatingip(self, context, id, floatingip): - LOG.debug(_("Neutron PLUMgrid Director: update_floatingip() called")) - - try: - floating_ip_orig = super(NeutronPluginPLUMgridV2, - self).get_floatingip(context, id) - floating_ip = super(NeutronPluginPLUMgridV2, - self).update_floatingip(context, id, - floatingip) - LOG.debug(_("PLUMgrid Library: update_floatingip() called")) - self._plumlib.update_floatingip(floating_ip_orig, floating_ip, - id) - - return floating_ip - except Exception as err_message: - if floatingip['floatingip']['port_id']: - self.disassociate_floatingips(context, - floatingip['floatingip']['port_id'], - do_notify=False) - raise plum_excep.PLUMgridException(err_msg=err_message) - - def delete_floatingip(self, context, id): - LOG.debug(_("Neutron PLUMgrid Director: delete_floatingip() called")) - - floating_ip_orig = super(NeutronPluginPLUMgridV2, - self).get_floatingip(context, id) - try: - LOG.debug(_("PLUMgrid Library: delete_floatingip() called")) - 
self._plumlib.delete_floatingip(floating_ip_orig, id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - super(NeutronPluginPLUMgridV2, self).delete_floatingip(context, id) - - def disassociate_floatingips(self, context, port_id, do_notify=True): - LOG.debug(_("Neutron PLUMgrid Director: disassociate_floatingips() " - "called")) - - try: - fip_qry = context.session.query(l3_db.FloatingIP) - floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() - - LOG.debug(_("PLUMgrid Library: disassociate_floatingips()" - " called")) - self._plumlib.disassociate_floatingips(floating_ip, port_id) - - except sa_exc.NoResultFound: - pass - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return super(NeutronPluginPLUMgridV2, - self).disassociate_floatingips( - context, port_id, do_notify=do_notify) - - def create_security_group(self, context, security_group, default_sg=False): - """Create a security group - - Create a new security group, including the default security group - """ - LOG.debug("Neutron PLUMgrid Director: create_security_group()" - " called") - - with context.session.begin(subtransactions=True): - - sg = security_group.get('security_group') - - tenant_id = self._get_tenant_id_for_create(context, sg) - if not default_sg: - self._ensure_default_security_group(context, tenant_id) - - sg_db = super(NeutronPluginPLUMgridV2, - self).create_security_group(context, security_group, - default_sg) - try: - LOG.debug("PLUMgrid Library: create_security_group()" - " called") - self._plumlib.create_security_group(sg_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return sg_db - - def update_security_group(self, context, sg_id, security_group): - """Update a security group - - Update security group name/description in Neutron and PLUMgrid - platform - """ - with context.session.begin(subtransactions=True): - sg_db = 
(super(NeutronPluginPLUMgridV2, - self).update_security_group(context, - sg_id, - security_group)) - if ('name' in security_group['security_group'] and - sg_db['name'] != 'default'): - try: - LOG.debug("PLUMgrid Library: update_security_group()" - " called") - self._plumlib.update_security_group(sg_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - return sg_db - - def delete_security_group(self, context, sg_id): - """Delete a security group - - Delete security group from Neutron and PLUMgrid Platform - - :param sg_id: security group ID of the rule to be removed - """ - with context.session.begin(subtransactions=True): - - sg = super(NeutronPluginPLUMgridV2, self).get_security_group( - context, sg_id) - if not sg: - raise sec_grp.SecurityGroupNotFound(id=sg_id) - - if sg['name'] == 'default' and not context.is_admin: - raise sec_grp.SecurityGroupCannotRemoveDefault() - - sec_grp_ip = sg['id'] - filters = {'security_group_id': [sec_grp_ip]} - if super(NeutronPluginPLUMgridV2, - self)._get_port_security_group_bindings(context, - filters): - raise sec_grp.SecurityGroupInUse(id=sec_grp_ip) - - sec_db = super(NeutronPluginPLUMgridV2, - self).delete_security_group(context, sg_id) - try: - LOG.debug("PLUMgrid Library: delete_security_group()" - " called") - self._plumlib.delete_security_group(sg) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return sec_db - - def create_security_group_rule(self, context, security_group_rule): - """Create a security group rule - - Create a security group rule in Neutron and PLUMgrid Platform - """ - LOG.debug("Neutron PLUMgrid Director: create_security_group_rule()" - " called") - bulk_rule = {'security_group_rules': [security_group_rule]} - return self.create_security_group_rule_bulk(context, bulk_rule)[0] - - def create_security_group_rule_bulk(self, context, security_group_rule): - """Create security group rules - - Create security 
group rules in Neutron and PLUMgrid Platform - - :param security_group_rule: list of rules to create - """ - sg_rules = security_group_rule.get('security_group_rules') - - with context.session.begin(subtransactions=True): - sg_id = super(NeutronPluginPLUMgridV2, - self)._validate_security_group_rules( - context, security_group_rule) - - # Check to make sure security group exists - security_group = super(NeutronPluginPLUMgridV2, - self).get_security_group(context, - sg_id) - - if not security_group: - raise sec_grp.SecurityGroupNotFound(id=sg_id) - - # Check for duplicate rules - self._check_for_duplicate_rules(context, sg_rules) - - sec_db = (super(NeutronPluginPLUMgridV2, - self).create_security_group_rule_bulk_native( - context, security_group_rule)) - try: - LOG.debug(_("PLUMgrid Library: create_security_" - "group_rule_bulk() called")) - self._plumlib.create_security_group_rule_bulk(sec_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return sec_db - - def delete_security_group_rule(self, context, sgr_id): - """Delete a security group rule - - Delete a security group rule in Neutron and PLUMgrid Platform - """ - - LOG.debug("Neutron PLUMgrid Director: delete_security_group_rule()" - " called") - - sgr = (super(NeutronPluginPLUMgridV2, - self).get_security_group_rule(context, sgr_id)) - - if not sgr: - raise sec_grp.SecurityGroupRuleNotFound(id=sgr_id) - - super(NeutronPluginPLUMgridV2, - self).delete_security_group_rule(context, sgr_id) - try: - LOG.debug("PLUMgrid Library: delete_security_" - "group_rule() called") - self._plumlib.delete_security_group_rule(sgr) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - """ - Internal PLUMgrid Functions - """ - - def _get_plugin_version(self): - return VERSION - - def _port_viftype_binding(self, context, port): - port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_IOVISOR - port[portbindings.VIF_DETAILS] = { - # 
TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases} - return port - - def _network_admin_state(self, network): - if network["network"].get("admin_state_up") is False: - LOG.warning("Networks with admin_state_up=False are not " - "supported by PLUMgrid plugin yet.") - return network - - def _allocate_pools_for_subnet(self, context, subnet): - """Create IP allocation pools for a given subnet - - Pools are defined by the 'allocation_pools' attribute, - a list of dict objects with 'start' and 'end' keys for - defining the pool range. - Modified from Neutron DB based class - - """ - - pools = [] - # Auto allocate the pool around gateway_ip - net = netaddr.IPNetwork(subnet['cidr']) - boundary = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last)) - potential_dhcp_ip = int(net.first + 1) - if boundary == potential_dhcp_ip: - first_ip = net.first + 3 - boundary = net.first + 2 - else: - first_ip = net.first + 2 - last_ip = net.last - 1 - # Use the gw_ip to find a point for splitting allocation pools - # for this subnet - split_ip = min(max(boundary, net.first), net.last) - if split_ip > first_ip: - pools.append({'start': str(netaddr.IPAddress(first_ip)), - 'end': str(netaddr.IPAddress(split_ip - 1))}) - if split_ip < last_ip: - pools.append({'start': str(netaddr.IPAddress(split_ip + 1)), - 'end': str(netaddr.IPAddress(last_ip))}) - # return auto-generated pools - # no need to check for their validity - return pools diff --git a/ansible/roles/plumgrid-plugin/templates/plumlib.filters b/ansible/roles/plumgrid-plugin/templates/plumlib.filters deleted file mode 100644 index 2ea6713..0000000 --- a/ansible/roles/plumgrid-plugin/templates/plumlib.filters +++ /dev/null @@ -1,23 +0,0 @@ -# neutron-rootwrap command filters for nodes on which neutron is -# expected to control network -# -# This file should be owned by (and only-writeable by) the root user - -# format seems to be -# cmd-name: 
filter-name, raw-command, user, args - -[Filters] - -# neutron/agent/linux/iptables_manager.py -# "iptables-save", ... -python: CommandFilter, python, root -ip: CommandFilter, ip, root -kill: CommandFilter, kill, root -rm: CommandFilter, rm, root -ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root, ifc_ctl -neutron-ns-metadata-proxy: CommandFilter, /usr/bin/neutron-ns-metadata-proxy, root -pg-local-metadata: CommandFilter, /usr/local/bin/pg-local-metadata, pg-local-metadata, root -pg-tunnel-metadata: CommandFilter, /usr/local/bin/pg-tunnel-metadata, pg-tunnel-metadata, root -ping: RegExpFilter, /bin/ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ -ping: IpNetnsExecFilter, ping, root -ping: CommandFilter, ping, root diff --git a/ansible/roles/plumgrid-plugin/templates/plumlib.py b/ansible/roles/plumgrid-plugin/templates/plumlib.py deleted file mode 100644 index b06145e..0000000 --- a/ansible/roles/plumgrid-plugin/templates/plumlib.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Fawad Khaliq, fawad@plumgrid.com, PLUMgrid, Inc. 
- -""" -Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) -This plugin will forward authenticated REST API calls -to the PLUMgrid Network Management System called Director -""" - -from plumgridlib import plumlib - -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class Plumlib(object): - """ - Class PLUMgrid Python Library. This library is a third-party tool - needed by PLUMgrid plugin to implement all core API in Neutron. - """ - - def __init__(self): - LOG.info(_('Python PLUMgrid Library Started ')) - - def director_conn(self, director_plumgrid, director_port, timeout, - director_admin, director_password): - self.plumlib = plumlib.Plumlib(director_plumgrid, - director_port, - timeout, - director_admin, - director_password) - - def create_network(self, tenant_id, net_db, network): - self.plumlib.create_network(tenant_id, net_db, network) - - def update_network(self, tenant_id, net_id, network): - self.plumlib.update_network(tenant_id, net_id, network) - - def delete_network(self, net_db, net_id): - self.plumlib.delete_network(net_db, net_id) - - def create_subnet(self, sub_db, net_db, ipnet): - self.plumlib.create_subnet(sub_db, net_db, ipnet) - - def update_subnet(self, orig_sub_db, new_sub_db, ipnet): - self.plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) - - def delete_subnet(self, tenant_id, net_db, net_id): - self.plumlib.delete_subnet(tenant_id, net_db, net_id) - - def create_port(self, port_db, router_db): - self.plumlib.create_port(port_db, router_db) - - def update_port(self, port_db, router_db): - self.plumlib.update_port(port_db, router_db) - - def delete_port(self, port_db, router_db): - self.plumlib.delete_port(port_db, router_db) - - def create_router(self, tenant_id, router_db): - self.plumlib.create_router(tenant_id, router_db) - - def update_router(self, router_db, router_id): - self.plumlib.update_router(router_db, router_id) - - def delete_router(self, tenant_id, router_id): - 
self.plumlib.delete_router(tenant_id, router_id) - - def add_router_interface(self, tenant_id, router_id, port_db, ipnet): - self.plumlib.add_router_interface(tenant_id, router_id, port_db, ipnet) - - def remove_router_interface(self, tenant_id, net_id, router_id): - self.plumlib.remove_router_interface(tenant_id, net_id, router_id) - - def create_floatingip(self, floating_ip): - self.plumlib.create_floatingip(floating_ip) - - def update_floatingip(self, floating_ip_orig, floating_ip, id): - self.plumlib.update_floatingip(floating_ip_orig, floating_ip, id) - - def delete_floatingip(self, floating_ip_orig, id): - self.plumlib.delete_floatingip(floating_ip_orig, id) - - def disassociate_floatingips(self, floating_ip, port_id): - self.plumlib.disassociate_floatingips(floating_ip, port_id) - - def create_security_group(self, sg_db): - self.plumlib.create_security_group(sg_db) - - def update_security_group(self, sg_db): - self.plumlib.update_security_group(sg_db) - - def delete_security_group(self, sg_db): - self.plumlib.delete_security_group(sg_db) - - def create_security_group_rule(self, sg_rule_db): - self.plumlib.create_security_group_rule(sg_rule_db) - - def create_security_group_rule_bulk(self, sg_rule_db): - self.plumlib.create_security_group_rule_bulk(sg_rule_db) - - def delete_security_group_rule(self, sg_rule_db): - self.plumlib.delete_security_group_rule(sg_rule_db) - diff --git a/ansible/roles/plumgrid/tasks/main.yml b/ansible/roles/plumgrid/tasks/main.yml deleted file mode 100644 index 121f24c..0000000 --- a/ansible/roles/plumgrid/tasks/main.yml +++ /dev/null @@ -1,156 +0,0 @@ -# -# Copyright (c) 2012-2015, PLUMgrid, http://plumgrid.com -# - -#- include: plumgrid_packages.yml -# when: enable_plumgrid == True - -# Create a PLUMgrid sources.list -- name: Create plumgrid sources.list - lineinfile: - dest: /etc/apt/sources.list.d/plumgrid.list - line: "deb {{ plumgrid_repo }}/plumgrid ./" - state: present - create: yes - -# Create a PLUMgrid sources.list -- 
name: Add plumgrid-images to repo - lineinfile: - dest: /etc/apt/sources.list.d/plumgrid.list - line: "deb {{ plumgrid_repo }}/plumgrid-images ./" - state: present - -# Copy GPG-key file to target nodes -- name: Copy Plumgrid GPG-key file - command: apt-key adv --keyserver keyserver.ubuntu.com --recv 63F65885554E46B2 - -# Update repositories -- name: Running apt-update - apt: - update_cache: yes - -# for compute hosts -- name: Create nova ifc_ctl_sudoers file - lineinfile: - dest: /etc/sudoers.d/ifc_ctl_sudoers - line: "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *" - state: present - create: yes - owner: root - mode: "644" - when: inventory_hostname in groups['compute'] - -# Install package iovisor-dkms -- name: Install iovisor - apt: - name: iovisor-dkms - state: present - force: yes - -# Install package plumgrid-lxc -- name: Install plumgrid-lxc - apt: - name: plumgrid-lxc - state: present - force: yes - -# Install package nova-network -- name: Install nova-network - apt: - name: nova-network - state: present - force: yes - when: inventory_hostname in groups['compute'] - -- name: Disable nova-network - service: - name: nova-network - enabled: no - when: inventory_hostname in groups['compute'] - -- name: Stop nova-network - service: - name: nova-network - state: stopped - when: inventory_hostname in groups['compute'] - -# Remove openvswitch -- name: Remove openvswitch - apt: - state: absent - force: yes - name: "{{ item }}" - with_items: - - openvswitch-common - - openvswitch-datapath-dkms - -# Modify template fies -- name: Setup Keepalived Config on Controller - template: - src: keepalived.conf - dest: /var/lib/libvirt/filesystems/plumgrid/etc/keepalived/keepalived.conf - when: inventory_hostname in groups['controller'] - -- name: Setup nginx Config - template: - src: default.conf - dest: /var/lib/libvirt/filesystems/plumgrid/opt/pg/sal/nginx/conf.d/default.conf - -- name: Setup plumgrid Conf - template: - src: plumgrid.conf - dest: 
/var/lib/libvirt/filesystems/plumgrid/opt/pg/etc/plumgrid.conf - -- name: Update qemu settings for compute hosts - template: - src: qemu.conf - dest: /etc/libvirt/qemu.conf - when: inventory_hostname in groups['compute'] - -# Update hostname -- name: Update Plumgrid hostname - replace: - dest: "/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hostname" - replace: "pg-{{ inventory_hostname }}" - regexp: "plumgrid" - -# Update hosts -- name: Update /etc/hosts - replace: - dest: "/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hosts" - replace: "pg-{{ inventory_hostname }}" - regexp: "plumgrid" - -- name: Create ifcs file - lineinfile: - dest: "/var/lib/libvirt/filesystems/plumgrid-data/conf/pg/ifcs.conf" - line: "{{ fabric_interface }} = fabric_core host" - create: yes - -- name: Add gateway int to network node - lineinfile: - dest: "/var/lib/libvirt/filesystems/plumgrid-data/conf/pg/ifcs.conf" - line: "{{ ext_interface }} = access_phys" - create: yes - when: inventory_hostname in groups['network'] - -- name: Set mtu to 1580 in config file - lineinfile: - dest: "/etc/network/interfaces" - line: " mtu 1580" - create: yes - insertafter: "^iface {{ fabric_interface }}" - -- name: Set mtu to 1580 now - command: "ifconfig {{ fabric_interface }} mtu 1580" - -- name: Ensure PLUMgrid services are started - service: - name: plumgrid - state: started - -- name: Restart libvirt-bin - service: - name: libvirt-bin - state: restarted - pattern: libvirt-bin diff --git a/ansible/roles/plumgrid/templates/default.conf b/ansible/roles/plumgrid/templates/default.conf deleted file mode 100644 index 5652c44..0000000 --- a/ansible/roles/plumgrid/templates/default.conf +++ /dev/null @@ -1,143 +0,0 @@ -upstream sal { - server unix:/opt/pg/tmp/sal-web.socket; - keepalive 16; -} - -upstream websocket { - server unix:/opt/pg/tmp/sal-ws.socket; - keepalive 16; -} - -upstream pgCli { - server {{ nginx_virtual_ip }}:3000; -} - -map $http_upgrade $connection_upgrade { - default upgrade; - '' 
close; -} - -lua_socket_log_errors off; -#lua_code_cache off; -lua_shared_dict rest_servers 16K; -lua_shared_dict apache_servers 16K; -lua_shared_dict tc_servers 16K; -init_by_lua 'lb = require "lb" -init_servers = { - ["{{ real1 }}"] = true, -{% if real2 is defined %} - ["{{ real2 }}"] = true, -{% endif %} -{% if real3 is defined %} - ["{{ real3 }}"] = true, -{% endif %} -}'; - -# Redirect http to https -server { - listen {{ nginx_virtual_ip }}:9080; - server_name $hostname; - return 301 https://$host$request_uri; -} - -server { - listen {{ nginx_virtual_ip }}:443 ssl; - ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers AES128-SHA:AES256-SHA:RC4-SHA:DES-CBC3-SHA:RC4-MD5; - ssl_certificate /opt/pg/sal/nginx/ssl/default.crt; - ssl_certificate_key /opt/pg/sal/nginx/ssl/default.key; - #ssl_session_cache shared:SSL:10m; - #ssl_session_timeout 10m; - - server_name $hostname; - root /opt/pg/web; - index login.html; - - location /cli/ { - proxy_pass http://pgCli/; - proxy_redirect off; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host $host; - } - - location /vtap/ { - alias /opt/pg/vtap; - } - - # REST API calls start with /v[0-9]/, a keyword, or a capital letter. - # Note: Regular expressions have higher precedence than prefix matches - # so don't combine with /0/... 
- location ~ ^/(v[0-9]/|pg/|docs|api-docs|[A-Z]) { - set $active_upstream "http://sal"; - access_by_lua 'if ngx.req.get_uri_args()["server"]~=nil then - if ngx.req.get_uri_args()["server"]~=ngx.var.host then - ngx.var.active_upstream = "https://"..ngx.req.get_uri_args()["server"]..ngx.var.request_uri - end - end'; - - proxy_pass $active_upstream; - proxy_http_version 1.1; - proxy_set_header Connection ""; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - - location /0/ { - set $active_upstream "http://sal"; - access_by_lua 'if ngx.req.get_uri_args()["server"]~=nil then - if ngx.req.get_uri_args()["server"]~=ngx.var.host then - ngx.var.active_upstream = "https://"..ngx.req.get_uri_args()["server"]..ngx.var.request_uri - end - end'; - - proxy_pass $active_upstream; - proxy_http_version 1.1; - proxy_set_header Connection ""; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - - location /0/websocket { - set $active_upstream "http://websocket"; - access_by_lua 'if ngx.req.get_uri_args()["server"]~=nil then - if ngx.req.get_uri_args()["server"]~=ngx.var.host then - ngx.var.active_upstream = "https://"..ngx.req.get_uri_args()["server"]..ngx.var.request_uri - end - end'; - proxy_pass $active_upstream; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - } -} - -server { - listen unix:/opt/pg/tmp/sal-rest.socket; - - # debug socket - listen 127.0.0.1:9080; - - location / { - set $active_upstream ""; - access_by_lua 'ngx.var.active_upstream = find_next(ngx.shared.rest_servers, {{ rest_port }})'; - proxy_pass http://$active_upstream:{{ rest_port }}; - } - - location /_debug/rest_servers { - access_by_lua 'find_next(ngx.shared.rest_servers, {{ rest_port }})'; - content_by_lua ' - for _, ip in pairs(ngx.shared.rest_servers:get_keys()) do - ngx.say(ip.."="..ngx.shared.rest_servers:get(ip)) - end - '; - } - - location /_debug/tc_servers { - access_by_lua 
'find_next(ngx.shared.tc_servers, 12349)'; - content_by_lua ' - for _, ip in pairs(ngx.shared.tc_servers:get_keys()) do - ngx.say(ip.."="..ngx.shared.tc_servers:get(ip)) - end - '; - } -} diff --git a/ansible/roles/plumgrid/templates/keepalived.conf b/ansible/roles/plumgrid/templates/keepalived.conf deleted file mode 100644 index b2b638d..0000000 --- a/ansible/roles/plumgrid/templates/keepalived.conf +++ /dev/null @@ -1,30 +0,0 @@ -global_defs { - router_id {{ hostname }} -} - -vrrp_script chk_nginx { - script "killall -0 nginx" - interval 2 -} - -vrrp_instance nos { - virtual_router_id {{ keepalived_router_id }} - - # for electing MASTER, highest priority wins. - priority {{ keepalived_priority }} - state BACKUP - nopreempt - - interface {{ management_bridge }} - - virtual_ipaddress { - {{ pg_vip }} dev {{ management_bridge }} label {{ management_bridge }}:1 - } - track_script { - chk_nginx - } - authentication { - auth_type PASS - auth_pass {{ keepalived_password }} - } -} diff --git a/ansible/roles/plumgrid/templates/plumgrid.conf b/ansible/roles/plumgrid/templates/plumgrid.conf deleted file mode 100644 index 6fa3cc0..0000000 --- a/ansible/roles/plumgrid/templates/plumgrid.conf +++ /dev/null @@ -1,10 +0,0 @@ -plumgrid_ip={{ plumgrid_ip }} -plumgrid_port={{ plumgrid_port }} -mgmt_dev={{ management_bridge }} -label={{ inventory_hostname }} -plumgrid_rsync_port=2222 -plumgrid_rest_addr=0.0.0.0:{{ rest_port }} -fabric_mode={{ fabric_mode }} -start_plumgrid_iovisor=yes -start_plumgrid=`/opt/pg/scripts/pg_is_director.sh $plumgrid_ip` -location= diff --git a/ansible/roles/plumgrid/templates/qemu.conf b/ansible/roles/plumgrid/templates/qemu.conf deleted file mode 100644 index d486a79..0000000 --- a/ansible/roles/plumgrid/templates/qemu.conf +++ /dev/null @@ -1,27 +0,0 @@ -# This file is managed by Managed by Ansible - -# This is the basic set of devices allowed / required by -# all virtual machines. 
-# -cgroup_device_acl = [ - "/dev/null", "/dev/full", "/dev/zero", - "/dev/random", "/dev/urandom", - "/dev/ptmx", "/dev/kvm", "/dev/kqemu", - "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", - "/dev/net/tun" -] - -# If clear_emulator_capabilities is enabled, libvirt will drop all -# privileged capabilities of the QEmu/KVM emulator. This is enabled by -# default. -clear_emulator_capabilities=0 - -# The user for QEMU processes run by the system instance. It can be -# specified as a user name or as a user id. The qemu driver will try to -# parse this value first as a name and then, if the name doesn't exist, -# as a user id. -# -user="root" - -# The group for QEMU processes run by the system instance. -group="root" diff --git a/ansible/roles/secgroup/handlers/main.yml b/ansible/roles/secgroup/handlers/main.yml new file mode 100644 index 0000000..e4e11ec --- /dev/null +++ b/ansible/roles/secgroup/handlers/main.yml @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart controller relation service + service: name={{ item }} state=restarted enabled=yes + ignore_errors: True + with_items: controller_services + +- name: restart compute relation service + service: name={{ item }} state=restarted enabled=yes + ignore_errors: True + with_items: compute_services diff --git a/ansible/roles/secgroup/tasks/main.yml b/ansible/roles/secgroup/tasks/main.yml new file mode 100644 index 0000000..43a3f7f --- /dev/null +++ b/ansible/roles/secgroup/tasks/main.yml @@ -0,0 +1,20 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + tags: secgroup + +- debug: msg={{ enable_secgroup }} + tags: secgroup + +- include: secgroup.yml + when: '{{ enable_secgroup }} == False' + tags: secgroup + +- meta: flush_handlers diff --git a/ansible/roles/secgroup/tasks/secgroup.yml b/ansible/roles/secgroup/tasks/secgroup.yml new file mode 100644 index 0000000..5e8684d --- /dev/null +++ b/ansible/roles/secgroup/tasks/secgroup.yml @@ -0,0 +1,35 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: make sure template dir exits + file: path=/opt/os_templates state=directory mode=0755 + tags: secgroup + +- name: copy configs + template: src={{ item.src}} dest=/opt/os_templates + with_items: "{{ configs_templates }}" + tags: secgroup + +- name: update controller configs + shell: '[ -f {{ item.1 }} ] && crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} || /bin/true' + tags: secgroup + with_subelements: + - configs_templates + - dest + notify: restart controller relation service + when: inventory_hostname in "{{ groups['controller'] }}" + +- name: update compute configs + shell: '[ -f {{ item.1 }} ] && crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} || /bin/true' + tags: secgroup + with_subelements: + - configs_templates + - dest + notify: restart compute relation service + when: inventory_hostname in "{{ groups['compute'] }}" diff --git a/ansible/roles/secgroup/templates/neutron.j2 b/ansible/roles/secgroup/templates/neutron.j2 new file mode 100644 index 0000000..7b39e18 --- /dev/null +++ b/ansible/roles/secgroup/templates/neutron.j2 @@ -0,0 +1,4 @@ +[securitygroup] +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +enable_security_group = False + diff --git a/ansible/roles/secgroup/templates/nova.j2 b/ansible/roles/secgroup/templates/nova.j2 new file mode 100644 index 0000000..91fa6cd --- /dev/null +++ b/ansible/roles/secgroup/templates/nova.j2 @@ -0,0 +1,3 @@ +[DEFAULT] +firewall_driver = nova.virt.firewall.NoopFirewallDriver +security_group_api = nova diff --git a/ansible/roles/secgroup/vars/Debian.yml b/ansible/roles/secgroup/vars/Debian.yml new file mode 100644 index 
0000000..a666908 --- /dev/null +++ b/ansible/roles/secgroup/vars/Debian.yml @@ -0,0 +1,35 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +configs_templates: + - src: nova.j2 + dest: + - /etc/nova/nova.conf + - src: neutron.j2 + dest: + - /etc/neutron/plugins/ml2/ml2_conf.ini + - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + - /etc/neutron/plugins/ml2/restproxy.ini + +controller_services: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-novncproxy + - nova-scheduler + - neutron-server + - neutron-plugin-openvswitch-agent + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-metadata-agent + +compute_services: + - nova-compute + - neutron-plugin-openvswitch-agent diff --git a/ansible/roles/secgroup/vars/RedHat.yml b/ansible/roles/secgroup/vars/RedHat.yml new file mode 100644 index 0000000..4c04f6d --- /dev/null +++ b/ansible/roles/secgroup/vars/RedHat.yml @@ -0,0 +1,35 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +configs_templates: + - src: nova.j2 + dest: + - /etc/nova/nova.conf + - src: neutron.j2 + dest: + - /etc/neutron/plugins/ml2/ml2_conf.ini + - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + - /etc/neutron/plugins/ml2/restproxy.ini + +controller_services: + - openstack-nova-api + - openstack-nova-cert + - openstack-nova-conductor + - openstack-nova-consoleauth + - openstack-nova-novncproxy + - openstack-nova-scheduler + - neutron-openvswitch-agent + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-metadata-agent + - neutron-server + +compute_services: + - openstack-nova-compute + - neutron-openvswitch-agent diff --git a/ansible/roles/secgroup/vars/main.yml b/ansible/roles/secgroup/vars/main.yml new file mode 100644 index 0000000..209e1e0 --- /dev/null +++ b/ansible/roles/secgroup/vars/main.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] +metering_secret: 1c5df72079b31fb47747 diff --git a/ansible/roles/setup-network/files/setup_networks/log.py b/ansible/roles/setup-network/files/setup_networks/log.py new file mode 100644 index 0000000..fffeb58 --- /dev/null +++ b/ansible/roles/setup-network/files/setup_networks/log.py @@ -0,0 +1,41 @@ +import logging +import os +loggers = {} +log_dir="/var/log/setup_network" +try: + os.makedirs(log_dir) +except: + pass + +def getLogger(name): + if name in loggers: + return loggers[name] + + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + + # create file handler which logs even debug messages + log_file = "%s/%s.log" % (log_dir, name) + try: + os.remove(log_file) + except: + pass + + fh = logging.FileHandler(log_file) + fh.setLevel(logging.DEBUG) + + # create console handler with a higher log level + ch = logging.StreamHandler() + ch.setLevel(logging.ERROR) + + # create formatter and add it to the handlers + formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") + ch.setFormatter(formatter) + fh.setFormatter(formatter) + + # add the handlers to logger + logger.addHandler(ch) + logger.addHandler(fh) + + loggers[name] = logger + return logger diff --git a/ansible/roles/setup-network/files/setup_networks/net_init b/ansible/roles/setup-network/files/setup_networks/net_init new file mode 100755 index 0000000..c27a8bf --- /dev/null +++ b/ansible/roles/setup-network/files/setup_networks/net_init @@ -0,0 +1,20 @@ +#!/bin/bash +## BEGIN INIT INFO +# Provides: anamon.init +# Default-Start: 3 5 +# Default-Stop: 0 1 2 4 6 +# Required-Start: $network +# Short-Description: Starts the cobbler anamon boot 
notification program +# Description: anamon runs the first time a machine is booted after +# installation. +## END INIT INFO + +# +# anamon.init: Starts the cobbler post-install boot notification program +# +# chkconfig: 35 0 6 +# +# description: anamon runs the first time a machine is booted after +# installation. +# +python /opt/setup_networks/setup_networks.py diff --git a/ansible/roles/setup-network/files/setup_networks/setup_networks.py b/ansible/roles/setup-network/files/setup_networks/setup_networks.py new file mode 100644 index 0000000..e58d6c7 --- /dev/null +++ b/ansible/roles/setup-network/files/setup_networks/setup_networks.py @@ -0,0 +1,73 @@ +import yaml +import netaddr +import os +import log as logging + +LOG = logging.getLogger("net-init") +config_path = os.path.join(os.path.dirname(__file__), "network.cfg") + +def setup_bondings(bond_mappings): + print bond_mappings + +def add_vlan_link(interface, ifname, vlan_id): + LOG.info("add_vlan_link enter") + cmd = "ip link add link %s name %s type vlan id %s; " % (ifname, interface, vlan_id) + cmd += "ip link set %s up; ip link set %s up" % (interface, ifname) + LOG.info("add_vlan_link: cmd=%s" % cmd) + os.system(cmd) + +def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None): + LOG.info("add_ovs_port enter") + cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname) + if vlan_id: + cmd += " tag=%s" % vlan_id + cmd += " -- set Interface %s type=internal;" % ifname + cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \ + % (ifname, uplink) + cmd += "ip link set %s up;" % ifname + LOG.info("add_ovs_port: cmd=%s" % cmd) + os.system(cmd) + +def setup_intfs(sys_intf_mappings, uplink_map): + LOG.info("setup_intfs enter") + for intf_name, intf_info in sys_intf_mappings.items(): + if intf_info["type"] == "vlan": + add_vlan_link(intf_name, intf_info["interface"], intf_info["vlan_tag"]) + elif intf_info["type"] == "ovs": + add_ovs_port( + intf_info["interface"], + intf_name, + 
uplink_map[intf_info["interface"]], + vlan_id=intf_info.get("vlan_tag")) + else: + pass + +def setup_ips(ip_settings, sys_intf_mappings): + LOG.info("setup_ips enter") + for intf_info in ip_settings.values(): + network = netaddr.IPNetwork(intf_info["cidr"]) + if sys_intf_mappings[intf_info["name"]]["type"] == "ovs": + intf_name = intf_info["name"] + else: + intf_name = intf_info["alias"] + cmd = "ip addr add %s/%s brd %s dev %s;" \ + % (intf_info["ip"], intf_info["netmask"], str(network.broadcast),intf_name) + if "gw" in intf_info: + cmd += "route del default;" + cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name) + LOG.info("setup_ips: cmd=%s" % cmd) + os.system(cmd) + +def main(config): + uplink_map = {} + setup_bondings(config["bond_mappings"]) + for provider_net in config["provider_net_mappings"]: + uplink_map[provider_net['name']] = provider_net['interface'] + + setup_intfs(config["sys_intf_mappings"], uplink_map) + setup_ips(config["ip_settings"], config["sys_intf_mappings"]) + +if __name__ == "__main__": + os.system("service openvswitch-switch status|| service openvswitch-switch start") + config = yaml.load(open(config_path)) + main(config) diff --git a/ansible/roles/setup-network/tasks/main.yml b/ansible/roles/setup-network/tasks/main.yml new file mode 100644 index 0000000..727b24e --- /dev/null +++ b/ansible/roles/setup-network/tasks/main.yml @@ -0,0 +1,62 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: disable NetworkManager + service: name=NetworkManager state=stopped enabled=no + when: ansible_os_family == 'RedHat' + +- name: enable network service + service: name=network state=started enabled=yes + when: ansible_os_family == 'RedHat' + +- name: add ovs bridge + openvswitch_bridge: bridge={{ item["name"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: 'item["type"] == "ovs"' + +- name: add ovs uplink + openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: 'item["type"] == "ovs"' + +- name: add ovs uplink + shell: ip link set {{ item["interface"] }} up + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: 'item["type"] == "ovs"' + +- name: ensure script dir exist + shell: mkdir -p /opt/setup_networks + +- name: copy scripts + copy: src={{ item }} dest=/opt/setup_networks + with_items: + - setup_networks/log.py + - setup_networks/setup_networks.py + +- name: copy boot scripts + copy: src={{ item }} dest=/etc/init.d/ mode=0755 + with_items: + - setup_networks/net_init + +- name: copy config files + template: src=network.cfg dest=/opt/setup_networks + +- name: make sure python lib exist + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - python-yaml + - python-netaddr + +- name: run scripts + shell: python /opt/setup_networks/setup_networks.py + +- name: add to boot scripts + service: name=net_init enabled=yes + +- meta: flush_handlers diff --git a/ansible/roles/setup-network/templates/my_configs.debian b/ansible/roles/setup-network/templates/my_configs.debian new file mode 
100644 index 0000000..5ab1519 --- /dev/null +++ b/ansible/roles/setup-network/templates/my_configs.debian @@ -0,0 +1,14 @@ +{%- for alias, intf in host_ip_settings.items() %} + +auto {{ alias }} +iface {{ alias }} inet static + address {{ intf["ip"] }} + netmask {{ intf["netmask"] }} +{% if "gw" in intf %} + gateway {{ intf["gw"] }} +{% endif %} +{% if intf["name"] == alias %} + pre-up ip link set {{ sys_intf_mappings[alias]["interface"] }} up + pre-up ip link add link {{ sys_intf_mappings[alias]["interface"] }} name {{ alias }} type vlan id {{ sys_intf_mappings[alias]["vlan_tag"] }} +{% endif %} +{% endfor %} diff --git a/ansible/roles/setup-network/templates/network.cfg b/ansible/roles/setup-network/templates/network.cfg new file mode 100644 index 0000000..75ba90c --- /dev/null +++ b/ansible/roles/setup-network/templates/network.cfg @@ -0,0 +1,5 @@ +bond_mappings: {{ network_cfg["bond_mappings"] }} +ip_settings: {{ ip_settings[inventory_hostname] }} +sys_intf_mappings: {{ sys_intf_mappings }} +provider_net_mappings: {{ network_cfg["provider_net_mappings"] }} + diff --git a/ansible/roles/storage/files/create_img.sh b/ansible/roles/storage/files/create_img.sh new file mode 100755 index 0000000..0039292 --- /dev/null +++ b/ansible/roles/storage/files/create_img.sh @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +seek_num=`echo $1 | sed -e 's/.* //g'` +if [ ! 
-f /var/storage.img ]; then + dd if=/dev/zero of=/var/storage.img bs=1 count=0 seek=$seek_num +fi diff --git a/ansible/roles/storage/files/get_var_size.sh b/ansible/roles/storage/files/get_var_size.sh new file mode 100755 index 0000000..9d679f9 --- /dev/null +++ b/ansible/roles/storage/files/get_var_size.sh @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +size=`df /var | awk '$3 ~ /[0-9]+/ { print $4 }'`; +if [ $size -gt 2000000000 ]; then + echo -n 2000000000000; +else + echo -n $((size * 1000 / 512 * 512)); +fi diff --git a/ansible/roles/storage/files/loop.yml b/ansible/roles/storage/files/loop.yml new file mode 100755 index 0000000..776cf8c --- /dev/null +++ b/ansible/roles/storage/files/loop.yml @@ -0,0 +1,9 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +physical_device: /dev/loop0 diff --git a/ansible/roles/storage/files/losetup.sh b/ansible/roles/storage/files/losetup.sh new file mode 100755 index 0000000..8a22a62 --- /dev/null +++ b/ansible/roles/storage/files/losetup.sh @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +loop_dev=`losetup -a |grep "/var/storage.img"|awk -F':' '{print $1}'` +if [ -z $loop_dev ]; then + losetup -f --show /var/storage.img +else + echo $loop_dev +fi + diff --git a/ansible/roles/storage/files/storage b/ansible/roles/storage/files/storage new file mode 100755 index 0000000..775e8fd --- /dev/null +++ b/ansible/roles/storage/files/storage @@ -0,0 +1,2 @@ +#! 
/bin/bash +loop_dev=`sh /opt/setup_storage/losetup.sh` diff --git a/ansible/roles/storage/files/storage.service b/ansible/roles/storage/files/storage.service new file mode 100644 index 0000000..924db25 --- /dev/null +++ b/ansible/roles/storage/files/storage.service @@ -0,0 +1,15 @@ +[Unit] +Description=Storage Service +Before=runlevel2.target runlevel3.target runlevel4.target runlevel5.target shutdown.target +After=remote-fs.target nss-lookup.target network-online.target time-sync.target network-online.target net_init.service +Before=ceph.service +Wants=network-online.target +Conflicts=shutdown.target + +[Service] +Type=oneshot +ExecStart=/bin/sh -c "/etc/init.d/storage" + +[Install] +WantedBy=multi-user.target + diff --git a/ansible/roles/storage/tasks/loop.yml b/ansible/roles/storage/tasks/loop.yml new file mode 100755 index 0000000..21b393d --- /dev/null +++ b/ansible/roles/storage/tasks/loop.yml @@ -0,0 +1,31 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: get available /var partition size + script: get_var_size.sh + register: part_size + +- name: create image file if not exists + script: create_img.sh \"{{ part_size.stdout }}\" + +- name: do a losetup on storage volumes + script: losetup.sh + register: loop_device + +- name: debug loop device + debug: msg={{ loop_device.stdout }} + +- name: get device + shell: echo '{{ loop_device.stdout }}' | sed ':a;N;$!ba;s/.*\n\(\/dev\)/\1/g' + register: loop_device_filtered + +- name: create physical and group volumes + lvg: vg=storage-volumes pvs={{ loop_device_filtered.stdout }} + vg_options=--force diff --git a/ansible/roles/storage/tasks/main.yml b/ansible/roles/storage/tasks/main.yml new file mode 100755 index 0000000..b48e676 --- /dev/null +++ b/ansible/roles/storage/tasks/main.yml @@ -0,0 +1,57 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: check if physical device exists + stat: path={{ physical_device }} + register: status + tags: + - storage + +- name: load loop.yml + include: loop.yml + when: status.stat.exists == False or status.stat.isblk == False + tags: + - storage + +- name: load real.yml + include: real.yml + when: status.stat.exists == True and status.stat.isblk == True + tags: + - storage + +- name: make setup_storage directory + file: path=/opt/setup_storage state=directory mode=0755 + tags: + - storage + +- name: copy setup storage scripts + copy: src={{ item }} dest=/opt/setup_storage mode=0755 + with_items: + - losetup.sh + tags: + - storage + +- name: set autostart file + copy: src=storage dest=/etc/init.d/storage mode=0755 + tags: + - storage + +- name: set autostart file for centos + copy: src=storage.service dest=/usr/lib/systemd/system/storage.service mode=0755 + when: ansible_os_family == "RedHat" + tags: + - storage + + +- name: enable service + service: name=storage enabled=yes + tags: + - storage + +- meta: flush_handlers diff --git a/ansible/roles/storage/tasks/real.yml b/ansible/roles/storage/tasks/real.yml new file mode 100755 index 0000000..e99f185 --- /dev/null +++ b/ansible/roles/storage/tasks/real.yml @@ -0,0 +1,16 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: destroy GPT label + shell: dd if=/dev/urandom of={{ physical_device }} bs=4M count=1 + ignore_errors: True + +- name: create physical and group volumes + lvg: vg=storage-volumes pvs={{ physical_device }} + vg_options=--force diff --git a/ansible/roles/tacker/tasks/main.yml b/ansible/roles/tacker/tasks/main.yml new file mode 100755 index 0000000..2759e96 --- /dev/null +++ b/ansible/roles/tacker/tasks/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: Install Tacker on Controller + include: tacker_controller.yml + when: inventory_hostname in groups['controller'] and ansible_os_family == "Debian" diff --git a/ansible/roles/tacker/tasks/tacker_controller.yml b/ansible/roles/tacker/tasks/tacker_controller.yml new file mode 100755 index 0000000..7bdc32e --- /dev/null +++ b/ansible/roles/tacker/tasks/tacker_controller.yml @@ -0,0 +1,128 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: get http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: create tacker_home, tacker_client_home, tacker_horizon_home + shell: > + mkdir -p /opt/tacker; + mkdir -p /opt/tacker_client; + mkdir -p /opt/tacker_horizon + +- name: download tacker package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_pkg_name }}" dest=/opt/{{ tacker_pkg_name }} + +- name: download tacker_client package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_client_pkg_name }}" dest=/opt/{{ tacker_client_pkg_name }} + +- name: download tacker_horizon package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_horizon_pkg_name }}" dest=/opt/{{ tacker_horizon_pkg_name }} + +- name: extract tacker package + command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_pkg_name }} -C {{ tacker_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" + +- name: extract tacker_client package + command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_client_pkg_name }} -C {{ tacker_client_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" + +- name: extract tacker_horizon package + command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_horizon_pkg_name }} -C {{ tacker_horizon_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" + +- name: edit ml2_conf.ini + shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security; + +- name: Restart neutron-server + service: name=neutron-server state=restarted + +- name: "create haproxy configuration for tacker" + template: + src:
"haproxy-tacker-cfg.j2" + dest: "/tmp/haproxy-tacker.cfg" + +- name: "combination of the haproxy configuration" + shell: "cat /tmp/haproxy-tacker.cfg >> /etc/haproxy/haproxy.cfg" + +- name: "delete temporary configuration file" + file: + dest: "/tmp/haproxy-tacker.cfg" + state: "absent" + +- name: "restart haproxy" + service: + name: "haproxy" + state: "restarted" + +- name: drop and recreate tacker database + shell: mysql -e "drop database if exists tacker;"; + mysql -e "create database tacker character set utf8;"; + mysql -e "grant all on tacker.* to 'tacker'@'%' identified by 'TACKER_DBPASS';"; + when: inventory_hostname == haproxy_hosts.keys()[0] + +- name: create tacker user with admin privileges + shell: . /opt/admin-openrc.sh; openstack user create --password console tacker; openstack role add --project service --user tacker admin; + when: inventory_hostname == haproxy_hosts.keys()[0] + +- name: creat tacker service + shell: > + . /opt/admin-openrc.sh; openstack service create --name tacker --description "Tacker Project" nfv-orchestration + when: inventory_hostname == haproxy_hosts.keys()[0] + +- name: provide an endpoint to tacker service + shell: > + . /opt/admin-openrc.sh; openstack endpoint create --region regionOne \ + --publicurl 'http://{{ public_vip.ip }}:8888/' \ + --adminurl 'http://{{ internal_vip.ip }}:8888/' \ + --internalurl 'http://{{ internal_vip.ip }}:8888/' tacker + when: inventory_hostname == haproxy_hosts.keys()[0] + +- name: install tacker + shell: > + . 
/opt/admin-openrc.sh; pip install tosca-parser; cd {{ tacker_home }}; python setup.py install + +# - name: create 'tacker' directory in '/var/cache', set ownership and permissions +# shell: > +# sudo mkdir /var/cache/tacker +# sudo chown :root /var/cache/tacker +# sudo chmod 700 /var/cache/tacker + +- name: create 'tacker' directory in '/var/log' + shell: mkdir -p /var/log/tacker + +- name: copy tacker configs + template: src={{ item.src }} dest=/opt/os_templates + with_items: "{{ tacker_configs_templates }}" + +- name: edit tacker configuration file + shell: crudini --merge /usr/local/etc/tacker/tacker.conf < /opt/os_templates/tacker.j2 + +#- name: populate tacker database +# shell: > +# . /opt/admin-openrc.sh; /usr/local/bin/tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head + +- name: install tacker client + shell: > + . /opt/admin-openrc.sh; cd {{ tacker_client_home }}; python setup.py install + +- name: install tacker horizon + shell: > + . /opt/admin-openrc.sh; cd {{ tacker_horizon_home }}; python setup.py install + +- name: enable tacker horizon in dashboard + shell: > + cp {{ tacker_horizon_home }}/openstack_dashboard_extensions/* /usr/share/openstack-dashboard/openstack_dashboard/enabled/ + +- name: restart apache server + shell: service apache2 restart + +- name: launch tacker-server + shell: > + . 
/opt/admin-openrc.sh; python /usr/local/bin/tacker-server --config-file /usr/local/etc/tacker/tacker.conf --log-file /var/log/tacker/tacker.log + async: 9999999999999 + poll: 0 diff --git a/ansible/roles/tacker/templates/haproxy-tacker-cfg.j2 b/ansible/roles/tacker/templates/haproxy-tacker-cfg.j2 new file mode 100644 index 0000000..93bbe79 --- /dev/null +++ b/ansible/roles/tacker/templates/haproxy-tacker-cfg.j2 @@ -0,0 +1,10 @@ +listen proxy-tacker_api_cluster + bind {{ internal_vip.ip }}:8888 + bind {{ public_vip.ip }}:8888 + mode tcp + option tcp-check + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8888 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} diff --git a/ansible/roles/tacker/templates/ml2_conf.j2 b/ansible/roles/tacker/templates/ml2_conf.j2 new file mode 100644 index 0000000..a5ccdaf --- /dev/null +++ b/ansible/roles/tacker/templates/ml2_conf.j2 @@ -0,0 +1,2 @@ +[ml2] +extension_drivers = port_security diff --git a/ansible/roles/tacker/templates/tacker.j2 b/ansible/roles/tacker/templates/tacker.j2 new file mode 100644 index 0000000..2e51496 --- /dev/null +++ b/ansible/roles/tacker/templates/tacker.j2 @@ -0,0 +1,29 @@ +[DEFAULT] +bind_host = {{ internal_ip }} +bind_port = 8888 +auth_strategy = keystone +policy_file = /usr/local/etc/tacker/policy.json +debug = True +verbose = True +use_syslog = False +state_path = /var/lib/tacker + +[keystone_authtoken] +password = console +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +project_name = service + +[agent] +root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf + +[DATABASE] +connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8 + +[servicevm_nova] +password = console +auth_url = http://{{ internal_vip.ip }}:35357 + +[servicevm_heat] +heat_uri = http://{{ internal_vip.ip }}:8004/v1 + diff --git a/ansible/roles/tacker/vars/Debian.yml 
b/ansible/roles/tacker/vars/Debian.yml new file mode 100755 index 0000000..59a4dbd --- /dev/null +++ b/ansible/roles/tacker/vars/Debian.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - software-properties-common + - crudini + +services: [] diff --git a/ansible/roles/tacker/vars/RedHat.yml b/ansible/roles/tacker/vars/RedHat.yml new file mode 100755 index 0000000..59a4dbd --- /dev/null +++ b/ansible/roles/tacker/vars/RedHat.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - software-properties-common + - crudini + +services: [] diff --git a/ansible/roles/tacker/vars/main.yml b/ansible/roles/tacker/vars/main.yml new file mode 100755 index 0000000..2df4ca3 --- /dev/null +++ b/ansible/roles/tacker/vars/main.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +tacker_pkg_name: tacker-2014.2.0.dev206.tar.gz +tacker_client_pkg_name: python-tackerclient-0.0.1.dev85.tar.gz +tacker_horizon_pkg_name: tacker-horizon-0.0.1.dev687.tar.gz +tacker_home: /opt/tacker/ +tacker_client_home: /opt/tacker_client/ +tacker_horizon_home: /opt/tacker_horizon/ + +tacker_configs_templates: + - src: tacker.j2 + dest: + - /usr/local/etc/tacker/tacker.conf diff --git a/cobbler/conf/modules.conf b/cobbler/conf/modules.conf index 8087910..fde469c 100644 --- a/cobbler/conf/modules.conf +++ b/cobbler/conf/modules.conf @@ -1,7 +1,7 @@ # cobbler module configuration file # ================================= -# authentication: +# authentication: # what users can log into the WebUI and Read-Write XMLRPC? # choices: # authn_denyall -- no one (default) @@ -22,7 +22,7 @@ [authentication] module = authn_configfile -# authorization: +# authorization: # once a user has been cleared by the WebUI/XMLRPC, what can they do? 
# choices: # authz_allowall -- full access for all authneticated users (default) @@ -64,7 +64,7 @@ module = manage_bind # NOTE: more configuration is still required in /etc/cobbler # for more information: # https://github.com/cobbler/cobbler/wiki/Dhcp-management - + [dhcp] module = manage_isc @@ -76,7 +76,7 @@ module = manage_isc # manage_in_tftpd -- default, uses the system's tftp server # manage_tftpd_py -- uses cobbler's tftp server # - + [tftpd] module = manage_in_tftpd diff --git a/cobbler/conf/settings b/cobbler/conf/settings index be8bddb..f6d0b96 100644 --- a/cobbler/conf/settings +++ b/cobbler/conf/settings @@ -52,8 +52,8 @@ build_reporting_smtp_server: "localhost" build_reporting_subject: "" # Cheetah-language kickstart templates can import Python modules. -# while this is a useful feature, it is not safe to allow them to -# import anything they want. This whitelists which modules can be +# while this is a useful feature, it is not safe to allow them to +# import anything they want. This whitelists which modules can be # imported through Cheetah. Users can expand this as needed but # should never allow modules such as subprocess or those that # allow access to the filesystem as Cheetah templates are evaluated @@ -91,13 +91,13 @@ default_ownership: # systems that reference this variable. The factory # default is "cobbler" and cobbler check will warn if # this is not changed. -# The simplest way to change the password is to run +# The simplest way to change the password is to run # openssl passwd -1 # and put the output between the "" below. default_password_crypted: "$1$huawei$9OkoVJwO4W8vavlXd1bUS/" # the default template type to use in the absence of any -# other detected template. If you do not specify the template +# other detected template. If you do not specify the template # with '#template=' on the first line of your # templates/snippets, cobbler will assume try to use the # following template engine to parse the templates. 
@@ -126,8 +126,8 @@ default_virt_ram: 512 default_virt_type: xenpv # enable gPXE booting? Enabling this option will cause cobbler -# to copy the undionly.kpxe file to the tftp root directory, -# and if a profile/system is configured to boot via gpxe it will +# to copy the undionly.kpxe file to the tftp root directory, +# and if a profile/system is configured to boot via gpxe it will # chain load off pxelinux.0. # Default: 0 enable_gpxe: 0 @@ -137,13 +137,13 @@ enable_gpxe: 0 # basis when adding/editing profiles with --enable-menu=0/1. Users # should ordinarily leave this setting enabled unless they are concerned # with accidental reinstalls from users who select an entry at the PXE -# boot menu. Adding a password to the boot menus templates +# boot menu. Adding a password to the boot menus templates # may also be a good solution to prevent unwanted reinstallations enable_menu: 0 # enable Func-integration? This makes sure each installed machine is set up # to use func out of the box, which is a powerful way to script and control -# remote machines. +# remote machines. # Func lives at http://fedorahosted.org/func # read more at https://github.com/cobbler/cobbler/wiki/Func-integration # you will need to mirror Fedora/EPEL packages for this feature, so see @@ -190,7 +190,7 @@ ldap_tls_keyfile: '' ldap_tls_certfile: '' # cobbler has a feature that allows for integration with config management -# systems such as Puppet. The following parameters work in conjunction with +# systems such as Puppet. 
The following parameters work in conjunction with # --mgmt-classes and are described in furhter detail at: # https://github.com/cobbler/cobbler/wiki/Using-cobbler-with-a-configuration-management-system mgmt_classes: [] @@ -279,7 +279,7 @@ power_management_default_type: 'ipmitool' power_template_dir: "/etc/cobbler/power" # if this setting is set to 1, cobbler systems that pxe boot -# will request at the end of their installation to toggle the +# will request at the end of their installation to toggle the # --netboot-enabled record in the cobbler system record. This eliminates # the potential for a PXE boot loop if the system is set to PXE # first in it's BIOS order. Enable this if PXE is first in your BIOS @@ -291,7 +291,7 @@ pxe_just_once: 1 # from what directory? pxe_template_dir: "/etc/cobbler/pxe" -# Path to where system consoles are +# Path to where system consoles are consoles: "/var/consoles" # Are you using a Red Hat management platform in addition to Cobbler? @@ -313,12 +313,12 @@ redhat_management_server: "xmlrpc.rhn.redhat.com" # specify the default Red Hat authorization key to use to register # system. If left blank, no registration will be attempted. Similarly -# you can set the --redhat-management-key to blank on any system to +# you can set the --redhat-management-key to blank on any system to # keep it from trying to register. redhat_management_key: "" -# if using authn_spacewalk in modules.conf to let cobbler authenticate -# against Satellite/Spacewalk's auth system, by default it will not allow per user +# if using authn_spacewalk in modules.conf to let cobbler authenticate +# against Satellite/Spacewalk's auth system, by default it will not allow per user # access into Cobbler Web and Cobbler XMLRPC. 
# in order to permit this, the following setting must be enabled HOWEVER # doing so will permit all Spacewalk/Satellite users of certain types to edit all @@ -369,7 +369,7 @@ run_install_triggers: 1 # enables a trigger which version controls all changes to /var/lib/cobbler # when add, edit, or sync events are performed. This can be used # to revert to previous database versions, generate RSS feeds, or for -# other auditing or backup purposes. "git" and "hg" are currently suported, +# other auditing or backup purposes. "git" and "hg" are currently suported, # but git is the recommend SCM for use with this feature. scm_track_enabled: 0 scm_track_mode: "git" @@ -436,7 +436,7 @@ yum_post_install_mirror: 1 # if yum-priorities plugin is used. 1=maximum. Tweak with caution. yum_distro_priority: 1 -# Flags to use for yumdownloader. Not all versions may support +# Flags to use for yumdownloader. Not all versions may support # --resolve. yumdownloader_flags: "--resolve" diff --git a/cobbler/conf/tftpd.template b/cobbler/conf/tftpd.template index 31f4d36..98c1e9a 100644 --- a/cobbler/conf/tftpd.template +++ b/cobbler/conf/tftpd.template @@ -13,7 +13,7 @@ service tftp user = $user server = $binary server_args = -B 1380 -v -s $args - instances = 1000 + instances = 1000 per_source = 1000 cps = 1000 2 flags = IPv4 diff --git a/cobbler/kickstarts/default.ks b/cobbler/kickstarts/default.ks index cac02a3..ecd877b 100644 --- a/cobbler/kickstarts/default.ks +++ b/cobbler/kickstarts/default.ks @@ -85,7 +85,7 @@ $SNIPPET('kickstart_pre_anamon') # Packages %packages --nobase -@core +@core iproute ntp openssh-clients @@ -93,10 +93,11 @@ wget yum-plugin-priorities json-c libestr -libgt -liblogging rsyslog parted +vim +lsof +strace #if $os_version == "rhel7" net-tools #end if diff --git a/cobbler/kickstarts/default.seed b/cobbler/kickstarts/default.seed index 7461f83..f65b20b 100644 --- a/cobbler/kickstarts/default.seed +++ b/cobbler/kickstarts/default.seed @@ -13,6 +13,11 @@ d-i 
debian-installer/locale string en_US d-i debian-installer/country string US d-i debian-installer/language string en +d-i debian-installer/splash boolean false +d-i debian-installer/quiet boolean false +d-i debian-installer/framebuffer boolean true +d-i hw-detect/load_firmware boolean true + # Keyboard selection. # Disable automatic (interactive) keymap detection. d-i console-setup/ask_detect boolean false @@ -132,6 +137,8 @@ d-i cdrom-detect/eject boolean false # packages and run commands in the target system. # d-i preseed/late_command string [command] d-i preseed/late_command string \ +in-target sed -i '$a UseDNS no' /etc/ssh/sshd_config; \ +in-target sed -i 's/.*GSSAPIAuthentication.*/GSSAPIAuthentication no/g' /etc/ssh/sshd_config; \ wget -O- \ http://$http_server/cblr/svc/op/script/$what/$name/?script=preseed_late_default | \ chroot /target /bin/sh -s; cp /target/etc/network/interfaces /etc/network/interfaces diff --git a/cobbler/snippets/hosts.xml b/cobbler/snippets/hosts.xml index e3b578f..7fd4ab6 100644 --- a/cobbler/snippets/hosts.xml +++ b/cobbler/snippets/hosts.xml @@ -22,4 +22,4 @@ #end for #end if - + diff --git a/cobbler/snippets/kdump.xml b/cobbler/snippets/kdump.xml index f03c988..0cffe97 100644 --- a/cobbler/snippets/kdump.xml +++ b/cobbler/snippets/kdump.xml @@ -3,33 +3,33 @@ true 256M-2G:64M,2G-:128M - + file:///var/crash true 64 4 - + compressed 31 - + - + - + yes 3 - + diff --git a/cobbler/snippets/keep_cfengine_keys b/cobbler/snippets/keep_cfengine_keys index 78116ab..d2c5622 100644 --- a/cobbler/snippets/keep_cfengine_keys +++ b/cobbler/snippets/keep_cfengine_keys @@ -11,8 +11,8 @@ keys_found=no # /var could be a separate partition SHORTDIR=${SEARCHDIR#/var} if [ $SHORTDIR = $SEARCHDIR ]; then - SHORTDIR='' -fi + SHORTDIR='' +fi insmod /lib/jbd.o insmod /lib/ext3.o @@ -32,13 +32,13 @@ function findkeys # Copy current host keys out to be reused if [ -d /tmp/$tmpdir$SEARCHDIR ] && cp -a /tmp/$tmpdir$SEARCHDIR/${PATTERN}* /tmp/$TEMPDIR; then 
keys_found="yes" - umount /tmp/$tmpdir - rm -r /tmp/$tmpdir - break - elif [ -n "$SHORTDIR" ] && [ -d /tmp/$tmpdir$SHORTDIR ] && cp -a /tmp/$tmpdir$SHORTDIR/${PATTERN}* /tmp/$TEMPDIR; then - keys_found="yes" umount /tmp/$tmpdir - rm -r /tmp/$tmpdir + rm -r /tmp/$tmpdir + break + elif [ -n "$SHORTDIR" ] && [ -d /tmp/$tmpdir$SHORTDIR ] && cp -a /tmp/$tmpdir$SHORTDIR/${PATTERN}* /tmp/$TEMPDIR; then + keys_found="yes" + umount /tmp/$tmpdir + rm -r /tmp/$tmpdir break fi umount /tmp/$tmpdir @@ -71,9 +71,9 @@ if [ "$keys_found" = "no" ]; then # Activate any VG we found lvm vgchange -ay $vg done - + DISKS=$(lvm lvs | tail -n +2 | awk '{ print "/dev/" $2 "/" $1 }') - findkeys + findkeys # And clean up.. for vg in $vgs; do diff --git a/cobbler/snippets/keep_files b/cobbler/snippets/keep_files index 858db5d..d0e5e07 100644 --- a/cobbler/snippets/keep_files +++ b/cobbler/snippets/keep_files @@ -12,9 +12,9 @@ ## #if $getVar('$preserve_files','') != '' - #set $preserve_files = $getVar('$preserve_files','') - preserve_files = $preserve_files - + #set $preserve_files = $getVar('$preserve_files','') + preserve_files = $preserve_files + #raw # Nifty trick to restore keys without using a nochroot %post @@ -31,19 +31,19 @@ function findkeys mkdir -p /tmp/$tmpdir mount $disk /tmp/$tmpdir if [ $? 
-ne 0 ]; then # Skip to the next partition if the mount fails - rm -rf /tmp/$tmpdir - continue - fi + rm -rf /tmp/$tmpdir + continue + fi # Copy current host keys out to be reused if [ -d /tmp/$tmpdir$SEARCHDIR ] && cp -a /tmp/$tmpdir$SEARCHDIR/${PATTERN}* /tmp/$TEMPDIR; then keys_found="yes" - umount /tmp/$tmpdir - rm -r /tmp/$tmpdir - break + umount /tmp/$tmpdir + rm -r /tmp/$tmpdir + break elif [ -n "$SHORTDIR" ] && [ -d /tmp/$tmpdir$SHORTDIR ] && cp -a /tmp/$tmpdir$SHORTDIR/${PATTERN}* /tmp/$TEMPDIR; then - keys_found="yes" + keys_found="yes" umount /tmp/$tmpdir - rm -r /tmp/$tmpdir + rm -r /tmp/$tmpdir break fi umount /tmp/$tmpdir @@ -62,8 +62,8 @@ function search_for_keys # /var could be a separate partition SHORTDIR=${SEARCHDIR#/var} if [ $SHORTDIR = $SEARCHDIR ]; then - SHORTDIR='' - fi + SHORTDIR='' + fi mkdir -p /tmp/$TEMPDIR @@ -92,9 +92,9 @@ function search_for_keys # Activate any VG we found lvm vgchange -ay $vg done - + DISKS=$(lvm lvs | tail -n +2 | awk '{ print "/dev/" $2 "/" $1 }') - findkeys + findkeys # And clean up.. for vg in $vgs; do diff --git a/cobbler/snippets/keep_rhn_keys b/cobbler/snippets/keep_rhn_keys index 59bfc5d..46f7c99 100644 --- a/cobbler/snippets/keep_rhn_keys +++ b/cobbler/snippets/keep_rhn_keys @@ -48,7 +48,7 @@ if [ "$rhn_keys_found" = "no" ]; then # Activate any VG we found lvm vgchange -ay $vg done - + lvs=$(lvm lvs | tail -n +2 | awk '{ print "/dev/" $2 "/" $1 }') for lv in $lvs; do tmpdir=$(mktemp -d findkeys.XXXXXX) @@ -67,7 +67,7 @@ if [ "$rhn_keys_found" = "no" ]; then umount /tmp/${tmpdir} rm -r /tmp/${tmpdir} done - + # And clean up.. 
for vg in $vgs; do lvm vgchange -an $vg diff --git a/cobbler/snippets/keep_ssh_host_keys b/cobbler/snippets/keep_ssh_host_keys index 2c01c69..7597047 100644 --- a/cobbler/snippets/keep_ssh_host_keys +++ b/cobbler/snippets/keep_ssh_host_keys @@ -11,8 +11,8 @@ keys_found=no # /var could be a separate partition SHORTDIR=${SEARCHDIR#/var} if [ $SHORTDIR = $SEARCHDIR ]; then - SHORTDIR='' -fi + SHORTDIR='' +fi insmod /lib/jbd.o insmod /lib/ext3.o @@ -27,19 +27,19 @@ function findkeys mkdir -p /tmp/$tmpdir mount $disk /tmp/$tmpdir if [ $? -ne 0 ]; then # Skip to the next partition if the mount fails - rm -rf /tmp/$tmpdir - continue - fi + rm -rf /tmp/$tmpdir + continue + fi # Copy current host keys out to be reused - if [ -d /tmp/$tmpdir$SEARCHDIR ] && cp -a /tmp/$tmpdir$SEARCHDIR/${PATTERN}* /tmp/$TEMPDIR; then + if [ -d /tmp/$tmpdir$SEARCHDIR ] && cp -a /tmp/$tmpdir$SEARCHDIR/${PATTERN}* /tmp/$TEMPDIR; then keys_found="yes" - umount /tmp/$tmpdir - rm -r /tmp/$tmpdir - break - elif [ -n "$SHORTDIR" ] && [ -d /tmp/$tmpdir$SHORTDIR ] && cp -a /tmp/$tmpdir$SHORTDIR/${PATTERN}* /tmp/$TEMPDIR; then - keys_found="yes" + umount /tmp/$tmpdir + rm -r /tmp/$tmpdir + break + elif [ -n "$SHORTDIR" ] && [ -d /tmp/$tmpdir$SHORTDIR ] && cp -a /tmp/$tmpdir$SHORTDIR/${PATTERN}* /tmp/$TEMPDIR; then + keys_found="yes" umount /tmp/$tmpdir - rm -r /tmp/$tmpdir + rm -r /tmp/$tmpdir break fi umount /tmp/$tmpdir @@ -60,7 +60,7 @@ if [ "$keys_found" = "no" ]; then if mdadm -As; then DISKS=$(awk '/md/{print "/dev/"$1}' /proc/mdstat) findkeys - # unmount and deactivate all md + # unmount and deactivate all md for md in $DISKS ; do umount $md mdadm -S $md @@ -77,9 +77,9 @@ if [ "$keys_found" = "no" ]; then # Activate any VG we found lvm vgchange -ay $vg done - + DISKS=$(lvm lvs | tail -n +2 | awk '{ print "/dev/" $2 "/" $1 }') - findkeys + findkeys # And clean up.. 
for vg in $vgs; do diff --git a/cobbler/snippets/kickstart_chef_run.sh b/cobbler/snippets/kickstart_chef_run.sh index fef691a..6b2e030 100644 --- a/cobbler/snippets/kickstart_chef_run.sh +++ b/cobbler/snippets/kickstart_chef_run.sh @@ -32,8 +32,8 @@ PIDFILE=/tmp/chef_client_run.pid if [ -f \\$PIDFILE ]; then pid=\\$(cat \\$PIDFILE) if [ -f /proc/\\$pid/exe ]; then - echo "there are chef_client_run.sh running with pid \\$pid" >> /var/log/chef.log 2>&1 - exit 1 + echo "there are chef_client_run.sh running with pid \\$pid" >> /var/log/chef.log 2>&1 + exit 1 fi fi echo \\$$ > \\$PIDFILE @@ -53,7 +53,7 @@ while true; do let all_nodes_success=1 for node in \\$nodes; do mkdir -p /var/log/chef/\\$node - if [ ! -f /etc/chef/\\$node.json ]; then + if [ ! -f /etc/chef/\\$node.json ]; then cat << EOL > /etc/chef/\\$node.json { "local_repo": "$local_repo_url", diff --git a/cobbler/snippets/kickstart_client.rb b/cobbler/snippets/kickstart_client.rb index e6495d0..568ba46 100644 --- a/cobbler/snippets/kickstart_client.rb +++ b/cobbler/snippets/kickstart_client.rb @@ -12,7 +12,7 @@ chef_server_url 'https://$server' validation_client_name 'chef-validator' json_attribs nil pid_file '/var/run/chef-client.pid' -# Using default node name (fqdn) +# Using default node name (fqdn) no_lazy_load true ssl_verify_mode :verify_none #if $os_version == "rhel7" @@ -33,7 +33,7 @@ mkdir -p /etc/chef/trusted_certs cat << EOF > /etc/chef/trusted_certs/$filename #echo $f.read() EOF - #silent $f.close() + #silent $f.close() #end if #end for #end if diff --git a/cobbler/snippets/kickstart_knife.rb b/cobbler/snippets/kickstart_knife.rb index 94d4528..e4ab081 100644 --- a/cobbler/snippets/kickstart_knife.rb +++ b/cobbler/snippets/kickstart_knife.rb @@ -29,7 +29,7 @@ mkdir -p /root/.chef/trusted_certs cat << EOF > /root/.chef/trusted_certs/$filename #echo $f.read() EOF - #silent $f.close() + #silent $f.close() #end if #end for #end if diff --git a/cobbler/snippets/kickstart_limits.conf 
b/cobbler/snippets/kickstart_limits.conf index 0b116f3..00cf861 100644 --- a/cobbler/snippets/kickstart_limits.conf +++ b/cobbler/snippets/kickstart_limits.conf @@ -48,7 +48,7 @@ cat << EOF > /etc/security/limits.conf #@faculty hard nproc 50 #ftp hard nproc 0 #@student - maxlogins 4 -* - nofile 100000 +* - nofile 100000 # End of file #end raw EOF diff --git a/cobbler/snippets/kickstart_network_config b/cobbler/snippets/kickstart_network_config index 6de06e5..c4bb47e 100644 --- a/cobbler/snippets/kickstart_network_config +++ b/cobbler/snippets/kickstart_network_config @@ -1,6 +1,6 @@ ## start of cobbler network_config generated code #if $getVar("system_name","") != "" -# Using "new" style networking config, by matching networking information to the physical interface's +# Using "new" style networking config, by matching networking information to the physical interface's # MAC-address %include /tmp/pre_install_network_config #end if diff --git a/cobbler/snippets/kickstart_ntp b/cobbler/snippets/kickstart_ntp index 120a311..2cbf75e 100644 --- a/cobbler/snippets/kickstart_ntp +++ b/cobbler/snippets/kickstart_ntp @@ -7,10 +7,10 @@ cat << EOF > /etc/ntp.conf # For more information about this file, see the man pages # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). -# Include the option tinker panic 0 at the top of your ntp.conf file. +# Include the option tinker panic 0 at the top of your ntp.conf file. # By default, the NTP daemon sometimes panics and exits if the underlying clock -# appears to be behaving erratically. This option causes the daemon to keep -# running instead of panicking. +# appears to be behaving erratically. This option causes the daemon to keep +# running instead of panicking. tinker panic 0 driftfile /var/lib/ntp/drift @@ -23,7 +23,7 @@ restrict -6 default kod nomodify notrap nopeer noquery # Permit all access over the loopback interface. 
This could # be tightened as well, but to do so would effect some of # the administrative functions. -restrict 127.0.0.1 +restrict 127.0.0.1 restrict -6 ::1 # Hosts on local network are less restricted. @@ -36,16 +36,16 @@ restrict -6 ::1 # server 2.centos.pool.ntp.org server $ntp_server -# broadcast 192.168.1.255 autokey # broadcast server -# broadcastclient # broadcast client -# broadcast 224.0.1.1 autokey # multicast server -# multicastclient 224.0.1.1 # multicast client -# manycastserver 239.255.254.254 # manycast server +# broadcast 192.168.1.255 autokey # broadcast server +# broadcastclient # broadcast client +# broadcast 224.0.1.1 autokey # multicast server +# multicastclient 224.0.1.1 # multicast client +# manycastserver 239.255.254.254 # manycast server # manycastclient 239.255.254.254 autokey # manycast client # Undisciplined Local Clock. This is a fake driver intended for backup -# and when no outside source of synchronized time is available. -server 127.127.1.0 # local clock +# and when no outside source of synchronized time is available. +server 127.127.1.0 # local clock # Enable public key cryptography. # crypto @@ -53,7 +53,7 @@ server 127.127.1.0 # local clock includefile /etc/ntp/crypto/pw # Key file containing the keys and key identifiers used when operating -# with symmetric key cryptography. +# with symmetric key cryptography. keys /etc/ntp/keys # Specify the key identifiers which are trusted. 
diff --git a/cobbler/snippets/kickstart_post_anamon b/cobbler/snippets/kickstart_post_anamon index 9fbf1bf..699e830 100644 --- a/cobbler/snippets/kickstart_post_anamon +++ b/cobbler/snippets/kickstart_post_anamon @@ -87,3 +87,5 @@ test -d /selinux && restorecon /etc/init.d/set_state ## enable the script chkconfig set_state on + +echo "compass_server=$server" >> /etc/compass.conf diff --git a/cobbler/snippets/kickstart_post_install_network_config b/cobbler/snippets/kickstart_post_install_network_config index c22225a..2c089f9 100644 --- a/cobbler/snippets/kickstart_post_install_network_config +++ b/cobbler/snippets/kickstart_post_install_network_config @@ -25,7 +25,7 @@ fi exit \$RC #end raw EOF -chmod +x /sbin/ifup-local +chmod +x /sbin/ifup-local #if $hostname != "" # set the hostname in the network configuration file @@ -147,18 +147,18 @@ for logical_interface in \${!logical_interface_mapping[@]}; do physical_interface=\${logical_interface_mapping[\${logical_interface}]} if [ -z "\${physical_interface}" ]; then # check if the same name physical interface is mapped - mapped_logical_interface=\${physical_interface_mapping[\${logical_interface}]} - if [ -z "\${mapped_logical_interface}" ]; then + mapped_logical_interface=\${physical_interface_mapping[\${logical_interface}]} + if [ -z "\${mapped_logical_interface}" ]; then # check if the same name physical interface exists if [ ! 
-z "\${physical_interfaces[\${logical_interface}]}" ]; then logical_interface_mapping[\${logical_interface}]=\${logical_interface} - physical_interface_mapping[\${logical_interface}]=\${logical_interface} - else - echo "ignore logical interface \${logical_interface} since the same name physical interface does not exist" >> /tmp/network_log + physical_interface_mapping[\${logical_interface}]=\${logical_interface} + else + echo "ignore logical interface \${logical_interface} since the same name physical interface does not exist" >> /tmp/network_log fi - else - echo "ignore logical interface \${logical_interface} since the same name physical interface is mapped by logical interface \${mapped_logical_interface}" >> /tmp/network_log - fi + else + echo "ignore logical interface \${logical_interface} since the same name physical interface is mapped by logical interface \${mapped_logical_interface}" >> /tmp/network_log + fi else echo "ignore logical interface \${logical_interface} since it is mapped to physical interface \${physical_interface}" >> /tmp/network_log fi @@ -235,22 +235,22 @@ if [ \${#sorted_unset_logical_interfaces[@]} -gt 0 ]; then # get all available logical interfaces which the same name physical interface is not used for logical_interface in \${logical_interfaces[@]}; do mapped_logical_interface=\${physical_interface_mapping[\${logical_interface}]} - if [ -z "\${mapped_logical_interface}" ]; then - available_logical_interfaces[\${logical_interface}]=\${logical_interface} - else - echo "ignore logical interface \${logical_interface} since the same name physical interface mapped to logical interface \${mapped_logical_interface}" >> /tmp/network_log - fi + if [ -z "\${mapped_logical_interface}" ]; then + available_logical_interfaces[\${logical_interface}]=\${logical_interface} + else + echo "ignore logical interface \${logical_interface} since the same name physical interface mapped to logical interface \${mapped_logical_interface}" >> /tmp/network_log + fi 
done #first map logical interface to the same name physical interface if that physical interface name is not used for logical_interface in \${sorted_unset_logical_interfaces[@]}; do available_logical_interface=\${available_logical_interfaces[\${logical_interface}]} - if [ ! -z "\${available_logical_interface}" ]; then - unset unset_logical_interfaces[\${logical_interface}] - unset available_logical_interfaces[\${available_logical_interface}] - logical_interface_mapping[\${logical_interface}]=\${available_logical_interface} - physical_interface_mapping[\${available_logical_interface}]=\${logical_interface} - fi + if [ ! -z "\${available_logical_interface}" ]; then + unset unset_logical_interfaces[\${logical_interface}] + unset available_logical_interfaces[\${available_logical_interface}] + logical_interface_mapping[\${logical_interface}]=\${available_logical_interface} + physical_interface_mapping[\${available_logical_interface}]=\${logical_interface} + fi done echo "finish mapping ramaining unmapped logical interfaces to the same name physical interface" >> /tmp/network_log @@ -272,14 +272,14 @@ if [ \${#sorted_unset_logical_interfaces[@]} -gt 0 ]; then echo "sorted available logical interfaces: \${sorted_available_logical_interfaces[@]}" >> /tmp/network_log while [ \${#sorted_unset_logical_interfaces[@]} -gt 0 -a \${#sorted_available_logical_interfaces[@]} -gt 0 ]; do logical_interface=\${sorted_unset_logical_interfaces[0]} - available_logical_interface=\${sorted_available_logical_interfaces[0]} - echo "map logical interface \${logical_interface} to unused physical interface \${available_logical_interface}" >> /tmp/network_log + available_logical_interface=\${sorted_available_logical_interfaces[0]} + echo "map logical interface \${logical_interface} to unused physical interface \${available_logical_interface}" >> /tmp/network_log unset sorted_unset_logical_interfaces[0] - unset unset_logical_interfaces[\${logical_interface}] - unset 
sorted_available_logical_interfaces[0] - unset available_logical_interfaces[\${available_logical_interface}] - logical_interface_mapping[\${logical_interface}]=\${available_logical_interface} - physical_interface_mapping[\${available_logical_interface}]=\${logical_interface} + unset unset_logical_interfaces[\${logical_interface}] + unset sorted_available_logical_interfaces[0] + unset available_logical_interfaces[\${available_logical_interface}] + logical_interface_mapping[\${logical_interface}]=\${available_logical_interface} + physical_interface_mapping[\${available_logical_interface}]=\${logical_interface} done fi @@ -302,22 +302,22 @@ if [ \${#sorted_unset_physical_interfaces[@]} -gt 0 ]; then # get all available physical interfaces which the same name logical interface is not used for physical_interface in \${physical_interfaces[@]}; do mapped_physical_interface=\${logical_interface_mapping[\${physical_interface}]} - if [ -z "\${mapped_physical_interface}" ]; then - available_physical_interfaces[\${physical_interface}]=\${physical_interface} - else - echo "ignore physical interface \${physical_interface} since the same name logical interface mapped to physical interface \${mapped_physical_interface}" >> /tmp/network_log - fi + if [ -z "\${mapped_physical_interface}" ]; then + available_physical_interfaces[\${physical_interface}]=\${physical_interface} + else + echo "ignore physical interface \${physical_interface} since the same name logical interface mapped to physical interface \${mapped_physical_interface}" >> /tmp/network_log + fi done #first map physical interface to the same name logical interface if that logical interface name is not used for physical_interface in \${sorted_unset_physical_interfaces[@]}; do available_physical_interface=\${available_physical_interfaces[\${physical_interface}]} - if [ ! 
-z "\${available_physical_interface}" ]; then - unset unset_physical_interfaces[\${physical_interface}] - unset available_physical_interfaces[\${available_physical_interface}] - logical_interface_mapping[\${available_physical_interface}]=\${physical_interface} - physical_interface_mapping[\${physical_interface}]=\${available_physical_interface} - fi + if [ ! -z "\${available_physical_interface}" ]; then + unset unset_physical_interfaces[\${physical_interface}] + unset available_physical_interfaces[\${available_physical_interface}] + logical_interface_mapping[\${available_physical_interface}]=\${physical_interface} + physical_interface_mapping[\${physical_interface}]=\${available_physical_interface} + fi done echo "finish mapping ramaining unmapped physical interfaces to the same name logical interface" >> /tmp/network_log for key in \${!logical_interface_mapping[@]}; do @@ -338,14 +338,14 @@ if [ \${#sorted_unset_physical_interfaces[@]} -gt 0 ]; then echo "sorted available physical interfaces: \${sorted_available_physical_interfaces[@]}" >> /tmp/network_log while [ \${#sorted_unset_physical_interfaces[@]} -gt 0 -a \${#sorted_available_physical_interfaces[@]} -gt 0 ]; do physical_interface=\${sorted_unset_physical_interfaces[0]} - available_physical_interface=\${sorted_available_physical_interfaces[0]} - echo "map physical interface \${physical_interface} to unused logical interface \${available_physical_interface}" >> /tmp/network_log + available_physical_interface=\${sorted_available_physical_interfaces[0]} + echo "map physical interface \${physical_interface} to unused logical interface \${available_physical_interface}" >> /tmp/network_log unset sorted_unset_physical_interfaces[0] - unset unset_physical_interfaces[\${physical_interface}] - unset sorted_available_physical_interfaces[0] - unset available_physical_interfaces[\${available_physical_interface}] - physical_interface_mapping[\${available_physical_interface}]=\${physical_interface} - 
logical_interface_mapping[\${physical_interface}]=\${available_physical_interface} + unset unset_physical_interfaces[\${physical_interface}] + unset sorted_available_physical_interfaces[0] + unset available_physical_interfaces[\${available_physical_interface}] + physical_interface_mapping[\${available_physical_interface}]=\${physical_interface} + logical_interface_mapping[\${physical_interface}]=\${available_physical_interface} done fi @@ -385,7 +385,7 @@ for key in \${!logical_interface_mapping[@]}; do if [ ! -z "\${physical_mac}" ]; then physical_mac=\${physical_mac,,} echo "SUBSYSTEM==\"net\", ACTION==\"add\", DRIVERS==\"?*\", ATTR{address}==\"\${physical_mac}\", ATTR{type}==\"1\", KERNEL==\"eth*\", NAME=\"\$key\"" >> /etc/udev/rules.d/\${udev_network_rule_filename}.new - echo "add network interface \$key mac \$physical_mac into udev rules" >> /tmp/network_log + echo "add network interface \$key mac \$physical_mac into udev rules" >> /tmp/network_log else echo "network interface \$key does not find mac address to add to udev rules" >> /tmp/network_log fi @@ -508,7 +508,7 @@ echo "GATEWAY=$if_gateway" >> $devfile #end if #if $netmask == "" - ## Default to 255.255.255.0? + ## Default to 255.255.255.0? 
#set $netmask = "255.255.255.0" #end if echo "NETMASK=$netmask" >> $devfile @@ -538,9 +538,9 @@ echo "MTU=$mtu" >> $devfile #set $nct = $nct + 1 echo "DNS$nct=$nameserver" >> $devfile #end for - #set $nameserver_set = 1 + #set $nameserver_set = 1 #end if - #end if + #end if #for $route in $static_routes #set routepattern = $re.compile("[0-9/.]+:[0-9.]+") diff --git a/cobbler/snippets/kickstart_pre_install_network_config b/cobbler/snippets/kickstart_pre_install_network_config index 8c24dc3..34d670f 100644 --- a/cobbler/snippets/kickstart_pre_install_network_config +++ b/cobbler/snippets/kickstart_pre_install_network_config @@ -33,7 +33,7 @@ get_ifname() { #set ikeys = $interfaces.keys() #for $iname in $ikeys #set $idata = $interfaces[$iname] - #set $management = $idata["management"] + #set $management = $idata["management"] #if $management #set $management_nic = $iname #end if @@ -67,10 +67,10 @@ then #end if #set $netinfo = "--bootproto=static --ip=%s --netmask=%s" % ($ip, $netmask) #if $gateway != "" - #set $netinfo = "%s --gateway=%s" % ($netinfo, $gateway) - #end if - #if $len($name_servers) > 0 - #set $netinfo = "%s --nameserver=%s" % ($netinfo, $name_servers[0]) + #set $netinfo = "%s --gateway=%s" % ($netinfo, $gateway) + #end if + #if $len($name_servers) > 0 + #set $netinfo = "%s --nameserver=%s" % ($netinfo, $name_servers[0]) #end if #else if not $static #set $netinfo = "--bootproto=dhcp" diff --git a/cobbler/snippets/kickstart_pre_partition_disks b/cobbler/snippets/kickstart_pre_partition_disks index 07108ac..6b1c9ce 100644 --- a/cobbler/snippets/kickstart_pre_partition_disks +++ b/cobbler/snippets/kickstart_pre_partition_disks @@ -34,14 +34,14 @@ if [ -e /dev/disk/by-path ]; then #end if path_name=\$(basename \$1) disk_name=\$(basename \$2) - let disk_mapping_offset=\$disk_mapping_offset+1 + let disk_mapping_offset=\$disk_mapping_offset+1 shift 2 if [ \$found_disk_type -gt 0 ]; then disk_mapping[\${disk_name}]="/dev/disk/by-path/\${path_name}" - 
disk_mapping[\${path_name}]="/dev/disk/by-path/\${path_name}" - disk_path_mapping[\${disk_name}]="/dev/\${disk_name}" - disk_path_mapping[\${path_name}]="/dev/\${disk_name}" + disk_mapping[\${path_name}]="/dev/disk/by-path/\${path_name}" + disk_path_mapping[\${disk_name}]="/dev/\${disk_name}" + disk_path_mapping[\${path_name}]="/dev/\${disk_name}" else ignore_disk_mapping[\${disk_name}]="/dev/disk/by-path/\${path_name}" ignore_disk_mapping[\${path_name}]="/dev/disk/by-path/\${path_name}" @@ -61,7 +61,7 @@ else echo "/dev/disk/by-path does not exist" >> /tmp/log fi -declare -A partition_disks +declare -A partition_disks declare -A disks set \$(list-harddrives) let disk_nums=\$#/2 @@ -108,13 +108,13 @@ while [ \$disk_offset -lt \$disk_nums ]; do if [ \${found_disk} -gt 0 ]; then echo "add disk \${disk_name} in partitioning list" >> /tmp/log partition_disks[\${found_disk_offset}]=\$disk - let found_disk_offset=\${found_disk_offset}+1 + let found_disk_offset=\${found_disk_offset}+1 fi done echo "partition disks \${partition_disks[@]}" >> /tmp/log echo "disks \${disks[@]}" >> /tmp/log -#if $getVar('sort_disks', '0') != "0" +#if $getVar('sort_disks', '0') != "0" sorted_disks=(\$(printf '%s\n' \${partition_disks[@]} | sort)) #else sorted_disks=(\${partition_disks[@]}) @@ -192,16 +192,16 @@ for disk_partition in \${disk_partitions}; do for remove_disk in \${remove_disks[@]}; do #if $getVar('partition_by_path', '0') != "0" path_name=\$(basename \${remove_disk}) - remove_disk_path=\${remove_disk} + remove_disk_path=\${remove_disk} remove_disk=\${disk_path_mapping[\${path_name}]} #else disk_name=\$(basename \${remove_disk}) remove_disk_path=\${disk_mapping[\${disk_name}]} #end if if [ -z "\${remove_disk}" ]; then - continue - fi - if [ -z "\${remove_disk_path}" ]; then + continue + fi + if [ -z "\${remove_disk_path}" ]; then continue fi if expr match "\${disk_partition}" "\${remove_disk_path}.*"; then @@ -216,9 +216,9 @@ for disk_partition in \${disk_partitions}; do else echo 
"partition \${disk_partition} does not match \${remove_disk}.*" >> /tmp/log fi - if [[ "\$vg" == "$vgname" ]]; then + if [[ "\$vg" == "$vgname" ]]; then remove_vg="\$vg" - remove_partition="\${disk_partition}" + remove_partition="\${disk_partition}" fi done if [ ! -z "\${remove_vg}" ]; then @@ -240,15 +240,15 @@ for disk_partition in \${disk_partitions}; do if [ -z "\${remove_partitions}" ]; then remove_partitions="\${remove_partition}" else - pv_removed=0 - for pv in ${remove_partitions}; do - if [[ "\$pv" == "\${remove_partition}" ]]; then - pv_removed=1 - fi - done - if [ \${pv_removed} -eq 0 ]; then + pv_removed=0 + for pv in ${remove_partitions}; do + if [[ "\$pv" == "\${remove_partition}" ]]; then + pv_removed=1 + fi + done + if [ \${pv_removed} -eq 0 ]; then remove_partitions="\${remove_partitions} \${remove_partition}" - fi + fi fi fi done @@ -269,17 +269,17 @@ declare -A reserve_disks_size #for disk_and_size in $disk_sizes #set disk_name, size = $disk_and_size.split(' ', 1) #set disk_name = $disk_name.strip() - #if $size.endswith('K') - #set disk_size = $int($size[:-1]) / 1000 - #elif size.endswith('M') - #set disk_size = $int($size[:-1]) - #elif $size.endswith('G') - #set disk_size = $int($size[:-1]) * 1000 - #elif $size.endswith('T') - #set disk_size = $int($size[:-1]) * 1000000 - #else - #set disk_size = $int($size) - #end if + #if $size.endswith('K') + #set disk_size = $int($size[:-1]) / 1000 + #elif size.endswith('M') + #set disk_size = $int($size[:-1]) + #elif $size.endswith('G') + #set disk_size = $int($size[:-1]) * 1000 + #elif $size.endswith('T') + #set disk_size = $int($size[:-1]) * 1000000 + #else + #set disk_size = $int($size) + #end if reserve_disks_size[${disk_name}]=${disk_size} #end for #end if @@ -303,17 +303,17 @@ declare -A max_disks_size #for disk_and_size in $disk_sizes #set disk_name, size = $disk_and_size.split(' ', 1) #set disk_name = $disk_name.strip() - #if $size.endswith('K') - #set disk_size = $int($size[:-1]) / 1000 - #elif 
$size.endswith('M') - #set disk_size = $int($size[:-1]) - #elif $size.endswith('G') - #set disk_size = $int($size[:-1]) * 1000 - #elif $size.endswith('T') - #set disk_size = $int($size[:-1]) * 1000000 - #else - #set disk_size = $int($size) - #end if + #if $size.endswith('K') + #set disk_size = $int($size[:-1]) / 1000 + #elif $size.endswith('M') + #set disk_size = $int($size[:-1]) + #elif $size.endswith('G') + #set disk_size = $int($size[:-1]) * 1000 + #elif $size.endswith('T') + #set disk_size = $int($size[:-1]) * 1000000 + #else + #set disk_size = $int($size) + #end if max_disks_size[${disk_name}]=${disk_size} #end for #end if @@ -341,14 +341,14 @@ declare -A partitions_maxsize #for vol_and_size in $vol_sizes #set vol, vol_size = $vol_and_size.split(' ', 1) #set vol = $vol.strip() - #if $vol == '/' - #set volname = 'root' - #elif $vol == 'swap' - #set volname = 'swap' - #elif $vol.startswith('/') + #if $vol == '/' + #set volname = 'root' + #elif $vol == 'swap' + #set volname = 'swap' + #elif $vol.startswith('/') #set volname = $vol[1:].replace('/', '_') - #else - #set volname = '' + #else + #set volname = '' # $vol is not starts with / #continue #end if @@ -358,17 +358,17 @@ partitions_name[$vol]=$volname #set vol_percent = $vol_size[:-1] partitions_percentage[$vol]=${vol_percent} #else - #if $vol_size.endswith('K') - #set vol_min_size = $int($vol_size[:-1]) / 1000 - #elif $vol_size.endswith('M') - #set vol_min_size = $int($vol_size[:-1]) - #elif $vol_size.endswith('G') - #set vol_min_size = $int($vol_size[:-1]) * 1000 - #elif $vol_size.endswith('T') - #set vol_min_size = $int($vol_size[:-1]) * 1000000 - #else - #set vol_min_size = $int($vol_size) - #end if + #if $vol_size.endswith('K') + #set vol_min_size = $int($vol_size[:-1]) / 1000 + #elif $vol_size.endswith('M') + #set vol_min_size = $int($vol_size[:-1]) + #elif $vol_size.endswith('G') + #set vol_min_size = $int($vol_size[:-1]) * 1000 + #elif $vol_size.endswith('T') + #set vol_min_size = $int($vol_size[:-1]) 
* 1000000 + #else + #set vol_min_size = $int($vol_size) + #end if partitions_size[$vol]=${vol_min_size} #end if #end for @@ -379,17 +379,17 @@ partitions_size[$vol]=${vol_min_size} #for vol_and_size in $vol_sizes #set vol, vol_size = $vol_and_size.split(' ', 1) #set vol = $vol.strip() - #if $vol_size.endswith('K') - #set vol_min_size = $int($vol_size[:-1]) / 1000 - #elif $vol_size.endswith('M') - #set vol_min_size = $int($vol_size[:-1]) - #elif $vol_size.endswith('G') - #set vol_min_size = $int($vol_size[:-1]) * 1000 - #elif $vol_size.endswith('T') - #set vol_min_size = $int($vol_size[:-1]) * 1000000 - #else - #set vol_min_size = $int($vol_size) - #end if + #if $vol_size.endswith('K') + #set vol_min_size = $int($vol_size[:-1]) / 1000 + #elif $vol_size.endswith('M') + #set vol_min_size = $int($vol_size[:-1]) + #elif $vol_size.endswith('G') + #set vol_min_size = $int($vol_size[:-1]) * 1000 + #elif $vol_size.endswith('T') + #set vol_min_size = $int($vol_size[:-1]) * 1000000 + #else + #set vol_min_size = $int($vol_size) + #end if partitions_size[$vol]=${vol_min_size} #end for #end if @@ -399,17 +399,17 @@ partitions_size[$vol]=${vol_min_size} #for vol_and_size in $vol_sizes #set vol, vol_size = $vol_and_size.split(' ', 1) #set vol = $vol.strip() - #if $vol_size.endswith('K') - #set vol_max_size = $int($vol_size[:-1]) / 1000 - #elif $vol_size.endswith('M') - #set vol_max_size = $int($vol_size[:-1]) - #elif $vol_size.endswith('G') - #set vol_max_size = $int($vol_size[:-1]) * 1000 - #elif $vol_size.endswith('T') - #set vol_max_size = $int($vol_size[:-1]) * 1000000 - #else - #set vol_max_size = $int($vol_size) - #end if + #if $vol_size.endswith('K') + #set vol_max_size = $int($vol_size[:-1]) / 1000 + #elif $vol_size.endswith('M') + #set vol_max_size = $int($vol_size[:-1]) + #elif $vol_size.endswith('G') + #set vol_max_size = $int($vol_size[:-1]) * 1000 + #elif $vol_size.endswith('T') + #set vol_max_size = $int($vol_size[:-1]) * 1000000 + #else + #set vol_max_size = 
$int($vol_size) + #end if partitions_maxsize[$vol]=${vol_max_size} #end for #end if @@ -522,7 +522,7 @@ for key in \${sorted_partitions[@]}; do partition_percentage=\${partitions_percentage[\$key]} if [[ x"\${partition_percentage}" != x"" ]]; then percentage_param="--percent=\${partition_percentage}" - grow_param="--grow" + grow_param="--grow" else percentage_param="" fi @@ -530,7 +530,7 @@ for key in \${sorted_partitions[@]}; do if [[ x"\${partition_size}" != x"" ]]; then size_param="--size=\${partition_size}" else - size_param="--size=1" + size_param="" fi partition_maxsize=\${partitions_maxsize[\$key]} if [[ x"\${partition_maxsize}" != x"" ]]; then diff --git a/cobbler/snippets/kickstart_rsyslog.conf b/cobbler/snippets/kickstart_rsyslog.conf index 8711acf..33af044 100644 --- a/cobbler/snippets/kickstart_rsyslog.conf +++ b/cobbler/snippets/kickstart_rsyslog.conf @@ -1,6 +1,6 @@ cat << EOL > /etc/rsyslog.conf \#\#\#\# MODULES \#\#\#\## - + \\$ModLoad imuxsock # provides support for local system logging (e.g. 
via logger command) \\$ModLoad imfile diff --git a/cobbler/snippets/kickstart_ssh b/cobbler/snippets/kickstart_ssh index 9900294..2ffedaa 100644 --- a/cobbler/snippets/kickstart_ssh +++ b/cobbler/snippets/kickstart_ssh @@ -9,7 +9,7 @@ mkdir -p $home/.ssh chmod 700 -R $home/.ssh #set $firstline = True - #for $ssh_key in $ssh_keys.split(',') + #for $ssh_key in $ssh_keys.split(',') #if not $ssh_key #continue #end if diff --git a/cobbler/snippets/kickstart_sysctl.conf b/cobbler/snippets/kickstart_sysctl.conf index b814dfd..c227ecf 100644 --- a/cobbler/snippets/kickstart_sysctl.conf +++ b/cobbler/snippets/kickstart_sysctl.conf @@ -38,9 +38,9 @@ kernel.shmall = 4294967296 # increase TCP max buffer size settable using setsockopt() net.core.rmem_max = 16777216 -net.core.wmem_max = 16777216 +net.core.wmem_max = 16777216 -# increase Linux autotuning TCP buffer limit +# increase Linux autotuning TCP buffer limit net.ipv4.tcp_rmem = 4096 87380 16777216 net.ipv4.tcp_wmem = 4096 65536 16777216 @@ -48,7 +48,7 @@ net.ipv4.tcp_wmem = 4096 65536 16777216 net.core.netdev_max_backlog = 30000 net.ipv4.tcp_max_syn_backlog = 4096 -# recommended default congestion control is htcp +# recommended default congestion control is htcp net.ipv4.tcp_congestion_control=htcp # recommended for hosts with jumbo frames enabled @@ -60,12 +60,12 @@ net.ipv4.tcp_fin_timeout=30 # fast cycling of sockets in time_wait state and re-using them net.ipv4.tcp_tw_recycle = 1 -net.ipv4.tcp_tw_reuse = 1 +net.ipv4.tcp_tw_reuse = 1 # increase the maximum number of requests queued to a listen socket net.core.somaxconn = 8192 -# avoid caching tcp network transfer statistics +# avoid caching tcp network transfer statistics net.ipv4.route.flush=1 #end raw EOF diff --git a/cobbler/snippets/kickstart_yum b/cobbler/snippets/kickstart_yum index dc4026f..c33ba0f 100644 --- a/cobbler/snippets/kickstart_yum +++ b/cobbler/snippets/kickstart_yum @@ -36,7 +36,7 @@ mv -f /etc/yum.repos.d/* /root/repo_backup/ #set os_info = 
$profile_name.split('-') #set osname = $os_info[0].lower() #set osversion = $os_info[1] -#set osversion_flat = $osversion.replace('.', '_') +#set osversion_flat = $osversion.replace('.', '_') cat << EOF > /etc/yum.repos.d/${osname}_${osversion_flat}_os_repo.repo [${osname}_${osversion_flat}_os_repo] diff --git a/cobbler/snippets/limits_conf.xml b/cobbler/snippets/limits_conf.xml index 2f33be5..80caaf4 100644 --- a/cobbler/snippets/limits_conf.xml +++ b/cobbler/snippets/limits_conf.xml @@ -51,7 +51,7 @@ #@faculty hard nproc 50 #ftp hard nproc 0 #@student - maxlogins 4 -* - nofile 100000 +* - nofile 100000 # End of file #end raw ]]> diff --git a/cobbler/snippets/networking.xml b/cobbler/snippets/networking.xml index 2290d9f..c9428a4 100644 --- a/cobbler/snippets/networking.xml +++ b/cobbler/snippets/networking.xml @@ -3,11 +3,11 @@ #set $hostname = $getVar("system_name","cobbler") #end if #if $getVar("dns_name_eth0","") != "" - #set $my_hostname = $hostname.split('.',1)[:1][0] - #set $my_domainname = $dns_name_eth0.split('.',1)[1:][0] + #set $my_hostname = $hostname.split('.',1)[:1][0] + #set $my_domainname = $dns_name_eth0.split('.',1)[1:][0] #else - #set $my_hostname = $hostname - #set $my_domainname = "site" + #set $my_hostname = $hostname + #set $my_domainname = "site" #end if diff --git a/cobbler/snippets/preseed_apt_repo_config b/cobbler/snippets/preseed_apt_repo_config index bbc4cee..e28dd80 100644 --- a/cobbler/snippets/preseed_apt_repo_config +++ b/cobbler/snippets/preseed_apt_repo_config @@ -1,7 +1,7 @@ # Uncomment this if you don't want to use a network mirror d-i apt-setup/use_mirror boolean false d-i apt-setup/services-select multiselect -d-i apt-setup/security_host string $http_server +d-i apt-setup/security_host string $http_server d-i apt-setup/security_path string $install_source_directory # Additional repositories, local[0-9] available #set $cur=0 diff --git a/cobbler/snippets/preseed_chef_run.sh b/cobbler/snippets/preseed_chef_run.sh index 
ade6215..19d7eee 100644 --- a/cobbler/snippets/preseed_chef_run.sh +++ b/cobbler/snippets/preseed_chef_run.sh @@ -32,8 +32,8 @@ PIDFILE=/tmp/chef_client_run.pid if [ -f \\$PIDFILE ]; then pid=\\$(cat \\$PIDFILE) if [ -f /proc/\\$pid/exe ]; then - echo "there are chef_client_run.sh running with pid \\$pid" >> /var/log/chef.log 2>&1 - exit 1 + echo "there are chef_client_run.sh running with pid \\$pid" >> /var/log/chef.log 2>&1 + exit 1 fi fi echo \\$$ > \\$PIDFILE diff --git a/cobbler/snippets/preseed_client.rb b/cobbler/snippets/preseed_client.rb index d4dc2bf..e6c60a4 100644 --- a/cobbler/snippets/preseed_client.rb +++ b/cobbler/snippets/preseed_client.rb @@ -11,7 +11,7 @@ chef_server_url 'https://$server' validation_client_name 'chef-validator' json_attribs nil pid_file '/var/run/chef-client.pid' -# Using default node name (fqdn) +# Using default node name (fqdn) no_lazy_load true ssl_verify_mode :verify_none EOL @@ -29,7 +29,7 @@ mkdir -p /etc/chef/trusted_certs cat << EOF > /etc/chef/trusted_certs/$filename #echo $f.read() EOF - #silent $f.close() + #silent $f.close() #end if #end for #end if diff --git a/cobbler/snippets/preseed_knife.rb b/cobbler/snippets/preseed_knife.rb index 0cb6bbc..32047bb 100644 --- a/cobbler/snippets/preseed_knife.rb +++ b/cobbler/snippets/preseed_knife.rb @@ -26,7 +26,7 @@ mkdir -p /root/.chef/trusted_certs cat << EOF > /root/.chef/trusted_certs/$filename #echo $f.read() EOF - #silent $f.close() + #silent $f.close() #end if #end for #end if diff --git a/cobbler/snippets/preseed_limits.conf b/cobbler/snippets/preseed_limits.conf index 0b116f3..00cf861 100644 --- a/cobbler/snippets/preseed_limits.conf +++ b/cobbler/snippets/preseed_limits.conf @@ -48,7 +48,7 @@ cat << EOF > /etc/security/limits.conf #@faculty hard nproc 50 #ftp hard nproc 0 #@student - maxlogins 4 -* - nofile 100000 +* - nofile 100000 # End of file #end raw EOF diff --git a/cobbler/snippets/preseed_ntp b/cobbler/snippets/preseed_ntp index dda5ef5..83b3055 100644 --- 
a/cobbler/snippets/preseed_ntp +++ b/cobbler/snippets/preseed_ntp @@ -6,10 +6,10 @@ cat << EOF > /etc/ntp.conf # For more information about this file, see the man pages # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). -# Include the option tinker panic 0 at the top of your ntp.conf file. +# Include the option tinker panic 0 at the top of your ntp.conf file. # By default, the NTP daemon sometimes panics and exits if the underlying clock -# appears to be behaving erratically. This option causes the daemon to keep -# running instead of panicking. +# appears to be behaving erratically. This option causes the daemon to keep +# running instead of panicking. tinker panic 0 driftfile /var/lib/ntp/drift @@ -22,7 +22,7 @@ restrict -6 default kod nomodify notrap nopeer noquery # Permit all access over the loopback interface. This could # be tightened as well, but to do so would effect some of # the administrative functions. -restrict 127.0.0.1 +restrict 127.0.0.1 restrict -6 ::1 # Hosts on local network are less restricted. @@ -35,16 +35,16 @@ restrict -6 ::1 # server 2.ubuntu.pool.ntp.org server $ntp_server -# broadcast 192.168.1.255 autokey # broadcast server -# broadcastclient # broadcast client -# broadcast 224.0.1.1 autokey # multicast server -# multicastclient 224.0.1.1 # multicast client -# manycastserver 239.255.254.254 # manycast server +# broadcast 192.168.1.255 autokey # broadcast server +# broadcastclient # broadcast client +# broadcast 224.0.1.1 autokey # multicast server +# multicastclient 224.0.1.1 # multicast client +# manycastserver 239.255.254.254 # manycast server # manycastclient 239.255.254.254 autokey # manycast client # Undisciplined Local Clock. This is a fake driver intended for backup -# and when no outside source of synchronized time is available. -server 127.127.1.0 # local clock +# and when no outside source of synchronized time is available. +server 127.127.1.0 # local clock # Enable public key cryptography. 
# crypto @@ -52,7 +52,7 @@ server 127.127.1.0 # local clock includefile /etc/ntp/crypto/pw # Key file containing the keys and key identifiers used when operating -# with symmetric key cryptography. +# with symmetric key cryptography. keys /etc/ntp/keys # Specify the key identifiers which are trusted. diff --git a/cobbler/snippets/preseed_post_anamon b/cobbler/snippets/preseed_post_anamon index 4bd3c89..6d889bb 100644 --- a/cobbler/snippets/preseed_post_anamon +++ b/cobbler/snippets/preseed_post_anamon @@ -76,3 +76,5 @@ chmod 755 /etc/init.d/set_state test -d /selinux && restorecon /etc/init.d/set_state update-rc.d set_state defaults 99 99 + +echo "compass_server=$server" >> /etc/compass.conf diff --git a/cobbler/snippets/preseed_post_apt_repo_config b/cobbler/snippets/preseed_post_apt_repo_config index d9b5792..6ea56c5 100644 --- a/cobbler/snippets/preseed_post_apt_repo_config +++ b/cobbler/snippets/preseed_post_apt_repo_config @@ -21,7 +21,6 @@ Acquire::http::Proxy::${local_repo_server} DIRECT; #end if EOF -rm -f /etc/apt/sources.list #if $getVar("local_repo", "") == "" or $getVar("local_repo_only","1") == "0" #set repos_snippet = 'apt.repos.d/%s/%s/preseed_repos' % ($osname, $osversion) @@ -34,6 +33,7 @@ $SNIPPET($repos_snippet) #set $rarch = "[arch=%s]" % $arch #end if +rm -f /etc/apt/sources.list cat << EOF >> /etc/apt/sources.list deb ${rarch} $tree $os_version main restricted EOF @@ -61,8 +61,7 @@ cat << EOF >> /etc/apt/sources.list deb ${rarch} $local_repo/$compass_repo/ local_repo main EOF - - #if $getVar("local_repo_only","1") != "0" -apt-get -y update + #if $getVar("local_repo_only","1") != "0" +apt-get -y update #end if #end if diff --git a/cobbler/snippets/preseed_post_install_network_config b/cobbler/snippets/preseed_post_install_network_config index 90f6e2b..3e57783 100644 --- a/cobbler/snippets/preseed_post_install_network_config +++ b/cobbler/snippets/preseed_post_install_network_config @@ -153,13 +153,13 @@ for logical_interface in 
\${logical_interfaces}; do eval "mapped_logical_interface=\\${physical_interface_mapping_\${logical_interface}}" if [ ! -z "\${mapped_logical_interface}" ]; then echo "ignore logical interface \${logical_interface} since the same name physical interface is mapped by logical interface \${mapped_logical_interface}" >> /tmp/network_log - continue + continue fi # check if the same name physical interface exists eval "mapped_logical_interface=\\${physical_interface_\${logical_interface}}" if [ -z "\${mapped_logical_interface}" ]; then echo "ignore logical interface \${logical_interface} since the same name physical interface does not exist" >> /tmp/network_log - continue + continue fi eval "logical_interface_mapping_\${logical_interface}=\${logical_interface}" eval "physical_interface_mapping_\${logical_interface}=\${logical_interface}" @@ -225,16 +225,16 @@ for logical_interface in \${sorted_unset_logical_interfaces}; do physical_interface=\$1 shift 1 sorted_unset_physical_interfaces="\$@" - echo "map unset logical interface \${logical_interface} to unset physical interface \${physical_interface}" >> /tmp/network_log + echo "map unset logical interface \${logical_interface} to unset physical interface \${physical_interface}" >> /tmp/network_log eval "physical_interface_mapping_\${physical_interface}=\${logical_interface}" eval "logical_interface_mapping_\${logical_interface}=\${physical_interface}" else echo "remain unset logical interface \${logical_interface} since there is no remain unset physical interfaces" >> /tmp/network_log - if [ -z "\${unset_logical_interfaces}" ]; then - unset_logical_interfaces="\${logical_interface}" - else - unset_logical_interfaces="\${unset_logical_interfaces} \${logical_interface}" - fi + if [ -z "\${unset_logical_interfaces}" ]; then + unset_logical_interfaces="\${logical_interface}" + else + unset_logical_interfaces="\${unset_logical_interfaces} \${logical_interface}" + fi fi done 
sorted_unset_logical_interfaces=\${unset_logical_interfaces} @@ -260,16 +260,16 @@ if [ ! -z "\${sorted_unset_logical_interfaces}" ]; then available_logical_interfaces="" for logical_interface in \${logical_interfaces}; do eval "mapped_logical_interface=\\${physical_interface_mapping_\${logical_interface}}" - if [ -z "\${mapped_logical_interface}" ]; then - eval "available_logical_interface_\${logical_interface}=\${logical_interface}" - if [ -z "\${available_logical_interfaces}" ]; then - available_logical_interfaces="\${logical_interface}" - else - available_logical_interfaces="\${available_logical_interfaces} \${logical_interface}" - fi - else - echo "ignore logical interface \${logical_interface} since the same name physical interface mapped to logical interface \${mapped_logical_interface}" >> /tmp/network_log - fi + if [ -z "\${mapped_logical_interface}" ]; then + eval "available_logical_interface_\${logical_interface}=\${logical_interface}" + if [ -z "\${available_logical_interfaces}" ]; then + available_logical_interfaces="\${logical_interface}" + else + available_logical_interfaces="\${available_logical_interfaces} \${logical_interface}" + fi + else + echo "ignore logical interface \${logical_interface} since the same name physical interface mapped to logical interface \${mapped_logical_interface}" >> /tmp/network_log + fi done # add extra logical interfaces name to physical interfaces @@ -287,12 +287,12 @@ if [ ! -z "\${sorted_unset_logical_interfaces}" ]; then if [ ! 
-z "\${available_logical_interface}" ]; then eval "physical_interface_mapping_\${available_logical_interface}=\${logical_interface}" eval "logical_interface_mapping_\${logical_interface}=\${available_logical_interface}" - else - if [ -z "\${unset_logical_interfaces}" ]; then - unset_logical_interfaces="\${logical_interface}" - else - unset_logical_interfaces="\${unset_logical_interfaces} \${logical_interface}" - fi + else + if [ -z "\${unset_logical_interfaces}" ]; then + unset_logical_interfaces="\${logical_interface}" + else + unset_logical_interfaces="\${unset_logical_interfaces} \${logical_interface}" + fi fi done sorted_unset_logical_interfaces=\${unset_logical_interfaces} @@ -300,13 +300,13 @@ if [ ! -z "\${sorted_unset_logical_interfaces}" ]; then # map remain unset logical interfaces to available logical interface names for logical_interface in \${sorted_unset_logical_interfaces}; do for available_logical_interface in \${available_logical_interfaces}; do - eval "mapped_logical_interface=\\${physical_interface_mapping_\${available_logical_interface}}" - if [ -z "\${mapped_logical_interface}" ]; then - eval "physical_interface_mapping_\${available_logical_interface}=\${logical_interface}" - eval "logical_interface_mapping_\${logical_interface}=\${available_logical_interface}" - break - fi - done + eval "mapped_logical_interface=\\${physical_interface_mapping_\${available_logical_interface}}" + if [ -z "\${mapped_logical_interface}" ]; then + eval "physical_interface_mapping_\${available_logical_interface}=\${logical_interface}" + eval "logical_interface_mapping_\${logical_interface}=\${available_logical_interface}" + break + fi + done done fi unset_logical_interfaces="" @@ -329,16 +329,16 @@ if [ ! 
-z "\${sorted_unset_physical_interfaces}" ]; then available_physical_interfaces="" for physical_interface in \${physical_interfaces}; do eval "mapped_physical_interface=\\${logical_interface_mapping_\${physical_interface}}" - if [ -z "\${mapped_physical_interface}" ]; then - eval "available_physical_interface_\${physical_interface}=\${physical_interface}" - if [ -z "\${available_physical_interfaces}" ]; then - available_physical_interfaces="\${physical_interface}" - else - available_physical_interfaces="\${available_physical_interfaces} \${physical_interface}" - fi - else - echo "ignore physical interface \${physical_interface} since the same name logical interface mapped to physical interface \${mapped_physical_interface}" >> /tmp/network_log - fi + if [ -z "\${mapped_physical_interface}" ]; then + eval "available_physical_interface_\${physical_interface}=\${physical_interface}" + if [ -z "\${available_physical_interfaces}" ]; then + available_physical_interfaces="\${physical_interface}" + else + available_physical_interfaces="\${available_physical_interfaces} \${physical_interface}" + fi + else + echo "ignore physical interface \${physical_interface} since the same name logical interface mapped to physical interface \${mapped_physical_interface}" >> /tmp/network_log + fi done # add extra physical interfaces name to logical interfaces @@ -356,12 +356,12 @@ if [ ! -z "\${sorted_unset_physical_interfaces}" ]; then if [ ! 
-z "\${available_physical_interface}" ]; then eval "logical_interface_mapping_\${available_physical_interface}=\${physical_interface}" eval "physical_interface_mapping_\${physical_interface}=\${available_physical_interface}" - else - if [ -z "\${unset_physical_interfaces}" ]; then - unset_physical_interfaces="\${physical_interface}" - else - unset_physical_interfaces="\${unset_physical_interfaces} \${physical_interface}" - fi + else + if [ -z "\${unset_physical_interfaces}" ]; then + unset_physical_interfaces="\${physical_interface}" + else + unset_physical_interfaces="\${unset_physical_interfaces} \${physical_interface}" + fi fi done sorted_unset_physical_interfaces=\${unset_physical_interfaces} @@ -369,13 +369,13 @@ if [ ! -z "\${sorted_unset_physical_interfaces}" ]; then # map remain unset physical interfaces to logical interface name as available physical interface names for physical_interface in \${sorted_unset_physical_interfaces}; do for available_physical_interface in \${available_physical_interfaces}; do - eval "mapped_physical_interface=\\${logical_interface_mapping_\${available_physical_interface}}" - if [ -z "\${mapped_physical_interface}" ]; then - eval "logical_interface_mapping_\${available_physical_interface}=\${physical_interface}" - eval "physical_interface_mapping_\${physical_interface}=\${available_physical_interface}" - break - fi - done + eval "mapped_physical_interface=\\${logical_interface_mapping_\${available_physical_interface}}" + if [ -z "\${mapped_physical_interface}" ]; then + eval "logical_interface_mapping_\${available_physical_interface}=\${physical_interface}" + eval "physical_interface_mapping_\${physical_interface}=\${available_physical_interface}" + break + fi + done done fi unset_physical_interfaces="" @@ -401,7 +401,7 @@ for key in \${logical_interfaces}; do if [ ! 
-z "\${physical_mac}" ]; then physical_mac=\$(echo \${physical_mac} | tr 'A-Z' 'a-z') echo "SUBSYSTEM==\"net\", ACTION==\"add\", DRIVERS==\"?*\", ATTR{address}==\"\${physical_mac}\", ATTR{type}==\"1\", KERNEL==\"eth*\", NAME=\"\$key\"" >> /etc/udev/rules.d/70-persistent-net.rules.new - echo "add network interface \$key mac \${physical_mac} into udev rules" >> /tmp/network_log + echo "add network interface \$key mac \${physical_mac} into udev rules" >> /tmp/network_log else echo "network interface \$key does not find mac address to add to udev rules" >> /tmp/network_log fi @@ -441,7 +441,7 @@ used_logical_interface_$iname=$iname #if $iface_type in ("slave","bond_slave","bridge_slave","bonded_bridge_slave") #set $static = 1 - #end if + #end if echo "auto $iname" >> /etc/network/interfaces #if $static @@ -500,8 +500,8 @@ fi echo " bond-slaves $bondslaves" >> /etc/network/interfaces #if $bonding_opts != "" - #for $bondopts in $bonding_opts.split(" ") - #set [$bondkey, $bondvalue] = $bondopts.split("=") + #for $bondopts in $bonding_opts.split(" ") + #set [$bondkey, $bondvalue] = $bondopts.split("=") echo " bond-$bondkey $bondvalue" >> /etc/network/interfaces #end for #end if @@ -519,10 +519,10 @@ echo " bond-master $iface_master" >> /etc/network/interfaces #set $bridgeslaves += $bridgeiname + " " #end if #end for -echo " bridge_ports $bridgeslaves" >> /etc/network/interfaces +echo " bridge_ports $bridgeslaves" >> /etc/network/interfaces #if $bridge_opts != "" - #for $bridgeopts in $bridge_opts.split(" ") - #set [$bridgekey, $bridgevalue] = $bridgeopts.split("=") + #for $bridgeopts in $bridge_opts.split(" ") + #set [$bridgekey, $bridgevalue] = $bridgeopts.split("=") echo " bridge_$bridgekey $bridgevalue" >> /etc/network/interfaces #end for #end if @@ -546,7 +546,7 @@ echo " address $ip" >> /etc/network/interfaces echo " netmask $netmask" >> /etc/network/interfaces #import netaddr #set interface_network = $netaddr.IPNetwork('%s/%s' % ($ip, $netmask)) - #set 
interface_network_str = $str($interface_network) + #set interface_network_str = $str($interface_network) #if $if_gateway != "" echo " gateway $if_gateway" >> /etc/network/interfaces #elif $gateway != "" @@ -555,7 +555,7 @@ echo " gateway $if_gateway" >> /etc/network/interfaces echo " gateway $gateway" >> /etc/network/interfaces #end if #end if - #end if + #end if #else #pass #end if @@ -595,19 +595,19 @@ for logical_interface in \${logical_interfaces}; do eval "used_logical_interface=\\${used_logical_interface_\${logical_interface}}" if [ ! -z "\${used_logical_interface}" ]; then # ignore logical interface that is already generated in above - echo "ignore used logical interface \${logical_interface}" >> /tmp/network_log - continue + echo "ignore used logical interface \${logical_interface}" >> /tmp/network_log + continue fi echo "add logical interface \${logical_interface} into network config since it is not set above" >> /tmp/network_log eval "physical_interface=\\${logical_interface_mapping_\${logical_interface}}" if [ ! -z "\${physical_interface}" ]; then - echo "auto \${logical_interface}" >> /etc/network/interfaces - echo "iface \${logical_interface} inet static" >> /etc/network/interfaces + echo "auto \${logical_interface}" >> /etc/network/interfaces + echo "iface \${logical_interface} inet static" >> /etc/network/interfaces eval "mac=\\${physical_interface_mac_\${physical_interface}}" if [ ! -z "\$mac" ]; then - echo " hwaddress ether \${mac}" >> /etc/network/interfaces - fi - echo "" >> /etc/network/interfaces + echo " hwaddress ether \${mac}" >> /etc/network/interfaces + fi + echo "" >> /etc/network/interfaces if [ -f "/etc/modprobe.conf" ] && [ ! 
-z "\${physical_interface}" ]; then grep \${physical_interface} /etc/modprobe.conf | sed "s/\${physical_interface}/\${logical_interface}/" >> /etc/modprobe.conf.cobbler grep -v \${physical_interface} /etc/modprobe.conf >> /etc/modprobe.conf.new @@ -627,4 +627,4 @@ fi if [ -f "/etc/udev/rules.d/70-persistent-net.rules.new" ]; then mv /etc/udev/rules.d/70-persistent-net.rules.new /etc/udev/rules.d/70-persistent-net.rules fi -## End post_install_network_config generated code +## End post_install_network_config generated code diff --git a/cobbler/snippets/preseed_post_partition_disks b/cobbler/snippets/preseed_post_partition_disks index 477551b..4770e2a 100644 --- a/cobbler/snippets/preseed_post_partition_disks +++ b/cobbler/snippets/preseed_post_partition_disks @@ -28,7 +28,7 @@ for remove_partition in \${remove_partitions}; do partition_number=\$2 if [ ! -z "\${partition_disk}" ]; then if [ ! -z "\${partition_number}" ]; then - echo "remove partition \${remove_partition} on \${partition_disk} number \${partition_number}" >> /tmp/post_partition.log + echo "remove partition \${remove_partition} on \${partition_disk} number \${partition_number}" >> /tmp/post_partition.log parted \${partition_disk} --script -- rm \${partition_number} else echo "no partition number found for \${remove_partition}" >> /tmp/post_partition.log diff --git a/cobbler/snippets/preseed_pre_install_network_config b/cobbler/snippets/preseed_pre_install_network_config index 2ea7479..e2d45df 100644 --- a/cobbler/snippets/preseed_pre_install_network_config +++ b/cobbler/snippets/preseed_pre_install_network_config @@ -34,7 +34,7 @@ get_ifname() { #set ikeys = $interfaces.keys() #for $iname in $ikeys #set $idata = $interfaces[$iname] - #set $management = $idata["management"] + #set $management = $idata["management"] #if $management #set $management_nic = $iname #end if diff --git a/cobbler/snippets/preseed_pre_partition_disks b/cobbler/snippets/preseed_pre_partition_disks index 79fcbc2..b31d884 100644 
--- a/cobbler/snippets/preseed_pre_partition_disks +++ b/cobbler/snippets/preseed_pre_partition_disks @@ -43,13 +43,13 @@ if [ -e /dev/disk/by-path ]; then else disk_mapping="\${disk_mapping} \${disk_name}" fi - if [ -z "\${disk_path_mapping}" ]; then + if [ -z "\${disk_path_mapping}" ]; then disk_path_mapping="\${disk_path_name}" - else - disk_path_mapping="\${disk_path_mapping} \${disk_path_name}" + else + disk_path_mapping="\${disk_path_mapping} \${disk_path_name}" fi eval "disk_\${disk_name}=/dev/disk/by-path/\${path_name}" - eval "disk_\${disk_path_name}=/dev/disk/by-path/\${path_name}" + eval "disk_\${disk_path_name}=/dev/disk/by-path/\${path_name}" eval "disk_path_\${disk_path_name}=/dev/\${disk_name}" eval "disk_path_\${disk_name}=/dev/\${disk_name}" else @@ -77,7 +77,7 @@ partition_disks="" disks="" for disk in \$(list-devices disk); do disk_name=\$(basename \$disk) - eval "disk_path=\\${disk_\${disk_name}}" + eval "disk_path=\\${disk_\${disk_name}}" if [ -z "\${disk_path}" ]; then eval "ignore_disk_path=\\${ignore_disk_\${disk_name}}" if [ ! -z "\${ignore_disk_path}" ]; then @@ -186,16 +186,16 @@ for disk_partition in \${disk_partitions}; do #if $getVar('partition_by_path', '0') != "0" path_name=\$(basename \${remove_disk}) disk_path_name=\$(echo \${path_name} | tr '-' '_' | tr ':' '_' | tr '.' 
'_') - remove_disk_path=\${remove_disk} + remove_disk_path=\${remove_disk} eval "remove_disk=\\${disk_path_\${disk_path_name}}" #else disk_name=\$(basename \${remove_disk}) eval "remove_disk_path=\\${disk_\${disk_name}}" #end if if [ -z "\${remove_disk}" ]; then - continue - fi - if [ -z "\${remove_disk_path}" ]; then + continue + fi + if [ -z "\${remove_disk_path}" ]; then continue fi if expr match "\${disk_partition}" "\${remove_disk_path}.*"; then @@ -210,9 +210,9 @@ for disk_partition in \${disk_partitions}; do else echo "partition \${disk_partition} does not match \${remove_disk}.*" >> /tmp/preseed.log fi - if [[ "\$vg" == "$vgname" ]]; then + if [[ "\$vg" == "$vgname" ]]; then remove_vg="\$vg" - remove_partition="\${disk_partition}" + remove_partition="\${disk_partition}" fi done if [ ! -z "\${remove_vg}" ]; then @@ -234,15 +234,15 @@ for disk_partition in \${disk_partitions}; do if [ -z "\${remove_partitions}" ]; then remove_partitions="\${remove_partition}" else - pv_removed=0 - for pv in ${remove_partitions}; do - if [[ "\$pv" == "\${remove_partition}" ]]; then - pv_removed=1 - fi - done - if [ \${pv_removed} -eq 0 ]; then + pv_removed=0 + for pv in ${remove_partitions}; do + if [[ "\$pv" == "\${remove_partition}" ]]; then + pv_removed=1 + fi + done + if [ \${pv_removed} -eq 0 ]; then remove_partitions="\${remove_partitions} \${remove_partition}" - fi + fi fi fi done @@ -288,17 +288,17 @@ echo "partition fstype \${partition_fstype}" >> /tmp/preseed.log #for disk_and_size in $disk_sizes #set disk_name, size = $disk_and_size.split(' ', 1) #set disk_name = $disk_name.strip() - #if $size.endswith('K') - #set disk_size = $int($size[:-1]) / 1000 - #elif $size.endswith('M') - #set disk_size = $int($size[:-1]) - #elif $size.endswith('G') - #set disk_size = $int($size[:-1]) * 1000 - #elif $size.endswith('T') - #set disk_size = $int($size[:-1]) * 1000000 - #else - #set disk_size = $int($size) - #end if + #if $size.endswith('K') + #set disk_size = $int($size[:-1]) / 
1000 + #elif $size.endswith('M') + #set disk_size = $int($size[:-1]) + #elif $size.endswith('G') + #set disk_size = $int($size[:-1]) * 1000 + #elif $size.endswith('T') + #set disk_size = $int($size[:-1]) * 1000000 + #else + #set disk_size = $int($size) + #end if reserve_disk_size_${disk_name}=${disk_size} #end for #end if @@ -321,17 +321,17 @@ default_reserve_disk_size=${disk_size} #for disk_and_size in $disk_sizes #set disk_name, size = $disk_and_size.split(' ', 1) #set disk_name = $disk_name.strip() - #if $size.endswith('K') - #set disk_size = $int($size[:-1]) / 1000 - #elif $size.endswith('M') - #set disk_size = $int($size[:-1]) - #elif $size.endswith('G') - #set disk_size = $int($size[:-1]) * 1000 - #elif $size.endswith('T') - #set disk_size = $int($size[:-1]) * 1000000 - #else - #set disk_size = $int($size) - #end if + #if $size.endswith('K') + #set disk_size = $int($size[:-1]) / 1000 + #elif $size.endswith('M') + #set disk_size = $int($size[:-1]) + #elif $size.endswith('G') + #set disk_size = $int($size[:-1]) * 1000 + #elif $size.endswith('T') + #set disk_size = $int($size[:-1]) * 1000000 + #else + #set disk_size = $int($size) + #end if max_disk_size_${disk_name}=${disk_size} #end for #end if @@ -356,13 +356,13 @@ default_partition_name="" #for vol_and_size in vol_sizes #set vol, vol_size = $vol_and_size.split(' ', 1) #set vol = $vol.strip() - #if $vol == '/' - #set volname = 'root' - #elif $vol == 'swap' - #set volname = 'swap' - #elif $vol.startswith('/') + #if $vol == '/' + #set volname = 'root' + #elif $vol == 'swap' + #set volname = 'swap' + #elif $vol.startswith('/') #set volname = $vol[1:].replace('/', '_') - #else + #else # $vol is not starts with / #continue #end if @@ -376,21 +376,21 @@ if [[ "$vol" == "\$default_partition" ]]; then fi partition_point_$volname=$vol #set vol_size = $vol_size.strip() - #if $vol_size.endswith('%') - #set vol_percent = $vol_size[:-1] + #if $vol_size.endswith('%') + #set vol_percent = $vol_size[:-1] 
partition_percentage_$volname=$vol_percent #else - #if $vol_size.endswith('K') + #if $vol_size.endswith('K') #set vol_min_size = $int($vol_size[:-1]) / 1000 #elif $vol_size.endswith('M') - #set vol_min_size = $int($vol_size[:-1]) + #set vol_min_size = $int($vol_size[:-1]) #elif $vol_size.endswith('G') - #set vol_min_size = $int($vol_size[:-1]) * 1000 - #elif $vol_size.endswith('T') - #set vol_min_size = $int($vol_size[:-1]) * 1000000 - #else - #set vol_min_size = $int($vol_size) - #end if + #set vol_min_size = $int($vol_size[:-1]) * 1000 + #elif $vol_size.endswith('T') + #set vol_min_size = $int($vol_size[:-1]) * 1000000 + #else + #set vol_min_size = $int($vol_size) + #end if partition_size_$volname=$vol_min_size #end if #end for @@ -401,27 +401,27 @@ partition_size_$volname=$vol_min_size #for vol_and_size in $vol_sizes #set vol, vol_size = $vol_and_size.split(' ', 1) #set vol = $vol.strip() - #if $vol == '/' - #set volname = 'root' - #elif $vol == 'swap' - #set volname = 'swap' - #elif $vol.startswith('/') + #if $vol == '/' + #set volname = 'root' + #elif $vol == 'swap' + #set volname = 'swap' + #elif $vol.startswith('/') #set volname = $vol[1:].replace('/', '_') - #else + #else # $vol is not starts with / #continue #end if - #if $vol_size.endswith('K') - #set vol_min_size = $int($vol_size[:-1]) / 1000 - #elif $vol_size.endswith('M') - #set vol_min_size = $int($vol_size[:-1]) - #elif $vol_size.endswith('G') - #set vol_min_size = $int($vol_size[:-1]) * 1000 - #elif $vol_size.endswith('T') - #set vol_min_size = $int($vol_size[:-1]) * 1000000 - #else - #set vol_min_size = $int($vol_size) - #end if + #if $vol_size.endswith('K') + #set vol_min_size = $int($vol_size[:-1]) / 1000 + #elif $vol_size.endswith('M') + #set vol_min_size = $int($vol_size[:-1]) + #elif $vol_size.endswith('G') + #set vol_min_size = $int($vol_size[:-1]) * 1000 + #elif $vol_size.endswith('T') + #set vol_min_size = $int($vol_size[:-1]) * 1000000 + #else + #set vol_min_size = $int($vol_size) + #end 
if partitions_size_$volname=${vol_min_size} #end for #end if @@ -431,27 +431,27 @@ partitions_size_$volname=${vol_min_size} #for vol_and_size in $vol_sizes #set vol, vol_size = $vol_and_size.split(' ', 1) #set vol = $vol.strip() - #if $vol == '/' - #set volname = 'root' - #elif $vol == 'swap' - #set volname = 'swap' - #elif $vol.startswith('/') + #if $vol == '/' + #set volname = 'root' + #elif $vol == 'swap' + #set volname = 'swap' + #elif $vol.startswith('/') #set volname = $vol[1:].replace('/', '_') - #else + #else # $vol is not starts with / #continue #end if - #if $vol_size.endswith('K') - #set vol_max_size = $int($vol_size[:-1]) / 1000 - #elif $vol_size.endswith('M') - #set vol_max_size = $int($vol_size[:-1]) - #elif $vol_size.endswith('G') - #set vol_max_size = $int($vol_size[:-1]) * 1000 - #elif $vol_size.endswith('T') - #set vol_max_size = $int($vol_size[:-1]) * 1000000 - #else - #set vol_max_size = $int($vol_size) - #end if + #if $vol_size.endswith('K') + #set vol_max_size = $int($vol_size[:-1]) / 1000 + #elif $vol_size.endswith('M') + #set vol_max_size = $int($vol_size[:-1]) + #elif $vol_size.endswith('G') + #set vol_max_size = $int($vol_size[:-1]) * 1000 + #elif $vol_size.endswith('T') + #set vol_max_size = $int($vol_size[:-1]) * 1000000 + #else + #set vol_max_size = $int($vol_size) + #end if partition_maxsize_$volname=${vol_max_size} #end for #end if @@ -602,7 +602,7 @@ in_vg{ $vgname } lv_name{ \${key}vol } \ mountpoint{ \$partition }" fi echo "partition param \$partition => \${partition_param}" >> /tmp/preseed.log - recipe="\$recipe \${partition_size} \${partition_factor} \${partition_maxsize} \${partition_param} ." + recipe="\$recipe \${partition_size} \${partition_factor} \${partition_maxsize} \${partition_param} ." 
done for disk in \${sorted_disks}; do @@ -641,7 +641,7 @@ method{ lvm } vg_name{ $vgname }" \\$defaultignore{ } device{ \${disk} } \ method{ lvm } vg_name{ reserved }" recipe="\$recipe \${reserve_disk_size} \${reserve_disk_size} \${reserve_disk_size} \${reserve_disk_param} ." - echo "reserve partition param \${disk_name} => \${reserve_disk_param}" >> /tmp/preseed.log + echo "reserve partition param \${disk_name} => \${reserve_disk_param}" >> /tmp/preseed.log fi done @@ -662,7 +662,7 @@ for disk in \$disks; do \\$defaultignore{ } device{ \${disk} } \ method{ lvm } vg_name{ reserved }" recipe="\$recipe 512 512+100% -1 \${reserve_disk_param} ." - echo "reserve partition param \${disk_name} => \${reserve_disk_param}" >> /tmp/preseed.log + echo "reserve partition param \${disk_name} => \${reserve_disk_param}" >> /tmp/preseed.log fi done #end if diff --git a/cobbler/snippets/preseed_rsyslog.conf b/cobbler/snippets/preseed_rsyslog.conf index 500d2a8..11e7722 100644 --- a/cobbler/snippets/preseed_rsyslog.conf +++ b/cobbler/snippets/preseed_rsyslog.conf @@ -1,6 +1,6 @@ cat << EOL > /etc/rsyslog.conf \#\#\#\# MODULES \#\#\#\## - + \\$ModLoad imuxsock # provides support for local system logging (e.g. 
via logger command) \\$ModLoad imfile diff --git a/cobbler/snippets/preseed_sysctl.conf b/cobbler/snippets/preseed_sysctl.conf index b814dfd..c227ecf 100644 --- a/cobbler/snippets/preseed_sysctl.conf +++ b/cobbler/snippets/preseed_sysctl.conf @@ -38,9 +38,9 @@ kernel.shmall = 4294967296 # increase TCP max buffer size settable using setsockopt() net.core.rmem_max = 16777216 -net.core.wmem_max = 16777216 +net.core.wmem_max = 16777216 -# increase Linux autotuning TCP buffer limit +# increase Linux autotuning TCP buffer limit net.ipv4.tcp_rmem = 4096 87380 16777216 net.ipv4.tcp_wmem = 4096 65536 16777216 @@ -48,7 +48,7 @@ net.ipv4.tcp_wmem = 4096 65536 16777216 net.core.netdev_max_backlog = 30000 net.ipv4.tcp_max_syn_backlog = 4096 -# recommended default congestion control is htcp +# recommended default congestion control is htcp net.ipv4.tcp_congestion_control=htcp # recommended for hosts with jumbo frames enabled @@ -60,12 +60,12 @@ net.ipv4.tcp_fin_timeout=30 # fast cycling of sockets in time_wait state and re-using them net.ipv4.tcp_tw_recycle = 1 -net.ipv4.tcp_tw_reuse = 1 +net.ipv4.tcp_tw_reuse = 1 # increase the maximum number of requests queued to a listen socket net.core.somaxconn = 8192 -# avoid caching tcp network transfer statistics +# avoid caching tcp network transfer statistics net.ipv4.route.flush=1 #end raw EOF diff --git a/cobbler/snippets/puppet_register_if_enabled b/cobbler/snippets/puppet_register_if_enabled index 90ef702..439345a 100644 --- a/cobbler/snippets/puppet_register_if_enabled +++ b/cobbler/snippets/puppet_register_if_enabled @@ -1,4 +1,4 @@ -# start puppet registration +# start puppet registration #if $str($getVar('puppet_auto_setup','')) == "1" # generate puppet certificates and trigger a signing request, but # don't wait for signing to complete diff --git a/cobbler/snippets/redhat_register b/cobbler/snippets/redhat_register index 2f1f783..3ac0d07 100644 --- a/cobbler/snippets/redhat_register +++ b/cobbler/snippets/redhat_register @@ 
-4,12 +4,12 @@ mkdir -p /usr/share/rhn/ #if $redhat_management_type == "site" #set $mycert_file = "RHN-ORG-TRUSTED-SSL-CERT" #set $mycert = "/usr/share/rhn/" + $mycert_file -wget http://$redhat_management_server/pub/RHN-ORG-TRUSTED-SSL-CERT -O $mycert -perl -npe 's/RHNS-CA-CERT/$mycert_file/g' -i /etc/sysconfig/rhn/* +wget http://$redhat_management_server/pub/RHN-ORG-TRUSTED-SSL-CERT -O $mycert +perl -npe 's/RHNS-CA-CERT/$mycert_file/g' -i /etc/sysconfig/rhn/* #end if #if $redhat_management_type == "hosted" #set $mycert = "/usr/share/rhn/RHNS-CA-CERT" - #end if + #end if #set $endpoint = "https://%s/XMLRPC" % $redhat_management_server rhnreg_ks --serverUrl=$endpoint --sslCACert=$mycert --activationkey=$redhat_management_key #else diff --git a/cobbler/snippets/repo_config.xml b/cobbler/snippets/repo_config.xml index 5483644..dbdd5d2 100644 --- a/cobbler/snippets/repo_config.xml +++ b/cobbler/snippets/repo_config.xml @@ -10,7 +10,7 @@ ${repo.name} / false - ${repo.name} + ${repo.name} #end for diff --git a/cobbler/snippets/rsyslog.xml b/cobbler/snippets/rsyslog.xml index 4623eb3..63eacfe 100644 --- a/cobbler/snippets/rsyslog.xml +++ b/cobbler/snippets/rsyslog.xml @@ -3,7 +3,7 @@ diff --git a/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_base_repo b/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_base_repo index feb4d4f..ee01aa3 100644 --- a/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_base_repo +++ b/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_base_repo @@ -6,7 +6,7 @@ cat << EOF > /etc/yum.repos.d/CentOS-Base.repo # geographically close to the client. You should use this for CentOS updates # unless you are manually picking other mirrors. # -# If the mirrorlist= does not work for you, as a fall back you can try the +# If the mirrorlist= does not work for you, as a fall back you can try the # remarked out baseurl= line instead. 
# # @@ -19,7 +19,7 @@ gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 skip_if_unavailable=1 -# released updates +# released updates [updates] name=CentOS-6.5 - Updates mirrorlist=http://mirrorlist.centos.org/?release=6&arch=\\$basearch&repo=updates diff --git a/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_vault_repo b/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_vault_repo index 5e5de02..310736d 100644 --- a/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_vault_repo +++ b/cobbler/snippets/yum.repos.d/centos/6.5/kickstart_centos_vault_repo @@ -2,7 +2,7 @@ cat << EOF > /etc/yum.repos.d/CentOS-Vault.repo # CentOS-Vault.repo # # CentOS Vault holds packages from previous releases within the same CentOS Version -# these are packages obsoleted by the current release and should usually not +# these are packages obsoleted by the current release and should usually not # be used in production #----------------- diff --git a/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_base_repo b/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_base_repo index 7bb6b6b..7116ade 100644 --- a/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_base_repo +++ b/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_base_repo @@ -6,7 +6,7 @@ cat << EOF > /etc/yum.repos.d/CentOS-Base.repo # geographically close to the client. You should use this for CentOS updates # unless you are manually picking other mirrors. # -# If the mirrorlist= does not work for you, as a fall back you can try the +# If the mirrorlist= does not work for you, as a fall back you can try the # remarked out baseurl= line instead. 
# # @@ -19,7 +19,7 @@ gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 skip_if_unavailable=1 -# released updates +# released updates [updates] name=CentOS-6.6 - Updates mirrorlist=http://mirrorlist.centos.org/?release=6&arch=\\$basearch&repo=updates diff --git a/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_vault_repo b/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_vault_repo index ec23f5a..065fa43 100644 --- a/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_vault_repo +++ b/cobbler/snippets/yum.repos.d/centos/6.6/kickstart_centos_vault_repo @@ -2,7 +2,7 @@ cat << EOF > /etc/yum.repos.d/CentOS-Vault.repo # CentOS-Vault.repo # # CentOS Vault holds packages from previous releases within the same CentOS Version -# these are packages obsoleted by the current release and should usually not +# these are packages obsoleted by the current release and should usually not # be used in production #----------------- diff --git a/cobbler/snippets/yum.repos.d/centos/7.0/kickstart_centos_base_repo b/cobbler/snippets/yum.repos.d/centos/7.0/kickstart_centos_base_repo index 7ad1842..2462aa8 100644 --- a/cobbler/snippets/yum.repos.d/centos/7.0/kickstart_centos_base_repo +++ b/cobbler/snippets/yum.repos.d/centos/7.0/kickstart_centos_base_repo @@ -6,7 +6,7 @@ cat << EOF > /etc/yum.repos.d/CentOS-Base.repo # geographically close to the client. You should use this for CentOS updates # unless you are manually picking other mirrors. # -# If the mirrorlist= does not work for you, as a fall back you can try the +# If the mirrorlist= does not work for you, as a fall back you can try the # remarked out baseurl= line instead. # # @@ -19,7 +19,7 @@ gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7 skip_if_unavailable=1 -# released updates +# released updates [updates] name=CentOS-7.0 - Updates mirrorlist=http://mirrorlist.centos.org/?release=7&arch=\\$basearch&repo=updates