Gate: Remove legacy gate code

This PS removes the legacy gate code from osh-addons.

Change-Id: I799ceafcc8c5f185e5bc5bac8d2d41a68c8dbf1f
Pete Birley 2018-05-16 19:51:40 -05:00
parent c11eca9a6f
commit 79973b6c47
36 changed files with 0 additions and 1924 deletions

@@ -1,28 +0,0 @@
Openstack-Helm Gate Scripts
===========================

These scripts are used in the OpenStack-Helm gates and can also be run
locally to aid development and for demonstration purposes. Please note
that they assume full control of a machine, and may be destructive in
nature, so they should only be run on a dedicated host.

Usage
-----

The gate scripts use ``setup_gate.sh`` as an entrypoint and are
controlled by environment variables; an example that runs the basic
integration test is below:

.. code:: bash

    export INTEGRATION=aio
    export INTEGRATION_TYPE=basic
    ./tools/gate/setup_gate.sh
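
The multinode job uses the same entrypoint; a sketch based on the
environment exported by the Zuul multinode playbook in this change
(``PVC_BACKEND=ceph`` is specific to that job, the default is ``nfs``):

.. code:: bash

    export INTEGRATION=multi
    export INTEGRATION_TYPE=basic
    export PVC_BACKEND=ceph
    ./tools/gate/setup_gate.sh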
Supported Platforms
~~~~~~~~~~~~~~~~~~~

Currently supported host platforms are:

* Ubuntu 16.04
* CentOS 7

With some preparation of Docker, and with SELinux disabled, Fedora 25
is also supported.
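
For reference, the Fedora preparation the gate itself performs (in
``kubeadm_aio_reqs_install`` below) is roughly the following; treat it
as a sketch rather than a hardened procedure:

.. code:: bash

    sudo dnf install -y docker-latest nfs-utils jq
    sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
    sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
    echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay2" | sudo tee /etc/sysconfig/docker-latest-storage
    sudo setenforce 0 || true
    sudo systemctl daemon-reload
    sudo systemctl restart docker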

@@ -1,28 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
source ${WORK_DIR}/tools/gate/funcs/helm.sh
source ${WORK_DIR}/tools/gate/funcs/kube.sh
helm_build
helm search
kube_wait_for_pods kube-system 600
# todo(srwilkers): implement helm tests for postgresql
#helm_test_deployment postgresql openstack
kube_wait_for_pods kube-system 600

@@ -1,70 +0,0 @@
#!/bin/bash
set +xe
# if we can't find kubectl, bail immediately: it is likely that the
# whitespace linter failed, so there is no point collecting logs.
if ! type "kubectl" &> /dev/null; then
exit $1
fi
echo "Capturing logs from environment."
mkdir -p ${LOGS_DIR}/k8s/etc
sudo cp -a /etc/kubernetes ${LOGS_DIR}/k8s/etc
sudo chmod 777 --recursive ${LOGS_DIR}/*
mkdir -p ${LOGS_DIR}/k8s
for OBJECT_TYPE in nodes \
namespace; do
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
done
kubectl describe nodes > ${LOGS_DIR}/k8s/nodes.txt
for OBJECT_TYPE in svc \
pods \
jobs \
deployments \
daemonsets \
statefulsets \
configmaps \
secrets; do
kubectl get --all-namespaces ${OBJECT_TYPE} -o yaml > \
${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
done
mkdir -p ${LOGS_DIR}/k8s/pods
kubectl get pods -a --all-namespaces -o json | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl get --namespace $NAMESPACE pod $NAME -o json | jq -r \
'.spec.containers[].name' | while read line; do
CONTAINER=$(echo $line | awk '{print $1}')
kubectl logs $NAME --namespace $NAMESPACE -c $CONTAINER > \
${LOGS_DIR}/k8s/pods/$NAMESPACE-$NAME-$CONTAINER.txt
done
done
mkdir -p ${LOGS_DIR}/k8s/svc
kubectl get svc -o json --all-namespaces | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl describe svc $NAME --namespace $NAMESPACE > \
${LOGS_DIR}/k8s/svc/$NAMESPACE-$NAME.txt
done
mkdir -p ${LOGS_DIR}/k8s/rbac
for OBJECT_TYPE in clusterroles \
roles \
clusterrolebindings \
rolebindings; do
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/rbac/${OBJECT_TYPE}.yaml
done
mkdir -p ${LOGS_DIR}/nodes/$(hostname)
sudo iptables-save > ${LOGS_DIR}/nodes/$(hostname)/iptables.txt
sudo ip a > ${LOGS_DIR}/nodes/$(hostname)/ip.txt
sudo route -n > ${LOGS_DIR}/nodes/$(hostname)/routes.txt
arp -a > ${LOGS_DIR}/nodes/$(hostname)/arp.txt
exit $1

@@ -1,102 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
function helm_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends -qq \
git \
make \
curl \
ca-certificates
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
git \
make \
curl
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
git \
make \
curl
fi
# install helm
if CURRENT_HELM_LOC=$(type -p helm); then
CURRENT_HELM_VERSION=$(${CURRENT_HELM_LOC} version --client --short | awk '{ print $NF }' | awk -F '+' '{ print $1 }')
fi
[ "x$HELM_VERSION" == "x$CURRENT_HELM_VERSION" ] || ( \
TMP_DIR=$(mktemp -d)
curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
sudo mv ${TMP_DIR}/helm /usr/local/bin/helm
rm -rf ${TMP_DIR} )
}
function helm_serve {
if [[ -d "$HOME/.helm" ]]; then
echo ".helm directory found"
else
helm init --client-only
fi
if [[ -z $(curl -s 127.0.0.1:8879 | grep 'Helm Repository') ]]; then
helm serve > /dev/null &
while [[ -z $(curl -s 127.0.0.1:8879 | grep 'Helm Repository') ]]; do
sleep 1
echo "Waiting for Helm Repository"
done
else
echo "Helm serve already running"
fi
if helm repo list | grep -q "^stable" ; then
helm repo remove stable
fi
helm repo add local http://localhost:8879/charts
}
function helm_lint {
make build-helm-toolkit -C ${WORK_DIR}
make TASK=lint -C ${WORK_DIR}
}
function helm_build {
make TASK=build -C ${WORK_DIR}
}
function helm_test_deployment {
DEPLOYMENT=$1
NAMESPACE=$2
helm test ${DEPLOYMENT}
mkdir -p ${LOGS_DIR}/helm-tests
kubectl logs -n ${NAMESPACE} ${DEPLOYMENT}-helm-tests > ${LOGS_DIR}/helm-tests/${DEPLOYMENT}
kubectl delete pods ${DEPLOYMENT}-helm-tests -n ${NAMESPACE}
}
function helm_plugin_template_install {
# NOTE(portdirect): the helm plugin install command does not seem to respect the --version flag with helm 2.3.0
#helm plugin install https://github.com/technosophos/helm-template --version 2.3.0.1
mkdir -p ${HOME}/.helm/plugins/helm-template
curl -sSL https://github.com/technosophos/helm-template/releases/download/2.3.0%2B1/helm-template-linux-2.3.0.1.tgz | tar -zxv -C ${HOME}/.helm/plugins/helm-template
}
function helm_template_run {
mkdir -p ${LOGS_DIR}/templates
for CHART in $(helm search | awk '{ print $1 }' | tail -n +2 | awk -F '/' '{ print $NF }'); do
echo "Running Helm template plugin on chart: $CHART"
helm template --verbose $CHART > ${LOGS_DIR}/templates/$CHART
done
}

@@ -1,132 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
function kube_wait_for_pods {
# From Kolla-Kubernetes, original authors Kevin Fox & Serguei Bezverkhi
# Default wait timeout is 180 seconds
set +x
end=$(date +%s)
if [ x$2 != "x" ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
kubectl get pods --namespace=$1 -o json | jq -r \
'.items[].status.phase' | grep Pending > /dev/null && \
PENDING=True || PENDING=False
query='.items[]|select(.status.phase=="Running")'
query="$query|.status.containerStatuses[].ready"
kubectl get pods --namespace=$1 -o json | jq -r "$query" | \
grep false > /dev/null && READY="False" || READY="True"
kubectl get jobs -o json --namespace=$1 | jq -r \
'.items[] | .spec.completions == .status.succeeded' | \
grep false > /dev/null && JOBR="False" || JOBR="True"
[ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \
break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && echo containers failed to start. && \
kubectl get pods --namespace $1 -o wide && exit -1
done
set -x
}
function kube_wait_for_nodes {
# Default wait timeout is 180 seconds
set +x
end=$(date +%s)
if [ x$2 != "x" ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
NUMBER_OF_NODES=$(kubectl get nodes --no-headers -o name | wc -l)
NUMBER_OF_NODES_EXPECTED=$(($(cat /etc/nodepool/sub_nodes_private | wc -l) + 1))
[ $NUMBER_OF_NODES -eq $NUMBER_OF_NODES_EXPECTED ] && \
NODES_ONLINE="True" || NODES_ONLINE="False"
while read SUB_NODE; do
echo $SUB_NODE | grep -q ^Ready && NODES_READY="True" || NODES_READY="False"
done < <(kubectl get nodes --no-headers | awk '{ print $2 }')
[ $NODES_ONLINE == "True" -a $NODES_READY == "True" ] && \
break || true
sleep 5
now=$(date +%s)
[ $now -gt $end ] && echo "Nodes Failed to be ready in time." && \
kubectl get nodes -o wide && exit -1
done
set -x
}
function kubeadm_aio_reqs_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends -qq \
docker.io \
nfs-common \
jq
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
epel-release
sudo yum install -y \
docker-latest \
nfs-utils \
jq
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
sudo sed -i 's/^OPTIONS/#OPTIONS/g' /etc/sysconfig/docker-latest
sudo sed -i "s|^MountFlags=slave|MountFlags=share|g" /etc/systemd/system/docker.service
sudo sed -i "/--seccomp-profile/,+1 d" /etc/systemd/system/docker.service
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay" | sudo tee /etc/sysconfig/docker-latest-storage
sudo setenforce 0 || true
sudo systemctl daemon-reload
sudo systemctl restart docker
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
docker-latest \
nfs-utils \
jq
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay2" | sudo tee /etc/sysconfig/docker-latest-storage
sudo setenforce 0 || true
sudo systemctl daemon-reload
sudo systemctl restart docker
fi
if CURRENT_KUBECTL_LOC=$(type -p kubectl); then
CURRENT_KUBECTL_VERSION=$(${CURRENT_KUBECTL_LOC} version --client --short | awk '{ print $NF }' | awk -F '+' '{ print $1 }')
fi
[ "x$KUBE_VERSION" == "x$CURRENT_KUBECTL_VERSION" ] || ( \
TMP_DIR=$(mktemp -d)
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
chmod +x ${TMP_DIR}/kubectl
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
rm -rf ${TMP_DIR} )
}
function kubeadm_aio_build {
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
}
function kubeadm_aio_launch {
${WORK_DIR}/tools/kubeadm-aio/kubeadm-aio-launcher.sh
mkdir -p ${HOME}/.kube
cat ${KUBECONFIG} > ${HOME}/.kube/config
kube_wait_for_pods kube-system 240
kube_wait_for_pods default 240
}

@@ -1,44 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
function net_resolv_pre_kube {
sudo cp -f /etc/resolv.conf /etc/resolv-pre-kube.conf
cat << EOF | sudo tee /etc/resolv.conf
nameserver 8.8.8.8
EOF
}
function net_resolv_kube {
kubectl get namespace openstack || kubectl create namespace openstack
kubectl create --namespace openstack -f ${WORK_DIR}/tools/gate/manifests/resolv-conf-util.yaml
kube_wait_for_pods openstack 240
}
function net_resolv_post_kube {
sudo cp -f /etc/resolv-pre-kube.conf /etc/resolv.conf
}
function net_hosts_pre_kube {
sudo cp -f /etc/hosts /etc/hosts-pre-kube
HOST_IFACE=$(ip route | grep "^default" | awk '{ print $5 }')
HOST_IP=$(ip addr | awk "/inet/ && /${HOST_IFACE}/{sub(/\/.*$/,\"\",\$2); print \$2}")
sudo sed -i "/$(hostname)/d" /etc/hosts
echo "${HOST_IP} $(hostname)" | sudo tee -a /etc/hosts
}
function net_hosts_post_kube {
sudo cp -f /etc/hosts-pre-kube /etc/hosts
}

@@ -1,24 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
source ${WORK_DIR}/tools/gate/funcs/helm.sh
helm_build
mkdir -p ${LOGS_DIR}/dry-runs
for CHART in $(helm search | awk '{ print $1 }' | tail -n +2 | awk -F '/' '{ print $NF }'); do
echo "Dry Running chart: $CHART"
helm install --dry-run --debug local/$CHART --name=$CHART --namespace=openstack > ${LOGS_DIR}/dry-runs/$CHART
done

@@ -1,22 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/kube.sh
kubeadm_aio_reqs_install
sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build
kubeadm_aio_launch
net_resolv_kube

@@ -1,41 +0,0 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: resolv-conf-util
spec:
  template:
    metadata:
      labels:
        version: v0.1.0
        name: resolv-conf-util
    spec:
      containers:
        - name: resolv-conf-util
          image: docker.io/ubuntu:16.04
          command:
            - /bin/bash
            - -ecx
            - |
              cat /etc/resolv.conf > /host/etc/resolv.conf
              tail -f /dev/null
          volumeMounts:
            - mountPath: /host/etc/resolv.conf
              name: host-resolv-conf
      volumes:
        - name: host-resolv-conf
          hostPath:
            path: /etc/resolv.conf

@@ -1,31 +0,0 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: primary
  vars:
    logs_dir: "/tmp/logs"
  environment:
    LOGS_DIR: "{{ logs_dir }}"
  tasks:
    - name: Capture logs from environment
      shell: ./tools/gate/dump_logs.sh 0
      args:
        chdir: "{{ zuul.project.src_dir }}"
      ignore_errors: yes
    - name: Download logs to executor
      synchronize:
        src: "{{ logs_dir }}/"
        dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
        mode: pull
      ignore_errors: yes

@@ -1,66 +0,0 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: primary
  tasks:
    - name: Create nodepool directory
      become: true
      become_user: root
      file:
        path: /etc/nodepool
        state: directory
        mode: 0777
    - name: Create nodepool sub_nodes file
      copy:
        dest: /etc/nodepool/sub_nodes
        content: ""
    - name: Create nodepool sub_nodes_private file
      copy:
        dest: /etc/nodepool/sub_nodes_private
        content: ""
    - name: Populate nodepool sub_nodes file
      lineinfile:
        path: /etc/nodepool/sub_nodes
        line: "{{ hostvars[item]['nodepool']['private_ipv4'] }}"
      with_items: "{{ groups['nodes'] }}"
      when: groups['nodes'] is defined
    - name: Populate nodepool sub_nodes_private file
      lineinfile:
        path: /etc/nodepool/sub_nodes_private
        line: "{{ hostvars[item]['nodepool']['private_ipv4'] }}"
      with_items: "{{ groups['nodes'] }}"
      when: groups['nodes'] is defined
    - name: Create nodepool primary file
      copy:
        dest: /etc/nodepool/primary_node
        content: "{{ hostvars['primary']['nodepool']['private_ipv4'] }}"
      when: hostvars['primary'] is defined
    - name: Create nodepool node_private for this node
      copy:
        dest: /etc/nodepool/node_private
        content: "{{ nodepool.private_ipv4 }}"
    - name: Run OSH Deploy
      shell: |
        set -xe;
        export INTEGRATION=multi
        export INTEGRATION_TYPE=basic
        export PVC_BACKEND=ceph
        export ZUUL_VERSION=v3
        export KUBECONFIG=${HOME}/.kube/config
        export SDN_PLUGIN="{{ sdn_plugin }}"
        export GLANCE="{{ glance_backend }}"
        kubectl get nodes -o wide
        ./tools/gate/setup_gate.sh
      args:
        chdir: "{{ zuul.project.src_dir }}"

@@ -1,58 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
cd ${WORK_DIR}
source /etc/os-release
export HOST_OS=${ID}
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/kube.sh
kubeadm_aio_reqs_install
# Setup shared mounts for kubelet
sudo mkdir -p /var/lib/kubelet
sudo mount --bind /var/lib/kubelet /var/lib/kubelet
sudo mount --make-shared /var/lib/kubelet
# Cleanup any old deployment
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
sudo docker ps -aq | xargs -r -l1 sudo docker rm -f
sudo rm -rfv \
/etc/cni/net.d \
/etc/kubernetes \
/var/lib/etcd \
/var/etcd \
/var/lib/kubelet/* \
/run/openvswitch || true
# Launch Container
sudo docker run \
-dt \
--name=kubeadm-aio \
--net=host \
--security-opt=seccomp:unconfined \
--cap-add=SYS_ADMIN \
--tmpfs=/run \
--tmpfs=/run/lock \
--volume=/etc/machine-id:/etc/machine-id:ro \
--volume=${HOME}:${HOME}:rw \
--volume=/etc/kubernetes:/etc/kubernetes:rw \
--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \
--volume=/var/run/docker.sock:/run/docker.sock \
--env KUBE_ROLE="worker" \
--env KUBELET_CONTAINER="${KUBEADM_IMAGE}" \
--env KUBEADM_JOIN_ARGS="--token=${KUBEADM_TOKEN} ${PRIMARY_NODE_IP}:6443" \
${KUBEADM_IMAGE}

@@ -1,79 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
export HELM_VERSION=${2:-v2.5.0}
export KUBE_VERSION=${3:-v1.6.7}
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:${KUBE_VERSION}
export WORK_DIR=$(pwd)
source /etc/os-release
export HOST_OS=${ID}
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/helm.sh
# Setup the logging location: by default use the working dir as the root.
export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"}
mkdir -p ${LOGS_DIR}
function dump_logs () {
${WORK_DIR}/tools/gate/dump_logs.sh
}
trap 'dump_logs "$?"' ERR
# Run the ws-linter here to avoid a whitespace-only failure blocking all the jobs
if [ "x$INTEGRATION_TYPE" == "xlinter" ]; then
bash ${WORK_DIR}/tools/gate/whitespace.sh
fi
# We set up the pre-kube networking here, to enable cluster restarts on
# development machines
net_resolv_pre_kube
net_hosts_pre_kube
# todo(srwilkers): remove this when zuul-cloner functional for helm-toolkit
# Clones openstack-helm to retrieve helm-toolkit
TMP_DIR=$(mktemp -d)
git clone https://github.com/openstack/openstack-helm ${TMP_DIR}
mv ${TMP_DIR}/helm-toolkit ${WORK_DIR}
rm -rf ${TMP_DIR}
# Setup helm
helm_install
helm_serve
helm_lint
# In the linter, we also run the helm template plugin to get a sanity check
# of the chart without verifying against the k8s API
if [ "x$INTEGRATION_TYPE" == "xlinter" ]; then
helm_build > ${LOGS_DIR}/helm_build
helm_plugin_template_install
helm_template_run
fi
# Setup the K8s Cluster
if [ "x$INTEGRATION" == "xaio" ]; then
bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
elif [ "x$INTEGRATION" == "xmulti" ]; then
bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
bash ${WORK_DIR}/tools/gate/setup_gate_worker_nodes.sh
fi
# Deploy OpenStack-Helm
if [ "x$INTEGRATION_TYPE" == "xbasic" ]; then
bash ${WORK_DIR}/tools/gate/helm_dry_run.sh
bash ${WORK_DIR}/tools/gate/basic_launch.sh
bash ${WORK_DIR}/tools/gate/dump_logs.sh 0
fi

@@ -1,46 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
: ${SSH_PRIVATE_KEY:="/etc/nodepool/id_rsa"}
sudo chown $(whoami) ${SSH_PRIVATE_KEY}
sudo chmod 600 ${SSH_PRIVATE_KEY}
PRIMARY_NODE_IP=$(cat /etc/nodepool/primary_node_private | tail -1)
KUBEADM_TOKEN=$(sudo docker exec kubeadm-aio kubeadm token list | tail -n -1 | awk '{ print $1 }')
SUB_NODE_PROVISION_SCRIPT=$(mktemp --suffix=.sh)
cat /etc/nodepool/sub_nodes_private | while read SUB_NODE; do
cat >> ${SUB_NODE_PROVISION_SCRIPT} <<EOS
ssh-keyscan "${SUB_NODE}" >> ~/.ssh/known_hosts
ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${SUB_NODE} mkdir -p ${WORK_DIR%/*}
scp -i ${SSH_PRIVATE_KEY} -r ${WORK_DIR} $(whoami)@${SUB_NODE}:${WORK_DIR%/*}
ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${SUB_NODE} "export WORK_DIR=${WORK_DIR}; \
export KUBEADM_TOKEN=${KUBEADM_TOKEN}; \
export PRIMARY_NODE_IP=${PRIMARY_NODE_IP}; \
export KUBEADM_IMAGE=${KUBEADM_IMAGE}; \
bash ${WORK_DIR}/tools/gate/provision_gate_worker_node.sh"
EOS
done
bash ${SUB_NODE_PROVISION_SCRIPT}
rm -rf ${SUB_NODE_PROVISION_SCRIPT}
source ${WORK_DIR}/tools/gate/funcs/kube.sh
kube_wait_for_nodes 240
kube_wait_for_pods kube-system 240
kube_wait_for_pods openstack 240
kubectl get nodes --show-all
kubectl get --all-namespaces all --show-all
sudo docker exec kubeadm-aio openstack-helm-dev-prep

@@ -1,32 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
folder='.'
if [[ $# -gt 0 ]] ; then
folder="$1";
fi
res=$(find $folder \
-not -path "*/\.*" \
-not -path "*/doc/build/*" \
-not -name "*.tgz" \
-type f -exec egrep -l " +$" {} \;)
if [[ -z $res ]] ; then
exit 0
else
echo 'Trailing space(s) found.'
exit 1
fi

@@ -1,90 +0,0 @@
FROM ubuntu:16.04
MAINTAINER pete.birley@att.com
ENV HELM_VERSION=v2.5.0 \
    KUBE_VERSION=v1.6.7 \
    CNI_VERSION=v0.5.2 \
    container="docker" \
    DEBIAN_FRONTEND="noninteractive"
RUN set -x \
&& TMP_DIR=$(mktemp --directory) \
&& cd ${TMP_DIR} \
&& apt-get update \
&& apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
# Add Kubernetes repo
&& curl -sSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list \
&& apt-get update \
&& apt-get install -y \
docker.io \
iptables \
kubectl \
kubelet \
kubernetes-cni \
# Install Kubeadm without running postinstall script as it expects systemd to be running.
&& apt-get download kubeadm \
&& dpkg --unpack kubeadm*.deb \
&& mv /var/lib/dpkg/info/kubeadm.postinst /opt/kubeadm.postinst \
&& dpkg --configure kubeadm \
&& apt-get install -yf kubeadm \
&& mkdir -p /etc/kubernetes/manifests \
# Install kubectl:
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-client-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/client/bin/kubectl /usr/bin/kubectl \
&& chmod +x /usr/bin/kubectl \
# Install kubelet & kubeadm binaries:
# (portdirect) We do things in this weird way to let us use the deps and systemd
# units from the packages in the .deb repo.
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/server/bin/kubelet /usr/bin/kubelet \
&& chmod +x /usr/bin/kubelet \
&& mv ${TMP_DIR}/server/bin/kubeadm /usr/bin/kubeadm \
&& chmod +x /usr/bin/kubeadm \
# Install CNI:
&& CNI_BIN_DIR=/opt/cni/bin \
&& mkdir -p ${CNI_BIN_DIR} \
&& cd ${CNI_BIN_DIR} \
&& curl -sSL https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \
&& cd ${TMP_DIR} \
# Move kubelet binary as we will run containerised
&& mv /usr/bin/kubelet /usr/bin/kubelet-real \
# Install helm binary
&& curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/helm /usr/bin/helm \
# Install openstack-helm dev utils
&& apt-get install -y \
make \
git \
vim \
jq \
# Install nfs utils for development PVC provisioner
nfs-common \
# Tweak Systemd units and targets for running in a container
&& find /lib/systemd/system/sysinit.target.wants/ ! -name 'systemd-tmpfiles-setup.service' -type l -exec rm -fv {} + \
&& rm -fv \
/lib/systemd/system/multi-user.target.wants/* \
/etc/systemd/system/*.wants/* \
/lib/systemd/system/local-fs.target.wants/* \
/lib/systemd/system/sockets.target.wants/*udev* \
/lib/systemd/system/sockets.target.wants/*initctl* \
/lib/systemd/system/basic.target.wants/* \
# Clean up apt cache
&& rm -rf /var/lib/apt/lists/* \
# Clean up tmp dir
&& cd / \
&& rm -rf ${TMP_DIR}
# Load assets into place, setup startup target & units
COPY ./assets/ /
RUN set -x \
&& ln -s /usr/lib/systemd/system/container-up.target /etc/systemd/system/default.target \
&& mkdir -p /etc/systemd/system/container-up.target.wants \
&& ln -s /usr/lib/systemd/system/kubeadm-aio.service /etc/systemd/system/container-up.target.wants/kubeadm-aio.service
VOLUME /sys/fs/cgroup
CMD /kubeadm-aio

@@ -1,102 +0,0 @@
Kubeadm AIO Container
=====================

This container builds a small AIO Kubeadm based Kubernetes deployment
for Development and Gating use.

Instructions
------------

OS Specific Host setup:
~~~~~~~~~~~~~~~~~~~~~~~

Ubuntu:
^^^^^^^

From a freshly provisioned Ubuntu 16.04 LTS host run:

.. code:: bash

    sudo apt-get update -y
    sudo apt-get install -y \
        docker.io \
        nfs-common \
        git \
        make

OS Independent Host setup:
~~~~~~~~~~~~~~~~~~~~~~~~~~

You should install the ``kubectl`` and ``helm`` binaries:

.. code:: bash

    KUBE_VERSION=v1.6.7
    HELM_VERSION=v2.5.0
    TMP_DIR=$(mktemp -d)
    curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
    chmod +x ${TMP_DIR}/kubectl
    sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
    curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
    sudo mv ${TMP_DIR}/helm /usr/local/bin/helm
    rm -rf ${TMP_DIR}

And clone the OpenStack-Helm repo:

.. code:: bash

    git clone https://git.openstack.org/openstack/openstack-helm

Build the AIO environment (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A known good image is published to dockerhub on a fairly regular basis, but if
you wish to build your own image, from the root directory of the OpenStack-Helm
repo run:

.. code:: bash

    export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.7
    sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
Deploy the AIO environment
~~~~~~~~~~~~~~~~~~~~~~~~~~

To launch the environment, run:

.. code:: bash

    export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.7
    export KUBE_VERSION=v1.6.7
    ./tools/kubeadm-aio/kubeadm-aio-launcher.sh
    export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf

Once this has run, you should have a single node Kubernetes
environment running, with Helm, Calico, an NFS PVC provisioner and
appropriate RBAC rules and node labels to get developing.
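
As a quick sanity check (assuming ``kubectl`` is installed as described
above), the node and the ``kube-system`` pods should report ready:

.. code:: bash

    kubectl get nodes
    kubectl get pods --namespace kube-system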
If you wish to use this environment as the primary Kubernetes
environment on your host, you may run the following, but note that this
will wipe any previous client configuration you may have:

.. code:: bash

    mkdir -p ${HOME}/.kube
    cat ${HOME}/.kubeadm-aio/admin.conf > ${HOME}/.kube/config

If you wish to create dummy network devices for Neutron to manage, there
is a helper script that can set them up for you:

.. code:: bash

    sudo docker exec kubelet /usr/bin/openstack-helm-aio-network-prep
Logs
~~~~

You can get the logs from your ``kubeadm-aio`` container by running:

.. code:: bash

    sudo docker logs -f kubeadm-aio
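
For deeper debugging you can also get a shell inside the container; a
minimal sketch, assuming the stock ``bash`` of its Ubuntu base image:

.. code:: bash

    sudo docker exec -it kubeadm-aio /bin/bash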

@@ -1,3 +0,0 @@
# If KUBE_ROLE is set to 'master', kubeadm-aio will set this node up to be a
# master node; otherwise, if set to 'worker', it will join an existing cluster.
KUBE_ROLE=master

@@ -1,3 +0,0 @@
# If KUBE_VERSION is set to 'default', kubeadm will use its default version of
# K8s; otherwise the version specified here will be used.
KUBE_VERSION=default

@@ -1 +0,0 @@
KUBEADM_JOIN_ARGS="no_command_supplied"

@@ -1,3 +0,0 @@
# If KUBE_BIND_DEV is set to 'autodetect', we will use kubeadm's autodetect logic;
# otherwise the device specified here is used to find the IP address to bind to.
KUBE_BIND_DEV=autodetect

@@ -1,3 +0,0 @@
# If KUBELET_CONTAINER is set to 'this_one', we will not attempt to launch a new
# container for the kubelet process; otherwise the image tag specified is used.
KUBELET_CONTAINER=this_one
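
These defaults are overridden at runtime by passing ``--env`` flags to
``docker run``; a trimmed sketch of the worker invocation used by the
worker launch script earlier in this change (the full command also
mounts the host paths shown there):

sudo docker run -dt --name=kubeadm-aio --net=host \
    --env KUBE_ROLE="worker" \
    --env KUBELET_CONTAINER="${KUBEADM_IMAGE}" \
    --env KUBEADM_JOIN_ARGS="--token=${KUBEADM_TOKEN} ${PRIMARY_NODE_IP}:6443" \
    ${KUBEADM_IMAGE}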

@@ -1,45 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Checking cgroups'
if ls -dZ /sys/fs/cgroup | grep -q :svirt_sandbox_file_t: ; then
echo 'Invocation error: use -v /sys/fs/cgroup:/sys/fs/cgroup:ro parameter to docker run.'
exit 1
fi
echo 'Setting up K8s version to deploy'
: ${KUBE_VERSION:="default"}
sed -i "s|KUBE_VERSION=.*|KUBE_VERSION=${KUBE_VERSION}|g" /etc/kube-version
echo 'Setting up device to use for kube-api'
: ${KUBE_BIND_DEV:="autodetect"}
sed -i "s|KUBE_BIND_DEV=.*|KUBE_BIND_DEV=${KUBE_BIND_DEV}|g" /etc/kubeapi-device
echo 'Setting up container image to use for kubelet'
: ${KUBELET_CONTAINER:="this_one"}
sed -i "s|KUBELET_CONTAINER=.*|KUBELET_CONTAINER=${KUBELET_CONTAINER}|g" /etc/kubelet-container
echo 'Setting whether this node is a master or worker K8s node'
: ${KUBE_ROLE:="master"}
sed -i "s|KUBE_ROLE=.*|KUBE_ROLE=${KUBE_ROLE}|g" /etc/kube-role
echo 'Setting any kubeadm join commands'
: ${KUBEADM_JOIN_ARGS:="no_command_supplied"}
sed -i "s|KUBEADM_JOIN_ARGS=.*|KUBEADM_JOIN_ARGS=\"${KUBEADM_JOIN_ARGS}\"|g" /etc/kubeadm-join-command-args
echo 'Starting Systemd'
exec /bin/systemd --system

@@ -1,365 +0,0 @@
# Calico Version v2.1.4
# http://docs.projectcalico.org/v2.1/releases#v2.1.4
# This manifest includes the following component versions:
# calico/node:v1.1.3
# calico/cni:v1.7.0
# calico/kube-policy-controller:v0.5.4
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The location of your etcd cluster. This uses the Service clusterIP
  # defined below.
  etcd_endpoints: "http://10.96.232.136:6666"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
        "name": "k8s-pod-network",
        "type": "calico",
        "etcd_endpoints": "__ETCD_ENDPOINTS__",
        "log_level": "info",
        "ipam": {
            "type": "calico-ipam"
        },
        "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
        },
        "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
        }
    }
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: calico-etcd
  namespace: kube-system
  labels:
    k8s-app: calico-etcd
spec:
  template:
    metadata:
      labels:
        k8s-app: calico-etcd
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # Only run this pod on the master.
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true
      containers:
        - name: calico-etcd
          image: gcr.io/google_containers/etcd:2.2.1
          env:
            - name: CALICO_ETCD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          command: ["/bin/sh","-c"]
          args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
          volumeMounts:
            - name: var-etcd
              mountPath: /var/etcd
      volumes:
        - name: var-etcd
          hostPath:
            path: /var/etcd
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: calico-etcd
  name: calico-etcd
  namespace: kube-system
spec:
  # Select the calico-etcd pod running on the master.
  selector:
    k8s-app: calico-etcd
  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  clusterIP: 10.96.232.136
  ports:
    - port: 6666
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: calico-cni-plugin
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v1.1.3
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Enable BGP. Disable to enforce policy only.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
              value: "192.168.0.0/16"
            - name: CALICO_IPV4POOL_IPIP
              value: "always"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.7.0
          command: ["/install-cni.sh"]
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: calico-policy-controller
      containers:
        - name: calico-policy-controller
          image: quay.io/calico/kube-policy-controller:v0.5.4
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
  - kind: ServiceAccount
    name: calico-cni-plugin
    namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-policy-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-policy-controller
subjects:
  - kind: ServiceAccount
    name: calico-policy-controller
    namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-policy-controller
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
    verbs:
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-policy-controller
  namespace: kube-system

@@ -1,73 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: nfs-provisioner
  labels:
    app: nfs-provisioner
spec:
  ports:
    - name: nfs
      port: 2049
    - name: mountd
      port: 20048
    - name: rpcbind
      port: 111
    - name: rpcbind-udp
      port: 111
      protocol: UDP
  selector:
    app: nfs-provisioner
---
kind: Deployment
apiVersion: apps/v1beta1
metadata:
  name: nfs-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      containers:
        - name: nfs-provisioner
          image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.7
          ports:
            - name: nfs
              containerPort: 2049
            - name: mountd
              containerPort: 20048
            - name: rpcbind
              containerPort: 111
            - name: rpcbind-udp
              containerPort: 111
              protocol: UDP
          securityContext:
            capabilities:
              add:
                - DAC_READ_SEARCH
                - SYS_RESOURCE
          args:
            - "-provisioner=example.com/nfs"
            - "-grace-period=10"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: SERVICE_NAME
              value: nfs-provisioner
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: export-volume
              mountPath: /export
      volumes:
        - name: export-volume
          hostPath:
            path: /var/lib/nfs-provisioner

@@ -1,5 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: general
provisioner: example.com/nfs

@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
  name: cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: Group
    name: system:masters
  - kind: Group
    name: system:authenticated
  - kind: Group
    name: system:unauthenticated

@@ -1,54 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
source /etc/kube-role
if [[ "${KUBE_ROLE}" == "master" ]]; then
# Define k8s version
source /etc/kube-version
if [[ "${KUBE_VERSION}" == "default" ]]; then
KUBE_VERSION_FLAG=""
else
KUBE_VERSION_FLAG="--kubernetes-version=${KUBE_VERSION}"
echo "We will use K8s ${KUBE_VERSION}"
fi
echo 'Setting up K8s'
source /etc/kubeapi-device
if [[ "$KUBE_BIND_DEV" != "autodetect" ]]; then
KUBE_BIND_IP=$(ip addr list ${KUBE_BIND_DEV} |grep "inet " |cut -d' ' -f6|cut -d/ -f1)
echo "We are going to bind the K8s API to: ${KUBE_BIND_IP}"
kubeadm init --skip-preflight-checks ${KUBE_VERSION_FLAG} --api-advertise-addresses ${KUBE_BIND_IP}
else
kubeadm init --skip-preflight-checks ${KUBE_VERSION_FLAG}
fi
echo 'Setting up K8s client'
cp /etc/kubernetes/admin.conf /root/
export KUBECONFIG=/root/admin.conf
echo 'Marking master node as schedulable'
kubectl taint nodes --all node-role.kubernetes.io/master-
echo 'Installing Calico CNI'
kubectl apply -f /opt/cni-manifests/calico.yaml
echo 'Setting Up Cluster for OpenStack-Helm dev use'
/usr/bin/openstack-helm-dev-prep
elif [[ "${KUBE_ROLE}" == "worker" ]]; then
source /etc/kubeadm-join-command-args
kubeadm join --skip-preflight-checks ${KUBEADM_JOIN_ARGS}
fi

@@ -1,61 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
# Set the KUBELET_CONTAINER env var
source /etc/kubelet-container
# Determine the Cgroup driver in use by Docker
CGROUP_DRIVER=$(docker info | awk '/^Cgroup Driver:/ { print $NF }')
if [[ "${KUBELET_CONTAINER}" == "this_one" ]]; then
exec kubelet-real \
--containerized=true \
--enable-cri=false \
--cgroup-driver=${CGROUP_DRIVER} "${@}"
else
# Remove any old containers first
docker rm -f kubelet || true
# Launch the container
exec docker run \
--name kubelet \
--restart=always \
--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \
--volume=/:/rootfs:ro \
--volume=/dev/net:/dev/net:rw \
--volume=/var/run/netns:/var/run/netns:rw \
--volume=/sys:/sys:ro \
--volume=/etc/machine-id:/etc/machine-id:ro \
--volume=/opt/cni:/opt/cni:rw \
--volume=/etc/cni/net.d:/etc/cni/net.d:rw \
--volume=/var/lib/docker/:/var/lib/docker:rw \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rshared \
--volume=/var/run:/var/run:rw \
--volume=/var/log/containers:/var/log/containers:rw \
--volume=/etc/kubernetes:/etc/kubernetes:rw \
--volume=/etc/hosts:/etc/hosts:rw \
--volume=/etc/resolv.conf:/etc/resolv.conf:rw \
--net=host \
--privileged=true \
--pid=host \
--ipc=host \
${KUBELET_CONTAINER} \
kubelet \
--containerized=true \
--enable-cri=false \
--cgroup-driver=${CGROUP_DRIVER} "${@}"
fi

@@ -1,22 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Setting up virtual network devices'
ip link add neutron-ext type dummy || true
ip link set neutron-ext up
ip link add neutron-phys type dummy || true
ip link set neutron-phys up

@@ -1,34 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Setting Kubecfg Location'
export KUBECONFIG=/root/admin.conf
echo 'Cloning OpenStack-Helm'
git clone --depth 1 https://github.com/openstack/openstack-helm.git /opt/openstack-helm
echo 'Starting helm local repo'
helm serve &
until curl -sSL --connect-timeout 1 http://localhost:8879 > /dev/null; do
echo 'Waiting for helm serve to start'
sleep 2
done
helm repo add local http://localhost:8879/charts
echo 'Building OpenStack-Helm'
cd /opt/openstack-helm
make

@@ -1,27 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Setting Kubecfg Location'
export KUBECONFIG=/root/admin.conf
echo 'Labeling the nodes for Openstack-Helm deployment'
kubectl label nodes openstack-control-plane=enabled --all --namespace=openstack --overwrite
kubectl label nodes openvswitch=enabled --all --namespace=openstack --overwrite
kubectl label nodes openstack-compute-node=enabled --all --namespace=openstack --overwrite
echo 'RBAC: applying development rules (totally open!)'
kubectl apply -f /opt/rbac/dev.yaml

@@ -1,22 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Setting Kubecfg Location'
export KUBECONFIG=/root/admin.conf
echo 'Deploying NFS Provisioner'
kubectl create -R -f /opt/nfs-provisioner/

@@ -1,42 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
# Default wait timeout is 180 seconds
: ${KUBECONFIG:="/etc/kubernetes/admin.conf"}
export KUBECONFIG=${KUBECONFIG}
end=$(date +%s)
if [ x$2 != "x" ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
NUMBER_OF_NODES=$(kubectl get nodes --no-headers -o name | wc -l)
NUMBER_OF_NODES_EXPECTED=$(($(cat /etc/nodepool/sub_nodes_private | wc -l) + 1))
[ $NUMBER_OF_NODES -eq $NUMBER_OF_NODES_EXPECTED ] && \
NODES_ONLINE="True" || NODES_ONLINE="False"
while read SUB_NODE; do
echo $SUB_NODE | grep -q ^Ready && NODES_READY="True" || NODES_READY="False"
done < <(kubectl get nodes --no-headers | awk '{ print $2 }')
[ $NODES_ONLINE == "True" -a $NODES_READY == "True" ] && \
break || true
sleep 5
now=$(date +%s)
[ $now -gt $end ] && echo "Nodes Failed to be ready in time." && \
kubectl get nodes -o wide && exit -1
done

@@ -1,46 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
# From Kolla-Kubernetes, original authors Kevin Fox & Serguei Bezverkhi
# Default wait timeout is 180 seconds
: ${KUBECONFIG:="/etc/kubernetes/admin.conf"}
export KUBECONFIG=${KUBECONFIG}
end=$(date +%s)
if [ x$2 != "x" ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
kubectl get pods --namespace=$1 -o json | jq -r \
'.items[].status.phase' | grep Pending > /dev/null && \
PENDING=True || PENDING=False
query='.items[]|select(.status.phase=="Running")'
query="$query|.status.containerStatuses[].ready"
kubectl get pods --namespace=$1 -o json | jq -r "$query" | \
grep false > /dev/null && READY="False" || READY="True"
kubectl get jobs -o json --namespace=$1 | jq -r \
'.items[] | .spec.completions == .status.succeeded' | \
grep false > /dev/null && JOBR="False" || JOBR="True"
[ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \
break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && echo containers failed to start. && \
kubectl get pods --namespace $1 -o wide && exit -1
done

@@ -1,105 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
# Setup shared mounts for kubelet
sudo mkdir -p /var/lib/kubelet
sudo mount --bind /var/lib/kubelet /var/lib/kubelet
sudo mount --make-shared /var/lib/kubelet
# Cleanup any old deployment
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
sudo docker ps -aq | xargs -r -l1 sudo docker rm -f
sudo rm -rfv \
/etc/cni/net.d \
/etc/kubernetes \
/var/lib/etcd \
/var/etcd \
/var/lib/kubelet/* \
/run/openvswitch \
${HOME}/.kubeadm-aio/admin.conf \
/var/lib/nfs-provisioner || true
# Launch Container
sudo docker run \
-dt \
--name=kubeadm-aio \
--net=host \
--security-opt=seccomp:unconfined \
--cap-add=SYS_ADMIN \
--tmpfs=/run \
--tmpfs=/run/lock \
--volume=/etc/machine-id:/etc/machine-id:ro \
--volume=${HOME}:${HOME}:rw \
--volume=${HOME}/.kubeadm-aio:/root:rw \
--volume=/etc/kubernetes:/etc/kubernetes:rw \
--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \
--volume=/var/run/docker.sock:/run/docker.sock \
--env KUBELET_CONTAINER=${KUBEADM_IMAGE} \
--env KUBE_VERSION=${KUBE_VERSION} \
${KUBEADM_IMAGE}
echo "Waiting for kubeconfig"
set +x
end=$(($(date +%s) + 120))
READY="False"
while true; do
if [ -f ${HOME}/.kubeadm-aio/admin.conf ]; then
READY="True"
fi
[ $READY == "True" ] && break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && \
echo "KubeADM did not generate kubectl config in time" && \
sudo docker logs kubeadm-aio && exit -1
done
set -x
# Set perms of kubeconfig and set env-var
sudo chown $(id -u):$(id -g) ${HOME}/.kubeadm-aio/admin.conf
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
echo "Waiting for node to be ready before continuing"
set +x
end=$(($(date +%s) + 240))
READY="False"
while true; do
READY=$(kubectl get nodes --no-headers=true | awk "{ print \$2 }" | head -1)
[ $READY == "Ready" ] && break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && \
echo "Kube node did not register as ready in time" && \
sudo docker logs kubeadm-aio && exit -1
done
set -x
# Waiting for kube-system pods to be ready before continuing
sudo docker exec kubeadm-aio wait-for-kube-pods kube-system
# Initialize Helm
helm init
# Initialize Environment for Development
sudo docker exec kubeadm-aio openstack-helm-dev-prep
: ${PVC_BACKEND:="nfs"}
if [ "$PVC_BACKEND" == "nfs" ]; then
# Deploy the NFS provisioner into the environment
sudo docker exec kubeadm-aio openstack-helm-nfs-prep
fi