Merge branch 'master' into horizon_merge

commit 1c6fc76842
Alan Meadows 2016-12-23 15:01:28 -08:00 (committed by GitHub)
10 changed files with 321 additions and 43 deletions

.gitignore

@@ -1,3 +1,4 @@
*.lock
*.tgz
**/*.tgz
.idea/


@@ -1,4 +1,4 @@
.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack all clean
.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack all clean
B64_DIRS := common/secrets
B64_EXCLUDE := $(wildcard common/secrets/*.b64)
@@ -6,10 +6,11 @@ B64_EXCLUDE := $(wildcard common/secrets/*.b64)
CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon openstack
COMMON_TPL := common/templates/_globals.tpl
all: common bootstrap mariadb rabbitmq memcached keystone glance horizon openstack
all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon openstack
common: build-common
#ceph: nolint-build-ceph
ceph: build-ceph
bootstrap: build-bootstrap
@@ -24,18 +25,25 @@ rabbitmq: build-rabbitmq
glance: build-glance
memcached: build-memcached
memcached: build-memcached
openstack: build-openstack
clean:
$(shell rm -rf common/secrets/*.b64)
$(shell rm -rf */templates/_partials.tpl)
$(shell rm -rf */templates/_globals.tpl)
echo "Removed all .b64, _partials.tpl, and _globals.tpl files"
clean:
$(shell rm -rf common/secrets/*.b64)
$(shell rm -rf */templates/_partials.tpl)
$(shell rm -rf */templates/_globals.tpl)
echo "Removed all .b64, _partials.tpl, and _globals.tpl files"
build-%:
if [ -f $*/Makefile ]; then make -C $*; fi
if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
helm lint $*
helm package $*
if [ -f $*/Makefile ]; then make -C $*; fi
if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
helm lint $*
helm package $*
## this is required for some charts which cannot pass a lint, namely
## those which use .Release.Namespace in a default pipe capacity
#nolint-build-%:
# if [ -f $*/Makefile ]; then make -C $*; fi
# if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
# helm package $*


@@ -1,7 +1,15 @@
# aic-helm
# Openstack-Helm
This is a fully self-contained helm-based OpenStack deployment on Kubernetes, including baremetal provisioning, persistent storage, and resiliency considerations.
Openstack-Helm is a fully self-contained Helm-based OpenStack deployment on Kubernetes. It will provide baremetal provisioning, persistent storage, full-stack resiliency, full-stack scalability, performance monitoring and tracing, and an optional development pipeline (using Jenkins). This project, along with the tools used within it, is community-based and open source.
This collection is a work in progress so components will continue to be added over time.
# Mission
Please see the [wiki](https://github.com/att-comdev/aic-helm/wiki) for more details.
The goal for Openstack-Helm is to provide an incredibly customizable *framework* for operators and developers alike. This framework enables end-users to deploy, maintain, and upgrade a fully functioning OpenStack environment, for both simple and complex deployments. Administrators and developers can deploy either all or individual OpenStack components along with their required dependencies. It heavily borrows concepts from [Stackanetes](https://github.com/stackanetes/stackanetes) and [other complex Helm application deployments](https://github.com/sapcc/openstack-helm). This is meant to be a collaborative project that brings OpenStack applications into a [Cloud-Native](https://www.cncf.io/about/charter) model.
# Open Releases
Until a 1.0.0 release, this collection is a work in progress and components will continue to be added or modified over time. Please review our [Milestones](https://github.com/att-comdev/openstack-helm/milestones), [Releases](https://github.com/att-comdev/openstack-helm/releases), and [Project](https://github.com/att-comdev/openstack-helm/projects/1) timelines.
# Additional Details
For additional details and instructions on how to use this project, please see the [wiki](https://github.com/att-comdev/openstack-helm/wiki).


@@ -1,4 +1,4 @@
apiVersion: v1
description: aic-helm namespace bootstrap
description: openstack-helm namespace bootstrap
name: bootstrap
version: 0.1.0


@@ -230,6 +230,8 @@ spec:
memory: "100Mi"
cpu: "2000m"
---
# rgw is not required: wrapped in an if statement so it is only deployed when enabled
{{- if .Values.rgw.enabled }}
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
@@ -304,3 +306,5 @@ spec:
limits:
memory: "500Mi"
cpu: ".5"
{{ end }}
# end: optional rgw deployment


@@ -7,10 +7,11 @@ metadata:
name: {{ .Values.storageclass.name }}
provisioner: kubernetes.io/rbd
parameters:
monitors: {{ .Values.storageclass.monitors | default "ceph.{{ .Release.Namespace }}:6789" }}
monitors: {{ .Values.storageclass.monitors | default "ceph-mon.ceph:6789" }}
adminId: {{ .Values.storageclass.admin_id }}
adminSecretName: {{ .Values.storageclass.admin_secret_name }}
adminSecretNamespace: {{ .Values.storageclass.admin_secret_namespace | default "{{ .Release.Namespace }}" }}
## forcing the namespace due to an issue with: default "{{ .Release.Namespace }}"
adminSecretNamespace: {{ .Values.storageclass.admin_secret_namespace | default "ceph" }}
pool: {{ .Values.storageclass.pool }}
userId: {{ .Values.storageclass.user_id }}
userSecretName: {{ .Values.storageclass.user_secret_name }}


@@ -24,6 +24,10 @@ storage:
osd_directory: /var/lib/aic-helm/ceph/osd
var_directory: /var/lib/aic-helm/ceph/ceph
# rgw is optionally disabled
rgw:
enabled: false
# Setting this to false will assume you will
# setup and orchestrate your own secrets and
# configmaps outside of this helm chart
@@ -59,10 +63,10 @@ secrets:
storageclass:
provision_storage_class: true
name: general
monitors: nil
monitors: null
pool: rbd
admin_id: admin
admin_secret_name: pvc-ceph-conf-combined-storageclass
admin_secret_namespace: nil
admin_secret_namespace: null
user_id: admin
user_secret_name: pvc-ceph-client-key
user_secret_name: pvc-ceph-client-key


@@ -0,0 +1,249 @@
# Overview
In order to drive towards a production-ready OpenStack solution, our goal is to provide containerized, yet stable [persistent volumes](http://kubernetes.io/docs/user-guide/persistent-volumes/) that Kubernetes can use to schedule applications that require state, such as MariaDB (Galera). Although we assume the project should provide a “batteries included” approach towards persistent storage, we want to allow operators to define their own solution as well. Examples of this work will be documented in other sections, and evidence of it can be found throughout the project. If you have any questions or comments, please create an [issue](https://github.com/att-comdev/openstack-helm/issues).
**IMPORTANT**: Please see the table below for the latest published information about the application versions used by this project.
| | Version | Notes |
|--- |--- |--- |
| **Kubernetes** | [v1.5.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v151) | [Custom Controller for RBD tools](https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags) |
| **Helm** | [v2.1.2](https://github.com/kubernetes/helm/wiki/Roadmap#210-decided) | Planning for [v2.2.0](https://github.com/kubernetes/helm/wiki/Roadmap#220-open-for-small-additions) |
| **Calico** | [v2.0](http://docs.projectcalico.org/v2.0/releases/) | [`calicoctl` v1.0](https://github.com/projectcalico/calicoctl/releases) |
| **Docker** | [v1.12.1](https://github.com/docker/docker/releases/tag/v1.12.1) | [Per kubeadm Instructions](http://kubernetes.io/docs/getting-started-guides/kubeadm/) |
Other versions and considerations (such as other CNI SDN providers), config map data, and value overrides will be included in other documentation as we explore these options further.
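If you would like to confirm the versions in your own environment against the table above, the following commands will report them (assuming each tool is already installed on the host):
```
# report the versions of the components referenced in the table above
admin@kubenode01:~$ kubectl version
admin@kubenode01:~$ helm version
admin@kubenode01:~$ docker --version
admin@kubenode01:~$ calicoctl version
```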
# Quickstart (Bare Metal)
This walkthrough will help you set up a bare metal environment with `kubeadm` on Ubuntu 16.04. The assumption is that your `kubeadm` environment is up and in a working state ***prior*** to deploying a CNI-SDN. This deployment procedure is opinionated *only to standardize the deployment process for users and developers*, and to limit questions to a known working deployment. Instructions will expand as the project becomes more mature.
If your environment looks like this, you are ready to continue:
```
admin@kubenode01:~$ kubectl get pods -o wide --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE
kube-system dummy-2088944543-lg0vc 1/1 Running 1 5m 192.168.3.21 kubenode01
kube-system etcd-kubenode01 1/1 Running 1 5m 192.168.3.21 kubenode01
kube-system kube-apiserver-kubenode01 1/1 Running 3 5m 192.168.3.21 kubenode01
kube-system kube-controller-manager-kubenode01 1/1 Running 0 5m 192.168.3.21 kubenode01
kube-system kube-discovery-1769846148-8g4d7 1/1 Running 1 5m 192.168.3.21 kubenode01
kube-system kube-dns-2924299975-xxtrg 0/4 ContainerCreating 0 5m <none> kubenode01
kube-system kube-proxy-7kxpr 1/1 Running 0 5m 192.168.3.22 kubenode02
kube-system kube-proxy-b4xz3 1/1 Running 0 5m 192.168.3.24 kubenode04
kube-system kube-proxy-b62rp 1/1 Running 0 5m 192.168.3.23 kubenode03
kube-system kube-proxy-s1fpw 1/1 Running 1 5m 192.168.3.21 kubenode01
kube-system kube-proxy-thc4v 1/1 Running 0 5m 192.168.3.25 kubenode05
kube-system kube-scheduler-kubenode01 1/1 Running 1 5m 192.168.3.21 kubenode01
admin@kubenode01:~$
```
## Deploying a CNI-Enabled SDN (Calico)
After an initial `kubeadm` deployment has been scheduled, it is time to deploy a CNI-enabled SDN. We have selected **Calico**, but have also confirmed that this works for Weave and Romana. For Calico v2.0, you can apply the provided [Kubeadm Hosted Install](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/) manifest:
```
admin@kubenode01:~$ kubectl apply -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml
```
**PLEASE NOTE:** If you are using a 192.168.0.0/16 CIDR for your Kubernetes hosts, you will need to modify the `cidr` declaration within the `ippool` on [line 42](https://gist.github.com/v1k0d3n/a152b1f5b8db5a8ae9c8c7da575a9694#file-calico-kubeadm-hosted-yml-L42). This must be a `/16` range or larger, as the `kube-controller` will hand out `/24` ranges to each node. We have included a sample comparison of the changes [here](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml) and [here](https://gist.githubusercontent.com/v1k0d3n/a152b1f5b8db5a8ae9c8c7da575a9694/raw/c950eef1123a7dcc4b0dedca1a202e0c06248e9e/calico-kubeadm-hosted.yml).
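For illustration, one way to make that change before applying the manifest is shown below. This is only a sketch: it assumes the default pool CIDR in the hosted manifest is `192.168.0.0/16`, and `10.25.0.0/16` is purely an example replacement.
```
# download the hosted manifest, swap the default ippool cidr for a range that
# does not overlap with the host network, then apply it (example values only)
admin@kubenode01:~$ curl -sO http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml
admin@kubenode01:~$ sed -i 's|192.168.0.0/16|10.25.0.0/16|' calico.yaml
admin@kubenode01:~$ kubectl apply -f calico.yaml
```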
After the CNI-SDN is deployed, Calico has a tool you can use to verify your deployment. Download [`calicoctl`](https://github.com/projectcalico/calicoctl/releases) and execute the following command:
```
admin@kubenode01:~$ sudo calicoctl node status
Calico process is running.
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+--------------+-------------------+-------+----------+-------------+
| 192.168.3.22 | node-to-node mesh | up | 16:34:03 | Established |
| 192.168.3.23 | node-to-node mesh | up | 16:33:59 | Established |
| 192.168.3.24 | node-to-node mesh | up | 16:34:00 | Established |
| 192.168.3.25 | node-to-node mesh | up | 16:33:59 | Established |
+--------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.
admin@kubenode01:~$
```
It is important to call out that the self-hosted Calico manifest for v2.0 (above) enables `node-to-node` mesh and `nat-outgoing` by default. This is a change from version 1.6.
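If you want to verify this on your own cluster, `calicoctl` can display the configured pools, including the `nat-outgoing` setting. This is only a quick check, and assumes the `calicoctl` v1.0 resource commands referenced above:
```
# list the configured IP pools and their settings (including nat-outgoing)
admin@kubenode01:~$ sudo calicoctl get ippool -o yaml
```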
## Preparing Persistent Storage
Persistent storage is improving. Please check our current and/or resolved [issues](https://github.com/att-comdev/openstack-helm/issues?utf8=✓&q=ceph) to find out how we're working with the community to improve persistent storage for our project. For now, a few preparations need to be completed.
### Installing Ceph Host Requirements
At some future point, we want to ensure that our solution is cloud-native, allowing installation on any host system without a package manager and only a container runtime (e.g. CoreOS). Until this happens, we will need to ensure that `ceph-common` is installed on each of our hosts. Using our Ubuntu example:
```
admin@kubenode01:~$ sudo apt-get install ceph-common -y
```
We will always attempt to keep host-specific requirements to a minimum, and we are working with the Ceph team (Sébastien Han) to quickly address this Ceph requirement.
### Kubernetes Controller Manager
Before deploying Ceph, you will need to re-deploy a custom Kubernetes Controller with the necessary [RBD](http://docs.ceph.com/docs/jewel/rbd/rbd/) utilities. For your convenience, we are maintaining this along with the Openstack-Helm project. If you would like to check the current [tags](https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags) or the [security](https://quay.io/repository/attcomdev/kube-controller-manager/image/eedc2bf21cca5647a26e348ee3427917da8b17c25ead38e832e1ed7c2ef1b1fd?tab=vulnerabilities) of these pre-built containers, you may view them at [our public Quay container registry](https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags). If you would prefer to build this container yourself, or add any additional packages, you are free to use our GitHub [dockerfiles](https://github.com/att-comdev/dockerfiles/tree/master/kube-controller-manager) repository to do so.
To make these changes, export your Kubernetes version, and edit the `image` line of your `kube-controller-manager` json manifest on your Kubernetes Master:
```
admin@kubenode01:~$ export kube_version=v1.5.1
admin@kubenode01:~$ sed -i "s|gcr.io/google_containers/kube-controller-manager-amd64:${kube_version}|quay.io/attcomdev/kube-controller-manager:${kube_version}|g" /etc/kubernetes/manifests/kube-controller-manager.json
```
Now you will want to restart your Kubernetes master server so the change takes effect.
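If a full reboot is not desirable, restarting the kubelet is usually enough for the static pod to be recreated with the new image (a sketch, assuming a systemd-managed kubelet as installed by `kubeadm` on Ubuntu 16.04):
```
# restart the kubelet so the kube-controller-manager static pod is recreated
admin@kubenode01:~$ sudo systemctl restart kubelet
# confirm the controller-manager is running again
admin@kubenode01:~$ kubectl get pods -n kube-system -o wide | grep kube-controller-manager
```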
### Ceph Secrets Generation
Another thing of interest is that our deployment assumes that you can generate secrets at the time of the container deployment. We require the [`sigil`](https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz) binary on your deployment host in order to perform this action.
```
admin@kubenode01:~$ curl -L https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz | tar -zxC /usr/local/bin
```
### Kube Controller Manager DNS Resolution
Until the following [Kubernetes issue](https://github.com/kubernetes/kubernetes/issues/17406) is resolved, you will need to allow the Kubernetes Controller to use the internal container `skydns` endpoint as a DNS server, and add the Kubernetes search suffix into the controller's resolv.conf. As of now, the Kubernetes controller only mirrors the host's `resolv.conf`. This is not sufficient if you want the controller to correctly resolve container service endpoints (in the case of DaemonSets).
First, find out what the IP Address of your `kube-dns` deployment is:
```
admin@kubenode01:~$ kubectl get svc kube-dns --namespace=kube-system
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns 10.96.0.10 <none> 53/UDP,53/TCP 1d
admin@kubenode01:~$
```
As you can see in this example, `10.96.0.10` is the `CLUSTER-IP`. Now have a look at the current `/etc/resolv.conf` of `kube-controller-manager-kubenode01`:
```
admin@kubenode01:~$ kubectl exec kube-controller-manager-kubenode01 -n kube-system -- cat /etc/resolv.conf
# Dynamic resolv.conf(5) file for glibc resolver(3) generated by resolvconf(8)
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
nameserver 192.168.1.70
nameserver 8.8.8.8
search jinkit.com
admin@kubenode01:~$
```
What we need is for the `/etc/resolv.conf` of `kube-controller-manager-kubenode01` to look like this:
```
admin@kubenode01:~$ kubectl exec kube-controller-manager-kubenode01 -n kube-system -- cat /etc/resolv.conf
nameserver 10.96.0.10
nameserver 192.168.1.70
nameserver 8.8.8.8
search svc.cluster.local jinkit.com
admin@kubenode01:~$
```
You can change this by doing the following:
```
admin@kubenode01:~$ kubectl exec kube-controller-manager-kubenode01 -it -n kube-system -- /bin/bash
root@kubenode01:/# cat <<EOF > /etc/resolv.conf
nameserver 10.96.0.10
nameserver 192.168.1.70
nameserver 8.8.8.8
search svc.cluster.local jinkit.com
EOF
root@kubenode01:/#
```
Now you can test your changes by deploying a service to your cluster and resolving it from the controller. As an example, let's deploy something useful, like the [Kubernetes dashboard](https://github.com/kubernetes/dashboard):
```
admin@kubenode01:~$ kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml
```
Note the `IP` field:
```
admin@kubenode01:~$ kubectl describe svc kubernetes-dashboard -n kube-system
Name: kubernetes-dashboard
Namespace: kube-system
Labels: app=kubernetes-dashboard
Selector: app=kubernetes-dashboard
Type: NodePort
IP: 10.110.207.144
Port: <unset> 80/TCP
NodePort: <unset> 32739/TCP
Endpoints: 10.25.178.65:9090
Session Affinity: None
No events.
admin@kubenode01:~$
```
Now you should be able to resolve the host `kubernetes-dashboard.kube-system.svc.cluster.local`:
```
bjozsa@kubenode01:~$ kubectl exec kube-controller-manager-kubenode01 -it -n kube-system -- ping kubernetes-dashboard.kube-system.svc.cluster.local
PING kubernetes-dashboard.kube-system.svc.cluster.local (10.110.207.144) 56(84) bytes of data.
...
...
bjozsa@kubenode01:~$
```
(Note: the image used in the example above has `iputils-ping` installed.)
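If the image you are using does not ship `ping`, the DNS lookup itself can still be checked with `getent` (a sketch, assuming a glibc- or busybox-based image that provides `getent`):
```
# should resolve to the service IP shown above (10.110.207.144 in this example)
admin@kubenode01:~$ kubectl exec kube-controller-manager-kubenode01 -n kube-system -- getent hosts kubernetes-dashboard.kube-system.svc.cluster.local
```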
Now we can continue with the deployment.
## Openstack-Helm Installation
Before you begin, make sure you have read and understand the project [Requirements](Requirements).
You can start openstack-helm fairly quickly. Assuming the requirements above are met, you can install the charts in a layered approach. The OpenStack parent chart, which installs all OpenStack services, is a work in progress and is simply a one-stack convenience. For now, please install each individual service chart as noted below.
Note that the ```bootstrap``` chart is meant to be installed in every namespace you plan to use. It helps install required secrets.
If any `helm install` step fails, you can back it out with ```helm delete --purge <releaseName>```.
Make sure sigil is installed to perform the ceph secret generation, as noted in the [Requirements](Requirements).
```
# label all known nodes as candidates for pods
kubectl label nodes openstack-control-plane=enabled --all
kubectl label nodes ceph-storage=enabled --all
# move into the openstack-helm directory
cd openstack-helm
# set your network CIDR -- these values are only appropriate for Calico
# and may differ in your environment; using the example above,
# 10.25.0.0/16 avoids overlap with the 192.168.0.0/16 host range
export osd_cluster_network=10.25.0.0/16
export osd_public_network=10.25.0.0/16
# on every node that will receive ceph instances,
# create some local directories used as nodeDirs
# for persistent storage
mkdir -p /var/lib/openstack-helm/ceph
# generate secrets (ceph, etc.)
cd common/utils/secret-generator
./generate_secrets.sh all `./generate_secrets.sh fsid`
cd ../../..
# now you are ready to build openstack-helm
helm serve . &
helm repo add local http://localhost:8879/charts
make
# install ceph
helm install --name=ceph local/ceph --namespace=ceph
# bootstrap the openstack namespace for chart installation
helm install --name=bootstrap local/bootstrap --namespace=openstack
# install mariadb
helm install --name=mariadb local/mariadb --namespace=openstack
# install rabbitmq/memcache
helm install --name=memcached local/memcached --namespace=openstack
helm install --name=rabbitmq local/rabbitmq --namespace=openstack
# install keystone
helm install --name=keystone local/keystone --namespace=openstack
# install horizon
helm install --name=horizon local/horizon --namespace=openstack
# install glance
helm install --name=glance local/glance --namespace=openstack
# ensure all services enter a running state, with the exception
# of the jobs/glance-post job and the ceph rgw containers, due
# to outstanding issues
watch kubectl get all --namespace=openstack
```
You should now be able to access Horizon at `http://<horizon-svc-ip>` using the credentials admin/password.
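To find the service IP to use, you can query the horizon service in the openstack namespace (a sketch, assuming the chart exposes a service named `horizon`):
```
# note the cluster IP (and node port, if one is assigned) of the horizon service
admin@kubenode01:~$ kubectl get svc horizon --namespace=openstack
```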


@@ -7,34 +7,34 @@ data:
#!/bin/bash
set -ex
ansible localhost -vvv -m kolla_keystone_service -a 'service_name=glance \
ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \
service_type=image \
description="Openstack Image" \
endpoint_region="{{ .Values.keystone.glance_region_name }}" \
url="http://glance-api:{{ .Values.network.port.api }}" \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='http://glance-api:{{ .Values.network.port.api }}' \
interface=admin \
region_name="{{ .Values.keystone.admin_region_name }}" \
auth="{# openstack_glance_auth #}"' \
-e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }"
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
-e "{'openstack_glance_auth': {{ include "keystone_auth" . }}}"
ansible localhost -vvv -m kolla_keystone_service -a 'service_name=glance \
ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \
service_type=image \
description="Openstack Image" \
endpoint_region="{{ .Values.keystone.glance_region_name }}" \
url="http://glance-api:{{ .Values.network.port.api }}" \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='http://glance-api:{{ .Values.network.port.api }}' \
interface=internal \
region_name="{{ .Values.keystone.admin_region_name }}" \
auth="{# openstack_glance_auth #}"' \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
-e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }"
ansible localhost -vvv -m kolla_keystone_service -a 'service_name=glance \
ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \
service_type=image \
description="Openstack Image" \
endpoint_region="{{ .Values.keystone.glance_region_name }}" \
url="http://glance-api:{{ .Values.network.port.api }}" \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='http://glance-api:{{ .Values.network.port.api }}' \
interface=public \
region_name="{{ .Values.keystone.admin_region_name }}" \
auth="{# openstack_glance_auth #}"' \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
-e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }"
ansible localhost -vvv -m kolla_keystone_user -a "project=service \
@@ -42,5 +42,5 @@ data:
password={{ .Values.keystone.glance_password }} \
role=admin \
region_name={{ .Values.keystone.admin_region_name }} \
auth="{# openstack_glance_auth #}"' \
auth='{{ include "keystone_auth" . }}'" \
-e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }"


@@ -44,6 +44,9 @@ spec:
- name: postsh
mountPath: /tmp/post.sh
subPath: post.sh
env:
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
volumes:
- name: postsh
configMap: