Merge "use kubeadm to reduce complexity of installing k8s"
commit 88ebbcf77f

@@ -13,21 +13,20 @@ Will install a single controller node and two compute nodes by default, can incr
## Prep

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- Upload a CoreOS image to glance. [Instructions Here](https://coreos.com/os/docs/latest/booting-on-openstack.html)
- Upload an Ubuntu Xenial or CentOS 7 image to glance.

## Terraform

Terraform will be used to declare and provision a Kubernetes cluster. By default it will be a single controller with a single compute node. You can add more nodes by adjusting the `compute_workers` variable.
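For example, the worker count can be overridden at apply time. A minimal sketch, assuming the variable is the `compute_count` defined in `variables.tf` (the prose above calls it `compute_workers`, so check the variables file in your checkout):

```
$ terraform apply -var "compute_count=3"
```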

The compute workers (for now) do not have a floating ip, this means to `ssh` to them you must `ssh -A` to the controller node first.
The compute workers do not have a floating ip by default, which means that to `ssh` to them you must use the controller node as a bastion and forward your SSH agent through.
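A minimal sketch of that workflow, assuming your agent is already loaded and using placeholder addresses (`ubuntu` is the default `ssh_user`; substitute `centos` for CentOS images):

```
$ ssh -A ubuntu@<controller-floating-ip>   # controller node acts as the bastion
$ ssh <compute-private-ip>                 # hop from the controller to a worker
```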

### Prep

Ensure your local ssh-agent is running and your ssh key has been added. This step is required by the terraform provisioner.

```
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
$ eval $(ssh-agent -s); ssh-add ~/.ssh/id_rsa
```

Ensure that you have your OpenStack credentials loaded into environment variables, likely via a command similar to:
@@ -36,15 +35,11 @@ Ensure that you have your Openstack credentials loaded into environment variable

$ source ~/.stackrc
```

Edit the terraform.tfvars file to put the name of your CoreOS image, OpenStack network names, etc. You'll also set the Kubernetes versions there. For the hyperkube version, you need to use the tags [here](https://quay.io/repository/coreos/hyperkube?tab=tags).
Edit the terraform.tfvars file to put the name of your Ubuntu/CentOS image, OpenStack network names, etc. If you use CentOS you will also have to change `ssh_user` to `centos`.
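As a rough sketch, a filled-in terraform.tfvars might look like the following. The values mirror the sample terraform.tfvars later in this change and are only illustrative; `compute_count` is the worker-count variable from variables.tf:

```
kubernetes_image = "ubuntu-xenial"
ssh_user         = "ubuntu"
cluster_name     = "kubestack-testing"
public_key_path  = "~/.ssh/id_rsa.pub"
network_name     = "internal"
floatingip_pool  = "external"
compute_count    = 2
```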

### Provision the Kubernetes Cluster

If you wish to re-use previously generated SSL key/certs for CA and admin, simply add `-var "generate_ssl=0" \`.

It can take some time for the `kubernetes-api` to come online. Do not be surprised if you see a series of failed `curl` commands; this is just the `terraform` provisioning script waiting until it can access the API before moving on.

```
$ cd terraform
$ export MY_IP=$(curl -s icanhazip.com)
@@ -82,7 +77,7 @@ State path: terraform.tfstate

Outputs:

kubernetes-controller = $ ssh -A core@xx.xx.xx.xx
kubernetes-controller = $ ssh -A ubuntu@xx.xx.xx.xx
```

## Next Steps
@@ -90,65 +85,56 @@ Outputs:

### Check it's up

```
$ ssh -A core@xx.xx.xx.xx

$ kubectl config view
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://127.0.0.1:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: admin
  name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users:
- name: admin
  user:
    token: kubernetes
$ ssh -A ubuntu@xx.xx.xx.xx

$ kubectl get nodes
NAME                            STATUS    AGE
192.168.3.197                   Ready     1m
192.168.3.198                   Ready     11s
```
kubestack-testing-compute0      Ready     8s
kubestack-testing-compute1      Ready     6s
kubestack-testing-controller0   Ready     2m

### Run a container
$ kubectl get pods --all-namespaces

```
$ kubectl run my-nginx --image=nginx --replicas=1 --port=80
replicationcontroller "my-nginx" created

$ kubectl expose rc my-nginx --port=80 --type=LoadBalancer
service "my-nginx" exposed

$ kubectl get svc my-nginx
NAME       CLUSTER_IP      EXTERNAL_IP   PORT(S)   SELECTOR       AGE
my-nginx   10.200.43.104                 80/TCP    run=my-nginx   6s
### Run a demo application

$ kubectl get pods
NAME             READY     STATUS    RESTARTS   AGE
my-nginx-k1zoe   1/1       Running   0          1m
```
$ git clone https://github.com/microservices-demo/microservices-demo
$ kubectl apply \
    -f microservices-demo/deploy/kubernetes/manifests/sock-shop-ns.yml \
    -f microservices-demo/deploy/kubernetes/manifests

$ curl 10.200.43.104
$ kubectl describe svc front-end -n sock-shop
Name:              front-end
Namespace:         sock-shop
Labels:            name=front-end
Selector:          name=front-end
Type:              NodePort
IP:                100.79.5.35
Port:              <unset>  80/TCP
NodePort:          <unset>  30768/TCP
Endpoints:         10.36.0.3:8079
Session Affinity:  None
```

Once it's online you can browse to it via the IP of the controller node, or via the endpoint if you're on the k8s controller.

```
$ curl -s 10.36.0.3:8079 | head
<!DOCTYPE html>
<html>
<html lang="en">

<head>
<title>Welcome to nginx!</title>

<meta charset="utf-8">
<meta name="robots" content="all,follow">
<meta name="googlebot" content="index,follow,snippet,archive">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="WeaveSocks Demo App">
(23) Failed writing body

$ kubectl delete rc my-nginx
replicationcontroller "my-nginx" deleted

$ kubectl delete svc my-nginx
service "my-nginx" deleted
```
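Browsing via the controller node goes through the service's NodePort. A rough sketch, assuming the `30768` NodePort reported by `kubectl describe svc front-end` above, the controller's floating IP in place of the placeholder, and security-group rules that allow that port:

```
$ curl -s http://<controller-floating-ip>:30768 | head
```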

### Install The Dashboard Addon

@@ -156,12 +142,18 @@ service "my-nginx" deleted

```
$ kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml

deployment "kubernetes-dashboard" created
You have exposed your service on an external port on all nodes in your
cluster. If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (tcp:32584) to serve traffic.
$ kubectl describe svc kubernetes-dashboard -n kube-system
Name:              kubernetes-dashboard
Namespace:         kube-system
Labels:            app=kubernetes-dashboard
Selector:          app=kubernetes-dashboard
Type:              NodePort
IP:                100.64.81.128
Port:              <unset>  80/TCP
NodePort:          <unset>  31149/TCP
Endpoints:         10.44.0.7:9090
Session Affinity:  None

See http://releases.k8s.io/release-1.2/docs/user-guide/services-firewalls.md for more details.

```
You can now access the dashboard from your whitelisted IP at:
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/ssl/compute.pem
    client-key: /etc/kubernetes/ssl/compute-key.pem
contexts:
- context:
    cluster: local
    user: kubelet
  name: kubelet-context
current-context: kubelet-context
@@ -1,23 +0,0 @@
[Unit]
Description=kubelet service
Requires=flanneld.service
After=flanneld.service

[Service]
Environment=KUBELET_VERSION=HYPERKUBE_VERSION
ExecStart=/usr/lib/coreos/kubelet-wrapper \
  --api_servers=https://CONTROLLER_HOST \
  --register-node=true \
  --allow-privileged=true \
  --config=/etc/kubernetes/manifests \
  --hostname-override=MY_IP \
  --cluster-dns=CLUSTER_DNS \
  --cluster-domain=cluster.local \
  --kubeconfig=/etc/kubernetes/compute-kubeconfig.yaml \
  --tls-cert-file=/etc/kubernetes/ssl/compute.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/compute-key.pem
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
@@ -1,37 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
    command:
    - /hyperkube
    - proxy
    - --master=https://CONTROLLER_HOST
    - --kubeconfig=/etc/kubernetes/compute-kubeconfig.yaml
    - --v=2
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: "ssl-certs"
    - mountPath: /etc/kubernetes/compute-kubeconfig.yaml
      name: "kubeconfig"
      readOnly: true
    - mountPath: /etc/kubernetes/ssl
      name: "etc-kube-ssl"
      readOnly: true
  volumes:
  - name: "ssl-certs"
    hostPath:
      path: "/usr/share/ca-certificates"
  - name: "kubeconfig"
    hostPath:
      path: "/etc/kubernetes/compute-kubeconfig.yaml"
  - name: "etc-kube-ssl"
    hostPath:
      path: "/etc/kubernetes/ssl"
@@ -1,45 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
    command:
    - /hyperkube
    - apiserver
    - --bind-address=0.0.0.0
    - --etcd-servers=http://127.0.0.1:2379
    - --allow-privileged=true
    - --service-cluster-ip-range=PORTAL_NET
    - --secure-port=443
    - --advertise-address=ADVERTISE_IP
    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
    - --tls-cert-file=/etc/kubernetes/ssl/controller.pem
    - --tls-private-key-file=/etc/kubernetes/ssl/controller-key.pem
    - --client-ca-file=/etc/kubernetes/ssl/ca.pem
    - --service-account-key-file=/etc/kubernetes/ssl/controller-key.pem
    ports:
    - containerPort: 443
      hostPort: 443
      name: https
    - containerPort: 8080
      hostPort: 8080
      name: local
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
@@ -1,37 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
    command:
    - /hyperkube
    - controller-manager
    - --master=http://127.0.0.1:8080
    - --service-account-private-key-file=/etc/kubernetes/ssl/controller-key.pem
    - --root-ca-file=/etc/kubernetes/ssl/ca.pem
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 1
    volumeMounts:
    - mountPath: /etc/kubernetes/ssl
      name: ssl-certs-kubernetes
      readOnly: true
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/ssl
    name: ssl-certs-kubernetes
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
@@ -1,20 +0,0 @@
[Unit]
Description=kubelet service
Requires=flanneld.service
After=flanneld.service

[Service]
Environment=KUBELET_VERSION=HYPERKUBE_VERSION
ExecStart=/usr/lib/coreos/kubelet-wrapper \
  --api_servers=http://127.0.0.1:8080 \
  --register-node=false \
  --allow-privileged=true \
  --config=/etc/kubernetes/manifests \
  --hostname-override=ADVERTISE_IP \
  --cluster-dns=CLUSTER_DNS \
  --cluster-domain=cluster.local
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
@@ -1,46 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-podmaster
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: scheduler-elector
    image: gcr.io/google_containers/podmaster:1.1
    command:
    - /podmaster
    - --etcd-servers=http://127.0.0.1:2379
    - --key=scheduler
    - --whoami=ADVERTISE_IP
    - --source-file=/src/manifests/kube-scheduler.yaml
    - --dest-file=/dst/manifests/kube-scheduler.yaml
    volumeMounts:
    - mountPath: /src/manifests
      name: manifest-src
      readOnly: true
    - mountPath: /dst/manifests
      name: manifest-dst
  - name: controller-manager-elector
    image: gcr.io/google_containers/podmaster:1.1
    command:
    - /podmaster
    - --etcd-servers=http://127.0.0.1:2379
    - --key=controller
    - --whoami=ADVERTISE_IP
    - --source-file=/src/manifests/kube-controller-manager.yaml
    - --dest-file=/dst/manifests/kube-controller-manager.yaml
    terminationMessagePath: /dev/termination-log
    volumeMounts:
    - mountPath: /src/manifests
      name: manifest-src
      readOnly: true
    - mountPath: /dst/manifests
      name: manifest-dst
  volumes:
  - hostPath:
      path: /srv/kubernetes/manifests
    name: manifest-src
  - hostPath:
      path: /etc/kubernetes/manifests
    name: manifest-dst
@@ -1,25 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
    command:
    - /hyperkube
    - proxy
    - --master=http://127.0.0.1:8080
    - --v=2
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
  volumes:
  - hostPath:
      path: /usr/share/ca-certificates
    name: ssl-certs-host
@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:8080
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 1
@@ -0,0 +1,36 @@
#!/bin/bash

if which apt > /dev/null; then
  echo "==> Detected Ubuntu"
  echo "----> Installing Kubernetes apt repo"
  curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
  cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
  apt-get -yq update > /dev/null
  echo "----> Installing Kubernetes requirements"
  apt-get install -yq docker.io kubelet kubeadm kubectl kubernetes-cni > /dev/null
elif which yum > /dev/null; then
  echo "==> Detected CentOS/RHEL"
  echo "----> Installing Kubernetes yum repo"
  cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
  echo "----> YOLO setenforce"
  setenforce 0

  yum install -y docker kubelet kubeadm kubectl kubernetes-cni > /dev/null
  systemctl enable docker && systemctl start docker
  systemctl enable kubelet && systemctl start kubelet
else
  echo "YOUR OPERATING SYSTEM IS NOT SUPPORTED"
  echo "MUST BE Ubuntu Xenial or Centos/Redhat 7"
  exit 1
fi
@@ -1,7 +0,0 @@
#!/bin/bash

openssl genrsa -out files/ssl/ca-key.pem 2048
openssl req -x509 -new -nodes -key files/ssl/ca-key.pem -days 10000 -out files/ssl/ca.pem -subj '/CN=kubernetes-ca'
openssl genrsa -out files/ssl/admin-key.pem 2048
openssl req -new -key files/ssl/admin-key.pem -out files/ssl/admin.csr -subj '/CN=kubernetes-admin' -config files/ssl/openssl.cnf
openssl x509 -req -in files/ssl/admin.csr -CA files/ssl/ca.pem -CAkey files/ssl/ca-key.pem -CAcreateserial -out files/ssl/admin.pem -days 365 -extfile files/ssl/openssl.cnf
@@ -1,8 +0,0 @@
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
@@ -0,0 +1,68 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: weave-net
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        name: weave-net
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      hostNetwork: true
      hostPID: true
      containers:
        - name: weave
          image: weaveworks/weave-kube:1.7.2
          command:
            - /home/weave/launch.sh
          livenessProbe:
            initialDelaySeconds: 30
            httpGet:
              host: 127.0.0.1
              path: /status
              port: 6784
          securityContext:
            privileged: true
          volumeMounts:
            - name: weavedb
              mountPath: /weavedb
            - name: cni-bin
              mountPath: /opt
            - name: cni-bin2
              mountPath: /host_home
            - name: cni-conf
              mountPath: /etc
          resources:
            requests:
              cpu: 10m
        - name: weave-npc
          image: weaveworks/weave-npc:1.7.2
          resources:
            requests:
              cpu: 10m
          securityContext:
            privileged: true
      restartPolicy: Always
      volumes:
        - name: weavedb
          emptyDir: {}
        - name: cni-bin
          hostPath:
            path: /opt
        - name: cni-bin2
          hostPath:
            path: /home
        - name: cni-conf
          hostPath:
            path: /etc
@@ -1,54 +1,12 @@
resource "null_resource" "discovery_url_template" {
  provisioner "local-exec" {
    command = "curl -s 'https://discovery.etcd.io/new?size=1' > templates/discovery_url"
  }
}

resource "null_resource" "generate_ssl" {
  count = "${var.generate_ssl}"
  provisioner "local-exec" {
    command = "bash files/ssl/generate-ssl.sh"
  }
}

resource "template_file" "discovery_url" {
  template = "${file("templates/discovery_url")}"
  depends_on = [
    "null_resource.discovery_url_template"
  ]
}

resource "template_file" "controller_cloud_init" {
  template = "${file("templates/cloud-init")}"
  vars {
    flannel_network = "${var.flannel_network}"
    flannel_backend = "${var.flannel_backend}"
    etcd_servers = "http://127.0.0.1:2379"
    cluster_token = "${var.cluster_name}"
    discovery_url = "${template_file.discovery_url.rendered}"
  }
}

resource "template_file" "compute_cloud_init" {
  template = "${file("templates/cloud-init")}"
  vars {
    flannel_network = "${var.flannel_network}"
    flannel_backend = "${var.flannel_backend}"
    etcd_servers = "${join(",", "${formatlist("http://%s:2379", openstack_compute_instance_v2.controller.*.network.0.fixed_ip_v4)}")}"
    cluster_token = "${var.cluster_name}"
    discovery_url = "${template_file.discovery_url.rendered}"
  }
}

resource "openstack_networking_floatingip_v2" "controller" {
  count = "1"
  pool = "${var.floatingip_pool}"
}

resource "openstack_networking_floatingip_v2" "compute" {
  count = "${var.compute_count}"
  pool = "${var.floatingip_pool}"
}
#resource "openstack_networking_floatingip_v2" "compute" {
#  count = "${var.compute_count}"
#  pool = "${var.floatingip_pool}"
#}

resource "openstack_compute_keypair_v2" "kubernetes" {
  name = "${var.project}"
@@ -69,60 +27,27 @@ resource "openstack_compute_instance_v2" "controller" {
    "${openstack_compute_secgroup_v2.kubernetes_controller.name}"
  ]
  floating_ip = "${element(openstack_networking_floatingip_v2.controller.*.address, count.index)}"
  user_data = "${template_file.controller_cloud_init.rendered}"
  #user_data = "${template_file.controller_cloud_init.rendered}"
  provisioner "file" {
    source = "files"
    destination = "/tmp/stage"
    connection {
      user = "core"
      user = "${var.ssh_user}"
    }
  }
  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /opt/bin",
      "sudo wget -q -O /opt/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${var.kubectl_version}/bin/linux/amd64/kubectl",
      "sudo chmod 0755 /opt/bin/kubectl",
      "sudo mkdir -p /etc/kubernetes/ssl",
      "cd /tmp/stage/ssl",
      "echo \"subjectAltName = @alt_names\n[alt_names]\" >> openssl.cnf",
      "echo DNS.1 = kubernetes >> openssl.cnf",
      "echo DNS.2 = kubernetes.local >> openssl.cnf",
      "echo DNS.3 = ${element(openstack_networking_floatingip_v2.controller.*.address, count.index)}.xip.io >> openssl.cnf",
      "echo 'IP.1 = ${element(openstack_networking_floatingip_v2.controller.*.address, count.index)}' >> openssl.cnf",
      "echo 'IP.3 = ${cidrhost(var.portal_net, count.index + 1)}' >> openssl.cnf",
      "echo 'IP.2 = ${self.network.0.fixed_ip_v4}' >> openssl.cnf",
      "openssl genrsa -out controller-key.pem 2048",
      "openssl req -new -key controller-key.pem -out controller.csr -subj '/CN=kubernetes-controller' -config openssl.cnf",
      "openssl x509 -req -in controller.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out controller.pem -days 365 -extensions v3_req -extfile openssl.cnf",
      "sudo mv controller*.pem /etc/kubernetes/ssl",
      "sudo mv admin*.pem /etc/kubernetes/ssl",
      "sudo mv ca.pem /etc/kubernetes/ssl",
      "sudo chown root:core /etc/kubernetes/ssl/*; sudo chmod 0640 /etc/kubernetes/ssl/*-key.pem",
      "sed -i 's/MY_IP/${self.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
      "sed -i 's/ADVERTISE_IP/${self.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
      "sed -i 's|PORTAL_NET|${var.portal_net}|' /tmp/stage/*/*",
      "sed -i 's|CLUSTER_DNS|${cidrhost(var.portal_net, 200)}|' /tmp/stage/*/*",
      "sed -i 's|HYPERKUBE_VERSION|${var.hyperkube_version}|' /tmp/stage/*/*",
      "sudo mkdir -p /etc/kubernetes/manifests",
      "sudo mv /tmp/stage/controller/*.yaml /etc/kubernetes/manifests/",
      "sudo mv /tmp/stage/controller/*.service /etc/systemd/system/",
      "sudo mv /tmp/stage/addons /etc/kubernetes/addons",
      "#rm -rf /tmp/stage",
      "sudo systemctl daemon-reload",
      "sudo systemctl restart docker",
      "sudo systemctl enable kube-kubelet",
      "sudo systemctl start kube-kubelet",
      "echo Wait until API comes online...",
      "while ! curl http://127.0.0.1:8080/version; do sleep 60; done",
      "curl -XPOST -d'{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"name\":\"kube-system\"}}' \"http://127.0.0.1:8080/api/v1/namespaces\"",
      "sudo bash /tmp/stage/install_kube.sh",
      "echo '----> Starting Kubernetes Controller'",
      "sudo kubeadm init --token ${var.kubernetes_token}",
      "echo '----> Installing Weave'",
      "kubectl apply -f https://git.io/weave-kube"
    ]
    connection {
      user = "core"
      user = "${var.ssh_user}"
    }
  }
  depends_on = [
    "template_file.controller_cloud_init",
    "null_resource.generate_ssl",
  ]
}

@@ -131,7 +56,7 @@ resource "openstack_compute_instance_v2" "compute" {
  count = "${var.compute_count}"
  image_name = "${var.kubernetes_image}"
  flavor_name = "${var.kubernetes_flavor}"
  floating_ip = "${element(openstack_networking_floatingip_v2.compute.*.address, count.index)}"
  #floating_ip = "${element(openstack_networking_floatingip_v2.compute.*.address, count.index)}"
  key_pair = "${openstack_compute_keypair_v2.kubernetes.name}"
  network {
    name = "${var.network_name}"
@@ -140,81 +65,30 @@ resource "openstack_compute_instance_v2" "compute" {
    "${openstack_compute_secgroup_v2.kubernetes_base.name}",
    "${openstack_compute_secgroup_v2.kubernetes_compute.name}"
  ]
  user_data = "${template_file.compute_cloud_init.rendered}"
  provisioner "file" {
    source = "files"
    destination = "/tmp/stage"
    connection {
      user = "core"
      user = "${var.ssh_user}"
      bastion_host = "${openstack_networking_floatingip_v2.controller.0.address}"
    }
  }
  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /opt/bin",
      "sudo wget -q -O /opt/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${var.kubectl_version}/bin/linux/amd64/kubectl",
      "sudo chmod 0755 /opt/bin/kubectl",
      "sudo mkdir -p /etc/kubernetes/ssl",
      "cd /tmp/stage/ssl",
      "echo \"subjectAltName = @alt_names\n[alt_names]\" >> openssl.cnf",
      "echo 'IP.1 = ${self.network.0.fixed_ip_v4}' >> openssl.cnf",
      "openssl genrsa -out compute-key.pem 2048",
      "openssl req -new -key compute-key.pem -out compute.csr -subj '/CN=kubernetes-compute' -config openssl.cnf",
      "openssl x509 -req -in compute.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out compute.pem -days 365 -extensions v3_req -extfile openssl.cnf",
      "sudo mv compute-key.pem /etc/kubernetes/ssl",
      "sudo mv compute.pem /etc/kubernetes/ssl",
      "sudo mv ca.pem /etc/kubernetes/ssl",
      "sudo chown root:core /etc/kubernetes/ssl/*; sudo chmod 0640 /etc/kubernetes/ssl/*-key.pem",
      "sed -i 's/MY_IP/${self.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
      "sed -i 's/ADVERTISE_IP/${element(openstack_networking_floatingip_v2.compute.*.address, count.index)}/' /tmp/stage/*/*",
      "sed -i 's/CONTROLLER_HOST/${openstack_compute_instance_v2.controller.0.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
      "sed -i 's|PORTAL_NET|${var.portal_net}|' /tmp/stage/*/*",
      "sed -i 's|CLUSTER_DNS|${cidrhost(var.portal_net, 200)}|' /tmp/stage/*/*",
      "sed -i 's|HYPERKUBE_VERSION|${var.hyperkube_version}|' /tmp/stage/*/*",
      "sudo mkdir -p /etc/kubernetes/manifests",
      "sudo mv /tmp/stage/compute/*.yaml /etc/kubernetes/manifests/",
      "sudo mv /tmp/stage/compute/*.service /etc/systemd/system/",
      "sudo mv /tmp/stage/compute/compute-kubeconfig.yaml.config /etc/kubernetes/compute-kubeconfig.yaml",
      "#rm -rf /tmp/stage",
      "sudo systemctl daemon-reload",
      "sudo systemctl restart docker",
      "sudo systemctl enable kube-kubelet",
      "sudo systemctl start kube-kubelet",
      "sudo bash /tmp/stage/install_kube.sh",
      "echo '----> Joining K8s Controller'",
      "sudo kubeadm join --token ${var.kubernetes_token} ${openstack_compute_instance_v2.controller.0.network.0.fixed_ip_v4}"
    ]
    connection {
      user = "core"
      user = "${var.ssh_user}"
      bastion_host = "${openstack_networking_floatingip_v2.controller.0.address}"
    }
  }
  depends_on = [
    "template_file.compute_cloud_init",
    "openstack_compute_instance_v2.controller"
  ]
}

resource "null_resource" "controller" {
  provisioner "remote-exec" {
    inline = [
      "/opt/bin/kubectl config set-cluster ${var.cluster_name} --certificate-authority=/etc/kubernetes/ssl/ca.pem \\",
      " --server=https://${openstack_compute_instance_v2.controller.0.network.0.fixed_ip_v4}:443",
      "/opt/bin/kubectl config set-credentials ${var.kubernetes_user} \\",
      " --certificate-authority=/etc/kubernetes/ssl/ca.pem \\",
      " --client-key=/etc/kubernetes/ssl/admin-key.pem \\",
      " --client-certificate=/etc/kubernetes/ssl/admin.pem",
      "/opt/bin/kubectl config set-context ${var.kubernetes_user} --cluster=${var.cluster_name} --user=${var.kubernetes_user}",
      "/opt/bin/kubectl config set-context kubernetes --cluster=${var.cluster_name} --user=${var.kubernetes_user}",
      "/opt/bin/kubectl config set current-context kubernetes",
      "/opt/bin/kubectl create namespace kube-system",
    ]
    connection {
      user = "core"
      host = "${openstack_networking_floatingip_v2.controller.0.address}"
    }
  }
  depends_on = [
    "openstack_compute_instance_v2.controller",
    "openstack_compute_instance_v2.compute",
  ]
}

output "kubernetes-controller" {
  value = "$ ssh -A core@${openstack_networking_floatingip_v2.controller.0.address}"
  value = "$ ssh -A ${var.ssh_user}@${openstack_networking_floatingip_v2.controller.0.address}"
}

@@ -1,27 +0,0 @@
#cloud-config

coreos:
  units:
    - name: etcd.service
      mask: true
    - name: etcd2.service
      command: start
    - name: flanneld.service
      command: start
      drop-ins:
        - name: 50-network-config.conf
          content: |
            [Service]
            ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "${flannel_network}", "Backend": { "Type": "${flannel_backend}"} }'
  flannel:
    interface: $private_ipv4
  etcd2:
    discovery: ${discovery_url}
    advertise-client-urls: http://$private_ipv4:2379
    initial-advertise-peer-urls: http://$private_ipv4:2380
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
    data-dir: /var/lib/etcd2
    initial-cluster-token: ${cluster_token}
  update:
    reboot-strategy: "off"
@@ -1,10 +1,6 @@
flannel_backend = "vxlan"
flannel_network = "10.10.0.0/16"
kubernetes_image = "coreos-stable-1010-6-0"
portal_net = "10.200.0.0/16"
kubernetes_image = "ubuntu-xenial"
ssh_user = "ubuntu"
cluster_name = "kubestack-testing"
kubectl_version = "v1.2.4"
hyperkube_version = "v1.2.4_coreos.1"
public_key_path = "~/.ssh/id_rsa.pub"
network_name = "internal"
floatingip_pool = "external"

@@ -1,19 +1,11 @@
variable "flannel_backend" {
  default = "vxlan"
}

variable "flannel_network" {
  default = "10.10.0.0/16"
}

variable "kubernetes_image" {
  default = "kubernetes"
  default = "ubuntu-xenial"
}

variable "project" {}

variable "portal_net" {
  default = "10.200.0.0/16"
variable "ssh_user" {
  default = "ubuntu"
}

variable "compute_count" {
@@ -37,11 +29,7 @@ variable "kubernetes_flavor" {
}

variable "kubernetes_token" {
  default = "kubernetes"
}

variable "kubernetes_user" {
  default = "admin"
  default = "5aa3f9.c7acfa51e41f1f7d"
}

variable "username" {
@@ -69,18 +57,3 @@ variable "whitelist_network" {
  description = "network to allow connectivity from"
  default = "0.0.0.0/0"
}

variable "kubectl_version" {
  description = "Version of kubectl binary to download"
  default = "v1.1.2"
}

variable "hyperkube_version" {
  description = "Version of the hyperkube container to use"
  default = "v1.1.2"
}

variable "generate_ssl" {
  description = "set to 1 to regenerate SSL certificates/keys"
  default = 1
}