Fixes reference to wrong file

1) Updated file references to remove deprecated Terraform option
2) Fixed the ADVERTISE_IP var to use the OpenStack private network IP instead of the floating IP; the proxy on compute nodes could not reach it via the FIP
3) Added missing sed line for HYPERKUBE_VERSION on compute nodes

Changed to allow all TCP ports from the whitelisted IP instead of just 22

Updated image reference and added overrides for kubectl and hyperkube versions

Remove Outdated Add-ons

Updated the kubelet service definition to use the newer kubelet-wrapper in CoreOS. Updated image references to use the CoreOS-specific ones found on quay.io

Fixed Quay-based hyperkube versioning

typo

typo - missing LR

Fixed wrong service account key

Updated README as well

md formatting fix

Change-Id: I4faaf00319c332d15748f17ebda7d9b8306d7716
Tyler Britten 2016-07-05 13:45:51 -04:00 committed by Tyler Britten
parent e3b5bc9634
commit 1a183467e7
15 changed files with 48 additions and 209 deletions

View File

@@ -13,7 +13,7 @@ Will install a single controller node and two compute nodes by default, can incr
## Prep
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- Upload a CoreOS image to glance.
- Upload a CoreOS image to glance. [Instructions Here](https://coreos.com/os/docs/latest/booting-on-openstack.html)
## Terraform
@@ -36,6 +36,9 @@ Ensure that you have your Openstack credentials loaded into environment variable
$ source ~/.stackrc
```
Edit the terraform.tfvars file to set the name of your CoreOS image, your OpenStack network names, and so on. You'll also set the Kubernetes versions there; for the hyperkube version, use one of the tags listed [here](https://quay.io/repository/coreos/hyperkube?tab=tags).
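A minimal example of those settings, copied from the sample terraform.tfvars shown further down in this change (substitute your own Glance image name and the versions you want):
```
kubernetes_image  = "coreos-stable-1010-6-0"
kubectl_version   = "v1.2.4"
hyperkube_version = "v1.2.4_coreos.1"
```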
### Provision the Kubernetes Cluster
If you wish to re-use previously generated SSL keys/certs for the CA and admin, add `-var "generate_ssl=0" \` to the command.
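A minimal sketch of such a run, assuming the previously generated certs sit next to the Terraform state and re-using the `whitelist_network` variable from the full command below:
```
$ terraform apply \
  -var "generate_ssl=0" \
  -var "whitelist_network=${MY_IP}/32"
```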
@@ -68,7 +71,7 @@ $ terraform apply \
-var "whitelist_network=${MY_IP}/32"
...
...
Apply complete! Resources: 12 added, 0 changed, 0 destroyed.
Apply complete! Resources: 16 added, 0 changed, 0 destroyed.
The state of your infrastructure has been saved to the path
below. This state is required to modify and destroy your
@@ -89,8 +92,6 @@ Outputs:
```
$ ssh -A core@xx.xx.xx.xx
$ kubectl config use-context kubernetes
switched to context "kubernetes".
$ kubectl config view
apiVersion: v1
@@ -112,9 +113,10 @@ users:
user:
token: kubernetes
$ kubectl get nodes
NAME LABELS STATUS AGE
10.230.7.23 kubernetes.io/hostname=10.230.7.23 Ready 5m
$ kubectl get nodes
NAME STATUS AGE
192.168.3.197 Ready 1m
192.168.3.198 Ready 11s
```
@@ -149,18 +151,22 @@ $ kubectl delete svc my-nginx
service "my-nginx" deleted
```
### Install some addons
### Install The Dashboard Addon
```
$ kubectl create -f /etc/kubernetes/addons/kube-ui-rc.yaml \
--namespace=kube-system
$ kubectl create -f /etc/kubernetes/addons/kube-ui-svc.yaml \
--namespace=kube-system
$ kubectl create -f /etc/kubernetes/addons/kube-dns-rc.yaml \
--namespace=kube-system
$ kubectl create -f /etc/kubernetes/addons/kube-dns-svc.yaml \
--namespace=kube-system
$ kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml
deployment "kubernetes-dashboard" created
You have exposed your service on an external port on all nodes in your
cluster. If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (tcp:32584) to serve traffic.
See http://releases.k8s.io/release-1.2/docs/user-guide/services-firewalls.md for more details.
```
You can now access the dashboard from your whitelisted IP at:
```http://<controller public ip>:<service port>```
The service port is printed when you create the dashboard; in the example above it was 32584.
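If you missed the port in the creation output, you can look it up from the dashboard's NodePort service (assuming the upstream manifest's default service name, `kubernetes-dashboard`):
```
$ kubectl --namespace=kube-system get svc kubernetes-dashboard
```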
### Destroy the cluster
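The prompt and output below come from running `terraform destroy` against the saved state; depending on how you supplied variables, you may need to repeat the same `-var` flags used for `apply`:
```
$ terraform destroy \
  -var "whitelist_network=${MY_IP}/32"
```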
@@ -183,5 +189,5 @@ Do you really want to destroy?
openstack_compute_secgroup_v2.kubernetes_controller: Destruction complete
openstack_compute_secgroup_v2.kubernetes_internal: Destruction complete
Apply complete! Resources: 0 added, 0 changed, 12 destroyed.
Apply complete! Resources: 0 added, 0 changed, 16 destroyed.
```

View File

@@ -4,8 +4,8 @@ resource "openstack_compute_secgroup_v2" "kubernetes_controller" {
description = "kubernetes Controller Security Group"
rule {
ip_protocol = "tcp"
from_port = "443"
to_port = "443"
from_port = "1"
to_port = "65535"
cidr = "${var.whitelist_network}"
}
rule {

View File

@@ -1,98 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v9
namespace: kube-system
labels:
k8s-app: kube-dns
version: v9
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-dns
version: v9
template:
metadata:
labels:
k8s-app: kube-dns
version: v9
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: etcd
image: gcr.io/google_containers/etcd:2.0.9
resources:
limits:
cpu: 100m
memory: 50Mi
command:
- /usr/local/bin/etcd
- -data-dir
- /var/etcd/data
- -listen-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -advertise-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -initial-cluster-token
- skydns-etcd
volumeMounts:
- name: etcd-storage
mountPath: /var/etcd/data
- name: kube2sky
image: gcr.io/google_containers/kube2sky:1.11
resources:
limits:
cpu: 100m
memory: 50Mi
args:
# command = "/kube2sky"
- -domain=cluster.local
- name: skydns
image: gcr.io/google_containers/skydns:2015-03-11-001
resources:
limits:
cpu: 100m
memory: 50Mi
args:
# command = "/skydns"
- -machines=http://localhost:4001
- -addr=0.0.0.0:53
- -domain=cluster.local.
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 1
timeoutSeconds: 5
- name: healthz
image: gcr.io/google_containers/exechealthz:1.0
resources:
limits:
cpu: 10m
memory: 20Mi
args:
- -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null
- -port=8080
ports:
- containerPort: 8080
protocol: TCP
volumes:
- name: etcd-storage
emptyDir: {}
dnsPolicy: Default

View File

@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: CLUSTER_DNS
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP

View File

@@ -1,40 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-ui-v4
namespace: kube-system
labels:
k8s-app: kube-ui
version: v4
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-ui
version: v4
template:
metadata:
labels:
k8s-app: kube-ui
version: v4
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kube-ui
image: gcr.io/google_containers/kube-ui:v4
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
ports:
- containerPort: 8080
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 30
timeoutSeconds: 5

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-ui
namespace: kube-system
labels:
k8s-app: kube-ui
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeUI"
spec:
selector:
k8s-app: kube-ui
ports:
- port: 80
targetPort: 8080

View File

@@ -4,7 +4,8 @@ Requires=flanneld.service
After=flanneld.service
[Service]
ExecStart=/usr/bin/kubelet \
Environment=KUBELET_VERSION=HYPERKUBE_VERSION
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--api_servers=https://CONTROLLER_HOST \
--register-node=true \
--allow-privileged=true \

View File

@@ -7,7 +7,7 @@ spec:
hostNetwork: true
containers:
- name: kube-proxy
image: gcr.io/google_containers/hyperkube:HYPERKUBE_VERSION
image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
command:
- /hyperkube
- proxy

View File

@@ -7,7 +7,7 @@ spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: gcr.io/google_containers/hyperkube:HYPERKUBE_VERSION
image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
command:
- /hyperkube
- apiserver
@@ -17,11 +17,11 @@ spec:
- --service-cluster-ip-range=PORTAL_NET
- --secure-port=443
- --advertise-address=ADVERTISE_IP
#- --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
- --tls-cert-file=/etc/kubernetes/ssl/controller.pem
- --tls-private-key-file=/etc/kubernetes/ssl/controller-key.pem
- --client-ca-file=/etc/kubernetes/ssl/ca.pem
- --service-account-key-file=/etc/kubernetes/ssl/admin-key.pem
- --service-account-key-file=/etc/kubernetes/ssl/controller-key.pem
ports:
- containerPort: 443
hostPort: 443

View File

@@ -7,12 +7,12 @@ spec:
hostNetwork: true
containers:
- name: kube-controller-manager
image: gcr.io/google_containers/hyperkube:HYPERKUBE_VERSION
image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
command:
- /hyperkube
- controller-manager
- --master=http://127.0.0.1:8080
- --service-account-private-key-file=/etc/kubernetes/ssl/controller.pem
- --service-account-private-key-file=/etc/kubernetes/ssl/controller-key.pem
- --root-ca-file=/etc/kubernetes/ssl/ca.pem
livenessProbe:
httpGet:

View File

@@ -4,7 +4,8 @@ Requires=flanneld.service
After=flanneld.service
[Service]
ExecStart=/usr/bin/kubelet \
Environment=KUBELET_VERSION=HYPERKUBE_VERSION
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--api_servers=http://127.0.0.1:8080 \
--register-node=false \
--allow-privileged=true \

View File

@@ -7,7 +7,7 @@ spec:
hostNetwork: true
containers:
- name: kube-proxy
image: gcr.io/google_containers/hyperkube:HYPERKUBE_VERSION
image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
command:
- /hyperkube
- proxy

View File

@@ -7,7 +7,7 @@ spec:
hostNetwork: true
containers:
- name: kube-scheduler
image: gcr.io/google_containers/hyperkube:HYPERKUBE_VERSION
image: quay.io/coreos/hyperkube:HYPERKUBE_VERSION
command:
- /hyperkube
- scheduler

View File

@@ -12,14 +12,14 @@ resource "null_resource" "generate_ssl" {
}
resource "template_file" "discovery_url" {
template = "templates/discovery_url"
template = "${file("templates/discovery_url")}"
depends_on = [
"null_resource.discovery_url_template"
]
}
resource "template_file" "controller_cloud_init" {
template = "templates/cloud-init"
template = "${file("templates/cloud-init")}"
vars {
flannel_network = "${var.flannel_network}"
flannel_backend = "${var.flannel_backend}"
@@ -30,8 +30,8 @@ resource "template_file" "controller_cloud_init" {
}
resource "template_file" "compute_cloud_init" {
template = "templates/cloud-init"
vars {
template = "${file("templates/cloud-init")}"
vars {
flannel_network = "${var.flannel_network}"
flannel_backend = "${var.flannel_backend}"
etcd_servers = "${join(",", "${formatlist("http://%s:2379", openstack_compute_instance_v2.controller.*.network.0.fixed_ip_v4)}")}"
@@ -99,7 +99,7 @@ resource "openstack_compute_instance_v2" "controller" {
"sudo mv ca.pem /etc/kubernetes/ssl",
"sudo chown root:core /etc/kubernetes/ssl/*; sudo chmod 0640 /etc/kubernetes/ssl/*-key.pem",
"sed -i 's/MY_IP/${self.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
"sed -i 's/ADVERTISE_IP/${element(openstack_networking_floatingip_v2.controller.*.address, count.index)}/' /tmp/stage/*/*",
"sed -i 's/ADVERTISE_IP/${self.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
"sed -i 's|PORTAL_NET|${var.portal_net}|' /tmp/stage/*/*",
"sed -i 's|CLUSTER_DNS|${cidrhost(var.portal_net, 200)}|' /tmp/stage/*/*",
"sed -i 's|HYPERKUBE_VERSION|${var.hyperkube_version}|' /tmp/stage/*/*",
@@ -169,6 +169,7 @@ resource "openstack_compute_instance_v2" "compute" {
"sed -i 's/CONTROLLER_HOST/${openstack_compute_instance_v2.controller.0.network.0.fixed_ip_v4}/' /tmp/stage/*/*",
"sed -i 's|PORTAL_NET|${var.portal_net}|' /tmp/stage/*/*",
"sed -i 's|CLUSTER_DNS|${cidrhost(var.portal_net, 200)}|' /tmp/stage/*/*",
"sed -i 's|HYPERKUBE_VERSION|${var.hyperkube_version}|' /tmp/stage/*/*",
"sudo mkdir -p /etc/kubernetes/manifests",
"sudo mv /tmp/stage/compute/*.yaml /etc/kubernetes/manifests/",
"sudo mv /tmp/stage/compute/*.service /etc/systemd/system/",
@@ -200,6 +201,8 @@ resource "null_resource" "controller" {
" --client-certificate=/etc/kubernetes/ssl/admin.pem",
"/opt/bin/kubectl config set-context ${var.kubernetes_user} --cluster=${var.cluster_name} --user=${var.kubernetes_user}",
"/opt/bin/kubectl config set-context kubernetes --cluster=${var.cluster_name} --user=${var.kubernetes_user}",
"/opt/bin/kubectl config set current-context kubernetes",
"/opt/bin/kubectl create namespace kube-system",
]
connection {
user = "core"

View File

@@ -1,9 +1,10 @@
flannel_backend = "vxlan"
flannel_network = "10.10.0.0/16"
kubernetes_image = "coreos-alpha-884-0-0"
kubernetes_image = "coreos-stable-1010-6-0"
portal_net = "10.200.0.0/16"
cluster_name = "kubestack-testing"
kubectl_version = "v1.2.4"
hyperkube_version = "v1.2.4_coreos.1"
public_key_path = "~/.ssh/id_rsa.pub"
network_name = "internal"
floatingip_pool = "external"