Support kuryr-daemon when running containerized

This commit implements kuryr-daemon support when
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=True. It's done by:

* CNI docker image installs Kuryr-Kubernetes pip package and adds
  execution of kuryr-daemon into the entrypoint script.
* Hosts /proc and /var/run/openvswitch are mounted into the CNI
  container.
* Code is changed to use /host_proc instead of /proc when in a container
  (it's impossible to mount host's /proc into container's /proc).

Implements: blueprint cni-split-exec-daemon

Change-Id: I9155a2cba28f578cee129a4c40066209f7ab543d
This commit is contained in:
Michał Dulko 2017-11-03 13:25:15 +01:00
parent c468b28c38
commit 18db649943
15 changed files with 180 additions and 35 deletions

View File

@ -3,6 +3,14 @@ LABEL authors="Antoni Segura Puimedon<toni@kuryr.org>, Vikas Choudhary<vichoudh@
COPY . /opt/kuryr-kubernetes
RUN yum install -y epel-release https://rdoproject.org/repos/rdo-release.rpm \
&& yum install -y --setopt=tsflags=nodocs python-pip iproute bridge-utils openvswitch \
&& yum install -y --setopt=tsflags=nodocs gcc python-devel git \
&& cd /opt/kuryr-kubernetes \
&& pip install --no-cache-dir . \
&& rm -fr .git \
&& yum -y history undo last
COPY kuryr-cni /kuryr-cni
COPY kuryr-cni-bin /kuryr-cni-bin
COPY cni_ds_init /usr/bin/cni_ds_init
@ -11,5 +19,8 @@ ARG CNI_CONFIG_DIR_PATH=/etc/cni/net.d
ENV CNI_CONFIG_DIR_PATH ${CNI_CONFIG_DIR_PATH}
ARG CNI_BIN_DIR_PATH=/opt/cni/bin
ENV CNI_BIN_DIR_PATH ${CNI_BIN_DIR_PATH}
ARG CNI_DAEMON=False
ENV CNI_DAEMON ${CNI_DAEMON}
VOLUME [ "/sys/fs/cgroup" ]
ENTRYPOINT [ "cni_ds_init" ]

View File

@ -17,6 +17,8 @@ ARG CNI_CONFIG_DIR_PATH=/etc/cni/net.d
ENV CNI_CONFIG_DIR_PATH ${CNI_CONFIG_DIR_PATH}
ARG CNI_BIN_DIR_PATH=/opt/cni/bin
ENV CNI_BIN_DIR_PATH ${CNI_BIN_DIR_PATH}
ARG CNI_DAEMON=False
ENV CNI_DAEMON ${CNI_DAEMON}
RUN cd /usr/src \
&& wget https://www.python.org/ftp/python/3.5.3/Python-3.5.3.tgz \

View File

@ -34,4 +34,9 @@ function deploy() {
cleanup "$CNI_CONFIG_DIR_PATH" "$CNI_BIN_DIR_PATH"
deploy "$CNI_CONFIG_DIR_PATH" "$CNI_BIN_DIR_PATH"
sleep infinity
# Start CNI daemon if required
if [ "$CNI_DAEMON" == "True" ]; then
/usr/bin/kuryr-daemon --config-file /etc/kuryr/kuryr.conf
else
sleep infinity
fi

View File

@ -1,9 +1,9 @@
--- /root/tmp/kuryr-kubernetes/kuryr_kubernetes/cni/main.py 2017-06-19 07:15:39.898398766 -0400
+++ kuryr_kubernetes/cni/main.py 2017-06-22 04:28:41.421123949 -0400
@@ -61,6 +61,9 @@
config.init(args)
config.setup_logging()
os_vif.initialize()
@@ -54,6 +54,9 @@ class K8sCNIPlugin(cni_api.CNIPlugin):
self._watcher.stop()
def _setup(self, params):
+ ovs = os_vif._EXT_MANAGER['ovs'].obj
+ ovs_mod = sys.modules[ovs.__module__]
+ ovs_mod.linux_net.privsep.vif_plug.start(ovs_mod.linux_net.privsep.priv_context.Method.FORK)

View File

@ -304,10 +304,12 @@ EOF
function build_kuryr_containers() {
local cni_bin_dir
local cni_conf_dir
local cni_daemon
local build_dir
cni_bin_dir=$1
cni_conf_dir=$2
cni_daemon=$3
build_dir="${DEST}/kuryr-kubernetes"
pushd "$build_dir"
@ -315,7 +317,7 @@ function build_kuryr_containers() {
sudo docker build -t kuryr/controller -f "controller.Dockerfile" .
# Build CNI image
sudo ./tools/build_cni_daemonset_image $cni_bin_dir $cni_conf_dir
sudo ./tools/build_cni_daemonset_image $cni_bin_dir $cni_conf_dir $cni_daemon
popd
}
@ -491,6 +493,11 @@ spec:
image: kuryr/cni:latest
imagePullPolicy: Never
command: [ "cni_ds_init" ]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
@ -503,6 +510,10 @@ spec:
subPath: kuryr-cni.conf
- name: etc
mountPath: /etc
- name: proc
mountPath: /host_proc
- name: openvswitch
mountPath: /var/run/openvswitch
volumes:
- name: bin
hostPath:
@ -516,6 +527,12 @@ spec:
- name: etc
hostPath:
path: /etc
- name: proc
hostPath:
path: /proc
- name: openvswitch
hostPath:
path: /var/run/openvswitch
EOF
}

View File

@ -183,8 +183,7 @@ enable_service kuryr-kubernetes
# instead of processing them on its own. This limits the number of Kubernetes
# API requests (as only Kuryr Daemon will watch for new pod events) and should
# increase scalability in environments that often delete and create pods.
# Please note that kuryr-daemon is not yet supported in containerized
# deployment. To enable kuryr-daemon uncomment next line.
# To enable kuryr-daemon uncomment next line.
# enable_service kuryr-daemon

View File

@ -67,6 +67,12 @@ function configure_kuryr {
if is_service_enabled kuryr-daemon; then
iniset "$KURYR_CONFIG" cni_daemon daemon_enabled True
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then
# When running kuryr-daemon in container we need to set up configs.
iniset "$KURYR_CONFIG" cni_daemon docker_mode True
iniset "$KURYR_CONFIG" cni_daemon netns_proc_dir "/host_proc"
fi
fi
create_kuryr_cache_dir
@ -582,14 +588,7 @@ function run_kuryr_daemon {
source $DEST/kuryr-kubernetes/devstack/lib/kuryr_kubernetes
# main loop
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
if is_service_enabled kuryr-daemon && [[ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]]; then
die $LINENO "Cannot enable kuryr-daemon with KURYR_K8S_CONTAINERIZED_DEPLOYMENT."
fi
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
if [[ "$1" == "stack" && "$2" == "install" ]]; then
setup_develop "$KURYR_HOME"
if is_service_enabled kubelet || is_service_enabled openshift-node; then
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
@ -669,7 +668,11 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then
run_k8s_scheduler
fi
run_kuryr_daemon
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "False" ]; then
# If running in containerized mode, we'll run the daemon as DaemonSet.
run_kuryr_daemon
fi
if is_service_enabled kubelet; then
prepare_kubelet
@ -688,11 +691,14 @@ if [[ "$1" == "stack" && "$2" == "extra" ]]; then
fi
if is_service_enabled kuryr-kubernetes; then
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "False" ]; then
run_kuryr_kubernetes
else
build_kuryr_containers $CNI_BIN_DIR $CNI_CONF_DIR
if is_service_enabled kuryr-daemon; then
build_kuryr_containers $CNI_BIN_DIR $CNI_CONF_DIR True
else
build_kuryr_containers $CNI_BIN_DIR $CNI_CONF_DIR False
fi
generate_containerized_kuryr_resources
run_containerized_kuryr_resources
fi
@ -706,12 +712,21 @@ elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
fi
if [[ "$1" == "unstack" ]]; then
KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
if is_service_enabled kuryr-kubernetes; then
stop_process kuryr-kubernetes
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "False" ]; then
stop_process kuryr-kubernetes
else
$KURYR_HYPERKUBE_BINARY kubectl delete deploy/kuryr-controller
fi
elif is_service_enabled kubelet; then
$KURYR_HYPERKUBE_BINARY kubectl delete nodes ${HOSTNAME}
fi
stop_process kuryr-daemon
if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "False" ]; then
stop_process kuryr-daemon
else
$KURYR_HYPERKUBE_BINARY kubectl delete ds/kuryr-cni-ds
fi
if is_service_enabled kubernetes-controller-manager; then
stop_container kubernetes-controller-manager

View File

@ -13,7 +13,19 @@ For creating controller image on local machine: ::
For creating cni daemonset image on local machine: ::
$ ./tools/build_cni_daemonset_image
$ ./tools/build_cni_daemonset_image [<cni_bin_dir>] [<cni_conf_dir>] [<enable_cni_daemon>]
* ``cni_bin_dir`` - host directory where CNI binaries are located, defaults to
``/opt/cni/bin``.
* ``cni_conf_dir`` - host directory where CNI configuration is located,
defaults to ``/etc/cni/net.d``.
* ``enable_cni_daemon`` - Set to ``True`` if you want CNI Docker image to run
CNI daemon by default. Defaults to ``False``.
.. note::
You can override those build variables by passing env variables when running
the Docker image. Supported variables are ``CNI_CONFIG_DIR_PATH``,
``CNI_BIN_DIR_PATH`` and ``CNI_DAEMON``.
Alternatively, you can remove ``imagePullPolicy: Never`` from kuryr-controller
Deployment and kuryr-cni DaemonSet definitions to use pre-built

View File

@ -17,6 +17,8 @@ import os_vif
import pyroute2
from stevedore import driver as stv_driver
from kuryr_kubernetes import utils
_BINDING_NAMESPACE = 'kuryr_kubernetes.cni.binding'
@ -29,6 +31,7 @@ def _get_binding_driver(vif):
def get_ipdb(netns=None):
if netns:
netns = utils.convert_netns(netns)
ipdb = pyroute2.IPDB(nl=pyroute2.NetNS(netns))
else:
ipdb = pyroute2.IPDB()
@ -39,10 +42,12 @@ def _enable_ipv6(netns):
# Docker disables IPv6 for --net=none containers
# TODO(apuimedo) remove when it is no longer the case
try:
self_ns_fd = open('/proc/self/ns/net')
netns = utils.convert_netns(netns)
path = utils.convert_netns('/proc/self/ns/net')
self_ns_fd = open(path)
pyroute2.netns.setns(netns)
with open('/proc/sys/net/ipv6/conf/all/disable_ipv6',
'w') as disable_ipv6:
path = utils.convert_netns('/proc/sys/net/ipv6/conf/all/disable_ipv6')
with open(path, 'w') as disable_ipv6:
disable_ipv6.write('0')
except Exception:
raise

View File

@ -17,6 +17,7 @@ import six
from kuryr_kubernetes.cni.binding import base as b_base
from kuryr_kubernetes import config
from kuryr_kubernetes import utils
VLAN_KIND = 'vlan'
MACVLAN_KIND = 'macvlan'
@ -48,7 +49,7 @@ class NestedDriver(object):
with h_ipdb.create(ifname=temp_name,
link=h_ipdb.interfaces[vm_iface_name],
**args) as iface:
iface.net_ns_fd = netns
iface.net_ns_fd = utils.convert_netns(netns)
with b_base.get_ipdb(netns) as c_ipdb:
with c_ipdb.interfaces[temp_name] as iface:

View File

@ -13,6 +13,7 @@
# limitations under the License.
import multiprocessing
import os
from six.moves import http_client as httplib
import socket
import sys
@ -128,11 +129,34 @@ class DaemonServer(object):
self.headers = {'ContentType': 'application/json',
'Connection': 'close'}
def _prepare_request(self):
    """Parse the CNI parameters out of the incoming Flask request.

    When ``docker_mode`` is enabled, first force os_vif's privsep
    helper to start in FORK mode (see FIXME below). This runs per
    request because it must be repeated in each process spawned by the
    HTTP server.

    :return: ``utils.CNIParameters`` built from the request's JSON body.
    """
    if CONF.cni_daemon.docker_mode:
        # FIXME(dulek): This is an awful hack to make os_vif's privsep
        #               daemon to run in FORK mode. This is required,
        #               as it's assumed kuryr-daemon is run as root, but
        #               it's not assumed that system it's running on has
        #               sudo command. Once os_vif allows to configure the
        #               mode, switch this to nicer method. It's placed
        #               here, because we need to repeat it for each process
        #               spawned by HTTP server.
        ovs = os_vif._EXT_MANAGER['ovs'].obj
        ovs_mod = sys.modules[ovs.__module__]
        ovs_mod.linux_net.privsep.vif_plug.start(
            ovs_mod.linux_net.privsep.priv_context.Method.FORK)
    params = utils.CNIParameters(flask.request.get_json())
    # Log which CNI command (ADD/DEL) arrived together with its params.
    LOG.debug('Received %s request. CNI Params: %s',
              params.CNI_COMMAND, params)
    return params
def add(self):
params = None
try:
params = utils.CNIParameters(flask.request.get_json())
LOG.debug('Received addNetwork request. CNI Params: %s', params)
params = self._prepare_request()
except Exception:
LOG.exception('Exception when reading CNI params.')
return '', httplib.BAD_REQUEST, self.headers
try:
vif = self.plugin.add(params)
data = jsonutils.dumps(vif.obj_to_primitive())
except exceptions.ResourceNotReady as e:
@ -143,13 +167,17 @@ class DaemonServer(object):
LOG.exception('Error when processing addNetwork request. CNI '
'Params: %s', params)
return '', httplib.INTERNAL_SERVER_ERROR, self.headers
return data, httplib.ACCEPTED, self.headers
def delete(self):
params = None
try:
params = utils.CNIParameters(flask.request.get_json())
LOG.debug('Received delNetwork request. CNI Params: %s', params)
params = self._prepare_request()
except Exception:
LOG.exception('Exception when reading CNI params.')
return '', httplib.BAD_REQUEST, self.headers
try:
self.plugin.delete(params)
except exceptions.ResourceNotReady as e:
# NOTE(dulek): It's better to ignore this error - most of the time
@ -212,6 +240,15 @@ class CNIDaemonWatcherService(cotyledon.Service):
self.watcher = None
self.registry = registry
def _get_nodename(self):
# NOTE(dulek): At first try to get it using environment variable,
# otherwise assume hostname is the nodename.
try:
nodename = os.environ['KUBERNETES_NODE_NAME']
except KeyError:
nodename = socket.gethostname()
return nodename
def run(self):
self.pipeline = h_cni.CNIPipeline()
self.pipeline.register(h_cni.CallbackHandler(self.on_done))
@ -219,7 +256,7 @@ class CNIDaemonWatcherService(cotyledon.Service):
self.watcher.add(
"%(base)s/pods?fieldSelector=spec.nodeName=%(node_name)s" % {
'base': k_const.K8S_API_BASE,
'node_name': socket.gethostname()})
'node_name': self._get_nodename()})
self.watcher.start()
def on_done(self, pod, vif):

View File

@ -54,6 +54,22 @@ daemon_opts = [
'process all networking stack changes. This option '
'allows to tune internal pyroute2 timeout.'),
default=10),
cfg.BoolOpt('docker_mode',
help=_('Set to True when you are running kuryr-daemon inside '
'a Docker container on Kubernetes host. E.g. as '
'DaemonSet on Kubernetes cluster Kuryr is supposed to '
                         'provide networking for. This mainly means that '
'kuryr-daemon will look for network namespaces in '
'$netns_proc_dir instead of /proc.'),
default=False),
cfg.StrOpt('netns_proc_dir',
help=_("When docker_mode is set to True, this config option "
"should be set to where host's /proc directory is "
"mounted. Please note that mounting it is necessary to "
"allow Kuryr-Kubernetes to move host interfaces between "
"host network namespaces, which is essential for Kuryr "
"to work."),
default=None),
]
k8s_opts = [

View File

@ -95,7 +95,8 @@ class TestDaemonServer(base.TestCase):
self.srv.application.testing = True
self.test_client = self.srv.application.test_client()
params = {'config_kuryr': {}, 'CNI_ARGS': 'foo=bar'}
params = {'config_kuryr': {}, 'CNI_ARGS': 'foo=bar',
'CNI_CONTAINERID': 'baz', 'CNI_COMMAND': 'ADD'}
self.params_str = jsonutils.dumps(params)
@mock.patch('kuryr_kubernetes.cni.daemon.service.K8sCNIRegistryPlugin.add')

View File

@ -10,8 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
CONF = cfg.CONF
def utf8_json_decoder(byte_data):
"""Deserializes the bytes into UTF-8 encoded JSON.
@ -20,3 +23,19 @@ def utf8_json_decoder(byte_data):
:returns: The UTF-8 encoded JSON represented by Python dictionary format.
"""
return jsonutils.loads(byte_data.decode('utf8'))
def convert_netns(netns):
    """Convert a /proc based netns path to a Docker-friendly path.

    When ``CONF.cni_daemon.docker_mode`` is set, substitute
    ``CONF.cni_daemon.netns_proc_dir`` for ``/proc`` in the given path.
    This lets netns manipulations work when running inside a Docker
    container on a Kubernetes host, where the host's /proc is mounted
    at ``netns_proc_dir`` (host /proc cannot replace container /proc).

    :param netns: netns path to convert.
    :return: Converted netns path.
    """
    if not CONF.cni_daemon.docker_mode:
        return netns
    return netns.replace('/proc', CONF.cni_daemon.netns_proc_dir)

View File

@ -2,6 +2,7 @@
CNI_BIN_DIR=$1
CNI_CONF_DIR=$2
CNI_DAEMON=${3:-"False"}
BUILDER_TAG="kuryr/cni-builder"
CNI_TAG="kuryr/cni"
@ -12,6 +13,7 @@ else
docker build -t "$BUILDER_TAG" \
--build-arg "CNI_BIN_DIR_PATH=$CNI_BIN_DIR" \
--build-arg "CNI_CONFIG_DIR_PATH=$CNI_CONF_DIR" \
--build-arg "CNI_DAEMON=$CNI_DAEMON" \
-f cni_builder.Dockerfile .
fi
docker run \
@ -20,4 +22,7 @@ docker run \
"$BUILDER_TAG":latest
# create cni daemonset image
docker build -t "$CNI_TAG" -f cni.Dockerfile .
docker build -t "$CNI_TAG" \
--build-arg "CNI_DAEMON=$CNI_DAEMON" \
-f cni.Dockerfile .