Use virtualenv to build kuryr-cni Docker image

This commit changes the way we produce the kuryr-cni Docker image.
Previously we distributed the kuryr-driver as a pyinstaller binary that
bundled a Python 3 interpreter and all the dependencies. This binary was
called from CNI. That approach had some disadvantages, the major ones being
a complicated build procedure and false-positive BrokenPipeError tracebacks
showing up in kubelet logs.

This commit instead distributes the kuryr-driver as a virtualenv with
kuryr-kubernetes and all the dependencies installed. That virtualenv is then
copied onto the host system, where CNI can activate it and run the kuryr-cni
binary. This should solve the issues caused by pyinstaller.

Closes-Bug: 1747058

Change-Id: I65b01ba27cbe39b66f0a972d12f3abc166934e62
Author: Michał Dulko 2018-02-02 13:22:31 +01:00
parent 02329030a9
commit 78102c9984
12 changed files with 60 additions and 170 deletions

@@ -1,19 +1,21 @@
 FROM centos:7
 LABEL authors="Antoni Segura Puimedon<toni@kuryr.org>, Vikas Choudhary<vichoudh@redhat.com>"
+RUN yum install -y epel-release https://rdoproject.org/repos/rdo-release.rpm \
+    && yum install -y --setopt=tsflags=nodocs python-pip iproute bridge-utils openvswitch sudo \
+    && yum install -y --setopt=tsflags=nodocs gcc python-devel git \
+    && pip install virtualenv \
+    && virtualenv /kuryr-kubernetes
 COPY . /opt/kuryr-kubernetes
-RUN yum install -y epel-release https://rdoproject.org/repos/rdo-release.rpm \
-    && yum install -y --setopt=tsflags=nodocs python-pip iproute bridge-utils openvswitch \
-    && yum install -y --setopt=tsflags=nodocs gcc python-devel git \
-    && cd /opt/kuryr-kubernetes \
-    && pip install --no-cache-dir . \
+RUN cd /opt/kuryr-kubernetes \
+    && /kuryr-kubernetes/bin/pip install . \
+    && virtualenv --relocatable /kuryr-kubernetes \
     && rm -fr .git \
     && yum -y history undo last
-COPY kuryr-cni /kuryr-cni
-COPY kuryr-cni-bin /kuryr-cni-bin
-COPY cni_ds_init /usr/bin/cni_ds_init
+COPY ./cni_ds_init /usr/bin/cni_ds_init
 ARG CNI_CONFIG_DIR_PATH=/etc/cni/net.d
 ENV CNI_CONFIG_DIR_PATH ${CNI_CONFIG_DIR_PATH}
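
A quick way to build this image and check the baked-in virtualenv, assuming
the repository root as the build context (the kuryr/cni tag matches the one
used by the daemonset build script below):

    # Build the CNI image from the repository root.
    docker build -t kuryr/cni -f cni.Dockerfile .
    # Sanity check: the virtualenv inside the image should carry kuryr-kubernetes.
    docker run --rm --entrypoint /kuryr-kubernetes/bin/python kuryr/cni \
        -c 'import kuryr_kubernetes; print("ok")'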

@@ -1,28 +0,0 @@
# -*- mode: python -*-
block_cipher = None
a = Analysis(['/usr/local/bin/kuryr-cni'],
pathex=['/usr/local/lib/python3.5/site-packages', '/usr/local/lib/python3.5/site-packages/eventlet/support'],
binaries=[],
datas=[],
hiddenimports=['backports.ssl_match_hostname', 'setuptools', 'kuryr_kubernetes.objects.vif', 'kuryr_kubernetes.os_vif_plug_noop', 'dns', 'vif_plug_ovs', 'vif_plug_linux_bridge', 'oslo_privsep'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='kuryr-cni',
debug=False,
strip=False,
upx=True,
console=True )

@@ -1,13 +0,0 @@
#!/bin/bash -ex
rm -f /opt/kuryr-kubernetes/kuryr-cni
rm -f /opt/kuryr-kubernetes/kuryr-cni-bin
pbr_version=$(find /usr/local/lib/python3.5/site-packages/ -type d \
-name 'kuryr_kubernetes*info' -exec basename {} \; \
| awk -F"-" '{sub(/\.dist/,"",$2); print $2}')
cat > /opt/kuryr-kubernetes/kuryr-cni << EOF
#!/bin/bash
export PBR_VERSION='$pbr_version'
${CNI_BIN_DIR_PATH}/kuryr-cni-bin
EOF
cp /dist/kuryr-cni /opt/kuryr-kubernetes/kuryr-cni-bin
chmod 744 /opt/kuryr-kubernetes/kuryr-cni

@@ -1,45 +0,0 @@
FROM centos:centos6
LABEL authors="Antoni Segura Puimedon<toni@kuryr.org>, Vikas Choudhary<vichoudh@redhat.com>"
RUN yum install --setopt=tsflags=nodocs --assumeyes \
net-tools \
patch \
gcc \
python-devel \
wget \
openssl-devel \
zlib-devel \
git; \
yum clean all
ENV LANG en_US.UTF-8
ARG CNI_CONFIG_DIR_PATH=/etc/cni/net.d
ENV CNI_CONFIG_DIR_PATH ${CNI_CONFIG_DIR_PATH}
ARG CNI_BIN_DIR_PATH=/opt/cni/bin
ENV CNI_BIN_DIR_PATH ${CNI_BIN_DIR_PATH}
ARG CNI_DAEMON=False
ENV CNI_DAEMON ${CNI_DAEMON}
RUN cd /usr/src \
&& wget https://www.python.org/ftp/python/3.5.3/Python-3.5.3.tgz \
&& tar zxf Python-3.5.3.tgz \
&& cd Python-3.5.3 && ./configure --enable-shared && make altinstall \
&& ln -s /usr/local/lib/libpython3.5m.so.1.0 /usr/lib64/libpython3.5m.so.1.0
COPY . /opt/kuryr-kubernetes
# Installing from dev because of this issue, https://github.com/pyinstaller/pyinstaller/issues/2434
RUN cd /opt/kuryr-kubernetes \
&& patch -b kuryr_kubernetes/k8s_client.py < k8s_client.patch \
&& patch -b kuryr_kubernetes/cni/main.py < cni_main.patch \
&& pip3.5 install --no-cache-dir . \
&& pip3.5 install git+https://github.com/pyinstaller/pyinstaller.git \
&& pip3.5 install pyroute2 \
&& sed -i -e "s/self.bytebuffer + newdata/self.bytebuffer + newdata.encode()/" /usr/local/lib/python3.5/codecs.py
COPY cni_builder /usr/bin/cni_builder
COPY hooks/* /usr/local/lib/python3.5/site-packages/PyInstaller/hooks/
COPY cni.spec /
RUN pyinstaller cni.spec
CMD ["cni_builder"]
ENTRYPOINT [ "/bin/bash" ]

@@ -1,42 +1,37 @@
-#!/bin/bash -e
+#!/bin/bash -ex
 function cleanup() {
-    local cni_conf_path
-    local cni_bin_path
-    cni_conf_path="$1"
-    cni_bin_path="$2"
-    rm -f "${cni_conf_path}/10-kuryr.conf"
-    rm -f "${cni_bin_path}/kuryr-cni"
-    rm -f "${cni_bin_path}/kuryr-cni-bin"
+    rm -f "/etc/cni/net.d/10-kuryr.conf"
+    rm -f "/opt/cni/bin/kuryr-cni"
+    rm -rf "/opt/cni/bin/kuryr-venv"
     rm -rf /etc/kuryr
 }
 function deploy() {
-    local cni_conf_path
-    local cni_bin_path
     local serviceaccount_path
-    cni_conf_path="$1"
-    cni_bin_path="$2"
     serviceaccount_path="/var/run/secrets/kubernetes.io/serviceaccount"
     mkdir -p /etc/kuryr
     cp "${serviceaccount_path}/token" /etc/kuryr/token
     cp "${serviceaccount_path}/ca.crt" /etc/kuryr/ca.crt
-    cp /opt/kuryr-kubernetes/etc/cni/net.d/* "$cni_conf_path"
-    cp /kuryr-cni-bin "${cni_bin_path}/kuryr-cni-bin"
-    cp /kuryr-cni "${cni_bin_path}/kuryr-cni"
+    cp /opt/kuryr-kubernetes/etc/cni/net.d/* /etc/cni/net.d
+    cp -r /kuryr-kubernetes "/opt/cni/bin/kuryr-venv"
+    cat > /kuryr-cni << EOF
+#!/bin/bash
+${CNI_BIN_DIR_PATH}/kuryr-venv/bin/kuryr-cni
+EOF
+    cp /kuryr-cni "/opt/cni/bin/kuryr-cni"
+    chmod +x /opt/cni/bin/kuryr-cni
     cat /tmp/kuryr/* > /etc/kuryr/kuryr.conf
 }
-cleanup "$CNI_CONFIG_DIR_PATH" "$CNI_BIN_DIR_PATH"
-deploy "$CNI_CONFIG_DIR_PATH" "$CNI_BIN_DIR_PATH"
+cleanup
+deploy
 # Start CNI daemon if required
 if [ "$CNI_DAEMON" == "True" ]; then
-    /usr/bin/kuryr-daemon --config-file /etc/kuryr/kuryr.conf
+    /kuryr-kubernetes/bin/kuryr-daemon --config-file /etc/kuryr/kuryr.conf
 else
     sleep infinity
 fi
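
For context, the wrapper installed by deploy() above is what kubelet executes
as the CNI plugin. A manual invocation would look roughly like this, with the
environment variables and stdin config prescribed by the CNI spec (the values
below are purely illustrative):

    # Hypothetical manual CNI ADD call against the installed wrapper.
    export CNI_COMMAND=ADD
    export CNI_CONTAINERID=example0123456789
    export CNI_NETNS=/proc/12345/ns/net
    export CNI_IFNAME=eth0
    export CNI_PATH=/opt/cni/bin
    /opt/cni/bin/kuryr-cni < /etc/cni/net.d/10-kuryr.conf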

@@ -1,12 +0,0 @@
--- /root/tmp/kuryr-kubernetes/kuryr_kubernetes/cni/main.py 2017-06-19 07:15:39.898398766 -0400
+++ kuryr_kubernetes/cni/main.py 2017-06-22 04:28:41.421123949 -0400
@@ -54,6 +54,9 @@ class K8sCNIPlugin(cni_api.CNIPlugin):
self._watcher.stop()
def _setup(self, params):
+ ovs = os_vif._EXT_MANAGER['ovs'].obj
+ ovs_mod = sys.modules[ovs.__module__]
+ ovs_mod.linux_net.privsep.vif_plug.start(ovs_mod.linux_net.privsep.priv_context.Method.FORK)
clients.setup_kubernetes_client()
self._pipeline = h_cni.CNIPipeline()
self._watcher = k_watcher.Watcher(self._pipeline)

@@ -70,13 +70,24 @@ function configure_kuryr {
     iniset "$KURYR_CONFIG" kubernetes port_debug "$KURYR_PORT_DEBUG"
+    KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
+    if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then
+        # This works around the issue of being unable to set oslo.privsep mode
+        # to FORK in os-vif. When running in a container we disable `sudo` that
+        # was prefixed before `privsep-helper` command. This lets us run in
+        # envs without sudo and keep the same python environment as the parent
+        # process.
+        iniset "$KURYR_CONFIG" vif_plug_ovs_privileged helper_command privsep-helper
+        iniset "$KURYR_CONFIG" vif_plug_linux_bridge_privileged helper_command privsep-helper
+    fi
     if is_service_enabled kuryr-daemon; then
         iniset "$KURYR_CONFIG" cni_daemon daemon_enabled True
         iniset "$KURYR_CONFIG" oslo_concurrency lock_path "$KURYR_LOCK_DIR"
         create_kuryr_lock_dir
-        KURYR_K8S_CONTAINERIZED_DEPLOYMENT=$(trueorfalse False KURYR_K8S_CONTAINERIZED_DEPLOYMENT)
         if [ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]; then
-            # When running kuryr-daemon in container we need to set up configs.
+            # When running kuryr-daemon in container we need to set up some
+            # configs.
             iniset "$KURYR_CONFIG" cni_daemon docker_mode True
             iniset "$KURYR_CONFIG" cni_daemon netns_proc_dir "/host_proc"
         fi

@@ -67,6 +67,22 @@ Below is the list of available variables:
 * ``$KURYR_K8S_BINDING_DRIVER`` - ``[binding]driver`` (default: ``kuryr.lib.binding.drivers.vlan``)
 * ``$KURYR_K8S_BINDING_IFACE`` - ``[binding]link_iface`` (default: eth0)
+
+.. note::
+
+   kuryr-daemon will be started in the CNI container. It uses ``os-vif`` and
+   ``oslo.privsep`` to do pod wiring tasks. By default it will call ``sudo``
+   to raise privileges, even when the container is privileged by itself or
+   ``sudo`` is missing from the container OS (e.g. default CentOS 7). To
+   prevent that, make sure to set the following options in the kuryr.conf
+   used for kuryr-daemon::
+
+       [vif_plug_ovs_privileged]
+       helper_command=privsep-helper
+
+       [vif_plug_linux_bridge_privileged]
+       helper_command=privsep-helper
+
+   These options keep oslo.privsep from calling ``sudo``. If you rely on the
+   aforementioned script to generate config files, those options will be
+   added automatically.
+
 In case of using ports pool functionality, we may want to make the
 kuryr-controller not ready until the pools are populated with the existing
 ports. To achive this a readiness probe must be added to the kuryr-controller

@@ -1,11 +0,0 @@
--- /root/tmp/kuryr-kubernetes/kuryr_kubernetes/k8s_client.py 2017-06-19 07:15:39.901398831 -0400
+++ kuryr_kubernetes/k8s_client.py 2017-06-22 06:14:48.177325667 -0400
@@ -138,7 +138,7 @@
headers=header)) as response:
if not response.ok:
raise exc.K8sClientException(response.text)
- for line in response.iter_lines(delimiter='\n'):
+ for line in response.iter_lines(delimiter=b'\n'):
line = line.strip()
if line:
yield jsonutils.loads(line)

@@ -148,20 +148,6 @@ class DaemonServer(object):
                        'Connection': 'close'}

     def _prepare_request(self):
-        if CONF.cni_daemon.docker_mode:
-            # FIXME(dulek): This is an awful hack to make os_vif's privsep
-            #               daemon to run in FORK mode. This is required,
-            #               as it's assumed kuryr-daemon is run as root, but
-            #               it's not assumed that system it's running on has
-            #               sudo command. Once os_vif allows to configure the
-            #               mode, switch this to nicer method. It's placed
-            #               here, because we need to repeat it for each process
-            #               spawned by HTTP server.
-            ovs = os_vif._EXT_MANAGER['ovs'].obj
-            ovs_mod = sys.modules[ovs.__module__]
-            ovs_mod.linux_net.privsep.vif_plug.start(
-                ovs_mod.linux_net.privsep.priv_context.Method.FORK)
         params = utils.CNIParameters(flask.request.get_json())
         LOG.debug('Received %s request. CNI Params: %s',
                   params.CNI_COMMAND, params)

@@ -3,26 +3,11 @@
 CNI_BIN_DIR=$1
 CNI_CONF_DIR=$2
 CNI_DAEMON=${3:-"False"}
-BUILDER_TAG="kuryr/cni-builder"
 CNI_TAG="kuryr/cni"
-# build the cni image
-if [ -z "$CNI_BIN_DIR" ] && [ -z "$CNI_CONF_DIR" ]; then
-    docker build -t "$BUILDER_TAG" -f cni_builder.Dockerfile .
-else
-    docker build -t "$BUILDER_TAG" \
-        --build-arg "CNI_BIN_DIR_PATH=$CNI_BIN_DIR" \
-        --build-arg "CNI_CONFIG_DIR_PATH=$CNI_CONF_DIR" \
-        --build-arg "CNI_DAEMON=$CNI_DAEMON" \
-        -f cni_builder.Dockerfile .
-fi
-docker run \
-    --rm \
-    -v $(pwd):/opt/kuryr-kubernetes \
-    "$BUILDER_TAG":latest
 # create cni daemonset image
 docker build -t "$CNI_TAG" \
-    --build-arg "CNI_BIN_DIR_PATH=$CNI_BIN_DIR" \
-    --build-arg "CNI_CONFIG_DIR_PATH=$CNI_CONF_DIR" \
     --build-arg "CNI_DAEMON=$CNI_DAEMON" \
     -f cni.Dockerfile .
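
The script is invoked the same way as before, just without the intermediate
builder image; for example (the script name and path are assumed here; the
positional arguments are the CNI binary dir, the CNI config dir and the CNI
daemon flag read at the top of the script):

    # Assumed location of the build script; arguments map to CNI_BIN_DIR,
    # CNI_CONF_DIR and CNI_DAEMON above.
    ./tools/build_cni_daemonset_image /opt/cni/bin /etc/cni/net.d True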

@@ -86,6 +86,10 @@ api_root = $api_root
 token_file = /etc/kuryr/token
 ssl_ca_crt_file = /etc/kuryr/ca.crt
 ssl_verify_server_crt = true
+[vif_plug_ovs_privileged]
+helper_command=privsep-helper
+[vif_plug_linux_bridge_privileged]
+helper_command=privsep-helper
 EOF
 if [ ! -z $binding_driver ]; then