Add network isolation for trove

This PR adds a network_isolation config option for Trove.
With network_isolation enabled, the Trove guest agent plugs
the user-defined port into the database container via the
docker host_nic network driver, which is implemented in this PR.

The docker host_nic network driver is a simple driver that plugs a host
NIC into a container. It supports IPv4, IPv6, and dual-stack.

For more details, please see the story.

story: 2010733
task: 47957

Change-Id: I35d6f8b81a2c5e847cbed3f5bc6095dc1d387165
wuchunyang 2023-04-29 09:35:38 +00:00 committed by wu.chunyang
parent b79019336e
commit 2f755b64b3
29 changed files with 1114 additions and 44 deletions
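For illustration only (this sketch is not part of the diff below): with network_isolation enabled, the guest agent essentially creates a docker network backed by the host_nic driver and starts the database container on it instead of the default bridge. A rough equivalent using the docker Python SDK, where the network and driver names mirror constants introduced in this change, and the subnet, gateway, MAC address and image are made-up placeholders:

import docker

client = docker.from_env()

# IPAM configuration derived from the user-defined Neutron port (IPv4 shown
# here; an IPv6 pool can be appended the same way for dual-stack).
ipam = docker.types.IPAMConfig(pool_configs=[
    docker.types.IPAMPool(subnet="10.111.0.0/26", gateway="10.111.0.1"),
])

# The host_nic driver is told which host NIC to move into the container
# through the hostnic_mac option.
client.networks.create(
    name="database-network",
    driver="docker-hostnic",
    ipam=ipam,
    options={"hostnic_mac": "fa:16:3e:7c:9c:57"},
)

# Start the database container attached to that network instead of "bridge".
client.containers.run(
    "mysql:8.0",
    name="database",
    network="database-network",
    detach=True,
)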

contrib/trove-network-driver Executable file

@ -0,0 +1,29 @@
#!/usr/bin/python3
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'trove', '__init__.py')):
sys.path.insert(0, possible_topdir)
from trove.cmd.network_driver import main
if __name__ == "__main__":
sys.exit(main())

@ -348,8 +348,9 @@ function create_mgmt_subnet_v4 {
local net_id=$2
local name=$3
local ip_range=$4
+local gateway=$5
-subnet_id=$(openstack subnet create --project ${project_id} --ip-version 4 --subnet-range ${ip_range} --gateway none --dns-nameserver 8.8.8.8 --network ${net_id} $name -c id -f value)
+subnet_id=$(openstack subnet create --project ${project_id} --ip-version 4 --subnet-range ${ip_range} --gateway ${gateway} --dns-nameserver 8.8.8.8 --network ${net_id} $name -c id -f value)
die_if_not_set $LINENO subnet_id "Failed to create private IPv4 subnet for network: ${net_id}, project: ${project_id}"
echo $subnet_id
}
@ -386,7 +387,8 @@ function setup_mgmt_network() {
local NET_NAME=$2
local SUBNET_NAME=$3
local SUBNET_RANGE=$4
-local SHARED=$5
+local SUBNET_GATEWAY=$5
+local SHARED=$6
local share_flag=""
if [[ "${SHARED}" == "TRUE" ]]; then
@ -397,9 +399,10 @@ function setup_mgmt_network() {
die_if_not_set $LINENO network_id "Failed to create network: $NET_NAME, project: ${PROJECT_ID}"
if [[ "$IP_VERSION" =~ 4.* ]]; then
-net_subnet_id=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE})
+net_subnet_id=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE} ${SUBNET_GATEWAY})
-# 'openstack router add' has a bug that cound't show the error message
-# openstack router add subnet ${ROUTER_ID} ${net_subnet_id} --debug
+if [[ ${SUBNET_GATEWAY} != "none" ]]; then
+openstack router add subnet ${ROUTER_ID} ${net_subnet_id}
+fi
fi
# Trove doesn't support IPv6 for now.
@ -557,6 +560,7 @@ function config_trove_network {
echo " SUBNETPOOL_V4_ID: $SUBNETPOOL_V4_ID"
echo " ROUTER_GW_IP: $ROUTER_GW_IP"
echo " TROVE_MGMT_SUBNET_RANGE: ${TROVE_MGMT_SUBNET_RANGE}"
+echo " TROVE_MGMT_GATEWAY: ${TROVE_MGMT_GATEWAY}"
# Save xtrace setting
local orig_xtrace
@ -565,7 +569,7 @@ function config_trove_network {
echo "Creating Trove management network/subnet for Trove service project."
trove_service_project_id=$(openstack project show $SERVICE_PROJECT_NAME -c id -f value)
-setup_mgmt_network ${trove_service_project_id} ${TROVE_MGMT_NETWORK_NAME} ${TROVE_MGMT_SUBNET_NAME} ${TROVE_MGMT_SUBNET_RANGE}
+setup_mgmt_network ${trove_service_project_id} ${TROVE_MGMT_NETWORK_NAME} ${TROVE_MGMT_SUBNET_NAME} ${TROVE_MGMT_SUBNET_RANGE} ${TROVE_MGMT_GATEWAY}
mgmt_net_id=$(openstack network show ${TROVE_MGMT_NETWORK_NAME} -c id -f value)
echo "Created Trove management network ${TROVE_MGMT_NETWORK_NAME}(${mgmt_net_id})"

@ -52,8 +52,9 @@ if is_service_enabled neutron; then
TROVE_MGMT_NETWORK_NAME=${TROVE_MGMT_NETWORK_NAME:-"trove-mgmt"}
TROVE_MGMT_SUBNET_NAME=${TROVE_MGMT_SUBNET_NAME:-${TROVE_MGMT_NETWORK_NAME}-subnet}
TROVE_MGMT_SUBNET_RANGE=${TROVE_MGMT_SUBNET_RANGE:-"192.168.254.0/24"}
-TROVE_MGMT_SUBNET_START=${TROVE_MGMT_SUBNET_START:-"192.168.254.2"}
+TROVE_MGMT_SUBNET_START=${TROVE_MGMT_SUBNET_START:-"192.168.254.10"}
TROVE_MGMT_SUBNET_END=${TROVE_MGMT_SUBNET_END:-"192.168.254.200"}
+TROVE_MGMT_GATEWAY=${TROVE_MGMT_GATEWAY:-"none"}
else
TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
fi

@ -12,3 +12,4 @@
secure_oslo_messaging
database_management
troubleshooting
+network_isolation

@ -0,0 +1,51 @@
=======================
Trove network isolation
=======================
.. _network_isolation:
Isolate business network from management network
---------------------------------------------------

This document aims to help administrators configure network_isolation in Trove.

Before the ``Bobcat`` release, Trove did not isolate the management network from the business network,
which could sometimes cause network performance or security issues.

Since the ``Bobcat`` release, Trove provides a new config option (network_isolation) to configure network isolation.

network_isolation has the following behaviors and requirements:

* Trove no longer checks for overlap between management network CIDRs and business network CIDRs,
  as Trove allows the management network and the business network to use the same CIDRs.
* The cloud administrator must configure management_networks in the config file. The management network is
  responsible for connecting to RabbitMQ as well as the docker registry. Even if network_isolation is set to
  true, Trove will still not plug the network interface into the container if management_networks is not configured.
Configure network isolation
---------------------------
* Set ``management_networks`` in :file:`/etc/trove/trove.conf`. Typically, this is a Neutron provider
  network with a gateway configured. See the :ref:`management network <trove-management-network>`.

  .. path /etc/trove/trove.conf
  .. code-block:: ini

     [DEFAULT]
     management_networks = <your-network-id>

* Set network_isolation to True (the default is False):

  .. path /etc/trove/trove.conf
  .. code-block:: ini

     [network]
     network_isolation = True

Upgrade
-------

This feature is not backward compatible with older Trove guest images; you need to re-build the guest image
with the updated code. See the :ref:`build image <build_guest_images>`.
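For reference (illustration only, not part of the document above): the guest-side effect of these options is a simple network-mode decision. The guest agent switches the database container off the docker bridge only when network_isolation is enabled and the port information file injected by the taskmanager exists; the path and mode names below mirror constants added elsewhere in this change.

import os

ETH1_CONFIG_PATH = "/etc/trove/eth1.json"  # written by the Trove taskmanager


def pick_network_mode(network_isolation_enabled: bool) -> str:
    """Return the docker network mode the guest agent will use."""
    # Use the host-NIC driver only when the injected port info is present;
    # otherwise fall back to the regular docker bridge.
    if network_isolation_enabled and os.path.exists(ETH1_CONFIG_PATH):
        return "docker-hostnic"
    return "bridge"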

@ -94,6 +94,7 @@ Running multiple instances of the individual Trove controller components on
separate physical hosts is recommended in order to provide scalability and
availability of the controller software.
+.. _trove-management-network:
Management Network
~~~~~~~~~~~~~~~~~~

@ -31,14 +31,19 @@ if [[ ${SYNC_LOG_TO_CONTROLLER} == "True" ]]; then
cp ${SCRIPTDIR}/guest-log-collection.timer /etc/systemd/system/guest-log-collection.timer
fi
+# Install docker network plugin
+ln -s ${GUEST_VENV}/bin/trove-docker-plugin /usr/local/bin/trove-docker-plugin || true
+install -D -g root -o root -m 0644 ${SCRIPTDIR}/docker-hostnic.socket /lib/systemd/system/docker-hostnic.socket
if [[ ${DEV_MODE} == "true" ]]; then
[[ -n "${HOST_SCP_USERNAME}" ]] || die "HOST_SCP_USERNAME needs to be set to the trovestack host user"
[[ -n "${ESCAPED_PATH_TROVE}" ]] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host"
+sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g" ${SCRIPTDIR}/docker-hostnic-dev.service > /lib/systemd/system/docker-hostnic.service
sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${SCRIPTDIR}/guest-agent-dev.service > /etc/systemd/system/guest-agent.service
else
# Link the trove-guestagent out to /usr/local/bin where the startup scripts look for
ln -s ${GUEST_VENV}/bin/trove-guestagent /usr/local/bin/guest-agent || true
+install -D -g root -o root -m 0644 ${SCRIPTDIR}/docker-hostnic.service /lib/systemd/system/docker-hostnic.service
case "$DIB_INIT_SYSTEM" in
systemd)

@ -0,0 +1,13 @@
[Unit]
Description=Docker hostnic plugin Service
Before=docker.service
After=network.target docker-hostnic.socket
Requires=docker-hostnic.socket docker.service
[Service]
User=root
Group=root
ExecStart=/opt/guest-agent-venv/bin/python /home/GUEST_USERNAME/trove/contrib/trove-network-driver
[Install]
WantedBy=multi-user.target

@ -0,0 +1,13 @@
[Unit]
Description=Docker hostnic plugin Service
Before=docker.service
After=network.target docker-hostnic.socket
Requires=docker-hostnic.socket docker.service
[Service]
User=root
Group=root
ExecStart=/usr/local/bin/trove-docker-plugin
[Install]
WantedBy=multi-user.target

@ -0,0 +1,8 @@
[Unit]
Description=docker hostnic driver
[Socket]
ListenStream=/run/docker/plugins/docker-hostnic.sock
[Install]
WantedBy=sockets.target

@ -7,7 +7,7 @@ set -eu
set -o pipefail
if [ "$DIB_INIT_SYSTEM" == "systemd" ]; then
-systemctl enable $(svc-map guest-agent)
+systemctl enable $(svc-map guest-agent docker-hostnic.socket)
fi
if [[ ${SYNC_LOG_TO_CONTROLLER} == "True" ]]; then

@ -0,0 +1,8 @@
---
features:
- |
Add a network_isolation config option for Trove. With network_isolation enabled,
the Trove guest agent plugs the user-defined port into the database container,
thereby isolating business traffic from management traffic.
`Story 2010733 <https://storyboard.openstack.org/#!/story/2010733>`__

@ -50,3 +50,8 @@ docker>=4.2.0 # Apache-2.0
psycopg2-binary>=2.6.2 # LGPL/ZPL
semantic-version>=2.7.0 # BSD
oslo.cache>=1.26.0 # Apache-2.0
+# for trove network driver
+Flask>=2.2.3 # BSD
+pyroute2>=0.7.7;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2)
+gunicorn>=20.1.0 # MIT

@ -38,6 +38,7 @@ console_scripts =
trove-guestagent = trove.cmd.guest:main
trove-fake-mode = trove.cmd.fakemode:main
trove-status = trove.cmd.status:main
+trove-docker-plugin = trove.cmd.network_driver:main
trove.api.extensions =
mgmt = trove.extensions.routes.mgmt:Mgmt

@ -31,7 +31,10 @@ CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None,
help="ID of the Guest Instance."), help="ID of the Guest Instance."),
openstack_cfg.StrOpt('instance_rpc_encr_key', openstack_cfg.StrOpt('instance_rpc_encr_key',
help=('Key (OpenSSL aes_cbc) for ' help=('Key (OpenSSL aes_cbc) for '
'instance RPC encryption.'))]) 'instance RPC encryption.')),
openstack_cfg.BoolOpt('network_isolation',
help='whether to plug user defined '
'port to database container')])
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)

trove/cmd/network_driver.py Normal file

@ -0,0 +1,306 @@
# Copyright 2023 Yovole
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import jsonschema
import netaddr
import os
import sys
import traceback
import flask
from flask import Flask
import gunicorn.app.base
from oslo_log import log as logging
from pyroute2 import IPRoute
from werkzeug import exceptions as w_exceptions
from trove.common import constants
from trove.common import schemata
LOG = logging.getLogger(__name__)
class hostnic_config(object):
"""this class records network id and its host nic"""
CONFIG_FILE = "/etc/docker/hostnic.json"
def __init__(self) -> None:
if not os.path.exists(self.CONFIG_FILE):
with open(self.CONFIG_FILE, 'w+') as f:
f.write(json.dumps({}))
def get_data(self) -> dict:
with open(self.CONFIG_FILE, 'r') as cfg:
data = json.loads(cfg.read())
return data
def write_config(self, key: str, value: str):
data = self.get_data()
data[key] = value
with open(self.CONFIG_FILE, 'w+') as cfg:
cfg.write(json.dumps(data))
def get_config(self, key: str):
data = self.get_data()
return data.get(key, "")
def delete_config(self, key: str):
data = self.get_data()
if not data.get(key):
return
data.pop(key)
with open(self.CONFIG_FILE, 'w+') as cfg:
cfg.write(json.dumps(data))
driver_config = hostnic_config()
def make_json_app(import_name, **kwargs):
"""Creates a JSON-oriented Flask app.
All error responses that you don't specifically manage yourself will have
application/json content type, and will contain JSON that follows the
libnetwork remote driver protocol.
{ "Err": "405: Method Not Allowed" }
See:
- https://github.com/docker/libnetwork/blob/3c8e06bc0580a2a1b2440fe0792fbfcd43a9feca/docs/remote.md#errors # noqa
"""
app = Flask(import_name)
@app.errorhandler(jsonschema.ValidationError)
def make_json_error(ex):
LOG.error("Unexpected error happened: %s", ex)
traceback.print_exc(file=sys.stderr)
response = flask.jsonify({"Err": str(ex)})
response.status_code = w_exceptions.InternalServerError.code
if isinstance(ex, w_exceptions.HTTPException):
response.status_code = ex.code
elif isinstance(ex, jsonschema.ValidationError):
response.status_code = w_exceptions.BadRequest.code
content_type = 'application/vnd.docker.plugins.v1+json; charset=utf-8'
response.headers['Content-Type'] = content_type
return response
for code in w_exceptions.default_exceptions:
app.register_error_handler(code, make_json_error)
return app
app = make_json_app(__name__)
@app.route('/Plugin.Activate', methods=['POST', 'GET'])
def plugin_activate():
"""Returns the list of the implemented drivers.
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/docs/remote.md#handshake # noqa
"""
LOG.debug("Received /Plugin.Activate")
return flask.jsonify(schemata.SCHEMA['PLUGIN_ACTIVATE'])
@app.route('/NetworkDriver.GetCapabilities', methods=['POST'])
def plugin_scope():
"""Returns the capability as the remote network driver.
This function returns the capability of the remote network driver, which is
``global`` or ``local`` and defaults to ``local``. With ``global``
capability, the network information is shared among multiple Docker daemons
if the distributed store is appropriately configured.
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/docs/remote.md#set-capability # noqa
"""
LOG.debug("Received /NetworkDriver.GetCapabilities")
capabilities = {'Scope': 'local'}
return flask.jsonify(capabilities)
@app.route('/NetworkDriver.DiscoverNew', methods=['POST'])
def network_driver_discover_new():
"""The callback function for the DiscoverNew notification.
The DiscoverNew notification includes the type of the
resource that has been newly discovered and possibly other
information associated with the resource.
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/docs/remote.md#discovernew-notification # noqa
"""
LOG.debug("Received /NetworkDriver.DiscoverNew")
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.DiscoverDelete', methods=['POST'])
def network_driver_discover_delete():
"""The callback function for the DiscoverDelete notification.
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/docs/remote.md#discoverdelete-notification # noqa
"""
LOG.debug("Received /NetworkDriver.DiscoverDelete")
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.CreateNetwork', methods=['POST'])
def network_driver_create_network():
"""Creates a new Network which name is the given NetworkID.
example:
docker network create --driver docker-hostnic --gateway 192.168.1.1 --subnet 192.168.1.0/24 -o hostnic_mac=52:54:00:e1:d9:ef test_network
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/docs/remote.md#create-network # noqa
"""
json_data = flask.request.get_json(force=True)
jsonschema.validate(json_data, schemata.NETWORK_CREATE_SCHEMA)
hostnic_mac = \
json_data['Options']['com.docker.network.generic']['hostnic_mac']
if driver_config.get_config(json_data['NetworkID']):
return flask.jsonify("network already has a host nic")
gw = json_data.get("IPv4Data")[0].get("Gateway", '')
netinfo = {"mac_address": hostnic_mac}
if gw:
ip = netaddr.IPNetwork(gw)
netinfo["gateway"] = str(ip.ip)
driver_config.write_config(json_data['NetworkID'], netinfo)
LOG.debug("Received JSON data %s for /NetworkDriver.Create", json_data)
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.DeleteNetwork', methods=['POST'])
def network_driver_delete_network():
# Just remove the network from the config file.
json_data = flask.request.get_json(force=True)
driver_config.delete_config(json_data['NetworkID'])
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.Join', methods=['POST'])
def network_driver_join():
json_data = flask.request.get_json(force=True)
jsonschema.validate(json_data, schemata.NETWORK_JOIN_SCHEMA)
netid = json_data['NetworkID']
hostnic_mac = driver_config.get_config(netid).get('mac_address')
ipr = IPRoute()
ifaces = ipr.get_links(address=hostnic_mac)
ifname = ifaces[0].get_attr('IFLA_IFNAME')
with open(constants.ETH1_CONFIG_PATH) as fd:
eth1_config = json.load(fd)
join_response = {
"InterfaceName": {
"SrcName": ifname,
"DstPrefix": "eth"},
}
if eth1_config.get("ipv4_gateway"):
join_response["Gateway"] = eth1_config.get("ipv4_gateway")
if eth1_config.get("ipv6_gateway"):
join_response["GatewayIPv6"] = eth1_config.get("ipv6_gateway")
if eth1_config.get("ipv4_host_routes"):
join_response["StaticRoutes"] = list()
for route in eth1_config.get("ipv4_host_routes"):
join_response["StaticRoutes"].append(
{"Destination": route["destination"],
"NextHop": route["nexthop"]})
return flask.jsonify(join_response)
@app.route('/NetworkDriver.Leave', methods=['POST'])
def network_driver_leave():
"""Unbinds a hostnic from a sandbox.
This function takes the following JSON data and deletes the veth pair
corresponding to the given info. ::
{
"NetworkID": string,
"EndpointID": string
}
We don't need to remove the port from the sandbox explicitly;
once the sandbox gets deleted, the hostnic returns to the default
netns automatically.
"""
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.DeleteEndpoint', methods=['POST'])
def network_driver_delete_endpoint():
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.CreateEndpoint', methods=['POST'])
def network_driver_create_endpoint():
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.EndpointOperInfo', methods=['POST'])
def network_driver_endpoint_operational_info():
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.ProgramExternalConnectivity', methods=['POST'])
def network_driver_program_external_connectivity():
"""provide external connectivity for the given container."""
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
@app.route('/NetworkDriver.RevokeExternalConnectivity', methods=['POST'])
def network_driver_revoke_external_connectivity():
"""Removes external connectivity for a given container.
Performs the necessary programming to remove the external connectivity
of a container
See the following link for more details about the spec:
https://github.com/docker/libnetwork/blob/master/driverapi/driverapi.go
"""
return flask.jsonify(schemata.SCHEMA['SUCCESS'])
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super().__init__()
def load_config(self):
config = {key: value for key, value in self.options.items()
if key in self.cfg.settings and value is not None}
for key, value in config.items():
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def main():
options = {
'bind': "unix:/run/docker/docker-hostnic.sock",
'workers': 1,
}
StandaloneApplication(app, options).run()
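To make the /NetworkDriver.Join handler above concrete, here is a worked example (illustrative values only, matching the unit tests added later in this change): the eth1 config the taskmanager writes to /etc/trove/eth1.json, and the Join response the driver hands back to dockerd. The interface name "ens4" is a placeholder for whichever host NIC carries the hostnic_mac address.

# Input: contents of /etc/trove/eth1.json (written by the taskmanager).
eth1_config = {
    "mac_address": "fa:16:3e:7c:9c:57",
    "ipv4_address": "10.111.0.8",
    "ipv4_cidr": "10.111.0.0/26",
    "ipv4_gateway": "10.111.0.1",
    "ipv4_host_routes": [
        {"destination": "10.10.0.0/16", "nexthop": "10.111.0.10"},
    ],
}

# Output: the Join response returned to dockerd. "SrcName" is the host
# interface whose MAC equals hostnic_mac; docker renames it to eth<N>
# inside the container's network namespace.
join_response = {
    "InterfaceName": {"SrcName": "ens4", "DstPrefix": "eth"},
    "Gateway": "10.111.0.1",
    "StaticRoutes": [
        {"Destination": "10.10.0.0/16", "NextHop": "10.111.0.10"},
    ],
}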

@ -1478,7 +1478,14 @@ network_opts = [
'This is needed for the instance initialization. The check is '
'also necessary when creating public facing instance. A scenario '
'to set this option False is when using Neutron provider '
-'network.')
+'network.'
+),
+cfg.BoolOpt(
+'network_isolation', default=False,
+help='whether to plug user defined port to database container. '
+'This would be useful to isolate user traffic from management '
+'traffic and to avoid network address conflicts.'
+)
]
service_credentials_group = cfg.OptGroup(

@ -14,3 +14,7 @@
BACKUP_TYPE_FULL = 'full'
BACKUP_TYPE_INC = 'incremental'
+ETH1_CONFIG_PATH = "/etc/trove/eth1.json"
+DOCKER_NETWORK_NAME = "database-network"
+DOCKER_HOST_NIC_MODE = "docker-hostnic"
+DOCKER_BRIDGE_MODE = "bridge"

trove/common/schemata.py Normal file

@ -0,0 +1,303 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(wuchunyang): these codes are copied from kuryr-libnetwork project.
EPSILON_PATTERN = '^$'
UUID_BASE = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
UUID_PATTERN = EPSILON_PATTERN + '|' + UUID_BASE
IPV4_PATTERN_BASE = ('((25[0-5]|2[0-4][0-9]|1?[0-9]?[0-9])\\.){3}'
'(25[0-5]|2[0-4][0-9]|1?[0-9]?[0-9])')
CIDRV4_PATTERN = EPSILON_PATTERN + '|^(' + IPV4_PATTERN_BASE + \
'(/(1[0-2][0-8]|[1-9]?[0-9]))' + ')$'
IPV6_PATTERN_BASE = ('('
'([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|'
'([0-9a-fA-F]{1,4}:){1,7}:|'
'([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|'
'([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|'
'([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|'
'([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|'
'([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|'
'[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|'
':((:[0-9a-fA-F]{1,4}){1,7}|:)|'
'fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|'
'::(ffff(:0{1,4}){0,1}:){0,1}'
'((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}'
'(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|'
'([0-9a-fA-F]{1,4}:){1,4}:'
'((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}'
'(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))')
IPV6_PATTERN = EPSILON_PATTERN + u'|^' + IPV6_PATTERN_BASE + u'$'
CIDRV6_PATTERN = EPSILON_PATTERN + u'|^(' + IPV6_PATTERN_BASE + \
'(/(1[0-2][0-8]|[1-9]?[0-9]))' + u')$'
SCHEMA = {
"PLUGIN_ACTIVATE": {"Implements": ["NetworkDriver"]},
"SUCCESS": {}
}
COMMONS = {
'description': 'Common data schemata shared among other schemata.',
'links': [],
'title': 'Trove Common Data Schema Definitions',
'properties': {
'options': {'$ref': '/schemata/commons#/definitions/options'},
'mac': {'$ref': '/schemata/commons#/definitions/mac'},
'cidrv6': {'$ref': '/schemata/commons#/definitions/cidrv6'},
'interface': {'$ref': '/schemata/commons#/definitions/interface'},
'cidr': {'$ref': '/schemata/commons#/definitions/cidr'},
'id': {'$ref': '/schemata/commons#/definitions/id'},
'uuid': {'$ref': '/schemata/commons#/definitions/uuid'},
'ipv4': {'$ref': '/schemata/commons#/definitions/ipv4'},
},
'definitions': {
'options': {
'type': ['object', 'null'],
'description': 'Options.',
'example': {}
},
'id': {
'oneOf': [
{'pattern': '^([0-9a-f]{64})$'},
{'pattern': '^([0-9a-z]{25})$'}],
'type': 'string',
'description': '64 or 25 length ID value of Docker.',
'example': [
'51c75a2515d47edecc3f720bb541e287224416fb66715eb7802011d6ffd4'
'99f1',
'xqqzd9p112o4kvok38n3caxjm'
]
},
'mac': {
'pattern': (EPSILON_PATTERN + '|'
'^((?:[0-9a-f]{2}:){5}[0-9a-f]{2}|'
'(?:[0-9A-F]{2}:){5}[0-9A-F]{2})$'),
'type': 'string',
'description': 'A MAC address.',
'example': 'aa:bb:cc:dd:ee:ff'
},
'cidr': {
'pattern': CIDRV4_PATTERN,
'type': 'string',
'description': 'A IPv4 CIDR of the subnet.',
'example': '10.0.0.0/24'
},
'cidrv6': {
'pattern': CIDRV6_PATTERN,
'type': 'string',
'description': 'A IPv6 CIDR of the subnet.',
'example': '10.0.0.0/24'
},
'ipv4datum': {
'description': 'IPv4 data',
'required': [
'AddressSpace', 'Pool'],
'type': 'object',
'example': {
'AddressSpace': 'foo',
'Pool': '192.168.42.0/24',
'Gateway': '192.168.42.1/24',
'AuxAddresses': {
'web': '192.168.42.2',
'db': '192.168.42.3'
}
},
'properties': {
'AddressSpace': {
'description': 'The name of the address space.',
'type': 'string',
'example': 'foo',
},
'Pool': {
'description': 'A range of IP Addresses requested in '
'CIDR format address/mask.',
'$ref': '#/definitions/commons/definitions/cidr'
},
'Gateway': {
'description': 'Optionally, the IPAM driver may provide '
'a Gateway for the subnet represented by '
'the Pool.',
'$ref': '#/definitions/commons/definitions/cidr',
},
'AuxAddresses': {
'description': 'A list of pre-allocated ip-addresses '
'with an associated identifier as '
'provided by the user to assist network '
'driver if it requires specific '
'ip-addresses for its operation.',
'type': 'object',
'patternProperties': {
'.+': {
'description': 'key-value pair of the ID and '
'the IP address',
'$ref': '#/definitions/commons/definitions/ipv4'
}
}
}
}
},
'ipv6datum': {
'description': 'IPv6 data',
'required': [
'AddressSpace', 'Pool', 'Gateway'],
'type': 'object',
'example': {
'AddressSpace': 'bar',
'Pool': 'fe80::/64',
'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
'AuxAddresses': {
'web': 'fe80::f816:3eff:fe20:57c4',
'db': 'fe80::f816:3eff:fe20:57c5'
}
},
'properties': {
'AddressSpace': {
'description': 'The name of the address space.',
'type': 'string',
'example': 'foo',
},
'Pool': {
'description': 'A range of IP Addresses requested in '
'CIDR format address/mask.',
'$ref': '#/definitions/commons/definitions/cidrv6'
},
'Gateway': {
'description': 'Optionally, the IPAM driver may provide '
'a Gateway for the subnet represented by '
'the Pool.',
'$ref': '#/definitions/commons/definitions/cidrv6',
},
'AuxAddresses': {
'description': 'A list of pre-allocated ip-addresses '
'with an associated identifier as '
'provided by the user to assist network '
'driver if it requires specific '
'ip-addresses for its operation.',
'type': 'object',
'patternProperties': {
'.+': {
'description': 'key-value pair of the ID and '
'the IP address',
'$ref': '#/definitions/commons/definitions/ipv6'
}
}
}
}
},
'sandbox_key': {
'pattern': '^(/var/run/docker/netns/[0-9a-f]{12})$',
'type': 'string',
'description': 'Sandbox information of netns.',
'example': '/var/run/docker/netns/12bbda391ed0'
},
'uuid': {
'pattern': UUID_PATTERN,
'type': 'string',
'description': 'uuid of neutron resources.',
'example': 'dfe39822-ad5e-40bd-babd-3954113b3687'
}
},
'$schema': 'http://json-schema.org/draft-04/hyper-schema',
'type': 'object',
'id': 'schemata/commons'
}
NETWORK_CREATE_SCHEMA = {
'links': [{
'method': 'POST',
'href': '/NetworkDriver.CreateNetwork',
'description': 'Create a Network',
'rel': 'self',
'title': 'Create'
}],
'title': 'Create network',
'required': ['NetworkID', 'IPv4Data', 'IPv6Data', 'Options'],
'definitions': {'commons': {}},
'$schema': 'http://json-schema.org/draft-04/hyper-schema',
'type': 'object',
'properties': {
'NetworkID': {
'description': 'ID of a Network to be created',
'$ref': '#/definitions/commons/definitions/id'
},
'IPv4Data': {
'description': 'IPv4 data for the network',
'type': 'array',
'items': {
'$ref': '#/definitions/commons/definitions/ipv4datum'
}
},
'IPv6Data': {
'description': 'IPv6 data for the network',
'type': 'array',
'items': {
'$ref': '#/definitions/commons/definitions/ipv6datum'
}
},
'Options': {
'type': 'object',
'description': 'Options',
'required': ['com.docker.network.generic'],
'properties': {
'com.docker.network.generic': {
'type': 'object',
'required': ['hostnic_mac'],
'properties': {
'hostnic_mac': {
'$ref': '#/definitions/commons/definitions/mac'
}
}
}
}
}
}
}
NETWORK_CREATE_SCHEMA['definitions']['commons'] = COMMONS
NETWORK_JOIN_SCHEMA = {
'links': [{
'method': 'POST',
'href': '/NetworkDriver.Join',
'description': 'Join the network',
'rel': 'self',
'title': 'Create'
}],
'title': 'Join endpoint',
'required': [
'NetworkID',
'EndpointID',
'SandboxKey'
],
'properties': {
'NetworkID': {
'description': 'Network ID',
'$ref': '#/definitions/commons/definitions/id'
},
'SandboxKey': {
'description': 'Sandbox Key',
'$ref': '#/definitions/commons/definitions/sandbox_key'
},
'Options': {
'$ref': '#/definitions/commons/definitions/options'
},
'EndpointID': {
'description': 'Endpoint ID',
'$ref': '#/definitions/commons/definitions/id'
}
},
'definitions': {'commons': {}},
'$schema': 'http://json-schema.org/draft-04/hyper-schema',
'type': 'object',
}
NETWORK_JOIN_SCHEMA['definitions']['commons'] = COMMONS
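As a usage sketch (not part of the module above), a payload of the shape dockerd sends for a "docker network create --driver docker-hostnic ..." call validates against NETWORK_CREATE_SCHEMA; the NetworkID and addresses below are taken from the schema's own examples and the MAC is a placeholder.

import jsonschema

payload = {
    "NetworkID": "51c75a2515d47edecc3f720bb541e287224416fb66715eb7802011d6ffd499f1",
    "IPv4Data": [{
        "AddressSpace": "foo",
        "Pool": "192.168.42.0/24",
        "Gateway": "192.168.42.1/24",
    }],
    "IPv6Data": [],
    "Options": {
        "com.docker.network.generic": {"hostnic_mac": "fa:16:3e:7c:9c:57"},
    },
}

# Raises jsonschema.ValidationError if the payload does not match the schema.
jsonschema.validate(payload, NETWORK_CREATE_SCHEMA)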

@ -13,19 +13,20 @@
# limitations under the License.
import abc
+import os
import re
+import sqlalchemy
+import urllib
from oslo_log import log as logging
from oslo_utils import encodeutils
-import sqlalchemy
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.sql.expression import text
-import urllib
from trove.common import cfg
from trove.common.configurations import MySQLConfParser
+from trove.common import constants
from trove.common.db.mysql import models
from trove.common import exception
from trove.common.i18n import _
@ -612,12 +613,18 @@ class BaseMySqlApp(service.BaseDbApp):
for port in port_range:
ports[f'{port}/tcp'] = port
+if CONF.network_isolation and \
+os.path.exists(constants.ETH1_CONFIG_PATH):
+network_mode = constants.DOCKER_HOST_NIC_MODE
+else:
+network_mode = constants.DOCKER_BRIDGE_MODE
try:
docker_util.start_container(
self.docker_client,
image,
volumes=volumes,
-network_mode="bridge",
+network_mode=network_mode,
ports=ports,
user=user,
environment={

@ -12,11 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
+import os
from oslo_log import log as logging
import psycopg2
from trove.common import cfg
+from trove.common import constants
from trove.common.db.postgresql import models
from trove.common import exception
from trove.common import stream_codecs
@ -200,12 +202,18 @@ class PgSqlApp(service.BaseDbApp):
for port in port_range:
ports[f'{port}/tcp'] = port
+if CONF.network_isolation and \
+os.path.exists(constants.ETH1_CONFIG_PATH):
+network_mode = constants.DOCKER_HOST_NIC_MODE
+else:
+network_mode = constants.DOCKER_BRIDGE_MODE
try:
docker_util.start_container(
self.docker_client,
image,
volumes=volumes,
-network_mode="bridge",
+network_mode=network_mode,
ports=ports,
user=user,
environment={

@ -11,13 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import re
import docker
+from docker import errors as derros
+from docker import types
from oslo_log import log as logging
from oslo_utils import encodeutils
from trove.common import cfg
+from trove.common import constants
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -34,6 +38,84 @@ def stop_container(client, name="database"):
container.stop(timeout=CONF.state_change_wait_time)
def create_network(client: docker.client.DockerClient,
name: str) -> str:
networks = client.networks.list()
for net in networks:
if net.name == name:
return net.id
LOG.debug("Creating docker network: %s", name)
with open(constants.ETH1_CONFIG_PATH) as fs:
eth1_config = json.load(fs)
enable_ipv6 = False
ipam_pool = list()
if eth1_config.get("ipv4_address"):
ipam_pool.append(types.IPAMPool(
subnet=eth1_config.get("ipv4_cidr"),
gateway=eth1_config.get("ipv4_gateway"))
)
if eth1_config.get("ipv6_address"):
enable_ipv6 = True
ipam_pool.append(types.IPAMPool(
subnet=eth1_config.get("ipv6_cidr"),
gateway=eth1_config.get("ipv6_gateway")
))
ipam_config = docker.types.IPAMConfig(pool_configs=ipam_pool)
mac_address = eth1_config.get("mac_address")
net = client.networks.create(name=name,
driver=constants.DOCKER_HOST_NIC_MODE,
ipam=ipam_config,
enable_ipv6=enable_ipv6,
options=dict(hostnic_mac=mac_address))
LOG.debug("docker network: %s created successfully", net.id)
return net.id
def _create_container_with_low_level_api(image: str, param: dict) -> None:
# create a low-level docker api object
client = docker.APIClient(base_url='unix://var/run/docker.sock')
host_config_kwargs = dict()
if param.get("restart_policy"):
host_config_kwargs["restart_policy"] = param.get("restart_policy")
if param.get("privileged"):
host_config_kwargs["privileged"] = param.get("privileged")
if param.get("volumes"):
host_config_kwargs["binds"] = param.get("volumes")
host_config = client.create_host_config(**host_config_kwargs)
network_config_kwargs = dict()
with open(constants.ETH1_CONFIG_PATH) as fs:
eth1_config = json.load(fs)
if eth1_config.get("ipv4_address"):
network_config_kwargs["ipv4_address"] = eth1_config.get("ipv4_address")
if eth1_config.get("ipv6_address"):
network_config_kwargs["ipv6_address"] = eth1_config.get("ipv6_address")
networking_config = client.create_networking_config(
{param.get("network"):
client.create_endpoint_config(**network_config_kwargs)})
# NOTE(wuchunyang): the low-level api doesn't support RUN interface,
# so we need pull image first, then start the container
LOG.debug("Pulling docker images: %s", image)
try:
client.pull(image)
except derros.APIError as e:
LOG.error("failed to pull image: %s, due to the error: %s", image, e)
raise
LOG.debug("Creating container: %s", param.get("name"))
container = client.create_container(image=image,
name=param.get("name"),
detach=param.get("detach"),
user=param.get("user"),
environment=param.get("environment"),
command=param.get("command"),
host_config=host_config,
networking_config=networking_config)
LOG.debug("Starting container: %s", param.get("name"))
client.start(container=container)
def start_container(client, image, name="database", def start_container(client, image, name="database",
restart_policy="unless-stopped", restart_policy="unless-stopped",
volumes={}, ports={}, user="", network_mode="host", volumes={}, ports={}, user="", network_mode="host",
@ -58,27 +140,31 @@ def start_container(client, image, name="database",
container = client.containers.get(name) container = client.containers.get(name)
LOG.info(f'Starting existing container {name}') LOG.info(f'Starting existing container {name}')
container.start() container.start()
return
except docker.errors.NotFound: except docker.errors.NotFound:
pass
LOG.info( LOG.info(
f"Creating docker container, image: {image}, " f"Creating docker container, image: {image}, "
f"volumes: {volumes}, ports: {ports}, user: {user}, " f"volumes: {volumes}, ports: {ports}, user: {user}, "
f"network_mode: {network_mode}, environment: {environment}, " f"network_mode: {network_mode}, environment: {environment}, "
f"command: {command}") f"command: {command}")
container = client.containers.run( kwargs = dict(name=name,
image,
name=name,
restart_policy={"Name": restart_policy}, restart_policy={"Name": restart_policy},
privileged=False, privileged=False,
network_mode=network_mode,
detach=True, detach=True,
volumes=volumes, volumes=volumes,
ports=ports, ports=ports,
user=user, user=user,
environment=environment, environment=environment,
command=command command=command)
) if network_mode == constants.DOCKER_HOST_NIC_MODE:
create_network(client, constants.DOCKER_NETWORK_NAME)
return container kwargs["network"] = constants.DOCKER_NETWORK_NAME
return _create_container_with_low_level_api(image, kwargs)
else:
kwargs["network_mode"] = network_mode
return client.containers.run(image, **kwargs)
def _decode_output(output): def _decode_output(output):

@ -992,7 +992,10 @@ class BaseInstance(SimpleInstance):
return userdata if userdata else ""
-def get_injected_files(self, datastore_manager, datastore_version):
+def get_injected_files(self,
+datastore_manager,
+datastore_version,
+**kwargs):
injected_config_location = CONF.get('injected_config_location')
guest_info = CONF.get('guest_info')
@ -1016,6 +1019,12 @@
)
}
+# pass through the network_isolation to guest
+files = {
+guest_info_file: ("%snetwork_isolation=%s\n" %
+(files.get(guest_info_file),
+CONF.network.network_isolation))
+}
instance_key = get_instance_encryption_key(self.id)
if instance_key:
files = {
@ -1040,6 +1049,14 @@
# Configure docker's daemon.json if the directives exist in trove.conf
docker_daemon_values = {}
+# In case the user enables network_isolation with the management/business
+# network not set
+if CONF.network.network_isolation and \
+kwargs.get("disable_bridge", False):
+docker_daemon_values["bridge"] = "none"
+docker_daemon_values["ip-forward"] = False
+docker_daemon_values["iptables"] = False
+else:
# Configure docker_bridge_network_ip in order to change the docker
# default range(172.17.0.0/16) of bridge network
if CONF.docker_bridge_network_ip:
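For reference (illustration only): when network_isolation is enabled and the bridge is disabled as above, the values injected into docker's daemon.json are equivalent to the following, serialized together with the rest of docker_daemon_values.

import json

docker_daemon_values = {
    "bridge": "none",      # do not create the default docker0 bridge
    "ip-forward": False,
    "iptables": False,
}
print(json.dumps(docker_daemon_values, indent=4))
# {
#     "bridge": "none",
#     "ip-forward": false,
#     "iptables": false
# }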

@ -311,7 +311,7 @@ class InstanceController(wsgi.Controller):
nic['network_id'] = network_id
nic.pop('net-id', None)
+if not CONF.network.network_isolation:
self._check_network_overlap(context, network_id, subnet_id)
def _check_network_overlap(self, context, user_network=None,

@ -13,6 +13,7 @@
# under the License.
import copy
+import json
import time
import traceback
@ -33,6 +34,7 @@ from trove.common import clients
from trove.common.clients import create_cinder_client
from trove.common.clients import create_dns_client
from trove.common.clients import create_guest_client
+from trove.common import constants
from trove.common import exception
from trove.common.exception import BackupCreationError
from trove.common.exception import GuestError
@ -557,6 +559,26 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
)
return networks
def _get_user_nic_info(self, port_id):
nic_info = dict()
port = self.neutron_client.show_port(port_id)
fixed_ips = port['port']["fixed_ips"]
nic_info["mac_address"] = port['port']["mac_address"]
for fixed_ip in fixed_ips:
subnet = self.neutron_client.show_subnet(
fixed_ip["subnet_id"])['subnet']
if subnet.get("ip_version") == 4:
nic_info["ipv4_address"] = fixed_ip.get("ip_address")
nic_info["ipv4_cidr"] = subnet.get("cidr")
nic_info["ipv4_gateway"] = subnet.get("gateway_ip")
nic_info["ipv4_host_routes"] = subnet.get("host_routes")
elif subnet.get("ip_version") == 6:
nic_info["ipv6_address"] = fixed_ip.get("ip_address")
nic_info["ipv6_cidr"] = subnet.get("cidr")
nic_info["ipv6_gateway"] = subnet.get("gateway_ip")
nic_info["ipv6_host_routes"] = subnet.get("host_routes")
return nic_info
def create_instance(self, flavor, image_id, databases, users,
datastore_manager, packages, volume_size,
backup_id, availability_zone, root_password, nics,
@ -573,11 +595,21 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
"Creating instance %s, nics: %s, access: %s",
self.id, nics, access
)
networks = self._prepare_networks_for_instance(
datastore_manager, nics, access=access
)
+if CONF.network.network_isolation and len(nics) > 1:
+# the user defined port is always the first one.
+nic_info = self._get_user_nic_info(networks[0]["port-id"])
+LOG.debug("Generate the eth1_config.json file: %s", nic_info)
+files = self.get_injected_files(datastore_manager,
+ds_version,
+disable_bridge=True)
+files[constants.ETH1_CONFIG_PATH] = json.dumps(nic_info)
+else:
files = self.get_injected_files(datastore_manager, ds_version)
cinder_volume_type = volume_type or CONF.cinder_volume_type
volume_info = self._create_server_volume(
flavor['id'], image_id,

@ -0,0 +1,139 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from trove.guestagent.utils import docker as docker_utils
from trove.tests.unittests import trove_testtools
class TestDockerUtils(trove_testtools.TestCase):
def setUp(self):
super().setUp()
self.docker_client = mock.MagicMock()
def test_create_network_with_network_exists(self):
network_name = "test_network"
network1 = mock.MagicMock(id="111")
network1.name = "test_network"
network2 = mock.MagicMock(id="222")
network2.name = "test_network_2"
self.docker_client.networks.list.return_value = [network1, network2]
id = docker_utils.create_network(self.docker_client, network_name)
self.assertEqual(id, "111")
def test_create_network_ipv4_only(self):
network_name = "test_network"
eth1_data = json.dumps({"mac_address": "fa:16:3e:7c:9c:57",
"ipv4_address": "10.111.0.8",
"ipv4_cidr": "10.111.0.0/26",
"ipv4_gateway": "10.111.0.1",
"ipv4_host_routes": [{
"destination": "10.10.0.0/16",
"nexthop": "10.111.0.10"}]})
net = mock.MagicMock(return_value=mock.MagicMock(id=111))
self.docker_client.networks.create = net
mo = mock.mock_open(read_data=eth1_data)
with mock.patch.object(docker_utils, 'open', mo):
id = docker_utils.create_network(self.docker_client, network_name)
self.assertEqual(id, 111)
net.assert_called_once()
kwargs = net.call_args.kwargs
self.assertEqual(kwargs.get("name"), "test_network")
self.assertEqual(kwargs.get("driver"), "docker-hostnic")
self.assertEqual(len(kwargs.get("ipam").get("Config")), 1)
self.assertEqual(kwargs["ipam"]["Config"][0]["Gateway"],
"10.111.0.1")
self.assertEqual(kwargs["enable_ipv6"], False)
self.assertEqual(kwargs["options"]["hostnic_mac"],
"fa:16:3e:7c:9c:57")
def test_create_network_ipv6_only(self):
network_name = "test_network"
eth1_data = json.dumps({"mac_address": "fa:16:3e:7c:9c:58",
"ipv6_address":
"fda3:96d9:23e:0:f816:3eff:fe7c:9c57",
"ipv6_cidr": "fda3:96d9:23e::/64",
"ipv6_gateway": "fda3:96d9:23e::1"})
net = mock.MagicMock(return_value=mock.MagicMock(id=222))
self.docker_client.networks.create = net
mo = mock.mock_open(read_data=eth1_data)
with mock.patch.object(docker_utils, 'open', mo):
id = docker_utils.create_network(self.docker_client, network_name)
self.assertEqual(id, 222)
net.assert_called_once()
kwargs = net.call_args.kwargs
self.assertEqual(kwargs.get("name"), "test_network")
self.assertEqual(kwargs.get("driver"), "docker-hostnic")
self.assertEqual(len(kwargs.get("ipam").get("Config")), 1)
self.assertEqual(kwargs["ipam"]["Config"][0]["Gateway"],
"fda3:96d9:23e::1")
self.assertEqual(kwargs["enable_ipv6"], True)
self.assertEqual(kwargs["options"]["hostnic_mac"], "fa:16:3e:7c:9c:58")
def test_create_network_dual_stack(self):
network_name = "test_network"
eth1_data = json.dumps({"mac_address": "fa:16:3e:7c:9c:59",
"ipv4_address": "10.111.0.8",
"ipv4_cidr": "10.111.0.0/26",
"ipv4_gateway": "10.111.0.1",
"ipv4_host_routes": [{
"destination": "10.10.0.0/16",
"nexthop": "10.111.0.10"}],
"ipv6_address":
"fda3:96d9:23e:0:f816:3eff:fe7c:9c57",
"ipv6_cidr": "fda3:96d9:23e::/64",
"ipv6_gateway": "fda3:96d9:23e::1"})
net = mock.MagicMock(return_value=mock.MagicMock(id=333))
self.docker_client.networks.create = net
mo = mock.mock_open(read_data=eth1_data)
with mock.patch.object(docker_utils, 'open', mo):
id = docker_utils.create_network(self.docker_client, network_name)
self.assertEqual(id, 333)
net.assert_called_once()
kwargs = net.call_args.kwargs
self.assertEqual(kwargs["name"], "test_network")
self.assertEqual(kwargs["driver"], "docker-hostnic")
self.assertEqual(len(kwargs["ipam"]["Config"]), 2)
self.assertEqual(kwargs["enable_ipv6"], True)
self.assertEqual(kwargs["options"]["hostnic_mac"],
"fa:16:3e:7c:9c:59")
@mock.patch("docker.APIClient")
def test__create_container_with_low_level_api(self, mock_client):
eth1_data = json.dumps({
"mac_address": "fa:16:3e:7c:9c:57",
"ipv4_address": "10.111.0.8",
"ipv4_cidr": "10.111.0.0/26",
"ipv4_gateway": "10.111.0.1",
"ipv4_host_routes": [{"destination": "10.10.0.0/16",
"nexthop": "10.111.0.10"}]})
mo = mock.mock_open(read_data=eth1_data)
param = dict(name="test",
restart_policy={"Name": "always"},
privileged=False,
detach=True,
volumes={},
ports={},
user="test_user",
environment={},
command="sleep inf")
with mock.patch.object(docker_utils, 'open', mo):
docker_utils._create_container_with_low_level_api(
"busybox", param)
mock_client().create_host_config.assert_called_once()
mock_client().create_networking_config.assert_called_once()
mock_client().pull.assert_called_once()
mock_client().create_container.assert_called_once()
mock_client().start.assert_called_once()

@ -349,6 +349,20 @@
zuul_copy_output:
'/var/log/guest-agent-logs/': 'logs'
+- job:
+name: trove-tempest-mysql-network-isolation
+parent: trove-tempest
+vars:
+devstack_localrc:
+TROVE_ENABLE_LOCAL_REGISTRY: True
+TROVE_MGMT_GATEWAY: "192.168.254.1"
+devstack_local_conf:
+post-config:
+$TROVE_CONF:
+network:
+network_isolation: True
+tempest_test_regex: ^trove_tempest_plugin\.tests\.scenario\.test_backup
- job:
name: trove-tempest-postgres
parent: devstack-tempest
@ -449,6 +463,7 @@
name: trove-ubuntu-guest-image-build
run: playbooks/image-build/run.yaml
nodeset: trove-ubuntu-focal-single
+timeout: 3600
description: |
Build Ubuntu focal based image only on ubuntu distro.
required-projects:
@ -469,6 +484,7 @@
name: trove-centos8s-guest-image-build
run: playbooks/image-build/run.yaml
nodeset: trove-centos8s-single
+timeout: 3600
description: |
Build Ubuntu focal based image only on centos8 stream.
required-projects:

@ -17,6 +17,8 @@
voting: true
- trove-tempest:
voting: false
+- trove-tempest-mysql-network-isolation:
+voting: true
- trove-ubuntu-guest-image-build:
voting: true
- trove-centos8s-guest-image-build: