Fix devstack deployment when ovn-provider is selected

This patch ensures devstack deploys the K8s API behind an L3,
amphora-driver based load balancer, since the OVN provider may not
work properly when L2 member mode is selected.

In addition, it drops the load balancer cascade deletion special
casing for the ovn-octavia provider, as this is being fixed on the
OVN side.

Depends-On: I31da7134d51665bbfe316b8a05cc34216420a4d0
Implements: blueprint octavia-ovn-provider

Change-Id: I89e969ae52378a7c62ac552e1a69b7e2a45bd23a
Luis Tomas Bolivar 2018-10-01 18:12:32 +02:00
parent 451add3543
commit ff3d45803e
4 changed files with 32 additions and 50 deletions
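For orientation, the behavior described above can be checked after stacking by looking at which Octavia provider each load balancer was created with. This is a sketch only, not part of the change; the load balancer name "default/kubernetes" mirrors the naming used in the hunks below and is an assumption here.

# Sketch only: list the load balancers devstack created and confirm the
# one fronting the K8s API is on the amphora provider.
openstack loadbalancer list
openstack loadbalancer show default/kubernetes -c provider -f value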


@@ -42,12 +42,8 @@ function ovs_bind_for_kubelet() {
     # access
     local use_octavia
     use_octavia=$(trueorfalse True KURYR_K8S_LBAAS_USE_OCTAVIA)
-    if [[ "$use_octavia" == "True" && \
-          "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L3" ]]; then
+    if [[ "$use_octavia" == "True" ]]; then
         openstack port set "$port_id" --security-group service_pod_access
-    elif [[ "$use_octavia" == "True" && \
-          "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L2" ]]; then
-        openstack port set "$port_id" --security-group octavia_pod_access
     fi
     if [[ "$KURYR_SG_DRIVER" == "namespace" ]]; then
         openstack port set "$port_id" --security-group allow_from_namespace
@@ -1062,18 +1058,10 @@ function create_load_balancer_member {
     wait_for_lb $lb
     if is_service_enabled octavia; then
-        if [[ "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L3" ]]; then
-            openstack loadbalancer member create --name "$name" \
-                --address "$address" \
-                --protocol-port "$port" \
-                "$pool"
-        else
-            openstack loadbalancer member create --name "$name" \
-                --address "$address" \
-                --protocol-port "$port" \
-                --subnet "$subnet" \
-                "$pool"
-        fi
+        openstack loadbalancer member create --name "$name" \
+            --address "$address" \
+            --protocol-port "$port" \
+            "$pool"
     else
         neutron lbaas-member-create --name "$name" \
             --subnet "$subnet" \
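As an illustration of the simplified branch above, the member backing the K8s API is now always created without --subnet, i.e. in L3 mode. A sketch with made-up address and port values:

# Sketch only: example expansion of the unified member-create call; the
# address and ports are invented, and the pool name follows the
# default/kubernetes:<port> convention used elsewhere in this change.
openstack loadbalancer member create --name "$(hostname)" \
    --address 192.168.5.10 \
    --protocol-port 6443 \
    default/kubernetes:443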


@@ -55,8 +55,9 @@ KURYR_K8S_LBAAS_USE_OCTAVIA=True
 # In case Octavia is used for LBaaS, you can choose the
 # Octavia's Load Balancer provider.
 # KURYR_EP_DRIVER_OCTAVIA_PROVIDER=default
-# Uncomment the next lines to enable ovn provider. Note L2 mode is used
-# to ensure member subnet is added
+# Uncomment the next lines to enable ovn provider. Note only one mode is
+# supported on ovn-octavia. As the member subnet must be added when adding
+# members, it must be set to L2 mode
 # KURYR_EP_DRIVER_OCTAVIA_PROVIDER=ovn
 # KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
 # KURYR_K8S_OCTAVIA_SG_MODE=create
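Uncommented, the sample above boils down to the following local.conf fragment (values copied from the commented lines; KURYR_K8S_LBAAS_USE_OCTAVIA shown for context):

# Example local.conf fragment enabling the ovn provider in L2 member mode.
KURYR_K8S_LBAAS_USE_OCTAVIA=True
KURYR_EP_DRIVER_OCTAVIA_PROVIDER=ovn
KURYR_K8S_OCTAVIA_MEMBER_MODE=L2
KURYR_K8S_OCTAVIA_SG_MODE=create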


@@ -264,15 +264,10 @@ function create_k8s_api_service {
         address="${HOST_IP}"
     fi
     use_octavia=$(trueorfalse True KURYR_K8S_LBAAS_USE_OCTAVIA)
-    if [[ "$use_octavia" == "True" && \
-          "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L2" ]]; then
-        create_load_balancer_member "$(hostname)" "$address" "$api_port" \
-            default/kubernetes:${KURYR_K8S_API_LB_PORT} $KURYR_NEUTRON_DEFAULT_POD_SUBNET "$lb_name" "$project_id"
-    else
-        create_load_balancer_member "$(hostname)" "$address" "$api_port" \
-            default/kubernetes:${KURYR_K8S_API_LB_PORT} public-subnet "$lb_name" "$project_id"
-    fi
+    # Regardless of the octavia mode, the k8s API will be behind an L3 mode
+    # amphora driver loadbalancer
+    create_load_balancer_member "$(hostname)" "$address" "$api_port" \
+        default/kubernetes:${KURYR_K8S_API_LB_PORT} public-subnet "$lb_name" "$project_id"
 }
 
 function configure_neutron_defaults {
@@ -325,24 +320,27 @@ function configure_neutron_defaults {
     ext_svc_subnet_id="$(openstack subnet show -c id -f value \
         "${KURYR_NEUTRON_DEFAULT_EXT_SVC_SUBNET}")"
 
+    # In order for the ports to allow service traffic under Octavia L3 mode,
+    # it is necessary for the service subnet to be allowed into the port's
+    # security groups. If L3 is used, then the pods created will include it.
+    # Otherwise it will be just used by the kubelet port used for the K8s API
+    # load balancer
+    local service_cidr
+    local service_pod_access_sg_id
+    service_cidr=$(openstack --os-cloud devstack-admin \
+        --os-region "$REGION_NAME" subnet show \
+        "${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}" -f value -c cidr)
+    service_pod_access_sg_id=$(openstack --os-cloud devstack-admin \
+        --os-region "$REGION_NAME" \
+        security group create --project "$project_id" \
+        service_pod_access -f value -c id)
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
+        security group rule create --project "$project_id" \
+        --description "k8s service subnet allowed" \
+        --remote-ip "$service_cidr" --ethertype IPv4 --protocol tcp \
+        "$service_pod_access_sg_id"
     if [[ "$use_octavia" == "True" && \
           "$KURYR_K8S_OCTAVIA_MEMBER_MODE" == "L3" ]]; then
-        # In order for the pods to allow service traffic under Octavia L3 mode,
-        #it is necessary for the service subnet to be allowed into the $sg_ids
-        local service_cidr
-        local service_pod_access_sg_id
-        service_cidr=$(openstack --os-cloud devstack-admin \
-            --os-region "$REGION_NAME" subnet show \
-            "${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}" -f value -c cidr)
-        service_pod_access_sg_id=$(openstack --os-cloud devstack-admin \
-            --os-region "$REGION_NAME" \
-            security group create --project "$project_id" \
-            service_pod_access -f value -c id)
-        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
-            security group rule create --project "$project_id" \
-            --description "k8s service subnet allowed" \
-            --remote-ip "$service_cidr" --ethertype IPv4 --protocol tcp \
-            "$service_pod_access_sg_id"
         if [ -n "$sg_ids" ]; then
             sg_ids+=",${service_pod_access_sg_id}"
         else
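Not part of the change, but the rule created above can be inspected after stacking; the security group name comes from the hunk, while the --os-cloud/--os-region options are dropped for brevity:

# Sketch: show the service_pod_access group and the TCP rule that allows
# traffic from the service subnet CIDR.
openstack security group show service_pod_access
openstack security group rule list service_pod_access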


@@ -66,12 +66,7 @@ def setup_loadbalancer_client():
         service_type='load-balancer')
     lbaas_client.httpclient = octo_httpclient
     _clients[_LB_CLIENT] = lbaas_client
-    # FIXME(ltomasbo): For now ovn provider is not able to work with
-    # cascade loadbalancer deletion. Remove when fixed
-    if config.CONF.kubernetes.endpoints_driver_octavia_provider == 'ovn':
-        lbaas_client.cascading_capable = False
-    else:
-        lbaas_client.cascading_capable = True
+    lbaas_client.cascading_capable = True
 
 
 def setup_kubernetes_client():
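For context on cascading_capable: a cascading delete removes a load balancer together with its listeners, pools and members in a single API call. With the Octavia CLI this corresponds to the --cascade flag; illustration only, with an assumed load balancer name:

# Illustration only: cascade deletion of a load balancer and all of its
# child objects (listeners, pools, members).
openstack loadbalancer delete --cascade default/kubernetes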