Merge "Sample of deploying Kubernetes cluster with cilium"

This commit is contained in:
Zuul 2024-03-16 14:20:05 +00:00 committed by Gerrit Code Review
commit 80b5ad3ced
16 changed files with 2956 additions and 0 deletions

View File

@ -0,0 +1,7 @@
---
features:
- |
Add a sample of management driver for Cilium CNI deployment. It provides
two use cases: a simple flavor of 1-master/2-workers and a more complex
flavor of 3-masters/2-workers, for demonstrating Tacker's basic scenarios
such as heal and scale-out.

View File

@ -0,0 +1,42 @@
heat_template_version: 2013-05-23
description: 'Complex Base HOT for Sample VNF'
parameters:
nfv:
type: json
resources:
masterNode:
type: complex_nested_master.yaml
properties:
flavor: { get_param: [ nfv, VDU, masterNode, computeFlavourId ] }
image: { get_param: [ nfv, VDU, masterNode, vcImageId ] }
net1: { get_param: [ nfv, CP, masterNode_CP1, network ] }
vip_port_ip: { get_attr: [vip_CP, fixed_ips, 0, ip_address] }
workerNode:
type: complex_nested_worker.yaml
properties:
flavor: { get_param: [ nfv, VDU, workerNode, computeFlavourId ] }
image: { get_param: [ nfv, VDU, workerNode, vcImageId ] }
net1: { get_param: [ nfv, CP, workerNode_CP1, network ] }
affinity: { get_resource: nfvi_node_affinity }
vip_CP:
type: OS::Neutron::Port
properties:
network: net0
vip_CP_floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: public
port_id: { get_resource: vip_CP }
nfvi_node_affinity:
type: OS::Nova::ServerGroup
properties:
name: nfvi_node_affinity
policies: [ 'anti-affinity' ]
outputs: {}

View File

@ -0,0 +1,36 @@
heat_template_version: 2013-05-23
description: 'masterNode HOT for Sample VNF'
parameters:
flavor:
type: string
image:
type: string
net1:
type: string
vip_port_ip:
type: string
resources:
masterNode:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor }
name: masterNode
image: { get_param: image }
networks:
- port:
get_resource: masterNode_CP1
masterNode_CP1:
type: OS::Neutron::Port
properties:
network: { get_param: net1 }
allowed_address_pairs:
- ip_address: { get_param: vip_port_ip }
masterNode_CP1_floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: public
port_id: { get_resource: masterNode_CP1 }

View File

@ -0,0 +1,36 @@
heat_template_version: 2013-05-23
description: 'workerNode HOT for Sample VNF'
parameters:
flavor:
type: string
image:
type: string
net1:
type: string
affinity:
type: string
resources:
workerNode:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor }
name: workerNode
image: { get_param: image }
networks:
- port:
get_resource: workerNode_CP1
scheduler_hints:
group: { get_param: affinity }
workerNode_CP1:
type: OS::Neutron::Port
properties:
network: { get_param: net1 }
workerNode_CP1_floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: public
port_id: { get_resource: workerNode_CP1 }

View File

@ -0,0 +1,32 @@
heat_template_version: 2013-05-23
description: 'masterNode HOT for Sample VNF'
parameters:
flavor:
type: string
image:
type: string
net1:
type: string
resources:
masterNode:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor }
name: masterNode
image: { get_param: image }
networks:
- port:
get_resource: masterNode_CP1
masterNode_CP1:
type: OS::Neutron::Port
properties:
network: { get_param: net1 }
masterNode_CP1_floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: public
port_id: { get_resource: masterNode_CP1 }

View File

@ -0,0 +1,32 @@
heat_template_version: 2013-05-23
description: 'workerNode HOT for Sample VNF'
parameters:
flavor:
type: string
image:
type: string
net1:
type: string
resources:
workerNode:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor }
name: workerNode
image: { get_param: image }
networks:
- port:
get_resource: workerNode_CP1
workerNode_CP1:
type: OS::Neutron::Port
properties:
network: { get_param: net1 }
workerNode_CP1_floating_ip:
type: OS::Neutron::FloatingIP
properties:
floating_network: public
port_id: { get_resource: workerNode_CP1 }

View File

@ -0,0 +1,23 @@
heat_template_version: 2013-05-23
description: 'Simple Base HOT for Sample VNF'
parameters:
nfv:
type: json
resources:
masterNode:
type: simple_nested_master.yaml
properties:
flavor: { get_param: [ nfv, VDU, masterNode, computeFlavourId ] }
image: { get_param: [ nfv, VDU, masterNode, vcImageId ] }
net1: { get_param: [ nfv, CP, masterNode_CP1, network ] }
workerNode:
type: simple_nested_worker.yaml
properties:
flavor: { get_param: [ nfv, VDU, workerNode, computeFlavourId ] }
image: { get_param: [ nfv, VDU, workerNode, vcImageId ] }
net1: { get_param: [ nfv, CP, workerNode_CP1, network ] }
outputs: {}

View File

@ -0,0 +1,229 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Simple deployment flavour for Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_kubernetes_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: complex
requirements:
virtual_link_external1_1: [ masterNode_CP1, virtual_link ]
virtual_link_external1_2: [ workerNode_CP1, virtual_link ]
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A complex flavour
interfaces:
Vnflcm:
instantiate_end:
implementation: mgmt-drivers-kubernetes
heal_start:
implementation: mgmt-drivers-kubernetes
heal_end:
implementation: mgmt-drivers-kubernetes
scale_start:
implementation: mgmt-drivers-kubernetes
scale_end:
implementation: mgmt-drivers-kubernetes
artifacts:
mgmt-drivers-kubernetes:
description: Management driver for kubernetes cluster
type: tosca.artifacts.Implementation.Python
file: ../Scripts/kubernetes_mgmt_v2.py
masterNode:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: masterNode
description: masterNode compute node
vdu_profile:
min_number_of_instances: 3
max_number_of_instances: 3
sw_image_data:
name: masterNode-image
version: '22.04'
checksum:
algorithm: sha-512
hash: 7273f6c927c2fccb31ac1398da7c30dc9265f7c51896e41d062f9426afd061326947c9af442df6b0eddd04bca7c29239baaccf2dec4ace19a39bcdb74bbb4758
container_format: bare
disk_format: qcow2
min_disk: 0 GB
size: 2 GB
artifacts:
sw_image:
type: tosca.artifacts.nfv.SwImage
file: ../Files/images/ubuntu-22.04-server-cloudimg-amd64.img
capabilities:
virtual_compute:
properties:
requested_additional_capabilities:
properties:
requested_additional_capability_name: m1.medium
support_mandatory: true
target_performance_parameters:
entry_schema: test
virtual_memory:
virtual_mem_size: 4 GB
virtual_cpu:
num_virtual_cpu: 2
virtual_local_storage:
- size_of_storage: 45 GB
workerNode:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: workerNode
description: workerNode compute node
vdu_profile:
min_number_of_instances: 2
max_number_of_instances: 4
sw_image_data:
name: workerNode-image
version: '22.04'
checksum:
algorithm: sha-512
hash: 7273f6c927c2fccb31ac1398da7c30dc9265f7c51896e41d062f9426afd061326947c9af442df6b0eddd04bca7c29239baaccf2dec4ace19a39bcdb74bbb4758
container_format: bare
disk_format: qcow2
min_disk: 0 GB
size: 2 GB
artifacts:
sw_image:
type: tosca.artifacts.nfv.SwImage
file: ../Files/images/ubuntu-22.04-server-cloudimg-amd64.img
capabilities:
virtual_compute:
properties:
requested_additional_capabilities:
properties:
requested_additional_capability_name: m1.medium
support_mandatory: true
target_performance_parameters:
entry_schema: test
virtual_memory:
virtual_mem_size: 4 GB
virtual_cpu:
num_virtual_cpu: 2
virtual_local_storage:
- size_of_storage: 45 GB
masterNode_CP1:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 0
requirements:
- virtual_binding: masterNode
workerNode_CP1:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 0
requirements:
- virtual_binding: workerNode
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
workerNode_scale:
name: workerNode_scale
description: workerNode scaling aspect
max_scale_level: 2
step_deltas:
- delta_1
- masterNode_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 3
targets: [ masterNode ]
- workerNode_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 2
targets: [ workerNode ]
- workerNode_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: workerNode_scale
deltas:
delta_1:
number_of_instances: 1
targets: [ workerNode ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
workerNode_scale:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
workerNode_scale:
scale_level: 2
default_level: instantiation_level_1
- masterNode_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 3
instantiation_level_2:
number_of_instances: 3
targets: [ masterNode ]
- workerNode_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 2
instantiation_level_2:
number_of_instances: 4
targets: [ workerNode ]
- policy_antiaffinity_group:
type: tosca.policies.nfv.AntiAffinityRule
targets: [ workerNode ]
properties:
scope: nfvi_node

View File

@ -0,0 +1,223 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Simple deployment flavour for Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_kubernetes_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external1_1: [ masterNode_CP1, virtual_link ]
virtual_link_external1_2: [ workerNode_CP1, virtual_link ]
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
interfaces:
Vnflcm:
instantiate_end:
implementation: mgmt-drivers-kubernetes
heal_start:
implementation: mgmt-drivers-kubernetes
heal_end:
implementation: mgmt-drivers-kubernetes
scale_start:
implementation: mgmt-drivers-kubernetes
scale_end:
implementation: mgmt-drivers-kubernetes
artifacts:
mgmt-drivers-kubernetes:
description: Management driver for kubernetes cluster
type: tosca.artifacts.Implementation.Python
file: ../Scripts/kubernetes_mgmt_v2.py
masterNode:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: masterNode
description: masterNode compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 3
sw_image_data:
name: masterNode-image
version: '22.04'
checksum:
algorithm: sha-512
hash: 7273f6c927c2fccb31ac1398da7c30dc9265f7c51896e41d062f9426afd061326947c9af442df6b0eddd04bca7c29239baaccf2dec4ace19a39bcdb74bbb4758
container_format: bare
disk_format: qcow2
min_disk: 0 GB
size: 2 GB
artifacts:
sw_image:
type: tosca.artifacts.nfv.SwImage
file: ../Files/images/ubuntu-22.04-server-cloudimg-amd64.img
capabilities:
virtual_compute:
properties:
requested_additional_capabilities:
properties:
requested_additional_capability_name: m1.medium
support_mandatory: true
target_performance_parameters:
entry_schema: test
virtual_memory:
virtual_mem_size: 4 GB
virtual_cpu:
num_virtual_cpu: 2
virtual_local_storage:
- size_of_storage: 45 GB
workerNode:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: workerNode
description: workerNode compute node
vdu_profile:
min_number_of_instances: 2
max_number_of_instances: 4
sw_image_data:
name: workerNode-image
version: '22.04'
checksum:
algorithm: sha-512
hash: 7273f6c927c2fccb31ac1398da7c30dc9265f7c51896e41d062f9426afd061326947c9af442df6b0eddd04bca7c29239baaccf2dec4ace19a39bcdb74bbb4758
container_format: bare
disk_format: qcow2
min_disk: 0 GB
size: 2 GB
artifacts:
sw_image:
type: tosca.artifacts.nfv.SwImage
file: ../Files/images/ubuntu-22.04-server-cloudimg-amd64.img
capabilities:
virtual_compute:
properties:
requested_additional_capabilities:
properties:
requested_additional_capability_name: m1.medium
support_mandatory: true
target_performance_parameters:
entry_schema: test
virtual_memory:
virtual_mem_size: 4 GB
virtual_cpu:
num_virtual_cpu: 2
virtual_local_storage:
- size_of_storage: 45 GB
masterNode_CP1:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 0
requirements:
- virtual_binding: masterNode
workerNode_CP1:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 0
requirements:
- virtual_binding: workerNode
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
workerNode_scale:
name: workerNode_scale
description: workerNode scaling aspect
max_scale_level: 2
step_deltas:
- delta_1
- masterNode_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ masterNode ]
- workerNode_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 2
targets: [ workerNode ]
- workerNode_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: workerNode_scale
deltas:
delta_1:
number_of_instances: 1
targets: [ workerNode ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
workerNode_scale:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
workerNode_scale:
scale_level: 2
default_level: instantiation_level_1
- masterNode_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 1
targets: [ masterNode ]
- workerNode_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 2
instantiation_level_2:
number_of_instances: 4
targets: [ workerNode ]

View File

@ -0,0 +1,32 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF.
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_kubernetes_types.yaml
- sample_kubernetes_df_simple.yaml
- sample_kubernetes_df_complex.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: d34ac189-5376-493f-828f-224dd5fe7393
provider: Company
product_name: Sample VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates

View File

@ -0,0 +1,51 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
descriptor_id:
type: string
constraints: [ valid_values: [ d34ac189-5376-493f-828f-224dd5fe7393 ] ]
default: d34ac189-5376-493f-828f-224dd5fe7393
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample VNF' ] ]
default: 'Sample VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple, complex ] ]
default: simple
flavour_description:
type: string
default: "This is the default flavour description"
requirements:
- virtual_link_internal:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: default-token-k8svim
annotations:
kubernetes.io/service-account.name: "default"
type: kubernetes.io/service-account-token

View File

@ -0,0 +1,695 @@
#!/bin/bash
set -o xtrace
###############################################################################
#
# This script will install and setting for the Kubernetes Cluster on Ubuntu.
# It's confirmed operation on Ubuntu of below.
#
# * OS type : Ubuntu(64 bit)
# * OS version : 22.04 LTS
# * OS architecture : amd64 (x86_64)
# * Disk/Ram size : 15GB/2GB
# * Pre setup user : ubuntu
#
###############################################################################
#==============================================================================
# Usage Definition
#==============================================================================
# Print the usage text and terminate with status 1.
# Fix: the here-doc was piped through "sudo cat"; root privileges are not
# needed just to print help text, so plain "cat" is used instead.
function usage {
cat <<_EOT_
$(basename ${0}) is script to construct the kubernetes cluster.
Usage:
$(basename ${0}) [-d] [-o] [-m <master ip address>]
[-w <worker ip address>] [-i <master cluster ip address>]
[-a <k8s api cluster cidr] [-p <k8s pod network cidr>]
[-t <token name>] [-s <token hash>] [-k <certificate key>]
Description:
This script is to construct the kubernetes cluster on a virtual machine.
It can install and configure a Master node or each Worker Node
as specify arguments.
Options:
-m Install and setup all master nodes(use "," to separate, the first master ip is main master ip)
-w Install and setup worker node
-i master cluster IP address (e.g. 192.168.120.100)
-a Kubernetes api cluster CIDR (e.g. 10.96.0.0/12)
-p Kubernetes pod network CIDR (e.g. 192.168.0.0/16)
-d Display the execution result in debug mode
-o Output the execution result to the log file
-t The first master's token name
-s The first master's token hash
-k The first masters certificate key
_EOT_
exit 1
}
# Global state shared by every function below.
# INSTALL_MODE is "master"/"worker" from the CLI, later refined to
# "main_master"/"normal_master" by the main flow.
declare -g INSTALL_MODE=""
declare -g DEBUG_MODE="False"
declare -g OUTPUT_LOGFILE="False"
# master/worker ip
# MASTER_IPADDRS is the raw comma-separated -m argument; MASTER_IPS is it
# split into an array; MASTER_IP is the first (main) master.
declare -g MASTER_IPADDRS=${MASTER_IPADDRS:-}
declare -a -g MASTER_IPS=${MASTER_IPS:-}
declare -g MASTER_IP=${MASTER_IP:-}
declare -g WORKER_IPADDR=${WORKER_IPADDR:-}
# kubeadm join credentials handed over from the main master (-t/-s/-k).
declare -g TOKEN_NAME=${TOKEN_NAME:-}
declare -g TOKEN_HASH=${TOKEN_HASH:-}
declare -g CERT_KEY=${CERT_KEY:-}
# Service and pod CIDRs; overridable via -a/-p.
declare -g K8S_API_CLUSTER_CIDR=${K8S_API_CLUSTER_CIDR:-10.96.0.0/12}
declare -g K8S_POD_CIDR=${K8S_POD_CIDR:-10.0.0.0/8}
# software version
# Pinned versions of the Kubernetes components and container runtime stack.
declare -g K8S_MINOR_VERSION="1.26"
declare -g K8S_VERSION="1.26.8-1.1"
declare -g CONTAINERD_VERSION="1.7.11"
declare -g RUNC_VERSION="1.1.10"
declare -g CILIUM_VERSION="1.14.5"
# Parse command-line options. -m and -w are mutually deciding: whichever is
# given last sets INSTALL_MODE.
if [ "$OPTIND" = 1 ]; then
while getopts dom:w:i:a:p:t:s:k:h OPT; do
case $OPT in
m)
MASTER_IPADDRS=$OPTARG # 192.168.120.17,192.168.120.18,192.168.120.19
INSTALL_MODE="master" # master
MASTER_IPS=(${MASTER_IPADDRS//,/ })
MASTER_IP=${MASTER_IPS[0]}
;;
w)
WORKER_IPADDR=$OPTARG # 192.168.120.2
INSTALL_MODE="worker" # worker
;;
i)
MASTER_CLUSTER_IP=$OPTARG # master cluster ip: 192.168.120.100
;;
a)
K8S_API_CLUSTER_CIDR=$OPTARG # cluster cidr: 10.96.0.0/12
;;
p)
K8S_POD_CIDR=$OPTARG # pod network cidr: 192.168.0.0/16
;;
d)
DEBUG_MODE="True" # start debug
;;
o)
OUTPUT_LOGFILE="True" # output log file
;;
t)
TOKEN_NAME=$OPTARG # token name
;;
s)
TOKEN_HASH=$OPTARG # token hash
;;
k)
CERT_KEY=$OPTARG # certificate key
;;
esac
done
else
echo "No installed getopts-command." 1>&2
exit 1
fi
# check parameter entered by user
# -d turns on shell tracing for the rest of the run.
if [ "$DEBUG_MODE" == "True" ]; then
echo "*** DEBUG MODE ***"
set -x
fi
# -o redirects all remaining stdout/stderr to a timestamped log file.
if [ "$OUTPUT_LOGFILE" == "True" ]; then
echo "*** OUTPUT LOGFILE MODE ***"
exec > /tmp/k8s_install_`date +%Y%m%d%H%M%S`.log 2>&1
fi
# Application Variables
#----------------------
# haproxy
# CURRENT_HOST_IP is filled in by set_hostname; MASTER_CLUSTER_PORT is the
# haproxy frontend port for the HA API endpoint.
declare -g CURRENT_HOST_IP=${CURRENT_HOST_IP:-}
declare -g MASTER_CLUSTER_PORT=16443
# kubeadm join
# Captured output of "kubeadm join" on workers; checked by the main flow.
declare -g KUBEADM_JOIN_WORKER_RESULT=${KUBEADM_JOIN_WORKER_RESULT:-}
# Functions
#==========
# Set OS common functions
#------------------------
# Set public DNS
# Point systemd-resolved at Google's public DNS servers and restart it.
# A stale route for the stub-resolver address (127.0.0.53), if present, is
# removed first so lookups go through the refreshed resolver.
function set_public_dns {
    sudo sed -i -e 's/^#DNS=/DNS=8.8.8.8 8.8.4.4/g' /etc/systemd/resolved.conf
    if ip route | grep -q "127.0.0.53"; then
        sudo ip route delete 127.0.0.53
    fi
    sudo systemctl restart systemd-resolved.service
}
# Derive this node's hostname and record its IP address in CURRENT_HOST_IP.
#
# Masters: scan the local IPv4 addresses for one sharing the first three
# octets with the main master IP, remember it, and set the hostname to
# "master<last octet>". Workers: use WORKER_IPADDR the same way with a
# "worker" prefix.
#
# Fix: the invalid-mode branch used "exit 0", which reported success to the
# caller; it now exits non-zero. The needless "sudo" on the sed pipelines
# was also dropped (sed only transforms piped text, no root required).
function set_hostname {
    tmp_master_ipaddr3=`echo ${MASTER_IP} | sed -e "s/.[0-9]\{1,3\}$//"`
    local tmp_result=""
    if [[ "$INSTALL_MODE" =~ "master" ]]; then
        for _ip in `ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}'`; do
            _tmp_ip=`echo ${_ip} | sed -e "s/.[0-9]\{1,3\}$//"`
            if [[ $_tmp_ip == $tmp_master_ipaddr3 ]]; then
                CURRENT_HOST_IP=$_ip
                tmp_result=`echo $_ip|cut -d"." -f4`
                break
            fi
        done
        sudo /usr/bin/hostnamectl set-hostname master$tmp_result
    elif [[ "$INSTALL_MODE" == "worker" ]]; then
        CURRENT_HOST_IP=$WORKER_IPADDR
        tmp_result=`echo $CURRENT_HOST_IP|cut -d"." -f4`
        sudo /usr/bin/hostnamectl set-hostname worker$tmp_result
    else
        echo "set_hostname error. INSTALL_MODE is invalid." 1>&2
        exit 1
    fi
}
# Grant the pre-provisioned "ubuntu" user passwordless sudo via a drop-in
# file under /etc/sudoers.d.
function set_sudoers {
    printf '%s\n' "ubuntu ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/ubuntu
}
# Append the "master" alias to the localhost entries in /etc/hosts so the
# cluster endpoint name resolves locally. Both substitutions are applied in
# a single in-place sed invocation.
function set_hosts {
    local current_name
    current_name=$(hostname)
    sudo sed -i \
        -e 's/127.0.0.1 localhost/127.0.0.1 localhost master/g' \
        -e "s/127.0.1.1 $current_name/127.0.1.1 $current_name master/g" \
        /etc/hosts
}
# Disable swap, which kubelet refuses to run with: comment out every swap
# entry in /etc/fstab (persists across reboots) and turn swap off now.
# Fix: "swapoff -a" requires root; the original invoked it without sudo and
# would fail silently when the script runs as a regular user.
function invalidate_swap {
    sudo sed -i -e '/swap/s/^/#/' /etc/fstab
    sudo swapoff -a
}
# Install Haproxy
#----------------
# Install the haproxy package after forcing a fresh apt index update
# (REPOS_UPDATED=False bypasses apt_get_update's once-per-run cache).
function install_haproxy {
REPOS_UPDATED=False apt_get_update
sleep 5
apt_get install haproxy
}
# Write a fresh /etc/haproxy/haproxy.cfg for the HA control plane: a TCP
# frontend on port $MASTER_CLUSTER_PORT (16443) load-balances round-robin
# over port 6443 of every master, and a stats page listens on :1080.
function modify_haproxy_conf {
cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg >/dev/null
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats timeout 30s
user haproxy
group haproxy
daemon
# Default SSL material locations
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# Default ciphers to use on SSL-enabled listening sockets.
# For more information, see ciphers(1SSL). This list is from:
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
# An alternative list with additional directives can be obtained from
# https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
ssl-default-bind-options no-sslv3
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
frontend kubernetes-apiserver
mode tcp
bind *:$MASTER_CLUSTER_PORT
option tcplog
default_backend kubernetes-apiserver
backend kubernetes-apiserver
mode tcp
balance roundrobin
EOF
# Append one backend server line per master; the server name reuses the
# last octet of the master's IP (matching the hostnames set_hostname picks).
for master_ip in ${MASTER_IPS[@]}; do
split_ips=(${master_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server master${split_ips[3]} $master_ip:6443 check
EOF
done
# Finally append the stats listener stanza.
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
listen stats
bind *:1080
stats auth admin:awesomePassword
stats refresh 5s
stats realm HAProxy\ Statistics
stats uri /admin?stats
EOF
}
# Enable haproxy at boot, start it, and print its Active status line. If
# neither the API frontend (16443) nor the stats port (1080) is listening
# yet, restart the service once more.
function start_haproxy {
    sudo systemctl enable haproxy
    sudo systemctl start haproxy
    sudo systemctl status haproxy | grep Active
    if [[ -z $(ss -lnt | grep -E "16443|1080") ]]; then
        sudo systemctl restart haproxy
    fi
}
# Install Keepalived
#-------------------
# Install the keepalived package after forcing a fresh apt index update.
function install_keepalived {
REPOS_UPDATED=False apt_get_update
apt_get install keepalived
}
# Write /etc/keepalived/keepalived.conf for VRRP failover of the cluster
# VIP ($MASTER_CLUSTER_IP). Each master's priority is 103 minus its index
# in MASTER_IPS, so the first master wins the VIP while healthy; the
# chk_haproxy script demotes a node whose haproxy process has died.
function modify_keepalived_conf {
local priority
local ip_name
local index=0
for master_ip in ${MASTER_IPS[@]}; do
if [[ "$CURRENT_HOST_IP" == "$master_ip" ]]; then
priority=$(expr 103 - $index)
fi
index=$(expr $index + 1)
done
# Interface name that carries CURRENT_HOST_IP (last field of "ip a s").
ip_name=$(ip a s | grep $CURRENT_HOST_IP | awk '{print $NF}')
cat <<EOF | sudo tee /etc/keepalived/keepalived.conf >/dev/null
vrrp_script chk_haproxy {
script "killall -0 haproxy"
interval 3 fall 3
}
vrrp_instance VRRP1 {
state MASTER
interface $ip_name
virtual_router_id 51
priority $priority
advert_int 1
virtual_ipaddress {
$MASTER_CLUSTER_IP/24
}
track_script {
chk_haproxy
}
}
EOF
}
# Enable and start keepalived, then verify it is actually running.
# Fix: the failure branch used "exit 0", which reported success to the
# caller even though keepalived never came up; it now exits non-zero with
# a message on stderr.
function start_keepalived {
    sudo systemctl enable keepalived.service
    sudo systemctl start keepalived.service
    sudo systemctl status keepalived.service | grep Active
    result=$(sudo systemctl status keepalived.service | \
        grep Active | grep "running")
    if [[ "$result" == "" ]]; then
        echo "keepalived failed to start." 1>&2
        exit 1
    fi
}
# Install the container runtime stack (containerd + runc) from upstream
# release tarballs, with kernel-module and sysctl prerequisites for
# Kubernetes networking.
function install_cri {
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
sleep 30
# Download containerd, verify its published sha256, and unpack to /usr/local.
curl -OLsS https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz
curl -OLsS https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz.sha256sum
sha256sum --check containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz.sha256sum
sudo tar Cxzvf /usr/local containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz
# Install the upstream systemd unit and start containerd now and at boot.
sudo curl https://raw.githubusercontent.com/containerd/containerd/main/containerd.service -o /usr/local/lib/systemd/system/containerd.service --create-dirs
sudo systemctl daemon-reload
sudo systemctl enable --now containerd
# Download runc, verify its checksum, and install it.
curl -OLsS https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64
curl -OLsS https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.sha256sum
grep runc.amd64 runc.sha256sum | sha256sum --check
sudo install -m 755 runc.amd64 /usr/local/sbin/runc
# Generate the default containerd config and switch it to the systemd
# cgroup driver, as kubelet expects on systemd hosts.
sudo mkdir /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i -e "s/SystemdCgroup = false/SystemdCgroup = true/" /etc/containerd/config.toml
sudo systemctl restart containerd
rm containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz{,.sha256sum} runc.amd64 runc.sha256sum
}
# Install Kubernetes
#-------------------
# Configure the pkgs.k8s.io apt repository and install the pinned
# kubelet/kubeadm/kubectl versions, holding them against upgrades.
function set_k8s_components {
REPOS_UPDATED=False apt_get_update
sudo apt-get install -y apt-transport-https ca-certificates curl
sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v${K8S_MINOR_VERSION}/deb/Release.key | \
sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# NOTE(review): $? here is the exit status of the pipeline, i.e. gpg; if
# curl fails, gpg may still exit 0 on empty input — confirm whether a
# "set -o pipefail" or explicit curl check is intended.
result=$?
if [[ $result != 0 ]]; then
exit 1
fi
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${K8S_MINOR_VERSION}/deb/ /" | \
sudo tee /etc/apt/sources.list.d/kubernetes.list
apt_get update
apt_get install -y kubelet=$K8S_VERSION kubeadm=$K8S_VERSION kubectl=$K8S_VERSION
# Prevent unattended upgrades from moving the pinned versions.
sudo apt-mark hold kubelet kubeadm kubectl
echo "starting kubelet, wait 30s ..."
sleep 30
sudo systemctl status kubelet | grep Active
}
# Initialize the first control-plane node with kubeadm and install the
# admin kubeconfig for the current user. With multiple masters (comma in
# MASTER_IPADDRS) the control-plane endpoint is the haproxy VIP port 16443;
# otherwise the apiserver's own port 6443 is used directly.
function init_master {
if [[ "$MASTER_IPADDRS" =~ "," ]]; then
sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \
--service-cidr=$K8S_API_CLUSTER_CIDR \
--control-plane-endpoint "$MASTER_CLUSTER_IP:16443" --upload-certs
else
sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \
--service-cidr=$K8S_API_CLUSTER_CIDR \
--control-plane-endpoint "$MASTER_CLUSTER_IP:6443" --upload-certs
fi
sleep 3
sudo mkdir -p $HOME/.kube
sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 20
}
# Fetch the latest stable cilium CLI for this machine's architecture,
# verify its checksum, and unpack it into /usr/local/bin.
function install_cilium_cli {
    CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
    case "$(uname -m)" in
        aarch64) CLI_ARCH=arm64 ;;
        *)       CLI_ARCH=amd64 ;;
    esac
    local tarball="cilium-linux-${CLI_ARCH}.tar.gz"
    curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/${tarball}{,.sha256sum}
    sha256sum --check ${tarball}.sha256sum
    sudo tar xzvfC ${tarball} /usr/local/bin
    rm ${tarball}{,.sha256sum}
}
# Install the Cilium CNI into the cluster and block until it reports ready.
# When the pod CIDR is the script default (10.0.0.0/8), Cilium's own default
# pool is used; any other CIDR is passed through as the cluster pool.
function install_pod_network {
    install_cilium_cli
    if [[ "${K8S_POD_CIDR}" == "10.0.0.0/8" ]]; then
        cilium install --version ${CILIUM_VERSION}
    else
        cilium install --version ${CILIUM_VERSION} --set ipam.operator.clusterPoolIPv4PodCIDRList=${K8S_POD_CIDR}
    fi
    cilium status --wait
}
# Join an additional control-plane node to the cluster through the haproxy
# VIP (port 16443), using the token/hash/certificate-key produced by the
# main master, then install the admin kubeconfig and list the nodes.
function add_master_node {
sudo kubeadm join $MASTER_CLUSTER_IP:16443 \
--token $TOKEN_NAME \
--discovery-token-ca-cert-hash sha256:$TOKEN_HASH \
--control-plane --certificate-key $CERT_KEY
sudo mkdir -p $HOME/.kube
sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
echo "add node ..."
sleep 10
kubectl get nodes -o wide
echo "add node successfully"
}
# Run "kubeadm init" with the configured CIDRs and install the admin
# kubeconfig.
# NOTE(review): this function is never called in this script (main_worker
# uses add_worker_node), and running "kubeadm init" on a worker is unusual —
# confirm whether it is dead code or reserved for a future flow.
function init_worker {
sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \
--service-cidr=$K8S_API_CLUSTER_CIDR
sleep 5
sudo mkdir -p $HOME/.kube
sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
}
# Join this worker to the cluster, capturing the kubeadm output so the main
# flow can check for the "This node has joined the cluster" message.
# NOTE(review): $ha_flag is never assigned anywhere in this script, so the
# first branch (HA endpoint, port 16443) is always taken — confirm whether
# ha_flag was meant to be derived from MASTER_IPADDRS or set by the caller.
function add_worker_node {
if [[ "$ha_flag" != "False" ]]; then
KUBEADM_JOIN_WORKER_RESULT=$(sudo kubeadm join \
$MASTER_CLUSTER_IP:16443 --token $TOKEN_NAME \
--discovery-token-ca-cert-hash sha256:$TOKEN_HASH)
else
KUBEADM_JOIN_WORKER_RESULT=$(sudo kubeadm join \
$MASTER_CLUSTER_IP:6443 --token $TOKEN_NAME \
--discovery-token-ca-cert-hash sha256:$TOKEN_HASH)
fi
}
# Set common functions
#
# Refer: devstack project functions-common
#-----------------------------------------
# Run "apt-get update" once per script run (guarded by REPOS_UPDATED),
# retrying every 30s for up to 5 minutes. Timing is recorded via
# time_start/time_stop; failure is fatal via die.
# NOTE(review): the local $sudo is computed but unused — update_cmd
# hardcodes "sudo"; confirm whether root execution should use "env" here
# as apt_get does.
function apt_get_update {
if [[ "$REPOS_UPDATED" == "True" ]]; then
return
fi
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# time all the apt operations
time_start "apt-get-update"
local update_cmd="sudo apt-get update"
if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then
die $LINENO "Failed to update apt repos, we're dead now"
fi
REPOS_UPDATED=True
# stop the clock
time_stop "apt-get-update"
}
# Start a millisecond wall-clock timer named $1 (stored in the global
# _TIME_START array). Starting an already-running timer is a fatal error
# reported through die.
function time_start {
    local name=$1
    if [[ -n "${_TIME_START[$name]}" ]]; then
        die $LINENO \
            "Trying to start the clock on $name, but it's already been started"
    fi
    _TIME_START[$name]=$(date +%s%3N)
}
# Stop the timer named $1, accumulate the elapsed milliseconds into the
# global _TIME_TOTAL array, and clear the start mark so the timer can be
# reused. Stopping a timer that was never started is fatal via die.
function time_stop {
    local name=$1
    local start_time=${_TIME_START[$name]}
    if [[ -z "$start_time" ]]; then
        die $LINENO \
            "Trying to stop the clock on $name, but it was never started"
    fi
    local end_time
    end_time=$(date +%s%3N)
    local elapsed_time=$((end_time - start_time))
    # reset the clock so we can start it in the future
    _TIME_START[$name]=""
    _TIME_TOTAL[$name]=$(( ${_TIME_TOTAL[$name]:-0} + elapsed_time ))
}
# Non-interactive, timed wrapper around apt-get. Suppresses xtrace while
# setting up, skips entirely in OFFLINE mode or with no arguments, and
# preserves apt-get's exit status for the caller.
function apt_get {
local xtrace result
# Capture the current xtrace setting so it can be restored below.
xtrace=$(set +o | grep xtrace) # set +o xtrace
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
# When already root, "env" replaces sudo just to carry the environment.
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# time all the apt operations
time_start "apt-get"
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
apt-get --option "Dpkg::Options::=--force-confold" \
--assume-yes "$@" < /dev/null
result=$?
# stop the clock
time_stop "apt-get"
return $result
}
# Choose install function based on install mode
#----------------------------------------------
# Full install path for the FIRST (main) master: host preparation, the
# haproxy/keepalived HA layer (only when several masters were given),
# container runtime, kubeadm init, and the Cilium CNI. Finally prints the
# join credentials (token, server URL, CA cert/hash, certificate key) that
# the management driver scrapes to join the remaining nodes.
function main_master {
# prepare
set_public_dns
set_hostname
set_sudoers
set_hosts
invalidate_swap
# A comma in MASTER_IPADDRS means an HA control plane behind a VIP.
if [[ "$MASTER_IPADDRS" =~ "," ]]; then
# haproxy
install_haproxy
modify_haproxy_conf
start_haproxy
# keepalived
install_keepalived
modify_keepalived_conf
start_keepalived
fi
install_cri
# kubernetes
set_k8s_components
init_master
install_pod_network
clear
token=$(sudo kubeadm token create)
echo "token:$token"
# API server URL, with ANSI color codes stripped from kubectl output.
server=$(kubectl cluster-info | \
sed 's,\x1B\[[0-9;]*[a-zA-Z],,g' | \
grep 'Kubernetes' |awk '{print $7}')
echo "server:$server"
cat /etc/kubernetes/pki/ca.crt
# Discovery hash of the cluster CA public key (kubeadm join format).
ssl_ca_cert_hash=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sudo sed 's/^.* //')
echo "ssl_ca_cert_hash:$ssl_ca_cert_hash"
# Re-upload control-plane certs; the last output line is the new key.
cert_key=$(sudo kubeadm init phase upload-certs --upload-certs 2> /dev/null | \
tail -n 1)
echo "certificate_key:$cert_key"
}
# Set up an additional (non-main) master node: unlike main_master this
# always configures haproxy/keepalived (it is only invoked in the HA
# flavor) and joins the existing control plane instead of initializing it.
function normal_master {
# prepare
set_public_dns
set_hostname
set_sudoers
set_hosts
invalidate_swap
# haproxy
install_haproxy
modify_haproxy_conf
start_haproxy
# keepalived
install_keepalived
modify_keepalived_conf
start_keepalived
# kubernetes
install_cri
set_k8s_components
add_master_node
# NOTE(review): only this path installs the cilium CLI; main_master does
# not call install_cilium_cli -- confirm whether that is intentional.
install_cilium_cli
}
# Set up a worker node: host preparation, container runtime, Kubernetes
# components, then join the cluster as a worker via add_worker_node.
function main_worker {
# prepare
set_public_dns
set_hostname
set_sudoers
set_hosts
invalidate_swap
# kubernetes
install_cri
set_k8s_components
add_worker_node
}
# Pre preparations
# ________________
# Verify the host is Ubuntu 22.04 on x86_64 and abort the install otherwise.
# Fix: the original only printed the error message and let the install
# continue on an unsupported platform; it now exits with 255, matching the
# script's other failure paths.
function check_OS {
    . /etc/os-release
    if [[ $PRETTY_NAME =~ "Ubuntu 22.04" ]]; then
        os_architecture=$(uname -a | grep 'x86_64')
        if [[ $os_architecture == "" ]]; then
            echo "Your OS does not support at present."
            echo "It only supports x86_64."
            exit 255
        fi
    else
        echo "Your OS does not support at present."
        echo "It only supports Ubuntu 22.04 LTS."
        exit 255
    fi
}
# Main
# ____
flag="False"
check_OS
# INSTALL_MODE, MASTER_IP, CURRENT_HOST_IP and KUBEADM_JOIN_WORKER_RESULT
# are expected to be set in the environment (presumably injected by the
# management driver -- confirm against the caller). A "master" node whose
# local address matches MASTER_IP becomes the main master that bootstraps
# the cluster; any other master joins it.
if [[ "$INSTALL_MODE" =~ "master" ]]; then
echo "Start install to main master node"
# Check whether any of this host's IPv4 addresses equals MASTER_IP.
for _ip in `ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}'`; do
if [[ $_ip == $MASTER_IP ]]; then
flag="True"
break
fi
done
if [[ "$flag" == "True" ]]; then
INSTALL_MODE="main_master"
main_master
else
INSTALL_MODE="normal_master"
normal_master
fi
elif [ "$INSTALL_MODE" == "worker" ]; then
echo "Start install to worker node"
main_worker
else
echo "The install mode does not support at present!"
exit 255
fi
# Post-install verification: masters must appear in "kubectl get nodes";
# workers check the kubeadm join output captured earlier.
if [[ "$INSTALL_MODE" =~ "master" ]]; then
result=$(kubectl get nodes -o wide | grep $CURRENT_HOST_IP)
if [[ -z "$result" ]];then
echo "Install Failed! The node does not exist in Kubernetes cluster."
exit 255
else
# /tmp/installed is the success marker; presumably polled by the
# management driver -- confirm.
echo "Install Success!" | sudo tee /tmp/installed
fi
else
if [[ "$KUBEADM_JOIN_WORKER_RESULT" =~ \
"This node has joined the cluster" ]]; then
echo "Install Success!" | sudo tee /tmp/installed
else
echo "Install Failed! The node does not exist in Kubernetes cluster."
exit 255
fi
fi
# NOTE(review): mode 666 makes the kubelet config world-writable -- a
# security risk. Presumably a non-root process needs to read it; confirm
# whether 644 would suffice.
sudo chmod 666 /var/lib/kubelet/config.yaml
exit 0

View File

@ -0,0 +1,7 @@
TOSCA-Meta-File-Version: 1.0
Created-by: Dummy User
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_kubernetes_top.vnfd.yaml
Name: Files/images/ubuntu-22.04-server-cloudimg-amd64.img
Content-Type: application/x-iso9066-image

View File

@ -0,0 +1,82 @@
# Copyright (C) 2024 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import shutil
import sys
import tempfile
# NOTE: This package is a sample for vnflcm v2 API to deploy kubernetes cluster
# with cilium CNI using the management driver.
# This sample package requires an OS Image
# (ubuntu-22.04-server-cloudimg-amd64.img) with password access settings
# to be placed in the top directory.

# Placeholder sha512 hash embedded in the sample definition files; replaced
# below with the hash of the actual image supplied by the user.
SAMPLE_IMAGE_HASH = "7273f6c927c2fccb31ac1398da7c30dc9265f7c51896e41d062f" \
                    "9426afd061326947c9af442df6b0eddd04bca7c29239baaccf2d" \
                    "ec4ace19a39bcdb74bbb4758"

image_file = "ubuntu-22.04-server-cloudimg-amd64.img"
if not os.path.isfile(image_file):
    print("Ubuntu image does not exist. This sample requires OS image.")
    # sys.exit (not os._exit) so interpreter cleanup and stdout flushing run.
    sys.exit(1)

tmp_dir = tempfile.mkdtemp()
try:
    # The resulting zip file is named after the current directory.
    zip_file_name = os.path.basename(os.path.abspath("."))
    zip_file_path = os.path.join(tmp_dir, zip_file_name)

    tmp_contents = os.path.join(tmp_dir, "contents")
    shutil.copytree(os.path.join(".", "contents"), tmp_contents)

    # add userdata script
    # tacker/sol_refactored/infra_drivers/openstack/userdata_standard.py
    userdata_dir = "../../../../tacker/sol_refactored/infra_drivers/openstack/"
    userdata_file = "userdata_standard.py"
    userdata_path = os.path.abspath(os.path.join(userdata_dir, userdata_file))
    # mkdir UserData/ and copy userdata_path into it
    file_path = os.path.join(tmp_contents, "UserData")
    os.makedirs(file_path)
    shutil.copy(userdata_path, file_path)

    # add common vnfd files
    common_dir = "../../../tests/functional/sol_v2_common/common/Definitions/"
    for entry in os.listdir(common_dir):
        shutil.copy(os.path.join(common_dir, entry),
                    os.path.join(tmp_contents, "Definitions"))

    # Hash the image in 1 MiB chunks instead of reading the whole
    # (multi-hundred-MB) file into memory at once.
    digest = hashlib.sha512()
    with open(image_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            digest.update(chunk)
    hash_value = digest.hexdigest()

    # Replace the placeholder hash in every definition file with the hash
    # of the actual image.
    def_path = os.path.join(tmp_contents, "Definitions")
    for entry in os.listdir(def_path):
        entry_path = os.path.join(def_path, entry)
        with open(entry_path, 'r') as f:
            content = f.read()
        content = content.replace(SAMPLE_IMAGE_HASH, hash_value)
        with open(entry_path, 'w') as f:
            f.write(content)

    # create Files dir and copy image_path into it
    file_path = os.path.join(tmp_contents, "Files", "images")
    os.makedirs(file_path)
    shutil.copy(image_file, file_path)

    # make zip file
    shutil.make_archive(zip_file_path, "zip", tmp_contents)
    shutil.copy(os.path.join(tmp_dir, zip_file_name + ".zip"), ".")
finally:
    # Always remove the scratch directory, even when packaging fails partway.
    shutil.rmtree(tmp_dir)