Basic updates of k8s resources

* no new commands or flags: if an object already exists, it is
updated in place (see the sketch after this list)
* only Deployment, ConfigMap, and Service objects can be updated
* if the deployment spec itself was not updated but related ConfigMaps
were, the deployment is still updated
* deployments are updated with the RollingUpdate strategy
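
In pseudocode, the new behaviour boils down to this (a minimal sketch,
not the exact code from the diff below; objects are assumed to expose
pykube-style exists()/create()/update() methods):

def process_object(obj, kind):
    if not obj.exists():
        obj.create()      # unchanged: missing objects are created
    elif kind in ('ConfigMap', 'Deployment', 'Service'):
        obj.update()      # new: existing updatable objects are updated
    # existing DaemonSet, Job and Namespace objects are left untouched
    return obj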

Change-Id: If8fe78d6bea95b11d87f243cadc79490736745e7
Andrey Pavlov 2016-09-13 13:59:11 +03:00
parent a069ddb9ac
commit 6a864bb4ba
7 changed files with 76 additions and 48 deletions

View File

@@ -31,6 +31,16 @@ def _expand_files(service, files):
_expand(cmd)
def _get_configmaps_version(configmaps):
"""Get concatenation of ConfigMaps versions
If version of any of the ConfigMaps changed, the overall version will be
changed and deployment will be updated no matter if the deployment spec
was updated or not.
"""
return ''.join(cm.obj['metadata']['resourceVersion'] for cm in configmaps)
def parse_role(service_dir, role, config):
service = role["service"]
if service["name"] not in config.get("topology", {}):
@@ -40,11 +50,13 @@ def parse_role(service_dir, role, config):
LOG.info("Scheduling service %s deployment", service["name"])
_expand_files(service, role.get("files"))
_create_files_configmap(service_dir, service["name"], role.get("files"))
_create_meta_configmap(service)
files_cm = _create_files_configmap(
service_dir, service["name"], role.get("files"))
meta_cm = _create_meta_configmap(service)
workflows = _parse_workflows(service)
_create_workflow(workflows, service["name"])
workflow_cm = _create_workflow(workflows, service["name"])
configmaps = config['configmaps'] + (files_cm, meta_cm, workflow_cm)
for cont in service["containers"]:
daemon_cmd = cont["daemon"]
@@ -52,6 +64,7 @@ def parse_role(service_dir, role, config):
_create_pre_jobs(service, cont)
_create_post_jobs(service, cont)
cont['cm_version'] = _get_configmaps_version(configmaps)
cont_spec = templates.serialize_daemon_pod_spec(service)
affinity = templates.serialize_affinity(service, config["topology"])
@@ -62,7 +75,7 @@ def parse_role(service_dir, role, config):
else:
obj = templates.serialize_deployment(service["name"], cont_spec,
affinity)
kubernetes.create_object_from_definition(obj)
kubernetes.process_object(obj)
_create_service(service)
LOG.info("Service %s successfuly scheduled", service["name"])
@@ -103,7 +116,7 @@ def _fill_cmd(workflow, cmd):
def _create_workflow(workflow, name):
configmap_name = "%s-%s" % (name, templates.ROLE_CONFIG)
template = templates.serialize_configmap(configmap_name, workflow)
kubernetes.create_object_from_definition(template)
return kubernetes.process_object(template)
def _create_service(service):
@@ -123,7 +136,7 @@ def _create_service(service):
else:
ports.append({"port": source_port, "name": name_port})
template = templates.serialize_service(service["name"], ports)
kubernetes.create_object_from_definition(template)
kubernetes.process_object(template)
def _create_pre_commands(workflow, container):
@@ -176,7 +189,7 @@ def _create_job(service, container, job):
cont_spec = templates.serialize_job_container_spec(container, job)
pod_spec = templates.serialize_job_pod_spec(service, job, cont_spec)
job_spec = templates.serialize_job(job["name"], pod_spec)
kubernetes.create_object_from_definition(job_spec)
kubernetes.process_object(job_spec)
def _create_command(workflow, cmd):
@@ -214,7 +227,7 @@ def _create_globals_configmap(config):
templates.GLOBAL_CONFIG: json.dumps(config, sort_keys=True)
}
cm = templates.serialize_configmap(templates.GLOBAL_CONFIG, data)
kubernetes.create_object_from_definition(cm)
return kubernetes.process_object(cm)
def _create_start_script_configmap():
@@ -229,7 +242,7 @@ def _create_start_script_configmap():
templates.SCRIPT_CONFIG: start_scr_data
}
cm = templates.serialize_configmap(templates.SCRIPT_CONFIG, data)
kubernetes.create_object_from_definition(cm)
return kubernetes.process_object(cm)
def _create_files_configmap(service_dir, service_name, configs):
@@ -242,7 +255,7 @@ def _create_files_configmap(service_dir, service_name, configs):
data[filename] = f.read()
data["placeholder"] = ""
template = templates.serialize_configmap(configmap_name, data)
kubernetes.create_object_from_definition(template)
return kubernetes.process_object(template)
def _create_meta_configmap(service):
@@ -253,7 +266,7 @@ def _create_meta_configmap(service):
"host-net": service.get("host-net", False)}, sort_keys=True)
}
template = templates.serialize_configmap(configmap_name, data)
kubernetes.create_object_from_definition(template)
return kubernetes.process_object(template)
def _make_topology(nodes, roles):
@@ -303,7 +316,7 @@ def _create_namespace(namespace):
return
template = templates.serialize_namespace(namespace)
kubernetes.create_object_from_definition(template)
kubernetes.process_object(template)
def _create_openrc(config, namespace):
@@ -338,8 +351,9 @@ def deploy_components(components=None):
namespace = CONF.kubernetes.namespace
_create_namespace(namespace)
_create_globals_configmap(config["configs"])
_create_start_script_configmap()
globals_cm = _create_globals_configmap(config["configs"])
start_script_cm = _create_start_script_configmap()
config['configmaps'] = (globals_cm, start_script_cm)
for component in components:
parse_role(components_map[component]['service_dir'],

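To make the ConfigMap-driven updates concrete, here is a self-contained
example of the version concatenation from the first hunk (FakeCM is a
hypothetical stand-in for a pykube ConfigMap object):

def _get_configmaps_version(configmaps):
    return ''.join(cm.obj['metadata']['resourceVersion']
                   for cm in configmaps)

class FakeCM(object):
    def __init__(self, resource_version):
        self.obj = {'metadata': {'resourceVersion': resource_version}}

cms = [FakeCM('41'), FakeCM('7')]
assert _get_configmaps_version(cms) == '417'

# The API server bumps resourceVersion on every ConfigMap edit, so the
# concatenated value changes whenever any related ConfigMap changes:
cms[1] = FakeCM('8')
assert _get_configmaps_version(cms) == '418'
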
View File

@@ -10,6 +10,8 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
UPDATABLE_OBJECTS = ('ConfigMap', 'Deployment', 'Service')
def get_client(kube_apiserver=None, key_file=None, cert_file=None,
ca_cert=None, insecure=None):
@@ -70,7 +72,7 @@ def export_object(object_dict):
object_dict, default_flow_style=False))
def create_object_from_definition(object_dict, namespace=None, client=None):
def process_object(object_dict, namespace=None, client=None):
LOG.debug("Deploying %s: \"%s\"",
object_dict["kind"], object_dict["metadata"]["name"])
if not object_dict['kind'] == 'Namespace':
@@ -94,10 +96,17 @@ def create_object_from_definition(object_dict, namespace=None, client=None):
if obj.exists():
LOG.debug('%s "%s" already exists', object_dict['kind'],
object_dict['metadata']['name'])
return obj
obj.create()
LOG.debug('%s "%s" has been created', object_dict['kind'],
object_dict['metadata']['name'])
if object_dict['kind'] in UPDATABLE_OBJECTS:
obj.update()
LOG.debug('%s "%s" has been updated', object_dict['kind'],
object_dict['metadata']['name'])
if object_dict['kind'] == 'DaemonSet':
LOG.warning('%s will not be updated (DaemonSet objects cannot be '
'updated)', object_dict['metadata']['name'])
else:
obj.create()
LOG.debug('%s "%s" has been created', object_dict['kind'],
object_dict['metadata']['name'])
return obj
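
A quick self-check of the three outcomes of process_object, using a
hypothetical FakeObj stand-in for a pykube resource (a sketch of the
logic above, not the module's real code):

UPDATABLE_OBJECTS = ('ConfigMap', 'Deployment', 'Service')

class FakeObj(object):
    def __init__(self, kind, exists=False):
        self.kind = kind
        self._exists = exists
        self.actions = []

    def exists(self):
        return self._exists

    def create(self):
        self.actions.append('create')

    def update(self):
        self.actions.append('update')

def process_object(obj):
    if obj.exists():
        if obj.kind in UPDATABLE_OBJECTS:
            obj.update()
    else:
        obj.create()
    return obj

assert process_object(FakeObj('Service')).actions == ['create']
assert process_object(FakeObj('Deployment', exists=True)).actions == ['update']
assert process_object(FakeObj('DaemonSet', exists=True)).actions == []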

View File

@@ -98,7 +98,11 @@ def serialize_daemon_container_spec(container):
"command": _get_readiness_cmd(container["name"])
},
"timeoutSeconds": 1
}
},
"env": [{
"name": "CM_VERSION",
"value": container['cm_version']
}]
}
if container.get("probes", {}).get("liveness"):
cont_spec["livenessProbe"] = {
@@ -265,6 +269,12 @@ def serialize_deployment(name, spec, affinity):
},
"spec": {
"replicas": 1,
"strategy": {
"rollingUpdate": {
"maxSurge": 1,
"maxUnavailable": 0
}
},
"template": {
"metadata": {
"annotations": affinity,

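Putting the two template changes together, the serialized Deployment now
carries roughly this fragment (abridged to the fields added above; the
CM_VERSION value stands for the concatenated ConfigMap versions):

deployment_fragment = {
    "spec": {
        "replicas": 1,
        "strategy": {
            # maxUnavailable=0 keeps the old pod serving until its
            # replacement is ready; maxSurge=1 allows one extra pod
            # during the rollout
            "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0},
        },
        "template": {"spec": {"containers": [{
            # a changed CM_VERSION alters the pod template, which is
            # what actually triggers the rolling update
            "env": [{"name": "CM_VERSION",
                     "value": "<concatenated resourceVersions>"}],
        }]}},
    },
}
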
View File

@@ -2,6 +2,7 @@ import filecmp
import os
import fixtures
import mock
import yaml
from fuel_ccp import deploy
@@ -122,12 +123,20 @@ class TestDeploy(base.TestCase):
shallow=False)
self.assertTrue(result)
def test_get_configmaps_version(self):
cm_list = [mock.Mock(obj={'metadata': {'resourceVersion': '1'}})
for _ in range(3)]
self.assertEqual('111', deploy._get_configmaps_version(cm_list))
cm_list = []
self.assertEqual('', deploy._get_configmaps_version(cm_list))
class TestDeployCreateService(base.TestCase):
def setUp(self):
super(TestDeployCreateService, self).setUp()
fixture = self.useFixture(fixtures.MockPatch(
"fuel_ccp.kubernetes.create_object_from_definition"))
"fuel_ccp.kubernetes.process_object"))
self.create_obj = fixture.mock
def test_create_service_without_ports(self):

View File

@@ -57,12 +57,12 @@ class TestKubernetesClient(base.TestCase):
class TestKubernetesObjects(testscenarios.WithScenarios, base.TestCase):
scenarios = (
('ConfigMap', {'kind': 'ConfigMap'}),
('Deployment', {'kind': 'Deployment'}),
('DaemonSet', {'kind': 'DaemonSet'}),
('Job', {'kind': 'Job'}),
('Namespace', {'kind': 'Namespace'}),
('Service', {'kind': 'Service'})
('ConfigMap', {'kind': 'ConfigMap', 'update': True}),
('Deployment', {'kind': 'Deployment', 'update': True}),
('DaemonSet', {'kind': 'DaemonSet', 'update': False}),
('Job', {'kind': 'Job', 'update': False}),
('Namespace', {'kind': 'Namespace', 'update': False}),
('Service', {'kind': 'Service', 'update': True})
)
def setUp(self):
@@ -76,19 +76,19 @@ class TestKubernetesObjects(testscenarios.WithScenarios, base.TestCase):
m_class = self.useFixture(fixtures.MockPatch(
'pykube.{}'.format(self.kind), return_value=m_obj))
kubernetes.create_object_from_definition(
obj_dict, client=mock.Mock())
kubernetes.process_object(obj_dict, client=mock.Mock())
m_class.mock.assert_called_once_with(mock.ANY, obj_dict)
m_obj.create.assert_called_once_with()
def test_object_exists(self):
def test_object_update(self):
obj_dict = {'kind': self.kind, 'metadata': {'name': 'test'}}
m_obj = mock.Mock(exists=mock.Mock(return_value=True))
m_class = self.useFixture(fixtures.MockPatch(
'pykube.{}'.format(self.kind), return_value=m_obj))
kubernetes.create_object_from_definition(
obj_dict, client=mock.Mock())
kubernetes.process_object(obj_dict, client=mock.Mock())
m_class.mock.assert_called_once_with(mock.ANY, obj_dict)
m_obj.exists.assert_called_once_with()
m_obj.create.assert_not_called()
if self.update:
m_obj.update.assert_called_once_with()

View File

@@ -80,12 +80,3 @@ class TestDeployValidation(base.TestCase):
'deployment: service2',
deploy_validation.validate_requested_components,
{'service1'}, COMPONENTS_MAP)
# requested services already deployed
m_get_deps.return_value = {}
m_get_deployed.return_value = {'service1'}
self.assertRaisesRegexp(
RuntimeError,
'Following components are already deployed: service1',
deploy_validation.validate_requested_components,
{'service1'}, COMPONENTS_MAP)

View File

@@ -5,17 +5,12 @@ from fuel_ccp import dependencies
def validate_requested_components(components, components_map):
"""Validate requested components.
Validate that requested components are not already deployed and all
required components provided.
Validate that all components required for successful deployment of
requested components are provided or already deployed.
"""
deployed_components = utils.get_deployed_components()
required_components = dependencies.get_deps(components, components_map)
already_deployed_components = components & deployed_components
if already_deployed_components:
raise RuntimeError('Following components are already deployed: '
'%s' % ' '.join(already_deployed_components))
not_provided_components = (required_components - components -
deployed_components)
if not_provided_components:
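
The relaxed validation above reduces to simple set arithmetic (values
are illustrative, not from the repo):

components = {'service1'}
deployed_components = {'service2'}
required_components = {'service1', 'service2', 'service3'}

not_provided_components = (required_components - components -
                           deployed_components)
assert not_provided_components == {'service3'}
# components that are already deployed now satisfy dependencies
# instead of raising a RuntimeError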