Merge "Drop DaemonSet support"

This commit is contained in:
Jenkins 2016-11-11 13:08:52 +00:00 committed by Gerrit Code Review
commit a685ad8d21
5 changed files with 20 additions and 43 deletions

View File

@@ -93,11 +93,9 @@ def get_deploy_components_info(rendering_context=None):
def get_deployed_components():
"""Returns set of deployed components."""
deployed_daemonsets = kubernetes.list_cluster_daemonsets()
deployed_deployments = kubernetes.list_cluster_deployments()
deployed_petsets = kubernetes.list_cluster_petsets()
deployed_components = set(kubernetes.get_object_names(
itertools.chain(deployed_daemonsets, deployed_deployments,
deployed_petsets))
itertools.chain(deployed_deployments, deployed_petsets))
)
return deployed_components

View File

@@ -99,16 +99,19 @@ def parse_role(component, topology, configmaps):
replicas = CONF.replicas.get(service_name)
if service.get("kind") == 'DaemonSet':
LOG.warning("Deployment is being used instead of DaemonSet to support "
"updates")
if replicas is not None:
LOG.error("Replicas was specified for %s, but it's implemented "
"using Kubernetes DaemonSet that will deploy service on "
"in DaemonSet-like way and will be deployed on "
"all matching nodes (section 'nodes' in config file)",
service_name)
raise RuntimeError("Replicas couldn't be specified for services "
"implemented using Kubernetes DaemonSet")
obj = templates.serialize_daemonset(service_name, cont_spec,
affinity, component_name)
replicas = len(set(topology[service_name]))
obj = templates.serialize_deployment(service_name, cont_spec,
affinity, replicas,
component_name)
elif service.get("kind") == "PetSet":
replicas = replicas or 1
obj = templates.serialize_petset(service_name, cont_spec,

View File

@@ -108,9 +108,6 @@ def process_object(object_dict, namespace=None, client=None):
obj.update()
LOG.debug('%s "%s" has been updated', object_dict['kind'],
object_dict['metadata']['name'])
if object_dict['kind'] == 'DaemonSet':
LOG.warning('%s will not be updated (DaemonSet objects cannot be '
'updated' % object_dict['metadata']['name'])
else:
obj.create()
LOG.debug('%s "%s" has been created', object_dict['kind'],
@@ -123,13 +120,6 @@ def list_k8s_nodes():
return pykube.Node.objects(client).all()
def list_cluster_daemonsets():
client = get_client()
return pykube.DaemonSet.objects(client).filter(
namespace=CONF.kubernetes.namespace,
selector="ccp=true")
def list_cluster_deployments():
client = get_client()
return pykube.Deployment.objects(client).filter(

View File

@@ -69,9 +69,6 @@ def get_pod_states(components=None):
for dp in kubernetes.list_cluster_deployments():
if not components or dp.name in components:
states.append(_get_pods_status(dp.name, svc_map))
for ds in kubernetes.list_cluster_daemonsets():
if not components or ds.name in components:
states.append(_get_pods_status(ds.name, svc_map))
job_states = {}
for job in kubernetes.list_cluster_jobs():

View File

@@ -318,29 +318,6 @@ def serialize_deployment(name, spec, affinity, replicas, component_name):
}
def serialize_daemonset(name, spec, affinity, component_name):
return {
"apiVersion": "extensions/v1beta1",
"kind": "DaemonSet",
"metadata": {
"name": name
},
"spec": {
"template": {
"metadata": {
"annotations": affinity,
"labels": {
"app": name,
"ccp": "true",
"ccp-component": component_name
}
},
"spec": spec
}
}
}
def serialize_petset(name, spec, affinity, replicas, component_name):
annotations = {
"pod.alpha.kubernetes.io/initialized": "true"
@@ -397,6 +374,18 @@ def serialize_affinity(service, topology):
"namespaces": []
}]
}
elif service.get("kind") == "DaemonSet":
policy["podAntiAffinity"] = {
"requiredDuringSchedulingIgnoredDuringExecution": [{
"labelSelector": {
"matchLabels": {
"app": service["name"]
}
},
"topologyKey": "kubernetes.io/hostname",
"namespaces": [CONF.kubernetes.namespace]
}]
}
return {"scheduler.alpha.kubernetes.io/affinity": json.dumps(
policy, sort_keys=True)}