Adds support for several applications in a single Pod

Kubernetes doesn't allow adding containers to a Pod that
is already deployed. With this change, old Pods will be deleted
and either a new Pod will be created that includes the new
container, or the Replication Controller will be modified (before
the Pod deletion, so that updated Pods are created instead).

Also, Pods are now created directly only when the replica number is set to 0.
Otherwise, Pods are created by the ReplicationController alone.
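
The resulting decision flow, sketched in Python (illustrative only; the
function and action names below are not part of the package):

# Sketch of the deployment decision described above (not package code).
def plan_pod_deployment(pod_changed, replicas, prev_replicas, has_containers):
    actions = []
    if not has_containers:
        replicas = 0                                   # an empty Pod is never run
    if replicas != prev_replicas or pod_changed:
        if replicas > 0:
            # The Replication Controller is created/updated before the old Pods
            # are deleted, so replacement Pods already carry the new containers.
            actions.append('create_or_update_replication_controller')
        if replicas == 0 and prev_replicas > 0:
            actions.append('delete_replication_controller')
    if pod_changed:
        actions.append('delete_old_pods_by_label')
        if replicas == 0 and has_containers:
            actions.append('create_standalone_pod')    # only when replicas == 0
    return actions

# e.g. plan_pod_deployment(pod_changed=True, replicas=2, prev_replicas=0,
#                          has_containers=True)
# -> ['create_or_update_replication_controller', 'delete_old_pods_by_label']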

Change-Id: I8552793f39083de47ad07e7013dedb72e0550885
Closes-Bug: #1444208
Stan Lagun 2015-04-15 03:26:06 +03:00
parent e9a04c4db0
commit a855e9a879
5 changed files with 83 additions and 16 deletions

View File

@@ -28,16 +28,23 @@ Methods:
deploy:
Body:
- $._environment.reporter.report($this, 'Waiting for the host to be ready')
- $.host.deploy()
- $container: $.getContainer()
- $repr: $._getContainerRepresentation($container)
- If: $.getAttr(container, null) != $repr
Then:
- $.onInstallationStart()
- $.onInstallationStart()
- Try:
- $.applicationEndpoints: $.host.hostContainer($container)
- $.setAttr(container, $repr)
- $.onInstallationFinish()
Catch:
- As: e
Do:
- $formatString: 'Error: {0}'
- $._environment.reporter.report_error($, $formatString.format($e.message))
- Rethrow:
Else:
- $.onInstallationFinish()
getConnectionTo:
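
The hunk above makes container deployment idempotent: a representation of the
container is compared with the one stored after the previous deployment, and
the host is asked to (re)deploy only when they differ; errors are reported and
rethrown. A rough Python analogue of the same pattern (the names below are
illustrative, not from the package):

# Illustrative analogue of the getAttr/setAttr + Try/Catch pattern above.
def deploy_container(host_container, container, attrs, report_error):
    """host_container: callable performing the deployment; attrs: dict playing
    the role of setAttr/getAttr storage; report_error: reporter callable."""
    new_repr = repr(container)              # stand-in for _getContainerRepresentation
    if attrs.get('container') != new_repr:  # changed since last deployment (or first run)
        try:
            endpoints = host_container(container)
            attrs['container'] = new_repr   # remember what was actually deployed
            return endpoints
        except Exception as e:
            report_error('Error: {0}'.format(e))
            raise                           # same effect as MuranoPL's Rethrow
    return None                             # unchanged container: nothing to do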

View File

@@ -114,6 +114,7 @@ Methods:
Body:
Return: $.masterNode.getIp()
createPod:
Arguments:
- definition:
@@ -159,6 +160,18 @@ Methods:
- $.masterNode.instance.agent.call($template, $resources)
deletePods:
Arguments:
- labels:
Contract:
$.string().notNull(): $.string().notNull()
Body:
- $.deploy()
- $resources: new(sys:Resources)
- $template: $resources.yaml('DeletePods.template').bind(dict(labels => $labels))
- $.masterNode.instance.agent.call($template, $resources)
createServices:
Arguments:
- applicationName:
@@ -339,6 +352,7 @@ Methods:
- $serviceIp: $endpoint.address
- Return: $serviceIp
_updateEndpoints:
Body:
- For: endpoint
@@ -428,6 +442,7 @@ Methods:
- $result.publicIPs: $.minionNodes.take($.nodeCount).select($.getIp())
- Return: $result
scaleNodesUp:
Usage: Action
Body:
@@ -437,6 +452,7 @@ Methods:
- $.nodeCount: $.nodeCount + 1
- $.deploy()
scaleNodesDown:
Usage: Action
Body:
@@ -446,6 +462,7 @@ Methods:
- $.nodeCount: $.nodeCount - 1
- $.deploy()
scaleGatewaysUp:
Usage: Action
Body:
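
Functionally, the new deletePods method above hands the labels dictionary to
the murano agent on the master node, which turns it into a kubectl label
selector. Outside of Murano the operation boils down to roughly the following
(a sketch only; plain kubectl is assumed here, whereas the script added below
uses /opt/bin/kubectl):

# Rough Python equivalent (illustration only) of what deletePods ends up running.
import subprocess

def delete_pods(labels):
    selector = ','.join('{0}={1}'.format(k, v) for k, v in labels.items())
    subprocess.check_call(['kubectl', 'delete', 'pod', '-l', selector])

# delete_pods({'id': 'my-pod'})  executes:  kubectl delete pod -l id=my-pod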

View File

@@ -0,0 +1,21 @@
FormatVersion: 2.0.0
Version: 1.0.0
Name: Delete Pods
Parameters:
labels: $labels
Body: |
labels_str = ','.join(map(lambda t: '='.join(t), args.labels.items()))
deletePods(labels_str)
Scripts:
deletePods:
Type: Application
Version: 1.0.0
EntryPoint: deletePods.sh
Files: []
Options:
captureStdout: true
captureStderr: true
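
The Body above is executed as Python by the murano agent; it flattens the
labels dictionary into the comma-separated selector syntax kubectl expects,
for example (the dictionary below is just an illustration):

# Same transformation as the template Body above, with an illustrative input.
labels = {'id': 'my-pod', 'app': 'web'}
labels_str = ','.join(map(lambda t: '='.join(t), labels.items()))
# labels_str == 'id=my-pod,app=web'  (item order may vary on older Pythons)

The resulting string is then passed to the deletePods script, which runs
kubectl delete pod -l with it.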

View File

@@ -0,0 +1,3 @@
#!/bin/bash
echo "Deleting Pods" >> /tmp/murano-kube.log
/opt/bin/kubectl delete pod -l $1 >> /tmp/murano-kube.log

View File

@@ -65,6 +65,17 @@ Methods:
- container:
Contract: $.class(docker:DockerContainer)
Body:
- $podName: $._getPodName()
- For: port
In: $container.ports
Do:
- $endpoints: $.kubernetesCluster.serviceEndpoints.where(
$.containerPort = $port.port and $.protocol = $port.protocol and $.podId = $podName)
- If: len($endpoints) > 0
Then:
- $msg: format('Port {0} is already used in the Pod {1}', $port.port, $.name)
- Throw: PortConflictException
Message: $msg
- $._deleteContainer($container.name)
- $containerDef:
@@ -89,7 +100,7 @@ Methods:
- $.kubernetesCluster.createServices(
applicationName => $container.name,
applicationPorts => $container.ports,
podId => $._getPodName())
podId => $podName)
- Return: $.getEndpoints($container.name)
@@ -171,33 +182,39 @@ Methods:
Body:
Return: toLower($volume.name)
deploy:
Body:
- $prevPod: $.getAttr(lastPodDeployed, null)
- $prevReplicas: $.getAttr(lastReplicas, 0)
- $podDefinition: $._podDefinition
- If: $prevPod != $podDefinition
- $replicas: $.replicas
- If: len($podDefinition.desiredState.manifest.containers) = 0
Then:
- $._environment.reporter.report($, 'Deploying Pod {0}'.format($.name))
- $.kubernetesCluster.createPod(
definition => $podDefinition, isNew => not $.getAttr(deployed, false))
- $.setAttr(lastPodDeployed, $podDefinition)
- $.setAttr(deployed, true)
- If: $.replicas != $prevReplicas or $prevPod != $podDefinition
- $replicas: 0
- $.setAttr(lastReplicas, $replicas)
- If: $replicas != $prevReplicas or $prevPod != $podDefinition
Then:
- If: $.replicas > 0
- If: $replicas > 0
Then:
- $._environment.reporter.report($, 'Deploying Replication Controller for Pod {0}'.format($.name))
- $rcDefinition: $._buildReplicationControllerDefinition($podDefinition)
- $.kubernetesCluster.createReplicationController(
definition => $rcDefinition, isNew => $prevReplicas = 0)
- If: $.replicas = 0 and $prevReplicas > 0
- If: $replicas = 0 and $prevReplicas > 0
Then:
- $.kubernetesCluster.deleteReplicationController($._getReplicationControllerId())
- $.setAttr(lastReplicas, $.replicas)
- $._environment.reporter.report($, 'Pod {0} is ready'.format($.name))
- If: $prevPod != $podDefinition
Then:
- $.kubernetesCluster.deletePods(dict(id => $._getPodName()))
- If: $.replicas = 0 and len($podDefinition.desiredState.manifest.containers) > 0
Then:
- $.kubernetesCluster.createPod(definition => $podDefinition, isNew => true)
- $._environment.reporter.report($, 'Pod {0} is ready'.format($.name))
- $.setAttr(lastPodDeployed, $podDefinition)
_buildReplicationControllerDefinition:
Arguments:
@@ -216,10 +233,12 @@ Methods:
desiredState: $podDefinition.desiredState
labels: $podDefinition.labels
_getReplicationControllerId:
Body:
- Return: $._getPodName()
getInternalScopeId:
Body:
Return: $.kubernetesCluster.id()
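
For reference, the port-conflict check added at the top of the
container-hosting method in the last file can be read as the following Python
sketch (the names and the dictionary fields are illustrative, mirroring the
containerPort, protocol and podId attributes used in the MuranoPL code):

# Illustrative sketch (not package code) of the port-conflict check above.
class PortConflictException(Exception):
    pass

def check_ports(pod_id, pod_name, container_ports, service_endpoints):
    for port in container_ports:
        conflicts = [e for e in service_endpoints
                     if e['containerPort'] == port['port'] and
                        e['protocol'] == port['protocol'] and
                        e['podId'] == pod_id]
        if conflicts:
            raise PortConflictException(
                'Port {0} is already used in the Pod {1}'.format(port['port'], pod_name))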