Improved Kubernetes implementation

1. Improved k8s/etcd cluster configuration
2. Service/endpoint management
3. init.d scripts
4. Gateway (haproxy) nodes with confd
5. Code refactoring

Change-Id: I0e641108e580bb470494a972013bda465d831bd6

parent 5e662d9a53 -> commit 7c15e2728a
@ -9,32 +9,40 @@ Extends: std:Application

Properties:
  name:
    Contract: $.string().notNull()

  host:
    Contract: $.class(DockerContainerHost).notNull()

  port:
    Contract: $.int().check($ > 0 and $ < 65536)

  publish:
    Contract: $.bool().notNull()
    Default: true


Methods:
  initialize:
    Body:
      - $._environment: $.find(std:Environment).require()


  deploy:
    Body:
      - If: not $.getAttr(deployed, false)
        Then:
          - $._environment.reporter.report($this, 'Installing Application')
          - $.host.hostContainer(
              name => $.name,
              image => httpd,
              commands => list(),
              env => dict(),
              ports => list($.port),
              volumes => dict()
            )
          - $.host.deploy()

          - $._environment.reporter.report($this, 'Application HTTPd available at {0}:{1}'.format($.host.getIp(), $.port))
          - $.setAttr(deployed, true)
          - $._environment.reporter.report($this, 'Installing HTTPd')
          - $ports:
              - port: $.port
                scope: switch($.publish, $ => public, not $ => host)

          - $endpoints: $.host.hostContainer(
              applicationName => $.name,
              image => httpd,
              ports => $ports
            ).where($.scope = public).select(format('http://{0}:{1}', $.address, $.port))

          - If: $.publish
            Then: $._environment.reporter.report($this, 'Application HTTPd is available at {0}'.format(join(', ', $endpoints)))
            Else: $._environment.reporter.report($this, 'Application HTTPd has deployed but is not accessible from outside')
          - $.setAttr(deployed, true)
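For a published application, the $ports list built in the new deploy body evaluates to a single record, and the endpoint selection then yields one URL per public address. A sketch with an illustrative floating IP (not taken from the commit):

    $ports:
      - port: 80
        scope: public
    $endpoints:
      - 'http://172.24.4.101:80'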
@ -6,6 +6,7 @@ Application:

  name: $.appConfiguration.name
  host: $.appConfiguration.host
  port: $.appConfiguration.port
  publish: $.appConfiguration.publish


Forms:

@ -22,7 +23,7 @@ Forms:

        type: io.murano.apps.docker.kubernetes.KubernetesPod
        label: Container Host
        description: >-
          Select an instance of Docker contaniner to run the app
          Select an instance of Docker contaniner hosting provider to run the app
      - name: port
        type: integer
        label: Port

@ -31,4 +32,10 @@ Forms:

        initial: 80
        description: >-
          Select a port to run the app
      - name: publish
        type: boolean
        initial: false
        label: Make application accessible from outside
        initial: false
        required: true
@ -0,0 +1,17 @@

Namespaces:
  =: io.murano.apps.docker
  std: io.murano

Name: ApplicationPort

Properties:
  port:
    Contract: $.int().notNull().check($ > 0 and $ < 65536)

  scope:
    Contract: $.string().notNull().check($ in list(public, private, host, node))
    Default: private

  protocol:
    Contract: $.string().notNull()
    Default: TCP
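The HTTPd application above builds these records inline before handing them to hostContainer; a sketch of what one such port entry carries, with illustrative values and defaults per the contracts above:

    ports:
      - port: 80            # container port, 1-65535
        scope: public       # one of public, private, host, node
        protocol: TCP       # defaults to TCP when omitted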
@ -13,27 +13,34 @@ Properties:

Methods:
  hostContainer:
    Arguments:
      - name:
      - applicationName:
          Contract: $.string().notNull()
      - image:
          Contract: $.string().notNull()
      - commands:
          Contract:
            - $.string().notNull()
          Default: []
      - env:
          Contract:
            $.string().notNull(): $.string().notNull()
          Default: {}
      - ports:
          Contract:
            - $.int().notNull()
            - $.class(ApplicationPort)
          Default: []
      - volumes:
          Contract:
            $.string().notNull(): $.class(DockerVolume).notNull()
          Default: {}


  deleteContainer:
    Arguments:
      - name:
          Contract: $.string().notNull()

  getIp:

  getEndpoints:
    Arguments:
      - applicationName:
          Contract: $.string().notNull()
@ -7,7 +7,7 @@ Extends: DockerVolume

Properties:
  path:
    Contract: $.string.notNull()
    Contract: $.string().notNull()

Methods:
  getType:
@ -12,3 +12,4 @@ Classes:

  io.murano.apps.docker.DockerHostVolume: DockerHostVolume.yaml
  io.murano.apps.docker.DockerTempVolume: DockerTempVolume.yaml
  io.murano.apps.docker.DockerVolume: DockerVolume.yaml
  io.murano.apps.docker.ApplicationPort: ApplicationPort.yaml
@ -3,6 +3,8 @@ Namespaces:

  std: io.murano
  res: io.murano.resources
  sys: io.murano.system
  docker: io.murano.apps.docker


Name: KubernetesCluster


@ -22,12 +24,42 @@ Properties:

  nodeCount:
    Contract: $.int().notNull().check($ > 0)
    Usage: InOut

  gatewayCount:
    Contract: $.int().notNull()

  gatewayNodes:
    Contract:
      - $.class(KubernetesGatewayNode).notNull()

  serviceEndpoints:
    Contract:
      - port: $.int().notNull().check($ > 0)
        address: $.string().notNull()
        scope: $.string().notNull().check($ in list(public, cloud, internal))
        applicationPort: $.class(docker:ApplicationPort).notNull()
        applicationName: $.string().notNull()
        podId: $.string().notNull()
        name: $.string().notNull()
    Default: []
    Usage: Out


Methods:
  initialize:
    Body:
      - $._environment: $.find(std:Environment).require()
      - $._services: {}
      - For: endpoint
        In: $.serviceEndpoints
        Do:
          - $port: $endpoint.port
          - $._services[$port]:
              applicationName: $endpoint.applicationName
              applicationPort: $endpoint.applicationPort
              podId: $endpoint.podId
              name: $endpoint.name


  deploy:
    Body:
@ -51,53 +83,317 @@ Methods:

          FromPort: 8080
          IpProtocol: tcp
          External: false
        - ToPort: 2380
          FromPort: 2380
          IpProtocol: tcp
          External: false
        - ToPort: 8285
          FromPort: 8285
          IpProtocol: udp
          External: false
      - $._environment.securityGroupManager.addGroupIngress($securityGroupIngress)
      - $._environment.reporter.report($this, 'Setting up Kubernetes cluster')
      - $.masterNode.deploy()
      - $.setAttr(deployed, true)

      - $._environment.reporter.report($this, 'Setting up Kubernetes cluster')
      - Parallel:
        - Do:
          - $.minionNodes.take($.nodeCount).pselect($.deploy())
        - Do:
          - $.minionNodes.skip($.nodeCount).pselect($.removeFromCluster())
        - Do: $.masterNode.deployInstance()
        - Do: $.minionNodes.take($.nodeCount).pselect($.deployInstance())
        - Do: $.gatewayNodes.take($.gatewayCount).pselect($.deployInstance())

  scaleUP:
      - $.masterNode.setupEtcd()
      - $.minionNodes.take($.nodeCount).select($.setupEtcd())
      - $.gatewayNodes.take($.gatewayCount).select($.setupEtcd())

      - $.masterNode.setupNode()
      - Parallel:
        - Do: $.minionNodes.take($.nodeCount).pselect($.setupNode())
        - Do: $.minionNodes.skip($.nodeCount).pselect($.removeFromCluster())
        - Do: $.gatewayNodes.take($.gatewayCount).pselect($.setupNode())
      - $._deployServices()


  getIp:
    Body:
      Return: $.masterNode.getIp()
createPod:
|
||||
Arguments:
|
||||
- definition:
|
||||
Contract: {}
|
||||
- isNew:
|
||||
Contract: $.bool().notNull()
|
||||
Default: true
|
||||
Body:
|
||||
- $.deploy()
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('UpdatePod.template').bind(dict(
|
||||
podDefinition => $definition,
|
||||
isNew => $isNew
|
||||
))
|
||||
- $.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
|
||||
createReplicationController:
|
||||
Arguments:
|
||||
- definition:
|
||||
Contract: {}
|
||||
- isNew:
|
||||
Contract: $.bool().notNull()
|
||||
Default: true
|
||||
Body:
|
||||
- $.deploy()
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('UpdateReplicationController.template').bind(dict(
|
||||
controllerDefinition => $definition,
|
||||
isNew => $isNew
|
||||
))
|
||||
- $.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
|
||||
destroyReplicationController:
|
||||
Arguments:
|
||||
- id:
|
||||
Contract: $.string().notNull()
|
||||
Body:
|
||||
- $.deploy()
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('DestroyReplicationController.template').bind(dict(rcId => $id))
|
||||
- $.kubernetesCluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
|
||||
createServices:
|
||||
Arguments:
|
||||
- applicationName:
|
||||
Contract: $.string().notNull()
|
||||
- applicationPorts:
|
||||
Contract:
|
||||
- $.class(docker:ApplicationPort)
|
||||
- podId:
|
||||
Contract: $.string().notNull()
|
||||
Body:
|
||||
- $._destroyService($applicationName, $podId)
|
||||
- For: applicationPort
|
||||
In: $applicationPorts.where($.scope in list(public, host))
|
||||
Do:
|
||||
- $allocatedPort: $._findUnusedPort($applicationPort.port)
|
||||
- $._services[$allocatedPort]:
|
||||
applicationName: $applicationName
|
||||
applicationPort: $applicationPort
|
||||
podId: $podId
|
||||
name: format('svc-{0}', randomName())
|
||||
- $.deploy()
|
||||
|
||||
|
||||
_destroyService:
|
||||
Arguments:
|
||||
- applicationName:
|
||||
Contract: $.string().notNull()
|
||||
- podId:
|
||||
Contract: $.string().notNull()
|
||||
Body:
|
||||
- $services: {}
|
||||
- For: port
|
||||
In: $._services.keys()
|
||||
Do:
|
||||
- $record: $._services.get($port)
|
||||
- If: $record.applicationName != $applicationName or $record.podId != $podId
|
||||
Then:
|
||||
$services[$port]: $record
|
||||
- $._services: $services
|
||||
|
||||
|
||||
destroyService:
|
||||
Arguments:
|
||||
- applicationName:
|
||||
Contract: $.string().notNull()
|
||||
- podId:
|
||||
Contract: $.string().notNull()
|
||||
Body:
|
||||
- $._destroyService($applicationName, $podId)
|
||||
- $.deploy()
|
||||
|
||||
|
||||
_deployServices:
|
||||
Body:
|
||||
- $services: $._services
|
||||
- $endpoints: []
|
||||
- $skipPorts: []
|
||||
- $resources: new(sys:Resources)
|
||||
- $prevNodeCount: $.getAttr(lastNodeCount, -1)
|
||||
- $prevGatewayCount: $.getAttr(lastGatewayCount, -1)
|
||||
- $gatewayModeChanged: $prevGatewayCount != $.gatewayCount and $prevGatewayCount * $.gatewayCount = 0
|
||||
- $serviceEndpoints: {}
|
||||
|
||||
- For: deployedRecord
|
||||
In: $.serviceEndpoints.where($.scope = cloud)
|
||||
Do:
|
||||
- $port: $deployedRecord.port
|
||||
- If: not $port in $services.keys()
|
||||
Then:
|
||||
- $template: $resources.yaml('DestroyService.template').bind(dict(
|
||||
serviceId => $deployedRecord.name
|
||||
))
|
||||
- $.masterNode.instance.agent.call($template, $resources)
|
||||
Else:
|
||||
$serviceEndpoints[$port]: $deployedRecord
|
||||
|
||||
- For: port
|
||||
In: $services.keys()
|
||||
Do:
|
||||
- $runtimeRecord: $services.get($port)
|
||||
- $deployedRecord: $serviceEndpoints.get($port)
|
||||
- $portChanged: $deployedRecord = null
|
||||
- If: not $portChanged
|
||||
Then:
|
||||
$portChanged: $deployedRecord.podId != $runtimeRecord.podId or
|
||||
$runtimeRecord.applicationPort.scope = public and (
|
||||
$gatewayModeChanged or $.gatewayCount = 0 and $prevNodeCount != nodeCount)
|
||||
|
||||
- If: $portChanged
|
||||
Then:
|
||||
- $serviceDefinition: $._buildServiceDefinition(
|
||||
$runtimeRecord.name,
|
||||
$port,
|
||||
$runtimeRecord.applicationPort.port,
|
||||
$runtimeRecord.podId,
|
||||
$.gatewayCount = 0
|
||||
)
|
||||
|
||||
- $template: $resources.yaml('UpdateService.template').bind(dict(
|
||||
serviceDefinition => $serviceDefinition,
|
||||
isNew => $deployedRecord = null
|
||||
))
|
||||
- If: $runtimeRecord.applicationPort.scope = public
|
||||
Then:
|
||||
- $securityGroupIngress:
|
||||
- ToPort: $port
|
||||
FromPort: $port
|
||||
IpProtocol: toLower($runtimeRecord.applicationPort.protocol)
|
||||
External: true
|
||||
- $._environment.securityGroupManager.addGroupIngress($securityGroupIngress)
|
||||
|
||||
- $serviceIp: $.masterNode.instance.agent.call($template, $resources)
|
||||
Else:
|
||||
- $serviceIp: $deployedRecord.address
|
||||
|
||||
- $endpoint:
|
||||
port: $port
|
||||
address: $serviceIp
|
||||
scope: internal
|
||||
applicationPort: $runtimeRecord.applicationPort
|
||||
applicationName: $runtimeRecord.applicationName
|
||||
podId: $runtimeRecord.podId
|
||||
name: $runtimeRecord.name
|
||||
- $endpoints: $endpoints + list($endpoint)
|
||||
|
||||
- If: $runtimeRecord.applicationPort.scope = public
|
||||
Then:
|
||||
- If: $.gatewayCount > 0
|
||||
Then:
|
||||
$nodes: $.gatewayNodes.take($.gatewayCount)
|
||||
Else:
|
||||
$nodes: $.minionNodes.take($.nodeCount)
|
||||
|
||||
- For: t
|
||||
In: $nodes
|
||||
Do:
|
||||
- $endpoint.address: $t.getIp()
|
||||
- $endpoint.scope: cloud
|
||||
- $endpoints: $endpoints + list($endpoint)
|
||||
- If: $t.instance.floatingIpAddress != null
|
||||
Then:
|
||||
- $endpoint.address: $t.instance.floatingIpAddress
|
||||
- $endpoint.scope: public
|
||||
- $endpoints: $endpoints + list($endpoint)
|
||||
- $.serviceEndpoints: $endpoints
|
||||
- $._environment.stack.push()
|
||||
|
||||
|
||||
  _findUnusedPort:
    Arguments:
      - initial:
          Contract: $.int().notNull()
    Body:
      - If: not $initial in $._services.keys()
        Then:
          Return: $initial
      - $port: 1025
      - While: $port in $._services.keys()
        Do:
          $port: $port + 1
      - Return: $port
  _buildServiceDefinition:
    Arguments:
      - serviceName:
          Contract: $.string().notNull()
      - servicePort:
          Contract: $.int().notNull()
      - containerPort:
          Contract: $.int().notNull()
      - podId:
          Contract: $.string().notNull()
      - withDodeIps:
          Contract: $.bool().notNull()
    Body:
      - $result:
          id: $serviceName
          kind: Service
          apiVersion: v1beta1
          port: $servicePort
          containerPort: $containerPort
          selector:
            id: $podId
      - If: $withDodeIps
        Then:
          - $result.publicIPs: $.minionNodes.take($.nodeCount).select($.getIp())
      - Return: $result
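With illustrative arguments (hypothetical service name, pod id and node IPs), the definition this method returns and UpdateService.template writes to the master would look roughly like:

    id: svc-ab12cd
    kind: Service
    apiVersion: v1beta1
    port: 1025
    containerPort: 80
    selector:
      id: pod-0
    publicIPs:            # only present when the cluster runs without gateway nodes
      - 10.0.0.12
      - 10.0.0.13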
scaleUp:
|
||||
Usage: Action
|
||||
Body:
|
||||
- $._environment.reporter.report($this, 'Scaling up Kubernetes containers cluster')
|
||||
|
||||
- If: $.nodeCount < len($.minionNodes)
|
||||
Then:
|
||||
- $minion: $.minionNodes[$.nodeCount]
|
||||
- $._environment.reporter.report($this, 'Creating a new VM')
|
||||
- $minion.deploy()
|
||||
- $._environment.reporter.report($this, 'Scaling up Kubernetes cluster')
|
||||
- $.nodeCount: $.nodeCount + 1
|
||||
- $._environment.reporter.report($this, 'Scaling Kubernetes containers cluster complete')
|
||||
- $.deploy()
|
||||
|
||||
scaleDown:
|
||||
Usage: Action
|
||||
Body:
|
||||
- $._environment.reporter.report($this, 'Scaling Kubernetes containers cluster down')
|
||||
- If: $.nodeCount > 1
|
||||
Then:
|
||||
- $._environment.reporter.report($this, 'Scaling Kubernetes cluster down')
|
||||
- $.nodeCount: $.nodeCount - 1
|
||||
- $.deploy()
|
||||
|
||||
scaleGatewaysUp:
|
||||
Usage: Action
|
||||
Body:
|
||||
- If: $.nodeCount < len($.gatewayNodes)
|
||||
Then:
|
||||
- $._environment.reporter.report($this, 'Adding new gateway node')
|
||||
- $.nodeCount: $.gatewayCount + 1
|
||||
- $.deploy()
|
||||
|
||||
scaleGatewaysUp:
|
||||
Usage: Action
|
||||
Body:
|
||||
- If: $.nodeCount > 0
|
||||
Then:
|
||||
- $minion: $.minionNodes[$.nodeCount - 1]
|
||||
- $minion.removeFromCluster()
|
||||
- $.nodeCount: $.nodeCount - 1
|
||||
- $._environment.reporter.report($this, 'Scaling Kubernetes containers cluster complete')
|
||||
- $._environment.reporter.report($this, 'Removing gateway node')
|
||||
- $.nodeCount: $.gatewayCount - 1
|
||||
- $.deploy()
|
||||
|
||||
exportConfig:
|
||||
Usage: Action
|
||||
Body:
|
||||
- $._environment.reporter.report($this, 'Action exportConfig is called')
|
||||
- $._environment.reporter.report($this, 'Action exportConfig called')
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('ExportConfig.template')
|
||||
- $result: $.masterNode.instance.agent.call($template, $resources)
|
||||
- $._environment.reporter.report($this, 'Got archive from Kubernetes')
|
||||
- Return: new(std:File, base64Content=>$result.content,
|
||||
filename => 'application.tar.gz')
|
||||
|
||||
getIp:
|
||||
Body:
|
||||
Return: $.masterNode.getIp()
|
||||
|
|
|
@ -0,0 +1,72 @@

Namespaces:
  =: io.murano.apps.docker.kubernetes
  std: io.murano
  res: io.murano.resources
  sys: io.murano.system

Name: KubernetesGatewayNode

Extends: KubernetesNode

Methods:
  initialize:
    Body:
      - $._environment: $.find(std:Environment).require()
      - $._cluster: $.find(KubernetesCluster).require()

  deployInstance:
    Body:
      - If: not $.getAttr(deployed, false)
        Then:
          - $._environment.reporter.report($this, 'Creating Gateway node for Kubernetes services')
          - $.super($.deployInstance())
          - $.setAttr(deployed, true)

  setupEtcd:
    Body:
      - $._environment.reporter.report($, 'Configuring etcd node {0}'.format($.instance.name))
      - $resources: new(sys:Resources)
      - $template: $resources.yaml('EtcdAddMember.template').bind(dict(
          name => $.instance.name,
          ip => $.getIp()
        ))
      - $clusterConfig: $._cluster.masterNode.instance.agent.call($template, $resources)

      - $template: $resources.yaml('MemberEtcdSetup.template').bind(dict(
          name => $.instance.name,
          ip => $.getIp(),
          clusterConfig => $clusterConfig
        ))
      - $.instance.agent.call($template, $resources)


  setupNode:
    Body:
      - $._environment.reporter.report($this, 'Setup Flannel network on {0}'.format($.instance.name))
      - $resources: new(sys:Resources)
      - $template: $resources.yaml('SetupFlannelNode.template')
      - $.instance.agent.call($template, $resources)

      - $._environment.reporter.report($, 'Setup Load Balancer on {0}'.format($.instance.name))
      - $template: $resources.yaml('HAProxySetup.template').bind(dict(
          masterIp => $._cluster.masterNode.getIp()
        ))
      - $.instance.agent.call($template, $resources)

      - $template: $resources.yaml('KubeRegisterNode.template').bind(dict(
          name => $.instance.name,
          ip => $.getIp()
        ))
      - $._cluster.masterNode.instance.agent.call($template, $resources)

  removeFromCluster:
    Body:
      - If: $.getAttr(deployed, false)
        Then:
          - $._environment.reporter.report($this, 'Deleting Kubernetes Minion')
          - $resources: new(sys:Resources)
          - $template: $resources.yaml('RemoveMinion.template').bind(dict(name=>$.getIp()))
          - $._cluster.masterNode.instance.agent.call($template, $resources)
          - $._environment.reporter.report($this, 'Node {0} deleted'.format($.getIp()))
          - $.setAttr(deployed, false)
@ -8,40 +8,46 @@ Name: KubernetesMasterNode
|
|||
|
||||
Extends: KubernetesNode
|
||||
|
||||
Properties:
|
||||
portalNet:
|
||||
Contract: $.string().notNull()
|
||||
Default: '11.1.1.1/24'
|
||||
|
||||
|
||||
Methods:
|
||||
initialize:
|
||||
Body:
|
||||
- $._environment: $.find(std:Environment).require()
|
||||
|
||||
deploy:
|
||||
|
||||
deployInstance:
|
||||
Body:
|
||||
- If: not $.getAttr(deployed, false)
|
||||
Then:
|
||||
- $._environment.reporter.report($this, 'Creating Kubernetes Master')
|
||||
- $.super($.deploy())
|
||||
- $.setupNode()
|
||||
- $.super($.deployInstance())
|
||||
- $.setAttr(deployed, true)
|
||||
|
||||
|
||||
setupEtcd:
|
||||
Body:
|
||||
- If: not $.getAttr(etcdConfigured, false)
|
||||
Then:
|
||||
- $._environment.reporter.report($, 'Configuring etcd master node')
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('MasterEtcdSetup.template').bind(dict(
|
||||
name => $.instance.name,
|
||||
ip => $.getIp()
|
||||
))
|
||||
- $.instance.agent.call($template, $resources)
|
||||
- $.setAttr(etcdConfigured, true)
|
||||
|
||||
|
||||
setupNode:
|
||||
Body:
|
||||
- $._environment.reporter.report($, 'Configuring ETCD master node')
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('MasterEtcdSetup.template').bind(dict(
|
||||
- $._environment.reporter.report($this, 'Setup Flannel network on master node')
|
||||
- $template: $resources.yaml('SetupFlannelMaster.template')
|
||||
- $.instance.agent.call($template, $resources)
|
||||
|
||||
- $._environment.reporter.report($this, 'Setup Kubernetes API host')
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('KubeMasterSetup.template').bind(dict(
|
||||
name => $.instance.name,
|
||||
ip => $.getIp()
|
||||
))
|
||||
- $.instance.agent.call($template, $resources)
|
||||
|
||||
- $._environment.reporter.report($this, 'Setup Kubernetes API host')
|
||||
- $template: $resources.yaml('KubeMasterSetup.template').bind(dict(
|
||||
name => $.instance.name,
|
||||
ip => $.getIp(),
|
||||
portalNet => $.portalNet
|
||||
))
|
||||
- $.instance.agent.call($template, $resources)
|
||||
|
|
|
@ -14,18 +14,19 @@ Methods:
|
|||
- $._environment: $.find(std:Environment).require()
|
||||
- $._cluster: $.find(KubernetesCluster).require()
|
||||
|
||||
deploy:
|
||||
|
||||
deployInstance:
|
||||
Body:
|
||||
- If: not $.getAttr(deployed, false)
|
||||
Then:
|
||||
- $._environment.reporter.report($this, 'Creating Kubernetes Minion')
|
||||
- $.super($.deploy())
|
||||
- $.setupNode()
|
||||
- $.super($.deployInstance())
|
||||
- $.setAttr(deployed, true)
|
||||
|
||||
setupNode:
|
||||
|
||||
setupEtcd:
|
||||
Body:
|
||||
- $._environment.reporter.report($, 'Configuring ETCD node {0}'.format($.instance.name))
|
||||
- $._environment.reporter.report($, 'Configuring etcd node {0}'.format($.instance.name))
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('EtcdAddMember.template').bind(dict(
|
||||
name => $.instance.name,
|
||||
|
@ -33,7 +34,6 @@ Methods:
|
|||
))
|
||||
- $clusterConfig: $._cluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
- $._environment.reporter.report($this, 'Setup Kubernetes API host')
|
||||
- $template: $resources.yaml('MemberEtcdSetup.template').bind(dict(
|
||||
name => $.instance.name,
|
||||
ip => $.getIp(),
|
||||
|
@ -41,8 +41,15 @@ Methods:
|
|||
))
|
||||
- $.instance.agent.call($template, $resources)
|
||||
|
||||
- $._environment.reporter.report($, 'Setup Kubernetes Minion on {0}'.format($.instance.name))
|
||||
|
||||
setupNode:
|
||||
Body:
|
||||
- $._environment.reporter.report($this, 'Setup Flannel network on {0}'.format($.instance.name))
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('SetupFlannelNode.template')
|
||||
- $.instance.agent.call($template, $resources)
|
||||
|
||||
- $._environment.reporter.report($, 'Setup Kubernetes Minion on {0}'.format($.instance.name))
|
||||
- $template: $resources.yaml('KubeMinionSetup.template').bind(dict(
|
||||
name => $.instance.name,
|
||||
ip => $.getIp(),
|
||||
|
@ -56,6 +63,7 @@ Methods:
|
|||
))
|
||||
- $._cluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
|
||||
removeFromCluster:
|
||||
Body:
|
||||
- If: $.getAttr(deployed, false)
|
||||
|
@ -66,4 +74,3 @@ Methods:
|
|||
- $._cluster.masterNode.instance.agent.call($template, $resources)
|
||||
- $._environment.reporter.report($this, 'Node {0} deleted'.format($.getIp()))
|
||||
- $.setAttr(deployed, false)
|
||||
|
||||
|
|
|
@ -6,15 +6,22 @@ Namespaces:
|
|||
|
||||
Name: KubernetesNode
|
||||
|
||||
|
||||
Properties:
|
||||
instance:
|
||||
Contract: $.class(res:LinuxMuranoInstance).notNull()
|
||||
|
||||
|
||||
Methods:
|
||||
getIp:
|
||||
Body:
|
||||
Return: $.instance.ipAddresses[0]
|
||||
|
||||
deploy:
|
||||
|
||||
deployInstance:
|
||||
Body:
|
||||
- $.instance.deploy()
|
||||
|
||||
|
||||
setupEtcd:
|
||||
setupNode:
|
||||
|
|
|
@ -3,10 +3,10 @@ Version: 1.0.0
|
|||
Name: Destroy Replication Controller
|
||||
|
||||
Parameters:
|
||||
serviceId: $serviceId
|
||||
rcId: $rcId
|
||||
|
||||
Body: |
|
||||
return destroyReplicationController('{0}'.format(args.serviceId).stdout
|
||||
return destroyReplicationController(args.rcId).stdout
|
||||
|
||||
Scripts:
|
||||
destroyReplicationController:
|
|
@ -1,6 +1,6 @@
|
|||
FormatVersion: 2.0.0
|
||||
Version: 1.0.0
|
||||
Name: Add ETCD member
|
||||
Name: Add etcd member
|
||||
|
||||
Parameters:
|
||||
name: $name
|
||||
|
@ -14,8 +14,8 @@ Scripts:
|
|||
Type: Application
|
||||
Version: 1.0.0
|
||||
EntryPoint: master-add-member.sh
|
||||
Files:
|
||||
- <master-etcd-config.conf>
|
||||
Files: []
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
captureStderr: true
|
||||
|
|
|
@ -0,0 +1,24 @@

FormatVersion: 2.0.0
Version: 1.0.0
Name: Setup HAProxy

Parameters:
  masterIp: $masterIp

Body: |
  setup(args.masterIp)

Scripts:
  setup:
    Type: Application
    Version: 1.0.0
    EntryPoint: haproxy-setup.sh
    Files:
      - default_scripts/haproxy
      - init_conf/confd.conf
      - haproxy.tmpl
      - haproxy.toml

    Options:
      captureStdout: true
      captureStderr: true
@ -5,10 +5,9 @@ Name: Setup Kubernetes Master
|
|||
Parameters:
|
||||
name: $name
|
||||
ip: $ip
|
||||
portalNet: $portalNet
|
||||
|
||||
Body: |
|
||||
return setup('{0} {1} {2}'.format(args.name, args.ip, args.portalNet)).stdout
|
||||
return setup('{0} {1}'.format(args.name, args.ip)).stdout
|
||||
|
||||
Scripts:
|
||||
setup:
|
||||
|
@ -16,11 +15,15 @@ Scripts:
|
|||
Version: 1.0.0
|
||||
EntryPoint: master-kube-setup.sh
|
||||
Files:
|
||||
- <kube-apiserver.conf>
|
||||
- <kube-proxy.conf>
|
||||
- <kube-scheduler.conf>
|
||||
- <kubelet.conf>
|
||||
- <kube-controller-manager.conf>
|
||||
- init_conf/kube-apiserver.conf
|
||||
- init_conf/kube-controller-manager.conf
|
||||
- init_conf/kube-scheduler.conf
|
||||
- initd_scripts/kube-apiserver
|
||||
- initd_scripts/kube-controller-manager
|
||||
- initd_scripts/kube-scheduler
|
||||
- default_scripts/kube-apiserver
|
||||
- default_scripts/kube-scheduler
|
||||
- default_scripts/kube-controller-manager
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
|
|
|
@ -16,8 +16,12 @@ Scripts:
|
|||
Version: 1.0.0
|
||||
EntryPoint: minion-kube-setup.sh
|
||||
Files:
|
||||
- <kube-proxy.conf>
|
||||
- <kubelet.conf>
|
||||
- default_scripts/kube-proxy
|
||||
- default_scripts/kubelet
|
||||
- init_conf/kubelet.conf
|
||||
- init_conf/kube-proxy.conf
|
||||
- initd_scripts/kubelet
|
||||
- initd_scripts/kube-proxy
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
FormatVersion: 2.0.0
|
||||
Version: 1.0.0
|
||||
Name: Setup ETCD Master
|
||||
Name: Setup etcd Master
|
||||
|
||||
Parameters:
|
||||
name: $name
|
||||
|
@ -15,7 +15,9 @@ Scripts:
|
|||
Version: 1.0.0
|
||||
EntryPoint: master-etcd-setup.sh
|
||||
Files:
|
||||
- <master-etcd-config.conf>
|
||||
- default_scripts/etcd-master
|
||||
- init_conf/etcd.conf
|
||||
- initd_scripts/etcd
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
FormatVersion: 2.0.0
|
||||
Version: 1.0.0
|
||||
Name: Setup ETCD Member
|
||||
Name: Setup etcd Member
|
||||
|
||||
Parameters:
|
||||
name: $name
|
||||
|
@ -13,11 +13,13 @@ Scripts:
|
|||
setup:
|
||||
Type: Application
|
||||
Version: 1.0.0
|
||||
EntryPoint: <member-etcd-setup-wrapper.sh>
|
||||
EntryPoint: member-etcd-setup.sh
|
||||
Files:
|
||||
- <member-etcd-config-p1.conf>
|
||||
- <member-etcd-config-p2.conf>
|
||||
- <member-etcd-setup.sh>
|
||||
- default_scripts/etcd-member
|
||||
- init_conf/etcd.conf
|
||||
- initd_scripts/etcd
|
||||
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
captureStderr: true
|
||||
|
|
|
@ -1,19 +1,18 @@
|
|||
FormatVersion: 2.0.0
|
||||
Version: 1.0.0
|
||||
Name: Deploy Tomcat
|
||||
Name: Setup Flannel
|
||||
|
||||
Parameters:
|
||||
appName: $appName
|
||||
|
||||
Body: |
|
||||
return getIp(args.appName).stdout
|
||||
setupFlannel()
|
||||
|
||||
Scripts:
|
||||
getIp:
|
||||
setupFlannel:
|
||||
Type: Application
|
||||
Version: 1.0.0
|
||||
EntryPoint: Linux/getIp.sh
|
||||
Files: []
|
||||
EntryPoint: setupFlannelMaster.sh
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
captureStderr: true
|
|
@ -0,0 +1,22 @@
|
|||
FormatVersion: 2.0.0
|
||||
Version: 1.0.0
|
||||
Name: Setup Flannel
|
||||
|
||||
Parameters:
|
||||
|
||||
Body: |
|
||||
setupFlannel()
|
||||
|
||||
Scripts:
|
||||
setupFlannel:
|
||||
Type: Application
|
||||
Version: 1.0.0
|
||||
EntryPoint: setupFlannelNode.sh
|
||||
Files:
|
||||
- init_conf/flanneld.conf
|
||||
- initd_scripts/flanneld
|
||||
- default_scripts/flanneld
|
||||
|
||||
Options:
|
||||
captureStdout: true
|
||||
captureStderr: true
|
|
@ -1,6 +1,6 @@
|
|||
FormatVersion: 2.0.0
|
||||
Version: 1.0.0
|
||||
Name: Update Service
|
||||
Name: Update Service
|
||||
|
||||
Parameters:
|
||||
serviceDefinition: $serviceDefinition
|
||||
|
@ -12,8 +12,10 @@ Body: |
|
|||
fileName = '/var/run/murano-kubernetes/' + str(uuid.uuid4()) + '.json'
|
||||
with open(fileName, 'w') as f:
|
||||
json.dump(args.serviceDefinition, f)
|
||||
|
||||
return updateService('{0} {1} {2} {3}'.format(args.isNew, args.serviceDefinition['id'], args.serviceDefinition['kind'], fileName )).stdout
|
||||
|
||||
updateService('{0} {1} {2} {3}'.format(args.isNew, args.serviceDefinition['id'], args.serviceDefinition['kind'], fileName))
|
||||
return getServiceIp(args.serviceDefinition['id']).stdout
|
||||
|
||||
Scripts:
|
||||
updateService:
|
||||
Type: Application
|
||||
|
@ -23,3 +25,12 @@ Scripts:
|
|||
Options:
|
||||
captureStdout: true
|
||||
captureStderr: true
|
||||
|
||||
getServiceIp:
|
||||
Type: Application
|
||||
Version: 1.0.0
|
||||
EntryPoint: getServiceIp.sh
|
||||
Files: []
|
||||
Options:
|
||||
captureStdout: true
|
||||
captureStderr: true
|
|
@ -1,204 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
DEBUGLVL=3
|
||||
LOGFILE=/tmp/muranodeployment.log
|
||||
PIPAPPS="pip python-pip pip-python"
|
||||
PIPCMD=""
|
||||
if [ "$DEBUGLVL" -eq 4 ]; then
|
||||
set -x
|
||||
fi
|
||||
function log {
|
||||
if [ "$DEBUGLVL" -gt 0 ]; then
|
||||
chars=$(echo "@$" | wc -c)
|
||||
case $DEBUGLVL in
|
||||
1 )
|
||||
echo -e "LOG:>$@"
|
||||
;;
|
||||
2)
|
||||
echo -e "$(date +"%m-%d-%Y %H:%M") LOG:>$@" | tee --append $LOGFILE
|
||||
;;
|
||||
3)
|
||||
echo -e "$(date +"%m-%d-%Y %H:%M") LOG:>$@" >> $LOGFILE
|
||||
;;
|
||||
4)
|
||||
echo -e "$(date +"%m-%d-%Y %H:%M") LOG:>$@" | tee --append $LOGFILE
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
function lowercase(){
|
||||
echo "$1" | sed "y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/"
|
||||
}
|
||||
function find_pip()
|
||||
{
|
||||
for cmd in $PIPAPPS
|
||||
do
|
||||
_cmd=$(which $cmd 2>/dev/null)
|
||||
if [ $? -eq 0 ];then
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ -z $_cmd ];then
|
||||
echo "Can't find \"pip\" in system, please install it first, exiting!"
|
||||
exit 1
|
||||
else
|
||||
PIPCMD=$_cmd
|
||||
fi
|
||||
}
|
||||
OPTIND=1 # Reset if getopts used previously
|
||||
function collect_args(){
|
||||
_n=$1
|
||||
shift
|
||||
ARGS=''
|
||||
while true
|
||||
do
|
||||
if [[ "$_n" == -* ]] || [ -z "$_n" ]; then
|
||||
OPTIND=$((OPTIND - 1))
|
||||
break
|
||||
fi
|
||||
#echo "_n=$_n ; $OPTIND"
|
||||
if [ -z "$ARGS" ]; then
|
||||
ARGS=$OPTARG
|
||||
else
|
||||
ARGS="$ARGS $_n"
|
||||
fi
|
||||
eval _n=\$$OPTIND
|
||||
OPTIND=$((OPTIND + 1))
|
||||
#sleep 1
|
||||
done
|
||||
echo $ARGS
|
||||
unset _n
|
||||
unset ARGS
|
||||
}
|
||||
function get_os(){
|
||||
KERNEL=$(uname -r)
|
||||
MACH=$(uname -m)
|
||||
OS=$(uname)
|
||||
if [ "${OS}" = "Linux" ] ; then
|
||||
if [ -f /etc/redhat-release ] ; then
|
||||
DistroBasedOn='RedHat'
|
||||
Packager='yum'
|
||||
DIST=$(cat /etc/redhat-release |sed s/\ release.*//)
|
||||
PSUEDONAME=$(cat /etc/redhat-release | sed s/.*\(// | sed s/\)//)
|
||||
REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//)
|
||||
elif [ -f /etc/SuSE-release ] ; then
|
||||
DistroBasedOn='SuSe'
|
||||
Packager='zypper'
|
||||
PSUEDONAME=$(cat /etc/SuSE-release | tr "\n" ' '| sed s/VERSION.*//)
|
||||
REV=$(cat /etc/SuSE-release | tr "\n" ' ' | sed s/.*=\ //)
|
||||
elif [ -f /etc/mandrake-release ] ; then
|
||||
DistroBasedOn='Mandrake'
|
||||
Packager='urpmi urpme'
|
||||
PSUEDONAME=$(cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//)
|
||||
REV=$(cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//)
|
||||
elif [ -f /etc/debian_version ] ; then
|
||||
DistroBasedOn='Debian'
|
||||
Packager='apt-get'
|
||||
DIST=$(cat /etc/lsb-release | grep '^DISTRIB_ID' | awk -F= '{ print $2 }')
|
||||
PSUEDONAME=$(cat /etc/lsb-release | grep '^DISTRIB_CODENAME' | awk -F= '{ print $2 }')
|
||||
REV=$(cat /etc/lsb-release | grep '^DISTRIB_RELEASE' | awk -F= '{ print $2 }')
|
||||
fi
|
||||
if [ -f /etc/UnitedLinux-release ] ; then
|
||||
DIST="${DIST}[$(cat /etc/UnitedLinux-release | tr "\n" ' ' | sed s/VERSION.*//)]"
|
||||
fi
|
||||
OS=$(lowercase $OS)
|
||||
DistroBasedOn=$(lowercase $DistroBasedOn)
|
||||
readonly OS
|
||||
readonly DIST
|
||||
readonly DistroBasedOn
|
||||
readonly PSUEDONAME
|
||||
readonly REV
|
||||
readonly KERNEL
|
||||
readonly MACH
|
||||
#readonly Packager
|
||||
else
|
||||
OS=unknown
|
||||
readonly OS
|
||||
log "OS:$OS"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function add_fw_rule(){
|
||||
_rule_string=$@
|
||||
_tmp_fw_port=$(echo $_rule_string | grep -o -e "dport [0-9]*\s")
|
||||
_tmp_fw_proto=$(echo $_rule_string | grep -o -e "-p \w*\s")
|
||||
_fw_port=$(echo $_tmp_fw_port | awk '{print $2}')
|
||||
_fw_proto=$(echo $_tmp_fw_proto |awk '{print $2}')
|
||||
_fw_reload=""
|
||||
#find iptables and add rule
|
||||
case $DIST in
|
||||
"Fedora")
|
||||
_fw_cmd=$(which firewall-cmd)
|
||||
_fw_port=$(echo $_rule_string | grep -o -e "dport [0-9]*\s" | awk '{print $2}')
|
||||
_fw_proto=$(echo $_rule_string | grep -o -e "-p \w*\s" | awk '{print $2}')
|
||||
_fw_rule="--permanent --add-port=$_fw_port/$_fw_proto"
|
||||
_fw_enable_rules="$_fw_cmd --reload"
|
||||
;;
|
||||
*)
|
||||
_fw_cmd=$(which iptables)
|
||||
_fw_rule=$_rule_string
|
||||
_fw_enable_rules="service $(basename $_fw_cmd) save"
|
||||
;;
|
||||
esac
|
||||
iptcmdsave=$(which iptables-save)
|
||||
if [[ "$_fw_cmd" != '' ]] && [[ "$iptcmdsave" != '' ]]; then
|
||||
eval "$iptcmdsave | grep -e \"$_tmp_fw_port\" | grep -e \"$_tmp_fw_proto\"" > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
eval $_fw_cmd $_fw_rule
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Can't set firewall rules, exiting..."
|
||||
exit 1
|
||||
else
|
||||
if [ -n "$_fw_enable_rules" ]; then
|
||||
log "Running \"$_fw_enable_rules\""
|
||||
$_fw_enable_rules > /dev/null
|
||||
fi
|
||||
log "$_fw_cmd rule with $_fw_rule set."
|
||||
fi
|
||||
else
|
||||
log "$_fw_cmd rule exists."
|
||||
fi
|
||||
else
|
||||
log "There are no fw found..."
|
||||
fi
|
||||
}
|
||||
function enable_init(){
|
||||
_initctrl=""
|
||||
_init_suffix=""
|
||||
_service=$1
|
||||
case $DistroBasedOn in
|
||||
"debian")
|
||||
_initctrl="update-rc.d"
|
||||
_init_suffix="defaults"
|
||||
;;
|
||||
*)
|
||||
_initctrl="chkconfig"
|
||||
_init_suffix="on"
|
||||
;;
|
||||
esac
|
||||
$_initctrl $_service $_init_suffix
|
||||
if [ $? -ne 0 ]; then
|
||||
log "$_initctrl $_service $_init_suffix - fails!"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function restart_service(){
|
||||
_service=$1
|
||||
service $_service restart > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Can't start $_service service!"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function package_renamer(){
|
||||
_pkg=$1
|
||||
case $DistroBasedOn in
|
||||
"debian")
|
||||
_pkg=$(echo $_pkg | sed 's/-devel$/-dev/')
|
||||
;;
|
||||
*)
|
||||
_pkg=$(echo $_pkg | sed 's/-dev$/-devel/')
|
||||
;;
|
||||
esac
|
||||
echo $_pkg
|
||||
}
|
|
@ -0,0 +1,11 @@

INIT_ETCD_OPTS="--name %%NAME%% --initial-cluster-state existing --initial-cluster %%CLUSTER_CONFIG%% --data-dir /var/lib/etcd --snapshot-count 1000 --listen-peer-urls http://%%IP%%:7001,http://127.0.0.1:7001 --listen-client-urls http://%%IP%%:4001,http://127.0.0.1:4001 --initial-advertise-peer-urls http://%%IP%%:7001 --advertise-client-urls http://%%IP%%:4001,http://127.0.0.1:4001"

EXISTING_ETCD_OPTS="--name %%NAME%% --data-dir /var/lib/etcd --snapshot-count 1000 --listen-peer-urls http://%%IP%%:7001,http://127.0.0.1:7001 --listen-client-urls http://%%IP%%:4001,http://127.0.0.1:4001 --advertise-client-urls http://%%IP%%:4001,http://127.0.0.1:4001"

if [ -d /var/lib/etcd/wal/ ]
then
    # This will allow to restart etcd service properly to pick up properties from other peers
    ETCD_OPTS=$EXISTING_ETCD_OPTS
else
    ETCD_OPTS=$INIT_ETCD_OPTS
fi
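The %%NAME%%, %%IP%% and %%CLUSTER_CONFIG%% placeholders are filled in by the etcd member setup script; a rendered first-boot value, with hypothetical node names and addresses, would look roughly like:

    INIT_ETCD_OPTS="--name gateway-1 --initial-cluster-state existing \
      --initial-cluster master-1=http://10.0.0.5:7001,gateway-1=http://10.0.0.9:7001 \
      --data-dir /var/lib/etcd --snapshot-count 1000 \
      --listen-peer-urls http://10.0.0.9:7001,http://127.0.0.1:7001 \
      --listen-client-urls http://10.0.0.9:4001,http://127.0.0.1:4001 \
      --initial-advertise-peer-urls http://10.0.0.9:7001 \
      --advertise-client-urls http://10.0.0.9:4001,http://127.0.0.1:4001"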
@ -0,0 +1,7 @@
|
|||
# flannel Upstart and SysVinit configuration file
|
||||
|
||||
# Customize kube-apiserver binary location
|
||||
# FLANNEL="/opt/bin/flanneld"
|
||||
|
||||
# Use FLANNEL_OPTS to modify the start/restart options
|
||||
FLANNEL_OPTS=""
|
|
@ -0,0 +1,4 @@
|
|||
# Set ENABLED to 1 if you want the init script to start haproxy.
|
||||
ENABLED=1
|
||||
# Add extra flags here.
|
||||
#EXTRAOPTS="-de -m 16"
|
|
@ -5,9 +5,9 @@
|
|||
|
||||
# Use KUBE_APISERVER_OPTS to modify the start/restart options
|
||||
KUBE_APISERVER_OPTS="--address=0.0.0.0 \
|
||||
--port=8080 \
|
||||
--etcd_servers=http://127.0.0.1:4001 \
|
||||
--logtostderr=false \
|
||||
--portal_net=11.1.1.0/24 --log_dir=/var/log/kubernetes"
|
||||
--port=8080 \
|
||||
--etcd_servers=http://127.0.0.1:4001 \
|
||||
--logtostderr=false \
|
||||
--portal_net=11.1.0.0/16 --log_dir=/var/log/kubernetes"
|
||||
|
||||
# Add more envionrment settings used by kube-apiserver here
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
# Use KUBE_CONTROLLER_MANAGER_OPTS to modify the start/restart options
|
||||
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
|
||||
--machines= \
|
||||
--logtostderr=false --log_dir=/var/log/kubernetes"
|
||||
--machines= \
|
||||
--logtostderr=false --log_dir=/var/log/kubernetes"
|
||||
|
||||
# Add more envionrment settings used by kube-controller-manager here
|
|
@ -5,6 +5,6 @@
|
|||
|
||||
# Use KUBE_PROXY_OPTS to modify the start/restart options
|
||||
KUBE_PROXY_OPTS="--etcd_servers=http://127.0.0.1:4001 \
|
||||
--logtostderr=false --master=http://%%MASTER_IP%%:8080 --log_dir=/var/log/kubernetes"
|
||||
--logtostderr=false --master=http://%%MASTER_IP%%:8080 --log_dir=/var/log/kubernetes"
|
||||
|
||||
# Add more envionrment settings used by kube-apiserver here
|
|
@ -5,6 +5,6 @@
|
|||
|
||||
# Use KUBE_SCHEDULER_OPTS to modify the start/restart options
|
||||
KUBE_SCHEDULER_OPTS="--logtostderr=false --log_dir=/var/log/kubernetes \
|
||||
--master=%%MASTER_IP%%:8080 --address=%%IP%%"
|
||||
--master=%%MASTER_IP%%:8080 --address=%%IP%%"
|
||||
|
||||
# Add more envionrment settings used by kube-scheduler here
|
|
@ -5,10 +5,10 @@
|
|||
|
||||
# Use KUBELET_OPTS to modify the start/restart options
|
||||
KUBELET_OPTS="--address=%%IP%% \
|
||||
--port=10250 \
|
||||
--hostname_override=%%IP%% \
|
||||
--etcd_servers=http://127.0.0.1:4001 \
|
||||
--logtostderr=false \
|
||||
--log_dir=/var/log/kubernetes"
|
||||
--port=10250 \
|
||||
--hostname_override=%%IP%% \
|
||||
--api_servers=%%MASTER_IP%%:8080 \
|
||||
--logtostderr=false \
|
||||
--log_dir=/var/log/kubernetes"
|
||||
|
||||
# Add more envionrment settings used by kube-scheduler here
|
|
@ -1,2 +0,0 @@
|
|||
#!/bin/sh
|
||||
curl -s http://169.254.169.254/latest/meta-data/local-ipv4
|
|
@ -0,0 +1,3 @@

#!/bin/bash

/opt/bin/kubectl get service $1 -t '{{.portalIP}}' -o template
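A minimal usage sketch, assuming a hypothetical service name; the printed address comes out of the cluster's portal network (11.1.1.1/24 by default in KubernetesMasterNode):

    ./getServiceIp.sh svc-ab12cd
    # prints something like 11.1.1.87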
@ -0,0 +1,18 @@

#!/bin/bash

# $1 - MASTER_IP

cp -f haproxy.toml /etc/confd/conf.d/
cp -f haproxy.tmpl /etc/confd/templates/

/usr/local/bin/confd -onetime -backend etcd -node $1:4001

cp -f default_scripts/haproxy /etc/default/

sed -i.bkp "s/%%MASTER_NODE%%/$1/g" init_conf/confd.conf
cp -f init_conf/confd.conf /etc/init/

service confd start
service haproxy start

sleep 1
@ -0,0 +1,17 @@

defaults
    option redispatch
    retries 5
    maxconn 2000
    contimeout 5000
    clitimeout 50000
    srvtimeout 50000

{{range $svc := ls "/registry/services/endpoints/default"}}
{{$se := printf "/registry/services/endpoints/default/%s" $svc }}{{$ss := printf "/registry/services/specs/default/%s" $svc }}
{{$seKey := get $se}}{{$ssKey := get $ss}}{{$seJson := json $seKey.Value}}{{$ssJson := json $ssKey.Value}}
listen {{base $svc}} 0.0.0.0:{{$ssJson.port}}
    mode tcp
    balance leastconn
    {{range $index, $endpoint := $seJson.endpoints}}
    server svr{{$index}} {{$endpoint}}{{end}}
{{end}}
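For a single registered service, confd would expand this template into an haproxy.cfg roughly like the following sketch (service name, port and endpoint addresses are illustrative):

    defaults
        option redispatch
        retries 5
        maxconn 2000
        contimeout 5000
        clitimeout 50000
        srvtimeout 50000

    listen svc-ab12cd 0.0.0.0:1025
        mode tcp
        balance leastconn
        server svr0 10.200.1.4:80
        server svr1 10.200.2.7:80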
@ -0,0 +1,7 @@

[template]
src = "haproxy.tmpl"
dest = "/etc/haproxy/haproxy.cfg"
keys = [
  "/registry/services"
]
reload_cmd = "/usr/sbin/service haproxy reload"
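To verify the template and the watched etcd keys, the rendering can be run once by hand with the same invocation haproxy-setup.sh uses (master address is illustrative):

    /usr/local/bin/confd -onetime -backend etcd -node 10.0.0.5:4001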
@ -0,0 +1,11 @@

description "confd"

respawn

start on started etcd
stop on stopping etcd

exec /usr/local/bin/confd -backend etcd -node %%MASTER_NODE%%:4001 -interval 15

start on runlevel [235]
stop on runlevel [016]
@ -0,0 +1,30 @@
|
|||
description "Etcd service"
|
||||
author "@jainvipin"
|
||||
|
||||
respawn
|
||||
|
||||
pre-start script
|
||||
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
|
||||
ETCD=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $ETCD ]; then
|
||||
exit 0
|
||||
fi
|
||||
echo "$ETCD binary not found, exiting"
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
ETCD=/opt/bin/$UPSTART_JOB
|
||||
ETCD_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$ETCD" $ETCD_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,32 @@
|
|||
description "Flannel service"
|
||||
author "@chenxingyu"
|
||||
|
||||
# respawn
|
||||
|
||||
# start in conjunction with etcd
|
||||
start on started etcd
|
||||
stop on stopping etcd
|
||||
|
||||
pre-start script
|
||||
FLANNEL=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $FLANNEL ]; then
|
||||
exit 0
|
||||
fi
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
FLANNEL=/opt/bin/$UPSTART_JOB
|
||||
FLANNEL_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$FLANNEL" $FLANNEL_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,33 @@
|
|||
description "Kube-Apiserver service"
|
||||
author "@jainvipin"
|
||||
|
||||
# respawn
|
||||
|
||||
# start in conjunction with etcd
|
||||
start on started etcd
|
||||
stop on stopping etcd
|
||||
|
||||
pre-start script
|
||||
# see also https://github.com/jainvipin/kubernetes-start
|
||||
KUBE_APISERVER=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $KUBE_APISERVER ]; then
|
||||
exit 0
|
||||
fi
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
KUBE_APISERVER=/opt/bin/$UPSTART_JOB
|
||||
KUBE_APISERVER_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$KUBE_APISERVER" $KUBE_APISERVER_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,33 @@
|
|||
description "Kube-Controller-Manager service"
|
||||
author "@jainvipin"
|
||||
|
||||
# respawn
|
||||
|
||||
# start in conjunction with etcd
|
||||
start on started etcd
|
||||
stop on stopping etcd
|
||||
|
||||
pre-start script
|
||||
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
|
||||
KUBE_CONTROLLER_MANAGER=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $KUBE_CONTROLLER_MANAGER ]; then
|
||||
exit 0
|
||||
fi
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
KUBE_CONTROLLER_MANAGER=/opt/bin/$UPSTART_JOB
|
||||
KUBE_CONTROLLER_MANAGER_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$KUBE_CONTROLLER_MANAGER" $KUBE_CONTROLLER_MANAGER_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,33 @@
|
|||
description "Kube-Proxy service"
|
||||
author "@jainvipin"
|
||||
|
||||
# respawn
|
||||
|
||||
# start in conjunction with etcd
|
||||
start on started etcd
|
||||
stop on stopping etcd
|
||||
|
||||
pre-start script
|
||||
# see also https://github.com/jainvipin/kubernetes-start
|
||||
KUBE_PROXY=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $KUBE_PROXY ]; then
|
||||
exit 0
|
||||
fi
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
KUBE_PROXY=/opt/bin/$UPSTART_JOB
|
||||
KUBE_PROXY_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$KUBE_PROXY" $KUBE_PROXY_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,33 @@
|
|||
description "Kube-Scheduler service"
|
||||
author "@jainvipin"
|
||||
|
||||
# respawn
|
||||
|
||||
# start in conjunction with etcd
|
||||
start on started etcd
|
||||
stop on stopping etcd
|
||||
|
||||
pre-start script
|
||||
# see also https://github.com/jainvipin/kubernetes-start
|
||||
KUBE_SCHEDULER=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $KUBE_SCHEDULER ]; then
|
||||
exit 0
|
||||
fi
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
KUBE_SCHEDULER=/opt/bin/$UPSTART_JOB
|
||||
KUBE_SCHEDULER_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$KUBE_SCHEDULER" $KUBE_SCHEDULER_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,33 @@
|
|||
description "Kubelet service"
|
||||
author "@jainvipin"
|
||||
|
||||
# respawn
|
||||
|
||||
# start in conjunction with etcd
|
||||
start on started etcd
|
||||
stop on stopping etcd
|
||||
|
||||
pre-start script
|
||||
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
|
||||
KUBELET=/opt/bin/$UPSTART_JOB
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
if [ -f $KUBELET ]; then
|
||||
exit 0
|
||||
fi
|
||||
exit 22
|
||||
end script
|
||||
|
||||
script
|
||||
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
|
||||
KUBELET=/opt/bin/$UPSTART_JOB
|
||||
KUBELET_OPTS=""
|
||||
if [ -f /etc/default/$UPSTART_JOB ]; then
|
||||
. /etc/default/$UPSTART_JOB
|
||||
fi
|
||||
exec "$KUBELET" $KUBELET_OPTS
|
||||
end script
|
||||
|
||||
start on runlevel [235]
|
||||
stop on runlevel [016]
|
|
@ -0,0 +1,100 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: etcd
|
||||
# Required-Start: $docker
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start distributed key/value pair service
|
||||
# Description:
|
||||
# http://www.github.com/coreos/etcd
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/etcd)
|
||||
ETCD=/opt/bin/$BASE
|
||||
# This is the pid file managed by etcd itself
|
||||
ETCD_PIDFILE=/var/run/$BASE.pid
|
||||
ETCD_LOGFILE=/var/log/$BASE.log
|
||||
ETCD_OPTS=""
|
||||
ETCD_DESC="Etcd"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if false && [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$ETCD_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check etcd is present
|
||||
if [ ! -x $ETCD ]; then
|
||||
log_failure_msg "$ETCD not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$ETCD_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
ETCD_START="start-stop-daemon \
|
||||
--start \
|
||||
--background \
|
||||
--quiet \
|
||||
--exec $ETCD \
|
||||
--make-pidfile \
|
||||
--pidfile $ETCD_PIDFILE \
|
||||
-- $ETCD_OPTS \
|
||||
>> $ETCD_LOGFILE 2>&1"
|
||||
|
||||
ETCD_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $ETCD_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $ETCD_DESC: $BASE"
|
||||
$ETCD_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $ETCD_DESC: $BASE"
|
||||
$ETCD_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_begin_msg "Restarting $ETCD_DESC: $BASE"
|
||||
$ETCD_STOP
|
||||
$ETCD_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$ETCD_PIDFILE" "$ETCD" "$ETCD_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
|
@ -0,0 +1,99 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: flannel
|
||||
# Required-Start: $etcd
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start flannel networking service
|
||||
# Description:
|
||||
# https://github.com/coreos/flannel
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/flannel)
|
||||
FLANNEL=/opt/bin/$BASE
|
||||
# This is the pid file managed by kube-apiserver itself
|
||||
FLANNEL_PIDFILE=/var/run/$BASE.pid
|
||||
FLANNEL_LOGFILE=/var/log/$BASE.log
|
||||
FLANNEL_OPTS=""
|
||||
FLANNEL_DESC="Flannel"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$FLANNEL_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check flanneld is present
|
||||
if [ ! -x $FLANNEL ]; then
|
||||
log_failure_msg "$FLANNEL is not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$FLANNEL_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
FLANNEL_START="start-stop-daemon \
|
||||
--start \
|
||||
--background \
|
||||
--quiet \
|
||||
--exec $FLANNEL \
|
||||
--make-pidfile --pidfile $FLANNEL_PIDFILE \
|
||||
-- $FLANNEL_OPTS \
|
||||
>> $FLANNEL_LOGFILE 2>&1"
|
||||
|
||||
FLANNEL_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $FLANNEL_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $FLANNEL_DESC: $BASE"
|
||||
$FLANNEL_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
|
||||
$FLANNEL_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
|
||||
$FLANNEL_STOP
|
||||
$FLANNEL_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$FLANNEL_PIDFILE" "$FLANNEL" "$FLANNEL_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
|
@ -0,0 +1,99 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kube-apiserver
|
||||
# Required-Start: $etcd
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start distrubted key/value pair service
|
||||
# Description:
|
||||
# http://www.github.com/GoogleCloudPlatform/Kubernetes
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/kube-apiserver)
|
||||
KUBE_APISERVER=/opt/bin/$BASE
|
||||
# This is the pid file managed by kube-apiserver itself
|
||||
KUBE_APISERVER_PIDFILE=/var/run/$BASE.pid
|
||||
KUBE_APISERVER_LOGFILE=/var/log/$BASE.log
|
||||
KUBE_APISERVER_OPTS=""
|
||||
KUBE_APISERVER_DESC="Kube-Apiserver"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$KUBE_APISERVER_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check kube-apiserver is present
|
||||
if [ ! -x $KUBE_APISERVER ]; then
|
||||
log_failure_msg "$KUBE_APISERVER not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$KUBE_APISERVER_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
KUBE_APISERVER_START="start-stop-daemon \
|
||||
--start \
|
||||
--background \
|
||||
--quiet \
|
||||
--exec $KUBE_APISERVER \
|
||||
--make-pidfile --pidfile $KUBE_APISERVER_PIDFILE \
|
||||
-- $KUBE_APISERVER_OPTS \
|
||||
>> $KUBE_APISERVER_LOGFILE 2>&1"
|
||||
|
||||
KUBE_APISERVER_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $KUBE_APISERVER_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $KUBE_APISERVER_DESC: $BASE"
|
||||
$KUBE_APISERVER_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBE_APISERVER_DESC: $BASE"
|
||||
$KUBE_APISERVER_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBE_APISERVER_DESC: $BASE"
|
||||
$KUBE_APISERVER_STOP
|
||||
$KUBE_APISERVER_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$KUBE_APISERVER_PIDFILE" "$KUBE_APISERVER" "$KUBE_APISERVER_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
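
The init script itself carries no daemon flags; whatever /etc/default/kube-apiserver exports as KUBE_APISERVER_OPTS is appended after the "--" separator of start-stop-daemon. A minimal sketch of such a defaults file (flag names are illustrative and depend on the Kubernetes release in use; the packaged default_scripts/kube-apiserver provides the real values):

    # /etc/default/kube-apiserver (hypothetical example)
    # Everything in KUBE_APISERVER_OPTS is passed straight to the daemon.
    KUBE_APISERVER_OPTS="--address=0.0.0.0 --port=8080 \
        --etcd_servers=http://127.0.0.1:4001 \
        --portal_net=11.1.1.1/24"
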
|
|
@ -0,0 +1,99 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kube-controller-manager
|
||||
# Required-Start: $etcd
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start kube-controller-manager service
|
||||
# Description:
|
||||
# http://www.github.com/GoogleCloudPlatform/Kubernetes
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/kube-controller-manager)
|
||||
KUBE_CONTROLLER_MANAGER=/opt/bin/$BASE
|
||||
# This is the pid file managed by kube-controller-manager itself
|
||||
KUBE_CONTROLLER_MANAGER_PIDFILE=/var/run/$BASE.pid
|
||||
KUBE_CONTROLLER_MANAGER_LOGFILE=/var/log/$BASE.log
|
||||
KUBE_CONTROLLER_MANAGER_OPTS=""
|
||||
KUBE_CONTROLLER_MANAGER_DESC="Kube-Controller-Manager"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$KUBE_CONTROLLER_MANAGER_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check kube-controller-manager is present
|
||||
if [ ! -x $KUBE_CONTROLLER_MANAGER ]; then
|
||||
log_failure_msg "$KUBE_CONTROLLER_MANAGER not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$KUBE_CONTROLLER_MANAGER_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
KUBE_CONTROLLER_MANAGER_START="start-stop-daemon
|
||||
--start --background \
|
||||
--quiet \
|
||||
--exec $KUBE_CONTROLLER_MANAGER \
|
||||
--make-pidfile \
|
||||
--pidfile $KUBE_CONTROLLER_MANAGER_PIDFILE \
|
||||
-- $KUBE_CONTROLLER_MANAGER_OPTS \
|
||||
>> "$KUBE_CONTROLLER_MANAGER_LOGFILE" 2>&1
|
||||
|
||||
KUBE_CONTROLLER_MANAGER_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $KUBE_CONTROLLER_MANAGER_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $KUBE_CONTROLLER_MANAGER_DESC: $BASE"
|
||||
$KUBE_CONTROLLER_MANAGER_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBE_CONTROLLER_MANAGER_DESC: $BASE"
|
||||
$KUBE_CONTROLLER_MANAGER_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_daemon_message "Restarting $KUBE_CONTROLLER_MANAGER" || true
|
||||
$KUBE_CONTROLLER_MANAGER_STOP
|
||||
$KUBE_CONTROLLER_MANAGER_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$KUBE_CONTROLLER_MANAGER_PIDFILE" "$KUBE_CONTROLLER_MANAGER" "$KUBE_CONTROLLER_MANAGER_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
|
@ -0,0 +1,99 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kube-proxy
|
||||
# Required-Start: $etcd
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start kube-proxy service
|
||||
# Description:
|
||||
# http://www.github.com/GoogleCloudPlatform/Kubernetes
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/kube-proxy)
|
||||
KUBE_PROXY=/opt/bin/$BASE
|
||||
# This is the pid file managed by kube-proxy itself
|
||||
KUBE_PROXY_PIDFILE=/var/run/$BASE.pid
|
||||
KUBE_PROXY_LOGFILE=/var/log/$BASE.log
|
||||
KUBE_PROXY_OPTS=""
|
||||
KUBE_PROXY_DESC="Kube-Proxy"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$KUBE_PROXY_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check kube-proxy is present
|
||||
if [ ! -x $KUBE_PROXY ]; then
|
||||
log_failure_msg "$KUBE_PROXY not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$KUBE_PROXY_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
KUBE_PROXY_START="start-stop-daemon \
|
||||
--start \
|
||||
--background \
|
||||
--quiet \
|
||||
--exec $KUBE_PROXY \
|
||||
--make-pidfile --pidfile $KUBE_PROXY_PIDFILE \
|
||||
-- $KUBE_PROXY_OPTS \
|
||||
>> $KUBE_PROXY_LOGFILE 2>&1"
|
||||
|
||||
KUBE_PROXY_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $KUBE_PROXY_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $KUBE_PROXY_DESC: $BASE"
|
||||
$KUBE_PROXY_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBE_PROXY_DESC: $BASE"
|
||||
$KUBE_PROXY_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBE_PROXY_DESC: $BASE"
|
||||
$KUBE_PROXY_STOP
|
||||
$KUBE_PROXY_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$KUBE_PROXY_PIDFILE" "$KUBE_PROXY" "$KUBE_PROXY_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
|
@ -0,0 +1,99 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kube-scheduler
|
||||
# Required-Start: $etcd
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start kube-scheduler service
|
||||
# Description:
|
||||
# http://www.github.com/GoogleCloudPlatform/Kubernetes
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/kube-scheduler)
|
||||
KUBE_SCHEDULER=/opt/bin/$BASE
|
||||
# This is the pid file managed by kube-scheduler itself
|
||||
KUBE_SCHEDULER_PIDFILE=/var/run/$BASE.pid
|
||||
KUBE_SCHEDULER_LOGFILE=/var/log/$BASE.log
|
||||
KUBE_SCHEDULER_OPTS=""
|
||||
KUBE_SCHEDULER_DESC="Kube-Scheduler"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$KUBE_SCHEDULER_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check kube-scheduler is present
|
||||
if [ ! -x $KUBE_SCHEDULER ]; then
|
||||
log_failure_msg "$KUBE_SCHEDULER not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$KUBE_SCHEDULER_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
KUBE_SCHEDULER_START="start-stop-daemon \
|
||||
--start \
|
||||
--background \
|
||||
--quiet \
|
||||
--exec $KUBE_SCHEDULER \
|
||||
--make-pidfile --pidfile $KUBE_SCHEDULER_PIDFILE \
|
||||
-- $KUBE_SCHEDULER_OPTS \
|
||||
>> $KUBE_SCHEDULER_LOGFILE 2>&1"
|
||||
|
||||
KUBE_SCHEDULER_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $KUBE_SCHEDULER_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $KUBE_SCHEDULER_DESC: $BASE"
|
||||
$KUBE_SCHEDULER_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBE_SCHEDULER_DESC: $BASE"
|
||||
$KUBE_SCHEDULER_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_begin_msg "Restarting $KUBE_SCHEDULER_DESC: $BASE"
|
||||
$KUBE_SCHEDULER_STOP
|
||||
$KUBE_SCHEDULER_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$KUBE_SCHEDULER_PIDFILE" "$KUBE_SCHEDULER" "$KUBE_SCHEDULER_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
|
@ -0,0 +1,99 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: kubelet
|
||||
# Required-Start: $etcd
|
||||
# Required-Stop:
|
||||
# Should-Start:
|
||||
# Should-Stop:
|
||||
# Default-Start:
|
||||
# Default-Stop:
|
||||
# Short-Description: Start kubelet service
|
||||
# Description:
|
||||
# http://www.github.com/GoogleCloudPlatform/Kubernetes
|
||||
### END INIT INFO
|
||||
|
||||
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
|
||||
|
||||
BASE=$(basename $0)
|
||||
|
||||
# modify these in /etc/default/$BASE (/etc/default/kubelet)
|
||||
KUBELET=/opt/bin/$BASE
|
||||
# This is the pid file managed by kubelet itself
|
||||
KUBELET_PIDFILE=/var/run/$BASE.pid
|
||||
KUBELET_LOGFILE=/var/log/$BASE.log
|
||||
KUBELET_OPTS=""
|
||||
KUBELET_DESC="Kube-Apiserver"
|
||||
|
||||
# Get lsb functions
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
if [ -f /etc/default/$BASE ]; then
|
||||
. /etc/default/$BASE
|
||||
fi
|
||||
|
||||
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
|
||||
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
|
||||
log_failure_msg "$KUBELET_DESC is managed via upstart, try using service $BASE $1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check kubelet is present
|
||||
if [ ! -x $KUBELET ]; then
|
||||
log_failure_msg "$KUBELET not present or not executable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fail_unless_root() {
|
||||
if [ "$(id -u)" != '0' ]; then
|
||||
log_failure_msg "$KUBELET_DESC must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
KUBELET_START="start-stop-daemon \
|
||||
--start \
|
||||
--background \
|
||||
--quiet \
|
||||
--exec $KUBELET \
|
||||
--make-pidfile --pidfile $KUBELET_PIDFILE \
|
||||
-- $KUBELET_OPTS \
|
||||
>> $KUBELET_LOGFILE 2>&1"
|
||||
|
||||
KUBELET_STOP="start-stop-daemon \
|
||||
--stop \
|
||||
--pidfile $KUBELET_PIDFILE"
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
fail_unless_root
|
||||
log_begin_msg "Starting $KUBELET_DESC: $BASE"
|
||||
$KUBELET_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
stop)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBELET_DESC: $BASE"
|
||||
$KUBELET_STOP
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
restart | force-reload)
|
||||
fail_unless_root
|
||||
log_begin_msg "Stopping $KUBELET_DESC: $BASE"
|
||||
$KUBELET_STOP
|
||||
$KUBELET_START
|
||||
log_end_msg $?
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p "$KUBELET_PIDFILE" "$KUBELET" "$KUBELET_DESC"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
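
All five service wrappers follow the same start-stop-daemon pattern, so a quick post-start check looks the same for each of them. A sketch for the kubelet, assuming the script is installed as /etc/init.d/kubelet the way the node setup scripts below install it:

    # Start via the wrapper, then confirm the pid file and the redirected log
    service kubelet start
    service kubelet status
    cat /var/run/kubelet.pid
    tail -n 20 /var/log/kubelet.log
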
|
|
@ -1,142 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
INSTALLER_OPTS=""
|
||||
UNINSTALLER_OPTS=""
|
||||
PMGR=""
|
||||
PMGR_LIST_OPTS=""
|
||||
|
||||
function include(){
|
||||
curr_dir=$(cd $(dirname "$0") && pwd)
|
||||
inc_file_path=$curr_dir/$1
|
||||
if [ -f "$inc_file_path" ]; then
|
||||
. $inc_file_path
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function set_install_options(){
|
||||
case $1 in
|
||||
apt-get )
|
||||
INSTALLER_OPTS="-y -q install"
|
||||
UNINSTALLER_OPTS="-y -q remove"
|
||||
PMGR="dpkg"
|
||||
PMGR_LIST_OPTS="-s"
|
||||
;;
|
||||
yum )
|
||||
INSTALLER_OPTS="--assumeyes install"
|
||||
UNINSTALLER_OPTS="--assumeyes erase"
|
||||
PMGR="rpm"
|
||||
PMGR_LIST_OPTS="-q"
|
||||
;;
|
||||
urpm* )
|
||||
INSTALLER_OPTS="-y"
|
||||
UNINSTALLER_OPTS=""
|
||||
PMGR="rpm"
|
||||
PMGR_LIST_OPTS="-q"
|
||||
;;
|
||||
zypper )
|
||||
INSTALLER_OPTS="install"
|
||||
UNINSTALLER_OPTS="remove --quiet"
|
||||
PMGR="rpm"
|
||||
PMGR_LIST_OPTS="-q"
|
||||
;;
|
||||
pip )
|
||||
INSTALLER_OPTS="install"
|
||||
UNINSTALLER_OPTS="uninstall --yes"
|
||||
find_pip
|
||||
PACKAGER=$PIPCMD
|
||||
PMGR=$PIPCMD
|
||||
PMGR_LIST_OPTS="freeze | grep"
|
||||
;;
|
||||
* )
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
PACKAGER=$(which $1)
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Can't find \"$1\", exiting!"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
function package_install(){
|
||||
PKG=$1
|
||||
eval "$PMGR $PMGR_LIST_OPTS $PKG" > /dev/null 2>&1
|
||||
if [ $? -eq 0 ]; then
|
||||
log "\"$PKG\" already installed"
|
||||
else
|
||||
log "Installing \"$PKG\" ..."
|
||||
$PACKAGER $INSTALLER_OPTS $PKG > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
log "\"$PKG\" installation fails, exiting!"
|
||||
exit 1
|
||||
else
|
||||
log "\t\t...success"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
function package_uninstall(){
|
||||
PKG=$1
|
||||
eval "$PMGR $PMGR_LIST_OPTS $PKG" > /dev/null 2>&1
|
||||
if [ $? -eq 1 ]; then
|
||||
log "\"$PKG\" not installed"
|
||||
else
|
||||
log "Unnstalling \"$PKG\" ..."
|
||||
$PACKAGER $UNINSTALLER_OPTS $PKG > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
log "\"$PKG\" uninstallation fails, exiting!"
|
||||
exit 1
|
||||
else
|
||||
log "\t\t...success"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
function run_install(){
|
||||
for PKG in $@
|
||||
do
|
||||
package_install $PKG
|
||||
done
|
||||
}
|
||||
function run_uninstall(){
|
||||
for PKG in $@
|
||||
do
|
||||
package_uninstall $PKG
|
||||
done
|
||||
}
|
||||
# Main workflow
|
||||
include "common.sh"
|
||||
if [ $# -eq 0 ]; then
|
||||
script=$(basename $0)
|
||||
echo -e "Usage:\n\t* install packages -- ./$script -p package_manager -i package0 [packageN]\n\t* remove packages -- ./$script -p package_manager -r package0 [packageN]"
|
||||
exit 1
|
||||
fi
|
||||
Packager=''
|
||||
get_os
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Unsupported *nix version ($DistroBasedOn - $DIST/$PSUEDONAME/$REV/$MACH)"
|
||||
exit 1
|
||||
fi
|
||||
while getopts ":p:i:r:" opt ; do
|
||||
case "$opt" in
|
||||
p)
|
||||
if [[ "$OPTARG" != sys ]]; then
|
||||
Packager=$OPTARG
|
||||
fi
|
||||
set_install_options $Packager
|
||||
;;
|
||||
i)
|
||||
n=$OPTARG
|
||||
run_install $(collect_args $n $@)
|
||||
break;
|
||||
;;
|
||||
r)
|
||||
n=$OPTARG
|
||||
run_uninstall $(collect_args $n $@)
|
||||
break;
|
||||
;;
|
||||
\?)
|
||||
log "Invalid option: -$OPTARG" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND-1))
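
Putting the option parsing together, the helper is driven by a package manager name (-p) followed by either an install (-i) or a remove (-r) list, exactly as the usage text above describes. Hypothetical invocations (the script name and package names are illustrative):

    # Install a couple of packages with apt-get, then remove one again
    ./installer.sh -p apt-get -i bridge-utils curl
    ./installer.sh -p apt-get -r curl
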
|
|
@ -7,6 +7,4 @@
|
|||
sed -i.bkp "s/%%NAME%%/$1/g" minion-node.json
|
||||
sed -i.bkp "s/%%IP%%/$2/g" minion-node.json
|
||||
|
||||
/opt/bin/kubectl -f create minion-node.json
|
||||
|
||||
/opt/bin/kubectl delete node 127.0.0.1
|
||||
/opt/bin/kubectl create -f minion-node.json
|
||||
|
|
|
@ -6,9 +6,13 @@
|
|||
service etcd stop
|
||||
mkdir /var/lib/etcd
|
||||
|
||||
sed -i.bkp "s/%%NAME%%/$1/g" master-etcd-config.conf
|
||||
sed -i.bkp "s/%%IP%%/$2/g" master-etcd-config.conf
|
||||
sed -i.bkp "s/%%NAME%%/$1/g" default_scripts/etcd-master
|
||||
sed -i.bkp "s/%%IP%%/$2/g" default_scripts/etcd-master
|
||||
|
||||
cp -f master-etcd-config.conf /etc/default/etcd
|
||||
cp -f default_scripts/etcd-master /etc/default/etcd
|
||||
cp init_conf/etcd.conf /etc/init/
|
||||
|
||||
chmod +x initd_scripts/*
|
||||
cp initd_scripts/etcd /etc/init.d/
|
||||
service etcd start
|
||||
sleep 5
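
Once etcd has been restarted with the generated /etc/default/etcd, cluster membership can be checked from the master. A sketch, assuming the etcd 2.x etcdctl that the package places under /opt/bin:

    # Optional sanity check after "service etcd start"
    /opt/bin/etcdctl member list
    /opt/bin/etcdctl cluster-health
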
|
|
@ -2,7 +2,6 @@
|
|||
|
||||
# $1 - NAME
|
||||
# $2 - IP
|
||||
# $3 - PORTAL_NET
|
||||
|
||||
service kube-proxy stop
|
||||
service kube-scheduler stop
|
||||
|
@ -17,29 +16,26 @@ service kube-apiserver stop
|
|||
mkdir /var/log/kubernetes
|
||||
mkdir -p /var/run/murano-kubernetes
|
||||
|
||||
#Prepare service configs
|
||||
#sed -i.bkp "s/%%PORTAL_NET%%/$3/g" kube-apiserver.conf
|
||||
sed -i.bkp "s/%%MASTER_IP%%/$2/g" default_scripts/kube-scheduler
|
||||
sed -i.bkp "s/%%IP%%/$2/g" default_scripts/kube-scheduler
|
||||
|
||||
#sed -i.bkp "s/%%MASTER_IP%%/$2/g" kube-proxy.conf
|
||||
cp -f default_scripts/kube-apiserver /etc/default/
|
||||
cp -f default_scripts/kube-scheduler /etc/default/
|
||||
cp -f default_scripts/kube-controller-manager /etc/default/
|
||||
|
||||
sed -i.bkp "s/%%MASTER_IP%%/$2/g" kube-scheduler.conf
|
||||
sed -i.bkp "s/%%IP%%/$2/g" kube-scheduler.conf
|
||||
cp init_conf/kube-apiserver.conf /etc/init/
|
||||
cp init_conf/kube-controller-manager.conf /etc/init/
|
||||
cp init_conf/kube-scheduler.conf /etc/init/
|
||||
|
||||
#sed -i.bkp "s/%%IP%%/$2/g" kubelet.conf
|
||||
|
||||
|
||||
cp -f kube-apiserver.conf /etc/default/kube-apiserver
|
||||
#cp -f kube-proxy.conf /etc/default/kube-proxy
|
||||
cp -f kube-scheduler.conf /etc/default/kube-scheduler
|
||||
#cp -f kubelet.conf /etc/default/kubelet
|
||||
cp -f kube-controller-manager.conf /etc/default/kube-controller-manager
|
||||
chmod +x initd_scripts/*
|
||||
cp initd_scripts/kube-apiserver /etc/init.d/
|
||||
cp initd_scripts/kube-controller-manager /etc/init.d/
|
||||
cp initd_scripts/kube-scheduler /etc/init.d/
|
||||
|
||||
service kube-apiserver start
|
||||
service kube-scheduler start
|
||||
service kube-controller-manager start
|
||||
#service kubelet start
|
||||
#service kube-proxy start
|
||||
|
||||
/opt/bin/kubectl delete node 127.0.0.1
|
||||
|
||||
sleep 1
|
||||
sleep 1
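
After the init configs are copied and the three master services are started, their state can be confirmed through the same init wrappers defined earlier in this change:

    # Each wrapper exposes a status action backed by status_of_proc
    service kube-apiserver status
    service kube-scheduler status
    service kube-controller-manager status
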
|
|
@ -1 +0,0 @@
|
|||
INIT_ETCD_OPTS="--name %%NAME%% --initial-cluster-state existing --initial-cluster
|
|
@ -1,11 +0,0 @@
|
|||
--data-dir /var/lib/etcd --snapshot-count 1000 --listen-peer-urls http://%%IP%%:7001,http://127.0.0.1:7001 --listen-client-urls http://%%IP%%:4001,http://127.0.0.1:4001 --initial-advertise-peer-urls http://%%IP%%:7001 --advertise-client-urls http://%%IP%%:4001,http://127.0.0.1:4001"
|
||||
|
||||
EXISTING_ETCD_OPTS="--name %%NAME%% --data-dir /var/lib/etcd --snapshot-count 1000 --listen-peer-urls http://%%IP%%:7001,http://127.0.0.1:7001 --listen-client-urls http://%%IP%%:4001,http://127.0.0.1:4001 --advertise-client-urls http://%%IP%%:4001,http://127.0.0.1:4001"
|
||||
|
||||
if [ -d /var/lib/etcd/wal/ ]
|
||||
then
|
||||
#This will allow to restart etcd service properly to pick up properties from other peers
|
||||
ETCD_OPTS=$EXISTING_ETCD_OPTS
|
||||
else
|
||||
ETCD_OPTS=$INIT_ETCD_OPTS
|
||||
fi
|
|
@ -1 +0,0 @@
|
|||
ETCD_OPTS="--name %%NAME%% --initial-cluster-state existing --initial-cluster %%ETCD_INITIAL_CLUSTER%% --data-dir /var/lib/etcd --snapshot-count 1000 --listen-peer-urls http://%%IP%%:7001,http://127.0.0.1:7001 --listen-client-urls http://%%IP%%:4001,http://127.0.0.1:4001 --initial-advertise-peer-urls http://%%IP%%:7001 --advertise-client-urls http://%%IP%%:4001,http://127.0.0.1:4001"
|
|
@ -1,9 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# $1 - NAME
|
||||
# $2 - IP
|
||||
# $3 - ETCD_INITIAL_CLUSTER
|
||||
|
||||
chmod +x ./member-etcd-setup.sh
|
||||
|
||||
./member-etcd-setup.sh $1 $2 $3 2>&1 | tee /tmp/member-etcd-setup.log
|
|
@ -7,16 +7,14 @@
|
|||
service etcd stop
|
||||
|
||||
mkdir /var/lib/etcd
|
||||
first=`cat member-etcd-config-p1.conf`
|
||||
echo -n $first > member-etcd-config.conf
|
||||
echo -n " " >> member-etcd-config.conf
|
||||
echo -n $3 >> member-etcd-config.conf
|
||||
cat member-etcd-config-p2.conf >> member-etcd-config.conf
|
||||
sed -i.bkp "s/%%NAME%%/$1/g" default_scripts/etcd-member
|
||||
sed -i.bkp "s/%%IP%%/$2/g" default_scripts/etcd-member
|
||||
sed -i.bkp "s#%%CLUSTER_CONFIG%%#$3#g" default_scripts/etcd-member
|
||||
|
||||
sed -i.bkp "s/%%NAME%%/$1/g" member-etcd-config.conf
|
||||
sed -i.bkp "s/%%IP%%/$2/g" member-etcd-config.conf
|
||||
|
||||
cp -f member-etcd-config.conf /etc/default/etcd
|
||||
cp -f default_scripts/etcd-member /etc/default/etcd
|
||||
cp init_conf/etcd.conf /etc/init/
|
||||
chmod +x initd_scripts/etcd
|
||||
cp initd_scripts/etcd /etc/init.d/
|
||||
|
||||
service etcd start
|
||||
|
||||
|
|
|
@ -4,39 +4,24 @@
|
|||
# $2 - IP
|
||||
# $3 - MASTER_IP
|
||||
|
||||
if [ "$3" != "$2" ]; then
|
||||
service kube-proxy stop
|
||||
service kube-scheduler stop
|
||||
service kube-controller-manager stop
|
||||
service kubelet stop
|
||||
service kube-apiserver stop
|
||||
|
||||
#Disable management services on a minion
|
||||
chmod -x /etc/init.d/kube-controller-manager
|
||||
chmod -x /etc/init.d/kube-apiserver
|
||||
chmod -x /etc/init.d/kube-scheduler
|
||||
else
|
||||
service kube-proxy stop
|
||||
service kubelet stop
|
||||
fi
|
||||
#Create log folder for Kubernetes services
|
||||
mkdir /var/log/kubernetes
|
||||
mkdir -p /var/run/murano-kubernetes
|
||||
|
||||
#Prepare service configs
|
||||
sed -i.bkp "s/%%MASTER_IP%%/$3/g" default_scripts/kube-proxy
|
||||
sed -i.bkp "s/%%MASTER_IP%%/$3/g" default_scripts/kubelet
|
||||
sed -i.bkp "s/%%IP%%/$2/g" default_scripts/kubelet
|
||||
|
||||
sed -i.bkp "s/%%MASTER_IP%%/$3/g" kube-proxy.conf
|
||||
cp init_conf/kubelet.conf /etc/init/
|
||||
cp init_conf/kube-proxy.conf /etc/init/
|
||||
|
||||
#sed -i.bkp "s/%%MASTER_IP%%/$3/g" kube-scheduler.conf
|
||||
#sed -i.bkp "s/%%IP%%/$2/g" kube-scheduler.conf
|
||||
sed -i.bkp "s/%%IP%%/$2/g" kubelet.conf
|
||||
|
||||
|
||||
cp -f kube-proxy.conf /etc/default/kube-proxy
|
||||
cp -f kubelet.conf /etc/default/kubelet
|
||||
chmod +x initd_scripts/*
|
||||
cp initd_scripts/kubelet /etc/init.d/
|
||||
cp initd_scripts/kube-proxy /etc/init.d/
|
||||
|
||||
cp -f default_scripts/kube-proxy /etc/default
|
||||
cp -f default_scripts/kubelet /etc/default/
|
||||
|
||||
service kubelet start
|
||||
service kube-proxy start
|
||||
|
||||
sleep 1
|
||||
sleep 1
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
/opt/bin/etcdctl mk /coreos.com/network/config '{"Network":"10.200.0.0/16"}'
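
flanneld picks up its overlay network settings from this etcd key, so a read-back is a cheap way to confirm the value was stored before the gateway/minion setup continues:

    # Should print {"Network":"10.200.0.0/16"}
    /opt/bin/etcdctl get /coreos.com/network/config
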
|
|
@ -0,0 +1,25 @@
|
|||
#!/bin/bash
|
||||
|
||||
cp init_conf/flanneld.conf /etc/init/
|
||||
chmod +x initd_scripts/flanneld
|
||||
cp initd_scripts/flanneld /etc/init.d/
|
||||
cp default_scripts/flanneld /etc/default/
|
||||
|
||||
|
||||
service flanneld start
|
||||
|
||||
source /run/flannel/subnet.env 2> /dev/null
|
||||
while [ -z "$FLANNEL_SUBNET" ]
|
||||
do
|
||||
sleep 1
|
||||
source /run/flannel/subnet.env 2> /dev/null
|
||||
done
|
||||
|
||||
|
||||
ip link set dev docker0 down
|
||||
brctl delbr docker0
|
||||
|
||||
echo DOCKER_OPTS=\"-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker
|
||||
|
||||
service docker restart
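
When flanneld comes up it writes the allocated subnet and MTU to /run/flannel/subnet.env, and the script above rewrites /etc/default/docker so the Docker bridge is recreated inside that subnet. A short verification sketch:

    # FLANNEL_SUBNET / FLANNEL_MTU come from flanneld
    cat /run/flannel/subnet.env
    # The bridge IP handed to Docker should match FLANNEL_SUBNET
    grep bip /etc/default/docker
    ip addr show docker0
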
|
||||
|
|
@ -3,7 +3,6 @@ Templates:
|
|||
masterNode:
|
||||
?:
|
||||
type: io.murano.apps.docker.kubernetes.KubernetesMasterNode
|
||||
# portalNet: $.appConfiguration.portalNet
|
||||
instance:
|
||||
?:
|
||||
type: io.murano.resources.LinuxMuranoInstance
|
||||
|
@ -26,6 +25,18 @@ Templates:
|
|||
assignFloatingIp: $.appConfiguration.assignFloatingIP
|
||||
keyname: $.instanceConfiguration.keyPair
|
||||
|
||||
gatewayNode:
|
||||
?:
|
||||
type: io.murano.apps.docker.kubernetes.KubernetesGatewayNode
|
||||
instance:
|
||||
?:
|
||||
type: io.murano.resources.LinuxMuranoInstance
|
||||
name: generateHostname($.appConfiguration.gatewayNamingPattern, $index)
|
||||
flavor: $.instanceConfiguration.flavor
|
||||
image: $.instanceConfiguration.osImage
|
||||
assignFloatingIp: $.appConfiguration.assignGatewayFloatingIP
|
||||
keyname: $.instanceConfiguration.keyPair
|
||||
|
||||
Application:
|
||||
?:
|
||||
type: io.murano.apps.docker.kubernetes.KubernetesCluster
|
||||
|
@ -33,6 +44,9 @@ Application:
|
|||
masterNode: $masterNode
|
||||
minionNodes: repeat($minionNode, $.appConfiguration.maxMinionCount)
|
||||
nodeCount: $.appConfiguration.minionCount
|
||||
gatewayCount: $.appConfiguration.gatewayCount
|
||||
gatewayNodes: repeat($gatewayNode, $.appConfiguration.maxGatewayCount)
|
||||
|
||||
|
||||
Forms:
|
||||
- appConfiguration:
|
||||
|
@ -59,30 +73,59 @@ Forms:
|
|||
required: true
|
||||
minValue: 1
|
||||
description: >-
|
||||
Select number of minions
|
||||
# - name: portalNet
|
||||
# type: string
|
||||
# initial: '11.1.1.1/24'
|
||||
# label: Select portalNet
|
||||
# description: >-
|
||||
# TODO
|
||||
Select maximum number of minions
|
||||
- name: assignFloatingIP
|
||||
type: boolean
|
||||
initial: true
|
||||
label: Assign Floating IP
|
||||
label: Assign floating IP for Kubernetes nodes
|
||||
description: >-
|
||||
Select to true to assign floating IP automatically
|
||||
Check to assign floating IP to Kubernetes nodes
|
||||
initial: false
|
||||
required: false
|
||||
- name: unitNamingPattern
|
||||
type: string
|
||||
initial: 'kube-0#'
|
||||
initial: 'kube-#'
|
||||
required: false
|
||||
regexpValidator: '^(([a-zA-Z0-9#][a-zA-Z0-9-#]*[a-zA-Z0-9#])\.)*([A-Za-z0-9#]|[A-Za-z0-9#][A-Za-z0-9-#]*[A-Za-z0-9#])$'
|
||||
label: Hostname
|
||||
label: Kubernetes node hostname pattern
|
||||
widgetMedia:
|
||||
js: ['muranodashboard/js/support_placeholder.js']
|
||||
css: {all: ['muranodashboard/css/support_placeholder.css']}
|
||||
|
||||
- name: gatewayCount
|
||||
type: integer
|
||||
label: Initial/current number of gateway nodes
|
||||
initial: 1
|
||||
minValue: 0
|
||||
required: true
|
||||
description: >-
|
||||
External traffic will be routed through gateway nodes
|
||||
- name: maxGatewayCount
|
||||
type: integer
|
||||
label: Maximum number of gateway nodes
|
||||
initial: 2
|
||||
required: true
|
||||
minValue: 0
|
||||
description: >-
|
||||
Maximum number of gateway nodes
|
||||
- name: assignGatewayFloatingIP
|
||||
type: boolean
|
||||
initial: true
|
||||
label: Assign floating IP for gateway nodes
|
||||
description: >-
|
||||
Check to assign floating IP to gateway nodes
|
||||
initial: false
|
||||
required: false
|
||||
- name: gatewayNamingPattern
|
||||
type: string
|
||||
initial: 'gateway-#'
|
||||
required: false
|
||||
regexpValidator: '^(([a-zA-Z0-9#][a-zA-Z0-9-#]*[a-zA-Z0-9#])\.)*([A-Za-z0-9#]|[A-Za-z0-9#][A-Za-z0-9-#]*[A-Za-z0-9#])$'
|
||||
label: Gateway hostname pattern
|
||||
widgetMedia:
|
||||
js: ['muranodashboard/js/support_placeholder.js']
|
||||
css: {all: ['muranodashboard/css/support_placeholder.css']}
|
||||
|
||||
- instanceConfiguration:
|
||||
fields:
|
||||
- name: title
|
||||
|
|
|
@ -11,4 +11,5 @@ Classes:
|
|||
io.murano.apps.docker.kubernetes.KubernetesCluster: KubernetesCluster.yaml
|
||||
io.murano.apps.docker.kubernetes.KubernetesMasterNode: KubernetesMasterNode.yaml
|
||||
io.murano.apps.docker.kubernetes.KubernetesMinionNode: KubernetesMinionNode.yaml
|
||||
io.murano.apps.docker.kubernetes.KubernetesGatewayNode: KubernetesGatewayNode.yaml
|
||||
io.murano.apps.docker.kubernetes.KubernetesNode: KubernetesNode.yaml
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
Namespaces:
|
||||
=: io.murano.apps.docker.kubernetes
|
||||
doc: io.murano.apps.docker
|
||||
docker: io.murano.apps.docker
|
||||
std: io.murano
|
||||
sys: io.murano.system
|
||||
|
||||
Name: KubernetesPod
|
||||
|
||||
Extends:
|
||||
- doc:DockerContainerHost
|
||||
- doc:DockerHelpers
|
||||
Extends:
|
||||
- docker:DockerContainerHost
|
||||
- docker:DockerHelpers
|
||||
|
||||
Properties:
|
||||
name:
|
||||
|
@ -24,24 +24,14 @@ Properties:
|
|||
replicationFactor:
|
||||
Contract: $.int().notNull().check($ >= 0)
|
||||
|
||||
exposePorts:
|
||||
Contract: $.string().notNull()
|
||||
Default: ''
|
||||
|
||||
exposePortsMap:
|
||||
Contract:
|
||||
$.int().notNull().check($ > 0 and $ < 65536): $.int().notNull().check($ > 0 and $ < 65536)
|
||||
Usage: Runtime
|
||||
|
||||
Methods:
|
||||
initialize:
|
||||
Body:
|
||||
- $._environment: $.find(std:Environment).require()
|
||||
|
||||
- $.exposePortsMap: $.convertPortMapping($.exposePorts)
|
||||
|
||||
- $podName: $.getPodName()
|
||||
- $podDefinition: $.getAttr(podDefinition, null)
|
||||
- $podName: $._getPodName()
|
||||
- $podDefinition: $.getAttr(lastPodDeployed, null)
|
||||
- If: $podDefinition = null
|
||||
Then:
|
||||
- $podDefinition:
|
||||
|
@ -54,181 +44,119 @@ Methods:
|
|||
id: $podName
|
||||
containers: []
|
||||
volumes: []
|
||||
labels: $.getPodLabels($podName)
|
||||
|
||||
- $.setAttr(podDefinition, $podDefinition)
|
||||
labels: $._getPodLabels($podName)
|
||||
|
||||
- $.setAttr(lastPodDeployed, $podDefinition)
|
||||
- $._podDefinition: $podDefinition
|
||||
- $.setAttr(lastReplicationFactor, $.getAttr(lastReplicationFactor, $.replicationFactor))
|
||||
|
||||
convertPortMapping:
|
||||
Arguments:
|
||||
mappingString:
|
||||
Contract: $.string().notNull()
|
||||
_getPodName:
|
||||
Body:
|
||||
- $result: {}
|
||||
- For: t
|
||||
In: $mappingString.replace(',', ';').split(';')
|
||||
Do:
|
||||
- $pair: $t.split('=')
|
||||
- If: len($pair) != 2
|
||||
Then:
|
||||
Continue:
|
||||
- $key: int($pair[0].trim())
|
||||
- $result[$key]: int($pair[0-1].trim()) # yaql 0.2.x doesn't support unary operators
|
||||
- Return: $result
|
||||
- Return: toLower($.name)
|
||||
|
||||
deploy:
|
||||
Body:
|
||||
- $.kubernetesCluster.deploy()
|
||||
- $lastPodDeployId: $.getAttr(lastPodDeployId, null)
|
||||
- $lastPodChangeId: $.getAttr(lastPodChangeId, null)
|
||||
- If: $lastPodDeployId != $lastPodChangeId
|
||||
Then:
|
||||
- $.deployPod()
|
||||
- $.setAttr(lastPodDeployId, $lastPodChangeId)
|
||||
|
||||
- $previouslyExposedPorts: $.getAttr(exposedPorts, dict())
|
||||
- For: mapping
|
||||
In: $previouslyExposedPorts.where(not $ in $this.exposePortsMap)
|
||||
Do: $.destroyService(servicePort => $mapping)
|
||||
|
||||
- For: mapping
|
||||
In: $.exposePortsMap
|
||||
Do:
|
||||
- $previousMapping: $previouslyExposedPorts.get($mapping)
|
||||
- If: $previousMapping != $.exposePortsMap.get($mapping)
|
||||
Then:
|
||||
- $.deployService(
|
||||
servicePort => $mapping,
|
||||
containerPort => $.exposePortsMap.get($mapping),
|
||||
isNew => $previousMapping = null
|
||||
)
|
||||
- $.setAttr(exposedPorts, $.exposePortsMap)
|
||||
|
||||
- $previousReplicationFactor: $.getAttr(replicationFactor, 0)
|
||||
- If: $previousReplicationFactor != $.replicationFactor
|
||||
Then:
|
||||
- If: $.replicationFactor > 0
|
||||
Then: $.deployReplicationController()
|
||||
Else: $.destroyReplicationController()
|
||||
- $.setAttr(replicationFactor, $.replicationFactor)
|
||||
|
||||
deployPod:
|
||||
Body:
|
||||
- $isNew: $.getAttr(lastPodDeployId, null) = null
|
||||
- $resources: new(sys:Resources)
|
||||
- $template: $resources.yaml('UpdatePod.template').bind(dict(
|
||||
podDefinition => $._podDefinition,
|
||||
isNew => $isNew
|
||||
))
|
||||
- $.kubernetesCluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
deployService:
|
||||
Arguments:
|
||||
- servicePort:
|
||||
Contract: $.int().notNull()
|
||||
- containerPort:
|
||||
Contract: $.int().notNull()
|
||||
- isNew:
|
||||
Contract: $.bool().notNull()
|
||||
Body:
|
||||
- $definition: $.buildServiceDefinition(servicePort => $servicePort, containerPort => $containerPort)
|
||||
- $resources: new(sys:Resources)
|
||||
|
||||
- $template: $resources.yaml('UpdateService.template').bind(dict(
|
||||
serviceDefinition => $definition,
|
||||
isNew => $isNew
|
||||
))
|
||||
- $.kubernetesCluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
destroyService:
|
||||
Arguments:
|
||||
- servicePort:
|
||||
Contract: $.int().notNull()
|
||||
Body:
|
||||
- $id: $.getServiceId($servicePort)
|
||||
- $resources: new(sys:Resources)
|
||||
|
||||
- $template: $resources.yaml('DestroyService.template').bind(dict(serviceId => $id))
|
||||
- $.kubernetesCluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
deployReplicationController:
|
||||
Body:
|
||||
- $definition: $.buildReplicationControllerDefinition()
|
||||
- $isNew: $.getAttr(replicationFactor, 0) = 0
|
||||
- $resources: new(sys:Resources)
|
||||
|
||||
- $template: $resources.yaml('UpdateReplicationController.template').bind(dict(
|
||||
controllerDefinition => $definition,
|
||||
isNew => $isNew
|
||||
))
|
||||
- $.kubernetesCluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
destroyReplicationController:
|
||||
Body:
|
||||
- $id: $.getReplicationControllerId()
|
||||
- $resources: new(sys:Resources)
|
||||
|
||||
- $template: $resources.yaml('DestroyReplicationController.template').bind(dict(serviceId => $id))
|
||||
- $.kubernetesCluster.masterNode.instance.agent.call($template, $resources)
|
||||
|
||||
getPodName:
|
||||
Body:
|
||||
- $name: $.getAttr(podName, null)
|
||||
- If: $name = null
|
||||
Then:
|
||||
- $name: toLower(format('pod-{0}', randomName()))
|
||||
- $.setAttr(podName, $name)
|
||||
- Return: $name
|
||||
|
||||
getPodLabels:
|
||||
_getPodLabels:
|
||||
Arguments:
|
||||
- podName:
|
||||
Contract: $.string().notNull()
|
||||
Body:
|
||||
Return: $.labels2Map(toLower($.labels)).mergeWith(dict(id => $podName))
|
||||
|
||||
getReplicationControllerId:
|
||||
Body:
|
||||
- Return: $.getPodName().replace('pod-', 'rc-')
|
||||
|
||||
getServiceId:
|
||||
hostContainer:
|
||||
Arguments:
|
||||
- servicePort:
|
||||
Contract: $.int().notNull()
|
||||
- applicationName:
|
||||
Contract: $.string().notNull()
|
||||
- image:
|
||||
Contract: $.string().notNull()
|
||||
- commands:
|
||||
Contract:
|
||||
- $.string().notNull()
|
||||
Default: []
|
||||
- env:
|
||||
Contract:
|
||||
$.string().notNull(): $.string().notNull()
|
||||
Default: {}
|
||||
- ports:
|
||||
Contract:
|
||||
- $.class(docker:ApplicationPort)
|
||||
Default: []
|
||||
- volumes:
|
||||
Contract:
|
||||
$.string().notNull(): $.class(docker:DockerVolume).notNull()
|
||||
Default: {}
|
||||
Body:
|
||||
- Return: $.getPodName().replace('pod-', format('svc-{0}-', $servicePort))
|
||||
- $._deleteContainer($applicationName)
|
||||
|
||||
buildServiceDefinition:
|
||||
- $container:
|
||||
name: toLower($applicationName)
|
||||
image: $image
|
||||
command: $commands
|
||||
ports: $ports.select($this._getPortDefinition($))
|
||||
volumeMounts: $volumes.keys().select(dict(name => $this._generateVolumeName($env.get($)), mountPath => $))
|
||||
env: $env.keys().select(dict(key => $, value => $env.get($)))
|
||||
|
||||
- $newVolumes: $volumes.keys().where(not $this._generateVolumeName($) in $._podDefinition.desiredState.volumes.name).
|
||||
select($this._buildVolumeEntry($volumes.get($)))
|
||||
|
||||
- $diff:
|
||||
desiredState:
|
||||
manifest:
|
||||
containers: [$container]
|
||||
volumes: $newVolumes
|
||||
- $._podDefinition: $._podDefinition.mergeWith($diff)
|
||||
- $.deploy()
|
||||
- $._environment.reporter.report($, 'Creating services for Pod {0}'.format($.name))
|
||||
- $.kubernetesCluster.createServices(
|
||||
applicationName => $applicationName,
|
||||
applicationPorts => $ports,
|
||||
podId => $._getPodName())
|
||||
- Return: $.getEndpoints($applicationName)
|
||||
|
||||
|
||||
getEndpoints:
|
||||
Arguments:
|
||||
- servicePort:
|
||||
Contract: $.int().notNull()
|
||||
- containerPort:
|
||||
Contract: $.int().notNull()
|
||||
- applicationName:
|
||||
Contract: $.string().notNull()
|
||||
Body:
|
||||
- Return: $.kubernetesCluster.serviceEndpoints.where($.applicationName = $applicationName)
|
||||
|
||||
|
||||
_getPortDefinition:
|
||||
Arguments:
|
||||
- port:
|
||||
Contract: $.class(docker:ApplicationPort).notNull()
|
||||
Body:
|
||||
- $result:
|
||||
containerPort: $port.port
|
||||
- If: $port.scope = node
|
||||
Then:
|
||||
$result.hostPort: $port.port
|
||||
- Return: $result
|
||||
|
||||
|
||||
_buildVolumeEntry:
|
||||
Arguments:
|
||||
- volume:
|
||||
Contract: $.class(doc:DockerVolume).notNull()
|
||||
Body:
|
||||
- $type: $volume.getType()
|
||||
- Value: $type
|
||||
Match:
|
||||
HostDir:
|
||||
- $spec:
|
||||
hostDir:
|
||||
path: $volume.getParameters()
|
||||
TempVolume:
|
||||
- $spec:
|
||||
emptyDir: {}
|
||||
Default:
|
||||
- Throw: UnknownDockerVolumeType
|
||||
Message: format('Unknown docker volume type {0}', $type)
|
||||
- Return:
|
||||
id: $.getServiceId($servicePort)
|
||||
kind: Service
|
||||
apiVersion: v1beta1
|
||||
port: $servicePort
|
||||
containerPort: $containerPort
|
||||
selector:
|
||||
id: $.getPodName()
|
||||
name: $._generateVolumeName($volume)
|
||||
source: $spec
|
||||
|
||||
buildReplicationControllerDefinition:
|
||||
Body:
|
||||
Return:
|
||||
id: $.getReplicationControllerId()
|
||||
kind: ReplicationController
|
||||
apiVersion: v1beta1
|
||||
desiredState:
|
||||
replicas: $.replicationFactor
|
||||
replicaSelector:
|
||||
id: $.getPodName()
|
||||
podTemplate:
|
||||
desiredState: $._podDefinition.desiredState
|
||||
labels: $._podDefinition.labels
|
||||
|
||||
deleteContainer:
|
||||
_deleteContainer:
|
||||
Arguments:
|
||||
- name:
|
||||
Contract: $.string().notNull()
|
||||
|
@ -241,92 +169,71 @@ Methods:
|
|||
Then:
|
||||
- $._podDefinition.desiredState.manifest.containers: $newContainers
|
||||
- $._podDefinition.desiredState.manifest.volumes: $newVolumes
|
||||
- $.setAttr(lastPodChangeId, randomName())
|
||||
- $.setAttr(podDefinition, $._podDefinition)
|
||||
|
||||
hostContainer:
|
||||
|
||||
|
||||
deleteContainer:
|
||||
Arguments:
|
||||
- name:
|
||||
Contract: $.string().notNull()
|
||||
- image:
|
||||
Contract: $.string().notNull()
|
||||
- commands:
|
||||
Contract:
|
||||
- $.string().notNull()
|
||||
- env:
|
||||
Contract:
|
||||
$.string().notNull(): $.string().notNull()
|
||||
- ports:
|
||||
Contract:
|
||||
- $.int().notNull()
|
||||
- volumes:
|
||||
Contract:
|
||||
$.string().notNull(): $.class(doc:DockerVolume).notNull()
|
||||
Body:
|
||||
- $.deleteContainer($name)
|
||||
- $._deleteContainer($name)
|
||||
- $.kubernetesCluster.destroyService(
|
||||
applicationName => $name,
|
||||
podId => $._getPodName())
|
||||
- $.deploy()
|
||||
|
||||
- For: port
|
||||
In: $ports
|
||||
Do:
|
||||
- $securityGroupIngress:
|
||||
- ToPort: $port
|
||||
FromPort: $port
|
||||
IpProtocol: tcp
|
||||
External: true
|
||||
- $._environment.securityGroupManager.addGroupIngress($securityGroupIngress)
|
||||
|
||||
- $container:
|
||||
name: toLower($name)
|
||||
image: $image
|
||||
command: $commands
|
||||
cpu: 1000
|
||||
ports: $ports.select(dict(containerPort => $, hostPort => $))
|
||||
volumeMounts: $volumes.select(dict(name => $this.generateVolumeName($env.get($)), mountPath => $))
|
||||
env: $env.select(dict(key => $, value => $env.get($)))
|
||||
|
||||
- $newVolumes: $volumes.where(not $this.generateVolumeName($) in $._podDefinition.desiredState.volumes.name).
|
||||
select($this.buildVolumeEntry($volumes.get($)))
|
||||
|
||||
- $diff:
|
||||
desiredState:
|
||||
manifest:
|
||||
containers: [$container]
|
||||
volumes: $newVolumes
|
||||
|
||||
- $.setAttr(lastPodChangeId, randomName())
|
||||
- $._podDefinition: $._podDefinition.mergeWith($diff)
|
||||
- $.setAttr(podDefinition, $._podDefinition)
|
||||
|
||||
generateVolumeName:
|
||||
_generateVolumeName:
|
||||
Arguments:
|
||||
- volume:
|
||||
Contract: $.class(doc:DockerVolume).notNull()
|
||||
Contract: $.class(docker:DockerVolume).notNull()
|
||||
Body:
|
||||
Return: toLower($volume.name)
|
||||
|
||||
buildVolumeEntry:
|
||||
|
||||
deploy:
|
||||
Body:
|
||||
- $prevPod: $.getAttr(lastPodDeployed, null)
|
||||
- $prevReplicationFactor: $.getAttr(lastReplicationFactor, 0)
|
||||
|
||||
- $podDefinition: $._podDefinition
|
||||
- If: $prevPod != $podDefinition
|
||||
Then:
|
||||
- $._environment.reporter.report($, 'Deploying Pod {0}'.format($.name))
|
||||
- $.kubernetesCluster.createPod(
|
||||
definition => $podDefinition, isNew => not $.getAttr(deployed, false))
|
||||
- $.setAttr(lastPodDeployed, $podDefinition)
|
||||
- $.setAttr(deployed, true)
|
||||
|
||||
- If: $.replicationFactor != $prevReplicationFactor or $prevPod != $podDefinition
|
||||
Then:
|
||||
- If: $.replicationFactor > 0
|
||||
Then:
|
||||
- $._environment.reporter.report($, 'Deploying Replication Controller for Pod {0}'.format($.name))
|
||||
- $rcDefinition: $._buildReplicationControllerDefinition($podDefinition)
|
||||
- $.kubernetesCluster.createReplicationController(
|
||||
definition => $rcDefinition, isNew => $prevReplicationFactor = 0)
|
||||
- If: $.replicationFactor = 0 and $prevReplicationFactor > 0
|
||||
Then:
|
||||
- $.kubernetesCluster.destroyReplicationController($._getReplicationControllerId())
|
||||
- $.setAttr(lastReplicationFactor, $.replicationFactor)
|
||||
|
||||
_buildReplicationControllerDefinition:
|
||||
Arguments:
|
||||
- volume:
|
||||
Contract: $.class(doc:DockerVolume).notNull()
|
||||
- podDefinition:
|
||||
Contract: {}
|
||||
Body:
|
||||
- $type: $volume.getType()
|
||||
- Value: $type
|
||||
Match:
|
||||
HostDir:
|
||||
- $spec:
|
||||
hostDir:
|
||||
path: $volume.getParameters()
|
||||
TempVolume:
|
||||
- $spec:
|
||||
emptyDir: {}
|
||||
Default:
|
||||
- Throw: UnknownDockerVolumeType
|
||||
Message: format('Unknown docker volume type {0}', $type)
|
||||
- Return:
|
||||
name: $.generateVolumeName($volume)
|
||||
source: $spec
|
||||
Return:
|
||||
id: $._getReplicationControllerId()
|
||||
kind: ReplicationController
|
||||
apiVersion: v1beta1
|
||||
desiredState:
|
||||
replicas: $.replicationFactor
|
||||
replicaSelector:
|
||||
id: $._getPodName()
|
||||
podTemplate:
|
||||
desiredState: $podDefinition.desiredState
|
||||
labels: $podDefinition.labels
|
||||
|
||||
getIp:
|
||||
_getReplicationControllerId:
|
||||
Body:
|
||||
Return: $.kubernetesCluster.getIp()
|
||||
|
||||
- Return: $._getPodName().replace('pod-', 'rc-')
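
Because hostContainer and deploy materialize the pod, the optional replication controller and the per-port services through the KubernetesCluster class, the resulting objects can be inspected on the master node with the bundled kubectl (resource nouns vary slightly between early kubectl releases):

    # Run on the Kubernetes master node
    /opt/bin/kubectl get pods
    /opt/bin/kubectl get services
    /opt/bin/kubectl get replicationControllers
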
|
||||
|
|
|
@ -7,7 +7,6 @@ Application:
|
|||
labels: $.appConfiguration.labels
|
||||
kubernetesCluster: $.appConfiguration.kubernetesCluster
|
||||
replicationFactor: $.appConfiguration.replicationFactor
|
||||
exposePorts: $.appConfiguration.exposePorts
|
||||
|
||||
|
||||
Forms:
|
||||
|
@ -37,11 +36,3 @@ Forms:
|
|||
initial: 2
|
||||
required: true
|
||||
minValue: 0
|
||||
- name: exposePorts
|
||||
type: string
|
||||
label: Expose Ports
|
||||
required: false
|
||||
description: >-
|
||||
List of ports to expose for the Pod in a form of either
|
||||
"port1;port2" or
|
||||
"publicPort1=containerPort1;publicPort2=containerPort2"
|
||||
|
|