Convert build_app and add a new worker handler

Change-Id: Ief90ef40e2f21db6345316e3125f198131d2b034
Partially-fixes-bug: #1302552
James Li 2016-02-23 03:30:33 +00:00 committed by devdatta-kulkarni
parent 01297c64c8
commit fc58f9422f
5 changed files with 384 additions and 6 deletions


@@ -26,6 +26,7 @@ from solum.common import service
from solum.common import trace_data
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
from solum.worker.handlers import default as default_handler
from solum.worker.handlers import noop as noop_handler
from solum.worker.handlers import shell as shell_handler
@@ -55,6 +56,7 @@ def main():
    handlers = {
        'noop': noop_handler.Handler,
        'default': default_handler.Handler,
        'shell': shell_handler.Handler,
    }
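    # The worker service looks up its configured handler name in this table;
    # the 'default' entry now resolves to the new app-handler based
    # implementation, alongside the existing noop and shell handlers.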


@@ -32,7 +32,7 @@ from solum.common import exception as exc
from solum.common import solum_swiftclient
from solum.openstack.common import log as solum_log
from solum.uploaders import tenant_logger
from solum.worker.lp_handlers import utils
from solum.worker.app_handlers import utils
from swiftclient import exceptions as swiftexp


@@ -14,6 +14,7 @@
"""LP handler for building apps running on solum language packs"""
import io
import logging
import os
import random
@@ -25,8 +26,8 @@ from oslo_config import cfg
from solum.common import clients
from solum.common import solum_swiftclient
from solum.openstack.common import log as solum_log
from solum.worker.lp_handlers import base
from solum.worker.lp_handlers import utils
from solum.worker.app_handlers import base
from solum.worker.app_handlers import utils
from swiftclient import exceptions as swiftexp
@@ -36,6 +37,7 @@ LOG = solum_log.getLogger(__name__)
cfg.CONF.import_opt('container_mem_limit', 'solum.worker.config',
                    group='worker')
mem_limit = cfg.CONF.worker.container_mem_limit
build_timeout = cfg.CONF.worker.docker_build_timeout
UNITTEST_TIMEOUT = 1800  # 30 minutes
@@ -243,5 +245,128 @@ class DockerHandler(base.BaseHandler):
        logger.upload()
        return result

    def build_app(self, *args):
        pass

    def build_app(self, app_name, git_info, lp_obj_name, lp_img_tag,
                  run_cmd):
        logger = self._get_tenant_logger('build')
        if not self._prepare(git_info, lp_obj_name, lp_img_tag, logger):
            logger.upload()
            return

        timeout_cmd = ('timeout --signal=SIGKILL {t} {cmd}').format(
            t=build_timeout, cmd='./build.sh')
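        # The build script is wrapped in coreutils `timeout`, so a hung
        # build.sh is SIGKILLed once cfg.CONF.worker.docker_build_timeout
        # seconds (build_timeout above) have elapsed.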
        # username = (''.join(random.choice(string.ascii_lowercase)
        #             for _ in range(8)))
        # useradd_cmd = ('useradd -s /bin/bash -u {uid} -m {uname} ||'
        #                ' usermod -d /app $(getent passwd {uid}'
        #                ' | cut -d: -f1)').format(uid=self.docker_cmd_uid,
        #                                          uname=username)
        # For now we run the user's arbitrary build.sh as root in the
        # container, waiting for the following docker patch to remap root in
        # a container to an unprivileged user on the host:
        # https://github.com/docker/docker/pull/12648
        # If that docker patch is ultimately abandoned, we should run
        # build.sh as an unprivileged user via the commented-out code above,
        # in which case we may want to leverage the following docker feature:
        # https://github.com/docker/docker/pull/10775/commits
        content_build = ('FROM {lp}\n'
                         'COPY code /app\n'
                         'WORKDIR /solum/bin\n'
                         'RUN chmod +x build.sh\n'
                         'CMD {cmd}').format(lp=self.lp, cmd=timeout_cmd)
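        # Illustration only, with a hypothetical languagepack image name:
        # for lp='tenant-python-lp' the rendered Dockerfile.build would be
        #   FROM tenant-python-lp
        #   COPY code /app
        #   WORKDIR /solum/bin
        #   RUN chmod +x build.sh
        #   CMD timeout --signal=SIGKILL <docker_build_timeout> ./build.sh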
        df = 'Dockerfile.build'
        fname = '{}/{}'.format(self.work_dir, df)
        try:
            with open(fname, 'w') as f:
                f.write(content_build)
        except OSError as e:
            LOG.error('Error in creating Dockerfile %s, %s' % (fname, str(e)))
            logger.log(logging.ERROR, 'Preparing to build the app DU image'
                       ' failed')
            logger.upload()
            return

        tenant = self.context.tenant
        ts = utils.timestamp()
        storage_obj_name = '{name}-{ts}-{sha}'.format(name=app_name, ts=ts,
                                                      sha=self.source_sha)
        du_image = '{tenant}-{obj}'.format(tenant=tenant,
                                           obj=storage_obj_name)
        du_image_in_build = '{img}:{tag}'.format(img=du_image, tag='build')
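        # Naming sketch (hypothetical values): app 'myapp' built from source
        # sha 'abc123' at timestamp <ts> yields storage object
        # 'myapp-<ts>-abc123' and DU image '<tenant>-myapp-<ts>-abc123',
        # first tagged ':build' for the intermediate build stage.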
        logger.log(logging.INFO, 'Building DU image, preparing to run'
                   ' build script')
        build_result = self._docker_build_with_retry(du_image_in_build, logger,
                                                     path=self.work_dir,
                                                     dockerfile=df,
                                                     pull=False)
        self.images.append(du_image_in_build)
        if build_result != 0:
            logger.log(logging.ERROR, 'Failed building DU image.')
            logger.upload()
            return

        logger.log(logging.INFO, 'Building DU image, running build script')
        ct = None
        result = -1
        try:
            ct = self.docker.create_container(image=du_image_in_build,
                                              mem_limit=mem_limit,
                                              memswap_limit=-1)
            self.containers.append(ct)
            self.docker.start(container=ct.get('Id'))
            result = self.docker.wait(container=ct.get('Id'))
        except (errors.DockerException, errors.APIError) as e:
            LOG.error('Error running build script, assembly: %s, %s' %
                      (self.assembly.uuid, str(e)))
            logger.log(logging.ERROR, 'Running build script failed')
            logger.upload()
            return

        if result != 0:
            logger.log(logging.ERROR, 'Build script exited with %s' % result)
            logger.upload()
            return

        try:
            self.docker.commit(container=ct.get('Id'), repository=du_image,
                               tag='build')
        except (errors.DockerException, errors.APIError) as e:
            LOG.error('Error committing the built image layer from build'
                      ' script, assembly: %s, %s' %
                      (self.assembly.uuid, str(e)))
            logger.log(logging.ERROR, 'Error building DU with the output'
                       ' of build script')
            logger.upload()
            return

        content_run = ('FROM {img}\n'
                       'WORKDIR /app\n'
                       'CMD {cmd}').format(img=du_image_in_build, cmd=run_cmd)
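        # The final DU simply layers the app's run command on top of the
        # image committed from the build container; it is built from an
        # in-memory Dockerfile (fileobj) rather than the on-disk work dir.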
        f = io.BytesIO(content_run.encode('utf-8'))
        build_result = self._docker_build_with_retry(du_image, logger,
                                                     fileobj=f, pull=False)
        self.images.append(du_image)
        if build_result != 0:
            logger.log(logging.ERROR, 'Failed building DU image.')
            logger.upload()
            return

        du_file = '{}/{}'.format(self.work_dir, storage_obj_name)
        result = self._docker_save(du_image, du_file)
        if result != 0:
            logger.log(logging.ERROR, 'Failed saving DU image.')
            logger.upload()
            return

        logger.log(logging.INFO, 'Persisting DU image to backend')
        image_loc = self._persist_to_backend(du_file, 'solum_du',
                                             storage_obj_name, logger)
        if image_loc is None:
            logger.log(logging.ERROR, 'Failed to persist DU to backend.')
            logger.upload()
            return
        else:
            logger.log(logging.INFO, 'Successfully created DU image.')
            logger.upload()
            return (image_loc, du_image)


@@ -14,7 +14,7 @@
"""LP handler for building apps running on cedarish build packs"""
from solum.worker.lp_handlers import base
from solum.worker.app_handlers import base
class HerokuHandler(base.BaseHandler):


@@ -0,0 +1,251 @@
# Copyright 2015 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solum Worker handler."""
from solum.common import exception
from solum.conductor import api as conductor_api
from solum.deployer import api as deployer_api
from solum import objects
from solum.objects import assembly
from solum.objects import image
from solum.openstack.common import log as logging
from solum.worker.app_handlers import default as docker_handler
LOG = logging.getLogger(__name__)
ASSEMBLY_STATES = assembly.States
IMAGE_STATES = image.States
def job_update_notification(ctxt, build_id, status=None, description=None,
                            created_image_id=None, docker_image_name=None,
                            assembly_id=None):
    """Send a status update to the conductor."""
    conductor_api.API(context=ctxt).build_job_update(build_id, status,
                                                     description,
                                                     created_image_id,
                                                     docker_image_name,
                                                     assembly_id)


def update_assembly_status(ctxt, assembly_id, status):
    if assembly_id is None:
        return
    data = {'status': status}
    conductor_api.API(context=ctxt).update_assembly(assembly_id, data)


def update_lp_status(ctxt, image_id, status, external_ref=None,
                     docker_image_name=None):
    if image_id is None:
        return
    LOG.debug('Updating languagepack %s status to %s and external_ref to %s'
              % (image_id, status, external_ref))
    conductor_api.API(context=ctxt).update_image(image_id, status,
                                                 external_ref,
                                                 docker_image_name)


def validate_lp(ctxt, lp_id, assembly_id):
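    # Returns the languagepack Image object only when it exists and is
    # 'ready' with project_id, external_ref and docker_image_name set;
    # otherwise returns None and callers treat the build as blocked.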
    try:
        image = objects.registry.Image.get_lp_by_name_or_uuid(
            ctxt, lp_id, include_operators_lp=True)
    except exception.ObjectNotFound:
        LOG.error('LP not found with id %s, assembly id: %s' %
                  (lp_id, assembly_id))
        return

    if (not image or not image.project_id or not image.status or
            not image.external_ref or not image.docker_image_name or
            image.status.lower() != 'ready'):
        LOG.warn("Error building: language pack %s is not ready."
                 " Assembly ID: %s" % (lp_id, assembly_id))
        return

    return image

class Handler(object):
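    """Worker handler for app unittest, build, deploy, and LP builds."""
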
    def echo(self, ctxt, message):
        LOG.debug("%s" % message)

    def launch_workflow(self, ctxt, build_id, git_info, ports, name,
                        base_image_id, source_format, image_format,
                        assembly_id, workflow, test_cmd, run_cmd):
        try:
            assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
        except exception.ObjectNotFound:
            return

        enable_unittest = enable_build = enable_deploy = False
        for step in workflow:
            if step == 'unittest':
                enable_unittest = True
            elif step == 'build':
                enable_build = True
            elif step == 'deploy':
                enable_deploy = True
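        # 'workflow' is expected to be a list of step names, e.g.
        # ['unittest', 'build', 'deploy']; unrecognized entries are ignored.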
        du_image_loc = None
        du_image_name = None
        with docker_handler.DockerHandler(ctxt, assem, 'custom',
                                          'swift') as lp_handler:
            if enable_unittest:
                if self._do_unittest(ctxt, lp_handler, build_id, git_info,
                                     name, base_image_id, source_format,
                                     image_format, assembly_id,
                                     test_cmd) != 0:
                    return
            if enable_build:
                du_image_loc, du_image_name = self._do_build(
                    ctxt, lp_handler, build_id, git_info, name, base_image_id,
                    source_format, image_format, assembly_id, run_cmd)

        if enable_deploy and du_image_loc and du_image_name:
            self._do_deploy(ctxt, assembly_id, ports, du_image_loc,
                            du_image_name)

    def build(self, ctxt, build_id, git_info, name, base_image_id,
              source_format, image_format, assembly_id, run_cmd):
        try:
            assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
        except exception.ObjectNotFound:
            return

        with docker_handler.DockerHandler(ctxt, assem, 'custom',
                                          'swift') as lp_handler:
            self._do_build(ctxt, lp_handler, build_id, git_info, name,
                           base_image_id, source_format, image_format,
                           assembly_id, run_cmd)

    def unittest(self, ctxt, build_id, git_info, name, base_image_id,
                 source_format, image_format, assembly_id, test_cmd):
        try:
            assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
        except exception.ObjectNotFound:
            return

        with docker_handler.DockerHandler(ctxt, assem, 'custom',
                                          'swift') as lp_handler:
            self._do_unittest(ctxt, lp_handler, build_id, git_info, name,
                              base_image_id, source_format, image_format,
                              assembly_id, test_cmd)

    def _do_deploy(self, ctxt, assembly_id, ports, du_image_loc,
                   du_image_name):
        deployer_api.API(context=ctxt).deploy(assembly_id=assembly_id,
                                              image_loc=du_image_loc,
                                              image_name=du_image_name,
                                              ports=ports)

    def _do_build(self, ctxt, lp_handler, build_id, git_info, name,
                  base_image_id, source_format, image_format, assembly_id,
                  run_cmd):
        lp = validate_lp(ctxt, base_image_id, assembly_id)
        if not lp:
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
            job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                    description='language pack not ready',
                                    assembly_id=assembly_id)
            return

        # Check if the assembly is deleted or being deleted
        try:
            assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
            if assem.status == ASSEMBLY_STATES.DELETING:
                LOG.debug('Assembly %s is being deleted; skipping next stages'
                          % assembly_id)
                return
        except exception.ObjectNotFound:
            LOG.debug('Assembly %s was deleted, skipping building.' %
                      assembly_id)
            return

        update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.BUILDING)
        image_info = lp_handler.build_app(name, git_info, lp.external_ref,
                                          lp.docker_image_name, run_cmd)
        if not image_info:
            job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
                                    description='image not created',
                                    assembly_id=assembly_id)
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
            return
        else:
            job_update_notification(ctxt, build_id, IMAGE_STATES.READY,
                                    description='built successfully',
                                    created_image_id=image_info[0],
                                    docker_image_name=image_info[1],
                                    assembly_id=assembly_id)
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.BUILT)
            return (image_info[0], image_info[1])

    def _do_unittest(self, ctxt, lp_handler, build_id, git_info, name,
                     base_image_id, source_format, image_format, assembly_id,
                     test_cmd):
        if test_cmd is None:
            LOG.debug("Unit test command is None; skipping unittests.")
            return 0

        lp = validate_lp(ctxt, base_image_id, assembly_id)
        if not lp:
            update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
            return -1

        # Check if the assembly is deleted or being deleted
        try:
            assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
            if assem.status == ASSEMBLY_STATES.DELETING:
                LOG.debug('Assembly %s is being deleted; skipping next stages'
                          % assembly_id)
                return -1
        except exception.ObjectNotFound:
            LOG.debug('Assembly %s was deleted, skipping unittesting.' %
                      assembly_id)
            return -1

        update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.UNIT_TESTING)
        result = lp_handler.unittest_app(git_info, lp.external_ref,
                                         lp.docker_image_name, test_cmd)
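        # unittest_app follows shell-style return codes (mapped below):
        # 0 means the tests passed, > 0 means test failures, and a negative
        # value means the test run itself errored out.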
        if result == 0:
            status = ASSEMBLY_STATES.UNIT_TESTING_PASSED
        elif result > 0:
            status = ASSEMBLY_STATES.UNIT_TESTING_FAILED
        else:
            status = ASSEMBLY_STATES.ERROR
        update_assembly_status(ctxt, assembly_id, status)
        return result

    def build_lp(self, ctxt, image_id, git_info, name, source_format,
                 image_format, artifact_type):
        try:
            lp = objects.registry.Image.get_by_id(ctxt, image_id)
        except exception.ObjectNotFound:
            LOG.error('Image object not found with id %s' % image_id)
            return

        update_lp_status(ctxt, image_id, IMAGE_STATES.BUILDING)
        lp.type = 'languagepack'

        image_info = None
        with docker_handler.DockerHandler(ctxt, lp, 'custom', 'swift') as lph:
            image_info = lph.build_lp(name, git_info)

        if image_info:
            status = IMAGE_STATES.READY
            update_lp_status(ctxt, image_id, status, image_info[0],
                             image_info[1])
        else:
            status = IMAGE_STATES.ERROR
            update_lp_status(ctxt, image_id, status)