Add openstack tripleo container image build

This change adds a v2 entry point to allow us to build images within the
native tripleoclient. Images will leverage the new
`tripleo_container_image_build` role which will produce simplified
container files and an image structure in line with the simple
container generation spec.

This change reimplements the client calls for the V1 client solution
without needing to backport all of the changes made to reimplement
`ansible-runner`.
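
For reference, the entry point added to setup.cfg should surface the command
as `openstack tripleo container image build`; a rough invocation sketch using
the parser defaults introduced below would be:

    openstack tripleo container image build \
        --config-file tripleo_containers.yaml \
        --registry localhost \
        --namespace tripleotrain \
        --tag latest \
        --push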

Depends-On: https://review.opendev.org/742207
Change-Id: I279e95722fe7a10148d36c99c7179c33a3a767a0
Signed-off-by: Kevin Carter <kecarter@redhat.com>
(cherry picked from commit 28d1d62c7f)
Authored by Emilien Macchi on 2020-04-28 16:01:10 -04:00; committed by Kevin Carter (cloudnull)
parent aedc61466f
commit b40432f134
6 changed files with 895 additions and 0 deletions


@@ -9,3 +9,4 @@ libssl-dev [platform:dpkg test]
openssl-devel [platform:rpm test]
policycoreutils-python [platform:rpm test !platform:rhel-8 !platform:centos-8 !platform:fedora]
policycoreutils-python-utils [platform:rpm test !platform:rhel-7 !platform:centos-7]
tripleo-ansible [platform:rpm]


@@ -106,6 +106,7 @@ openstack.tripleoclient.v1 =
overcloud_ffwd-upgrade_converge = tripleoclient.v1.overcloud_ffwd_upgrade:FFWDUpgradeConverge
overcloud_execute = tripleoclient.v1.overcloud_execute:RemoteExecute
overcloud_generate_fencing = tripleoclient.v1.overcloud_parameters:GenerateFencingParameters
tripleo_container_image_build = tripleoclient.v1.tripleo_container_image:Build
tripleo_container_image_delete = tripleoclient.v1.container_image:TripleOContainerImageDelete
tripleo_container_image_list = tripleoclient.v1.container_image:TripleOContainerImageList
tripleo_container_image_show = tripleoclient.v1.container_image:TripleOContainerImageShow


@@ -0,0 +1,223 @@
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import sys
from tripleoclient.tests import fakes
from tripleoclient.tests.v1.overcloud_deploy import fakes as deploy_fakes
from tripleoclient.v1 import tripleo_container_image as tcib
IMAGE_YAML = """---
container_images:
- image_source: "tripleo"
imagename: "test/keystone:tag"
"""
MOCK_WALK = [
("", ["base"], [],),
("/base", ["memcached", "openstack"], ["config.yaml", "test.doc"],),
("/base/memcached", [], ["memcached.yaml"],),
("/base/openstack", ["glance", "keystone", "neutron", "nova"], [],),
(
"/base/openstack/glance",
[],
["glance-registry.yaml", "glance-api.yaml"],
),
("/base/openstack/keystone", [], ["keystone.yaml"],),
("/base/openstack/neutron", ["api"], [],),
("/base/openstack/neutron/api", [], ["neutron-api.yml"],),
("/base/openstack/nova", [], [],),
]
if sys.version_info >= (3, 0):
MOCK_OPEN_PATH = "builtins.open"
else:
MOCK_OPEN_PATH = "tripleoclient.v1.tripleo_container_image.open"
class TestContainerImages(deploy_fakes.TestDeployOvercloud):
def setUp(self):
super(TestContainerImages, self).setUp()
self.app = fakes.FakeApp()
self.os_walk = mock.patch(
"os.walk", autospec=True, return_value=iter(MOCK_WALK)
)
self.os_walk.start()
self.addCleanup(self.os_walk.stop)
self.os_listdir = mock.patch(
"os.listdir", autospec=True, return_value=["config.yaml"]
)
self.os_listdir.start()
self.addCleanup(self.os_listdir.stop)
self.run_ansible_playbook = mock.patch(
"tripleoclient.utils.run_ansible_playbook", autospec=True
)
self.run_ansible_playbook.start()
self.addCleanup(self.run_ansible_playbook.stop)
self.buildah_build_all = mock.patch(
"tripleo_common.image.builder.buildah.BuildahBuilder.build_all",
autospec=True,
)
self.mock_buildah = self.buildah_build_all.start()
self.addCleanup(self.buildah_build_all.stop)
self.cmd = tcib.Build(self.app, None)
def _take_action(self, parsed_args):
self.cmd.image_parents = {"keystone": "base"}
mock_open = mock.mock_open(read_data=IMAGE_YAML)
with mock.patch("os.path.isfile", autospec=True) as mock_isfile:
mock_isfile.return_value = True
with mock.patch("os.path.isdir", autospec=True) as mock_isdir:
mock_isdir.return_value = True
with mock.patch(MOCK_OPEN_PATH, mock_open):
with mock.patch(
"tripleoclient.v1.tripleo_container_image.Build"
".find_image",
autospec=True,
) as mock_find_image:
mock_find_image.return_value = {"tcib_option": "data"}
self.cmd.take_action(parsed_args)
def test_find_image(self):
mock_open = mock.mock_open(read_data='---\ntcib_option: "data"')
with mock.patch(MOCK_OPEN_PATH, mock_open):
image = self.cmd.find_image("keystone", "some/path", "base-image")
self.assertEqual(image, {"tcib_option": "data"})
def test_build_tree(self):
image = self.cmd.build_tree("some/path")
self.assertEqual(
image,
[
{
"base": [
"memcached",
{
"openstack": [
"glance",
"keystone",
{"neutron": ["api"]},
"nova",
]
},
]
}
],
)
def test_image_regex(self):
image = self.cmd.imagename_to_regex("test/centos-binary-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-binary-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-source-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-rdo-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-rhos-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/other-keystone:tag")
self.assertEqual(image, "other-keystone")
def test_rectify_excludes(self):
self.cmd.identified_images = ["keystone", "nova", "glance"]
excludes = self.cmd.rectify_excludes(images_to_prepare=["glance"])
self.assertEqual(excludes, ["keystone", "nova"])
def test_image_build_yaml(self):
arglist = ["--config-file", "config.yaml"]
verifylist = [("config_file", "config.yaml")]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_with_skip_build(self):
arglist = ["--config-file", "config.yaml", "--skip-build"]
verifylist = [("config_file", "config.yaml"), ("skip_build", True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert not self.mock_buildah.called
def test_image_build_with_push(self):
arglist = ["--config-file", "config.yaml", "--push"]
verifylist = [("config_file", "config.yaml"), ("push", True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_with_volume(self):
arglist = ["--config-file", "config.yaml", "--volume", "bind/mount"]
verifylist = [
("config_file", "config.yaml"),
(
"volumes",
[
"/etc/yum.repos.d:/etc/yum.repos.d:z",
"/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z",
"bind/mount",
],
),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_with_exclude(self):
arglist = ["--exclude", "image1"]
verifylist = [
("excludes", ["image1"]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_failure_no_config_file(self):
arglist = ["--config-file", "not-a-file-config.yaml"]
verifylist = [
("config_file", "not-a-file-config.yaml"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(IOError, self.cmd.take_action, parsed_args)
def test_image_build_failure_no_config_dir(self):
arglist = ["--config-path", "not-a-path"]
verifylist = [
("config_path", "not-a-path"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
with mock.patch("os.path.isfile", autospec=True) as mock_isfile:
mock_isfile.return_value = True
self.assertRaises(IOError, self.cmd.take_action, parsed_args)


@@ -65,6 +65,118 @@ from tripleoclient import exceptions
LOG = logging.getLogger(__name__ + ".utils")
class Pushd(object):
"""Simple context manager to change directories and then return."""
def __init__(self, directory):
"""This context manager will enter and exit directories.
>>> with Pushd(directory='/tmp'):
... with open('file', 'w') as f:
... f.write('test')
:param directory: path to change directory to
:type directory: `string`
"""
self.dir = directory
self.pwd = self.cwd = os.getcwd()
def __enter__(self):
os.chdir(self.dir)
self.cwd = os.getcwd()
return self
def __exit__(self, *args):
if self.pwd != self.cwd:
os.chdir(self.pwd)
class TempDirs(object):
"""Simple context manager to manage temp directories."""
def __init__(self, dir_path=None, dir_prefix='tripleo', cleanup=True,
chdir=True):
"""This context manager will create, push, and cleanup temp directories.
>>> with TempDirs() as t:
... with open('file', 'w') as f:
... f.write('test')
... print(t)
... os.mkdir('testing')
... with open(os.path.join(t, 'file')) as w:
... print(w.read())
... with open('testing/file', 'w') as f:
... f.write('things')
... with open(os.path.join(t, 'testing/file')) as w:
... print(w.read())
:param dir_path: path to create the temp directory
:type dir_path: `string`
:param dir_prefix: prefix to add to a temp directory
:type dir_prefix: `string`
:param cleanup: when enabled the temp directory will be
removed on exit.
:type cleanup: `boolean`
:param chdir: Change to/from the created temporary dir on enter/exit.
:type chdir: `boolean`
"""
# NOTE(cloudnull): kwargs for tempfile.mkdtemp are created
# because args are not processed correctly
# in py2. When we drop py2 support (cent7)
# these args can be removed and used directly
# in the `tempfile.mkdtemp` function.
tempdir_kwargs = dict()
if dir_path:
tempdir_kwargs['dir'] = dir_path
if dir_prefix:
tempdir_kwargs['prefix'] = dir_prefix
self.dir = tempfile.mkdtemp(**tempdir_kwargs)
self.pushd = Pushd(directory=self.dir)
self.cleanup = cleanup
self.chdir = chdir
def __enter__(self):
if self.chdir:
self.pushd.__enter__()
return self.dir
def __exit__(self, *args):
if self.chdir:
self.pushd.__exit__()
if self.cleanup:
self.clean()
else:
LOG.warning("Not cleaning temporary directory [ %s ]" % self.dir)
def clean(self):
shutil.rmtree(self.dir, ignore_errors=True)
LOG.info("Temporary directory [ %s ] cleaned up" % self.dir)
def makedirs(dir_path):
"""Recursively make directories and log the interaction.
:param dir_path: full path of the directories to make.
:type dir_path: `string`
:returns: `boolean`
"""
try:
os.makedirs(dir_path)
except OSError:
LOG.debug(
'Directory "{}" was not created because it'
' already exists.'.format(
dir_path
)
)
return False
else:
LOG.debug('Directory "{}" was created.'.format(dir_path))
return True
def run_ansible_playbook(logger,
workdir,
playbook,


@@ -0,0 +1,558 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
import re
import sys
import uuid
import yaml
import six
from osc_lib.i18n import _
from tripleo_common.exception import NotFound
from tripleo_common.image.builder import buildah
from tripleoclient import command
from tripleoclient import utils
BASE_PATH = os.path.join(
sys.prefix, "share", "tripleo-common", "container-images"
)
# NOTE(cloudnull): This will ensure functionality even when running in a venv.
if sys.prefix != "/usr" and not os.path.isdir(BASE_PATH):
BASE_PATH = os.path.join(
"/usr", "share", "tripleo-common", "container-images"
)
DEFAULT_AUTHFILE = "{}/containers/auth.json".format(
os.environ.get("XDG_RUNTIME_DIR", os.path.expanduser("~"))
)
DEFAULT_ENV_AUTHFILE = os.environ.get("REGISTRY_AUTH_FILE", DEFAULT_AUTHFILE)
DEFAULT_CONFIG = "tripleo_containers.yaml"
DEFAULT_TCIB_CONFIG_BASE = "tcib"
class Build(command.Command):
"""Build tripleo container images with tripleo-ansible."""
auth_required = False
log = logging.getLogger(__name__ + ".Build")
identified_images = list()
image_parents = dict()
image_paths = dict()
def get_parser(self, prog_name):
parser = super(Build, self).get_parser(prog_name)
parser.add_argument(
"--authfile",
dest="authfile",
metavar="<authfile>",
default=DEFAULT_ENV_AUTHFILE,
help=_(
"Path of the authentication file. Use REGISTRY_AUTH_FILE "
"environment variable to override. (default: %(default)s)"
),
)
parser.add_argument(
"--base",
dest="base",
metavar="<base-image>",
default="ubi8",
help=_(
"Base image name, with optional version. Can be 'centos:8', "
"base name image will be 'centos' but 'centos:8' will be "
"pulled to build the base image. (default: %(default)s)"
),
)
parser.add_argument(
"--config-file",
dest="config_file",
metavar="<config-file>",
default=DEFAULT_CONFIG,
help=_(
"YAML config file specifying the images to build. "
"(default: %(default)s)"
),
)
parser.add_argument(
"--config-path",
dest="config_path",
metavar="<config-path>",
default=BASE_PATH,
help=_(
"Base configuration path. This is the base path for all "
"container-image files. If this option is set, the "
"default path for <config-file> will be modified. "
"(default: %(default)s)"
),
)
parser.add_argument(
"--distro",
dest="distro",
default="centos",
metavar="<distro>",
help=_(
"Distro name, if undefined the system will build using the "
"host distro. (default: %(default)s)"
),
)
parser.add_argument(
"--exclude",
dest="excludes",
metavar="<container-name>",
default=[],
action="append",
help=_(
"Name of one container to match against the list of "
"containers to be built to skip. Should be specified "
"multiple times when skipping multiple containers. "
"(default: %(default)s)"
),
)
parser.add_argument(
"--extra-config",
dest="extra_config",
metavar="<extra-config>",
help=_(
"Apply additional options from a given configuration YAML "
"file. This will apply to all containers built. "
"(default: %(default)s)"
),
)
parser.add_argument(
"--namespace",
dest="namespace",
metavar="<registry-namespace>",
default="tripleotrain",
help=_("Container registry namespace (default: %(default)s)"),
)
parser.add_argument(
"--registry",
dest="registry",
metavar="<registry-url>",
default="localhost",
help=_("Container registry URL (default: %(default)s)"),
)
parser.add_argument(
"--skip-build",
dest="skip_build",
default=False,
action="store_true",
help=_(
"Skip or not the build of the images (default: %(default)s)"
),
)
parser.add_argument(
"--tag",
dest="tag",
metavar="<image-tag>",
default="latest",
help=_("Image tag (default: %(default)s)"),
)
parser.add_argument(
"--prefix",
dest="prefix",
metavar="<image-prefix>",
default="openstack",
help=_("Image prefix. (default: %(default)s)"),
)
parser.add_argument(
"--push",
dest="push",
default=False,
action="store_true",
help=_(
"Enable image push to a given registry. "
"(default: %(default)s)"
),
)
parser.add_argument(
"--volume",
dest="volumes",
metavar="<volume-path>",
default=[
"/etc/yum.repos.d:/etc/yum.repos.d:z",
"/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z",
],
action="append",
help=_(
"Container bind mount used when building the image. Should "
"be specified multiple times if multiple volumes."
"(default: %(default)s)"
),
)
parser.add_argument(
"--work-dir",
dest="work_dir",
metavar="<work-directory>",
default="/tmp/container-builds",
help=_(
"TripleO container builds directory, storing configs and "
"logs for each image and its dependencies. "
"(default: %(default)s)"
),
)
return parser
def imagename_to_regex(self, imagename):
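# Reduce a full image reference to its short service name, e.g.
# "test/centos-binary-keystone:tag" -> "keystone" (see test_image_regex).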
if not imagename:
return
# remove any namespace from the start
imagename = imagename.split("/")[-1]
# remove any tag from the end
imagename = imagename.split(":")[0]
# remove supported base names from the start
imagename = re.sub(r"^(openstack|centos|rhel|ubi8)-", "", imagename)
# remove install_type from the start
imagename = re.sub(r"^(binary|source|rdo|rhos)-", "", imagename)
# what results should be acceptable as a regex to build one image
return imagename
def build_tree(self, path, tree=""):
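# Returns a nested list/dict structure mirroring the directory layout
# under `path` (see test_build_tree for the expected shape); the result
# is later written to build-tree.yaml and passed to BuildahBuilder.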
content = []
path = os.path.join(path, tree)
(cur_path, children, _) = next(os.walk(path))
for child in children:
val = self.build_tree(cur_path, child)
if val:
content.append(val)
if content:
if tree:
return {tree: content}
else:
return content
return tree
def index_images(self, path):
for root, __, files in os.walk(path):
if [i for i in files if i.endswith(("yaml", "yml"))]:
self.identified_images.append(os.path.basename(root))
def find_image(self, name, path, base_image):
"""Find an image and load its config.
This will traverse a directory structure looking for an image
directory; when found, all configs will be loaded lexically and
returned as a single dictionary.
:param name: Container name.
:type name: String.
:param path: Directory path to traverse.
:type path: String.
:param base_image: Name of the base container image.
:type base_image: String.
:returns: Dictionary
"""
container_vars = dict()
for root, dirs, files in os.walk(path):
if os.path.basename(root) == name:
for file_name in sorted(files):
if file_name.endswith(("yaml", "yml")):
_option_file = os.path.join(root, file_name)
self.log.debug(
"reading option file: {}".format(_option_file)
)
with open(_option_file) as f:
_options = yaml.safe_load(f)
if _options:
container_vars.update(_options)
base_dir = root
while base_dir != os.sep:
base_dir = os.path.dirname(base_dir)
base_files = [
i
for i in os.listdir(base_dir)
if i.endswith(("yaml", "yml"))
]
if base_files:
self.image_parents[name] = os.path.basename(
base_dir
)
break
else:
self.image_parents[name] = base_image
else:
return container_vars
def rectify_excludes(self, images_to_prepare):
"""Build a dynamic exclude list.
Using the identified images, we check against our expected images
to build a dynamic exclusion list which will extend the user provided
excludes.
:param images_to_prepare: List of expected images.
:type images_to_prepare: List.
:returns: List
"""
excludes = list()
for image in self.identified_images:
if image not in images_to_prepare:
excludes.append(image)
else:
return excludes
def make_dir_tree(self, tree, work_dir):
"""Walk the tree then create and catalog all directories.
As the tree is walked, containers are identified, directories are
created and the Containerfile image relationship is recorded for later
lookup.
:param tree: List of expected images.
:type tree: List.
:param work_dir: Work directory path.
:type work_dir: String.
"""
if isinstance(tree, list):
for item in tree:
self.make_dir_tree(tree=item, work_dir=work_dir)
elif isinstance(tree, dict):
for key, value in tree.items():
self.image_paths[key] = os.path.join(work_dir, key)
utils.makedirs(dir_path=self.image_paths[key])
self.make_dir_tree(tree=value, work_dir=self.image_paths[key])
elif isinstance(tree, six.string_types):
self.image_paths[tree] = os.path.join(work_dir, tree)
utils.makedirs(dir_path=self.image_paths[tree])
def take_action(self, parsed_args):
self.config_file = os.path.expanduser(parsed_args.config_file)
self.config_path = os.path.expanduser(parsed_args.config_path)
authfile = os.path.expanduser(parsed_args.authfile)
if os.path.exists(authfile):
os.environ["REGISTRY_AUTH_FILE"] = authfile
else:
try:
del os.environ["REGISTRY_AUTH_FILE"]
except KeyError:
pass
self.tcib_config_path = os.path.join(
self.config_path, DEFAULT_TCIB_CONFIG_BASE
)
if not os.path.isdir(self.tcib_config_path):
raise IOError(
"Configuration directory {} was not found.".format(
self.tcib_config_path
)
)
if not os.path.isfile(self.config_file):
self.config_file = os.path.join(
os.path.dirname(self.tcib_config_path),
parsed_args.config_file,
)
if not os.path.isfile(self.config_file):
raise IOError(
"Configuration file {} was not found.".format(
self.config_file
)
)
self.log.debug("take_action({})".format(parsed_args))
excludes = parsed_args.excludes
images_to_prepare = list()
# Generate a unique work directory so we can keep configs and logs
# each time we run the command; they'll be stored in work_dir.
work_dir = os.path.join(parsed_args.work_dir, str(uuid.uuid4()))
# Build a tree of images which have a config; this tree will allow us
# to concurrently build images which share a common base.
if not os.path.isdir(self.tcib_config_path):
raise NotFound(
"The path {path} does not exist".format(
path=self.tcib_config_path
)
)
images_tree = self.build_tree(path=self.tcib_config_path)
tree_file = "{tree_file}".format(
tree_file=os.path.join(work_dir, "build-tree.yaml")
)
utils.makedirs(os.path.dirname(tree_file))
with open(tree_file, "w") as f:
yaml.safe_dump(
images_tree, f, default_flow_style=False, width=4096
)
self.index_images(path=self.tcib_config_path)
self.make_dir_tree(tree=images_tree, work_dir=work_dir)
# Make sure the unique work directory exists
if not os.path.exists(work_dir):
self.log.debug(
"Creating container builds workspace in: {}".format(work_dir)
)
utils.makedirs(work_dir)
with open(self.config_file, "r") as f:
containers_yaml = yaml.safe_load(f)
for c in containers_yaml["container_images"]:
entry = dict(c)
if not entry.get("image_source", "") == "tripleo":
continue
image = self.imagename_to_regex(entry.get("imagename"))
if image and image not in excludes:
images_to_prepare.append(image)
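# Each image to prepare becomes a host in an ad-hoc Ansible inventory;
# its computed config is stored as host vars so the generation playbook
# below can render every container file in a single run.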
tcib_inventory = {"all": {"hosts": {}}}
tcib_inventory_hosts = tcib_inventory["all"]["hosts"]
for image in images_to_prepare:
image_config = self.find_image(
image, self.tcib_config_path, parsed_args.base
)
self.log.debug("processing image config {}".format(image))
if image == "base":
image_name = image_from = parsed_args.base
else:
image_name = self.image_parents.get(image, image)
image_from = (
"{registry}/{namespace}"
"/{prefix}-{image}:{tag}".format(
registry=parsed_args.registry,
namespace=parsed_args.namespace,
prefix=parsed_args.prefix,
image=image_name,
tag=parsed_args.tag,
)
)
image_parsed_name = self.imagename_to_regex(imagename=image)
# For each image we will generate Dockerfiles in the work_dir
# following a specific directory structure per image
image_config.update(
{
"workdir": self.image_paths.get(image, work_dir),
"tcib_distro": parsed_args.distro,
"tcib_path": self.image_paths.get(image, work_dir),
"tcib_meta": {"name": image_parsed_name},
"ansible_connection": "local",
}
)
# NOTE(cloudnull): Check if the reference config has a valid
# "from" option. If the reference "from"
# option is valid, it will be used.
image_config["tcib_from"] = image_config.get(
"tcib_from",
image_from
)
tcib_inventory_hosts[image_parsed_name] = image_config
var_file = "{image_name}.yaml".format(
image_name=os.path.join(
image_config["tcib_path"], image_parsed_name,
)
)
utils.makedirs(os.path.dirname(var_file))
with open(var_file, "w") as f:
yaml.safe_dump(
image_config, f, default_flow_style=False, width=4096
)
with utils.TempDirs() as tmp:
playbook = os.path.join(tmp, "tripleo-multi-playbook.yaml")
playdata = [
{
"name": "Generate localhost facts",
"connection": "local",
"hosts": "localhost",
"gather_facts": True,
}
]
generation_playbook = {
"name": "Generate container file(s)",
"connection": "local",
"hosts": "all",
"gather_facts": False,
"roles": [{"role": "tripleo-container-image-build"}],
}
if parsed_args.extra_config:
if not os.path.exists(parsed_args.extra_config):
raise IOError(
"The file provided by <options-apply> does not "
"exist, check you settings and try again."
)
else:
with open(parsed_args.extra_config) as f:
generation_playbook["vars"] = yaml.safe_load(f)
playdata.append(generation_playbook)
with open(playbook, "w") as f:
yaml.safe_dump(
playdata, f, default_flow_style=False, width=4096
)
inventory_file = os.path.join(tmp, 'inventory.yaml')
with open(inventory_file, "w") as f:
yaml.safe_dump(
tcib_inventory, f, default_flow_style=False, width=4096
)
utils.ansible_symlink()
utils.run_ansible_playbook(
logger=self.log,
workdir=tmp,
playbook=playbook,
inventory=inventory_file,
log_path_dir=tmp,
extra_vars={
"ANSIBLE_FORKS": len(tcib_inventory_hosts.keys())
},
gathering_policy="smart"
)
# Ensure anything not intended to be built is excluded
excludes.extend(self.rectify_excludes(images_to_prepare))
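# The image tree computed earlier is handed to BuildahBuilder as its
# dependency graph so images sharing a common base can be built
# concurrently, honoring the merged exclude list.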
if not parsed_args.skip_build:
bb = buildah.BuildahBuilder(
work_dir=work_dir,
deps=images_tree,
base=parsed_args.prefix,
img_type=False,
tag=parsed_args.tag,
namespace=parsed_args.namespace,
registry_address=parsed_args.registry,
push_containers=parsed_args.push,
volumes=parsed_args.volumes,
excludes=list(set(excludes)),
)
try:
bb.build_all()
except SystemError as exp:
self.log.error(
"Buildah failed with the following error: {}".format(exp)
)