Retire Sahara: remove repo content

Sahara project is retiring
- https://review.opendev.org/c/openstack/governance/+/919374

This commit removes the content of this project's repo.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/919376
Change-Id: I46e97281ceab80a0a0ecbebd861f85ef4deaac65
This commit is contained in:
Ghanshyam Mann 2024-05-10 17:33:46 -07:00
parent 2bfc66f176
commit 9dad58b05a
193 changed files with 8 additions and 5482 deletions

30
.gitignore vendored
View File

@ -1,30 +0,0 @@
*.py[co]
*.egg
*.egg-info
dist
build
eggs
parts
var
sdist
develop-eggs
.installed.cfg
pip-log.txt
.tox
*.mo
.mr.developer.cfg
.DS_Store
Thumbs.db
.venv
.idea
out
target
*.iml
*.ipr
*.iws
*.db
.coverage
ChangeLog
AUTHORS
*.qcow2
diskimage-create.*

View File

@ -1,5 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
Ivan Berezovskiy <iberezovskiy@mirantis.com>
Sergey Lukjanov <slukjanov@mirantis.com> <me@frostman.ru>

View File

@ -1,73 +0,0 @@
- project:
templates:
- check-requirements
check:
jobs:
- openstack-tox-pep8
- sahara-image-elements-buildimages-ambari:
voting: false
- sahara-image-elements-buildimages-cloudera:
voting: false
- sahara-image-elements-buildimages-mapr:
voting: false
- sahara-image-elements-buildimages-plain:
voting: false
- sahara-image-elements-buildimages-spark:
voting: false
- sahara-image-elements-buildimages-storm:
voting: false
- sahara-image-elements-buildimages-vanilla:
voting: false
gate:
jobs:
- openstack-tox-pep8
post:
jobs:
- publish-openstack-python-branch-tarball
- job:
name: sahara-image-elements-buildimages-basejob
timeout: 5400
run: playbooks/buildimages/run.yaml
- job:
name: sahara-image-elements-buildimages-ambari
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: ambari
- job:
name: sahara-image-elements-buildimages-cloudera
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: cloudera
- job:
name: sahara-image-elements-buildimages-mapr
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: mapr
- job:
name: sahara-image-elements-buildimages-plain
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: plain
- job:
name: sahara-image-elements-buildimages-spark
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: spark
- job:
name: sahara-image-elements-buildimages-storm
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: storm
- job:
name: sahara-image-elements-buildimages-vanilla
parent: sahara-image-elements-buildimages-basejob
vars:
sahara_plugin: vanilla

View File

@ -1,19 +0,0 @@
The source repository for this project can be found at:
https://opendev.org/openstack/sahara-image-elements
Pull requests submitted through GitHub are not monitored.
To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:
https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
Bugs should be filed on Storyboard:
https://storyboard.openstack.org/#!/project/openstack/sahara-image-elements
For more specific information about contributing to this repository, see the
sahara contributor guide:
https://docs.openstack.org/sahara/latest/contributor/contributing.html

View File

@ -1,12 +0,0 @@
Sahara Style Commandments
==========================
- Step 1: Read the OpenStack Style Commandments
https://docs.openstack.org/hacking/latest/
- Step 2: Read on
Sahara Specific Commandments
-----------------------------
None so far

175
LICENSE
View File

@ -1,175 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@ -1,2 +0,0 @@
include README.md
graft elements

View File

@ -1,21 +1,10 @@
========================
Team and repository tags
========================
This project is no longer maintained.
.. image:: https://governance.openstack.org/tc/badges/sahara-image-elements.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
.. Change things from this point on
Sahara image elements project
==============================
This repo is a place for Sahara-related for diskimage-builder elements.
Script for creating Fedora and Ubuntu cloud images with our elements and default parameters. You should only need to run this command:
.. sourcecode:: bash
tox -e venv -- sahara-image-create
Note: More information about script `diskimage-create <https://opendev.org/openstack/sahara-image-elements/src/branch/master/diskimage-create/README.rst>`_
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

View File

@ -1,16 +0,0 @@
#!/bin/bash
# Wrapper for diskimage-create.sh: locate the bundled elements directory
# (installed share layout, virtualenv-in-checkout layout, or plain source
# tree), export it via ELEMENTS_PATH, and forward all CLI arguments.
set -eu
set -o pipefail

# Absolute directory of this script, with symlinks resolved.
# Bug fix: the original left $0 / the command substitution unquoted, which
# breaks when the install path contains whitespace.
SCRIPT_HOME=$(dirname "$(readlink -f "$0")")

if [ -d "$SCRIPT_HOME/../share/sahara-elements" ]; then
    # Installed layout: bin/ sits next to share/sahara-elements/.
    _PREFIX=$SCRIPT_HOME/../share/sahara-elements
elif [ -d "$SCRIPT_HOME/../../../elements" ]; then
    # Running from a virtualenv created inside a source checkout.
    _PREFIX=$SCRIPT_HOME/../../..
else
    # Plain source tree: elements/ lives one level up.
    _PREFIX=$SCRIPT_HOME/..
fi

export ELEMENTS_PATH=$_PREFIX/elements

# Bug fix: "$@" (not bare $@) preserves argument word boundaries, so
# arguments containing whitespace are forwarded intact.
diskimage-create.sh "$@"

View File

@ -1,137 +0,0 @@
Diskimage-builder script for creation cloud images
==================================================
This script builds Ubuntu, Fedora, CentOS cloud images for use in Sahara.
By default all plugins are targeted and all images will be built. The '-p'
option can be used to select plugin (vanilla, spark, cloudera, storm, mapr,
ambari, or plain). The '-i' option can be used to select image type (ubuntu,
fedora, or centos7). The '-v' option can be used to select hadoop
version.
For users:
1. Use your environment (export / setenv) to alter the scripts behavior.
Environment variables the script accepts are 'DIB_HADOOP_VERSION_2_7_1',
'JAVA_DOWNLOAD_URL', 'JAVA_TARGET_LOCATION', 'OOZIE_DOWNLOAD_URL',
'HIVE_VERSION',
'[ubuntu|fedora|centos7]_vanilla_hadoop_2_7_1_image_name',
'ubuntu_spark_image_name', 'ubuntu_storm_image_name',
'ambari_[ubuntu|centos7]_image_name',
'cloudera_[5_7|5_9|5_11]_[ubuntu|centos7]_image_name',
'mapr_[ubuntu|centos7]_image_name',
'plain_[ubuntu|fedora|centos7]_image_name'.
2. For creating all images just clone this repository and run script.
.. sourcecode:: bash
tox -e venv -- sahara-image-create
3. If you want to use your local mirrors, you should specify http urls for
Fedora, CentOS and Ubuntu mirrors using parameters 'FEDORA_MIRROR',
'CENTOS_MIRROR' and 'UBUNTU_MIRROR' like this:
.. sourcecode:: bash
USE_MIRRORS=true FEDORA_MIRROR="url_for_fedora_mirror" \
CENTOS_MIRROR="url_for_centos_mirror" \
UBUNTU_MIRROR="url_for_ubuntu_mirror" tox -e venv -- sahara-image-create
If you want to use your local image, you can specify path of image file using
parameters 'DIB_LOCAL_IMAGE', which defined in project `[diskimage-builder]
(https://github.com/openstack/diskimage-builder)`, like this:
.. sourcecode:: bash
DIB_LOCAL_IMAGE="path_of_image" tox -e venv -- sahara-image-create
NOTE: Do not create all images for all plugins with the same mirrors.
Different plugins use different OS versions.
4. To select which plugin to target use the '-p' commandline option like this:
.. sourcecode:: bash
tox -e venv -- sahara-image-create -p [vanilla|spark|cloudera|storm|mapr|ambari|plain]
5. To select which hadoop version to target use the '-v' commandline option
like this:
.. sourcecode:: bash
tox -e venv -- sahara-image-create -v [2.7.1|5.5|5.7|5.9]
Also, if you are planning to select which ambari version to target use the
'-v' commandline option like this:
.. sourcecode:: bash
tox -e venv -- sahara-image-create -v [2.2.0.0|2.2.1.0|2.4.2.0]
6. To select which operating system to target use the '-i' commandline option
like this:
.. sourcecode:: bash
tox -e venv -- sahara-image-create -i [ubuntu|fedora|centos7]
7. To select which Spark version to target use the '-s' commandline option
like this:
.. sourcecode:: bash
tox -e venv -- sahara-image-create -p spark -s [1.3.1|1.6.0|2.1.0|2.2.0] # spark standalone
tox -e venv -- sahara-image-create -p vanilla -v 2.7.1 -s [1.6.0|2.1.0|2.2.0] # spark on vanilla
8. To select which MapR version to target use the '-r' commandline option like
this:
.. sourcecode:: bash
tox -e venv -- sahara-image-create -p mapr -r [5.1.0|5.2.0]
9. If the host system is missing packages required for sahara-image-create,
the '-u' commandline option will instruct the script to install them without
prompt.
NOTE for 4, 5, 6:
For Vanilla you can create ubuntu, fedora and centos7 cloud image with 2.x.x
versions. Use environment variables 'DIB_HADOOP_VERSION_2' to change defaults.
For Spark you can create only ubuntu images, so you shouldn't specify an image
type. The default Spark and HDFS versions included in the build are tested and
known working together with the Sahara Spark plugin, other combinations should
be used only for evaluation or testing purposes. You can select a different
Spark version with commandline option '-s' and Hadoop HDFS version with '-v',
but only Cloudera CDH versions are available for now. For Cloudera you can
create ubuntu and centos7 images with preinstalled cloudera hadoop. You
shouldn't specify hadoop version. You can create centos7, ubuntu, fedora images
without hadoop ('plain' image)
NOTE for CentOS images (for vanilla, ambari, and cloudera plugins):
Resizing disk space during first boot on those images fails with errors
(https://storyboard.openstack.org/#!/story/1304100). So, you will get an instance
that will have a small available disk space. To solve this problem we build
images with 10G available disk space by default. If you need more available
disk space you should export parameter DIB_IMAGE_SIZE:
.. sourcecode:: bash
DIB_IMAGE_SIZE=40 tox -e venv -- sahara-image-create -i centos7
For all other images parameter DIB_IMAGE_SIZE will be unset.
`DIB_CLOUD_INIT_DATASOURCES` contains a growing collection of data source
modules and most are enabled by default. This causes cloud-init to query each
data source on first boot. This can cause delays or even boot problems
depending on your environment. You must define `DIB_CLOUD_INIT_DATASOURCES` as
a comma-separated list of valid data sources to limit the data sources that
will be queried for metadata on first boot.
For developers:
If you want to add your element to this repository, you should edit this
script in your commit (you should export variables for your element and add
name of element to variables 'element_sequence').

View File

@ -1,864 +0,0 @@
#!/bin/bash
# dummy change
set -e

# Remember the user-requested image size, then unset the DIB variable so it
# does not automatically apply to every build; it is re-exported selectively
# later for the image types that need a fixed size.
export IMAGE_SIZE=$DIB_IMAGE_SIZE
# This will unset parameter DIB_IMAGE_SIZE for Ubuntu and Fedora vanilla images
unset DIB_IMAGE_SIZE

# DEBUG_MODE is set by the -d flag, debug is enabled if the value is "true"
DEBUG_MODE="false"

# The default version for a MapR plugin
DIB_DEFAULT_MAPR_VERSION="5.2.0"
# The default version for Spark plugin
DIB_DEFAULT_SPARK_VERSION="2.3.0"
# The default version for Storm plugin
DIB_DEFAULT_STORM_VERSION="1.2.1"

# Bare metal image generation is enabled with the -b flag, it is off by default
SIE_BAREMETAL="false"

# Default list of datasource modules for ubuntu. Workaround for bug #1375645
export CLOUD_INIT_DATASOURCES=${DIB_CLOUD_INIT_DATASOURCES:-"NoCloud, ConfigDrive, OVF, MAAS, Ec2"}

# Tracing control: accumulates "-x" (via the -x flag) so tracing can be
# forwarded to disk-image-create invocations.
TRACING=
#######################################
# Print the command-line help text to stdout.
# Globals (read): DIB_DEFAULT_MAPR_VERSION, DIB_DEFAULT_SPARK_VERSION,
#                 DIB_DEFAULT_STORM_VERSION (advertised defaults).
# Bug fixes: the synopsis was missing "[-b]" even though -b is parsed and
# described below, and the description section was missing the '-t'
# (Storm version) line even though "[-t ...]" is listed and parsed.
#######################################
usage() {
    echo
    echo "Usage: $(basename $0)"
    echo " [-p vanilla|spark|cloudera|storm|mapr|ambari|plain]"
    echo " [-i ubuntu|fedora|centos7]"
    echo " [-v 2.7.1|2.7.5|2.8.2|3.0.1|5.5|5.7|5.9|5.11|2.2.0.0|2.2.1.0|2.4.2.0]"
    echo " [-r 5.1.0|5.2.0]"
    echo " [-s 1.6.0|2.1.0|2.2.0|2.3.0]"
    echo " [-t 1.0.1|1.1.0|1.1.1|1.2.0|1.2.1]"
    echo " [-f qcow2|raw]"
    echo " [-d]"
    echo " [-u]"
    echo " [-j openjdk|oracle-java]"
    echo " [-x]"
    echo " [-b]"
    echo " [-h]"
    echo " '-p' is plugin version (default: all plugins)"
    echo " '-i' is operating system of the base image (default: all non-deprecated"
    echo " by plugin)."
    echo " '-v' is hadoop version (default: all supported by plugin)"
    echo " '-r' is MapR Version (default: ${DIB_DEFAULT_MAPR_VERSION})"
    echo " '-s' is Spark version (default: ${DIB_DEFAULT_SPARK_VERSION})"
    echo " '-t' is Storm version (default: ${DIB_DEFAULT_STORM_VERSION})"
    echo " '-f' is the image format (default: qcow2)"
    echo " '-d' enable debug mode, root account will have password 'hadoop'"
    echo " '-u' install missing packages necessary for building"
    echo " '-j' is java distribution (default: openjdk)"
    echo " '-x' turns on tracing"
    echo " '-b' generate a bare metal image"
    echo " '-h' display this message"
    echo
    echo "You shouldn't specify image type for spark plugin"
    echo "You shouldn't specify hadoop version for plain images"
    echo "Debug mode should only be enabled for local debugging purposes, not for production systems"
    echo "By default all images for all plugins will be created"
    echo
}
# Parse command-line flags; see usage() for the meaning of each option.
# Every flag only records its value here; validation happens further down.
while getopts "p:i:v:f:dur:s:t:j:xhb" opt; do
    case $opt in
        p)
            PLUGIN=$OPTARG
            ;;
        i)
            BASE_IMAGE_OS=$OPTARG
            ;;
        v)
            HADOOP_VERSION=$OPTARG
            ;;
        d)
            DEBUG_MODE="true"
            ;;
        r)
            DIB_MAPR_VERSION=$OPTARG
            ;;
        s)
            DIB_SPARK_VERSION=$OPTARG
            ;;
        t)
            DIB_STORM_VERSION=$OPTARG
            ;;
        f)
            # Stored with the flag included ("-t qcow2") so it can be
            # spliced unquoted into the disk-image-create command line.
            IMAGE_FORMAT="-t $OPTARG"
            ;;
        u)
            DIB_UPDATE_REQUESTED=true
            ;;
        j)
            JAVA_ELEMENT=$OPTARG
            ;;
        x)
            # Accumulate "-x" so tracing is also forwarded to
            # disk-image-create, and enable tracing here as well.
            TRACING="$TRACING -x"
            set -x
            ;;
        b)
            SIE_BAREMETAL="true"
            ;;
        h)
            usage
            exit 0
            ;;
        *)
            usage
            exit 1
            ;;
    esac
done
shift $((OPTIND-1))

# Any leftover positional argument is an error: this script takes flags only.
if [ "$1" ]; then
    usage
    exit 1
fi
# Default java distribution when -j was not given.
JAVA_ELEMENT=${JAVA_ELEMENT:-"openjdk"}

# Detect the host distribution ($platform); used later to choose the right
# package manager and required-package names.
if [ -e /etc/os-release ]; then
    platform=$(cat /etc/os-release | awk -F= '/^ID=/ {print tolower($2);}')
    # remove eventual quotes around ID=...
    platform=$(echo $platform | sed -e 's,^",,;s,"$,,')
elif [ -e /etc/system-release ]; then
    # Older RHEL/CentOS without /etc/os-release.
    case "$(head -1 /etc/system-release)" in
        "Red Hat Enterprise Linux Server"*)
            platform=rhel
            ;;
        "CentOS"*)
            platform=centos
            ;;
        *)
            echo -e "Unknown value in /etc/system-release. Impossible to build images.\nAborting"
            exit 2
            ;;
    esac
else
    echo -e "Unknown host OS. Impossible to build images.\nAborting"
    exit 2
fi

# Checks of input
# Debug mode (root password baked into the image) is known to break on
# SELinux-enforcing hosts; refuse early rather than failing mid-build.
if [ "$DEBUG_MODE" = "true" -a "$platform" != 'ubuntu' ]; then
    if [ "$(getenforce)" != "Disabled" ]; then
        echo "Debug mode cannot be used from this platform while SELinux is enabled, see https://storyboard.openstack.org/#!/story/1292614"
        exit 1
    fi
fi
check_spark_version () {
    # Validate the global DIB_SPARK_VERSION, falling back to
    # DIB_DEFAULT_SPARK_VERSION when the user did not pick one.
    # Exits the whole script on an unsupported version.
    if [ -z "$DIB_SPARK_VERSION" ]; then
        echo "Spark version not specified"
        echo "Spark ${DIB_DEFAULT_SPARK_VERSION} will be used"
        DIB_SPARK_VERSION=${DIB_DEFAULT_SPARK_VERSION}
        return 0
    fi
    case "$DIB_SPARK_VERSION" in
        "1.6.0" | "2.1.0" | "2.2.0" | "2.3.0")
            ;;
        *)
            echo -e "Unknown Spark version selected.\nAborting"
            exit 1
            ;;
    esac
}
# Validate the requested plugin / base OS / version combination and fill in
# plugin-specific defaults before any image gets built.
case "$PLUGIN" in
    "");;
    "vanilla")
        case "$HADOOP_VERSION" in
            "" | "2.7.1" | "2.7.5" | "2.8.2" | "3.0.1");;
            *)
                echo -e "Unknown hadoop version selected.\nAborting"
                exit 1
                ;;
        esac
        case "$BASE_IMAGE_OS" in
            "" | "ubuntu" | "fedora" | "centos7");;
            *)
                echo -e "'$BASE_IMAGE_OS' image type is not supported by '$PLUGIN'.\nAborting"
                exit 1
                ;;
        esac
        # vanilla images also bundle standalone Spark, so the Spark
        # version has to be valid too.
        check_spark_version
        ;;
    "cloudera")
        case "$BASE_IMAGE_OS" in
            "" | "ubuntu" | "centos7");;
            *)
                echo -e "'$BASE_IMAGE_OS' image type is not supported by '$PLUGIN'.\nAborting"
                exit 1
                ;;
        esac
        case "$HADOOP_VERSION" in
            "" | "5.5" | "5.7" | "5.9" | "5.11");;
            *)
                echo -e "Unknown hadoop version selected.\nAborting"
                exit 1
                ;;
        esac
        if [ "$BASE_IMAGE_OS" = "centos7" ]; then
            if [ ! -z "$HADOOP_VERSION" -a ! "$HADOOP_VERSION" = "5.5" -a ! "$HADOOP_VERSION" = "5.7" -a ! "$HADOOP_VERSION" = "5.9" -a ! "$HADOOP_VERSION" = "5.11" ]; then
                echo -e "Unsupported version combination, Centos 7 can only be used with CDH 5.5 or higher version"
                exit 1
            fi
        fi
        if [ -n "$DIB_CDH_MINOR_VERSION" ]; then
            echo -e "Continuing image building with custom CDH version: \"$DIB_CDH_MINOR_VERSION\".\n"
        fi
        ;;
    "spark")
        case "$BASE_IMAGE_OS" in
            "" | "ubuntu");;
            *)
                echo -e "'$BASE_IMAGE_OS' image type is not supported by '$PLUGIN'.\nAborting"
                exit 1
                ;;
        esac
        check_spark_version
        ;;
    "storm")
        case "$BASE_IMAGE_OS" in
            "" | "ubuntu");;
            *)
                echo -e "'$BASE_IMAGE_OS' image type is not supported by '$PLUGIN'.\nAborting"
                exit 1
                ;;
        esac
        case "$DIB_STORM_VERSION" in
            "1.0.1" | "1.1.0" | "1.1.1" | "1.2.0" | "1.2.1");;
            "")
                echo "Storm version not specified"
                echo "Storm ${DIB_DEFAULT_STORM_VERSION} will be used"
                DIB_STORM_VERSION=${DIB_DEFAULT_STORM_VERSION}
                ;;
            *)
                echo -e "Unknown Storm version selected.\nAborting"
                exit 1
                ;;
        esac
        if [ -n "$HADOOP_VERSION" ]; then
            echo -e "You shouldn't specify hadoop version for '$PLUGIN'.\nAborting"
            exit 1
        fi
        ;;
    "ambari")
        case "$BASE_IMAGE_OS" in
            "" | "centos7" | "ubuntu" )
                ;;
            * )
                # Bug fix: this echo was missing the -e flag, so the
                # embedded \n printed literally instead of as a newline
                # (every other error path in this script uses echo -e).
                echo -e "\"$BASE_IMAGE_OS\" image type is not supported by \"$PLUGIN\".\nAborting"
                exit 1
                ;;
        esac
        case "$HADOOP_VERSION" in
            "" | "2.2.0.0" | "2.2.1.0" | "2.4.2.0");;
            *)
                # Unknown Ambari versions are allowed, just announced.
                echo -e "Continuing image building with custom ambari version \"$HADOOP_VERSION\"\n"
                ;;
        esac
        ;;
    "mapr")
        case "$BASE_IMAGE_OS" in
            "" | "ubuntu" | "centos7");;
            *)
                echo -e "'$BASE_IMAGE_OS' image type is not supported by '$PLUGIN'.\nAborting"
                exit 1
                ;;
        esac
        if [ -n "$HADOOP_VERSION" ]; then
            echo -e "You shouldn't specify hadoop version for 'mapr'.\nAborting"
            exit 1
        fi
        case "$DIB_MAPR_VERSION" in
            "")
                echo "MapR version is not specified"
                echo "${DIB_DEFAULT_MAPR_VERSION} version would be used"
                DIB_MAPR_VERSION=${DIB_DEFAULT_MAPR_VERSION}
                ;;
            "5.1.0" | "5.2.0");;
            *)
                echo -e "Unknown MapR version.\nExit"
                exit 1
                ;;
        esac
        ;;
    "plain")
        case "$BASE_IMAGE_OS" in
            "" | "ubuntu" | "fedora" | "centos7");;
            *)
                echo -e "'$BASE_IMAGE_OS' image type is not supported by '$PLUGIN'.\nAborting"
                exit 1
                ;;
        esac
        if [ -n "$HADOOP_VERSION" ]; then
            echo -e "You shouldn't specify hadoop version for '$PLUGIN'.\nAborting"
            exit 1
        fi
        ;;
    *)
        echo -e "Unknown plugin selected.\nAborting"
        exit 1
esac

# -r only makes sense together with the MapR plugin.
if [ "$PLUGIN" != "mapr" -a -n "$DIB_MAPR_VERSION" ]; then
    echo -e "'-r' parameter should be used only with 'mapr' plugin.\nAborting"
    exit 1
fi

# Only the two known java distributions are supported.
if [ "$JAVA_ELEMENT" != "openjdk" -a "$JAVA_ELEMENT" != "oracle-java" ]; then
    echo "Unknown java distro"
    exit 1
fi
#################
is_installed() {
    # Return success iff package $1 is present on the host, using the
    # package manager matching $platform (set during host detection).
    local pkg=$1
    case "$platform" in
        ubuntu|debian)
            dpkg -s "$pkg" &> /dev/null
            ;;
        *)
            # centos, fedora, opensuse, or rhel: when the direct rpm query
            # misses, retry via whatever package provides the name.
            rpm -q "$pkg" &> /dev/null || rpm -q "$(rpm -q --whatprovides "$pkg")"
            ;;
    esac
}
#######################################
# Determine the host packages required for image building on this platform
# and check whether any are missing.
# Globals: platform (read); package_list (WRITTEN - consumed later by the
#          package-installation step, so it must stay global).
# Returns: 0 if at least one required package is missing, 1 if all present.
#######################################
need_required_packages() {
    case "$platform" in
        "ubuntu" | "debian")
            package_list="qemu-utils kpartx git"
            ;;
        "fedora")
            package_list="qemu-img kpartx git"
            ;;
        "opensuse")
            package_list="qemu kpartx git-core"
            ;;
        "rhel" | "centos")
            package_list="qemu-kvm qemu-img kpartx git"
            ;;
        *)
            echo -e "Unknown platform '$platform' for the package list.\nAborting"
            exit 2
            ;;
    esac
    # Intentional word splitting: package_list is a space-separated string.
    # (Idiom fix: replaces the original useless `echo` in backticks.)
    for p in $package_list; do
        if ! is_installed "$p"; then
            echo "Package $p is not installed on the system."
            return 0
        fi
    done
    return 1
}
# Install any missing build dependencies, but only when the user opted in
# with -u; otherwise report what is missing and bail out.
if need_required_packages; then
    # install required packages if requested
    if [ -n "$DIB_UPDATE_REQUESTED" ]; then
        case "$platform" in
            "ubuntu" | "debian")
                sudo apt-get update
                sudo apt-get install $package_list -y
                ;;
            "opensuse")
                sudo zypper --non-interactive --gpg-auto-import-keys in $package_list
                ;;
            *)
                # NOTE(review): fedora/rhel/centos fall through to this
                # error arm — there is no yum/dnf install path even though
                # need_required_packages computes a package list for them.
                # Presumably users on those platforms must install manually;
                # confirm this is intentional.
                echo -e "Unknown platform '$platform' for installing packages.\nAborting"
                exit 2
                ;;
        esac
    else
        echo "Missing one of the following packages: $package_list"
        echo "Please install manually or rerun with the update option (-u)."
        exit 1
    fi
fi

if [ "$DEBUG_MODE" = "true" ]; then
    echo "Using Image Debug Mode, using root-pwd in images, NOT FOR PRODUCTION USAGE."
    # Each image has a root login, password is "hadoop"
    export DIB_PASSWORD="hadoop"
fi
#################
# Common helper for invoking disk-image-create, adding all the common
# elements and arguments, and setting common environment variables.
#
# Usage:
# image_create DISTRO OUTPUT [args...]
# - DISTRO is the main element of the distribution
# - OUTPUT is the output name for the image
# - any other argument is passed directly to disk-image-create
image_create() {
    # Invoke disk-image-create for one image, adding the elements and
    # arguments common to every build.
    # $1 - main distro element; $2 - output image name; rest passed through.
    local base_distro=$1
    shift
    local image_name=$1
    shift

    # Elements shared by every image we produce.
    local element_list="sahara-version ntp xfs-tools"
    if [ $SIE_BAREMETAL = "true" ]; then
        element_list="grub2 baremetal dhcp-all-interfaces $element_list"
    else
        element_list="vm $element_list"
    fi

    local extra_args=""

    # Debug builds get a root password baked in.
    if [ "$DEBUG_MODE" = "true" ]; then
        element_list="$element_list root-passwd"
    fi

    # Optional distro-specific mirror elements.
    if [ -n "$USE_MIRRORS" ]; then
        case "$base_distro" in
            ubuntu) element_list="$element_list apt-mirror" ;;
            fedora) element_list="$element_list fedora-mirror" ;;
            centos7) element_list="$element_list centos-mirror" ;;
        esac
    fi

    # Unquoted expansions are intentional: format/tracing/element strings
    # must undergo word splitting into separate arguments.
    disk-image-create $IMAGE_FORMAT $TRACING -o "$image_name" $extra_args "$base_distro" $element_list "$@"
}
set_hive_version() {
    # Export the Hive release matching the selected Hadoop version, unless
    # the caller already provided HIVE_VERSION in the environment.
    # Exits the script for Hadoop versions with no known Hive pairing.
    if [ -n "${HIVE_VERSION:-}" ]; then
        return 0
    fi
    case "$DIB_HADOOP_VERSION" in
        "2.7.1")
            export HIVE_VERSION="0.11.0"
            ;;
        "2.7.5" | "2.8.2")
            export HIVE_VERSION="2.3.2"
            ;;
        "3.0.1")
            export HIVE_VERSION="3.0.0"
            ;;
        *)
            echo -e "Unknown Hadoop version, therefore cannot choose Hive version.\nAborting."
            exit 1
            ;;
    esac
}
#############################
# Images for Vanilla plugin #
#############################
# Build images for the Vanilla (plain Apache Hadoop) plugin.  Runs when no
# plugin was selected on the command line, or when "vanilla" was requested.
if [ -z "$PLUGIN" -o "$PLUGIN" = "vanilla" ]; then
    # Download URLs for the pre-built Hadoop native libraries matching each
    # supported Hadoop version; every one can be overridden via environment.
    export HADOOP_V2_7_1_NATIVE_LIBS_DOWNLOAD_URL=${HADOOP_V2_7_1_NATIVE_LIBS_DOWNLOAD_URL:-"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/hadoop-native-libs-2.7.1.tar.gz"}
    export HADOOP_V2_7_5_NATIVE_LIBS_DOWNLOAD_URL=${HADOOP_V2_7_5_NATIVE_LIBS_DOWNLOAD_URL:-"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/hadoop-native-libs-2.7.5.tar.gz"}
    export HADOOP_V2_8_2_NATIVE_LIBS_DOWNLOAD_URL=${HADOOP_V2_8_2_NATIVE_LIBS_DOWNLOAD_URL:-"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/hadoop-native-libs-2.8.2.tar.gz"}
    export HADOOP_V3_0_1_NATIVE_LIBS_DOWNLOAD_URL=${HADOOP_V3_0_1_NATIVE_LIBS_DOWNLOAD_URL:-"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/hadoop-native-libs-3.0.1.tar.gz"}
    # Oozie distribution URLs paired with each Hadoop version.
    # NOTE(review): the fallback variables here are OOZIE_HADOOP_*_FILE, not
    # OOZIE_HADOOP_*_DOWNLOAD_URL, so exporting the *_DOWNLOAD_URL name before
    # running the script has no effect — confirm whether that is intentional.
    export OOZIE_HADOOP_V2_7_1_DOWNLOAD_URL=${OOZIE_HADOOP_V2_7_1_FILE:-"https://tarballs.openstack.org/sahara-extra/dist/oozie/oozie-4.2.0-hadoop-2.7.1.tar.gz"}
    export OOZIE_HADOOP_V2_7_5_DOWNLOAD_URL=${OOZIE_HADOOP_V2_7_5_FILE:-"https://tarballs.openstack.org/sahara-extra/dist/oozie/oozie-4.3.0-hadoop-2.7.5.tar.gz"}
    export OOZIE_HADOOP_V2_8_2_DOWNLOAD_URL=${OOZIE_HADOOP_V2_8_2_FILE:-"https://tarballs.openstack.org/sahara-extra/dist/oozie/oozie-4.3.0-hadoop-2.8.2.tar.gz"}
    export OOZIE_HADOOP_V3_0_1_DOWNLOAD_URL=${OOZIE_HADOOP_V3_0_1_FILE:-"https://tarballs.openstack.org/sahara-extra/dist/oozie/oozie-5.0.0-hadoop-3.0.1.tar.gz"}
    # Directory inside the image where the swift/hadoop-openstack jar is placed.
    export DIB_HDFS_LIB_DIR="/opt/hadoop/share/hadoop/tools/lib"
    export plugin_type="vanilla"
    export DIB_SPARK_VERSION
    # Spark 1.6.0 tarballs are built against Hadoop 2.6; every other version
    # uses the Hadoop 2.7 build.
    if [ "$DIB_SPARK_VERSION" = "1.6.0" ]; then
        export SPARK_HADOOP_DL=hadoop2.6
    else
        export SPARK_HADOOP_DL=hadoop2.7
    fi
    # Ordered diskimage-builder element lists, one per base OS.
    ubuntu_elements_sequence="hadoop oozie mysql hive $JAVA_ELEMENT swift_hadoop spark s3_hadoop"
    fedora_elements_sequence="hadoop oozie mysql disable-firewall hive $JAVA_ELEMENT swift_hadoop spark s3_hadoop"
    centos7_elements_sequence="hadoop oozie mysql disable-firewall hive $JAVA_ELEMENT swift_hadoop spark nc s3_hadoop"
    # Workaround for https://bugs.launchpad.net/diskimage-builder/+bug/1204824
    # https://storyboard.openstack.org/#!/story/1252684
    # When the *build host* is Ubuntu, Fedora/CentOS guest images get the
    # selinux-permissive element appended and a ".selinux-permissive" name
    # suffix (see warning text below).
    if [ "$platform" = 'ubuntu' ]; then
        echo "**************************************************************"
        echo "WARNING: As a workaround for DIB bug 1204824, you are about to"
        echo "         create a Fedora and CentOS images that has SELinux    "
        echo "         disabled. Do not use these images in production.       "
        echo "**************************************************************"
        fedora_elements_sequence="$fedora_elements_sequence selinux-permissive"
        centos7_elements_sequence="$centos7_elements_sequence selinux-permissive"
        suffix=".selinux-permissive"
    fi
    # Ubuntu cloud image
    # One image per requested Hadoop version (or all four when HADOOP_VERSION
    # is unset).  Each branch exports the version, derives the image name,
    # picks the matching Hive version, then builds.
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "ubuntu" ]; then
        export DIB_CLOUD_INIT_DATASOURCES=$CLOUD_INIT_DATASOURCES
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.7.1" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_7_1:-"2.7.1"}
            export ubuntu_image_name=${ubuntu_vanilla_hadoop_2_7_1_image_name:-"ubuntu_sahara_vanilla_hadoop_2_7_1_latest"}
            export DIB_RELEASE=${DIB_RELEASE:-xenial}
            set_hive_version
            image_create ubuntu $ubuntu_image_name $ubuntu_elements_sequence
            unset DIB_RELEASE
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.7.5" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_7_5:-"2.7.5"}
            export ubuntu_image_name=${ubuntu_vanilla_hadoop_2_7_5_image_name:-"ubuntu_sahara_vanilla_hadoop_2_7_5_latest"}
            export DIB_RELEASE=${DIB_RELEASE:-xenial}
            set_hive_version
            image_create ubuntu $ubuntu_image_name $ubuntu_elements_sequence
            unset DIB_RELEASE
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.8.2" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_8_2:-"2.8.2"}
            export ubuntu_image_name=${ubuntu_vanilla_hadoop_2_8_2_image_name:-"ubuntu_sahara_vanilla_hadoop_2_8_2_latest"}
            export DIB_RELEASE=${DIB_RELEASE:-xenial}
            set_hive_version
            image_create ubuntu $ubuntu_image_name $ubuntu_elements_sequence
            unset DIB_RELEASE
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "3.0.1" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_3_0_1:-"3.0.1"}
            export ubuntu_image_name=${ubuntu_vanilla_hadoop_3_0_1_image_name:-"ubuntu_sahara_vanilla_hadoop_3_0_1_latest"}
            export DIB_RELEASE=${DIB_RELEASE:-xenial}
            set_hive_version
            image_create ubuntu $ubuntu_image_name $ubuntu_elements_sequence
            unset DIB_RELEASE
        fi
        unset DIB_CLOUD_INIT_DATASOURCES
    fi
    # Fedora cloud image
    # $suffix is only assigned under the Ubuntu-host workaround above;
    # otherwise it presumably expands empty — confirm it is not set elsewhere.
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "fedora" ]; then
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.7.1" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_7_1:-"2.7.1"}
            export fedora_image_name=${fedora_vanilla_hadoop_2_7_1_image_name:-"fedora_sahara_vanilla_hadoop_2_7_1_latest$suffix"}
            set_hive_version
            image_create fedora $fedora_image_name $fedora_elements_sequence
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.7.5" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_7_5:-"2.7.5"}
            export fedora_image_name=${fedora_vanilla_hadoop_2_7_5_image_name:-"fedora_sahara_vanilla_hadoop_2_7_5_latest$suffix"}
            set_hive_version
            image_create fedora $fedora_image_name $fedora_elements_sequence
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.8.2" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_8_2:-"2.8.2"}
            export fedora_image_name=${fedora_vanilla_hadoop_2_8_2_image_name:-"fedora_sahara_vanilla_hadoop_2_8_2_latest$suffix"}
            set_hive_version
            image_create fedora $fedora_image_name $fedora_elements_sequence
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "3.0.1" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_3_0_1:-"3.0.1"}
            export fedora_image_name=${fedora_vanilla_hadoop_3_0_1_image_name:-"fedora_sahara_vanilla_hadoop_3_0_1_latest$suffix"}
            set_hive_version
            image_create fedora $fedora_image_name $fedora_elements_sequence
        fi
    fi
    # CentOS 7 cloud image
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "centos7" ]; then
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.7.1" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_7_1:-"2.7.1"}
            export centos7_image_name=${centos7_vanilla_hadoop_2_7_1_image_name:-"centos7_sahara_vanilla_hadoop_2_7_1_latest$suffix"}
            set_hive_version
            image_create centos7 $centos7_image_name $centos7_elements_sequence
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.7.5" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_7_5:-"2.7.5"}
            export centos7_image_name=${centos7_vanilla_hadoop_2_7_5_image_name:-"centos7_sahara_vanilla_hadoop_2_7_5_latest$suffix"}
            set_hive_version
            image_create centos7 $centos7_image_name $centos7_elements_sequence
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "2.8.2" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_2_8_2:-"2.8.2"}
            export centos7_image_name=${centos7_vanilla_hadoop_2_8_2_image_name:-"centos7_sahara_vanilla_hadoop_2_8_2_latest$suffix"}
            set_hive_version
            image_create centos7 $centos7_image_name $centos7_elements_sequence
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "3.0.1" ]; then
            export DIB_HADOOP_VERSION=${DIB_HADOOP_VERSION_3_0_1:-"3.0.1"}
            export centos7_image_name=${centos7_vanilla_hadoop_3_0_1_image_name:-"centos7_sahara_vanilla_hadoop_3_0_1_latest$suffix"}
            set_hive_version
            image_create centos7 $centos7_image_name $centos7_elements_sequence
        fi
    fi
    # Drop every variable exported for this section so the next plugin
    # section starts from a clean environment.
    unset plugin_type
    unset DIB_HDFS_LIB_DIR
    unset DIB_SPARK_VERSION
    unset SPARK_HADOOP_DL
fi
###########################
# Images for Spark plugin #
###########################
# Build the image for the Spark standalone plugin (Ubuntu only).  HDFS comes
# from the Cloudera (CDH) packages; only the HDFS subset is installed
# (DIB_CDH_HDFS_ONLY below).
# FIX: the deprecated/ambiguous "[ expr1 -o expr2 ]" form (POSIX marks
# -a/-o obsolescent, ShellCheck SC2166) is replaced by two separate tests.
if [ -z "$PLUGIN" ] || [ "$PLUGIN" = "spark" ]; then
    # Staging directory for the hadoop-openstack (swift) jar inside the image.
    export DIB_HDFS_LIB_DIR="/usr/lib/hadoop-mapreduce"
    export DIB_CLOUD_INIT_DATASOURCES=$CLOUD_INIT_DATASOURCES
    export DIB_SPARK_VERSION
    export plugin_type="spark"
    # Spark 2.2/2.3 are paired with CDH 5.11 on Ubuntu xenial; older Spark
    # releases use CDH 5.5 on trusty.
    if [ "$DIB_SPARK_VERSION" = "2.2.0" ] || [ "$DIB_SPARK_VERSION" = "2.3.0" ]; then
        export DIB_CDH_VERSION="5.11"
        export DIB_RELEASE=${DIB_RELEASE:-xenial}
    else
        export DIB_RELEASE=${DIB_RELEASE:-trusty}
        export DIB_CDH_VERSION="5.5"
    fi
    # Spark 1.6.0 tarballs are built against Hadoop 2.6; every other version
    # uses the Hadoop 2.7 build.
    if [ "$DIB_SPARK_VERSION" = "1.6.0" ]; then
        export SPARK_HADOOP_DL=hadoop2.6
    else
        export SPARK_HADOOP_DL=hadoop2.7
    fi
    # Tell the cloudera element to install only hdfs
    export DIB_CDH_HDFS_ONLY=1
    ubuntu_elements_sequence="$JAVA_ELEMENT swift_hadoop spark hadoop-cloudera s3_hadoop"
    export ubuntu_image_name=${ubuntu_spark_image_name:-"ubuntu_sahara_spark_latest"}
    # Creating Ubuntu cloud image
    image_create ubuntu $ubuntu_image_name $ubuntu_elements_sequence
    # Drop every variable exported for this section so the next plugin
    # section starts from a clean environment.
    unset SPARK_HADOOP_DL
    unset DIB_CLOUD_INIT_DATASOURCES
    unset DIB_HDFS_LIB_DIR
    unset DIB_CDH_HDFS_ONLY
    unset DIB_CDH_VERSION
    unset DIB_SPARK_VERSION
    unset DIB_HADOOP_VERSION
    unset DIB_RELEASE
    unset plugin_type
fi
##########################
# Image for Storm plugin #
##########################
# Build the image for the Storm plugin: an Ubuntu (xenial) image carrying
# Java, ZooKeeper and Storm.
if [ -z "$PLUGIN" ] || [ "$PLUGIN" = "storm" ]; then
    export DIB_CLOUD_INIT_DATASOURCES=$CLOUD_INIT_DATASOURCES
    export DIB_STORM_VERSION
    # Element list and image name (the Storm version is baked into the name).
    ubuntu_elements_sequence="$JAVA_ELEMENT zookeeper storm"
    export ubuntu_image_name=${ubuntu_storm_image_name:-"ubuntu_sahara_storm_latest_$DIB_STORM_VERSION"}
    export DIB_RELEASE=${DIB_RELEASE:-xenial}
    # Build the Ubuntu cloud image.
    image_create ubuntu $ubuntu_image_name $ubuntu_elements_sequence
    # Clean up the section's exports.
    unset DIB_RELEASE
    unset DIB_CLOUD_INIT_DATASOURCES
fi
############################
# Images for Ambari plugin #
############################
# Build images for the Ambari plugin: Ubuntu (trusty) and/or CentOS 7 images
# carrying the Ambari management stack.
if [ -z "$PLUGIN" ] || [ "$PLUGIN" = "ambari" ]; then
    export plugin_type="ambari"
    export DIB_AMBARI_VERSION="$HADOOP_VERSION"
    # set the temporary folder for hadoop-openstack.jar file
    export DIB_HDFS_LIB_DIR="/opt"
    if [ -z "$BASE_IMAGE_OS" ] || [ "$BASE_IMAGE_OS" = "ubuntu" ]; then
        export DIB_RELEASE="trusty"
        ambari_element_sequence="ambari $JAVA_ELEMENT swift_hadoop kdc"
        ambari_ubuntu_image_name=${ambari_ubuntu_image_name:-ubuntu_sahara_ambari}
        image_create ubuntu $ambari_ubuntu_image_name $ambari_element_sequence
        unset DIB_RELEASE
    fi
    if [ -z "$BASE_IMAGE_OS" ] || [ "$BASE_IMAGE_OS" = "centos7" ]; then
        # CentOS additionally needs SELinux/firewall disabled and netcat.
        ambari_element_sequence="disable-selinux ambari $JAVA_ELEMENT disable-firewall swift_hadoop kdc nc"
        ambari_centos7_image_name=${ambari_centos7_image_name:-"centos7-sahara-ambari"}
        image_create centos7 $ambari_centos7_image_name $ambari_element_sequence
    fi
    # Clean up the section's exports.
    unset DIB_AMBARI_VERSION
    unset DIB_HDFS_LIB_DIR
    unset plugin_type
fi
#########################
# Images for CDH plugin #
#########################
# Build images for the CDH (Cloudera) plugin, one per supported CDH version
# (5.5 / 5.7 / 5.9 / 5.11) on Ubuntu and/or CentOS 7.
if [ -z "$PLUGIN" -o "$PLUGIN" = "cloudera" ]; then
    # Cloudera installation requires additional space
    export DIB_MIN_TMPFS=5
    export plugin_type="cloudera"
    # Staging directory for the hadoop-openstack (swift) jar inside the image.
    export DIB_HDFS_LIB_DIR="/usr/lib/hadoop-mapreduce"
    if [ -n "$DIB_CDH_MINOR_VERSION" ]; then
        # cut minor version number, e.g. from 5.7.1 to 5.7
        # this is needed if user specified minor version but didn't specify
        # hadoop version by '-v' parameter
        HADOOP_VERSION=${DIB_CDH_MINOR_VERSION%.*}
    fi
    cloudera_elements_sequence="hadoop-cloudera swift_hadoop kdc"
    # Ubuntu images: CDH 5.5 (no minor-version support) builds on trusty;
    # 5.7/5.9 on trusty; 5.11 on xenial.
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "ubuntu" ]; then
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.5" ]; then
            export DIB_CDH_VERSION="5.5"
            cloudera_5_5_ubuntu_image_name=${cloudera_5_5_ubuntu_image_name:-ubuntu_sahara_cloudera_5_5_0}
            # Cloudera supports 14.04 Ubuntu in 5.5
            export DIB_RELEASE="trusty"
            image_create ubuntu $cloudera_5_5_ubuntu_image_name $cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_RELEASE
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.7" ]; then
            export DIB_CDH_VERSION="5.7"
            # Default the full minor version to <major>.0 unless given.
            export DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
            cloudera_5_7_ubuntu_image_name=${cloudera_5_7_ubuntu_image_name:-ubuntu_sahara_cloudera_$DIB_CDH_MINOR_VERSION}
            export DIB_RELEASE="trusty"
            image_create ubuntu $cloudera_5_7_ubuntu_image_name $cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_RELEASE DIB_CDH_MINOR_VERSION
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.9" ]; then
            export DIB_CDH_VERSION="5.9"
            export DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
            cloudera_5_9_ubuntu_image_name=${cloudera_5_9_ubuntu_image_name:-ubuntu_sahara_cloudera_$DIB_CDH_MINOR_VERSION}
            export DIB_RELEASE="trusty"
            image_create ubuntu $cloudera_5_9_ubuntu_image_name $cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_RELEASE DIB_CDH_MINOR_VERSION
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.11" ]; then
            export DIB_CDH_VERSION="5.11"
            export DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
            cloudera_5_11_ubuntu_image_name=${cloudera_5_11_ubuntu_image_name:-ubuntu_sahara_cloudera_$DIB_CDH_MINOR_VERSION}
            export DIB_RELEASE="xenial"
            image_create ubuntu $cloudera_5_11_ubuntu_image_name $cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_RELEASE DIB_CDH_MINOR_VERSION
        fi
    fi
    # CentOS 7 images: the common element list plus CentOS-specific elements
    # (passed as a second list to image_create).
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "centos7" ]; then
        centos7_cloudera_elements_sequence="selinux-permissive disable-firewall nc"
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.5" ]; then
            export DIB_CDH_VERSION="5.5"
            cloudera_5_5_centos7_image_name=${cloudera_5_5_centos7_image_name:-centos7_sahara_cloudera_5_5_0}
            image_create centos7 $cloudera_5_5_centos7_image_name $cloudera_elements_sequence $centos7_cloudera_elements_sequence
            unset DIB_CDH_VERSION
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.7" ]; then
            export DIB_CDH_VERSION="5.7"
            export DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
            cloudera_5_7_centos7_image_name=${cloudera_5_7_centos7_image_name:-centos7_sahara_cloudera_$DIB_CDH_MINOR_VERSION}
            image_create centos7 $cloudera_5_7_centos7_image_name $cloudera_elements_sequence $centos7_cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_CDH_MINOR_VERSION
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.9" ]; then
            export DIB_CDH_VERSION="5.9"
            export DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
            cloudera_5_9_centos7_image_name=${cloudera_5_9_centos7_image_name:-centos7_sahara_cloudera_$DIB_CDH_MINOR_VERSION}
            image_create centos7 $cloudera_5_9_centos7_image_name $cloudera_elements_sequence $centos7_cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_CDH_MINOR_VERSION
        fi
        if [ -z "$HADOOP_VERSION" -o "$HADOOP_VERSION" = "5.11" ]; then
            export DIB_CDH_VERSION="5.11"
            export DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
            cloudera_5_11_centos7_image_name=${cloudera_5_11_centos7_image_name:-centos7_sahara_cloudera_$DIB_CDH_MINOR_VERSION}
            image_create centos7 $cloudera_5_11_centos7_image_name $cloudera_elements_sequence $centos7_cloudera_elements_sequence
            unset DIB_CDH_VERSION DIB_CDH_MINOR_VERSION
        fi
    fi
    # Drop every variable exported for this section so the next plugin
    # section starts from a clean environment.
    unset DIB_CDH_MINOR_VERSION
    unset DIB_HDFS_LIB_DIR
    unset DIB_MIN_TMPFS
    unset plugin_type
fi
##########################
# Images for MapR plugin #
##########################
# Build images for the MapR plugin on Ubuntu and/or CentOS 7.
if [ -z "$PLUGIN" -o "$PLUGIN" = "mapr" ]; then
    # MapR version to bake into the image; falls back to the script default.
    export DIB_MAPR_VERSION=${DIB_MAPR_VERSION:-${DIB_DEFAULT_MAPR_VERSION}}
    export plugin_type="mapr"
    export DIB_CLOUD_INIT_DATASOURCES=$CLOUD_INIT_DATASOURCES
    export DIB_IMAGE_SIZE=${IMAGE_SIZE:-"10"}
    #MapR repository requires additional space
    export DIB_MIN_TMPFS=10
    mapr_ubuntu_elements_sequence="ssh hadoop-mapr $JAVA_ELEMENT"
    mapr_centos_elements_sequence="ssh hadoop-mapr selinux-permissive $JAVA_ELEMENT disable-firewall nc"
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "ubuntu" ]; then
        export DIB_RELEASE=${DIB_RELEASE:-trusty}
        mapr_ubuntu_image_name=${mapr_ubuntu_image_name:-ubuntu_${DIB_RELEASE}_mapr_${DIB_MAPR_VERSION}_latest}
        image_create ubuntu $mapr_ubuntu_image_name $mapr_ubuntu_elements_sequence
        unset DIB_RELEASE
    fi
    if [ -z "$BASE_IMAGE_OS" -o "$BASE_IMAGE_OS" = "centos7" ]; then
        mapr_centos7_image_name=${mapr_centos7_image_name:-centos_7_mapr_${DIB_MAPR_VERSION}_latest}
        image_create centos7 $mapr_centos7_image_name $mapr_centos_elements_sequence
    fi
    # BUGFIX: DIB_CLOUD_INIT_DATASOURCES was previously unset only inside the
    # centos7 branch, so an ubuntu-only build leaked the exported variable
    # into later plugin sections.  Unset it unconditionally here, matching the
    # spark/storm sections.
    unset DIB_CLOUD_INIT_DATASOURCES
    unset plugin_type
fi
################
# Plain images #
################
# Plain images: bare cloud images without any Hadoop components, used for
# testing.
if [ -z "$PLUGIN" ] || [ "$PLUGIN" = "plain" ]; then
    # All variants share the ssh element; CentOS 7 additionally needs the
    # firewall/SELinux disabling elements and netcat.
    common_elements="ssh"
    ubuntu_elements_sequence="$common_elements"
    fedora_elements_sequence="$common_elements"
    centos7_elements_sequence="$common_elements disable-firewall disable-selinux nc"
    if [ -z "$BASE_IMAGE_OS" ] || [ "$BASE_IMAGE_OS" = "ubuntu" ]; then
        export DIB_RELEASE=${DIB_RELEASE:-xenial}
        plain_image_name=${plain_ubuntu_image_name:-ubuntu_plain}
        image_create ubuntu $plain_image_name $ubuntu_elements_sequence
        unset DIB_RELEASE
    fi
    if [ -z "$BASE_IMAGE_OS" ] || [ "$BASE_IMAGE_OS" = "fedora" ]; then
        plain_image_name=${plain_fedora_image_name:-fedora_plain}
        image_create fedora $plain_image_name $fedora_elements_sequence
    fi
    if [ -z "$BASE_IMAGE_OS" ] || [ "$BASE_IMAGE_OS" = "centos7" ]; then
        plain_image_name=${plain_centos7_image_name:-centos7_plain}
        image_create centos7 $plain_image_name $centos7_elements_sequence
    fi
fi

1
elements/.gitignore vendored
View File

@ -1 +0,0 @@
*~

View File

@ -1,69 +0,0 @@
Diskimage-builder tools for creation cloud images
=================================================
Steps to create a cloud image with Apache Hadoop installed, using the diskimage-builder project:
1. Clone the repository "https://github.com/openstack/diskimage-builder" locally. Note: Make sure you have commit 43b96d91 in your clone, it provides a mapping for default-jre.
.. sourcecode:: bash
git clone https://github.com/openstack/diskimage-builder
2. Add ~/diskimage-builder/bin/ directory to your path (for example, PATH=$PATH:/home/$USER/diskimage-builder/bin/ ).
3. Export the following variable ELEMENTS_PATH=/home/$USER/diskimage-builder/elements/ to your .bashrc. Then source it.
4. Copy file "img-build-sudoers" from ~/disk-image-builder/sudoers.d/ to your /etc/sudoers.d/.
.. sourcecode:: bash
chmod 440 /etc/sudoers.d/img-build-sudoers
chown root:root /etc/sudoers.d/img-build-sudoers
5. Export sahara-elements commit id variable (from sahara-extra directory):
.. sourcecode:: bash
export SAHARA_ELEMENTS_COMMIT_ID=`git show --format=%H | head -1`
6. Move elements/ directory to disk-image-builder/elements/
.. sourcecode:: bash
mv elements/* /path_to_disk_image_builder/diskimage-builder/elements/
7. Export DIB commit id variable (from DIB directory):
.. sourcecode:: bash
export DIB_COMMIT_ID=`git show --format=%H | head -1`
8. Call the following command to create a cloud image that is able to run on OpenStack:
8.1. Ubuntu cloud image
.. sourcecode:: bash
JAVA_FILE=jdk-7u21-linux-x64.tar.gz DIB_HADOOP_VERSION=1.2.1 OOZIE_FILE=oozie-4.0.0.tar.gz disk-image-create base vm hadoop oozie ubuntu root-passwd -o ubuntu_hadoop_1_2_1
8.2. Fedora cloud image
.. sourcecode:: bash
JAVA_FILE=jdk-7u21-linux-x64.tar.gz DIB_HADOOP_VERSION=1.2.1 OOZIE_FILE=oozie-4.0.0.tar.gz DIB_IMAGE_SIZE=10 disk-image-create base vm fedora hadoop root-passwd oozie -o fedora_hadoop_1_2_1
Note: If you are building this image from Ubuntu or Fedora 18 OS host, you should add element 'selinux-permissive'.
.. sourcecode:: bash
JAVA_FILE=jdk-7u21-linux-x64.tar.gz DIB_HADOOP_VERSION=1.2.1 OOZIE_FILE=oozie-4.0.0.tar.gz DIB_IMAGE_SIZE=10 disk-image-create base vm fedora hadoop root-passwd oozie selinux-permissive -o fedora_hadoop_1_2_1
In this command 'DIB_HADOOP_VERSION' parameter is version of hadoop needs to be installed.
You can use 'JAVA_DOWNLOAD_URL' parameter to specify download link for JDK (tarball or bin).
'DIB_IMAGE_SIZE' is a parameter that specifies the size of the instance's hard disk. You need to specify it because Fedora and CentOS don't use all of the available volume.
If you have already downloaded the jdk package, move it to "elements/hadoop/install.d/" and use its filename as 'JAVA_FILE' parameter.
In order for EDP components to work with Sahara DIB images, you need pre-installed Oozie libs.
Use OOZIE_DOWNLOAD_URL to specify link to Oozie archive (tar.gz). For example the Oozie libraries
for Hadoop 2.7.1 are available from:
https://tarballs.openstack.org/sahara-extra/dist/oozie/oozie-4.2.0-hadoop-2.7.1.tar.gz
If you have already downloaded archive, move it to "elements/oozie/install.d/" and use its filename as 'OOZIE_FILE' parameter.

View File

@ -1,14 +0,0 @@
======
ambari
======
Installs Ambari Management Console
Environment Variables
---------------------
AMBARI_VERSION
:Required: No
:Default: 2.4.2.0
:Description: Version of Ambari Management Console to install
:Example: ``AMBARI_VERSION="2.2.1.0"`` installs Ambari 2.2.1.0

View File

@ -1,3 +0,0 @@
java
mysql
package-installs

View File

@ -1,19 +0,0 @@
wget:
phase: pre-install.d
ambari-agent:
ambari-log4j:
ambari-metrics-hadoop-sink:
ambari-metrics-monitor:
ambari-server:
ambari-metrics-collector:
redhat-lsb:
nc:
unzip:
curl:
tar:
zip:
rpcbind:
fuse-libs:
fuse:
snappy-devel:
rng-tools:

View File

@ -1,20 +0,0 @@
{
"family": {
"debian": {
"nc": "netcat",
"redhat-lsb": "",
"snappy-devel": "",
"fuse-libs": "fuse",
"ambari-metrics-monitor": "ambari-metrics-assembly",
"ambari-metrics-collector": "ambari-metrics-assembly",
"ambari-metrics-hadoop-sink": "ambari-metrics-assembly",
"ambari-log4j": ""
},
"redhat": {
"nc": "nc",
"redhat-lsb": "redhat-lsb",
"snappy-devel": "snappy-devel",
"fuse-libs": "fuse-libs"
}
}
}

View File

@ -1,14 +0,0 @@
#!/bin/bash
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "${DISTRO_NAME}" in
rhel7 | centos7 )
chkconfig ambari-server off
chkconfig ambari-agent off
;;
esac

View File

@ -1,14 +0,0 @@
#!/bin/bash
if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "${DISTRO_NAME}" in
rhel7 | centos7 )
config=/etc/python/cert-verification.cfg
[ -e $config ] && sed -i "s%^\(verify=\s*\).*$%verify=disable%" $config
;;
esac

View File

@ -1,19 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
DIB_AMBARI_VERSION="${DIB_AMBARI_VERSION:-2.4.2.0}"
case "${DISTRO_NAME}" in
rhel7 | centos7 )
wget http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/$DIB_AMBARI_VERSION/ambari.repo -O /etc/yum.repos.d/ambari.repo
;;
ubuntu )
wget http://public-repo-1.hortonworks.com/ambari/ubuntu14/2.x/updates/$DIB_AMBARI_VERSION/ambari.list -O /etc/apt/sources.list.d/ambari.list
apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD
;;
esac

View File

@ -1,13 +0,0 @@
==========
apt-mirror
==========
This element sets up the mirror for updating the Ubuntu cloud image.
Using a mirror improves the speed of the image building.
Environment Variables
---------------------
UBUNTU_MIRROR
:Required: Yes
:Description: URL to the Ubuntu mirror.

View File

@ -1,9 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
rm /etc/apt/apt.conf.d/01proxy

View File

@ -1,11 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
mkdir -p /etc/apt/apt.conf.d/
touch /etc/apt/apt.conf.d/01proxy
echo -e "Acquire::http { Proxy \"$UBUNTU_MIRROR\"; };" > /etc/apt/apt.conf.d/01proxy

View File

@ -1,12 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
if [ -z "$UBUNTU_MIRROR" ]; then
echo "You should specify parameter 'UBUNTU_MIRROR'"
exit 2
fi

View File

@ -1,13 +0,0 @@
=============
centos-mirror
=============
This element sets up the mirror for updating the CentOS cloud image.
Using a mirror improves the speed of the image building.
Environment Variables
---------------------
CENTOS_MIRROR
:Required: Yes
:Description: URL to the CentOS mirror.

View File

@ -1,9 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
mv /opt/repos/* /etc/yum.repos.d/

View File

@ -1,19 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
mkdir -p /opt/repos
mv /etc/yum.repos.d/* /opt/repos/
cat >> /etc/yum.repos.d/centos.repo <<EOF
[Local-Repository]
name=CentOS \$releasever - \$basearch - Local
baseurl=$CENTOS_MIRROR
enabled=1
gpgcheck=0
priority=1
EOF
yum makecache

View File

@ -1,12 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
if [ -z "$CENTOS_MIRROR" ]; then
echo "You should specify parameter 'CENTOS_MIRROR'"
exit 2
fi

View File

@ -1,11 +0,0 @@
================
disable-firewall
================
This element disables all firewalls on the image.
Recognized firewalls:
- iptables
- ip6tables
- firewalld

View File

@ -1,18 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "${DISTRO_NAME}" in
fedora | centos7 | rhel7 )
if type -p systemctl && [[ $(systemctl --no-pager list-unit-files firewalld) =~ 'enabled' ]]; then
systemctl disable firewalld
fi
;;
* )
echo "Unknown distro: $DISTRO_NAME. Skipping."
;;
esac

View File

@ -1,25 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "$DISTRO_NAME" in
fedora | centos7 | rhel7 )
install-packages iptables-services
;;
esac
case "${DISTRO_NAME}" in
fedora | rhel7)
if type -p service; then
service ip6tables save
chkconfig ip6tables off
fi
;;
* )
echo "Unknown distro: $DISTRO_NAME. Skipping."
;;
esac

View File

@ -1,25 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "$DISTRO_NAME" in
fedora | centos7 | rhel7)
install-packages iptables-services
;;
esac
case "${DISTRO_NAME}" in
fedora | rhel7)
if type -p service; then
service iptables save
chkconfig iptables off
fi
;;
* )
echo "Unknown distro: $DISTRO_NAME. Skipping."
;;
esac

View File

@ -1,30 +0,0 @@
=====
extjs
=====
This element downloads extjs from its website, caching it so it is
not downloaded every time, and optionally unpacking it.
Environment Variables
---------------------
The element can be configured by exporting variables using a
`environment.d` script.
EXTJS_DESTINATION_DIR
:Required: Yes
:Description: The directory where to extract (or copy) extjs; must be
an absolute directory within the image. The directory is created if not
existing already.
:Example: ``EXTJS_DESTINATION_DIR=/usr/share/someapp``
EXTJS_DOWNLOAD_URL
:Required: No
:Default: ``https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip``
:Description: The URL from where to download extjs.
EXTJS_NO_UNPACK
:Required: No
:Default: *unset*
:Description: If set to 1, then the extjs tarball is simply copied to the
location specified by ``EXTJS_DESTINATION_DIR``.

View File

@ -1,2 +0,0 @@
cache-url
package-installs

View File

@ -1,19 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
EXTJS_DOWNLOAD_URL=${EXTJS_DOWNLOAD_URL:-"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip"}
extjs_archive=/tmp/$(basename $EXTJS_DOWNLOAD_URL)
mkdir -p $EXTJS_DESTINATION_DIR
if [ -z "${EXTJS_NO_UNPACK:-}" ]; then
unzip -d "$EXTJS_DESTINATION_DIR" $extjs_archive
rm -f $extjs_archive
else
mv $extjs_archive $EXTJS_DESTINATION_DIR
fi

View File

@ -1 +0,0 @@
unzip:

View File

@ -1,9 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
[ -n "$EXTJS_DESTINATION_DIR" ]

View File

@ -1,14 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
EXTJS_DOWNLOAD_URL=${EXTJS_DOWNLOAD_URL:-"https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/ext-2.2.zip"}
filename=$(basename $EXTJS_DOWNLOAD_URL)
cached_package="$DIB_IMAGE_CACHE/$filename"
$TMP_HOOKS_PATH/bin/cache-url "$EXTJS_DOWNLOAD_URL" $cached_package
sudo install -D -g root -o root -m 0644 $cached_package $TARGET_ROOT/tmp/

View File

@ -1,13 +0,0 @@
=============
fedora-mirror
=============
This element sets up the mirror for updating the Fedora cloud image.
Using a mirror improves the speed of the image building.
Environment Variables
---------------------
FEDORA_MIRROR
:Required: Yes
:Description: URL to the Fedora mirror.

View File

@ -1,9 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
mv /opt/repos/* /etc/yum.repos.d/

View File

@ -1,19 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
mkdir -p /opt/repos
mv /etc/yum.repos.d/* /opt/repos
cat >> /etc/yum.repos.d/fedora.repo <<EOF
[Local-Repository]
name=Fedora \$releasever - \$basearch - Local
baseurl=$FEDORA_MIRROR
enabled=1
gpgcheck=0
priority=1
EOF
yum makecache

View File

@ -1,12 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
if [ -z "$FEDORA_MIRROR" ]; then
echo "You should specify parameter 'FEDORA_MIRROR'"
exit 2
fi

View File

@ -1,21 +0,0 @@
==========
hadoop-cdh
==========
Installs Hadoop CDH 4 (the Cloudera distribution), configures SSH.
Only HDFS is installed at this time.
This element is used by Spark version 1.0.2.
This element is deprecated and will be deleted when support for Spark 1.0.2
will be dropped from Sahara.
Environment Variables
---------------------
DIB_CDH_VERSION
:Required: Yes.
:Description: Version of the CDH platform to install.
:Example: ``DIB_CDH_VERSION=CDH4``

View File

@ -1,4 +0,0 @@
java
package-installs
sahara-version
ssh

View File

@ -1,4 +0,0 @@
wget:
phase: pre-install.d
hadoop-hdfs-namenode:
hadoop-hdfs-datanode:

View File

@ -1,34 +0,0 @@
#!/bin/bash
# This element installs Hadoop CDH 4 HDFS from Cloudera.
# It does not do a full install of CDH; it installs the minimum needed for
# Spark to run correctly.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
echo "Creating hadoop user & group"
case "$DISTRO_NAME" in
ubuntu )
if ! getent group hadoop > /dev/null; then
addgroup hadoop
fi
if ! getent passwd hadoop > /dev/null; then
adduser --ingroup hadoop --disabled-password --gecos GECOS hadoop
fi
adduser hadoop sudo
;;
esac
echo "Pre-configuring Hadoop"
cat >> /home/hadoop/.bashrc <<EOF
PATH=$PATH:/usr/sbin
EOF
cat >> /etc/hadoop/hadoop-env.sh <<EOF
export HADOOP_LOG_DIR=/mnt/log/hadoop/\$USER
export HADOOP_SECURE_DN_LOG_DIR=/mnt/log/hadoop/hdfs
EOF

View File

@ -1,28 +0,0 @@
#!/bin/bash
# This script contains some fixes needed to have the Cloudera Ubuntu
# packages up and running
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "$DISTRO_NAME" in
ubuntu )
# pietro: small hack to fix install problems on ubuntu
# the CDH package contains a broken symlink instead of the log4j jar file
# these 4 lines should go away once Cloudera fixes the package
echo "Fixing install problem for CDH: log4j";
wget https://repo1.maven.org/maven2/org/slf4j/slf4j-log4j12/1.6.1/slf4j-log4j12-1.6.1.jar;
sudo rm /usr/lib/hadoop/lib/slf4j-log4j12-1.6.1.jar;
sudo mv slf4j-log4j12-1.6.1.jar /usr/lib/hadoop/lib/
mkdir /run/hadoop
chown hadoop:hadoop /run/hadoop/
mkdir -p /var/run/hadoop ; chown hadoop:hadoop /var/run/hadoop
mkdir -p /mnt/log/hadoop ; chown hadoop:hadoop /mnt/log/hadoop
;;
esac

View File

@ -1,29 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
case "$DISTRO_NAME" in
ubuntu )
# Here more versions of CDH could be supported by downloading the right repository package.
wget -P /tmp "http://archive.cloudera.com/cdh4/one-click-install/precise/amd64/cdh4-repository_1.0_all.deb"
# Pin packages from cloudera repository
cat >> /etc/apt/preferences.d/cloudera << EOF
Package: *
Pin: origin "archive.cloudera.com"
Pin-Priority: 800
EOF
dpkg -i /tmp/cdh4-repository_1.0_all.deb
rm /tmp/cdh4-repository_1.0_all.deb
wget -O - http://archive.cloudera.com/cdh4/ubuntu/precise/amd64/cdh/archive.key | sudo apt-key add -
;;
*)
echo "Distro $DISTRO_NAME not supported by CDH. Exiting."
exit 1
;;
esac

View File

@ -1,28 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
echo "##############################################################"
echo "## ##"
echo "## WARNING: this element (hadoop-cdh) has been deprecated ##"
echo "## Please use the hadoop-cloudera element ##"
echo "## ##"
echo "##############################################################"
if [ -z "${JAVA_DOWNLOAD_URL:-}" ]; then
if [ -z "${JAVA_FILE:-}" ]; then
echo "JAVA_FILE and JAVA_DOWNLOAD_URL are not set. Proceeding with distro native Java."
fi
fi
if [ -z "$DIB_CDH_VERSION" ]; then
echo "DIB_CDH_VERSION is not set. Impossible to install hadoop. Exit"
exit 1
fi
if [ $DIB_CDH_VERSION != "CDH4" ]; then
echo "CDH version $DIB_CDH_VERSION not supported. Exiting."
exit 1
fi

View File

@ -1,37 +0,0 @@
===============
hadoop-cloudera
===============
Installs cloudera (cloudera-manager-agent cloudera-manager-daemons
cloudera-manager-server cloudera-manager-server-db-2 hadoop-hdfs-namenode
hadoop-hdfs-secondarynamenode hadoop-hdfs-datanode hadoop-yarn-resourcemanager
hadoop-yarn-nodemanager hadoop-mapreduce hadoop-mapreduce-historyserver) and
Java (oracle-j2sdk1.7) packages from cloudera repositories:
`cdh5 <http://archive.cloudera.com/cdh5/>`_ and
`cm5 <http://archive.cloudera.com/cm5>`_.
Also installs Cloudera distribution of Apache Kafka for CDH version >= 5.5 from
Cloudera repository: `kafka <http://archive.cloudera.com/kafka>`_.
In order to create the Cloudera images with ``diskimage-create.sh``, use the
following syntax to select the ``cloudera`` plugin:
.. sourcecode:: bash
diskimage-create.sh -p cloudera
Environment Variables
---------------------
The element can be configured by exporting variables using a
`environment.d` script.
DIB_CDH_HDFS_ONLY
:Required: No
:Description: If set will install only the namenode and datanode
packages with their dependencies.
DIB_CDH_MINOR_VERSION
:Required: No
:Description: If set will install minor version of CDH. Available minor
versions are 5.7.x.

View File

@ -1,4 +0,0 @@
extjs
java
package-installs
ssh

View File

@ -1 +0,0 @@
export EXTJS_DESTINATION_DIR="/var/lib/oozie"

View File

@ -1 +0,0 @@
export DIB_JAVA_DISTRO="cloudera-jdk"

View File

@ -1,30 +0,0 @@
#!/bin/bash
# Install the Cloudera-packaged Oracle JDK and register it as the image's
# default Java, unless an HDFS-only image was requested (in which case the
# JDK has already been installed by the Spark element).
#
# Environment:
#   DIB_CDH_HDFS_ONLY  if set, skip the JDK installation entirely
#   DISTRO_NAME        distro being built (provided by diskimage-builder)
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

if [ -z "${DIB_CDH_HDFS_ONLY:-}" ]; then
    echo "Installing Oracle JDK for Cloudera"
    pkg="oracle-j2sdk1.7"
    # Quote all expansions (SC2086) so the calls are whitespace-safe.
    install-packages "$pkg"
    JDK_HOME=""
    case "$DISTRO_NAME" in
        centos7 | rhel7 )
            # NOTE(review): assumes exactly one jdk*cloudera directory exists
            # under /usr/java; multiple matches would yield a multi-line
            # JDK_HOME — confirm against the package layout.
            JDK_HOME=$(find /usr/java -name 'jdk*cloudera' -print)
            ;;
        ubuntu )
            JDK_HOME="/usr/lib/jvm/java-7-oracle-cloudera"
            ;;
    esac
    JRE_HOME=$JDK_HOME/jre
    setup-java-home "$JRE_HOME" "$JDK_HOME"
else
    echo "Skip this step. JDK has already been installed for Spark."
fi

View File

@ -1,114 +0,0 @@
#!/bin/bash
# Install the CDH (Cloudera Distribution of Hadoop) package stack and the
# hadoop-openstack (swift support) jar matching the requested CDH release.
#
# Environment:
#   DIB_CDH_VERSION        (required) major CDH version: 5.5, 5.7, 5.9 or 5.11
#   DIB_CDH_MINOR_VERSION  (optional) full minor version, defaults to
#                          "$DIB_CDH_VERSION.0"
#   DIB_CDH_HDFS_ONLY      (optional) if set, skip the full CDH stack and the
#                          oozie sharelib symlinks (HDFS-only image)
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

if [ "$DISTRO_NAME" = "ubuntu" ]; then
    # Keep package maintainer scripts from starting daemons in the chroot.
    export DEBIAN_FRONTEND=noninteractive
    export RUNLEVEL=1
fi

# Install the rest of CDH unless a limited HDFS install was requested
if [ -z "${DIB_CDH_HDFS_ONLY:-}" ]; then
    install-packages \
        cloudera-manager-agent \
        cloudera-manager-daemons \
        cloudera-manager-server \
        cloudera-manager-server-db-2 \
        hadoop-hdfs-secondarynamenode \
        hadoop-mapreduce \
        hadoop-mapreduce-historyserver \
        hadoop-yarn-nodemanager \
        hadoop-yarn-resourcemanager \
        hbase \
        hive-hcatalog \
        hive-metastore \
        hive-server2 \
        hive-webhcat-server \
        hue \
        oozie \
        spark-core \
        zookeeper \
        flume-ng \
        hadoop-kms \
        hbase-solr \
        impala \
        impala-server \
        impala-state-store \
        impala-catalog \
        impala-shell \
        keytrustee-keyprovider \
        sentry \
        solr-server \
        solr-doc \
        search \
        spark-history-server \
        sqoop2 \
        kafka \
        kafka-server
fi

DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}

# Select the hadoop-openstack jar for the requested release.  5.5 is pinned
# to the 5.5.0 artifact; later releases use the configured minor version.
# (The original code duplicated the download + symlink steps per version.)
jar_url=""
case "$DIB_CDH_VERSION" in
    5.5)
        jar_url="https://repository.cloudera.com/artifactory/repo/org/apache/hadoop/hadoop-openstack/2.6.0-cdh5.5.0/hadoop-openstack-2.6.0-cdh5.5.0.jar"
        ;;
    5.7|5.9|5.11)
        jar_url="https://repository.cloudera.com/artifactory/repo/org/apache/hadoop/hadoop-openstack/2.6.0-cdh$DIB_CDH_MINOR_VERSION/hadoop-openstack-2.6.0-cdh$DIB_CDH_MINOR_VERSION.jar"
        ;;
    *)
        echo "Unhandled version $DIB_CDH_VERSION for hadoop-openstack.jar"
        exit 1
        ;;
esac

dest=/usr/lib/hadoop-mapreduce/hadoop-openstack.jar
wget -O "$dest" "$jar_url"

if [ -z "${DIB_CDH_HDFS_ONLY:-}" ]; then
    # Create links to keep backward version support.
    ln -s /usr/lib/oozie/oozie-sharelib-yarn /usr/lib/oozie/oozie-sharelib-yarn.tar.gz
    ln -s /usr/lib/oozie/oozie-sharelib-mr1 /usr/lib/oozie/oozie-sharelib-mr1.tar.gz
    ln -s /usr/lib/oozie/oozie-sharelib-yarn.tar.gz /usr/lib/oozie/oozie-sharelib.tar.gz
fi

if [ "$DISTRO_NAME" = "ubuntu" ]; then
    unset RUNLEVEL
fi

View File

@ -1,6 +0,0 @@
wget:
phase: pre-install.d
ntp:
hadoop-hdfs-datanode:
hadoop-hdfs-namenode:
# other packages are installed conditionally in install.d/50-install-cloudera

View File

@ -1,45 +0,0 @@
#!/bin/bash
# Disable autostart of all Hadoop/Cloudera services baked into the image;
# sahara starts the required services explicitly at cluster-provisioning
# time, so none of them may come up on first boot.
#
# Environment:
#   DIB_CDH_VERSION  CDH version being installed (used to skip services that
#                    do not exist on CDH 5.0)
#   DISTRO_NAME      distro being built (provided by diskimage-builder)
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# disable_service <name> — remove the service from the default runlevels
# using the distro's native tool.
disable_service() {
    if [ "$DISTRO_NAME" = "ubuntu" ]; then
        update-rc.d -f "$1" remove
    else
        chkconfig "$1" off
    fi
}

for i in cloudera-scm-agent \
        cloudera-scm-server \
        cloudera-scm-server-db \
        hadoop-hdfs-datanode \
        hadoop-hdfs-namenode \
        hadoop-hdfs-secondarynamenode \
        hadoop-mapreduce-historyserver \
        hadoop-yarn-nodemanager \
        hadoop-yarn-resourcemanager \
        hive-metastore \
        hive-server2 \
        hive-webhcat-server \
        hue \
        oozie \
        postgresql
do
    disable_service "$i"
done

# Impala, Solr and the Spark history server only exist on CDH > 5.0.
# Quote the expansion: the original unquoted test broke under 'set -eu'
# when DIB_CDH_VERSION was empty (SC2086).
if [ "$DIB_CDH_VERSION" != "5.0" ]; then
    for i in impala-catalog \
            impala-server \
            impala-state-store \
            solr-server \
            spark-history-server
    do
        disable_service "$i"
    done
fi

View File

@ -1,304 +0,0 @@
#!/bin/bash
# Configure the Cloudera package repositories (CDH, CM, Navigator Keytrustee
# and Kafka) inside the image.  The version-specific add_repo_* helpers
# defined below are dispatched on DIB_CDH_VERSION at the end of this script.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail
# Configure the Cloudera 5.5.0 apt/yum repositories (CDH, Cloudera Manager,
# Navigator Keytrustee and Kafka) for the distro being built.
# Reads: DISTRO_NAME, DIB_CDH_VERSION, DIB_CDH_MINOR_VERSION.
# NOTE(review): add_repo_5_7_x / add_repo_5_9_x / add_repo_5_11_x duplicate
# this function almost verbatim — candidates for parameterization.
function add_repo_5_5_0 {
    case $DISTRO_NAME in
        ubuntu )
            # Add repository with postgresql package (it's a dependency of the
            # cloudera packages); the base image doesn't contain this repo.
            echo -e 'deb http://nova.clouds.archive.ubuntu.com/ubuntu/ trusty universe multiverse main' >> /etc/apt/sources.list
            # Cloudera repositories
            echo 'deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh5.5.0 contrib' > /etc/apt/sources.list.d/cdh5.list
            echo 'deb-src http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh5.5.0 contrib' >> /etc/apt/sources.list.d/cdh5.list
            wget -qO - http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh/archive.key | apt-key add -
            echo 'deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm5.5.0 contrib' > /etc/apt/sources.list.d/cm5.list
            echo 'deb-src http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm5.5.0 contrib' >> /etc/apt/sources.list.d/cm5.list
            wget -qO - http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm/archive.key | apt-key add -
            # Keytrustee (KMS) repository: Cloudera ships a ready-made .list.
            wget -O /etc/apt/sources.list.d/kms.list http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/cloudera.list
            wget -qO - http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/archive.key | apt-key add -
            # add Kafka repository
            echo -e 'deb http://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/ trusty-kafka2.0.2 contrib' >> /etc/apt/sources.list
            wget -qO - https://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/archive.key | apt-key add -
            # Change repository priority so Cloudera's zookeeper wins over
            # the Ubuntu archive's version.
            echo -e 'Package: zookeeper\nPin: origin "archive.cloudera.com"\nPin-Priority: 1001' > /etc/apt/preferences.d/cloudera-pin
            apt-get update
        ;;
        centos7 | rhel7 )
            releasever=7
            echo '[cloudera-cdh5]' > /etc/yum.repos.d/cloudera-cdh5.repo
            echo "name=Cloudera's Distribution for Hadoop, Version 5" >> /etc/yum.repos.d/cloudera-cdh5.repo
            echo "baseurl=http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/5.5.0/" >> /etc/yum.repos.d/cloudera-cdh5.repo
            echo "gpgkey = http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-cdh5.repo
            echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-cdh5.repo
            echo '[cloudera-manager]' > /etc/yum.repos.d/cloudera-manager.repo
            echo 'name=Cloudera Manager' >> /etc/yum.repos.d/cloudera-manager.repo
            echo "baseurl=http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/5.5.0/" >> /etc/yum.repos.d/cloudera-manager.repo
            echo "gpgkey = http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-manager.repo
            echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-manager.repo
            echo '[navigator-keytrustee]' > /etc/yum.repos.d/kms.repo
            echo "name=Cloudera's Distribution for navigator-Keytrustee, Version 5" >> /etc/yum.repos.d/kms.repo
            # Some minor versions have no dedicated keytrustee repo; probe for
            # it and fall back to the major-version path on a 404.
            RETURN_CODE="$(curl -s -o /dev/null -w "%{http_code}" http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/)"
            if [ "$RETURN_CODE" == "404" ]; then
                echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_VERSION/" >> /etc/yum.repos.d/kms.repo
            else
                echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/kms.repo
            fi
            echo "gpgkey = http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/kms.repo
            echo 'gpgcheck = 1' >> /etc/yum.repos.d/kms.repo
            echo "[cloudera-kafka]" > /etc/yum.repos.d/cloudera-kafka.repo
            echo "name=Cloudera's Distribution for kafka, Version 2.0.2" >> /etc/yum.repos.d/cloudera-kafka.repo
            echo "baseurl=http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/2.0.2/" >> /etc/yum.repos.d/cloudera-kafka.repo
            echo "gpgkey = http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-kafka.repo
            echo "gpgcheck = 1" >> /etc/yum.repos.d/cloudera-kafka.repo
            yum clean all
        ;;
    esac
}
function add_repo_5_7_x {
case $DISTRO_NAME in
ubuntu )
# Add repository with postgresql package (it's dependency of cloudera packages)
# Base image doesn't contain this repo
echo -e 'deb http://nova.clouds.archive.ubuntu.com/ubuntu/ trusty universe multiverse main' >> /etc/apt/sources.list
# Cloudera repositories
echo "deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh$DIB_CDH_MINOR_VERSION contrib" > /etc/apt/sources.list.d/cdh5.list
echo "deb-src http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh$DIB_CDH_MINOR_VERSION contrib" >> /etc/apt/sources.list.d/cdh5.list
wget -qO - http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh/archive.key | apt-key add -
echo "deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm$DIB_CDH_MINOR_VERSION contrib" > /etc/apt/sources.list.d/cm5.list
echo "deb-src http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm$DIB_CDH_MINOR_VERSION contrib" >> /etc/apt/sources.list.d/cm5.list
wget -qO - http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm/archive.key | apt-key add -
wget -O /etc/apt/sources.list.d/kms.list http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/cloudera.list
wget -qO - http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/archive.key | apt-key add -
# add Kafka repository
echo -e 'deb http://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/ trusty-kafka2.0.2 contrib' >> /etc/apt/sources.list
wget -qO - https://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/archive.key | apt-key add -
#change repository priority
echo -e 'Package: zookeeper\nPin: origin "archive.cloudera.com"\nPin-Priority: 1001' > /etc/apt/preferences.d/cloudera-pin
apt-get update
;;
centos7 | rhel7 )
releasever=7
echo '[cloudera-cdh5]' > /etc/yum.repos.d/cloudera-cdh5.repo
echo "name=Cloudera's Distribution for Hadoop, Version 5" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo "baseurl=http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo "gpgkey = http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-cdh5.repo
echo '[cloudera-manager]' > /etc/yum.repos.d/cloudera-manager.repo
echo 'name=Cloudera Manager' >> /etc/yum.repos.d/cloudera-manager.repo
echo "baseurl=http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/cloudera-manager.repo
echo "gpgkey = http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-manager.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-manager.repo
echo '[navigator-keytrustee]' > /etc/yum.repos.d/kms.repo
echo "name=Cloudera's Distribution for navigator-Keytrustee, Version 5" >> /etc/yum.repos.d/kms.repo
RETURN_CODE="$(curl -s -o /dev/null -w "%{http_code}" http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/)"
if [ "$RETURN_CODE" == "404" ]; then
echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_VERSION/" >> /etc/yum.repos.d/kms.repo
else
echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/kms.repo
fi
echo "gpgkey = http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/kms.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/kms.repo
echo "[cloudera-kafka]" > /etc/yum.repos.d/cloudera-kafka.repo
echo "name=Cloudera's Distribution for kafka, Version 2.0.2" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "baseurl=http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/2.0.2/" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "gpgkey = http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "gpgcheck = 1" >> /etc/yum.repos.d/cloudera-kafka.repo
yum clean all
;;
esac
}
function add_repo_5_9_x {
case $DISTRO_NAME in
ubuntu )
# Add repository with postgresql package (it's dependency of cloudera packages)
# Base image doesn't contain this repo
echo -e 'deb http://nova.clouds.archive.ubuntu.com/ubuntu/ trusty universe multiverse main' >> /etc/apt/sources.list
# Cloudera repositories
echo "deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh$DIB_CDH_MINOR_VERSION contrib" > /etc/apt/sources.list.d/cdh5.list
echo "deb-src http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh trusty-cdh$DIB_CDH_MINOR_VERSION contrib" >> /etc/apt/sources.list.d/cdh5.list
wget -qO - http://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh/archive.key | apt-key add -
echo "deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm$DIB_CDH_MINOR_VERSION contrib" > /etc/apt/sources.list.d/cm5.list
echo "deb-src http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm$DIB_CDH_MINOR_VERSION contrib" >> /etc/apt/sources.list.d/cm5.list
wget -qO - http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm/archive.key | apt-key add -
wget -O /etc/apt/sources.list.d/kms.list http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/cloudera.list
wget -qO - http://archive.cloudera.com/navigator-keytrustee5/ubuntu/trusty/amd64/navigator-keytrustee/archive.key | apt-key add -
# add Kafka repository
echo -e 'deb http://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/ trusty-kafka2.0.2 contrib' >> /etc/apt/sources.list
wget -qO - https://archive.cloudera.com/kafka/ubuntu/trusty/amd64/kafka/archive.key | apt-key add -
#change repository priority
echo -e 'Package: zookeeper\nPin: origin "archive.cloudera.com"\nPin-Priority: 1001' > /etc/apt/preferences.d/cloudera-pin
apt-get update
;;
centos7 | rhel7 )
releasever=7
echo '[cloudera-cdh5]' > /etc/yum.repos.d/cloudera-cdh5.repo
echo "name=Cloudera's Distribution for Hadoop, Version 5" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo "baseurl=http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo "gpgkey = http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-cdh5.repo
echo '[cloudera-manager]' > /etc/yum.repos.d/cloudera-manager.repo
echo 'name=Cloudera Manager' >> /etc/yum.repos.d/cloudera-manager.repo
echo "baseurl=http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/cloudera-manager.repo
echo "gpgkey = http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-manager.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-manager.repo
echo '[navigator-keytrustee]' > /etc/yum.repos.d/kms.repo
echo "name=Cloudera's Distribution for navigator-Keytrustee, Version 5" >> /etc/yum.repos.d/kms.repo
RETURN_CODE="$(curl -s -o /dev/null -w "%{http_code}" http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/)"
if [ "$RETURN_CODE" == "404" ]; then
echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_VERSION/" >> /etc/yum.repos.d/kms.repo
else
echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/kms.repo
fi
echo "gpgkey = http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/kms.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/kms.repo
echo "[cloudera-kafka]" > /etc/yum.repos.d/cloudera-kafka.repo
echo "name=Cloudera's Distribution for kafka, Version 2.0.2" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "baseurl=http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/2.0.2/" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "gpgkey = http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "gpgcheck = 1" >> /etc/yum.repos.d/cloudera-kafka.repo
yum clean all
;;
esac
}
function add_repo_5_11_x {
case $DISTRO_NAME in
ubuntu )
# Add repository with postgresql package (it's dependency of cloudera packages)
# Base image doesn't contain this repo
echo -e 'deb http://nova.clouds.archive.ubuntu.com/ubuntu/ xenial universe multiverse main' >> /etc/apt/sources.list
# Cloudera repositories
echo "deb [arch=amd64] http://archive.cloudera.com/cdh5/ubuntu/xenial/amd64/cdh xenial-cdh$DIB_CDH_MINOR_VERSION contrib" > /etc/apt/sources.list.d/cdh5.list
echo "deb-src http://archive.cloudera.com/cdh5/ubuntu/xenial/amd64/cdh xenial-cdh$DIB_CDH_MINOR_VERSION contrib" >> /etc/apt/sources.list.d/cdh5.list
wget -qO - http://archive.cloudera.com/cdh5/ubuntu/xenial/amd64/cdh/archive.key | apt-key add -
echo "deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/xenial/amd64/cm xenial-cm$DIB_CDH_MINOR_VERSION contrib" > /etc/apt/sources.list.d/cm5.list
echo "deb-src http://archive.cloudera.com/cm5/ubuntu/xenial/amd64/cm xenial-cm$DIB_CDH_MINOR_VERSION contrib" >> /etc/apt/sources.list.d/cm5.list
wget -qO - http://archive.cloudera.com/cm5/ubuntu/xenial/amd64/cm/archive.key | apt-key add -
wget -O /etc/apt/sources.list.d/kms.list http://archive.cloudera.com/navigator-keytrustee5/ubuntu/xenial/amd64/navigator-keytrustee/cloudera.list
wget -qO - http://archive.cloudera.com/navigator-keytrustee5/ubuntu/xenial/amd64/navigator-keytrustee/archive.key | apt-key add -
# add Kafka repository
echo -e 'deb http://archive.cloudera.com/kafka/ubuntu/xenial/amd64/kafka/ xenial-kafka2.2.0 contrib' >> /etc/apt/sources.list
wget -qO - https://archive.cloudera.com/kafka/ubuntu/xenial/amd64/kafka/archive.key | apt-key add -
#change repository priority
echo -e 'Package: zookeeper\nPin: origin "archive.cloudera.com"\nPin-Priority: 1001' > /etc/apt/preferences.d/cloudera-pin
apt-get update
;;
centos7 | rhel7 )
releasever=7
echo '[cloudera-cdh5]' > /etc/yum.repos.d/cloudera-cdh5.repo
echo "name=Cloudera's Distribution for Hadoop, Version 5" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo "baseurl=http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo "gpgkey = http://archive.cloudera.com/cdh5/redhat/$releasever/x86_64/cdh/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-cdh5.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-cdh5.repo
echo '[cloudera-manager]' > /etc/yum.repos.d/cloudera-manager.repo
echo 'name=Cloudera Manager' >> /etc/yum.repos.d/cloudera-manager.repo
echo "baseurl=http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/cloudera-manager.repo
echo "gpgkey = http://archive.cloudera.com/cm5/redhat/$releasever/x86_64/cm/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-manager.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/cloudera-manager.repo
echo '[navigator-keytrustee]' > /etc/yum.repos.d/kms.repo
echo "name=Cloudera's Distribution for navigator-Keytrustee, Version 5" >> /etc/yum.repos.d/kms.repo
RETURN_CODE="$(curl -s -o /dev/null -w "%{http_code}" http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/)"
if [ "$RETURN_CODE" == "404" ]; then
echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_VERSION/" >> /etc/yum.repos.d/kms.repo
else
echo "baseurl=http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/$DIB_CDH_MINOR_VERSION/" >> /etc/yum.repos.d/kms.repo
fi
echo "gpgkey = http://archive.cloudera.com/navigator-keytrustee5/redhat/$releasever/x86_64/navigator-keytrustee/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/kms.repo
echo 'gpgcheck = 1' >> /etc/yum.repos.d/kms.repo
echo "[cloudera-kafka]" > /etc/yum.repos.d/cloudera-kafka.repo
echo "name=Cloudera's Distribution for kafka, Version 2.2.0" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "baseurl=http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/2.2.0/" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "gpgkey = http://archive.cloudera.com/kafka/redhat/$releasever/x86_64/kafka/RPM-GPG-KEY-cloudera" >> /etc/yum.repos.d/cloudera-kafka.repo
echo "gpgcheck = 1" >> /etc/yum.repos.d/cloudera-kafka.repo
yum clean all
;;
esac
}
# Default the full minor version to "<major>.0" when the caller did not
# supply one.
DIB_CDH_MINOR_VERSION=${DIB_CDH_MINOR_VERSION:-$DIB_CDH_VERSION.0}
# Call version-specific script to install the desired version of CDH
case "$DIB_CDH_VERSION" in
    5.5)
        echo "Installing CDH Version $DIB_CDH_VERSION..."
        add_repo_5_5_0
        ;;
    5.7)
        echo "Installing CDH Version $DIB_CDH_MINOR_VERSION..."
        add_repo_5_7_x
        ;;
    5.9)
        echo "Installing CDH Version $DIB_CDH_MINOR_VERSION..."
        add_repo_5_9_x
        ;;
    5.11)
        echo "Installing CDH Version $DIB_CDH_MINOR_VERSION..."
        add_repo_5_11_x
        ;;
    *)
        # Unknown version: abort the image build.
        echo "Invalid CDH Version : $DIB_CDH_VERSION specified, exiting install."
        exit 1
esac

View File

@ -1,44 +0,0 @@
===========
hadoop-mapr
===========
Creates images with local mirrors of MapR repositories:
`core <http://package.mapr.com/releases/>`_ and
`ecosystem <http://package.mapr.com/releases/ecosystem-4.x/>`_.
Installs `OpenJDK <http://openjdk.java.net/>`_ and
`Scala <https://www.scala-lang.org/>`_.
In order to create the MapR images with ``diskimage-create.sh``, use the
following syntax to select the ``MapR`` plugin:
.. sourcecode:: bash
diskimage-create.sh -p mapr [-i ubuntu|centos7|rhel7] [-r 5.1.0 | 5.2.0]
In order to speed up image creation process you can download archives with MapR
repositories and specify environment variables:
``DIB_MAPR_CORE_DEB_REPO``, ``DIB_MAPR_CORE_RPM_REPO``,
``DIB_MAPR_ECO_DEB_REPO``, ``DIB_MAPR_ECO_RPM_REPO``.
For example:
.. sourcecode:: bash
export DIB_MAPR_CORE_DEB_REPO="file://<path-to-archive>/mapr-v5.2.0GA.deb.tgz"
export DIB_MAPR_CORE_RPM_REPO="file://<path-to-archive>/mapr-v5.2.0GA.rpm.tgz"
export DIB_MAPR_ECO_DEB_REPO="http://<URL>/mapr-ecosystem.deb.tgz"
export DIB_MAPR_ECO_RPM_REPO="http://<URL>/mapr-ecosystem.rpm.tgz"
diskimage-create.sh -p mapr -r 5.2.0
Environment Variables
---------------------
DIB_MAPR_VERSION
:Required: Yes
:Description: Version of MapR to install.
:Example: ``DIB_MAPR_VERSION=5.2.0``
SCALA_VERSION
:Required: No
:Description: Force a specific version of Scala (disable autodetection).
:Example: ``SCALA_VERSION=2.12.2``

View File

@ -1,4 +0,0 @@
extjs
java
mysql
package-installs

View File

@ -1,2 +0,0 @@
export EXTJS_DESTINATION_DIR="/opt/mapr-repository/"
export EXTJS_NO_UNPACK=1

View File

@ -1,155 +0,0 @@
#!/bin/bash
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
set -x
fi
set -eu
set -o pipefail
echo "START: installing MapR core dependencies"
if [ "$DISTRO_NAME" = "ubuntu" ]; then
# Required for libicu48
cat >> /etc/apt/sources.list.d/security_repo.list << EOF
deb http://security.ubuntu.com/ubuntu precise-security main
EOF
# Required for libmysqlclient16
cat >> /etc/apt/sources.list.d/security_repo.list << EOF
deb http://old-releases.ubuntu.com/ubuntu lucid-security main
EOF
apt-get update
install-packages binutils \
daemon \
dpkg-dev \
dpkg-repack \
gcc \
gcc-4.8 \
gcc-doc \
gcc-multilib \
iputils-arping \
libasan0 \
libatomic1 \
libc-dev-bin \
libc6 \
libc6-dev \
libcrypt-passwdmd5-perl \
libgcc-4.8-dev \
libgomp1 \
libgssglue1 \
libicu48 \
libitm1 \
libmysqlclient-dev \
libmysqlclient16 \
libmysqlclient18 \
libnfsidmap2 \
libquadmath0 \
libsasl2-dev \
libsasl2-modules-gssapi-mit \
libssl0.9.8 \
libtirpc1 \
libtsan0 \
libxslt1.1 \
linux-libc-dev \
manpages-dev \
mtools \
mysql-common \
nfs-common \
open-iscsi \
openjdk-6-jre \
rpcbind \
sdparm \
syslinux \
syslinux-common \
unzip \
watchdog \
zlib1g-dev \
zip
elif [ "$DISTRO_NAME" = "centos7" -o "$DISTRO_NAME" = "rhel7" ]; then
install-packages cdparanoia-libs \
cups \
cups-libs \
createrepo \
cvs \
cyrus-sasl-gssapi \
cyrus-sasl-plain \
db4-cxx \
db4-devel \
foomatic \
foomatic-db \
foomatic-db-filesystem \
foomatic-db-ppds \
gdbm-devel \
gettext \
ghostscript \
ghostscript-fonts \
glibc \
glibc-common \
glibc-devel \
glibc-headers \
gstreamer \
gstreamer-plugins-base \
gstreamer-tools \
hdparm \
irqbalance \
iso-codes \
kernel-headers \
lcms-libs \
libXt \
libXv \
libXxf86vm \
libgomp \
libgssglue \
libgudev1 \
libicu \
libmng \
liboil \
libtheora \
libtirpc \
libvisual \
libxslt \
mesa-dri-drivers \
mesa-dri-filesystem \
mesa-dri1-drivers \
mesa-libGL \
mesa-libGLU \
mesa-private-llvm \
mtools \
nc \
numactl \
openjpeg-libs \
patch \
pax \
perl-CGI \
perl-ExtUtils-MakeMaker \
perl-ExtUtils-ParseXS \
perl-Test-Harness \
perl-Test-Simple \
perl-devel \
phonon-backend-gstreamer \
poppler \
poppler-data \
poppler-utils \
portreserve \
qt \
qt-sqlite \
qt-x11 \
qt3 \
redhat-lsb \
redhat-lsb-compat \
redhat-lsb-core \
redhat-lsb-graphics \
redhat-lsb-printing \
rpcbind \
sdparm \
soci \
soci-mysql \
syslinux \
syslinux-nonlinux \
unzip \
urw-fonts \
yum-utils \
xml-common \
zip
fi
echo "END: installing MapR core dependencies"

View File

@ -1,47 +0,0 @@
#!/bin/bash
# Install the Scala runtime package (.deb or .rpm from lightbend.com).
#
# Environment:
#   SCALA_VERSION  (optional) force a specific version, disables autodetection
#   DIB_RELEASE    ubuntu release codename; on trusty the fallback version is
#                  forced because scala >= 2.12 needs openjdk-8
#   DISTRO_NAME    distro being built (provided by diskimage-builder)
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

echo "START: installing Scala"

# Fallback version, used when autodetection fails or is not applicable.
DEF_VERSION="2.11.6"

RETURN_CODE="$(curl -s -L -o /dev/null -w "%{http_code}" https://www.scala-lang.org/)"
if [ "$RETURN_CODE" != "200" ]; then
    echo "https://www.scala-lang.org is unreachable" && exit 1
fi

if [ -n "${SCALA_VERSION:-}" ]; then
    VERSION=${SCALA_VERSION}
elif [ "trusty" == "${DIB_RELEASE:-}" ]; then
    # scala >= 2.12 for ubuntu depends on openjdk-8, not available on trusty
    VERSION=${DEF_VERSION}
else
    # Scrape the current version number from the web site.  The original code
    # checked "$?" after the assignment, but under 'set -e' a failed curl
    # aborted the script before the check ever ran; run the probe inside the
    # condition (and use '||' instead of the obsolescent '-o') so the
    # intended fallback to DEF_VERSION actually happens.
    if ! VERSION="$(curl -s -L --fail https://www.scala-lang.org| tr -d '\n' | sed 's/^.*<div[^<]\+scala-version">[^0-9]\+\([0-9\.\?]\+\)<.\+$/\1/')" \
            || [ -z "${VERSION}" ]; then
        echo "Installing default version $DEF_VERSION"
        VERSION=${DEF_VERSION}
    fi
fi

PKG=scala-${VERSION}
URL="https://downloads.lightbend.com/scala/${VERSION}"

if [ "$DISTRO_NAME" = "ubuntu" ]; then
    wget -N "${URL}/${PKG}.deb"
    dpkg -i "${PKG}.deb"
    rm "${PKG}.deb"
elif [ "$DISTRO_NAME" = "centos7" -o "$DISTRO_NAME" = "rhel7" ]; then
    rpm -Uhv "${URL}/${PKG}.rpm"
fi

echo "END: installing Scala"

View File

@ -1,38 +0,0 @@
#!/bin/bash
# Download the MapR core repository archive, unpack it into the image and
# register it as a local package repository (helpers come from
# package_utils.sh).  DIB_MAPR_CORE_{DEB,RPM}_REPO may override the default
# download URL derived from DIB_MAPR_VERSION.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

source "$(dirname $0)/../resources/package_utils.sh"

echo "START: installing MapR core repository"

# Print the distro-appropriate URL of the MapR core repo tarball.
get_repo_url() {
    local url=""
    case "$DISTRO_NAME" in
        ubuntu)
            url="${DIB_MAPR_CORE_DEB_REPO:-http://package.mapr.com/releases/v${DIB_MAPR_VERSION}/ubuntu/mapr-v${DIB_MAPR_VERSION}GA.deb.tgz}"
            ;;
        centos7|rhel7)
            url="${DIB_MAPR_CORE_RPM_REPO:-http://package.mapr.com/releases/v${DIB_MAPR_VERSION}/redhat/mapr-v${DIB_MAPR_VERSION}GA.rpm.tgz}"
            ;;
    esac
    echo "$url"
}

MAPR_REPO_NAME="mapr_core"
MAPR_REPO_DIR="/opt/mapr-repository/core"
MAPR_REPO_URL="$(get_repo_url)"

echo "Downloading MapR repository archive"
mkdir -p "$MAPR_REPO_DIR" && curl "$MAPR_REPO_URL" | tar -xz -C "$MAPR_REPO_DIR"

echo "Creating local repository"
create_repo "$MAPR_REPO_DIR"

echo "Adding MapR repository"
add_local_repo "$MAPR_REPO_NAME" "$MAPR_REPO_DIR"

echo "END: installing MapR core repository"

View File

@ -1,59 +0,0 @@
#!/bin/bash
# Build a local mirror of the MapR "ecosystem" package repository inside the
# image, using the package group / version-spec helpers from
# package_utils.sh.  DIB_MAPR_ECO_{DEB,RPM}_REPO may override the default
# upstream URL chosen from DIB_MAPR_VERSION and DISTRO_NAME.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail
VERSIONS_PY="$(dirname $0)/../resources/versions.py"
source "$(dirname $0)/../resources/package_utils.sh"
echo "START: installing MapR ecosystem repository"
# Print the upstream ecosystem repo URL for the current distro/MapR version.
# An empty string is printed for unsupported combinations.
get_repo_url() {
    local repo_url=""
    if [ "$DISTRO_NAME" = "ubuntu" ]; then
        case "$DIB_MAPR_VERSION" in
            "5.1.0")
                repo_url="${DIB_MAPR_ECO_DEB_REPO:-http://package.mapr.com/releases/ecosystem-5.x/ubuntu binary/}"
            ;;
            "5.2.0")
                repo_url="${DIB_MAPR_ECO_DEB_REPO:-http://package.mapr.com/releases/MEP/MEP-2.0.0/ubuntu/ binary trusty}"
            ;;
        esac
    elif [ "$DISTRO_NAME" = "centos7" -o "$DISTRO_NAME" = "rhel7" ]; then
        case "$DIB_MAPR_VERSION" in
            "5.1.0")
                repo_url="${DIB_MAPR_ECO_RPM_REPO:-http://package.mapr.com/releases/ecosystem-5.x/redhat}"
            ;;
            "5.2.0")
                repo_url="${DIB_MAPR_ECO_RPM_REPO:-http://package.mapr.com/releases/MEP/MEP-2.0.0/redhat}"
            ;;
        esac
    fi
    echo "$repo_url"
}
# NOTE(review): RPM_REPO / DEB_REPO appear unused in this script — possibly
# leftovers; confirm before removal.
RPM_REPO="${DIB_MAPR_ECO_RPM_REPO:-}"
DEB_REPO="${DIB_MAPR_ECO_DEB_REPO:-}"
MAPR_REPO_DIR="/opt/mapr-repository/ecosystem"
MAPR_REPO_URL="$(get_repo_url)"
MAPR_REPO_NAME="mapr_ecosystem"
MAPR_PKG_GROUPS="$(dirname $0)/../resources/packages.json"
# Prefer a distro-specific spec file when one exists, else the generic one.
# NOTE(review): the unquoted $(dirname $0) in this test would break on paths
# containing whitespace (SC2046).
if [ -f $(dirname $0)/../resources/spec_${DIB_MAPR_VERSION}_${DISTRO_NAME}.json ]; then
    MAPR_SPEC="$(dirname $0)/../resources/spec_${DIB_MAPR_VERSION}_${DISTRO_NAME}.json"
else
    MAPR_SPEC="$(dirname $0)/../resources/spec_${DIB_MAPR_VERSION}.json"
fi
echo "Creating local MapR ecosystem repository"
localize_repo "$MAPR_REPO_NAME" "$MAPR_REPO_URL" "$MAPR_PKG_GROUPS" "$MAPR_SPEC" "$MAPR_REPO_DIR"
echo $MAPR_SPEC
echo "END: installing MapR ecosystem repository"

View File

@ -1,13 +0,0 @@
#!/bin/bash
# On RHEL/CentOS 7, install soci and soci-mysql from EPEL, then remove the
# EPEL release package again so the repo does not leak into the final image.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

case "$DISTRO_NAME" in
    rhel7|centos7)
        yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
        yum install -y soci soci-mysql
        yum remove -y epel-release
        ;;
esac

View File

@ -1,2 +0,0 @@
curl:
wget:

View File

@ -1,12 +0,0 @@
#!/bin/bash
# Comment out "Defaults requiretty" in /etc/sudoers on RHEL/CentOS 7 so that
# remote (non-interactive) commands may use sudo without a controlling tty.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

if [[ "$DISTRO_NAME" = "centos7" || "$DISTRO_NAME" = "rhel7" ]]; then
    sed -i '/^Defaults requiretty*/ s/^/#/' /etc/sudoers
fi

View File

@ -1,13 +0,0 @@
#!/bin/bash
# Refresh the package manager metadata so repositories configured by earlier
# elements are visible to subsequent install steps.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

case "$DISTRO_NAME" in
    ubuntu)
        apt-get update
        ;;
    centos7|rhel7)
        yum clean all && yum repolist
        ;;
esac

View File

@ -1,226 +0,0 @@
# execute_in_directory <directory> <command...>
# Run the command from inside <directory> (resolved to a real path). The
# directory is popped again only when the command succeeds, matching the
# original pushd && eval && popd chaining.
execute_in_directory() {
    local dir
    dir="$(readlink -f "$1")"
    shift
    pushd "$dir" && eval "$*" && popd
}
# get_distro
# Print the diskimage-builder distro identifier (e.g. "ubuntu", "centos7").
get_distro() {
    printf '%s\n' "$DISTRO_NAME"
}
# download_apt_package <package> [version] [directory]
# Fetch a .deb into <directory> (default: cwd) without installing it.
# A version pins the download to "name=version*".
download_apt_package() {
    local pkg="$1"
    local ver="${2:-}"
    local dest="${3:-$(pwd)}"
    local spec="$pkg${ver:+=$ver*}"
    execute_in_directory "$dest" apt-get --allow-unauthenticated download "$spec"
}
# download_yum_package <package> [version] [directory]
# Fetch an RPM into <directory> (default: cwd) without installing it.
# A version pins the download to "name-version*".
download_yum_package() {
    local pkg="$1"
    local ver="${2:-}"
    local dest="${3:-$(pwd)}"
    yumdownloader --destdir "$dest" "$pkg${ver:+-$ver*}"
}
# download_package <package> [version] [directory] [distro]
# Dispatch to the apt or yum downloader according to the distro
# (defaulting to the current image's distro).
download_package() {
    local pkg="$1"
    local ver="${2:-}"
    local dest="${3:-$(pwd)}"
    local distro="${4:-$(get_distro)}"
    case "$distro" in
        ubuntu)
            download_apt_package "$pkg" "$ver" "$dest" ;;
        centos7|rhel7)
            download_yum_package "$pkg" "$ver" "$dest" ;;
    esac
}
# get_packages <package_groups_file> <spec_file> [version_separator]
# Expand the spec into "package<sep>version" pairs using the helper script
# pointed to by $VERSIONS_PY.
get_packages() {
    local groups="$1"
    local spec="$2"
    local sep="${3:-:}"
    python "$VERSIONS_PY" --separator "$sep" "$groups" "$spec"
}
# download_packages <package_groups_file> <spec_file> [directory] [distro]
# Download every "name:version" pair produced by get_packages into the
# target directory.
download_packages() {
    local groups="$1"
    local spec="$2"
    local dest="${3:-$(pwd)}"
    local distro="${4:-$(get_distro)}"
    local sep=":"
    local entry
    for entry in $(get_packages "$groups" "$spec" "$sep"); do
        local name_ver
        IFS="$sep" read -ra name_ver <<< "$entry"
        download_package "${name_ver[@]}" "$dest" "$distro"
    done
}
# create_apt_repo <directory>
# Index the .deb files in <directory> into a flat APT repository under
# <directory>/binary (Packages.gz generated by dpkg-scanpackages).
create_apt_repo() {
    local dir
    dir="$(readlink -f "$1")"
    local bindir="$dir/binary"
    mkdir -p "$bindir"
    execute_in_directory "$dir" "dpkg-scanpackages -m . /dev/null | gzip -9c > $bindir/Packages.gz"
}
# create_yum_repo <directory>
# Build yum repodata for the RPMs in <directory>.
create_yum_repo() {
    createrepo "$(readlink -f "$1")"
}
# create_repo <directory> [distro]
# Build repository metadata appropriate to the distro's package format.
create_repo() {
    local dir
    dir="$(readlink -f "$1")"
    local distro="${2:-$(get_distro)}"
    case "$distro" in
        ubuntu)
            create_apt_repo "$dir" ;;
        centos7|rhel7)
            create_yum_repo "$dir" ;;
    esac
}
# add_apt_repo <repo_name> <repo_url>
# Register an APT source ("deb <url>") under sources.list.d and refresh
# the package index. The refresh only runs if the write succeeded.
add_apt_repo() {
    local name="$1"
    local url="$2"
    local list_file="/etc/apt/sources.list.d/$name.list"
    echo "deb $url" > "$list_file" && apt-get update
}
# add_yum_repo <repo_name> <repo_url>
# Write /etc/yum.repos.d/<name>.repo and force yum to re-read metadata.
add_yum_repo() {
    local repo_name="$1"
    local repo_url="$2"
    local repo_path="/etc/yum.repos.d/$repo_name.repo"
    # gpgcheck=0: packages from this repo are installed unverified.
    cat > "$repo_path" << EOF
[$repo_name]
name=$repo_name
baseurl=$repo_url
enabled=1
gpgcheck=0
protect=1
EOF
    # NOTE(review): 'yum check-update' exits 100 when updates exist, which
    # would abort a 'set -e' caller — confirm that is intended here.
    yum clean all && rm -rf /var/cache/yum/* && yum check-update
}
# add_repo <repo_name> <repo_url> [distro]
# Register a package repository using the distro's native mechanism.
add_repo() {
    local name="$1"
    local url="$2"
    local distro="${3:-$(get_distro)}"
    case "$distro" in
        ubuntu)
            add_apt_repo "$name" "$url" ;;
        centos7|rhel7)
            add_yum_repo "$name" "$url" ;;
    esac
}
# add_local_apt_repo <repo_name> <directory>
# Register the flat APT repo built by create_apt_repo as a file: source
# (the index lives in the binary/ subdirectory).
add_local_apt_repo() {
    local name="$1"
    local dir
    dir="$(readlink -f "$2")"
    add_apt_repo "$name" "file:$dir binary/"
}
# add_local_yum_repo <repo_name> <directory>
# Register an on-disk yum repository via a file:// baseurl.
add_local_yum_repo() {
    local name="$1"
    local dir
    dir="$(readlink -f "$2")"
    add_yum_repo "$name" "file://$dir"
}
# add_local_repo <repo_name> <directory> [distro]
# Register an on-disk repository using the distro's native mechanism.
add_local_repo() {
    local name="$1"
    local dir
    dir="$(readlink -f "$2")"
    local distro="${3:-$(get_distro)}"
    case "$distro" in
        ubuntu)
            add_local_apt_repo "$name" "$dir" ;;
        centos7|rhel7)
            add_local_yum_repo "$name" "$dir" ;;
    esac
}
# remove_apt_repo <repo_name>
# Delete the APT source list for <repo_name> and refresh the index
# (refresh only runs if the delete succeeded).
remove_apt_repo() {
    rm "/etc/apt/sources.list.d/$1.list" && apt-get update
}
# remove_yum_repo <repo_name>
# Delete the yum .repo file for <repo_name>.
remove_yum_repo() {
    rm "/etc/yum.repos.d/$1.repo"
}
# remove_repo <repo_name> [distro]
# Unregister a repository using the distro's native mechanism.
remove_repo() {
    local name="$1"
    local distro="${2:-$(get_distro)}"
    case "$distro" in
        ubuntu)
            remove_apt_repo "$name" ;;
        centos7|rhel7)
            remove_yum_repo "$name" ;;
    esac
}
# create_local_repo <repo_name> <repo_url> <package_groups_file> <spec_file> <directory>
# Temporarily register the remote repo, mirror the selected packages into
# <directory>, unregister the remote repo, then index the local copy.
create_local_repo() {
    local name="$1" url="$2" groups="$3" spec="$4" dest="$5"
    add_repo "$name" "$url"
    mkdir -p "$dest" && dest="$(readlink -f "$dest")"
    download_packages "$groups" "$spec" "$dest"
    remove_repo "$name"
    create_repo "$dest"
}
# localize_repo <repo_name> <repo_url> <package_groups_file> <spec_file> <directory>
# Mirror a remote repository locally, then register the local copy as a
# package source for subsequent installs.
localize_repo() {
    local name="$1" url="$2" groups="$3" spec="$4" dest="$5"
    mkdir -p "$dest" && dest="$(readlink -f "$dest")"
    create_local_repo "$name" "$url" "$groups" "$spec" "$dest"
    add_local_repo "$name" "$dest"
}

View File

@ -1,140 +0,0 @@
{
"asynchbase": {
"all": [
"mapr-asynchbase"
]
},
"drill": {
"all": [
"mapr-drill"
]
},
"flume": {
"all": [
"mapr-flume"
]
},
"hbase": {
"all": [
"mapr-hbase",
"mapr-hbase-internal",
"mapr-hbase-master",
"mapr-hbase-regionserver",
"mapr-hbasethrift",
"mapr-hbase-rest"
],
"0.98.12": [
"mapr-hbase",
"mapr-hbase-internal",
"mapr-hbase-master",
"mapr-hbase-regionserver",
"mapr-hbasethrift",
"mapr-libhbase",
"mapr-hbase-rest"
],
"1.1.1": [
"mapr-hbase",
"mapr-hbase-internal",
"mapr-hbase-master",
"mapr-hbase-regionserver",
"mapr-hbasethrift",
"mapr-libhbase",
"mapr-hbase-rest"
]
},
"hive": {
"all": [
"mapr-hive",
"mapr-hivemetastore",
"mapr-hiveserver2"
]
},
"httpfs": {
"all": [
"mapr-httpfs"
]
},
"hue": {
"all": [
"mapr-hue",
"mapr-hue-base",
"mapr-hue-livy"
],
"3.10.0": [
"mapr-hue",
"mapr-hue-livy"
]
},
"impala": {
"all": [
"mapr-impala",
"mapr-impala-catalog",
"mapr-impala-server",
"mapr-impala-statestore",
"mapr-impala-udf"
]
},
"mahout": {
"all": [
"mapr-mahout"
]
},
"oozie": {
"all": [
"mapr-oozie",
"mapr-oozie-internal"
]
},
"pig": {
"all": [
"mapr-pig"
]
},
"sentry": {
"all": [
"mapr-sentry"
]
},
"spark": {
"all": [
"mapr-spark",
"mapr-spark-historyserver",
"mapr-spark-master"
]
},
"sqoop": {
"all": [
"mapr-sqoop2-client",
"mapr-sqoop2-server"
]
},
"storm": {
"all": [
"mapr-storm",
"mapr-storm-ui",
"mapr-storm-nimbus",
"mapr-storm-supervisor"
]
},
"tez": {
"all": [
"mapr-tez"
]
},
"kafka": {
"all": [
"mapr-kafka"
]
},
"kafka-connect": {
"all": [
"mapr-kafka-connect-hdfs",
"mapr-kafka-connect-jdbc"
]
},
"kafka-rest": {
"all": [
"mapr-kafka-rest"
]
}
}

View File

@ -1,46 +0,0 @@
{
"drill": [
"1.1.0",
"1.2.0",
"1.4.0"
],
"flume": [
"1.5.0",
"1.6.0"
],
"hbase": [
"0.98.9",
"0.98.12"
],
"hive": [
"0.13",
"1.0",
"1.2"
],
"httpfs": [
"1.0"
],
"hue": [
"3.8.1",
"3.9.0"
],
"impala": [
"1.4.1"
],
"mahout": [
"0.10.0"
],
"oozie": [
"4.2.0"
],
"pig": [
"0.14",
"0.15"
],
"sqoop": [
"2.0.0"
],
"spark": [
"1.5.2"
]
}

View File

@ -1,50 +0,0 @@
{
"drill": [
"1.9.0"
],
"flume": [
"1.6.0"
],
"hbase": [
"1.1.1"
],
"hive": [
"1.2"
],
"httpfs": [
"1.0"
],
"hue": [
"3.10.0"
],
"impala": [
"2.5.0"
],
"mahout": [
"0.12.0"
],
"oozie": [
"4.2.0"
],
"pig": [
"0.16"
],
"sqoop": [
"2.0.0"
],
"spark": [
"2.0.1"
],
"sentry": [
"1.6.0"
],
"kafka": [
"0.9.0"
],
"kafka-connect": [
"2.0.1"
],
"kafka-rest": [
"2.0.1"
]
}

View File

@ -1,47 +0,0 @@
{
"drill": [
"1.9.0"
],
"flume": [
"1.6.0"
],
"hbase": [
"1.1.1"
],
"hive": [
"1.2"
],
"httpfs": [
"1.0"
],
"hue": [
"3.10.0"
],
"mahout": [
"0.12.0"
],
"oozie": [
"4.2.0"
],
"pig": [
"0.16"
],
"sqoop": [
"2.0.0"
],
"spark": [
"2.0.1"
],
"sentry": [
"1.6.0"
],
"kafka": [
"0.9.0"
],
"kafka-connect": [
"2.0.1"
],
"kafka-rest": [
"2.0.1"
]
}

View File

@ -1,82 +0,0 @@
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import sys
# Separator between alternative version prefixes inside a spec group key,
# e.g. a key of "5.1,5.2" applies to both version families.
_GROUP_VERSION_SEPARATOR = ","
# Group key whose package list applies to every version (fallback).
_ALL_GROUP_VERSION = "all"
def _build_parser():
parser = argparse.ArgumentParser()
parser.add_argument("packages", help="path to the packages.json")
parser.add_argument("spec", help="path to the spec.json")
parser.add_argument("--separator", default=":",
help="separator between package name"
" and version in output")
return parser
def _load_json(path):
with open(path) as json_file:
return json.load(json_file)
def _version_matches(version, group_version):
for gv in group_version.split(_GROUP_VERSION_SEPARATOR):
if version.startswith(gv):
return True
return False
def _get_packages(version, group_spec):
for group_version in group_spec:
if _version_matches(version, group_version):
return group_spec[group_version]
return group_spec[_ALL_GROUP_VERSION]
def _get_package_versions(spec, package_groups):
return [(package, version)
for pg_name, versions in spec.items()
for version in versions
for package in _get_packages(version, package_groups[pg_name])]
# Parser is constructed at import time; main() reuses this module-level
# instance.
parser = _build_parser()


def main(args=None):
    """Print one "package<separator>version" line per spec entry to stdout.

    NOTE(review): ``args or sys.argv[1:]`` means an explicitly passed empty
    list falls back to sys.argv — presumably intended; confirm.
    """
    args = parser.parse_args(args or sys.argv[1:])
    spec = _load_json(args.spec)
    package_groups = _load_json(args.packages)
    separator = args.separator
    package_versions = _get_package_versions(spec, package_groups)
    package_format = "%s" + separator + "%s\n"
    # map() is lazy on Python 3; writelines consumes the iterable either way.
    package_versions = map(lambda pv: package_format % pv, package_versions)
    sys.stdout.writelines(package_versions)


if __name__ == "__main__":
    main()

View File

@ -1,14 +0,0 @@
#!/bin/bash
# Validate that the required DIB_MAPR_VERSION environment variable is set
# before the MapR hooks run.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# Use ${...:-} so an unset variable reaches the friendly message below
# instead of triggering an opaque 'unbound variable' abort from set -u.
echo "MapR version ${DIB_MAPR_VERSION:-}"
if [ -z "${DIB_MAPR_VERSION:-}" ]; then
    # printf: plain echo would print the '\n' escape literally.
    printf 'DIB_MAPR_VERSION not set.\nExit\n' >&2
    exit 1
fi

View File

@ -1,36 +0,0 @@
======
hadoop
======
Installs Java and Hadoop, configures SSH.
HOWTO build Hadoop Native Libs
------------------------------
- Install: *jdk >= 6*, *maven*, *cmake* and *protobuf >= 2.5.0*
- Get Hadoop source code:
.. code:: bash
wget http://archive.apache.org/dist/hadoop/core/hadoop-2.7.1/hadoop-2.7.1-src.tar.gz
- Unpack source:
.. code:: bash
tar xvf hadoop-2.7.1-src.tar.gz
- Build Hadoop:
.. code:: bash
cd hadoop-2.7.1-src
mvn package -Pdist,native -DskipTests
- Create tarball with Hadoop Native Libs:
.. code:: bash
cd hadoop-dist/target/hadoop-2.7.1/lib
tar -czvf hadoop-native-libs-2.7.1.tar.gz native

View File

@ -1,5 +0,0 @@
cache-url
java
package-installs
sahara-version
ssh

View File

@ -1,137 +0,0 @@
#!/bin/bash
# Install Apache Hadoop from the tarball previously staged in /tmp/hadoop,
# inject prebuilt native libraries, and pre-configure env/log/pid paths.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# Unpack Hadoop into /opt, replace the bundled native libs with the
# version-specific prebuilt tarball, and write profile/env configuration.
# Reads: DIB_HADOOP_VERSION, HADOOP_V*_NATIVE_LIBS_DOWNLOAD_URL (one per
# supported version), tmp_dir (set by the caller below), JAVA_RC.
function install_hadoop_v2 {
    # Pick the native-libs tarball matching the requested Hadoop version.
    case "$DIB_HADOOP_VERSION" in
        "2.7.1")
            hadoop_native_libs_url="${HADOOP_V2_7_1_NATIVE_LIBS_DOWNLOAD_URL}"
            ;;
        "2.7.5")
            hadoop_native_libs_url="${HADOOP_V2_7_5_NATIVE_LIBS_DOWNLOAD_URL}"
            ;;
        "2.8.2")
            hadoop_native_libs_url="${HADOOP_V2_8_2_NATIVE_LIBS_DOWNLOAD_URL}"
            ;;
        "3.0.1")
            hadoop_native_libs_url="${HADOOP_V3_0_1_NATIVE_LIBS_DOWNLOAD_URL}"
            ;;
        *)
            echo "Invalid DIB_HADOOP_VERSION: $DIB_HADOOP_VERSION"
            exit 1
            ;;
    esac
    package="hadoop-$DIB_HADOOP_VERSION.tar.gz"
    echo "Installing hadoop"
    INSTALL_DIR="/opt"
    HADOOP_HOME="/opt/hadoop"
    mkdir -p "$INSTALL_DIR"
    tar xvf "$tmp_dir/$package" -C "$INSTALL_DIR"
    # Stable path: /opt/hadoop -> /opt/hadoop-<version>
    ln -s "$INSTALL_DIR/hadoop-$DIB_HADOOP_VERSION" "$HADOOP_HOME"
    chown -R hadoop:hadoop "$INSTALL_DIR/hadoop-$DIB_HADOOP_VERSION"
    chown -R hadoop:hadoop "$HADOOP_HOME"
    rm -r $tmp_dir
    echo "Inject Hadoop native libs"
    # Replace the bundled native libraries with the downloaded build.
    rm -r "$HADOOP_HOME/lib/native"
    wget "$hadoop_native_libs_url"
    native_libs_filename=$(basename "$hadoop_native_libs_url")
    tar xvf "$native_libs_filename" -C "$HADOOP_HOME/lib"
    rm "$native_libs_filename"
    echo "Pre-configuring Hadoop"
    HADOOP_PID_DIR="/var/run/hadoop"
    # Expose Hadoop homes, PATH additions and pid dirs to all login shells.
    # \$-escaped variables are expanded at login time, not here.
    cat >> /etc/profile.d/hadoop.sh <<EOF
export HADOOP_COMMON_HOME=$HADOOP_HOME
export PATH=\$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_HDFS_HOME=\$HADOOP_COMMON_HOME
export HADOOP_YARN_HOME=\$HADOOP_COMMON_HOME
export HADOOP_MAPRED_HOME=\$HADOOP_COMMON_HOME
export HADOOP_PID_DIR=$HADOOP_PID_DIR
export YARN_PID_DIR=$HADOOP_PID_DIR
export HADOOP_MAPRED_PID_DIR=$HADOOP_PID_DIR
EOF
    # Point Hadoop/YARN logs at /mnt/log and source the Java environment.
    sed -i -e "s,\#export HADOOP_LOG_DIR=.*,export HADOOP_LOG_DIR=/mnt/log/hadoop/\$USER," \
        -e "s,export HADOOP_SECURE_DN_LOG_DIR=.*,export HADOOP_SECURE_DN_LOG_DIR=/mnt/log/hadoop/hdfs," \
        $HADOOP_HOME/etc/hadoop/hadoop-env.sh
    echo "source $JAVA_RC" >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
    sed -i -e "s,YARN_LOG_DIR=.*,YARN_LOG_DIR=/mnt/log/hadoop/yarn," \
        $HADOOP_HOME/etc/hadoop/yarn-env.sh
    echo "source $JAVA_RC" >> $HADOOP_HOME/etc/hadoop/yarn-env.sh
    # enable swiftfs
    # Sahara expects the jar to be in both folders, even though only one is
    # the "official" classpath. Until images and Sahara become more strictly
    # coupled we must maintain this legacy behavior.
    ln -s ${HADOOP_HOME}/share/hadoop/tools/lib/hadoop-openstack-${DIB_HADOOP_VERSION}.jar ${HADOOP_HOME}/share/hadoop/common/lib/
}
# Only distros with a known user/group layout are supported.
case "$DISTRO_NAME" in
    fedora | ubuntu | rhel7 | centos7 )
        ;;
    *)
        echo "Unknown distro: $DISTRO_NAME. Exiting."
        exit 1
        ;;
esac
echo "Hadoop setup begins for $DISTRO_NAME"
# Tarball staging area populated by the matching extra-data.d hook.
tmp_dir=/tmp/hadoop
echo "Creating hadoop user & group"
case "$DISTRO_NAME" in
    ubuntu )
        addgroup hadoop
        adduser --ingroup hadoop --disabled-password --gecos GECOS hadoop
        adduser hadoop sudo
        ;;
    fedora | rhel7 | centos7 )
        adduser -G adm,wheel hadoop
        ;;
esac
install_hadoop_v2
echo "Applying firstboot script"
# Move any existing rc.local aside; the firstboot script restores it later.
RC_SCRIPT_DIR=""
if [ "$DISTRO_NAME" == "ubuntu" ]; then
    # File '/etc/rc.local' may not exist
    if [ -f "/etc/rc.local" ]; then
        mv /etc/rc.local /etc/rc.local.old
    fi
    RC_SCRIPT_DIR="/etc"
else
    # File '/etc/rc.d/rc.local' may not exist
    if [ -f "/etc/rc.d/rc.local" ]; then
        mv /etc/rc.d/rc.local /etc/rc.d/rc.local.old
    fi
    RC_SCRIPT_DIR="/etc/rc.d"
fi
# Make sure that rc-local.service starts after cloud-init.service,
# so that the cloud-init user (centos, ubuntu, cloud-user, ...)
# is available. See also:
# https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1468103
# All version of distributions affected by this change use systemd.
mkdir -p /etc/systemd/system/rc-local.service.d/
cat >/etc/systemd/system/rc-local.service.d/override.conf <<EOF
[Unit]
After=cloud-init.service
EOF
# Install the firstboot hook shipped next to this script as rc.local.
install -D -g root -o root -m 0755 $(dirname $0)/firstboot $RC_SCRIPT_DIR/rc.local
# make sure it is run, be it on SysV, upstart, or systemd
chmod +x $RC_SCRIPT_DIR/rc.local

View File

@ -1,71 +0,0 @@
#!/bin/bash
# dib-lint: disable=executable
# First-boot hook installed as rc.local by the hadoop element: fixes home
# directory ownership for the cloud user and prepares Hadoop log/run dirs.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# Per-distro first-boot fixups. Also creates /mnt/log/hadoop and the Hadoop
# run directory. Sets the non-local "distro" variable consumed by the
# cleanup code that follows this function.
function firstboot_common {
    # '|| :' keeps set -e from aborting if lsb_release is missing.
    distro=$(lsb_release -is || :)
    RUNDIR=/run
    if [ ! -d $RUNDIR ]; then
        RUNDIR=/var/run
    fi
    case "$distro" in
        Ubuntu )
            mkdir -p /home/ubuntu/.ssh
            touch /home/ubuntu/.ssh/authorized_keys
            chown -R ubuntu:ubuntu /home/ubuntu
            ;;
        Fedora )
            # NOTE(review): sleep + network restart presumably works around
            # slow first-boot network/DNS setup — confirm before changing.
            sleep 20
            rm /etc/resolv.conf
            service network restart
            # Fedora >= 19 uses the 'fedora' login user, older use ec2-user.
            if [ $(lsb_release -rs) -ge '19' ]; then
                user=fedora
            else
                user=ec2-user
            fi
            # Wait until cloud-init has created the login user.
            until [[ -n $(grep "$user:" /etc/passwd) && -n $(grep "$user:" /etc/group) ]]; do
                sleep 1
            done
            chown -R $user:$user /home/$user
            ;;
        CentOS )
            case "$(lsb_release -rs)" in
                7.*)
                    chown -R centos:centos /home/centos
                    ;;
            esac
            ;;
        * )
            echo "Unknown distro: $distro. Exiting."
            exit 1
            ;;
    esac
    mkdir -p /mnt/log/hadoop
    chown hadoop:hadoop /mnt/log/hadoop
    mkdir -p $RUNDIR/hadoop
    chown hadoop:hadoop $RUNDIR/hadoop
}
# Resolve the installed Hadoop version from the binary itself.
# NOTE(review): DIB_HADOOP_VERSION is not referenced below — presumably
# kept for sourcing scripts; confirm before removing.
DIB_HADOOP_VERSION=$(su - hadoop hadoop version | head -1 | awk '{print $2}')
firstboot_common
# Clean
# Restore the original rc.local that the image build renamed aside
# ($distro was set by firstboot_common above).
if [ "$distro" == "Ubuntu" ]; then
    if [ -f /etc/rc.local.old ]; then
        mv /etc/rc.local.old /etc/rc.local
    fi
else
    if [ -f /etc/rc.d/rc.local.old ]; then
        mv /etc/rc.d/rc.local.old /etc/rc.d/rc.local
    fi
fi
exit 0

View File

@ -1,3 +0,0 @@
wget:
tar:
redhat-lsb-core:

View File

@ -1,10 +0,0 @@
{
"family": {
"debian": {
"redhat-lsb-core": ""
},
"redhat": {
"redhat-lsb-core": "redhat-lsb-core"
}
}
}

View File

@ -1,24 +0,0 @@
#!/bin/bash
# Sanity-check the environment before the Hadoop install hooks run.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

if [ -z "${JAVA_DOWNLOAD_URL:-}" ]; then
    if [ -z "${JAVA_FILE:-}" ]; then
        echo "JAVA_FILE and JAVA_DOWNLOAD_URL are not set. Proceeding with distro native Java."
    fi
fi
# ${...:-} lets an unset variable reach the explanatory message below
# instead of an opaque 'unbound variable' abort from set -u.
if [ -z "${DIB_HADOOP_VERSION:-}" ]; then
    echo "DIB_HADOOP_VERSION is not set. Impossible to install hadoop. Exit"
    exit 1
fi
# Accept versions containing an x.y.z triple: sed deletes matching lines,
# so empty output means the pattern matched.
version_check=$(echo "$DIB_HADOOP_VERSION" | sed -e '/[0-9]\.[0-9]\.[0-9]/d')
if [[ -z $version_check ]]; then
    echo "All variables are set, continue."
else
    echo "Version error. Exit"
    exit 1
fi

View File

@ -1,33 +0,0 @@
#!/bin/bash
# extra-data.d hook: download the Hadoop release tarball into the target
# root's /tmp/hadoop (via the shared image cache) for install.d to unpack.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# Fetch one file from archive.apache.org through the cache-url helper and
# stage it (root-owned, mode 0644) into $tmp_dir.
function download_hadoop_package {
    package=$1
    echo "Hadoop version $DIB_HADOOP_VERSION will be downloaded"
    cached_package="$DIB_IMAGE_CACHE/$package"
    $TMP_HOOKS_PATH/bin/cache-url "http://archive.apache.org/dist/hadoop/core/hadoop-$DIB_HADOOP_VERSION/$package" $cached_package
    sudo install -D -g root -o root -m 0644 $cached_package $tmp_dir
}

function download_hadoop_v2 {
    download_hadoop_package "hadoop-$DIB_HADOOP_VERSION.tar.gz"
}

# Only distros the install hook supports.
case "$DISTRO_NAME" in
    fedora | ubuntu | rhel7 | centos7 )
        ;;
    *)
        echo "Unknown distro: $DISTRO_NAME. Exiting."
        exit 1
        ;;
esac
# Staging directory inside the image chroot.
tmp_dir=$TARGET_ROOT/tmp/hadoop
mkdir -p $tmp_dir
download_hadoop_v2

View File

@ -1,18 +0,0 @@
================
hdp-local-mirror
================
This element creates mirror with HDP packages.
Environment Variables
---------------------
DIB_HDP_STACK_URL
:Required: Yes
:Description: URL of the HDP stack
:Example: ``DIB_HDP_STACK_URL="http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.2.0/HDP-2.3.2.0-centos6-rpm.tar.gz"``
DIB_HDP_UTILS_URL
:Required: Yes
:Description: URL of HDP Utils
:Example: ``DIB_HDP_UTILS_URL="http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6/HDP-UTILS-1.1.0.20-centos6.tar.gz"``

View File

@ -1,3 +0,0 @@
disable-firewall
package-installs
source-repositories

View File

@ -1,12 +0,0 @@
# environment.d hook: export the distro's default Apache document root so
# later hooks know where to publish the local HDP mirror.
case "${DISTRO_NAME}" in
    centos7 | rhel7 | fedora )
        export DIB_DEFAULT_APACHE_DIR="/var/www/html"
        ;;
    ubuntu )
        export DIB_DEFAULT_APACHE_DIR="/var/www"
        ;;
    * )
        echo "Unsupported distro: ${DISTRO_NAME}"
        exit 1
        ;;
esac

View File

@ -1,13 +0,0 @@
#!/bin/bash
# Enable the Apache httpd service at boot on RHEL/CentOS 7 (SysV-style
# chkconfig; no action for other distros here).
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail
case "${DISTRO_NAME}" in
    centos7 | rhel7 )
        chkconfig httpd on
        ;;
esac

View File

@ -1 +0,0 @@
apache-server:

View File

@ -1,10 +0,0 @@
{
"family": {
"redhat": {
"apache-server": "httpd"
},
"debian": {
"apache-server": "apache2"
}
}
}

View File

@ -1,10 +0,0 @@
#!/bin/bash
# Validate required inputs: both HDP mirror URLs must be non-empty. With
# set -eu, an unset/empty variable makes the bare test fail and abort.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail
[ -n "$DIB_HDP_STACK_URL" ]
[ -n "$DIB_HDP_UTILS_URL" ]

View File

@ -1 +0,0 @@
hdp-stack tar ${DIB_DEFAULT_APACHE_DIR}/hdp/ ${DIB_HDP_STACK_URL}

View File

@ -1 +0,0 @@
hdp-utils tar ${DIB_DEFAULT_APACHE_DIR}/hdp-utils/ ${DIB_HDP_UTILS_URL}

View File

@ -1,21 +0,0 @@
====
hive
====
Installs Hive on Ubuntu and Fedora.
Hive stores metadata in MySQL databases. So, this element requires the
``mysql`` element.
Environment Variables
---------------------
HIVE_VERSION
:Required: Yes, if ``HIVE_DOWNLOAD_URL`` is not set.
:Description: Version of Hive to fetch from apache.org.
:Example: ``HIVE_VERSION=0.11.0``
HIVE_DOWNLOAD_URL
:Required: Yes, if ``HIVE_VERSION`` is not set.
:Default: ``http://archive.apache.org/dist/hive/hive-$HIVE_VERSION/hive-$HIVE_VERSION-bin.tar.gz``
:Description: Download URL of the Hive package.

View File

@ -1,4 +0,0 @@
cache-url
hadoop
mysql
package-installs

View File

@ -1,2 +0,0 @@
tar:
phase: post-install.d

View File

@ -1,43 +0,0 @@
#!/bin/bash
# Install Hive from the tarball staged in /tmp/hive into /opt/hive and wire
# it to the MySQL metastore (connector jar + absolute upgrade-script paths).
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# version_lt A B: true when version A sorts strictly before version B.
# Replaces the old lexicographic [[ "$A" < "$B" ]], which mis-ordered
# versions such as 0.9.0 vs 0.13.0.
version_lt() {
    [ "$1" != "$2" ] && \
        [ "$(printf '%s\n%s\n' "$1" "$2" | sort -V | head -n 1)" = "$1" ]
}

tmp_dir=/tmp/hive
cd "$tmp_dir"
if [ -z "${HIVE_DOWNLOAD_URL:-}" ]; then
    # hive package has renamed to "apache-hive-*" since 0.13.0
    if version_lt "$HIVE_VERSION" "0.13.0"; then
        HIVE_FILE=hive-$HIVE_VERSION-bin.tar.gz
    else
        HIVE_FILE=apache-hive-$HIVE_VERSION-bin.tar.gz
    fi
else
    HIVE_FILE=$(basename "$HIVE_DOWNLOAD_URL")
fi
tar xzf "$HIVE_FILE"
# Strip ".tar.gz" to obtain the extracted directory name.
HIVE_DIR="${HIVE_FILE%.*}"
HIVE_DIR="${HIVE_DIR%.*}"
mv "$HIVE_DIR" /opt/hive
rm -r "$tmp_dir"
chmod -R a+rw /opt/hive
# NOTE(review): HIVE_VERSION is required here even when HIVE_DOWNLOAD_URL
# was used above — confirm callers always provide it.
HIVE_MASTER_VERSION=${HIVE_VERSION%.*}.0
# NOTE(jfreud): Hive 0.11.0 was released without this DB upgrade script.
# All future versions of Hive do have the script.
if [ "$HIVE_VERSION" != "0.11.0" ]; then
    # replace the relative path with absolute path
    sed -i "s/SOURCE hive-txn-schema-$HIVE_MASTER_VERSION.mysql.sql;/SOURCE \/opt\/hive\/scripts\/metastore\/upgrade\/mysql\/hive-txn-schema-$HIVE_MASTER_VERSION.mysql.sql;/" /opt/hive/scripts/metastore/upgrade/mysql/hive-schema-$HIVE_MASTER_VERSION.mysql.sql
fi
ln -s /usr/share/java/mysql-connector-java.jar /opt/hive/lib/libmysql-java.jar
chown -R hadoop:hadoop /opt/hive
# Expose HIVE_HOME to the hadoop user's shell (escaped \$ expand at login).
cat >> /home/hadoop/.bashrc <<EOF
HIVE_HOME=/opt/hive
PATH=\$PATH:\$HIVE_HOME/bin
EOF

View File

@ -1,15 +0,0 @@
#!/bin/bash
# Validate Hive version variables before the image build proceeds.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

if [ -z "${HIVE_DOWNLOAD_URL:-}" ]; then
    # Accept HIVE_VERSION of the form x.y.z / x.yy.z: sed deletes matching
    # lines, so empty output means the version looked valid. ${...:-} and
    # quoting fix the old unquoted test and the opaque set -u abort when
    # HIVE_VERSION was unset; an unset/empty version now fails cleanly too.
    version_check=$(echo "${HIVE_VERSION:-}" | sed -e '/[0-9]\.[0-9]\{1,2\}\.[0-9]/d')
    if [ -z "${HIVE_VERSION:-}" ] || [ -n "$version_check" ]; then
        echo -e "Unable to install Hive: You should specify HIVE_DOWNLOAD_URL or HIVE_VERSION.\nAborting"
        exit 1
    fi
fi

View File

@ -1,24 +0,0 @@
#!/bin/bash
# extra-data.d hook: download the Hive tarball into the target root's
# /tmp/hive (via the shared image cache) for the install.d hook to unpack.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

# version_lt A B: true when version A sorts strictly before version B.
# Replaces the old lexicographic [[ "$A" < "$B" ]], which mis-ordered
# versions such as 0.9.0 vs 0.13.0.
version_lt() {
    [ "$1" != "$2" ] && \
        [ "$(printf '%s\n%s\n' "$1" "$2" | sort -V | head -n 1)" = "$1" ]
}

tmp_dir=$TARGET_ROOT/tmp/hive
mkdir -p "$tmp_dir"
if [ -z "${HIVE_DOWNLOAD_URL:-}" ]; then
    # hive package has renamed to "apache-hive-*" since 0.13.0
    if version_lt "$HIVE_VERSION" "0.13.0"; then
        HIVE_FILE=hive-$HIVE_VERSION-bin.tar.gz
    else
        HIVE_FILE=apache-hive-$HIVE_VERSION-bin.tar.gz
    fi
    HIVE_DOWNLOAD_URL=http://archive.apache.org/dist/hive/hive-$HIVE_VERSION/$HIVE_FILE
fi
HIVE_FILE=$(basename "$HIVE_DOWNLOAD_URL")
cached_tar="$DIB_IMAGE_CACHE/$HIVE_FILE"
$TMP_HOOKS_PATH/bin/cache-url "$HIVE_DOWNLOAD_URL" "$cached_tar"
sudo install -D -g root -o root -m 0644 "$cached_tar" "$tmp_dir"

View File

@ -1,6 +0,0 @@
====
java
====
This element configures JAVA_HOME, PATH and the alternatives
for java and javac.

Some files were not shown because too many files have changed in this diff Show More