#!/bin/bash

# Enable shell command tracing when diskimage-builder debug tracing is
# requested (DIB_DEBUG_TRACE unset or 0 means no tracing).
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi

# Abort on any command failure or use of an unset variable, and make a
# pipeline fail if any stage of it fails.
set -eu
set -o pipefail
# Select the per-plugin locations of the Hadoop tools jars, the
# hadoop-env.sh settings file, and the Spark jars directory.
# $plugin_type is provided by the diskimage-builder environment.
case "$plugin_type" in
    "vanilla" )
        HADOOP_TOOLS_DIR_PATH="/opt/hadoop/share/hadoop/tools/lib"
        HADOOP_ENV_SH_PATH="/opt/hadoop/etc/hadoop/hadoop-env.sh"
        SPARK_JARS_DIR_PATH="/opt/spark/jars"
        ;;
    "spark" )
        HADOOP_TOOLS_DIR_PATH="/usr/lib/hadoop/client"
        SPARK_JARS_DIR_PATH="/opt/spark/jars"
        ;;
    "cloudera" )
        # CDH already ships these libraries in the right place, so this
        # element must not be applied on top of it.
        echo -n "The s3_hadoop element is not supported on CDH," >&2
        echo " because the relevant libraries are already in the right place." >&2
        exit 1
        ;;
    *)
        echo "The s3_hadoop element is only supported on Vanilla and Spark." >&2
        exit 1
        ;;
esac
|
# NOTE: By defintion, the Spark standalone plugin does not contain Hadoop in
|
|
# its entirety. Therefore, there are no Hadoop-specific environment settings
|
|
# available for modification.
|
|
if [ "$plugin_type" != "spark" ]; then
|
|
if [ -f "$HADOOP_ENV_SH_PATH" ]; then
|
|
cat >> $HADOOP_ENV_SH_PATH <<EOF
|
|
for f in $HADOOP_TOOLS_DIR_PATH/*.jar; do
|
|
if [ "\$HADOOP_CLASSPATH" ]; then
|
|
export HADOOP_CLASSPATH=\$HADOOP_CLASSPATH:\$f
|
|
else
|
|
export HADOOP_CLASSPATH=\$f
|
|
fi
|
|
done
|
|
EOF
|
|
else
|
|
echo "Something went wrong: couldn't find Hadoop env settings."
|
|
exit 1
|
|
fi
|
|
fi
|
|
|
|
# Copy the AWS/S3 connector jars into the Spark installation so Spark can
# access S3 directly, and make them world-readable.
if [ -d "$SPARK_JARS_DIR_PATH" ]; then
    cp "$HADOOP_TOOLS_DIR_PATH"/*aws*.jar "$SPARK_JARS_DIR_PATH"
    chmod 0644 "$SPARK_JARS_DIR_PATH"/*aws*.jar
else
    # NOTE: In the case of Vanilla, the user may have disabled the Spark
    # element. So, check for the existence of the directory explicitly, but
    # crucially do not consider it an error if the folder does not exist.
    if [ "$plugin_type" != "vanilla" ]; then
        echo "Something went wrong: couldn't find Spark installation." >&2
        exit 1
    fi
fi