Add bashate checks to pep8 step

Similar to the pep8 checks, this allows enforcing a consistent style
for the shell scripts across modifications. For now only the
indentation is enforced, to reduce code churn.

Closes-Bug: 1648099
Change-Id: Ie66cbe1aea4bd01a8bba8833ef6cbd2cff6a7c6a
Dirk Mueller 2016-10-25 22:46:14 +02:00
parent 73c5f6ea4f
commit 80fc5a2d42
20 changed files with 118 additions and 113 deletions
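For reviewers who want to try the new check locally before pushing, a minimal sketch of an equivalent bashate invocation follows. The file path is a placeholder, and the flag semantics (-i to ignore a check, -e to escalate one to an error) and the meaning of the E-codes are taken from the bashate documentation, not from this change itself:

# Hypothetical local run; assumes bashate is available, e.g. via 'pip install bashate'.
# It mirrors the error-code selection added to tox.ini below: ignore E006 (long lines),
# E010 ('do' placement) and E042 (local declaration hides errors), and treat
# E005 (file does not begin with "#!") as an error.
bashate -v -iE006,E010,E042 -eE005 path/to/fragment.sh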

View File

@@ -53,10 +53,8 @@ echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}"
 echo "Missing lines in master : ${baseline_missing}"
 echo "Missing lines in proposed change : ${current_missing}"
 
-if [ $allowed_missing -ge $current_missing ];
-then
-    if [ $baseline_missing -lt $current_missing ];
-    then
+if [ $allowed_missing -ge $current_missing ]; then
+    if [ $baseline_missing -lt $current_missing ]; then
         show_diff $baseline_report $current_report
         echo "We believe you can test your code with 100% coverage!"
     else

View File

@@ -3,38 +3,37 @@
 . /etc/sysconfig/heat-params
 
 if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then
-if [ "$ENABLE_CINDER" == "False" ]; then
-    # FIXME(yuanying): Use ephemeral disk for docker storage
-    # Currently Ironic doesn't support cinder volumes,
-    # so we must use preserved ephemeral disk instead of a cinder volume.
-    device_path=$(readlink -f /dev/disk/by-label/ephemeral0)
-else
-    attempts=60
-    while [ ${attempts} -gt 0 ]; do
-        device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$)
-        if [ -n "${device_name}" ]; then
-            break
-        fi
-        echo "waiting for disk device"
-        sleep 0.5
-        udevadm trigger
-        let attempts--
-    done
-    if [ -z "${device_name}" ]; then
-        echo "ERROR: disk device does not exist" >&2
-        exit 1
-    fi
-    device_path=/dev/disk/by-id/${device_name}
-fi
+    if [ "$ENABLE_CINDER" == "False" ]; then
+        # FIXME(yuanying): Use ephemeral disk for docker storage
+        # Currently Ironic doesn't support cinder volumes,
+        # so we must use preserved ephemeral disk instead of a cinder volume.
+        device_path=$(readlink -f /dev/disk/by-label/ephemeral0)
+    else
+        attempts=60
+        while [ ${attempts} -gt 0 ]; do
+            device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$)
+            if [ -n "${device_name}" ]; then
+                break
+            fi
+            echo "waiting for disk device"
+            sleep 0.5
+            udevadm trigger
+            let attempts--
+        done
+        if [ -z "${device_name}" ]; then
+            echo "ERROR: disk device does not exist" >&2
+            exit 1
+        fi
+        device_path=/dev/disk/by-id/${device_name}
+    fi
 fi
 
 $configure_docker_storage_driver
 
 if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then
-    if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) \
-        = $(uname -r) ]; then
+    if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) = $(uname -r) ]; then
         ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Cluster node kernel version: $(uname -r)"
         echo "ERROR: ${ERROR_MESSAGE}" >&2
         sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'"

View File

@@ -3,8 +3,8 @@
 . /etc/sysconfig/heat-params
 
 if [ -z "$KUBE_NODE_IP" ]; then
-  # FIXME(yuanying): Set KUBE_NODE_IP correctly
-  KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+    # FIXME(yuanying): Set KUBE_NODE_IP correctly
+    KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
 fi
 
 myip="${KUBE_NODE_IP}"

View File

@@ -5,16 +5,16 @@
 echo "configuring kubernetes (master)"
 
 if [ -z "$KUBE_NODE_IP" ]; then
-  # FIXME(yuanying): Set KUBE_NODE_IP correctly
-  KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+    # FIXME(yuanying): Set KUBE_NODE_IP correctly
+    KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
 fi
 
 sed -i '
-  /^ETCD_LISTEN_CLIENT_URLS=/ s/=.*/="http:\/\/0.0.0.0:2379"/
+    /^ETCD_LISTEN_CLIENT_URLS=/ s/=.*/="http:\/\/0.0.0.0:2379"/
 ' /etc/etcd/etcd.conf
 
 sed -i '
-  /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
+    /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
 ' /etc/kubernetes/config
 
 KUBE_API_ARGS="--runtime-config=api/all=true"
@@ -30,11 +30,11 @@ else
 fi
 
 sed -i '
-  /^KUBE_API_ADDRESS=/ s/=.*/='"${KUBE_API_ADDRESS}"'/
-  /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"|
-  /^KUBE_API_ARGS=/ s/KUBE_API_ARGS.//
-  /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/
-  /^KUBE_ADMISSION_CONTROL=/ s/=.*/=""/
+    /^KUBE_API_ADDRESS=/ s/=.*/='"${KUBE_API_ADDRESS}"'/
+    /^KUBE_SERVICE_ADDRESSES=/ s|=.*|="--service-cluster-ip-range='"$PORTAL_NETWORK_CIDR"'"|
+    /^KUBE_API_ARGS=/ s/KUBE_API_ARGS.//
+    /^KUBE_ETCD_SERVERS=/ s/=.*/="--etcd-servers=http:\/\/127.0.0.1:2379"/
+    /^KUBE_ADMISSION_CONTROL=/ s/=.*/=""/
 ' /etc/kubernetes/apiserver
 
 cat << _EOC_ >> /etc/kubernetes/apiserver
 #Uncomment the following line to disable Load Balancer feature
@@ -44,8 +44,8 @@ KUBE_API_ARGS="$KUBE_API_ARGS"
 _EOC_
 
 sed -i '
-  /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/
-  /^KUBE_CONTROLLER_MANAGER_ARGS=/ s/KUBE_CONTROLLER_MANAGER_ARGS.*/#Uncomment the following line to enable Kubernetes Load Balancer feature \n#KUBE_CONTROLLER_MANAGER_ARGS="--cloud-config=\/etc\/sysconfig\/kube_openstack_config --cloud-provider=openstack"/
+    /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/
+    /^KUBE_CONTROLLER_MANAGER_ARGS=/ s/KUBE_CONTROLLER_MANAGER_ARGS.*/#Uncomment the following line to enable Kubernetes Load Balancer feature \n#KUBE_CONTROLLER_MANAGER_ARGS="--cloud-config=\/etc\/sysconfig\/kube_openstack_config --cloud-provider=openstack"/
 ' /etc/kubernetes/controller-manager
 
 KUBELET_ARGS="--register-node=true --register-schedulable=false --config=/etc/kubernetes/manifests --hostname-override=$KUBE_NODE_IP"
@@ -56,7 +56,7 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
 fi
 
 sed -i '
-  /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
-  /^KUBELET_HOSTNAME=/ s/=.*/=""/
-  /^KUBELET_ARGS=/ s|=.*|='"$KUBELET_ARGS"'|
+    /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
+    /^KUBELET_HOSTNAME=/ s/=.*/=""/
+    /^KUBELET_ARGS=/ s|=.*|='"$KUBELET_ARGS"'|
 ' /etc/kubernetes/kubelet

View File

@@ -5,8 +5,8 @@
 echo "configuring kubernetes (minion)"
 
 if [ -z "$KUBE_NODE_IP" ]; then
-  # FIXME(yuanying): Set KUBE_NODE_IP correctly
-  KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+    # FIXME(yuanying): Set KUBE_NODE_IP correctly
+    KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
 fi
 
 ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP}
@@ -20,9 +20,9 @@ fi
 KUBE_MASTER_URI="$KUBE_PROTOCOL://$KUBE_MASTER_IP:$KUBE_API_PORT"
 
 sed -i '
-  /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
-  /^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"|
-  /^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"|
+    /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/
+    /^KUBE_ETCD_SERVERS=/ s|=.*|="--etcd-servers=http://'"$ETCD_SERVER_IP"':2379"|
+    /^KUBE_MASTER=/ s|=.*|="--master='"$KUBE_MASTER_URI"'"|
 ' /etc/kubernetes/config
 
 # NOTE: Kubernetes plugin for Openstack requires that the node name registered
@@ -40,19 +40,19 @@ if [ -n "${INSECURE_REGISTRY_URL}" ]; then
 fi
 
 sed -i '
-  /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
-  /^KUBELET_HOSTNAME=/ s/=.*/=""/
-  /^KUBELET_API_SERVER=/ s|=.*|="--api-servers='"$KUBE_MASTER_URI"'"|
-  /^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"|
+    /^KUBELET_ADDRESS=/ s/=.*/="--address=0.0.0.0"/
+    /^KUBELET_HOSTNAME=/ s/=.*/=""/
+    /^KUBELET_API_SERVER=/ s|=.*|="--api-servers='"$KUBE_MASTER_URI"'"|
+    /^KUBELET_ARGS=/ s|=.*|="'"${KUBELET_ARGS}"'"|
 ' /etc/kubernetes/kubelet
 
 sed -i '
-  /^KUBE_PROXY_ARGS=/ s|=.*|='"$KUBE_CONFIG"'|
+    /^KUBE_PROXY_ARGS=/ s|=.*|='"$KUBE_CONFIG"'|
 ' /etc/kubernetes/proxy
 
 if [ "$NETWORK_DRIVER" = "flannel" ]; then
     sed -i '
-      /^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
+        /^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
     ' /etc/sysconfig/flanneld
 
     # Make sure etcd has a flannel configuration

View File

@@ -4,5 +4,5 @@
 setenforce 0
 
 sed -i '
-  /^SELINUX=/ s/=.*/=permissive/
+    /^SELINUX=/ s/=.*/=permissive/
 ' /etc/selinux/config

View File

@@ -69,8 +69,8 @@ USER_TOKEN=`curl -k -s -i -X POST -H "$content_type" -d "$auth_json" $url \
 
 # Get CA certificate for this cluster
 curl -k -X GET \
-  -H "X-Auth-Token: $USER_TOKEN" \
-  $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
+    -H "X-Auth-Token: $USER_TOKEN" \
+    $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
 
 # Create config for client's csr
 cat > ${cert_conf_dir}/client.conf <<EOF
@@ -111,7 +111,7 @@ chmod 500 "${cert_dir}"
 chown -R kube:kube "${cert_dir}"
 
 sed -i '
-  s|CA_CERT|'"$CA_CERT"'|
-  s|CLIENT_CERT|'"$CLIENT_CERT"'|
-  s|CLIENT_KEY|'"$CLIENT_KEY"'|
+    s|CA_CERT|'"$CA_CERT"'|
+    s|CLIENT_CERT|'"$CLIENT_CERT"'|
+    s|CLIENT_KEY|'"$CLIENT_KEY"'|
 ' /srv/kubernetes/kubeconfig.yaml

View File

@@ -21,7 +21,7 @@ set -o nounset
 set -o pipefail
 
 if [ "$TLS_DISABLED" == "True" ]; then
-  exit 0
+    exit 0
 fi
 
 if [[ -z "${KUBE_NODE_PUBLIC_IP}" ]]; then
@@ -33,11 +33,11 @@ fi
 sans="IP:${KUBE_NODE_PUBLIC_IP},IP:${KUBE_NODE_IP}"
 
 if [ "${KUBE_NODE_PUBLIC_IP}" != "${KUBE_API_PUBLIC_ADDRESS}" ] \
-  && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then
+        && [ -n "${KUBE_API_PUBLIC_ADDRESS}" ]; then
     sans="${sans},IP:${KUBE_API_PUBLIC_ADDRESS}"
 fi
 
 if [ "${KUBE_NODE_IP}" != "${KUBE_API_PRIVATE_ADDRESS}" ] \
-  && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then
+        && [ -n "${KUBE_API_PRIVATE_ADDRESS}" ]; then
     sans="${sans},IP:${KUBE_API_PRIVATE_ADDRESS}"
 fi
 
 MASTER_HOSTNAME=${MASTER_HOSTNAME:-}
@@ -91,8 +91,8 @@ USER_TOKEN=`curl -k -s -i -X POST -H "$content_type" -d "$auth_json" $url \
 
 # Get CA certificate for this cluster
 curl -k -X GET \
-  -H "X-Auth-Token: $USER_TOKEN" \
-  $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
+    -H "X-Auth-Token: $USER_TOKEN" \
+    $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
 
 # Create config for server's csr
 cat > ${cert_conf_dir}/server.conf <<EOF

View File

@@ -28,7 +28,7 @@ fi
 
 echo "creating flanneld config in etcd"
 while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \
-  -X PUT --data-urlencode value@${FLANNEL_JSON}; do
+        -X PUT --data-urlencode value@${FLANNEL_JSON}; do
     echo "waiting for etcd"
     sleep 1
 done

View File

@@ -12,7 +12,7 @@ FLANNEL_JSON=/etc/sysconfig/flannel-network.json
 FLANNELD_CONFIG=/etc/sysconfig/flanneld
 
 sed -i '
-  /^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/
+    /^FLANNEL_ETCD=/ s/=.*/="http:\/\/127.0.0.1:2379"/
 ' /etc/sysconfig/flanneld
 
 # Generate a flannel configuration that we will

View File

@@ -14,7 +14,7 @@ FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service
 FLANNEL_JSON=/etc/sysconfig/flannel-network.json
 
 sed -i '
-  /^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
+    /^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
 ' $FLANNELD_CONFIG
 
 . $FLANNELD_CONFIG
@@ -35,7 +35,7 @@ fi
 
 echo "creating flanneld config in etcd"
 while ! curl -sf -L $FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_KEY}/config \
-  -X PUT --data-urlencode value@${FLANNEL_JSON}; do
+        -X PUT --data-urlencode value@${FLANNEL_JSON}; do
     echo "waiting for etcd"
     sleep 1
 done

View File

@@ -5,8 +5,8 @@
 echo "Configuring ${NETWORK_DRIVER} network service ..."
 
 if [ "$NETWORK_DRIVER" == "docker" ]; then
-  DOCKER_NETWORK_OPTIONS="--cluster-store etcd://$ETCD_SERVER_IP:2379\
-  --cluster-advertise $SWARM_NODE_IP:9379"
+    DOCKER_NETWORK_OPTIONS="--cluster-store etcd://$ETCD_SERVER_IP:2379 \
+        --cluster-advertise $SWARM_NODE_IP:9379"
     sed -i "/^DOCKER_NETWORK_OPTIONS=/ s#=.*#='$DOCKER_NETWORK_OPTIONS'#" \
         /etc/sysconfig/docker-network
 fi

View File

@@ -48,7 +48,7 @@ After=docker.service
 RemainAfterExit=yes
 ExecStartPre=-/usr/bin/docker pull openstackmagnum/rexray:alpine
 ExecStart=/usr/bin/rm -f /var/run/rexray/rexray.pid && \
-  /usr/bin/docker run -d --name=rexray --privileged -p 7979:7979 \
+    /usr/bin/docker run -d --name=rexray --privileged -p 7979:7979 \
     -v /run/docker/plugins:/run/docker/plugins \
     -v /var/lib/rexray:/var/lib/rexray:z \
     -v /var/log/rexray:/var/log/rexray \

View File

@@ -20,25 +20,25 @@ EnvironmentFile=-/etc/sysconfig/docker-storage
 EnvironmentFile=-/etc/sysconfig/docker-network
 Environment=GOTRACEBACK=crash
 ExecStart=/usr/bin/docker daemon -H fd:// \\
-  -H tcp://0.0.0.0:2375 \\
+    -H tcp://0.0.0.0:2375 \\
 END_SERVICE_TOP
 
 if [ "$TLS_DISABLED" = 'False' ]; then
 
 cat >> /etc/systemd/system/docker.service << END_TLS
-  --tlsverify \\
-  --tlscacert="/etc/docker/ca.crt" \\
-  --tlskey="/etc/docker/server.key" \\
-  --tlscert="/etc/docker/server.crt" \\
+    --tlsverify \\
+    --tlscacert="/etc/docker/ca.crt" \\
+    --tlskey="/etc/docker/server.key" \\
+    --tlscert="/etc/docker/server.crt" \\
 END_TLS
 
 fi
 
 cat >> /etc/systemd/system/docker.service << END_SERVICE_BOTTOM
-  \$OPTIONS \\
-  \$DOCKER_STORAGE_OPTIONS \\
-  \$DOCKER_NETWORK_OPTIONS \\
-  \$INSECURE_REGISTRY
+    \$OPTIONS \\
+    \$DOCKER_STORAGE_OPTIONS \\
+    \$DOCKER_NETWORK_OPTIONS \\
+    \$INSECURE_REGISTRY
 LimitNOFILE=1048576
 LimitNPROC=1048576
 LimitCORE=infinity

View File

@@ -18,14 +18,14 @@ TimeoutStartSec=0
 ExecStartPre=-/usr/bin/docker kill swarm-agent
 ExecStartPre=-/usr/bin/docker rm swarm-agent
 ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION
-ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY \\
-  -e https_proxy=$HTTPS_PROXY \\
-  -e no_proxy=$NO_PROXY \\
-  --name swarm-agent \\
-  swarm:$SWARM_VERSION \\
-  join \\
-  --addr $myip:2375 \\
-  etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/
+ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY \\
+    -e https_proxy=$HTTPS_PROXY \\
+    -e no_proxy=$NO_PROXY \\
+    --name swarm-agent \\
+    swarm:$SWARM_VERSION \\
+    join \\
+    --addr $myip:2375 \\
+    etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/
 Restart=always
 ExecStop=/usr/bin/docker stop swarm-agent
 ExecStartPost=/usr/local/bin/notify-heat
@@ -42,10 +42,10 @@ SCRIPT=/usr/local/bin/notify-heat
 cat > $SCRIPT << EOF
 #!/bin/sh
 until etcdctl \
-  --peers $ETCD_SERVER_IP:2379 \
-  --timeout 1s \
-  --total-timeout 5s \
-  ls /v2/keys/swarm/docker/swarm/nodes/$myip:2375
+    --peers $ETCD_SERVER_IP:2379 \
+    --timeout 1s \
+    --total-timeout 5s \
+    ls /v2/keys/swarm/docker/swarm/nodes/$myip:2375
 do
     echo "Waiting for swarm agent registration..."
     sleep 5

View File

@@ -12,26 +12,26 @@ TimeoutStartSec=0
 ExecStartPre=-/usr/bin/docker kill swarm-manager
 ExecStartPre=-/usr/bin/docker rm swarm-manager
 ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION
-ExecStart=/usr/bin/docker run --name swarm-manager \\
-  -v /etc/docker:/etc/docker \\
-  -p 2376:2375 \\
-  -e http_proxy=$HTTP_PROXY \\
-  -e https_proxy=$HTTPS_PROXY \\
-  -e no_proxy=$NO_PROXY \\
-  swarm:$SWARM_VERSION \\
-  manage -H tcp://0.0.0.0:2375 \\
-  --strategy $SWARM_STRATEGY \\
-  --replication \\
-  --advertise $NODE_IP:2376 \\
+ExecStart=/usr/bin/docker run --name swarm-manager \\
+    -v /etc/docker:/etc/docker \\
+    -p 2376:2375 \\
+    -e http_proxy=$HTTP_PROXY \\
+    -e https_proxy=$HTTPS_PROXY \\
+    -e no_proxy=$NO_PROXY \\
+    swarm:$SWARM_VERSION \\
+    manage -H tcp://0.0.0.0:2375 \\
+    --strategy $SWARM_STRATEGY \\
+    --replication \\
+    --advertise $NODE_IP:2376 \\
 END_SERVICE_TOP
 
 if [ $TLS_DISABLED = 'False' ]; then
 
 cat >> /etc/systemd/system/swarm-manager.service << END_TLS
-  --tlsverify \\
-  --tlscacert=/etc/docker/ca.crt \\
-  --tlskey=/etc/docker/server.key \\
-  --tlscert=/etc/docker/server.crt \\
+    --tlsverify \\
+    --tlscacert=/etc/docker/ca.crt \\
+    --tlskey=/etc/docker/server.key \\
+    --tlscert=/etc/docker/server.crt \\
 END_TLS
 
 fi
@@ -41,8 +41,8 @@ cat >> /etc/systemd/system/swarm-manager.service << END_SERVICE_BOTTOM
 ExecStop=/usr/bin/docker stop swarm-manager
 Restart=always
 ExecStartPost=/usr/bin/curl -k -i -X POST -H 'Content-Type: application/json' -H 'X-Auth-Token: $WAIT_HANDLE_TOKEN' \\
-  --data-binary "'"'{"Status": "SUCCESS", "Reason": "Setup complete", "Data": "OK", "UniqueId": "00000"}'"'" \\
-  "$WAIT_HANDLE_ENDPOINT"
+    --data-binary "'"'{"Status": "SUCCESS", "Reason": "Setup complete", "Data": "OK", "UniqueId": "00000"}'"'" \\
+    "$WAIT_HANDLE_ENDPOINT"
 
 [Install]
 WantedBy=multi-user.target

View File

@@ -5,7 +5,7 @@
 echo "Configuring mesos (master)"
 
 myip=$(ip addr show eth0 |
-  awk '$1 == "inet" {print $2}' | cut -f1 -d/)
+    awk '$1 == "inet" {print $2}' | cut -f1 -d/)
 
 # Fix /etc/hosts
 sed -i "s/127.0.1.1/$myip/" /etc/hosts

View File

@@ -5,7 +5,7 @@
 echo "Configuring mesos (slave)"
 
 myip=$(ip addr show eth0 |
-  awk '$1 == "inet" {print $2}' | cut -f1 -d/)
+    awk '$1 == "inet" {print $2}' | cut -f1 -d/)
 
 zk=""
 for master_ip in $MESOS_MASTERS_IPS; do

View File

@@ -6,6 +6,7 @@
 # ascii betical order.
 bandit>=1.1.0 # Apache-2.0
+bashate>=0.2 # Apache-2.0
 coverage>=4.0 # Apache-2.0
 doc8 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD

View File

@@ -98,6 +98,13 @@ commands =
     doc8 -e .rst specs/ doc/source/ contrib/ CONTRIBUTING.rst HACKING.rst README.rst
     bash tools/flake8wrap.sh {posargs}
     bandit -r magnum -x tests -n5 -ll
+    bash -c "find {toxinidir} \
+        -not \( -type d -name .?\* -prune \) \
+        -not \( -type d -name doc -prune \) \
+        -not \( -type d -name contrib -prune \) \
+        -type f \
+        -name \*.sh \
+        -print0 | xargs -0 bashate -v -iE006,E010,E042 -eE005"
 
 [testenv:venv]
 commands = {posargs}
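
As a usage note, the command added above walks every *.sh file in the tree (skipping hidden directories, doc/ and contrib/) and feeds them to bashate, so the pep8 tox environment now also reports shell indentation that is not a multiple of four spaces. A rough, hypothetical demonstration of the indentation rule in isolation, assuming bashate is installed; the sample file, its two-space indent and the expected report are invented for illustration:

# Create a throw-away fragment with a two-space indent ...
cat > /tmp/sample.sh <<'EOF'
#!/bin/sh
if [ -z "$KUBE_NODE_IP" ]; then
  KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
fi
EOF
# ... and run the same check as the tox command. The two-space indent is
# expected to be reported by bashate's indentation check, while the ignored
# codes E006, E010 and E042 stay silent.
bashate -v -iE006,E010,E042 -eE005 /tmp/sample.sh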