* SC2086: Double quote to prevent globbing and word splitting.
* SC2027: The surrounding quotes actually unquote this.
Remove or escape them.
* SC2016: Expressions don't expand in single quotes,
use double quotes for that.
* SC2236: Use -n instead of ! -z.
* SC2233: Remove superfluous (..) around condition.
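
A minimal sketch of the quoting pattern applied throughout, using the
$WORKSPACE archive step as the example (the other hunks follow the same
shape):

    # SC2086: unquoted, subject to globbing and word splitting
    mkdir -p $WORKSPACE/archives
    # quoted, the path stays a single argument even if it contains spaces
    mkdir -p "$WORKSPACE/archives"

    # SC2236: prefer -n over negating -z
    [ ! -z "$TOX_ENVS" ]   # before
    [ -n "$TOX_ENVS" ]     # after
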
Signed-off-by: Thanh Ha <zxiiro@gmail.com>
Change-Id: I73207aa87646472bdb1c7848ba9608ac702bda53
OS_RELEASE=$(facter lsbdistrelease | tr '[:upper:]' '[:lower:]')
if [[ "$OS_RELEASE" == "18.04" && "$OS" == 'ubuntu' ]]; then
+ # We do not want var expansion here as the profile script expands it at runtime.
+ # shellcheck disable=SC2016
echo 'export PATH=$HOME/.local/bin:$PATH' >> /etc/profile
fi
eval cmake -DCMAKE_INSTALL_PREFIX="$INSTALL_PREFIX" $cmake_opts ..
/opt/build-wrapper/build-wrapper-linux-x86-64 --out-dir "$WORKSPACE/bw-output" \
- make $make_opts
+ make "$make_opts"
/opt/sonar-scanner/bin/sonar-scanner \
- -Dsonar.projectKey=${PROJECT_KEY} \
- -Dsonar.organization=${PROJECT_ORGANIZATION} \
+ -Dsonar.projectKey="${PROJECT_KEY}" \
+ -Dsonar.organization="${PROJECT_ORGANIZATION}" \
-Dsonar.sources=. \
-Dsonar.cfamily.build-wrapper-output="$WORKSPACE/bw-output" \
- -Dsonar.host.url=${SONAR_HOST_URL} \
- -Dsonar.login=${API_TOKEN}
+ -Dsonar.host.url="${SONAR_HOST_URL}" \
+ -Dsonar.login="${API_TOKEN}"
# DOCKER_IMAGE_TAG variable gets constructed after the lf-docker-get-container-tag builder
# step is executed. That step combines the image name and the appropriate tag in the same variable.
docker_build_command="docker build ${DOCKER_ARGS:-} -t "$CONTAINER_PUSH_REGISTRY/$DOCKER_NAME:$DOCKER_IMAGE_TAG" ."
-echo $docker_build_command
-eval $docker_build_command | tee "$WORKSPACE/docker_build_log.txt"
+echo "$docker_build_command"
+eval "$docker_build_command" | tee "$WORKSPACE/docker_build_log.txt"
# Login to the registry
do_login() {
docker_version=$( docker -v | awk '{print $3}')
- if version_lt $docker_version "17.06.0" && \
+ if version_lt "$docker_version" "17.06.0" && \
"$DOCKERHUB_REGISTRY" == "docker.io" && \
"$DOCKERHUB_EMAIL:-none" != 'none'
then
# docker login requests an email address if nothing is passed to it
# Nexus, however, does not need this and ignores the value
- set_creds $REGISTRY
+ set_creds "$REGISTRY"
do_login "$REGISTRY" none
done
fi
# Login to docker.io after determining if email is needed.
if [ "${DOCKERHUB_REGISTRY:-none}" != 'none' ]
then
- set_creds $DOCKERHUB_REGISTRY
+ set_creds "$DOCKERHUB_REGISTRY"
if [ "${DOCKERHUB_EMAIL:-none}" != 'none' ]
then
do_login "$DOCKERHUB_REGISTRY" "$DOCKERHUB_EMAIL"
set -ue -o pipefail
echo "---> Pushing image: $CONTAINER_PUSH_REGISTRY/$DOCKER_NAME:$DOCKER_IMAGE_TAG"
docker_push_command="docker push "$CONTAINER_PUSH_REGISTRY/$DOCKER_NAME:$DOCKER_IMAGE_TAG""
-echo $docker_push_command
-eval $docker_push_command
+echo "$docker_push_command"
+eval "$docker_push_command"
ssh_port=$(git remote show origin | grep Fetch | grep 'ssh://' \
| awk -F'/' '{print $3}' | awk -F':' '{print $2}')
- if [ -z $ssh_url ]; then
+ if [ -z "$ssh_url" ]; then
echo "ERROR: Gerrit SSH URL not found."
exit 1
fi
# Remove any leading or trailing quotes surrounding the strings
# which can cause parse errors when passed as CLI options to commands
-PROJECT="$(echo $PROJECT | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
-GERRIT_COMMIT_MESSAGE="$(echo $GERRIT_COMMIT_MESSAGE | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
-GERRIT_HOST="$(echo $GERRIT_HOST | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
-GERRIT_TOPIC="$(echo $GERRIT_TOPIC | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
-GERRIT_USER="$(echo $GERRIT_USER | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
-REVIEWERS_EMAIL="$(echo $REVIEWERS_EMAIL | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
+PROJECT="$(echo "$PROJECT" | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
+GERRIT_COMMIT_MESSAGE="$(echo "$GERRIT_COMMIT_MESSAGE" | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
+GERRIT_HOST="$(echo "$GERRIT_HOST" | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
+GERRIT_TOPIC="$(echo "$GERRIT_TOPIC" | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
+GERRIT_USER="$(echo "$GERRIT_USER" | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
+REVIEWERS_EMAIL="$(echo "$REVIEWERS_EMAIL" | sed "s/^\([\"']\)\(.*\)\1\$/\2/g")"
CHANGE_ID=$(ssh -p 29418 "$GERRIT_USER@$GERRIT_HOST" gerrit query \
limit:1 owner:self is:open project:"$PROJECT" \
if [ -n "${JIRA_URL}" ];
then
- BASE_URL=$(echo $JIRA_URL | awk -F'/' '{print $3}')
+ BASE_URL=$(echo "$JIRA_URL" | awk -F'/' '{print $3}')
JIRA_LINK=$(git rev-list --format=%B --max-count=1 HEAD | grep -io "http[s]*://$BASE_URL/" || true)
- if [[ ! -z "$JIRA_LINK" ]]
+ if [[ -n "$JIRA_LINK" ]]
then
echo 'Remove JIRA URLs from commit message'
echo 'Add jira references as: Issue: <JIRAKEY>-<ISSUE#>, instead of URLs'
export get_cfg
get_cloud_cfg() {
- if [ -z $1 ]; then
+ if [ -z "$1" ]; then
>&2 echo "Usage: get_cloud_cfg CFG_DIR"
exit 1
fi
}
get_launcher_factory() {
- if [ -z $1 ]; then
+ if [ -z "$1" ]; then
>&2 echo "Usage: get_launcher_factory JNLP|SSH"
exit 1
fi
}
get_minion_options() {
- if [ -z $1 ]; then
+ if [ -z "$1" ]; then
>&2 echo "Usage: get_minion_options CFG_FILE"
exit 1
fi
hardware_id=$(get_cfg "$cfg_file" HARDWARE_ID "")
network_id=$(get_cfg "$cfg_file" NETWORK_ID "")
- udi_default="$(get_cfg "$(dirname $cfg_file)/cloud.cfg" USER_DATA_ID "jenkins-init-script")"
+ udi_default="$(get_cfg "$(dirname "$cfg_file")/cloud.cfg" USER_DATA_ID "jenkins-init-script")"
user_data_id=$(get_cfg "$cfg_file" USER_DATA_ID "$udi_default")
# Handle Sandbox systems that might have a different cap.
availability_zone=$(get_cfg "$cfg_file" AVAILABILITY_ZONE "")
start_timeout=$(get_cfg "$cfg_file" START_TIMEOUT "600000")
- kpn_default="$(get_cfg "$(dirname $cfg_file)/cloud.cfg" KEY_PAIR_NAME "jenkins-ssh")"
+ kpn_default="$(get_cfg "$(dirname "$cfg_file")/cloud.cfg" KEY_PAIR_NAME "jenkins-ssh")"
key_pair_name=$(get_cfg "$cfg_file" KEY_PAIR_NAME "$kpn_default")
num_executors=$(get_cfg "$cfg_file" NUM_EXECUTORS "1")
| grep -i 'OpenStack Cloud Plugin' \
| awk -F':' '{print $2}' | awk -F' ' '{print $1}')"
if version_ge "$OS_PLUGIN_VER" "2.35"; then
- if [ ! -z "$volume_size" ]; then
+ if [ -n "$volume_size" ]; then
echo " new BootSource.VolumeFromImage(\"$image_name\", $volume_size),"
else
echo " new BootSource.Image(\"$image_name\"),"
echo " $retention_time"
else # SlaveOptions() structure for versions <= 2.34
- if [ ! -z "$volume_size" ]; then
+ if [ -n "$volume_size" ]; then
echo " new BootSource.VolumeFromImage(\"$image_name\", $volume_size),"
else
echo " new BootSource.Image(\"$image_name\"),"
}
get_template_cfg() {
- if [ -z $2 ]; then
+ if [ -z "$2" ]; then
>&2 echo "Usage: get_template_cfg CFG_FILE SILO [MINION_PREFIX]"
exit 1
fi
local minion_prefix="${3:-}"
- template_name=$(basename $cfg_file .cfg)
+ template_name=$(basename "$cfg_file" .cfg)
labels=$(get_cfg "$cfg_file" LABELS "")
echo "minion_options = new SlaveOptions("
echo ")"
}
-mapfile -t clouds < <(ls -d1 $OS_CLOUD_DIR/*/)
+mapfile -t clouds < <(ls -d1 "$OS_CLOUD_DIR"/*/)
for silo in $silos; do
for cloud in "${clouds[@]}"; do
cfg_dir="${cloud}"
echo "Processing $cfg_dir"
- insert_file="$SCRIPT_DIR/$silo/$(basename $cloud)/cloud-cfg.txt"
- mkdir -p "$(dirname $insert_file)"
+ insert_file="$SCRIPT_DIR/$silo/$(basename "$cloud")/cloud-cfg.txt"
+ mkdir -p "$(dirname "$insert_file")"
rm -f "$insert_file"
echo "" >> "$insert_file"
echo "//////////////////////////////////////////////////" >> "$insert_file"
- echo "// Cloud config for $(basename $cloud)" >> "$insert_file"
+ echo "// Cloud config for $(basename "$cloud")" >> "$insert_file"
echo "//////////////////////////////////////////////////" >> "$insert_file"
echo "" >> "$insert_file"
- echo "templates = []" >> $insert_file
- mapfile -t templates < <(find $cfg_dir -maxdepth 1 -not -type d -not -name "cloud.cfg")
+ echo "templates = []" >> "$insert_file"
+ mapfile -t templates < <(find "$cfg_dir" -maxdepth 1 -not -type d -not -name "cloud.cfg")
for template in "${templates[@]}"; do
get_template_cfg "$template" "$silo" "$node_prefix" >> "$insert_file"
echo "templates.add(template)" >> "$insert_file"
exit 1
fi
- mapfile -t vars < <(cat $global_vars)
+ mapfile -t vars < <(cat "$global_vars")
rm -f insert.txt
for var in "${vars[@]}"; do
continue
fi
- key=$(echo $var | cut -d\= -f1)
- value=$(echo $var | cut -d\= -f2)
+ key=$(echo "$var" | cut -d\= -f1)
+ value=$(echo "$var" | cut -d\= -f2)
echo " '$key': '$value'," >> insert.txt
done
for file in jenkins-config/clouds/openstack/*/*; do
# Set the $IMAGE_NAME variable to the file's IMAGE_NAME value
- export "$(grep ^IMAGE_NAME= $file)"
+ export "$(grep ^IMAGE_NAME= "$file")"
# The image should be listed as active
if ! openstack image list --property name="$IMAGE_NAME" | grep "active"; then
error=true
fi
# Set the $HARDWARE_ID variable to the file's HARDWARE_ID value
- export "$(grep ^HARDWARE_ID= $file)"
+ export "$(grep ^HARDWARE_ID= "$file")"
# The flavor should be listed. Spaces in grep string ensure complete match.
if ! openstack flavor list | grep " $HARDWARE_ID "; then
done
popd
-if [ ! -z "$(ls -A archives/job-configs)" ]; then
+if [ -n "$(ls -A archives/job-configs)" ]; then
tar cJvf archives/job-configs.tar.xz archives/job-configs
rm -rf archives/job-configs
fi
# Check for "-f" maven param, indicating a change in pom location.
pom_path="pom.xml"
-file_path=$(echo $MAVEN_PARAMS | grep -E "\-f \S+" | awk '{ print $2 }')
-if [ ! -z $file_path ]; then
- if [ -d $file_path ]; then
+file_path=$(echo "$MAVEN_PARAMS" | grep -E "\-f \S+" | awk '{ print $2 }')
+if [ -n "$file_path" ]; then
+ if [ -d "$file_path" ]; then
pom_path="$file_path/pom.xml"
else
pom_path="$file_path"
-v "/x:project/x:groupId" \
--elif "/x:project/x:parent/x:groupId" \
-v "/x:project/x:parent/x:groupId" \
- --else -o "" $pom_path)
+ --else -o "" "$pom_path")
project_path="${project//.//}"
mkdir -p "$WORKSPACE/m2repo/$project_path"
set +x
CLI_LOCATION="/tmp/nexus-iq-cli-${NEXUS_IQ_CLI_VERSION}.jar"
-wget -nv https://download.sonatype.com/clm/scanner/nexus-iq-cli-${NEXUS_IQ_CLI_VERSION}.jar -O ${CLI_LOCATION}
+wget -nv "https://download.sonatype.com/clm/scanner/nexus-iq-cli-${NEXUS_IQ_CLI_VERSION}.jar" -O "${CLI_LOCATION}"
echo "-a" > cli-auth.txt
echo "${CLM_USER}:${CLM_PASSWORD}" >> cli-auth.txt
-java -jar ${CLI_LOCATION} @cli-auth.txt -xc -i ${CLM_PROJECT_NAME} -s https://nexus-iq.wl.linuxfoundation.org -t build .
+java -jar "${CLI_LOCATION}" @cli-auth.txt -xc -i "${CLM_PROJECT_NAME}" -s https://nexus-iq.wl.linuxfoundation.org -t build .
rm cli-auth.txt
-rm ${CLI_LOCATION}
+rm "${CLI_LOCATION}"
set -eux -o pipefail
-mapfile -t os_ports < <(openstack --os-cloud "$os_cloud" port list -f value -c ID -c status | egrep DOWN | awk '{print $1}')
+mapfile -t os_ports < <(openstack --os-cloud "$os_cloud" port list -f value -c ID -c status | grep -E DOWN | awk '{print $1}')
if [ ${#os_ports[@]} -eq 0 ]; then
echo "No orphaned ports found."
# Swap to creating END_PACKAGES if we are running in a CI job (determined by
# whether we have a workspace env) or if the starting packages listing already exists.
PACKAGES="${START_PACKAGES}"
-if ( [ "${workspace}" ] || [ -f "${START_PACKAGES}" ] )
+if [ "${workspace}" ] || [ -f "${START_PACKAGES}" ]
then
PACKAGES="${END_PACKAGES}"
fi
;;
esac
-if ( [ -f "${START_PACKAGES}" ] && [ -f "${END_PACKAGES}" ] )
+if [ -f "${START_PACKAGES}" ] && [ -f "${END_PACKAGES}" ]
then
# ` || true` Ignore exit code because diff exits 1 when there is a diff
diff "${START_PACKAGES}" "${END_PACKAGES}" > "${DIFF_PACKAGES}" || true
if hash packer.io 2>/dev/null; then
CURRENT_VERSION="$(packer.io --version)"
- if version_lt $CURRENT_VERSION $PACKER_VERSION; then
+ if version_lt "$CURRENT_VERSION" "$PACKER_VERSION"; then
echo "Packer version $CURRENT_VERSION installed is less than $PACKER_VERSION available, updating Packer."
packer_install
else
mkdir -p "$ARCHIVE_PUPPETLINT_DIR"
cd "$WORKSPACE/$PUPPET_DIR"
-gem install puppet-lint -v $PUPPET_LINT_VERSION
+gem install puppet-lint -v "$PUPPET_LINT_VERSION"
echo "---> Running puppet-lint"
"$BINDIR/puppet-lint" . | tee -a "$ARCHIVE_PUPPETLINT_DIR/puppet-lint.log"
| tee $pip_list_diffs; then
echo "No diffs" | tee $pip_list_diffs
fi
- mkdir -p $WORKSPACE/archives
- cp $pip_list_pre $pip_list_post $pip_list_diffs $WORKSPACE/archives
- rm -rf $pip_list_pre $pip_list_post $pip_list_diffs
- ls $WORKSPACE/archives
+ mkdir -p "$WORKSPACE/archives"
+ cp "$pip_list_pre" "$pip_list_post" "$pip_list_diffs" "$WORKSPACE/archives"
+ rm -rf "$pip_list_pre" "$pip_list_post" "$pip_list_diffs"
+ ls "$WORKSPACE/archives"
# Would just like to 'exit 0' here but we can't because the
# log-deploy.sh script is 'appended' to this file and it would not
# be executed.
else
- pip list > $pip_list_pre
+ pip list > "$pip_list_pre"
# These 'pip installs' only need to be executed during pre-build
requirements_file=$(mktemp /tmp/requirements-XXXX.txt)
python -m pip install --user --quiet --upgrade pip
python -m pip install --user --quiet --upgrade setuptools
python -m pip install --user --quiet --upgrade -r "$requirements_file"
- rm -rf $requirements_file
+ rm -rf "$requirements_file"
fi
# Fetch the release-schema.yaml
wget -q https://raw.githubusercontent.com/lfit/releng-global-jjb/master/schema/release-schema.yaml
-release_files=$(git diff-tree --no-commit-id -r $GERRIT_PATCHSET_REVISION --name-only -- "releases/")
+release_files=$(git diff-tree --no-commit-id -r "$GERRIT_PATCHSET_REVISION" --name-only -- "releases/")
echo "RELEASE FILES ARE AS FOLLOWS: $release_files"
if (( $(grep -c . <<<"$release_files") > 1 )); then
fi
echo "--> Verifying $release_file schema."
-lftools schema verify $release_file release-schema.yaml
+lftools schema verify "$release_file" release-schema.yaml
VERSION="$(niet ".version" "$release_file")"
LOG_DIR="$(niet ".log_dir" "$release_file")"
PATCH_DIR="$(mktemp -d)"
LOGS_URL=${LOGS_URL%/} # strip any trailing '/'
-echo "wget -P "$PATCH_DIR" "${LOGS_URL}/"staging-repo.txt.gz"
+echo "wget -P $PATCH_DIR ${LOGS_URL}/staging-repo.txt.gz"
wget -P "$PATCH_DIR" "${LOGS_URL}/"staging-repo.txt.gz
nexus_release(){
for staging_url in $(zcat "$PATCH_DIR"/staging-repo.txt.gz | awk -e '{print $2}'); do
# extract the domain name from URL
- NEXUS_URL=$(echo $staging_url | sed -e 's|^[^/]*//||' -e 's|/.*$||')
+ NEXUS_URL=$(echo "$staging_url" | sed -e 's|^[^/]*//||' -e 's|/.*$||')
# extract the staging repo from URL
STAGING_REPO=${staging_url#*repositories/}
echo "Merge will run"
echo "LOG DIR: $LOG_DIR"
pushd "$PATCH_DIR"
- echo "wget "${LOGS_URL}"/patches/{"${PROJECT//\//-}".bundle,taglist.log.gz}"
+ echo "wget ${LOGS_URL}/patches/{${PROJECT//\//-}.bundle,taglist.log.gz}"
wget "${LOGS_URL}"/patches/{"${PROJECT//\//-}".bundle,taglist.log.gz}
gunzip taglist.log.gz
cat "$PATCH_DIR"/taglist.log
allowed_version_regex="^((v?)([0-9]+)\.([0-9]+)\.([0-9]+))$"
if [[ ! $VERSION =~ $allowed_version_regex ]]; then
echo "The version $VERSION is not a semantic valid version"
- echo "Allowed versions are "v#.#.#" or "#.#.#" aka SemVer"
+ echo "Allowed versions are \"v#.#.#\" or \"#.#.#\" aka SemVer"
echo "See https://semver.org/ for more details on SemVer"
exit 1
fi
echo "Showing latest signature for $PROJECT:"
gpg --import "$SIGNING_PUBKEY"
-echo "git tag -v "$VERSION""
+echo "git tag -v $VERSION"
git tag -v "$VERSION"
########## Merge Part ##############
[[ $last_char != "/" ]] && RTD_BUILD_URL="$RTD_BUILD_URL/"; :
json=$(curl -X POST -d "branches=${GERRIT_BRANCH}" -d "token=$RTD_TOKEN" "$RTD_BUILD_URL")
-build_triggered=$(echo $json | jq -r .build_triggered)
+build_triggered=$(echo "$json" | jq -r .build_triggered)
if [ "$build_triggered" != "true" ]; then
echo "ERROR: Build was not triggered."
PARALLEL="${PARALLEL:-true}"
if [ "${PARALLEL}" = true ]; then
- if [ ! -z "$TOX_ENVS" ]; then
+ if [ -n "$TOX_ENVS" ]; then
detox -e "$TOX_ENVS" | tee -a "$ARCHIVE_TOX_DIR/detox.log"
tox_status="${PIPESTATUS[0]}"
else
tox_status="${PIPESTATUS[0]}"
fi
else
- if [ ! -z "$TOX_ENVS" ]; then
+ if [ -n "$TOX_ENVS" ]; then
tox -e "$TOX_ENVS" | tee -a "$ARCHIVE_TOX_DIR/tox.log"
tox_status="${PIPESTATUS[0]}"
else
# Disable SC2116 as we want to echo a space-separated list of TOX_ENVS
# shellcheck disable=SC2116
for i in .tox/*/log; do
- tox_env=$(echo $i | awk -F'/' '{print $2}')
+ tox_env=$(echo "$i" | awk -F'/' '{print $2}')
cp -r "$i" "$ARCHIVE_TOX_DIR/$tox_env"
done
set -e # Logs collected so re-enable
echo "---> whitesource-unified-agent-cli.sh"
jar_location="/tmp/wss-unified-agent-${WSS_UNIFIED_AGENT_VERSION}.jar"
wss_unified_agent_url="https://s3.amazonaws.com/unified-agent/wss-unified-agent-${WSS_UNIFIED_AGENT_VERSION}.jar"
-wget -nv ${wss_unified_agent_url} -O ${jar_location}
+wget -nv "${wss_unified_agent_url}" -O "${jar_location}"
echo "---> Running WhiteSource Unified Agent CLI ..."
-java -jar ${jar_location} -c wss-unified-agent.config \
- -product ${WSS_PRODUCT_NAME} -project ${WSS_PROJECT_NAME} \
- -projectVersion ${GERRIT_BRANCH} ${WSS_UNIFIED_AGENT_OPTIONS:-}
-rm ${jar_location}
+java -jar "${jar_location}" -c wss-unified-agent.config \
+ -product "${WSS_PRODUCT_NAME}" -project "${WSS_PROJECT_NAME}" \
+ -projectVersion "${GERRIT_BRANCH}" "${WSS_UNIFIED_AGENT_OPTIONS:-}"
+rm "${jar_location}"