code (string, 2–1.05M chars) | repo_name (string, 5–110 chars) | path (string, 3–922 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 2–1.05M)
---|---|---|---|---|---|
#!/bin/bash
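# Note: getent(1) exits 0 when the requested key exists and 2 when it is not
# found in the database; the branches below rely on that contract.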
if ! [ "$(id 'relay')" ]; then
getent group 'relay' > /dev/null 2>&1
exit_code=$?
if [ $exit_code -eq 0 ]; then
echo "creating user relay and adding to relay group"
useradd --no-create-home --system -g"relay" "relay"
elif [ $exit_code -eq 2 ]; then
echo "creating user and group relay"
useradd --no-create-home --system --user-group "relay"
else
echo "could not get group info, failed"
exit 1
fi
fi
echo "creating log directory: /var/log/relayd"
mkdir -p /var/log/relayd
chown relay:relay /var/log/relayd
| tsheasha/relayd | deb/before_install.sh | Shell | apache-2.0 | 555 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run e2e tests using environment variables exported in e2e.sh.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
# Have cmd/e2e run by goe2e.sh generate JUnit report in ${WORKSPACE}/junit*.xml
ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS}
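# ':' is a no-op command; "${VAR:=default}" assigns the default only when VAR
# is unset or empty, so the two lines below set fallback bucket names.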
: ${KUBE_GCS_RELEASE_BUCKET:="kubernetes-release"}
: ${KUBE_GCS_DEV_RELEASE_BUCKET:="kubernetes-release-dev"}
# Explicitly set config path so staging gcloud (if installed) uses same path
export CLOUDSDK_CONFIG="${WORKSPACE}/.config/gcloud"
echo "--------------------------------------------------------------------------------"
echo "Test Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"
# When run inside Docker, we need to make sure all files are world-readable
# (since they will be owned by root on the host).
trap "chmod -R o+r '${ARTIFACTS}'" EXIT SIGINT SIGTERM
export E2E_REPORT_DIR=${ARTIFACTS}
e2e_go_args=( \
-v \
--dump="${ARTIFACTS}" \
)
if [[ "${FAIL_ON_GCP_RESOURCE_LEAK:-true}" == "true" ]]; then
case "${KUBERNETES_PROVIDER}" in
gce|gke)
e2e_go_args+=(--check-leaked-resources)
;;
esac
fi
if [[ "${E2E_TEST:-}" == "true" ]]; then
e2e_go_args+=(--test)
if [[ "${SKEW_KUBECTL:-}" == 'y' ]]; then
GINKGO_TEST_ARGS="${GINKGO_TEST_ARGS:-} --kubectl-path=$(pwd)/kubernetes_skew/cluster/kubectl.sh"
fi
if [[ -n "${GINKGO_TEST_ARGS:-}" ]]; then
e2e_go_args+=(--test_args="${GINKGO_TEST_ARGS}")
fi
fi
# Optionally run upgrade tests before other tests.
if [[ "${E2E_UPGRADE_TEST:-}" == "true" ]]; then
e2e_go_args+=(--upgrade_args="${GINKGO_UPGRADE_TEST_ARGS}")
fi
kubetest ${E2E_OPT:-} "${e2e_go_args[@]}" "${@}"
| dchen1107/test-infra | jenkins/e2e-image/e2e-runner.sh | Shell | apache-2.0 | 2,385 |
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
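# vim-plug bootstrap: after fetching plug.vim, plugins declared in the vimrc
# are installed from inside (n)vim with :PlugInstall (vim-plug's documented workflow).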
| ilya-murzinov/dotfiles | nvim/plug.sh | Shell | apache-2.0 | 117 |
#!/bin/bash
git clone https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh
curl -L https://raw.githubusercontent.com/tonyseek/oh-my-zsh-seeker-theme/master/install.sh | sh
ln -sf $HOME/.vim/.vimrc $HOME/.vimrc
ln -sf $HOME/.vim/.zshrc $HOME/.zshrc
vim -c "BundleInstall"
bundle/YouCompleteMe/install.sh --clang-completer
sudo pip install virtualenvwrapper
brew install ctags
| moowiz/dotfiles | setup.sh | Shell | apache-2.0 | 371 |
#!/bin/bash
#
# Copyright (C) 2015 University of Oregon
#
# You may distribute under the terms of either the GNU General Public
# License or the Apache License, as specified in the LICENSE file.
#
# For more information, see the LICENSE file.
#
# Update OpenVnmrJ
# First argument is the location of the OpenVnmrJ installation
# Second argument is the location of the previous VnmrJ 4.2 installation
#
# set -x
ovj=$1
prev=$2
if [ ! -d $prev ]
then
    if [ $# -lt 3 ]
    then
        echo "Update source directory $prev does not exist"
    fi
    exit 1
fi
vnmr_rev=`grep VERSION "$prev"/vnmrrev`
vnmr_rev_1=`echo $vnmr_rev | awk '{ print tolower($1) }'`
vnmr_rev_3=`echo $vnmr_rev | awk '{ print tolower($3) }'`
if [ "$vnmr_rev_1" != "openvnmrj" ]
then
    if [ "$vnmr_rev_3" != "4.2" ]
    then
        if [ $# -lt 3 ]
        then
            echo "Can only update OpenVnmrJ from an existing VnmrJ 4.2 or OpenVnmrJ installation"
        fi
        exit 1
    fi
fi
echo "Updating OpenVnmrJ from $prev"
if [ -d $prev/biopack/fidlib ] && [ -d $ovj/biopack ] && [ ! -d $ovj/biopack/fidlib ]
then
    echo "Collecting biopack files"
    cd $prev/biopack
    newdir=$ovj/biopack
    tar cf - bin BioPack.dir/BP_doc fidlib 2> /dev/null | (cd $newdir && tar xpf -)
fi
if [ -d $prev/help ] && [ ! -d $ovj/help ]
then
    echo "Collecting help"
    cd $prev
    newdir=$ovj
    tar cf - help | (cd $newdir && tar xpf -)
fi
if [ -d $prev/fidlib/Ethylindanone ] && [ ! -d $ovj/fidlib/Ethylindanone ]
then
    echo "Collecting fidlib"
    cd $prev
    newdir=$ovj
    rm -rf $newdir/fidlib
    tar cf - fidlib | (cd $newdir && tar xpf -)
fi
if [ ! -d $ovj/imaging ]
then
    exit 0
fi
if [ -d $prev/imaging/data ] && [ ! -d $ovj/imaging/data ]
then
    echo "Collecting imaging data"
    cd $prev/imaging
    newdir=$ovj/imaging
    tar cf - data | (cd $newdir && tar xpf -)
fi
if [ -f $prev/imaging/seqlib/gems ] && [ ! -f $ovj/imaging/seqlib/gems ]
then
    echo "Collecting imaging files"
    cd $prev/imaging
    newdir=$ovj/imaging
    macList=" \
        acq2Dsense \
        aip2Dmask \
        aip2Dsense \
        aip2Dsmap \
        csi2d \
        gems \
        gemsll_prep \
        steam \
        steami \
        "
    for mac in $macList
    do
        if [ -f maclib/$mac ] && [ ! -f $newdir/maclib/$mac ]
        then
            cp maclib/$mac $newdir/maclib/$mac
        fi
    done
    parList=" \
        acsi2d.par \
        csi2d.par \
        csi3d.par \
        epsi3d.par \
        fepsi3d.par \
        gemsir.par \
        gemsll.par \
        gems.par \
        laser_csi.par \
        sgems.par \
        steam2.par \
        steamcsi.par \
        steam.par \
        "
    for par in $parList
    do
        if [ -d parlib/$par ] && [ ! -d $newdir/parlib/$par ]
        then
            cp -r parlib/$par $newdir/parlib/$par
        fi
    done
    seqList=" \
        acsi2d \
        csi2d \
        csi3d \
        epsi3d \
        fepsi3d \
        gems \
        gems_fc \
        gemsir \
        gemsll \
        gemsshim \
        laser_csi \
        sgems \
        steam2 \
        steam \
        "
    for seq in $seqList
    do
        if [ -f seqlib/$seq ] && [ ! -f $newdir/seqlib/$seq ]
        then
            cp seqlib/$seq $newdir/seqlib/$seq
            cp psglib/"$seq".c $newdir/psglib/"$seq".c
        fi
        if [ -f templates/vnmrj/protocols/"$seq".xml ] && [ ! -f $newdir/templates/vnmrj/protocols/"$seq".xml ]
        then
            cp templates/vnmrj/protocols/"$seq".xml $newdir/templates/vnmrj/protocols/"$seq".xml
        fi
        if [ -d templates/layout/"$seq" ] && [ ! -d $newdir/templates/layout/"$seq" ]
        then
            cp -r templates/layout/"$seq" $newdir/templates/layout/"$seq"
        fi
    done
    defList=" \
        acq2Dsense.xml \
        proc2Dmask.xml \
        proc2Dsense.xml \
        proc2Dsmap.xml \
        "
    for def in $defList
    do
        if [ -f templates/layout/default/$def ] && [ ! -f $newdir/templates/layout/default/$def ]
        then
            cp templates/layout/default/$def $newdir/templates/layout/default/$def
        fi
        if [ -f templates/layout/fsems/$def ] && [ ! -f $newdir/templates/layout/fsems/$def ]
        then
            cp templates/layout/fsems/$def $newdir/templates/layout/fsems/$def
        fi
    done
fi
| OpenVnmrJ/OpenVnmrJ | src/scripts/update_OpenVnmrJ.sh | Shell | apache-2.0 | 4,541 |
#!/bin/bash
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
#
# Usage:
# alluxio-yarn.sh <numWorkers> <pathHdfs>
function printUsage {
  echo "Usage: alluxio-yarn.sh <numWorkers> <pathHdfs> [masterAddress]"
  echo -e " numWorkers \tNumber of Alluxio workers to launch"
  echo -e " pathHdfs \tPath on HDFS to put alluxio jar and distribute it to YARN"
  echo -e " masterAddress \tYarn node to launch the Alluxio master on, defaults to ALLUXIO_MASTER_HOSTNAME"
  echo -e " \tUse \"any\" if the master can be launched on any host of YARN"
  echo
  echo "Example: ./alluxio-yarn.sh 10 hdfs://localhost:9000/tmp/ ip-172-31-5-205.ec2.internal"
  echo "Example: ./alluxio-yarn.sh 10 hdfs://localhost:9000/tmp/ any"
}
if [[ "$#" -lt 2 ]] || [[ "$#" -gt 3 ]]; then
  printUsage
  exit 1
fi
if [[ -z "$HADOOP_HOME" ]]; then
  echo "\$HADOOP_HOME is unset, please set this variable to connect to HDFS and YARN" >&2
  exit 1
else
  echo "Using \$HADOOP_HOME set to '$HADOOP_HOME'"
fi
if [[ -z "$YARN_HOME" ]]; then
  echo "\$YARN_HOME is unset, will use \$HADOOP_HOME instead."
fi
YARN_HOME=${YARN_HOME:-${HADOOP_HOME}}
SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
ALLUXIO_HOME="$(cd "${SCRIPT_DIR}/../../.."; pwd)"
source "${SCRIPT_DIR}/common.sh"
NUM_WORKERS=$1
HDFS_PATH=$2
MASTER_ADDRESS=${3:-${ALLUXIO_MASTER_HOSTNAME}}
ALLUXIO_TARFILE="alluxio.tar.gz"
rm -rf $ALLUXIO_TARFILE
tar -C $ALLUXIO_HOME -zcf $ALLUXIO_TARFILE \
  assembly/server/target/alluxio-assembly-server-${VERSION}-jar-with-dependencies.jar libexec \
  core/server/common/src/main/webapp \
  bin conf integration/yarn/bin/common.sh integration/yarn/bin/alluxio-master-yarn.sh \
  integration/yarn/bin/alluxio-worker-yarn.sh \
  integration/yarn/bin/alluxio-application-master.sh
JAR_LOCAL=${ALLUXIO_HOME}/integration/yarn/target/alluxio-integration-yarn-${VERSION}-jar-with-dependencies.jar
echo "Uploading files to HDFS to distribute alluxio runtime"
${HADOOP_HOME}/bin/hadoop fs -mkdir -p ${HDFS_PATH}
${HADOOP_HOME}/bin/hadoop fs -put -f ${ALLUXIO_TARFILE} ${HDFS_PATH}/${ALLUXIO_TARFILE}
${HADOOP_HOME}/bin/hadoop fs -put -f ${JAR_LOCAL} ${HDFS_PATH}/alluxio.jar
${HADOOP_HOME}/bin/hadoop fs -put -f ${SCRIPT_DIR}/alluxio-yarn-setup.sh ${HDFS_PATH}/alluxio-yarn-setup.sh
${HADOOP_HOME}/bin/hadoop fs -put -f ${SCRIPT_DIR}/alluxio-application-master.sh ${HDFS_PATH}/alluxio-application-master.sh
echo "Starting YARN client to launch Alluxio on YARN"
# Add Alluxio java options to the yarn options so that alluxio.yarn.Client can be configured via
# alluxio java options
ALLUXIO_JAVA_OPTS="${ALLUXIO_JAVA_OPTS} -Dalluxio.logger.type=Console"
export YARN_OPTS="${YARN_OPTS} ${ALLUXIO_JAVA_OPTS}"
${YARN_HOME}/bin/yarn jar ${JAR_LOCAL} alluxio.yarn.Client \
-num_workers ${NUM_WORKERS} \
-master_address ${MASTER_ADDRESS} \
-resource_path ${HDFS_PATH}
| Reidddddd/mo-alluxio | integration/yarn/bin/alluxio-yarn.sh | Shell | apache-2.0 | 3,337 |
#! /bin/bash
# This file must be sourced.
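# (Sourcing keeps the variable definitions below in the calling shell's environment.)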
if [ "x${DIG_CRF_HOME}" == "x" ]
then
echo "Please set the DIG_CRF_HOME envar"
exit 1
fi
DATASET_NAME=hbase-dump-2016-06-15
DIG_CRF_APPLY=${DIG_CRF_HOME}/src/applyCrf
DIG_CRF_COUNT=${DIG_CRF_HOME}/src/count
DIG_CRF_EXTRACT=${DIG_CRF_HOME}/src/extract
DIG_CRF_SCRIPT=${DIG_CRF_HOME}
DIG_CRF_UTIL=${DIG_CRF_HOME}/src/util
DIG_CRF_DATA_CONFIG_DIR=${DIG_CRF_HOME}/data/config
ARGPARSE_PY_PATH=${DIG_CRF_HOME}/src/external/argparse/argparse-1.4.0/argparse.py
QUIETER_LOG4J_PROPERTIES_FILE=quieter-log4j.properties
DRIVER_JAVA_OPTIONS="--driver-java-options -Dlog4j.configuration=file:${DIG_CRF_DATA_CONFIG_DIR}/${QUIETER_LOG4J_PROPERTIES_FILE}"
HYBRID_JACCARD_CONFIG_FILE=hybrid_jaccard_config.json
HAIR_EYE_FEATURES_CONFIG_FILE=features.hair-eye
HAIR_EYE_CRF_MODEL_FILE=dig-hair-eye-train.model
NAME_ETHNIC_FEATURES_CONFIG_FILE=features.name-ethnic
NAME_ETHNIC_CRF_MODEL_FILE=dig-name-ethnic-train.model
HDFS_WORK_DIR=hdfs:///user/crogers
HDFS_PRODUCTION_DIR=hdfs:///user/worker/${DATASET_NAME}
HDFS_INPUT_DATA_DIR=${HDFS_PRODUCTION_DIR}/data-filtered
HDFS_CRF_DATA_DIR=${HDFS_PRODUCTION_DIR}/crf
PYTHON_EGG_CACHE=./python-eggs
export PYTHON_EGG_CACHE
DIG_CRF_EGG_FILE=${DIG_CRF_HOME}/CRF++-0.58/python/dist/mecab_python-0.0.0-py2.7-linux-x86_64.egg
DIG_CRF_PYTHON_ZIP_FILE=${DIG_CRF_HOME}/pythonFiles.zip
DATASET_WORK_DIR=${HDFS_WORK_DIR}/${DATASET_NAME}
TITLE_AND_TEXT_TOKENS_FILE=title-and-text-tokens
WORKING_TITLE_AND_TEXT_TOKENS_FILE=${DATASET_WORK_DIR}/${TITLE_AND_TEXT_TOKENS_FILE}.seq
HAIR_EYES_FILE=hair-eyes
WORKING_HAIR_EYES_FILE=${DATASET_WORK_DIR}/${HAIR_EYES_FILE}.seq
WORKING_HAIR_EYES_HJ_FILE=${DATASET_WORK_DIR}/${HAIR_EYES_FILE}-hj.seq
WORKING_HAIR_EYES_TOKENS_FILE=${DATASET_WORK_DIR}/${HAIR_EYES_FILE}-tokens.seq
# 22-Jun-2016: Working name extraction has been disabled for HJ.
# The old definition was: NAME_ETHNIC_FILE=name-ethnic
ETHNIC_FILE=ethnic
NAME_ETHNIC_FILE=name-ethnic
WORKING_NAME_ETHNIC_FILE=${DATASET_WORK_DIR}/${ETHNIC_FILE}.seq
WORKING_NAME_ETHNIC_HJ_FILE=${DATASET_WORK_DIR}/${ETHNIC_FILE}-hj.seq
WORKING_NAME_ETHNIC_TOKENS_FILE=${DATASET_WORK_DIR}/${NAME_ETHNIC_FILE}-tokens.seq
# These are the files we'll deliver:
PRODUCTION_TITLE_AND_TEXT_TOKENS_FILE=${HDFS_CRF_DATA_DIR}/tokenized
PRODUCTION_HAIR_EYES_FILE=${HDFS_CRF_DATA_DIR}/${HAIR_EYES_FILE}-nohj
PRODUCTION_NAME_ETHNIC_FILE=${HDFS_CRF_DATA_DIR}/${ETHNIC_FILE}-nohj
PRODUCTION_HAIR_EYES_HJ_FILE=${HDFS_CRF_DATA_DIR}/${HAIR_EYES_FILE}
PRODUCTION_NAME_ETHNIC_HJ_FILE=${HDFS_CRF_DATA_DIR}/${ETHNIC_FILE}
| usc-isi-i2/dig-crf | examples/hbase-dump-2016-06-15/config.sh | Shell | apache-2.0 | 2,564 |
#!/bin/bash
set -e
#
# Copyright 2015 Chef Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# dev/scripts/update-changelog.sh: Generate an updated CHANGELOG and a
# modified-components changelog for a Chef Server release, comparing
# against a previous chef-server.deb package.
#
# Required Environment Variables
# -------------------------------
#
# CHANGELOG_GITHUB_TOKEN Obtain one here: https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token
basedir=$(dirname $0)
currentdir=$(pwd)
chef_version=$(grep build_version $basedir/../../omnibus/config/projects/chef-server.rb | cut -d\" -f2)
usage() {
    echo "Usage: "
    echo "  ./update-changelog path/to/previous/version/chef-server.deb"
    echo ""
    echo "  CHANGELOG_GITHUB_TOKEN must be set to valid github token. Go "
    echo "  here to obtain one: https://github.com/settings/tokens/new?description=GitHub%20Changelog%20Generator%20token."
    exit 1
}
if [[ -z "$CHANGELOG_GITHUB_TOKEN" ]]; then
    usage
fi
if [ "$#" -ne 1 ]; then
    usage
fi
pushd $basedir/../../omnibus
bundle install --with=release
bundle exec github_changelog_generator -u chef -p chef-server -t $CHANGELOG_GITHUB_TOKEN --enhancement-labels "enhancement,Enhancement,New Feature" --bug-labels "bug,Bug,Improvement,Upstream Bug" --exclude-labels "duplicate,question,invalid,wontfix,no_changelog" -o $currentdir/NEW_CHANGELOG.md --future-release $chef_version
popd
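# A Debian package is an ar(1) archive; pull the shipped version manifest out
# of its data.tar.xz member to diff against the freshly generated manifest below.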
ar p $1 data.tar.xz | tar x ./opt/opscode/version-manifest.json
cd $basedir/../../omnibus
bundle exec omnibus manifest chef-server -l fatal > $currentdir/version-manifest.json
bundle exec omnibus changelog generate --starting-manifest=$currentdir/opt/opscode/version-manifest.json --ending-manifest=$currentdir/version-manifest.json | grep -v "( -> )" > $currentdir/MODIFIED_COMPONENTS_CHANGELOG.md
| juliandunn/chef-server-1 | dev/scripts/update-changelog.sh | Shell | apache-2.0 | 2,331 |
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
install_dsym() {
  local source="$1"
  if [ -r "$source" ]; then
    # Copy the dSYM into the target's temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .framework.dSYM "$source")"
    binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"

    # Strip invalid architectures so "fat" simulator / device frameworks work on device
    if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
      strip_invalid_archs "$binary"
    fi

    if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
      # Move the stripped file into its final destination.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
    fi
  fi
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
  local bcsymbolmap_path="$1"
  local destination="${BUILT_PRODUCTS_DIR}"
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/QuickLayout/QuickLayout.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SwiftEntryKit/SwiftEntryKit.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/QuickLayout/QuickLayout.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SwiftEntryKit/SwiftEntryKit.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| themonki/onebusaway-iphone | Carthage/Checkouts/SwiftEntryKit/Example/Pods/Target Support Files/Pods-SwiftEntryKitDemo/Pods-SwiftEntryKitDemo-frameworks.sh | Shell | apache-2.0 | 8,046 |
#!/bin/bash
DIR=$(dirname $0)
pylint --rcfile $DIR/../.pylintrc $DIR/prettypublictest
| pcn/prettypublictest | run_pylint.sh | Shell | apache-2.0 | 88 |
#!/usr/bin/env bash
# -------------------------------------------------------
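# Each 0/1 flag below toggles one section of this script; RELEASE selects
# the TripleO release that quickstart.sh will deploy.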
WORKAROUND=0
PIP=1
NEW_OOOQ=1
DEV=0
RUNQ=1
PKGS=1
SCRIPTS=1
VALIDATE=0
RELEASE=master-tripleo-ci
#RELEASE=queens
#RELEASE=rocky
# -------------------------------------------------------
export VIRTHOST=127.0.0.2
echo "Testing virthost connection"
ssh root@$VIRTHOST uname -a || { echo "ssh connection to virthost not ready"; exit 1; }
#ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
#ssh-copy-id [email protected]
# -------------------------------------------------------
if [ $WORKAROUND -eq 1 ]; then
    sudo yum install -y libguestfs-tools wget
    # Due to https://bugzilla.redhat.com/show_bug.cgi?id=1581364 libvirt issue
    mkdir rpms; pushd rpms
    wget -r -nd -l1 -v --no-parent http://file.rdu.redhat.com/~mbaldess/libvirt-rpms/
    sudo yum install -y *rpm
    popd
    sudo systemctl restart libvirtd
fi
# -------------------------------------------------------
if [ $PIP -eq 1 ]; then
    curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
    sudo python get-pip.py
fi
# -------------------------------------------------------
if [ $NEW_OOOQ -eq 1 ]; then
    if [ $DEV -eq 1 ]; then
        echo "ERROR: DEV is not compatible with NEW_OOOQ"
        exit 1
    fi
    sudo rm -rf ~/.quickstart
    url=https://raw.githubusercontent.com/openstack/tripleo-quickstart/master/quickstart.sh
    curl $url > quickstart.sh
    bash quickstart.sh --install-deps
fi
# -------------------------------------------------------
if [ $DEV -eq 1 ]; then
    sudo rm -rf ~/.quickstart
    # if you already set up ~/git/{tripleo-quickstart,tripleo-quickstart-extras}
    # with downloaded code reviews, then they will be used. otherwise, get them
    OOOQ_REVIEW=579381
    OOOQ_EXTRAS_REVIEW=579382
    if [ $NEW_OOOQ -eq 1 ]; then
        echo "ERROR: NEW_OOOQ is not compatible with DEV"
        exit 1
    fi
    if [[ ! -d ~/git ]]; then
        mkdir ~/git
    fi
    if [[ ! -d ~/git/tripleo-quickstart ]]; then
        pushd ~/git/
        if [[ ! -e ~/oooq/git-init.sh ]]; then
            echo "~/oooq/git-init.sh is missing. aborting"
            exit 1
        fi
        echo "Cloning master branches of oooq and oooq-extras"
        echo "use 'git review -d <number>' to clone here next time"
        ln -s ~/oooq/git-init.sh
        bash git-init.sh oooq
        popd
    fi
    if [[ -d ~/git/tripleo-quickstart && -d ~/git/tripleo-quickstart-extras ]]; then
        if [[ $OOOQ_REVIEW ]]; then
            pushd ~/git/tripleo-quickstart
            git review -d $OOOQ_REVIEW
            popd
        fi
        if [[ $OOOQ_EXTRAS_REVIEW ]]; then
            pushd ~/git/tripleo-quickstart-extras
            git review -d $OOOQ_EXTRAS_REVIEW
            popd
        fi
        # take advantage of quickstart-extras-requirements.txt being able to use local dir
        echo -n file: > ~/git/tripleo-quickstart/quickstart-extras-requirements.txt
        echo ~/git/tripleo-quickstart-extras >> ~/git/tripleo-quickstart/quickstart-extras-requirements.txt
    fi
    echo "using oooq from ~/git/tripleo-quickstart"
    pushd ~/git/tripleo-quickstart
    bash quickstart.sh --install-deps
fi
# -------------------------------------------------------
if [ $RUNQ -eq 1 ]; then
    if [ $DEV -eq 1 ]; then
        NO_CLONE="--no-clone"
    fi
    time bash quickstart.sh \
        --teardown all \
        --release $RELEASE \
        --nodes ~/oooq/under/nodes.yaml \
        --config ~/oooq/under/config.yaml \
        --clean \
        $NO_CLONE \
        $VIRTHOST
    if [[ $? -gt 0 ]]; then
        if [ $DEV -eq 1 ]; then popd; fi
        echo "ERROR: initial run of quickstart failed."
        exit 1
    fi
fi
# -------------------------------------------------------
if [ $DEV -eq 1 ]; then
    popd # leave ~/git/tripleo-quickstart
fi
# -------------------------------------------------------
if [ -d ~/.quickstart/ ]; then
    export SSH_ENV=~/.quickstart/ssh.config.ansible
fi
# -------------------------------------------------------
ssh -F $SSH_ENV stack@undercloud "uname -a" || (echo "No ssh for stack@undercloud; exiting."; exit 1)
# -------------------------------------------------------
if [ $PKGS -eq 1 ]; then
    if [ ! -d pkgs ]; then
        bash pkgs.sh # create packages directory
    fi
    if [ -d pkgs ]; then
        scp -r -F $SSH_ENV pkgs stack@undercloud:/home/stack/pkgs
        ssh -F $SSH_ENV stack@undercloud "pushd ~/pkgs ; sudo yum localinstall *.rpm -y ; popd"
        ssh -F $SSH_ENV stack@undercloud "sudo yum install -y emacs-nox vim tmux"
    else
        echo "no local pkgs directory to install on undercloud"
    fi
fi
# -------------------------------------------------------
if [ $SCRIPTS -eq 1 ]; then
    ssh -F $SSH_ENV stack@undercloud "echo 'curl https://github.com/fultonj.keys >> ~/.ssh/authorized_keys' >> sh_me"
    ssh -F $SSH_ENV stack@undercloud "echo 'ssh-keyscan github.com >> ~/.ssh/known_hosts' >> sh_me"
    ssh -F $SSH_ENV stack@undercloud "echo 'git clone git@github.com:fultonj/oooq.git' >> sh_me"
    ssh -F $SSH_ENV stack@undercloud "echo 'ln -s ~/oooq/over/deploy.sh' >> sh_me"
    ssh -F $SSH_ENV stack@undercloud "echo 'ln -s ~/oooq/over/overrides.yaml' >> sh_me"
    ssh -F $SSH_ENV stack@undercloud "echo 'source /home/stack/stackrc' >> ~/.bashrc"
    ssh -F $SSH_ENV stack@undercloud "echo 'alias os=openstack' >> ~/.bashrc"
    ssh -F $SSH_ENV stack@undercloud "echo StrictHostKeyChecking no > ~/.ssh/config; chmod 0600 ~/.ssh/config; rm -f ~/.ssh/known_hosts 2> /dev/null; ln -s /dev/null ~/.ssh/known_hosts;"
    ssh -F $SSH_ENV stack@undercloud "curl https://github.com/fultonj.keys >> ~/.ssh/authorized_keys"
fi
# -------------------------------------------------------
if [ $VALIDATE -eq 1 ]; then
    if [ $DEV -eq 1 ]; then pushd ~/git/tripleo-quickstart; fi
    bash quickstart.sh \
        --teardown none \
        --retain-inventory \
        --tags 'overcloud-validate' \
        --release $RELEASE \
        --nodes nodes.yaml \
        --config config.yaml \
        $VIRTHOST
    if [ $DEV -eq 1 ]; then popd; fi
fi
| fultonj/oooq | under/deploy.sh | Shell | apache-2.0 | 5,775 |
#!/usr/bin/env bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Variables
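# Note: KUBECTX_VERSION, ISTIO_VERSION and KOPS_VERSION are assumed to be
# exported by the workshop environment before this script runs.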
export PROJECT=$(gcloud config get-value project)
export WORK_DIR=${WORK_DIR:="${PWD}/workdir"}
## Install Tools
mkdir -p $WORK_DIR/bin
echo "### "
echo "### Begin Tools install"
echo "### "
## Install tree
if command -v tree >/dev/null 2>&1; then
  echo "tree already installed."
else
  echo "Installing tree..."
  sudo apt-get install -y tree
  sudo mv /usr/bin/tree $WORK_DIR/bin
fi
## Install kubectx
if command -v kubectx >/dev/null 2>&1; then
  echo "kubectx already installed."
else
  echo "Installing kubectx..."
  curl -sLO https://raw.githubusercontent.com/ahmetb/kubectx/"$KUBECTX_VERSION"/kubectx
  chmod +x kubectx
  mv kubectx $WORK_DIR/bin
  echo "kubectx installation complete."
fi
## Install Istio
if [ -d "$WORK_DIR/istio-$ISTIO_VERSION" ] && [ -x "$(command -v istioctl)" ]; then
  echo "Istio already installed."
else
  echo "Downloading Istio..."
  curl -L https://git.io/getLatestIstio | ISTIO_VERSION=$ISTIO_VERSION sh -
  cp istio-$ISTIO_VERSION/bin/istioctl $WORK_DIR/bin/.
  mv istio-$ISTIO_VERSION $WORK_DIR/
fi
## Install kops
if command -v kops >/dev/null 2>&1; then
  echo "kops already installed."
else
  echo "Installing kops..."
  curl -sLO https://github.com/kubernetes/kops/releases/download/$KOPS_VERSION/kops-linux-amd64
  chmod +x kops-linux-amd64
  mv kops-linux-amd64 $WORK_DIR/bin/kops
  echo "kops installation complete."
fi
| GoogleCloudPlatform/anthos-workshop | common/install-tools.sh | Shell | apache-2.0 | 1,948 |
#!/bin/bash
# configuration
user=webiopi # login username
pass=raspberry # login password
ipadr=127.0.0.1 # webiopi server ip address
port=8000 # webiopi server port number
pin=4 # GPIO port number
freq=50.0 # frequency
# default pwm frequency is 50 Hz.
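# WebIOPi REST endpoints exercised below:
#   POST /GPIO/<pin>/function/<mode>  set the pin mode (pwm, in, ...)
#   POST /GPIO/<pin>/pulseFreq/<hz>   set the PWM frequency
#   POST /GPIO/<pin>/pulseRatio/<r>   set the duty cycle (0.0 - 1.0)
#   GET  /GPIO/<pin>/function|freq|pulse  read the current state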
if [ "$1" != "" ] ; then
freq=$1
fi
echo "Frequency is $freq [Hz]"
# set specified GPIO port function to pwm mode.
curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/function/pwm && echo
# get function
curl -u $user:$pass http://$ipadr:$port/GPIO/$pin/function && echo
# max
curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulseFreq/100 && echo
curl -u $user:$pass http://$ipadr:$port/GPIO/$pin/freq && echo
# min
curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulseFreq/0.01 && echo
curl -u $user:$pass http://$ipadr:$port/GPIO/$pin/freq && echo
# current pulse
curl -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulse && echo
# set pulse frequency
curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulseFreq/$freq && echo
curl -u $user:$pass http://$ipadr:$port/GPIO/$pin/freq && echo
#exit
# hotaru
for (( c=0; c < 3 ; c++ )) ; do
    for (( i=0; i<100; i+=5 )) ; do
        v=`printf "%02d" $i`;
        curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulseRatio/0.$v;
        echo
    done
    sleep 0.5
    for (( i=99; i>=0 ; i-=5 )) ; do
        v=`printf "%02d" $i`;
        curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulseRatio/0.$v;
        echo
    done
    curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/pulseRatio/0.0
    echo
    sleep 0.7
done
echo
# disable pwm.
curl -X POST -u $user:$pass http://$ipadr:$port/GPIO/$pin/function/in
echo
| thortex/rpi3-webiopi | dev/test/hotaru.sh | Shell | apache-2.0 | 1,710 |
#!/bin/bash
# Author:Tyson
# E-mail:admin#svipc.com
# Website:http://www.svipc.com
# Version:1.0.0 Aug-16-2015-12:28:58
# Notes:Autoscripts for CentOS/RedHat 5+ Debian 6+ and Ubuntu 12+
Install_MySQL-5-5()
{
cd $Autoscripts_dir/src
src_url=http://cdn.mysql.com/Downloads/MySQL-5.5/mysql-$mysql_5_version.tar.gz && Download_src
id -u mysql >/dev/null 2>&1
[ $? -ne 0 ] && useradd -M -s /sbin/nologin mysql
mkdir -p $mysql_data_dir;chown mysql.mysql -R $mysql_data_dir
tar zxf mysql-$mysql_5_version.tar.gz
cd mysql-$mysql_5_version
if [ "$je_tc_malloc" == '1' ];then
EXE_LINKER="-DCMAKE_EXE_LINKER_FLAGS='-ljemalloc'"
elif [ "$je_tc_malloc" == '2' ];then
EXE_LINKER="-DCMAKE_EXE_LINKER_FLAGS='-ltcmalloc'"
fi
make clean
[ ! -d "$mysql_install_dir" ] && mkdir -p $mysql_install_dir
cmake . -DCMAKE_INSTALL_PREFIX=$mysql_install_dir \
-DMYSQL_DATADIR=$mysql_data_dir \
-DSYSCONFDIR=/etc \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DWITH_FEDERATED_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_READLINE=1 \
-DENABLED_LOCAL_INFILE=1 \
-DENABLE_DTRACE=0 \
-DDEFAULT_CHARSET=utf8mb4 \
-DDEFAULT_COLLATION=utf8mb4_general_ci \
-DWITH_EMBEDDED_SERVER=1 \
$EXE_LINKER
make -j `grep processor /proc/cpuinfo | wc -l`
make install
if [ -d "$mysql_install_dir/support-files" ];then
echo "${CSUCCESS}MySQL install successfully! ${CEND}"
cd ..
rm -rf mysql-$mysql_6_version
else
rm -rf $mysql_install_dir
echo "${CFAILURE}MySQL install failed, Please contact the author! ${CEND}"
kill -9 $$
fi
/bin/cp $mysql_install_dir/support-files/mysql.server /etc/init.d/mysqld
chmod +x /etc/init.d/mysqld
OS_CentOS='chkconfig --add mysqld \n
chkconfig mysqld on'
OS_Debian_Ubuntu='update-rc.d mysqld defaults'
OS_command
cd ..
# my.cf
cat > /etc/my.cnf << EOF
[client]
port = 3306
socket = /tmp/mysql.sock
default-character-set = utf8mb4
[mysqld]
port = 3306
socket = /tmp/mysql.sock
basedir = $mysql_install_dir
datadir = $mysql_data_dir
pid-file = $mysql_data_dir/mysql.pid
user = mysql
bind-address = 0.0.0.0
server-id = 1
init-connect = 'SET NAMES utf8mb4'
character-set-server = utf8mb4
skip-name-resolve
#skip-networking
back_log = 300
max_connections = 1000
max_connect_errors = 6000
open_files_limit = 65535
table_open_cache = 128
max_allowed_packet = 4M
binlog_cache_size = 1M
max_heap_table_size = 8M
tmp_table_size = 16M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
sort_buffer_size = 8M
join_buffer_size = 8M
key_buffer_size = 4M
thread_cache_size = 8
query_cache_type = 1
query_cache_size = 8M
query_cache_limit = 2M
ft_min_word_len = 4
log_bin = mysql-bin
binlog_format = mixed
expire_logs_days = 30
log_error = $mysql_data_dir/mysql-error.log
slow_query_log = 1
long_query_time = 1
slow_query_log_file = $mysql_data_dir/mysql-slow.log
performance_schema = 0
#lower_case_table_names = 1
skip-external-locking
default_storage_engine = InnoDB
#default-storage-engine = MyISAM
innodb_file_per_table = 1
innodb_open_files = 500
innodb_buffer_pool_size = 64M
innodb_write_io_threads = 4
innodb_read_io_threads = 4
innodb_thread_concurrency = 0
innodb_purge_threads = 1
innodb_flush_log_at_trx_commit = 2
innodb_log_buffer_size = 2M
innodb_log_file_size = 32M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 90
innodb_lock_wait_timeout = 120
bulk_insert_buffer_size = 8M
myisam_sort_buffer_size = 8M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
interactive_timeout = 28800
wait_timeout = 28800
[mysqldump]
quick
max_allowed_packet = 16M
[myisamchk]
key_buffer_size = 8M
sort_buffer_size = 8M
read_buffer = 4M
write_buffer = 4M
EOF
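# Scale the cache/buffer sizes with available memory; Mem is assumed to be
# the host RAM in MB, computed by the calling installer.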
if [ $Mem -gt 1500 -a $Mem -le 2500 ];then
    sed -i 's@^thread_cache_size.*@thread_cache_size = 16@' /etc/my.cnf
    sed -i 's@^query_cache_size.*@query_cache_size = 16M@' /etc/my.cnf
    sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 16M@' /etc/my.cnf
    sed -i 's@^key_buffer_size.*@key_buffer_size = 16M@' /etc/my.cnf
    sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 128M@' /etc/my.cnf
    sed -i 's@^tmp_table_size.*@tmp_table_size = 32M@' /etc/my.cnf
    sed -i 's@^table_open_cache.*@table_open_cache = 256@' /etc/my.cnf
elif [ $Mem -gt 2500 -a $Mem -le 3500 ];then
    sed -i 's@^thread_cache_size.*@thread_cache_size = 32@' /etc/my.cnf
    sed -i 's@^query_cache_size.*@query_cache_size = 32M@' /etc/my.cnf
    sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 32M@' /etc/my.cnf
    sed -i 's@^key_buffer_size.*@key_buffer_size = 64M@' /etc/my.cnf
    sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 512M@' /etc/my.cnf
    sed -i 's@^tmp_table_size.*@tmp_table_size = 64M@' /etc/my.cnf
    sed -i 's@^table_open_cache.*@table_open_cache = 512@' /etc/my.cnf
elif [ $Mem -gt 3500 ];then
    sed -i 's@^thread_cache_size.*@thread_cache_size = 64@' /etc/my.cnf
    sed -i 's@^query_cache_size.*@query_cache_size = 64M@' /etc/my.cnf
    sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 64M@' /etc/my.cnf
    sed -i 's@^key_buffer_size.*@key_buffer_size = 256M@' /etc/my.cnf
    sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 1024M@' /etc/my.cnf
    sed -i 's@^tmp_table_size.*@tmp_table_size = 128M@' /etc/my.cnf
    sed -i 's@^table_open_cache.*@table_open_cache = 1024@' /etc/my.cnf
fi
$mysql_install_dir/scripts/mysql_install_db --user=mysql --basedir=$mysql_install_dir --datadir=$mysql_data_dir
chown mysql.mysql -R $mysql_data_dir
service mysqld start
[ -z "`grep ^'export PATH=' /etc/profile`" ] && echo "export PATH=$mysql_install_dir/bin:\$PATH" >> /etc/profile
[ -n "`grep ^'export PATH=' /etc/profile`" -a -z "`grep $mysql_install_dir /etc/profile`" ] && sed -i "s@^export PATH=\(.*\)@export PATH=$mysql_install_dir/bin:\1@" /etc/profile
. /etc/profile
$mysql_install_dir/bin/mysql -e "grant all privileges on *.* to root@'127.0.0.1' identified by \"$dbrootpwd\" with grant option;"
$mysql_install_dir/bin/mysql -e "grant all privileges on *.* to root@'localhost' identified by \"$dbrootpwd\" with grant option;"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "delete from mysql.user where Password='';"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "delete from mysql.db where User='';"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "delete from mysql.proxies_priv where Host!='localhost';"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "drop database test;"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "reset master;"
rm -rf /etc/ld.so.conf.d/{mysql,mariadb,percona}*.conf
echo "$mysql_install_dir/lib" > mysql.conf
ldconfig
service mysqld stop
}
| LongTaiJun/Autoscripts | include/mysql-5.5.sh | Shell | apache-2.0 | 6,709 |
#!/bin/sh
path="`pwd`"
repositoryId=h2o-mvn-repo
url="file://${path}/mvn-repo"
ver=6.0.0
mvn deploy:deploy-file -DgroupId=h2o -DartifactId=h2o-common -Dversion=${ver} -Dpackaging=jar -Dfile=../h2o-common/target/h2o-common-${ver}.jar -Durl=$url -DrepositoryId=${repositoryId}
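# Consumers can then resolve h2o:h2o-common:${ver} by adding the generated
# mvn-repo directory as a file:// repository in their own pom.xml.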
| sixshot626/h2o | deploy/deploy_common.sh | Shell | bsd-2-clause | 278 |
pip install dash==0.21.0 # The core dash backend
pip install dash-renderer==0.12.1 # The dash front-end
pip install dash-html-components==0.10.0 # HTML components
pip install dash-core-components==0.22.1 # Supercharged components
pip install plotly --upgrade # Latest Plotly graphing library
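# A quick import check for the pinned versions above (assumes the same
# python/pip environment):
# python -c "import dash; print(dash.__version__)"   # expect 0.21.0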
| UWSEDS/LectureNotes | PreFall2018/Programming-Interactivity/setup_dash.sh | Shell | bsd-2-clause | 297 |
#!/bin/sh -eu
# Put all submodules on master from upstream:
git submodule foreach 'git checkout master && git pull'
| xmonad/xmonad-testing | bin/pull-all.sh | Shell | bsd-2-clause | 117 |
# custom settings for domain
# [email protected], 19.11.2014
#
# * file is sourced in setDomainEnv.sh
# * production mode
# * memory args
#
# if we set USER_MEM_ARGS, those values will override all other memory
# settings in setDomainEnv.sh. Distinction is made only by server name.
#
# before changes: parameter for all services
# "-Xms256m -Xmx512m -XX:PermSize=128m -XX:MaxPermSize=512m -XX:CompileThreshold=8000"
# "-Xms1024m -Xmx2048m -XX:PermSize=512m -XX:MaxPermSize=1024m"
# "-Xms1024m -Xmx2048m -XX:PermSize=512m -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=256m"
PRODUCTION_MODE="true"
export PRODUCTION_MODE
uma=""
case ${SERVER_NAME} in
    AdminServer)
        uma="-Xms1024m -Xmx1536m -XX:MaxPermSize=512m"
        ;;
    *soa*)
        uma="-Xms2g -Xmx3g -XX:MaxPermSize=1024m"
        ;;
    *oim*)
        uma="-Xms2g -Xmx4g -XX:MaxPermSize=1024m"
        #EXTRA_JAVA_PROPERTIES="${EXTRA_JAVA_PROPERTIES} -Dimint.env=production -Dimint.config=/l/ora/config/deploy/imint/current/config/imint.yml"
        #export EXTRA_JAVA_PROPERTIES
        ;;
esac
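# any other server name falls through with uma empty and is reported below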
if [ "${uma}" == "" ]
then
echo "WARNING: server name ${SERVER_NAME} unknown (bin/setDomainCustEnv.sh), no memory settings applied"
echo
else
USER_MEM_ARGS=${uma}
export USER_MEM_ARGS
echo "Memory settings defined in bin/setDomainCustEnv.sh: ${uma}"
echo
fi
| kapfenho/iam-deployer | lib/identity/domain/setCustDomainEnv.sh | Shell | bsd-2-clause | 1,307 |
#!/usr/bin/env sh
# test_net_seg.bin test_proto pre_train_model label.txt outputfolder [CPU/GPU]
ROOTFILE=/nfs/hn46/xiaolonw/pose_cnncode/caffe-3dnormal_joint_pose
GLOG_logtostderr=1 /nfs/hn46/xiaolonw/pose_cnncode/caffe-3dnormal_joint_pose/build_compute-0-5/tools/test_net_pose.bin /nfs/hn46/xiaolonw/pose_cnncode/caffe-3dnormal_joint_pose/posescript/pose_flic_3neg/pose_test.prototxt /nfs/hn38/users/xiaolonw/pose_models/flic_coarse/pose__iter_44000 /nfs/hn38/users/xiaolonw/FLIC/data/testlist.txt /nfs/hn38/users/xiaolonw/FLIC/data/results
| xiaolonw/caffe-3dnormal_joint_pose | posescript/pose_flic_3neg/test_pose.sh | Shell | bsd-2-clause | 648 |
#!/bin/bash
kill "$(cat ../logs/nginx.pid)"
| whm2300/nginx | test/sbin/stop.sh | Shell | bsd-2-clause | 46 |
#!/bin/bash
/etc/init.d/nscd start
/etc/init.d/nslcd start
| coco-project/dockerfiles | base-ldap/lib/02_start_ldap_services.sh | Shell | bsd-3-clause | 60 |
#!/bin/bash
VERSION="0.0.14 test"
afftype=".txt"
# trap keyboard interrupt (control-c)
trap control_c SIGINT
function setPath {
cat <<SETPATH
--------------------------------------------------------------------------------------
Error locating ANTS
--------------------------------------------------------------------------------------
It seems that the ANTSPATH environment variable is not set. Please add the ANTSPATH
variable. This can be achieved by editing the .bash_profile in the home directory.
Add:
ANTSPATH=/home/yourname/bin/ants/
Or the correct location of the ANTS binaries.
Alternatively, edit this script ( `basename $0` ) to set up this parameter correctly.
SETPATH
exit 1
}
# Uncomment the line below in case you have not set the ANTSPATH variable in your environment.
# export ANTSPATH=${ANTSPATH:="$HOME/bin/ants/"} # EDIT THIS
#ANTSPATH=YOURANTSPATH
if [ ${#ANTSPATH} -le 3 ]
then
    setPath >&2
fi
if [ ! -s ${ANTSPATH}/ANTS ] ; then
    echo "ANTS program can't be found. Please (re)define \$ANTSPATH in your environment."
    exit
fi
# Test availability of helper scripts.
# No need to test this more than once. Can reside outside of the main loop.
ANTSSCRIPTNAME=${ANTSPATH}/antsIntroduction.sh
PEXEC=${ANTSPATH}/ANTSpexec.sh
SGE=${ANTSPATH}/waitForSGEQJobs.pl
PBS=${ANTSPATH}/waitForPBSQJobs.pl
XGRID=${ANTSPATH}/waitForXGridJobs.pl
SLURM=${ANTSPATH}/waitForSlurmJobs.pl
fle_error=0
for FLE in $ANTSSCRIPTNAME $PEXEC $SGE $XGRID $SLURM
do
    if [ ! -x $FLE ] ;
    then
        echo
        echo "--------------------------------------------------------------------------------------"
        echo " FILE $FLE DOES NOT EXIST -- OR -- IS NOT EXECUTABLE !!! $0 will terminate."
        echo "--------------------------------------------------------------------------------------"
        echo " if the file is not executable, please change its permissions. "
        fle_error=1
    fi
done
if [ $fle_error = 1 ] ; then
    exit 1
fi
#assuming .nii.gz as default file type. This is the case for ANTS 1.7 and up
function Usage {
cat <<USAGE
Usage:
`basename $0` -d ImageDimension -o OUTPREFIX <other options> <images>
Compulsory arguments (minimal command line requires SGE cluster, otherwise use -c & -j options):
-d: ImageDimension: 2 or 3 (for 2 or 3 dimensional registration of single volume)
ImageDimension: 4 (for template generation of time-series data)
-o: OUTPREFIX; A prefix that is prepended to all output files.
<images> List of images in the current directory, eg *_t1.nii.gz. Should be at the end
of the command.
NB: All images to be added to the template should be in the same directory, and this script
should be invoked from that directory.
Optional arguments:
-c: Control for parallel computation (default 1) -- 0 == run serially, 1 == SGE qsub,
2 == use PEXEC (localhost), 3 == Apple XGrid, 4 == PBS qsub, 5 == SLURM
-q: Set default queue for PBS jobs (default: nopreempt)
-g: Gradient step size (default 0.25) for template update. Does not affect the step size of individual registrations. The
default of 0.25 should not be increased, smaller numbers result in more cautious template update steps.
-i: Iteration limit (default 4) -- iterations of the template construction (Iteration limit)*NumImages registrations.
-j: Number of cpu cores to use (default: 2; -- requires "-c 2")
-m: Max-iterations in each registration, eg 30x90x30
-n: N4BiasFieldCorrection of moving image (default 1) -- 0 == off, 1 == on. If 1, will run N4 before each registration. It is
more efficient to run N4BiasFieldCorrection on the input images once, then build a template from the corrected images.
-p: Commands to prepend to job scripts (e.g., change into appropriate directory, set paths, etc)
-r: Do rigid-body registration of inputs before creating template (default 0) -- 0 == off 1 == on. Only useful when
you do not have an initial template
-s: Type of similarity metric used for registration.
-t: Type of transformation model used for registration.
-x: XGrid arguments (e.g., -x "-p password -h controlhost")
-z: Use this volume as the target of all inputs. When not used, the script
will create an unbiased starting point by averaging all inputs. Use the full path!
If you do not have an initial template, it is advisable to run a few iterations with affine
normalization only (-m 1x0x0) to get a sensible initial template, then pass this with -z
to run full deformable registration.
Example:
`basename $0` -d 3 -m 30x50x20 -t GR -s CC -c 1 -o MY -z InitialTemplate.nii.gz *RF*T1x.nii.gz
- In this example 30x50x20 iterations per registration are used for template creation (that is the default)
- Greedy-SyN and CC are the metrics to guide the mapping.
- Output is prepended with MY and the initial template is InitialTemplate.nii.gz (optional).
- The -c option is set to 1, which will result in using the Sun Grid Engine (SGE) to distribute the computation.
- if you do not have SGE, read the help for multi-core computation on the local machine, or Apple X-grid options.
--------------------------------------------------------------------------------------
ANTS was created by:
--------------------------------------------------------------------------------------
Brian B. Avants, Nick Tustison and Gang Song
Penn Image Computing And Science Laboratory
University of Pennsylvania
Please reference http://www.ncbi.nlm.nih.gov/pubmed/20851191 when employing this script
in your studies. A reproducible evaluation of ANTs similarity metric performance in
brain image registration:
* Avants BB, Tustison NJ, Song G, Cook PA, Klein A, Gee JC. Neuroimage, 2011.
Also see http://www.ncbi.nlm.nih.gov/pubmed/19818860 for more details.
The script has been updated and improved since this publication.
--------------------------------------------------------------------------------------
script adapted by N.M. van Strien, http://www.mri-tutorial.com | NTNU MR-Center
--------------------------------------------------------------------------------------
Apple XGrid support by Craig Stark
--------------------------------------------------------------------------------------
USAGE
exit 1
}
function Help {
cat <<HELP
`basename $0` will make a template out of the input files using an elastic
or diffeomorphic transformation. This script builds a template iteratively from the input
images and uses Sun Grid Engine (SGE) or multiple cpu cores on the localhost (min 2) to
parallelize the registration of each subject to the template.
Usage:
`basename $0` -d ImageDimension -o OUTPREFIX <other options> <images>
Example Case:
bash `basename $0` -d 3 -m 30x50x20 -t GR -s CC -c 1 -o MY -z InitialTemplate.nii.gz *RF*T1x.nii.gz
- In this case you use 30x50x20 iterations per registration
- 4 iterations over template creation (that is the default)
- With Greedy-SyN and CC metrics to guide the mapping.
- Output is prepended with MY and the initial template is InitialTemplate.nii.gz (optional).
- The -c option is set to 1 which will try to use SGE to distribute the computation.
- If you do not have SGE, use -c 0 or -c 2 combined with -j.
- Continue reading this help file if things are not yet clear.
Compulsory arguments (minimal command line requires SGE cluster, otherwise use -c & -j options)::
-d: ImageDimension: 2 or 3 (for 2 or 3 dimensional registration of single volume)
ImageDimension: 4 (for template generation of time-series data)
-o: OUTPREFIX; A prefix that is prepended to all output files.
<images> List of images in the current directory, eg *_t1.nii.gz. Should be at the end
of the command.
NB: All files to be added to the template should be in the same directory.
Optional arguments:
-c: Control for parallel computation (default 1) -- 0 == run serially, 1 == SGE qsub,
2 == use PEXEC (localhost), 3 == Apple XGrid, 4 == PBS Grid, 5 == SLURM
-g: Gradient step size; smaller in magnitude results in more cautious steps (default 0.25). This does not affect the step size
of individual registrations; it lets you update the template more cautiously after each iteration by reducing the template
update step size from 0.25 to a smaller positive number.
-i: Iteration limit (default = 4) for template construction. requires 4*NumImages registrations.
-j: Number of cpu cores to use (default: 2; requires "-c 2").
The optimal number of cpu cores to use for template generation depends on the availability of cores, the amount of
free working memory (RAM) and the resolution of the data. High resolution datasets typically require more RAM during
processing. Running out of RAM during a calculation will slow down all processing on your computer.
-m: Max-iterations
Max-Iterations in form: JxKxL where
J = max iterations at coarsest resolution (here, reduce by power of 2^2)
K = middle resolution iterations (here,reduce by power of 2)
L = fine resolution iterations (here, full resolution) !!this level takes much
more time per iteration!!
Adding an extra value before JxKxL (i.e. resulting in IxJxKxL) would add another
iteration level.
-n: N4BiasFieldCorrection of moving image ( 0 = off; 1 = on (default) )
-p: Commands to prepend to job scripts (e.g., change into appropriate directory, set paths, etc)
-r: Do rigid-body registration of inputs before creating template (default 0) -- 0 == off 1 == on. Only useful when
you do not have an initial template
In case a template is specified (-z option), all inputs are registered to that template. If
no template is specified, the inputs will be registered to the averaged input.
-s: Type of similarity metric used for registration.
For intramodal image registration, use:
CC = cross-correlation
MI = mutual information
PR = probability mapping (default)
MSQ = mean square difference (Demons-like)
SSD = sum of squared differences
For intermodal image registration, use:
MI = mutual information
PR = probability mapping (default)
-t: Type of transformation model used for registration.
For rigid image registration, use:
RI = Purely rigid
RA = Affine rigid
For elastic image registration, use:
EL = elastic transformation model (less deformation possible)
For diffeomorphic image registration, use:
SY = SyN with time (default) with arbitrary number of time points in time discretization
S2 = SyN with time optimized specifically for 2 time points in the time discretization
GR = Greedy SyN
EX = Exponential
DD = Diffeomorphic Demons style exponential mapping
-x: XGrid arguments (e.g., -x "-p password -h controlhost")
-z: Use this volume as the target of all inputs. When not used, the script
will create an unbiased starting point by averaging all inputs. Use the full path!
If you do not have an initial template, it is advisable to run a few iterations with affine
normalization only (-m 1x0x0) to get a sensible initial template, then pass this with -z
to run full deformable registration.
Requirements:
This scripts relies on the following scripts in your $ANTSPATH directory. The script
will terminate prematurely if these files are not present or are not executable.
- antsIntroduction.sh
- pexec.sh
- waitForSGEQJobs.pl (only for use with Sun Grid Engine)
- ANTSpexec.sh (only for use with localhost parallel execution)
- waitForXGridJobs.pl (only for use with Apple XGrid)
- waitForSlurmJobs.pl (only for use with SLURM)
--------------------------------------------------------------------------------------
Get the latest ANTS version at:
--------------------------------------------------------------------------------------
http://sourceforge.net/projects/advants/
--------------------------------------------------------------------------------------
Read the ANTS documentation at:
--------------------------------------------------------------------------------------
http://picsl.upenn.edu/ANTS/
--------------------------------------------------------------------------------------
ANTS was created by:
--------------------------------------------------------------------------------------
Brian B. Avants, Nick Tustison and Gang Song
Penn Image Computing And Science Laboratory
University of Pennsylvania
Please reference http://www.ncbi.nlm.nih.gov/pubmed/20851191 when employing this script
in your studies. A reproducible evaluation of ANTs similarity metric performance in
brain image registration:
* Avants BB, Tustison NJ, Song G, Cook PA, Klein A, Gee JC. Neuroimage, 2011.
Also see http://www.ncbi.nlm.nih.gov/pubmed/19818860 for more details.
The script has been updated and improved since this publication.
--------------------------------------------------------------------------------------
script adapted by N.M. van Strien, http://www.mri-tutorial.com | NTNU MR-Center
--------------------------------------------------------------------------------------
Apple XGrid support by Craig Stark
--------------------------------------------------------------------------------------
HELP
exit 1
}
function reportMappingParameters {
cat <<REPORTMAPPINGPARAMETERS
--------------------------------------------------------------------------------------
Mapping parameters
--------------------------------------------------------------------------------------
ANTSPATH is $ANTSPATH
Dimensionality: $DIM
N4BiasFieldCorrection: $N4CORRECT
Similarity Metric: $METRICTYPE
Transformation: $TRANSFORMATIONTYPE
Regularization: $REGULARIZATION
MaxIterations: $MAXITERATIONS
Number Of MultiResolution Levels: $NUMLEVELS
OutputName prefix: $OUTPUTNAME
Template: $TEMPLATE
Template Update Steps: $ITERATIONLIMIT
Template population: $IMAGESETVARIABLE
--------------------------------------------------------------------------------------
REPORTMAPPINGPARAMETERS
}
function shapeupdatetotemplate {
# local declaration of values
dim=${DIM}
template=${TEMPLATE}
templatename=${TEMPLATENAME}
outputname=${OUTPUTNAME}
gradientstep=-${GRADIENTSTEP}
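# (the negative gradient step reverses the averaged warp below, pulling the
#  template toward the mean shape of the inputs)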
# debug only
# echo $dim
# echo ${template}
# echo ${templatename}
# echo ${outputname}
# echo ${outputname}*formed.nii*
# echo ${gradientstep}
# We find the average warp to the template and apply its inverse to the template image
# This keeps the template shape stable over multiple iterations of template building
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 1"
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/AverageImages $dim ${template} 1 ${outputname}*formed.nii.gz
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 2"
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/AverageImages $dim ${templatename}warp.nii.gz 0 `ls ${outputname}*Warp.nii.gz | grep -v "InverseWarp"`
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 3"
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/MultiplyImages $dim ${templatename}warp.nii.gz ${gradientstep} ${templatename}warp.nii.gz
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 4"
echo "--------------------------------------------------------------------------------------"
rm -f ${templatename}Affine${afftype}
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 5"
echo "--------------------------------------------------------------------------------------"
# Averaging and inversion code --- both are 1st order estimates.
# if [ ${dim} -eq 2 ] ; then
# ANTSAverage2DAffine ${templatename}Affine${afftype} ${outputname}*Affine${afftype}
# elif [ ${dim} -eq 3 ] ; then
# ANTSAverage3DAffine ${templatename}Affine${afftype} ${outputname}*Affine${afftype}
# fi
${ANTSPATH}/AverageAffineTransform ${dim} ${templatename}Affine${afftype} ${outputname}*Affine${afftype}
${ANTSPATH}/WarpImageMultiTransform ${dim} ${templatename}warp.nii.gz ${templatename}warp.nii.gz -i ${templatename}Affine${afftype} -R ${template}
${ANTSPATH}/WarpImageMultiTransform ${dim} ${template} ${template} -i ${templatename}Affine${afftype} ${templatename}warp.nii.gz ${templatename}warp.nii.gz ${templatename}warp.nii.gz ${templatename}warp.nii.gz -R ${template}
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 6"
echo "--------------------------------------------------------------------------------------"
echo
${ANTSPATH}/MeasureMinMaxMean ${dim} ${templatename}warp.nii.gz ${templatename}warplog.txt 1
}
function ANTSAverage2DAffine {
OUTNM=${templatename}Affine${afftype}
FLIST=${outputname}*Affine${afftype}
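# The *Affine files are ITK text transforms: the "Parameters:" line carries the
# matrix entries followed by the translation, and "FixedParameters:" the centre
# of rotation. Each field is summed over all inputs and divided by NFILES.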
NFILES=0
PARAM1=0
PARAM2=0
PARAM3=0
PARAM4=0
PARAM5=0
PARAM6=0
PARAM7=0
PARAM8=0
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM1=` awk -v a=$PARAM1 -v b=$x 'BEGIN{print (a + b)}' ` ; let NFILES=$NFILES+1 ; done
PARAM1=` awk -v a=$PARAM1 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM2=` awk -v a=$PARAM2 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM2=` awk -v a=$PARAM2 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 4 `
for x in $LL ; do PARAM3=` awk -v a=$PARAM3 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM3=` awk -v a=$PARAM3 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 5 `
for x in $LL ; do PARAM4=` awk -v a=$PARAM4 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM4=` awk -v a=$PARAM4 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 6 `
for x in $LL ; do PARAM5=` awk -v a=$PARAM5 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM5=0 # ` awk -v a=$PARAM5 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 7 `
for x in $LL ; do PARAM6=` awk -v a=$PARAM6 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM6=0 # ` awk -v a=$PARAM6 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM7=` awk -v a=$PARAM7 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM7=` awk -v a=$PARAM7 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM8=` awk -v a=$PARAM8 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM8=` awk -v a=$PARAM8 -v b=$NFILES 'BEGIN{print (a / b)}' `
echo "# Insight Transform File V1.0 " > $OUTNM
echo "# Transform 0 " >> $OUTNM
echo "Transform: MatrixOffsetTransformBase_double_2_2 " >> $OUTNM
echo "Parameters: $PARAM1 $PARAM2 $PARAM3 $PARAM4 $PARAM5 $PARAM6 " >> $OUTNM
echo "FixedParameters: $PARAM7 $PARAM8 " >> $OUTNM
}
function ANTSAverage3DAffine {
OUTNM=${templatename}Affine${afftype}
FLIST=${outputname}*Affine${afftype}
NFILES=0
PARAM1=0
PARAM2=0
PARAM3=0
PARAM4=0
PARAM5=0
PARAM6=0
PARAM7=0
PARAM8=0
PARAM9=0
PARAM10=0
PARAM11=0
PARAM12=0
PARAM13=0
PARAM14=0
PARAM15=0
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM1=` awk -v a=$PARAM1 -v b=$x 'BEGIN{print (a + b)}' ` ; let NFILES=$NFILES+1 ; done
PARAM1=` awk -v a=$PARAM1 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM2=` awk -v a=$PARAM2 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM2=` awk -v a=$PARAM2 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 4 `
for x in $LL ; do PARAM3=` awk -v a=$PARAM3 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM3=` awk -v a=$PARAM3 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 5 `
for x in $LL ; do PARAM4=` awk -v a=$PARAM4 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM4=` awk -v a=$PARAM4 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 6 `
for x in $LL ; do PARAM5=` awk -v a=$PARAM5 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM5=` awk -v a=$PARAM5 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 7 `
for x in $LL ; do PARAM6=` awk -v a=$PARAM6 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM6=` awk -v a=$PARAM6 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 8 `
for x in $LL ; do PARAM7=` awk -v a=$PARAM7 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM7=` awk -v a=$PARAM7 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 9 `
for x in $LL ; do PARAM8=` awk -v a=$PARAM8 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM8=` awk -v a=$PARAM8 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 10 `
for x in $LL ; do PARAM9=` awk -v a=$PARAM9 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM9=` awk -v a=$PARAM9 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 11 `
for x in $LL ; do PARAM10=` awk -v a=$PARAM10 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM10=0 # ` awk -v a=$PARAM10 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 12 `
for x in $LL ; do PARAM11=` awk -v a=$PARAM11 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM11=0 # ` awk -v a=$PARAM11 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 13 `
for x in $LL ; do PARAM12=` awk -v a=$PARAM12 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM12=0 # ` awk -v a=$PARAM12 -v b=$NFILES 'BEGIN{print (a / b)}' `
# origin params below
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM13=` awk -v a=$PARAM13 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM13=` awk -v a=$PARAM13 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM14=` awk -v a=$PARAM14 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM14=` awk -v a=$PARAM14 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 4 `
for x in $LL ; do PARAM15=` awk -v a=$PARAM15 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM15=` awk -v a=$PARAM15 -v b=$NFILES 'BEGIN{print (a / b)}' `
echo "# Insight Transform File V1.0 " > $OUTNM
echo "# Transform 0 " >> $OUTNM
echo "Transform: MatrixOffsetTransformBase_double_3_3 " >> $OUTNM
echo "Parameters: $PARAM1 $PARAM2 $PARAM3 $PARAM4 $PARAM5 $PARAM6 $PARAM7 $PARAM8 $PARAM9 $PARAM10 $PARAM11 $PARAM12 " >> $OUTNM
echo "FixedParameters: $PARAM13 $PARAM14 $PARAM15 " >> $OUTNM
}
function jobfnamepadding {
files=`ls job*.sh`
# e.g. "job3_r.sh" (9 characters) is renamed to "job_0003_r.sh"
BASENAME1=`echo $files | cut -d 'b' -f 1`
for file in ${files}
do
if [ "${#file}" -eq "9" ]
then
BASENAME2=`echo $file | cut -d 'b' -f 2 `
mv "$file" "${BASENAME1}b_000${BASENAME2}"
elif [ "${#file}" -eq "10" ]
then
BASENAME2=`echo $file | cut -d 'b' -f 2 `
mv "$file" "${BASENAME1}b_00${BASENAME2}"
elif [ "${#file}" -eq "11" ]
then
BASENAME2=`echo $file | cut -d 'b' -f 2 `
mv "$file" "${BASENAME1}b_0${BASENAME2}"
fi
done
}
cleanup()
# example cleanup function
{
cd ${currentdir}/
echo -en "\n*** Performing cleanup, please wait ***\n"
# 1st attempt to kill all remaining processes
# put all related processes in array
runningANTSpids=( `ps -C ANTS -C N4BiasFieldCorrection -C ImageMath| awk '{ printf "%s\n", $1 ; }'` )
# debug only
#echo list 1: ${runningANTSpids[@]}
# kill these processes, skip the first since it is text and not a PID
for ((i = 1; i < ${#runningANTSpids[@]} ; i++))
do
echo "killing: ${runningANTSpids[${i}]}"
kill ${runningANTSpids[${i}]}
done
return $?
}
control_c()
# run if user hits control-c
{
echo -en "\n*** User pressed CTRL + C ***\n"
cleanup
if [ $DOQSUB -eq 1 ] ; then
qdel $jobIDs
elif [ $DOQSUB -eq 5 ]; then
scancel $jobIDs
fi
echo -en "\n*** Script cancelled by user ***\n"
exit $?
}
#initializing variables with global scope
time_start=`date +%s`
currentdir=`pwd`
nargs=$#
MAXITERATIONS=30x90x20
LABELIMAGE=0 # initialize optional parameter
METRICTYPE=CC # initialize optional parameter
TRANSFORMATIONTYPE="GR" # initialize optional parameter
# we use a more constrained regularization for 4D mapping b/c we expect deformations to be relatively small and local.
# NOTE: option parsing has not run yet (the original test checked an unset $dim), so scan the raw arguments for "-d 4"
if [[ " $* " == *" -d 4 "* ]] ; then
TRANSFORMATIONTYPE="GR_Constrained"
fi
N4CORRECT=1 # initialize optional parameter
DEFQUEUE=nopreempt
DOQSUB=1 # By default, buildtemplateparallel tries to do things in parallel
GRADIENTSTEP=0.25 # Gradient step size; smaller magnitude means smaller (more cautious) steps
ITERATIONLIMIT=4
CORES=2
TDIM=0
RIGID=0
RIGIDTYPE=" --do-rigid" # set to an empty string to use affine initialization
range=0
REGTEMPLATE=target
XGRIDOPTS=""
SCRIPTPREPEND=""
# System specific queue options, eg "-q name" to submit to a specific queue
# It can be set to an empty string if you do not need any special cluster options
QSUBOPTS="" # EDIT THIS
OUTPUTNAME=antsBTP
##Getting system info from linux can be done with these variables.
# RAM=`cat /proc/meminfo | sed -n -e '/MemTotal/p' | awk '{ printf "%s %s\n", $2, $3 ; }' | cut -d " " -f 1`
# RAMfree=`cat /proc/meminfo | sed -n -e '/MemFree/p' | awk '{ printf "%s %s\n", $2, $3 ; }' | cut -d " " -f 1`
# cpu_free_ram=$((${RAMfree}/${cpu_count}))
if [ ${OSTYPE:0:6} == 'darwin' ]
then
cpu_count=`sysctl -n hw.physicalcpu`
else
cpu_count=`cat /proc/cpuinfo | grep processor | wc -l`
fi
# Provide output for Help
if [ "$1" == "-h" ]
then
Help >&2
fi
# reading command line arguments
while getopts "c:q:d:g:i:j:h:m:n:o:p:s:r:t:x:z:" OPT
do
case $OPT in
h) #help
echo "$USAGE"
exit 0
;;
c) #use SGE cluster
DOQSUB=$OPTARG
if [[ ! $DOQSUB =~ ^[0-5]$ ]] ; then
echo " DOQSUB must be an integer value (0=serial, 1=SGE qsub, 2=try pexec, 3=XGrid, 4=PBS qsub, 5=SLURM) you passed -c $DOQSUB "
exit 1
fi
;;
q) #override default qsub queue
DEFQUEUE=$OPTARG
;;
d) #dimensions
DIM=$OPTARG
if [[ ${DIM} -eq 4 ]] ; then
DIM=3
TDIM=4
fi
;;
g) #gradient stepsize (default = 0.25)
GRADIENTSTEP=$OPTARG
;;
i) #iteration limit (default = 4)
ITERATIONLIMIT=$OPTARG
;;
j) #number of cpu cores to use (default = 2)
CORES=$OPTARG
;;
m) #max iterations other than default
MAXITERATIONS=$OPTARG
;;
n) #apply bias field correction
N4CORRECT=$OPTARG
;;
o) #output name prefix
OUTPUTNAME=$OPTARG
TEMPLATENAME=${OUTPUTNAME}template
TEMPLATE=${TEMPLATENAME}.nii.gz
;;
p) #Script prepend
SCRIPTPREPEND=$OPTARG
;;
s) #similarity model
METRICTYPE=$OPTARG
;;
r) #start with rigid-body registration
RIGID=$OPTARG
;;
t) #transformation model
TRANSFORMATIONTYPE=$OPTARG
;;
x) #XGrid options
XGRIDOPTS=$OPTARG
;;
z) #initialization template
REGTEMPLATE=$OPTARG
;;
\?) # getopts issues an error message
echo "$USAGE" >&2
exit 1
;;
esac
done
# Provide different output for Usage and Help
if [ ${TDIM} -eq 4 ] && [ $nargs -lt 5 ]
then
Usage >&2
elif [ ${TDIM} -eq 4 ] && [ $nargs -eq 5 ]
then
echo ""
# This option is required to run 4D template creation on SGE with a minimal command line
elif [ $nargs -lt 6 ]
then
Usage >&2
fi
if [[ $DOQSUB -eq 1 || $DOQSUB -eq 4 ]] ; then
qq=`which qsub`
if [ ${#qq} -lt 1 ] ; then
echo "do you have qsub? if not, then choose another c option ... if so, then check where the qsub alias points ..."
exit
fi
fi
if [[ $DOQSUB -eq 5 ]]; then
qq=`which sbatch`
if [[ ${#qq} -lt 1 ]]; then
echo "do you have sbatch? if not, then choose another c option ... if so, then check where the sbatch alias points ..."
exit
fi
fi
# Creating the file list of images to make a template from.
# Shiftsize is calculated because a variable amount of arguments can be used on the command line.
# The shiftsize variable will give the correct number of arguments to skip. Issuing shift $shiftsize will
# result in skipping that number of arguments on the command line, so that only the input images remain.
shiftsize=$(($OPTIND - 1))
shift $shiftsize
# The invocation of $* will now read all remaining arguments into the variable IMAGESETVARIABLE
IMAGESETVARIABLE=$*
NINFILES=$(($nargs - $shiftsize))
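# Worked example (hypothetical): for "buildtemplateparallel.sh -d 3 -o ants_ a.nii.gz b.nii.gz",
# OPTIND ends up as 5, so shiftsize=4 and after the shift IMAGESETVARIABLE="a.nii.gz b.nii.gz"
# with NINFILES=2.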
# FSL not needed anymore, all dependent on ImageMath
# #test if FSL is available in case of 4D, exit if not
# if [ ${TDIM} -eq 4 ] && [ ${#FSLDIR} -le 0 ]
# then
# setFSLPath >&2
# fi
if [ ${NINFILES} -eq 0 ]
then
echo "Please provide at least 2 filenames for the template."
echo "Use `basename $0` -h for help"
exit 1
elif [[ ${NINFILES} -eq 1 ]]
then
range=`${ANTSPATH}/ImageMath $TDIM abs nvols ${IMAGESETVARIABLE} | tail -1 | cut -d "," -f 4 | cut -d " " -f 2 | cut -d "]" -f 1 `
if [ ${range} -eq 1 ] && [ ${TDIM} -ne 4 ]
then
echo "Please provide at least 2 filenames for the template."
echo "Use `basename $0` -h for help"
exit 1
elif [ ${range} -gt 1 ] && [ ${TDIM} -ne 4 ]
then
echo "This is a multivolume file. Use -d 4"
echo "Use `basename $0` -h for help"
exit 1
elif [ ${range} -gt 1 ] && [ ${TDIM} -eq 4 ]
then
echo
echo "--------------------------------------------------------------------------------------"
echo " Creating template of 4D input. "
echo "--------------------------------------------------------------------------------------"
#splitting volume
#setting up working dirs
tmpdir=${currentdir}/tmp_${RANDOM}_${RANDOM}_${RANDOM}_$$
(umask 077 && mkdir ${tmpdir}) || {
echo "Could not create temporary directory! Exiting." 1>&2
exit 1
}
mkdir ${tmpdir}/selection
#split the 4D file into 3D elements
cp ${IMAGESETVARIABLE} ${tmpdir}/
cd ${tmpdir}/
# ${ANTSPATH}/ImageMath $TDIM vol0.nii.gz TimeSeriesSubset ${IMAGESETVARIABLE} ${range}
# rm -f ${IMAGESETVARIABLE}
# randomly select ${nfmribins} volumes from the timeseries for averaging, placing them in the tmp/selection folder.
# If the timeseries holds more than 31 volumes, it is divided into ${nfmribins} bins and one random volume is
# taken from each bin; shorter timeseries are sampled randomly as a whole, and very short ones are used in full.
nfmribins=2
if [ ${range} -gt 31 ] ; then
BINSIZE=$((${range} / ${nfmribins}))
j=1 # initialize counter j
for ((i = 0; i < ${nfmribins} ; i++))
do
FLOOR=$((${i} * ${BINSIZE}))
BINrange=$((${j} * ${BINSIZE}))
# Retrieve random number between two limits.
number=0 #initialize
while [ "$number" -le $FLOOR ]
do
number=$RANDOM
if [ $i -lt $((nfmribins - 1)) ]
then
let "number %= $BINrange" # Scales $number down to within the current bin.
elif [ $i -eq $((nfmribins - 1)) ]
then
let "number %= $range" # Last bin: scales $number down to within the full range.
fi
done
#debug only
echo
echo "Random number between $FLOOR and $BINrange --- $number"
# echo "Random number between $FLOOR and $range --- $number"
if [ ${number} -lt 10 ]
then
${ANTSPATH}/ImageMath $TDIM selection/vol000${number}.nii.gz ExtractSlice ${IMAGESETVARIABLE} ${number}
# cp vol000${number}.nii.gz selection/
elif [ ${number} -ge 10 ] && [ ${number} -lt 100 ]
then
${ANTSPATH}/ImageMath $TDIM selection/vol00${number}.nii.gz ExtractSlice ${IMAGESETVARIABLE} ${number}
# cp vol00${number}.nii.gz selection/
elif [ ${number} -ge 100 ] && [ ${number} -lt 1000 ]
then
${ANTSPATH}/ImageMath $TDIM selection/vol0${number}.nii.gz ExtractSlice ${IMAGESETVARIABLE} ${number}
# cp vol0${number}.nii.gz selection/
fi
let j++
done
elif [ ${range} -gt ${nfmribins} ] && [ ${range} -lt 32 ]
then
for ((i = 0; i < ${nfmribins} ; i++))
do
number=$RANDOM
let "number %= $range"
if [ ${number} -lt 10 ]
then
${ANTSPATH}/ImageMath $TDIM selection/vol000${number}.nii.gz ExtractSlice ${IMAGESETVARIABLE} ${number}
# cp vol000${number}.nii.gz selection/
elif [ ${number} -ge 10 ] && [ ${number} -lt 100 ]
then
${ANTSPATH}/ImageMath $TDIM selection/vol00${number}.nii.gz ExtractSlice ${IMAGESETVARIABLE} ${number}
# cp vol00${number}.nii.gz selection/
fi
done
elif [ ${range} -le ${nfmribins} ]
then
${ANTSPATH}/ImageMath $TDIM selection/vol0.nii.gz TimeSeriesSubset ${IMAGESETVARIABLE} ${range}
# cp *.nii.gz selection/
fi
# set filelist variable
rm -f ${IMAGESETVARIABLE}
cd selection/
IMAGESETVARIABLE=`ls *.nii.gz`
fi
fi
# exit
# check for an initial template image and perform rigid body registration if requested
if [ ! -s $REGTEMPLATE ]
then
echo
echo "--------------------------------------------------------------------------------------"
echo " No initial template exists. Creating a population average image from the inputs."
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/AverageImages $DIM $TEMPLATE 1 $IMAGESETVARIABLE
else
echo
echo "--------------------------------------------------------------------------------------"
echo " Initial template found. This will be used for guiding the registration. use : $REGTEMPLATE and $TEMPLATE "
echo "--------------------------------------------------------------------------------------"
# now move the initial registration template to OUTPUTNAME, otherwise this input gets overwritten.
cp ${REGTEMPLATE} ${TEMPLATE}
fi
if [ ! -s $TEMPLATE ] ; then
echo "Your template ($TEMPLATE) was not created. This indicates trouble! You may want to check the correctness of your input parameters. Exiting."
exit
fi
# remove old job bash scripts
rm -f job*.sh
if [ "$RIGID" -eq 1 ] ;
then
count=0
jobIDs=""
RIGID_IMAGESET=""
for IMG in $IMAGESETVARIABLE
do
RIGID_IMAGESET="$RIGID_IMAGESET rigid_${IMG}"
BASENAME=` echo ${IMG} | cut -d '.' -f 1 `
exe=" ${ANTSPATH}/ANTS $DIM -m MI[${TEMPLATE},${IMG},1,32] -o rigid_${IMG} -i 0 --use-Histogram-Matching --number-of-affine-iterations 10000x10000x10000x10000x10000 $RIGIDTYPE"
exe2="${ANTSPATH}/WarpImageMultiTransform $DIM ${IMG} rigid_${IMG} rigid_${BASENAME}Affine${afftype} -R ${TEMPLATE}"
pexe=" $exe >> job_${count}_metriclog.txt "
qscript="job_${count}_qsub.sh"
rm -f $qscript
if [[ $DOQSUB -eq 5 ]]; then
# SLURM job scripts must start with a shebang
echo '#!/bin/sh' > $qscript
fi
echo "$SCRIPTPREPEND" >> $qscript
echo "$exe" >> $qscript
echo "$exe2" >> $qscript
if [ $DOQSUB -eq 1 ] ; then
id=`qsub -cwd -S /bin/bash -N antsBuildTemplate_rigid -v ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH,ANTSPATH=$ANTSPATH $QSUBOPTS $qscript | awk '{print $3}'`
jobIDs="$jobIDs $id"
sleep 0.5
elif [ $DOQSUB -eq 4 ]; then
echo "cp -R /jobtmp/pbstmp.\$PBS_JOBID/* ${currentdir}" >> $qscript;
id=`qsub -N antsrigid -v ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH,ANTSPATH=$ANTSPATH $QSUBOPTS -q $DEFQUEUE -l nodes=1:ppn=1 -l walltime=4:00:00 $qscript | awk '{print $1}'`
jobIDs="$jobIDs $id"
sleep 0.5
elif [ $DOQSUB -eq 2 ] ; then
# Send pexe and exe2 to same job file so that they execute in series
echo $pexe >> job${count}_r.sh
echo $exe2 >> job${count}_r.sh
elif [ $DOQSUB -eq 3 ] ; then
id=`xgrid $XGRIDOPTS -job submit /bin/bash $qscript | awk '{sub(/;/,"");print $3}' | tr '\n' ' ' | sed 's: *: :g'`
#echo "xgrid $XGRIDOPTS -job submit /bin/bash $qscript"
jobIDs="$jobIDs $id"
elif [[ $DOQSUB -eq 5 ]]; then
id=`sbatch --job-name=antsrigid --export=ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH,ANTSPATH=$ANTSPATH $QSUBOPTS --nodes=1 --cpus-per-task=1 --time=4:00:00 $qscript | rev | cut -f1 -d\ | rev`
jobIDs="$jobIDs $id"
sleep 0.5
elif [ $DOQSUB -eq 0 ] ; then
# execute jobs in series
$exe
$exe2
fi
((count++))
done
if [ $DOQSUB -eq 1 ];
then
# Run jobs on SGE and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS rigid registration on SGE cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
# now wait for the jobs to finish. Rigid registration is quick, so poll queue every 60 seconds
${ANTSPATH}/waitForSGEQJobs.pl 1 60 $jobIDs
# Returns 1 if there are errors
if [ ! $? -eq 0 ]; then
echo "qsub submission failed - jobs went into error state"
exit 1;
fi
fi
if [ $DOQSUB -eq 4 ];
then
# Run jobs on PBS and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS rigid registration on PBS cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
# now wait for the jobs to finish. Rigid registration is quick, so poll queue every 60 seconds
${ANTSPATH}/waitForPBSQJobs.pl 1 60 $jobIDs
# Returns 1 if there are errors
if [ ! $? -eq 0 ]; then
echo "qsub submission failed - jobs went into error state"
exit 1;
fi
fi
# Run jobs on localhost and wait to finish
if [ $DOQSUB -eq 2 ];
then
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS rigid registration on max ${CORES} cpucores. "
echo " Progress can be viewed in job*_metriclog.txt"
echo "--------------------------------------------------------------------------------------"
jobfnamepadding #adds leading zeros to the jobnames, so they are carried out chronologically
chmod +x job*.sh
$PEXEC -j ${CORES} "sh" job*.sh
fi
if [ $DOQSUB -eq 3 ];
then
# Run jobs on XGrid and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS rigid registration on XGrid cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
# now wait for the jobs to finish. Rigid registration is quick, so poll queue every 60 seconds
${ANTSPATH}/waitForXGridJobs.pl -xgridflags "$XGRIDOPTS" -verbose -delay 30 $jobIDs
# Returns 1 if there are errors
if [ ! $? -eq 0 ]; then
echo "XGrid submission failed - jobs went into error state"
exit 1;
fi
fi
if [ $DOQSUB -eq 5 ];
then
# Run jobs on SLURM and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS rigid registration on SLURM cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
# now wait for the jobs to finish. Rigid registration is quick, so poll queue every 60 seconds
${ANTSPATH}/waitForSlurmJobs.pl 1 60 $jobIDs
# Returns 1 if there are errors
if [ ! $? -eq 0 ]; then
echo "SLURM submission failed - jobs went into error state"
exit 1;
fi
fi
# Update template
${ANTSPATH}/AverageImages $DIM $TEMPLATE 1 $RIGID_IMAGESET
# cleanup and save output in separate folder
mkdir rigid
mv *.cfg rigid*.nii.gz *Affine${afftype} rigid/
# backup logs
if [ $DOQSUB -eq 1 ];
then
mv antsBuildTemplate_rigid* rigid/
# Remove qsub scripts
rm -f job_*_qsub.sh
elif [ $DOQSUB -eq 4 ];
then
mv antsrigid* rigid/
# Remove qsub scripts
rm -f job_*_qsub.sh
elif [ $DOQSUB -eq 2 ];
then
mv job*.txt rigid/
elif [ $DOQSUB -eq 3 ];
then
rm -f job_*_qsub.sh
elif [[ $DOQSUB -eq 5 ]];
then
mv slurm-*.out rigid/
mv job*.txt rigid/
# Remove qsub scripts
rm -f job_*_qsub.sh
fi
fi # endif RIGID
# Begin Main Loop
ITERATLEVEL=(` echo $MAXITERATIONS | tr 'x' ' ' `)
NUMLEVELS=${#ITERATLEVEL[@]}
# debugging only
#echo $ITERATLEVEL
#echo $NUMLEVELS
#echo ${ITERATIONLIMIT}
echo
echo "--------------------------------------------------------------------------------------"
echo " Start to build template: ${TEMPLATE}"
echo "--------------------------------------------------------------------------------------"
reportMappingParameters
i=0
while [ $i -lt ${ITERATIONLIMIT} ]
do
itdisplay=$((i+1))
rm -f ${OUTPUTNAME}*Warp*.nii*
rm -f job*.sh
# Used to save time by only running coarse registration for the first couple of iterations
# But with decent initialization, this is probably not worthwhile.
# If you uncomment this, replace MAXITERATIONS with ITERATIONS in the call to ants below
#
# # For the first couple of iterations, use high-level registration only
# # eg if MAXITERATIONS=30x90x20, then for iteration 0, do 30x0x0
# # for iteration 1 do 30x90x0, then do 30x90x20 on subsequent iterations
# if [ $i -gt $((NUMLEVELS - 1)) ]
# then
# ITERATIONS=$MAXITERATIONS
# else
#
# ITERATIONS=${ITERATLEVEL[0]}
#
# for (( n = 1 ; n < ${NUMLEVELS}; n++ ))
# do
# ITERATIONS=${ITERATIONS}x$((${ITERATLEVEL[n]} * $((n <= i)) ))
# done
# fi
# Job IDs of jobs submitted to queue in loop below
jobIDs=""
# Reinitialize count to 0
count=0
# Submit registration of each input to volume template to SGE or run locally.
for IMG in $IMAGESETVARIABLE
do
# 1 determine working dir
dir=`pwd`
# 2 determine new filename
POO=${OUTPUTNAME}${IMG}
# 3 Make variable OUTFILENAME and remove anything behind . ; for example .nii.gz.gz
OUTFN=${POO%.*.*}
# 4 Test if the output filename has only a single extension and remove it
if [ ${#OUTFN} -eq ${#POO} ]
then
OUTFN=${OUTPUTNAME}${IMG%.*}
fi
# 5 prepare registration command
exe="${ANTSSCRIPTNAME} -d ${DIM} -r ${dir}/${TEMPLATE} -i ${dir}/${IMG} -o ${dir}/${OUTFN} -m ${MAXITERATIONS} -n ${N4CORRECT} -s ${METRICTYPE} -t ${TRANSFORMATIONTYPE} -f 1 "
pexe=" $exe >> job_${count}_${i}_metriclog.txt "
# 6 submit to SGE (DOQSUB=1), PBS (DOQSUB=4), PEXEC (DOQSUB=2), XGrid (DOQSUB=3) or else run locally (DOQSUB=0)
if [ $DOQSUB -eq 1 ]; then
id=`qsub -cwd -N antsBuildTemplate_deformable_${i} -S /bin/bash -v ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH,ANTSPATH=$ANTSPATH $QSUBOPTS $exe | awk '{print $3}'`
jobIDs="$jobIDs $id"
sleep 0.5
elif [ $DOQSUB -eq 4 ]; then
qscript="job_${count}_${i}.sh"
echo "$SCRIPTPREPEND" > $qscript
echo "$exe" >> $qscript
echo "cp -R /jobtmp/pbstmp.\$PBS_JOBID/* ${currentdir}" >> $qscript;
id=`qsub -N antsdef${i} -v ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH,ANTSPATH=$ANTSPATH -q $DEFQUEUE -l nodes=1:ppn=1 -l walltime=4:00:00 $QSUBOPTS $qscript | awk '{print $1}'`
jobIDs="$jobIDs $id"
sleep 0.5
elif [ $DOQSUB -eq 2 ] ; then
echo $pexe
echo $pexe >> job${count}_${i}.sh
elif [ $DOQSUB -eq 3 ] ; then
qscript="job_${count}_${i}.sh"
#exe="${ANTSSCRIPTNAME} -d ${DIM} -r ./${TEMPLATE} -i ./${IMG} -o ./${OUTFN} -m ${MAXITERATIONS} -n ${N4CORRECT} -s ${METRICTYPE} -t ${TRANSFORMATIONTYPE} "
echo "$SCRIPTPREPEND" > $qscript
echo "$exe" >> $qscript
id=`xgrid $XGRIDOPTS -job submit /bin/bash $qscript | awk '{sub(/;/,"");print $3}' | tr '\n' ' ' | sed 's: *: :g'`
jobIDs="$jobIDs $id"
qscript="job_${count}_${i}.sh"
elif [[ $DOQSUB -eq 5 ]]; then
qscript="job_${count}_${i}.sh"
# SLURM job scripts must start with a shebang
echo '#!/bin/sh' > $qscript
echo -e "$SCRIPTPREPEND" >> $qscript
echo -e "$exe" >> $qscript
id=`sbatch --mem-per-cpu=32768M --job-name=antsdef${i} --export=ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1,LD_LIBRARY_PATH=$LD_LIBRARY_PATH,ANTSPATH=$ANTSPATH --nodes=1 --cpus-per-task=1 --time=4:00:00 $QSUBOPTS $qscript | rev | cut -f1 -d\ | rev`
jobIDs="$jobIDs $id"
sleep 0.5
elif [ $DOQSUB -eq 0 ] ; then
bash $exe
fi
# counter updated, but not directly used in this loop
count=`expr $count + 1`;
# echo " submitting job number $count " # for debugging only
done
# SGE wait for script to finish
if [ $DOQSUB -eq 1 ];
then
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS registration on SGE cluster. Iteration: $itdisplay of $ITERATIONLIMIT"
echo "--------------------------------------------------------------------------------------"
# now wait for the stuff to finish - this will take a while so poll queue every 10 mins
${ANTSPATH}/waitForSGEQJobs.pl 1 600 $jobIDs
if [ ! $? -eq 0 ]; then
echo "qsub submission failed - jobs went into error state"
exit 1;
fi
elif [ $DOQSUB -eq 4 ];
then
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS registration on PBS cluster. Iteration: $itdisplay of $ITERATIONLIMIT"
echo "--------------------------------------------------------------------------------------"
# now wait for the stuff to finish - this will take a while so poll queue every 10 mins
${ANTSPATH}/waitForPBSQJobs.pl 1 600 $jobIDs
if [ ! $? -eq 0 ]; then
echo "qsub submission failed - jobs went into error state"
exit 1;
fi
fi
# Run jobs on localhost and wait to finish
if [ $DOQSUB -eq 2 ];
then
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS registration on max ${CORES} cpucores. Iteration: $itdisplay of $ITERATIONLIMIT"
echo " Progress can be viewed in job*_${i}_metriclog.txt"
echo "--------------------------------------------------------------------------------------"
jobfnamepadding #adds leading zeros to the jobnames, so they are carried out chronologically
chmod +x job*.sh
$PEXEC -j ${CORES} sh job*.sh
fi
if [ $DOQSUB -eq 3 ];
then
# Run jobs on XGrid and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS registration on XGrid cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
# now wait for the jobs to finish. This is slow, so poll less often
${ANTSPATH}/waitForXGridJobs.pl -xgridflags "$XGRIDOPTS" -verbose -delay 300 $jobIDs
# Returns 1 if there are errors
if [ ! $? -eq 0 ]; then
echo "XGrid submission failed - jobs went into error state"
exit 1;
fi
fi
if [[ $DOQSUB -eq 5 ]];
then
# Run jobs on SLURM and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting ANTS registration on SLURM cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
# now wait for the stuff to finish - this will take a while so poll queue every 10 mins
${ANTSPATH}/waitForSlurmJobs.pl 1 600 $jobIDs
if [[ ! $? -eq 0 ]];
then
echo "SLURM submission failed - jobs went into error state"
exit 1;
fi
fi
shapeupdatetotemplate ${DIM} ${TEMPLATE} ${TEMPLATENAME} ${OUTPUTNAME} ${GRADIENTSTEP}
echo
echo "--------------------------------------------------------------------------------------"
echo " Backing up results from iteration $itdisplay"
echo "--------------------------------------------------------------------------------------"
mkdir ${TRANSFORMATIONTYPE}_iteration_${i}
cp ${TEMPLATENAME}warp*log.txt *.cfg *${OUTPUTNAME}*.nii.gz ${TRANSFORMATIONTYPE}_iteration_${i}/
# backup logs
if [ $DOQSUB -eq 1 ];
then
mv antsBuildTemplate_deformable_* ${TRANSFORMATIONTYPE}_iteration_${i}
elif [ $DOQSUB -eq 4 ];
then
mv antsdef* ${TRANSFORMATIONTYPE}_iteration_${i}
elif [ $DOQSUB -eq 2 ];
then
mv job*.txt ${TRANSFORMATIONTYPE}_iteration_${i}
elif [ $DOQSUB -eq 3 ];
then
rm -f job_*.sh
elif [[ $DOQSUB -eq 5 ]];
then
mv slurm-*.out ${TRANSFORMATIONTYPE}_iteration_${i}
mv job*.txt ${TRANSFORMATIONTYPE}_iteration_${i}
fi
((i++))
done
# end main loop
rm -f job*.sh
#cleanup of 4D files
if [ "${range}" -gt 1 ] && [ "${TDIM}" -eq 4 ]
then
mv ${tmpdir}/selection/${TEMPLATE} ${currentdir}/
cd ${currentdir}
rm -rf ${tmpdir}/
fi
time_end=`date +%s`
time_elapsed=$((time_end - time_start))
echo
echo "--------------------------------------------------------------------------------------"
echo " Done creating: ${TEMPLATE}"
echo " Script executed in $time_elapsed seconds"
echo " $(( time_elapsed / 3600 ))h $(( time_elapsed %3600 / 60 ))m $(( time_elapsed % 60 ))s"
echo "--------------------------------------------------------------------------------------"
exit 0
|
fbudin69500/ANTs
|
Scripts/buildtemplateparallel.sh
|
Shell
|
bsd-3-clause
| 50,701 |
#!/bin/bash
# Author: Nate Levesque <[email protected]>
# Language: Shell
# Filename: volumeControl.sh
#
# Description:
# Simple script to change the volume, useful to bind to keys in
# desktops/WMs that don't have their own control for it.
#
# Arguments:
# (none): displays the current volume to the terminal and pops up
# a notification with libnotify with the same info
# up: raises the volume 5%
# down: lowers the volume 5%
# mute: mutes/unmutes the volume
#
# Example:
# ./volumeControl.sh mute - toggles mute
# ./volumeControl.sh - shows current playback status
# ./volumeControl.sh up - raise volume 5%
#
MIXER_COMMAND_VOLUME_UP=true
MIXER_COMMAND_TOGGLE_MUTE=true
MIXER_COMMAND_VOLUME_DOWN=true
if which amixer &> /dev/null; then
MIXER_COMMAND_VOLUME_UP="amixer set Master 5%+"
MIXER_COMMAND_VOLUME_DOWN="amixer set Master 5%-"
MIXER_COMMAND_TOGGLE_MUTE="amixer set Master toggle"
fi
if which pamixer &> /dev/null; then
MIXER_COMMAND_VOLUME_UP="pamixer -i 5"
MIXER_COMMAND_VOLUME_DOWN="pamixer -d 5"
MIXER_COMMAND_TOGGLE_MUTE="pamixer -t"
fi
case "$1" in
"")
;;
up)
$MIXER_COMMAND_VOLUME_UP
;;
down)
$MIXER_COMMAND_VOLUME_DOWN
;;
mute)
$MIXER_COMMAND_TOGGLE_MUTE
;;
esac
|
thenaterhood/shellzilla
|
volumeControl.sh
|
Shell
|
bsd-3-clause
| 1,378 |
#!/bin/sh
# NOTE!!!!
# this script should be run via sudo
#
# 1) Before starting, build a Fedora VM or download a Fedora cloud image
# and then save the base qcow2 file. See BASE below.
#
# 2) Using virt-manager define a number of VMs (e.g. 3 VMs if that's what you
# need). You MUST ensure that the libvirt disk name matches the hostname,
# e.g. host-01 uses host-01.qcow2. This should be the default behaviour.
#
# 3) You don't need to install the OS multiple times. Either build once if you
# are need to customize the OS, or just use the cloud image.
#
# 4) ensure your dynamic range for libvirt does not overlap with your IPs. On
# a workstation or laptop, you can run : virsh net-edit default
# and change the range for the IPs allocated to guests. e.g. I use:
#
# <range start='192.168.122.101' end='192.168.122.254'/>
#
# Note: on fedora 23 it seems 192.168.124.x/24 was used, but this may vary
# based on your installation.
#
# This is to allow for static IPs on the guests.
#
# 5) Once you have your guests defined, also define them in the "guests"
# associative array (REQUIRES BASH version 4!!!).
#
# 6) Edit the script being called in chroot (see "content_update" below)
# and add your desired SSH pubkeys for your guests.
#
# This script will reset your guests to a vanilla state.
#
qemu_img_path="/var/lib/libvirt/images"
# check whether this script still contains the MYPUBKEY placeholder (see below)
if egrep -q ".*echo.*ssh-rsa MYPUBKEY.*authorized_keys$" $0 ; then
echo "You still have not updated this script with a valid ssh key for your guests."
echo -n "Do you wish to continue? [y/n]"
read answer
if [ "$answer" != "y" -a "$answer" != "Y" ]; then
echo consider running the following:
echo " sed -i \"s,\\(.*echo.*\\)ssh-rsa MYPUBKEY\\(.*authorized_keys$\\),\\1\$(cat ~/.ssh/id_rsa.pub)\\2,g\" $0"
exit 0
fi
fi
# determine the prefix for the libvirt network
net_prefix=$(virsh net-dumpxml default | grep range | awk -F\' '{ print $2 }' | awk -F. '{ print $1"."$2"."$3 }')
# this needs to exist in /var/lib/libvirt/images/
BASE=fedora-base.qcow2
declare -A guests
# The values are the 4th octet for the guests
# THIS SHOULD BE UPDATED TO MATCH WHAT YOU HAVE
guests=(
# ["host-01"]="81"
# ["host-02"]="82"
# ["host-03"]="83"
["host-04"]="84"
)
# basic sanity checks
if [ ! -f $qemu_img_path/$BASE ]; then
echo "Could not find $BASE ... aborting."
exit 1
fi
# ensure guestmount exists
if ! type -p guestmount > /dev/null ; then
echo "You don't appear to have libguestfs-tools installed, citizen."
exit 1
fi
echo "====== ensure in /etc/hosts"
for host in "${!guests[@]}" ; do
echo "$net_prefix"."${guests["$host"]}" $host
done
function do_in_chroot {
if [ ! -d $1/tmp ]; then
mkdir $1/tmp
fi
cat > $1/tmp/do_in_chroot.sh <<EOS
#!/bin/sh
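# NOTE: this file is generated by do_in_chroot() in the outer script; variables
# written there as \$name were escaped so they expand here, inside the chroot,
# while unescaped ones (like the network prefix) were expanded at generation time.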
function content_update {
name=\$1
octet=\$(cat /tmp/guest_octet)
myip=${net_prefix}.\$octet
cat > /etc/sysconfig/network-scripts/ifcfg-enp1s0 <<EOF
DEVICE="enp1s0"
BOOTPROTO="static"
ONBOOT="yes"
TYPE="Ethernet"
NAME="enp1s0"
DEVICE="enp1s0"
IPADDR="\$myip"
NETMASK="255.255.255.0"
GATEWAY="${net_prefix}.1"
DNS1="${net_prefix}.1"
EOF
echo \$name > /etc/hostname
echo GATEWAY=${net_prefix}.1 >> /etc/sysconfig/network
echo SELINUX=permissive > /etc/sysconfig/selinux
echo SELINUXTYPE=targeted >> /etc/sysconfig/selinux
mkdir /root/.ssh/
# ADD YOUR PUB SSH KEY HERE
echo ssh-rsa MYPUBKEY > /root/.ssh/authorized_keys
# END SSH PUB KEY
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
}
content_update \$1
EOS
chmod 755 $1/tmp/do_in_chroot.sh
echo ============================
cat $1/tmp/do_in_chroot.sh
echo ============================
}
function rebuild {
tmpdir=$(mktemp -d /tmp/guestXXXXXXXX)
rm -f $1.qcow2
# create the overlay
qemu-img create -b `pwd`/$BASE -f qcow2 $1.qcow2
# create dir to mount the overlay and update configs
if [ ! -d $tmpdir ]; then
echo "Something went wrong creating $tmpdir... aborting"
exit 1
fi
# mount the overlay
guestmount -a $1.qcow2 -i --rw $tmpdir
# create the script in $tmpdir/tmp/do_in_chroot.sh
do_in_chroot $tmpdir
# add all hosts to /etc/hosts in the guest
for host in "${!guests[@]}" ; do
echo "$net_prefix"."${guests["$host"]}" $host >> $tmpdir/etc/hosts
done
# store the 4th octet in chroot. This is a hack
echo "${guests["$1"]}" >> $tmpdir/tmp/guest_octet
# now call the generated script in the chroot
chroot $tmpdir /tmp/do_in_chroot.sh $1
# now warn the user if the authorized_keys was not updated ....
if grep -q MYPUBKEY $tmpdir/root/.ssh/authorized_keys ; then
echo "Warning: you did not update this script to include real ssh keys in /root/.ssh/authorized_keys"
echo " : see the content_update function and change as needed."
fi
umount $tmpdir
rmdir $tmpdir
}
# sleep for slower machines
sleep 5
cd $qemu_img_path/
for h in "${!guests[@]}" ; do
virsh destroy $h
rebuild $h
done
virsh net-destroy default
virsh net-start default
# sleep for slower machines
sleep 5
for h in "${!guests[@]}" ; do
virsh start $h
done
|
sadsfae/misc-scripts
|
shell/vm-sandbox-tool/vm-reset-fedora.sh
|
Shell
|
bsd-3-clause
| 5,251 |
#!/bin/sh -e
# Edit the following to change the name of the database user that will be created:
APP_DB_USER=piktio
APP_DB_PASS=12345
# Edit the following to change the name of the database that is created (defaults to the user name)
APP_DB_NAME=$APP_DB_USER
# Edit the following to change the version of PostgreSQL that is installed
PG_VERSION=9.3
###########################################################
# Changes below this line are probably not necessary
###########################################################
print_db_usage () {
echo "Your PostgreSQL database has been setup and can be accessed on your local machine on the forwarded port (default: 15432)"
echo " Host: localhost"
echo " Port: 15432"
echo " Database: $APP_DB_NAME"
echo " Username: $APP_DB_USER"
echo " Password: $APP_DB_PASS"
echo ""
echo "Admin access to postgres user via VM:"
echo " vagrant ssh"
echo " sudo su - postgres"
echo ""
echo "psql access to app database user via VM:"
echo " vagrant ssh"
echo " sudo su - postgres"
echo " PGUSER=$APP_DB_USER PGPASSWORD=$APP_DB_PASS psql -h localhost $APP_DB_NAME"
echo ""
echo "Env variable for application development:"
echo " DATABASE_URL=postgresql://$APP_DB_USER:$APP_DB_PASS@localhost:15432/$APP_DB_NAME"
echo ""
echo "Local command to access the database via psql:"
echo " PGUSER=$APP_DB_USER PGPASSWORD=$APP_DB_PASS psql -h localhost -p 15432 $APP_DB_NAME"
}
export DEBIAN_FRONTEND=noninteractive
PROVISIONED_ON=/etc/vm_provision_on_timestamp
if [ -f "$PROVISIONED_ON" ]
then
echo "VM was already provisioned at: $(cat $PROVISIONED_ON)"
echo "To run system updates manually login via 'vagrant ssh' and run 'apt-get update && apt-get upgrade'"
echo ""
print_db_usage
exit
fi
PG_REPO_APT_SOURCE=/etc/apt/sources.list.d/pgdg.list
if [ ! -f "$PG_REPO_APT_SOURCE" ]
then
# Add PG apt repo:
echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > "$PG_REPO_APT_SOURCE"
# Add PGDG repo key:
wget --quiet -O - http://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc | apt-key add -
fi
# Update package list and upgrade all packages
apt-get update
apt-get -y upgrade
apt-get -y install "postgresql-$PG_VERSION" "postgresql-contrib-$PG_VERSION"
PG_CONF="/etc/postgresql/$PG_VERSION/main/postgresql.conf"
PG_HBA="/etc/postgresql/$PG_VERSION/main/pg_hba.conf"
PG_DIR="/var/lib/postgresql/$PG_VERSION/main"
# Edit postgresql.conf to change listen address to '*':
sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/" "$PG_CONF"
# Append to pg_hba.conf to add password auth:
echo "host all all all md5" >> "$PG_HBA"
# Explicitly set default client_encoding
echo "client_encoding = utf8" >> "$PG_CONF"
# Restart so that all new config is loaded:
service postgresql restart
cat << EOF | su - postgres -c psql
-- Create the database user:
CREATE USER $APP_DB_USER WITH PASSWORD '$APP_DB_PASS';
-- Create the database:
CREATE DATABASE $APP_DB_NAME WITH OWNER=$APP_DB_USER
LC_COLLATE='en_US.utf8'
LC_CTYPE='en_US.utf8'
ENCODING='UTF8'
TEMPLATE=template0;
EOF
# Tag the provision time:
date > "$PROVISIONED_ON"
echo "Successfully created PostgreSQL dev virtual machine."
echo ""
print_db_usage
|
jbbrokaw/piktio
|
vagrantpg/Vagrant-setup/bootstrap.sh
|
Shell
|
bsd-3-clause
| 3,409 |
#!/usr/bin/env bash
set -euo pipefail
GIT_BRANCH="${GITHUB_REF/refs\/heads\//}"
git checkout $GIT_BRANCH
echo "On branch $GIT_BRANCH."
if [ "$GIT_BRANCH" != "master" ] && [[ "$GIT_BRANCH" != dependabot/* ]]; then
PUSH_BRANCH=true
echo "Will try to push changes."
else
PUSH_BRANCH=false
echo "Will not push changes."
fi
echo ""
echo "------- Checking TOC -------"
echo ""
# Commit the TOC if outdated
if ! git diff --exit-code ./site/_includes/docs_toc.md
then
if [ "$PUSH_BRANCH" = true ]; then
git add site/_includes/docs_toc.md site/Gemfile.lock
git commit -m "chore: update TOC [CI]"
# Push all the TOC changes
git pull --rebase
git push
else
echo "Outdated TOC."
exit 1
fi
fi
exit 0
|
uwdata/vega-lite
|
scripts/check-and-commit-toc.sh
|
Shell
|
bsd-3-clause
| 737 |
#!/bin/bash
set -e
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
versions=( "$@" )
if [ ${#versions[@]} -eq 0 ]; then
versions=( */ )
fi
versions=( "${versions[@]%/}" )
for version in "${versions[@]}"; do
fullVersion="$(curl -fsSL "https://raw.githubusercontent.com/golang/go/release-branch.go$version/VERSION" 2>/dev/null || true)"
if [ -z "$fullVersion" ]; then
fullVersion="$(curl -fsSL 'https://golang.org/dl' | grep '">go'"$version"'.*\.src\.tar\.gz<' | sed -r 's!.*go([^"/<]+)\.src\.tar\.gz.*!\1!' | sort -V | tail -1)"
fi
if [ -z "$fullVersion" ]; then
echo >&2 "warning: cannot find full version for $version"
continue
fi
fullVersion="${fullVersion#go}" # strip "go" off "go1.4.2"
versionTag="$fullVersion"
[[ "$versionTag" == *.*[^0-9]* ]] || versionTag+='.0'
(
set -x
sed -ri 's/^(ENV GOLANG_VERSION) .*/\1 '"$fullVersion"'/' "$version/Dockerfile"
sed -ri 's/^(FROM golang):.*/\1:'"$versionTag"'/' "$version/"*"/Dockerfile"
cp go-wrapper "$version/"
)
for variant in wheezy; do
if [ -d "$version/$variant" ]; then
(
set -x
cp "$version/Dockerfile" "$version/go-wrapper" "$version/$variant/"
sed -i 's/^FROM .*/FROM buildpack-deps:'"$variant"'-scm/' "$version/$variant/Dockerfile"
)
fi
done
done
|
mattrobenolt/golang
|
update.sh
|
Shell
|
bsd-3-clause
| 1,259 |
#!/bin/sh
# Package
PACKAGE="cops"
DNAME="COPS"
# Others
INSTALL_DIR="/usr/local/${PACKAGE}"
DEFAULT_CFG_FILE="/usr/local/${PACKAGE}/config_local.php.synology"
WEB_DIR="/var/services/web"
CFG_FILE="${WEB_DIR}/${PACKAGE}/config_local.php"
preinst ()
{
exit 0
}
postinst ()
{
# Link
ln -s ${SYNOPKG_PKGDEST} ${INSTALL_DIR}
# Install the web interface
cp -R ${INSTALL_DIR}/share/${PACKAGE} ${WEB_DIR}
# Create a default configuration file
if [ ! -f ${CFG_FILE} ]; then
cp ${DEFAULT_CFG_FILE} ${CFG_FILE}
sed -i -e "s|@calibre_dir@|${wizard_calibre_dir:=/volume1/calibre/}|g" ${CFG_FILE}
sed -i -e "s|@cops_title@|${wizard_cops_title:=COPS}|g" ${CFG_FILE}
sed -i -e "s|@use_url_rewriting@|${wizard_use_url_rewriting:=0}|g" ${CFG_FILE}
chmod ga+w ${CFG_FILE}
fi
exit 0
}
preuninst ()
{
exit 0
}
postuninst ()
{
# Remove link
rm -f ${INSTALL_DIR}
# Remove the web interface
rm -fr ${WEB_DIR}/${PACKAGE}
exit 0
}
preupgrade ()
{
# Save some stuff
rm -fr ${TMP_DIR}/${PACKAGE}
mkdir -p ${TMP_DIR}/${PACKAGE}
mv ${CFG_FILE} ${TMP_DIR}/${PACKAGE}/
exit 0
}
postupgrade ()
{
# Restore some stuff
rm -f ${CFG_FILE}
mv ${TMP_DIR}/${PACKAGE}/config_local.php ${CFG_FILE}
rm -fr ${TMP_DIR}/${PACKAGE}
exit 0
}
|
momiji/spksrc
|
spk/cops/src/installer.sh
|
Shell
|
bsd-3-clause
| 1,341 |
#!/bin/sh
# ignore "error" codes in the env script below
set +e
. /opt/qt56/bin/qt56-env.sh
set -e
# Switch to the gcc version we want
if [ $CC == "gcc" ]; then
export CC=gcc-6;
export CXX=g++-6;
fi
mkdir build
cd build
if [[ "$RELEASE_BUILD" == "1" ]]; then
cmake -DCMAKE_BUILD_TYPE=Release ..
else
cmake -DCMAKE_BUILD_TYPE=Debug ..
fi
make -j2
echo "--- Running unit tests ---"
./bin/renderdoccmd test unit
./bin/qrenderdoc --unittest
|
googlestadia/renderdoc
|
util/travis/linux_compile.sh
|
Shell
|
mit
| 449 |
#!/bin/bash
# Link to the binary
ln -sf /opt/{{ name }}/{{ name }} /usr/local/bin/{{ name }}
# Create an entry to launch on startup
mkdir -p $HOME/.config/autostart/
cp -f /usr/share/applications/{{ name }}.desktop $HOME/.config/autostart/{{ name }}.desktop
|
akovalyov/hipchat-desktop
|
resources/linux/after-install.sh
|
Shell
|
mit
| 260 |
#!/bin/bash
# Useful exports to have while running quicktry locally. This can be used by
# sourcing the file.
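# e.g.: source ./exports.sh && flask run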
# This tells the flask cli where to look for the current application
export PYTHONPATH=$(pwd)
# Define the location of the flask appliction
export FLASK_APP=quicktry
# This is the location to the default configuration file. This configuration
# file is important because it also points to the location of the language
# configuration.
export QUICKTRY_SETTINGS=$(pwd)/config.cfg
|
QuickTry/QuickTry-Server
|
exports.sh
|
Shell
|
mit
| 491 |
#!/bin/bash
# Run to install wkhtmltopdf in local folder
version="0.12.4"
wget https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/$version/wkhtmltox-"$version"_linux-generic-amd64.tar.xz
tar xf wkhtmltox-"$version"_linux-generic-amd64.tar.xz
|
mxklb/cuteproject
|
libs/extern/wkhtmltox.sh
|
Shell
|
mit
| 252 |
#!/usr/bin/env bash
# nvm (Node Version Manager) completion
if [ "$NVM_DIR" ] && [ -r "$NVM_DIR"/bash_completion ];
then
. "$NVM_DIR"/bash_completion
fi
|
voku/dotfiles
|
.redpill/completion/available/nvm.completion.bash
|
Shell
|
mit
| 157 |
#!/usr/bin/env bash
set -e
SCRIPT=`pwd`/$0
FILENAME=`basename $SCRIPT`
PATHNAME=`dirname $SCRIPT`
ROOT=$PATHNAME/..
BUILD_DIR=$ROOT/build
CURRENT_DIR=`pwd`
LIB_DIR=$BUILD_DIR/libdeps
PREFIX_DIR=$LIB_DIR/build/
NVM_CHECK="$PATHNAME"/checkNvm.sh
FAST_MAKE=''
NUM_CORES=1;
if [ "$(uname)" == "Darwin" ]; then
NUM_CORES=$(sysctl -n hw.ncpu);
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
NUM_CORES=$(grep -c ^processor /proc/cpuinfo);
fi
export ERIZO_HOME=$ROOT/erizo
usage()
{
cat << EOF
usage: $0 options
Compile erizo libraries:
- Erizo is the C++ core
- Erizo API is the Javascript layer of Erizo (require Erizo to be compiled)
- Erizo Controller implements the signaling, communication with clients and room management
- Spine is a node.js based Erizo client
OPTIONS:
-h Show this message
-E Compile Erizo (Only Release)
-e Compile Erizo (Release and Debug)
-A Compile Erizo API (Only Release)
-a Compile Erizo API
-c Install Erizo node modules
-d Delete Erizo object files
-f Use 4 threads to build
-s Install Spine
-t Run Tests
EOF
}
pause() {
read -p "$*"
}
check_result() {
if [ "$1" -ne 0 ]
then
exit $1
fi
}
install_erizo_release(){
echo 'Installing erizo...'
cd $ROOT/erizo
cd utils/conan-include-paths
conan export . lynckia/includes
cd ../..
if [ "$(uname)" == "Darwin" ]; then
conan install . --build IncludePathsGenerator
else
conan install . --build IncludePathsGenerator -s compiler.libcxx=libstdc++11
fi
./generateProject.sh -r
./buildProject.sh $FAST_MAKE
if [ "$DELETE_OBJECT_FILES" == "true" ]; then
./cleanObjectFiles.sh
fi
check_result $?
cd $CURRENT_DIR
}
install_erizo(){
echo 'Installing erizo...'
cd $ROOT/erizo
cd utils/conan-include-paths
conan export . lynckia/includes
cd ../..
if [ "$(uname)" == "Darwin" ]; then
conan install . --build IncludePathsGenerator
else
conan install . --build IncludePathsGenerator -s compiler.libcxx=libstdc++11
fi
./generateProject.sh
./buildProject.sh $FAST_MAKE
if [ "$DELETE_OBJECT_FILES" == "true" ]; then
./cleanObjectFiles.sh
fi
check_result $?
cd $CURRENT_DIR
}
install_erizo_api_release(){
echo 'Installing erizoAPI...'
cd $ROOT/erizoAPI
. $NVM_CHECK
nvm use
$FAST_BUILD npm install --unsafe-perm -no_debug=1
check_result $?
cd $CURRENT_DIR
}
install_erizo_api(){
echo 'Installing erizoAPI...'
cd $ROOT/erizoAPI
. $NVM_CHECK
nvm use
$FAST_BUILD npm install --unsafe-perm
check_result $?
cd $CURRENT_DIR
}
install_erizo_controller(){
echo 'Installing erizoController...'
cp $PATHNAME/rtp_media_config_default.js $ROOT/rtp_media_config.js
cp $PATHNAME/bw_distributor_config_default.js $ROOT/bw_distributor_config.js
cd $ROOT/erizo_controller
./installErizo_controller.sh
check_result $?
cd $CURRENT_DIR
}
install_spine(){
echo 'Installing erizo_native_client...'
cd $ROOT/spine
./installSpine.sh
check_result $?
cd $CURRENT_DIR
}
execute_tests(){
echo 'Testing erizo...'
cd $ROOT/erizo
./runTests.sh
check_result $?
cd $CURRENT_DIR
}
if [ "$#" -eq 0 ]
then
install_erizo
install_erizo_api
install_erizo_controller
install_spine
else
while getopts "heEaAcstfd" OPTION
do
case $OPTION in
h)
usage
exit 1
;;
E)
install_erizo_release
;;
e)
install_erizo
;;
A)
install_erizo_api_release
;;
a)
install_erizo_api
;;
c)
install_erizo_controller
;;
s)
install_spine
;;
t)
execute_tests
;;
f)
FAST_MAKE="-j$NUM_CORES"
FAST_BUILD="env JOBS=$NUM_CORES"
echo "Compiling using $NUM_CORES threads"
;;
d)
DELETE_OBJECT_FILES='true'
;;
?)
usage
exit
;;
esac
done
fi
|
lodoyun/licode
|
scripts/installErizo.sh
|
Shell
|
mit
| 3,938 |
#!/bin/bash
set -e
# If this is a pull request then we won't have access to secure variables and can't do integration tests with SauceLabs.
# In this case just do normal local tests
if [ "$TRAVIS_PULL_REQUEST" != "false" ]
then
# Run default task
gulp
else
echo "travis_fold:start:Tests"
gulp test
echo "travis_fold:end:Tests"
# Send coverage data to coveralls.io
if [ $TRAVIS_BRANCH == "master" ]
then
cat ./.tmp/coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js || true
fi
gulp
fi
|
YonatanKra/ui-grid-columns-filters
|
travis_build.sh
|
Shell
|
mit
| 523 |
#!/bin/bash
cd $(dirname "${BASH_SOURCE[0]}")
WSTEST=$(ls $HOME/Library/Python/2.*/bin/wstest 2>/dev/null)
set -e
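# Usage sketch (based on the env vars referenced below; adjust as needed):
#   UPGRADE=1 ./run.sh   # force reinstall of the Autobahn test suite
#   SERVER=1 ./run.sh    # only run the fuzzing server
#   CLIENT=1 ./run.sh    # run the client tests against an already-running server
#   ./run.sh             # full run: background server + client tests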
if [ ! -f "$WSTEST" ] || [ "$UPGRADE" == "1" ]; then
pip install --user --upgrade unittest2
pip install --user --upgrade autobahntestsuite
WSTEST=$(ls $HOME/Library/Python/2.*/bin/wstest)
fi
if [ "$SERVER" == "1" ]; then
$WSTEST -m fuzzingserver
exit
fi
if [ "$CLIENT" != "1" ]; then
$WSTEST -m fuzzingserver &
WSTEST_PID=$!
cleanup() {
kill $WSTEST_PID
if [ "$SUCCESS" == "1" ]; then
cp -f res/passing.png reports/build.png
printf "\033[0;32m[SUCCESS]\033[0m\n"
else
if [ -d "reports/clients/" ]; then
cp -f res/failing.png reports/build.png
printf "\033[0;31m[FAILURE]\033[0m\n"
else
printf "\033[0;31m[FAILURE]\033[0m Cancelled Early\n"
exit
fi
fi
printf "\033[0;33mDon't forget to run 'test/gh-pages.sh' to process the results.\033[0m\n"
}
trap cleanup EXIT
sleep 1
fi
printf "\033[0;33m[BUILDING]\033[0m\n"
rm -fr reports
mkdir -p reports
mkdir -p /tmp/SwiftWebSocket/tests
cat ../Source/WebSocket.swift > /tmp/SwiftWebSocket/tests/main.swift
echo "" >> /tmp/SwiftWebSocket/tests/main.swift
cat autobahn.swift >> /tmp/SwiftWebSocket/tests/main.swift
swift -Ounchecked /tmp/SwiftWebSocket/tests/main.swift
SUCCESS=1
|
siegesmund/RealmMeteor
|
Carthage/Checkouts/SwiftWebSocket/test/run.sh
|
Shell
|
mit
| 1,288 |
crystal build test.cr --release -o base64_cr
go build -o base64_go test.go
gccgo -O3 -g -o base64_go_gccgo test.go
g++ -O3 -o base64_cpp test.cpp -lcrypto
gcc -O3 -std=c99 -o base64_c test.c
scalac -optimize test.scala
dmd -ofbase64_d -O -release -inline test.d
gdc -o base64_d_gdc -O3 -frelease -finline test.d
ldc2 -ofbase64_d_ldc -O5 -release -inline test.d
nim c -o:base64_nim_gcc -d:release --cc:gcc --verbosity:0 test.nim
nim c -o:base64_nim_clang -d:release --cc:clang --verbosity:0 test.nim
julia -e 'Pkg.add("Codecs")'
cargo build --manifest-path base64.rs/Cargo.toml --release && cp ./base64.rs/target/release/base64 ./base64_rs
mcs -debug- -optimize+ test.cs
if [ ! -d aklomp-base64-ssse ]; then
git clone --depth 1 https://github.com/aklomp/base64.git aklomp-base64-ssse
cd aklomp-base64-ssse
SSSE3_CFLAGS=-mssse3 make
cd -
fi
gcc --std=c99 -O3 test-aklomp.c -I aklomp-base64-ssse/include/ aklomp-base64-ssse/lib/libbase64.o -o base64_c_ak_ssse
wget -qO - https://cpanmin.us | perl - -L perllib MIME::Base64::Perl
|
erickt/benchmarks
|
base64/build.sh
|
Shell
|
mit
| 1,035 |
#!/usr/bin/env bash
usage="./collect_events.sh fromFolder toFolder"
fromFolder=$1
toFolder=$2
if [ -z "$fromFolder" ]
then
echo $usage
exit 1
fi
if [ -z "$toFolder" ]
then
echo $usage
exit 1
fi
echo "collecting events from " $fromFolder " to " $toFolder
folderName=$fromFolder
target_folder=$toFolder/$folderName
mkdir -p $target_folder
target_hydro_folder=$target_folder/HYDRO_RESULTS
mkdir -p $target_hydro_folder
target_urqmd_folder=$target_folder/URQMD_RESULTS
mkdir -p $target_urqmd_folder
target_spvn_folder=$target_folder/SPVN_RESULTS
mkdir -p $target_spvn_folder
event_folder_name="EVENT_RESULTS_"
hydro_folder_name="hydro_results_"
UrQMD_file_name="particle_list_"
spvn_folder_name="spvn_results_"
total_eventNum=0
collected_eventNum=0
for ijob in `ls --color=none $fromFolder | grep "event" `;
do
eventsPath=$fromFolder/$ijob
for iev in `ls --color=none $eventsPath | grep $event_folder_name`
do
echo $iev
event_id=`echo $iev | cut -f 3 -d "_"`
hydrostatus=`tail -n 1 $eventsPath/$iev/$hydro_folder_name$event_id/run.log | cut -f 4 -d " "`
echo $hydrostatus
if [ "$hydrostatus" == "Finished." ]; then
if [ -a $eventsPath/$iev/$spvn_folder_name$event_id/particle_9999_vndata_eta_-0.5_0.5.dat ]; then
mv $eventsPath/$iev/$hydro_folder_name$event_id $target_hydro_folder
mv $eventsPath/$iev/$UrQMD_file_name$event_id.gz $target_urqmd_folder
mv $eventsPath/$iev/$spvn_folder_name$event_id $target_spvn_folder
mv $target_hydro_folder/$hydro_folder_name$event_id/eccentricities_evo_eta_-0.5_0.5.dat $target_spvn_folder/$spvn_folder_name$event_id
mv $target_hydro_folder/$hydro_folder_name$event_id/momentum_anisotropy_eta_-0.5_0.5.dat $target_spvn_folder/$spvn_folder_name$event_id
((collected_eventNum++))
fi
fi
((total_eventNum++))
done
done
echo "Collected events number: " $collected_eventNum " out of " $total_eventNum
./combine_results_into_hdf5.py $target_spvn_folder
mv SPVN_RESULTS.h5 $target_folder/$folderName.h5
rm -fr $target_spvn_folder
|
chunshen1987/HBTcorrelation_MCafterburner
|
ebe_scripts/collect_events.sh
|
Shell
|
mit
| 2,174 |
#!/bin/sh
# Best Practices: https://developers.facebook.com/docs/videos/live-video/best-practices/
# It may be possible to auto-gen the API key: https://developers.facebook.com/docs/graph-api/reference/live-video/
# "You can make a POST request to live_videos edge from the following paths"
# -t 14400 Limit to 4 hour segment as Facebook API specifies
# -g 60 Keyframe interval - every 2 seconds
FACEBOOKURL="rtmp://live-api-a.facebook.com:80/rtmp/"
echo "Enter your Facebook Live Streaming Key"
read STREAMKEY
ffmpeg -y -nostdin \
-thread_queue_size 512 \
-timeout 3000000 \
-i tcp://localhost:11000 \
-t 14400 \
-strict -2 \
-c:a aac -ac 1 -ar 48000 -b:a 128k \
-c:v libx264 \
-preset medium \
-pix_fmt yuv420p \
-r 30 \
-g 60 \
-vb 2048k -minrate 2000k -maxrate 4000k \
-bufsize 4096k -threads 2 \
-f flv "$FACEBOOKURL$STREAMKEY"
|
voc/voctomix
|
example-scripts/ffmpeg/stream-facebook.sh
|
Shell
|
mit
| 858 |
openssl s_client -connect gateway.sandbox.push.apple.com:2195 -cert pnpush.pem -key pnpush.pem
|
opentable/pubnub
|
iOS/verifyCertWithApple.sh
|
Shell
|
mit
| 95 |
#!/bin/sh
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/sys-devel/automake-wrapper/files/am-wrapper-8.sh,v 1.2 2013/02/04 14:00:23 aballier Exp $
# Executes the correct automake version.
#
# If WANT_AUTOMAKE is set (can be a whitespace delimited list of versions):
# - attempt to find an installed version using those
# - if magic keyword 'latest' is found, pick the latest version that exists
# - if nothing found, warn, and proceed as if WANT_AUTOMAKE was not set (below)
# If WANT_AUTOMAKE is not set:
# - Try to detect the version of automake used to generate things (look at
# Makefile.in and aclocal.m4 and any other useful file)
# - If detected version is not found, warn and proceed as if blank slate
# - Try to locate the latest version of automake that exists and run it
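#
# Example (illustrative): prefer automake 1.13 but fall back to the latest
# installed version:
#   WANT_AUTOMAKE="1.13 latest" automake --version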
(set -o posix) 2>/dev/null && set -o posix
_stderr() { printf 'am-wrapper: %s: %b\n' "${argv0}" "$*" 1>&2; }
warn() { _stderr "warning: $*"; }
err() { _stderr "error: $*"; exit 1; }
unset IFS
which() {
local p
IFS=: # we don't use IFS anywhere, so don't bother saving/restoring
for p in ${PATH} ; do
p="${p}/$1"
[ -e "${p}" ] && echo "${p}" && return 0
done
unset IFS
return 1
}
#
# Sanitize argv[0] since it isn't always a full path #385201
#
argv0=${0##*/}
case $0 in
${argv0})
# find it in PATH
if ! full_argv0=$(which "${argv0}") ; then
err "could not locate ${argv0}; file a bug"
fi
;;
*)
# re-use full/relative paths
full_argv0=$0
;;
esac
if ! seq 0 0 2>/dev/null 1>&2 ; then #338518
seq() {
local f l i
case $# in
1) f=1 i=1 l=$1;;
2) f=$1 i=1 l=$2;;
3) f=$1 i=$2 l=$3;;
esac
while :; do
[ $l -lt $f -a $i -gt 0 ] && break
[ $f -lt $l -a $i -lt 0 ] && break
echo $f
: $(( f += i ))
done
return 0
}
fi
#
# Set up bindings between actual version and WANT_AUTOMAKE;
# Start with last known versions to speed up lookup process.
#
LAST_KNOWN_AUTOMAKE_VER="13"
vers=$(printf '1.%s ' `seq ${LAST_KNOWN_AUTOMAKE_VER} -1 4`)
#
# Helper to scan for a usable program based on version.
#
binary=
all_vers=
find_binary() {
local v
all_vers="${all_vers} $*" # For error messages.
for v ; do
if [ -x "${full_argv0}-${v}" ] ; then
binary="${full_argv0}-${v}"
binary_ver=${v}
return 0
fi
done
return 1
}
#
# Try and find a usable automake version. First check the WANT_AUTOMAKE
# setting (whitespace delimited list), then fallback to the latest.
#
find_latest() {
if ! find_binary ${vers} ; then
# Brute force it.
find_binary $(printf '1.%s ' `seq 99 -1 ${LAST_KNOWN_AUTOMAKE_VER}`)
fi
}
for wx in ${WANT_AUTOMAKE:-latest} ; do
if [ "${wx}" = "latest" ] ; then
find_latest && break
else
find_binary ${wx} && break
fi
done
if [ -z "${binary}" ] && [ -n "${WANT_AUTOMAKE}" ] ; then
warn "could not locate installed version for WANT_AUTOMAKE='${WANT_AUTOMAKE}'; ignoring"
unset WANT_AUTOMAKE
find_latest
fi
if [ -z "${binary}" ] ; then
err "Unable to locate any usuable version of automake.\n" \
"\tI tried these versions:${all_vers}\n" \
"\tWith a base name of '${full_argv0}'."
fi
#
# autodetect helpers
#
do_awk() {
local file=$1 ; shift
local arg=$1 ; shift
local v=$(gawk "{ if (match(\$0, \"$*\", res)) { print res[${arg}]; exit } }" "${file}")
case " ${auto_vers} " in
*" ${v} "*) ;;
*) auto_vers="${auto_vers:+${auto_vers} }${v}" ;;
esac
}
#
# autodetect routine
#
if [ -z "${WANT_AUTOMAKE}" ] ; then
auto_vers=
if [ -r "Makefile.in" ] ; then
do_awk Makefile.in 2 "^# Makefile.in generated (automatically )?by automake ([0-9].[0-9]+)"
fi
if [ -r "aclocal.m4" ] ; then
do_awk aclocal.m4 1 'generated automatically by aclocal ([0-9].[0-9]+)'
do_awk aclocal.m4 1 '[[:space:]]*\\[?AM_AUTOMAKE_VERSION\\(\\[?([0-9].[0-9]+)[^)]*\\]?\\)'
fi
# We don't need to set $binary here as it has already been setup for us
# earlier to the latest available version.
if [ -n "${auto_vers}" ] ; then
if ! find_binary ${auto_vers} ; then
warn "auto-detected versions not found (${auto_vers}); falling back to latest available"
fi
fi
fi
if [ -n "${WANT_AMWRAPPER_DEBUG}" ] ; then
if [ -n "${WANT_AUTOMAKE}" ] ; then
warn "DEBUG: WANT_AUTOMAKE is set to ${WANT_AUTOMAKE}"
fi
warn "DEBUG: will execute <${binary}>"
fi
#
# for further consistency
#
export WANT_AUTOMAKE="${binary_ver}"
#
# Now try to run the binary
#
if [ ! -x "${binary}" ] ; then
# this shouldn't happen
err "${binary} is missing or not executable.\n" \
"\tPlease try installing the correct version of automake."
fi
exec "${binary}" "$@"
# The shell will error out if `exec` failed.
|
rafaelmartins/gentoo-rpi
|
sys-devel/automake-wrapper/files/am-wrapper-8.sh
|
Shell
|
gpl-2.0
| 4,676 |
#!/bin/bash
#
# Copyright (C) 2007 Karel Zak <[email protected]>
#
# This file is part of util-linux.
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
TS_TOPDIR=$(cd ${0%/*} && pwd)
SUBTESTS=
EXCLUDETESTS=
OPTS=
top_srcdir=
top_builddir=
parallel_jobs=1
function num_cpus()
{
if lscpu -p &>/dev/null; then
lscpu -p | grep -cv '^#'
else
echo 1
fi
}
while [ -n "$1" ]; do
case "$1" in
--force)
OPTS="$OPTS --force"
;;
--fake)
OPTS="$OPTS --fake"
;;
--memcheck)
OPTS="$OPTS --memcheck"
;;
--verbose)
OPTS="$OPTS --verbose"
;;
--skip-loopdevs)
OPTS="$OPTS --skip-loopdevs"
;;
--nonroot)
if [ $(id -ru) -eq 0 ]; then
echo "Ignore util-linux test suite [non-root UID expected]."
exit 0
fi
;;
--srcdir=*)
top_srcdir="${1##--srcdir=}"
;;
--builddir=*)
top_builddir="${1##--builddir=}"
;;
	--parallel=*)
		parallel_jobs="${1##--parallel=}"
		;;
	--parallel)
		parallel_jobs=$(num_cpus)
;;
--exclude=*)
EXCLUDETESTS="${1##--exclude=}"
;;
--*)
echo "Unknown option $1"
echo "Usage: "
echo " $(basename $0) [options] [<component> ...]"
echo "Options:"
echo " --force execute demanding tests"
echo " --fake do not run, setup tests only"
echo " --memcheck run with valgrind"
echo " --verbose verbose mode"
echo " --nonroot ignore test suite if user is root"
echo " --srcdir=<path> autotools top source directory"
echo " --builddir=<path> autotools top build directory"
echo " --parallel=<num> number of parallel test jobs, default: num cpus"
echo " --exclude=<list> exclude tests by list '<utilname>/<testname> ..'"
echo
exit 1
;;
*)
SUBTESTS="$SUBTESTS $1"
;;
esac
shift
done
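# Example invocations (illustrative):
#   ./run.sh --parallel mount          # run only the mount tests on all CPUs
#   ./run.sh --force --exclude="kill/all_processes"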
# For compatibility with autotools is necessary to differentiate between source
# (with test scripts) and build (with temporary files) directories when
# executed by our build-system.
#
# The default is the source tree with this script.
#
if [ -z "$top_srcdir" ]; then
top_srcdir="$TS_TOPDIR/.."
fi
if [ -z "$top_builddir" ]; then
top_builddir="$TS_TOPDIR/.."
fi
OPTS="$OPTS --srcdir=$top_srcdir --builddir=$top_builddir"
declare -a comps
if [ -n "$SUBTESTS" ]; then
# selected tests only
for s in $SUBTESTS; do
if [ -d "$top_srcdir/tests/ts/$s" ]; then
comps+=( $(find $top_srcdir/tests/ts/$s -type f -perm /a+x -regex ".*/[^\.~]*") )
else
echo "Unknown test component '$s'"
exit 1
fi
done
else
if [ ! -f "$top_builddir/test_ttyutils" ]; then
echo "Tests not compiled! Run 'make check' to fix the problem."
exit 1
fi
comps=( $(find $top_srcdir/tests/ts/ -type f -perm /a+x -regex ".*/[^\.~]*") )
fi
if [ -n "$EXCLUDETESTS" ]; then
declare -a xcomps # temporary array
for ts in ${comps[@]}; do
tsname=${ts##*ts/} # test name
if [[ "$EXCLUDETESTS" == *${tsname}* ]]; then
#echo "Ignore ${tsname}."
true
else
xcomps+=($ts)
fi
done
comps=("${xcomps[@]}") # replace the array
fi
unset LIBMOUNT_DEBUG
unset LIBBLKID_DEBUG
unset LIBFDISK_DEBUG
unset LIBSMARTCOLS_DEBUG
echo
echo "-------------------- util-linux regression tests --------------------"
echo
echo " For development purpose only. "
echo " Don't execute on production system! "
echo
if [ $parallel_jobs -gt 1 ]; then
	echo "              Executing the tests in parallel ($parallel_jobs jobs)    "
echo
OPTS="$OPTS --parallel"
fi
count=0
>| $top_builddir/tests/failures
printf "%s\n" ${comps[*]} |
sort |
     xargs -I '{}' -P $parallel_jobs -n 1 bash -c "'{}' \"$OPTS\" ||
echo 1 >> $top_builddir/tests/failures"
declare -a fail_file
fail_file=( $( < $top_builddir/tests/failures ) ) || exit 1
rm -f $top_builddir/tests/failures
echo
echo "---------------------------------------------------------------------"
if [ ${#fail_file[@]} -eq 0 ]; then
echo " All ${#comps[@]} tests PASSED"
res=0
else
echo " ${#fail_file[@]} tests of ${#comps[@]} FAILED"
res=1
fi
echo "---------------------------------------------------------------------"
exit $res
|
Romutk/lab3.2n
|
util-linux/tests/run.sh
|
Shell
|
gpl-2.0
| 4,508 |
#!/bin/sh
##########################################################################
# compile.sh - Unix/X11 configuration script
# $Date: 2007/07/01 09:46:46 $
# $Revision: 1.15 $
#
# This is a minimalist configuration script for GLFW, which is used to
# determine the availability of certain features.
#
# This script is not very nice at all (especially the Makefile generation
# is very ugly and hardcoded). Hopefully it will be cleaned up in the
# future, but for now it does a pretty good job.
##########################################################################
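# Usage (illustrative; typically run from the GLFW source root):
#   sh ./compile.sh          # verbose configuration
#   sh ./compile.sh --quiet  # suppress progress messages (log kept in config.log)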
##########################################################################
# Check arguments
##########################################################################
silent=no
for arg in "$@"; do
{
case "$arg" in
# Silent?
-q | -quiet | --quiet | --quie | --qui | --qu | --q \
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
esac;
}
done;
##########################################################################
# Misc.
##########################################################################
config_script=$0
# File descriptor usage:
# 0 standard input
# 1 file creation
# 2 errors and warnings
# 3 some systems may open it to /dev/tty
# 4 used on the Kubota Titan
# 5 compiler messages saved in config.log
# 6 checking for... messages and results
exec 5>./config.log
if [ "x$silent" = xyes ]; then
exec 6>/dev/null
else
exec 6>&1
fi
echo "\
This file contains any messages produced by compilers while
running $config_script, to aid debugging if $config_script makes a mistake.
" 1>&5
##########################################################################
# Default compiler settings
##########################################################################
if [ "x$CC" = x ]; then
CC=cc
fi
CFLAGS=
LFLAGS=
LDFLAGS=
INCS=
LIBS="-lGL -lX11"
##########################################################################
# Compilation commands
##########################################################################
compile='$CC -c $CFLAGS conftest.c 1>&5'
link='$CC -o conftest $CFLAGS $LFLAGS conftest.c $LIBS 1>&5'
##########################################################################
# Check on what system we are running
##########################################################################
echo "Checking what kind of system this is... " 1>&6
case "x`uname 2> /dev/null`" in
xLinux)
CFLAGS="$CFLAGS -Dlinux"
LDFLAGS="-shared"
echo " Linux" 1>&6
;;
xDarwin)
CFLAGS="$CFLAGS"
LDFLAGS="-flat_namespace -undefined suppress"
echo " Mac OS X" 1>&6
;;
*)
LDFLAGS="-shared -soname libglfw.so"
echo " Generic Unix" 1>&6
;;
esac
echo " " 1>&6
##########################################################################
# Check for X11 libs/include directories
##########################################################################
echo "Checking for X11 libraries location... " 1>&6
# X11R6 in /usr/X11/lib ?
if [ -r "/usr/X11/lib" ]; then
LFLAGS="$LFLAGS -L/usr/X11/lib"
INCS="-I/usr/X11/include"
echo " X11 libraries location: /usr/X11/lib" 1>&6
# X11R7 in /usr/X11R7/lib ?
elif [ -r "/usr/X11R7/lib" ]; then
LFLAGS="$LFLAGS -L/usr/X11R7/lib"
INCS="-I/usr/X11R7/include"
echo " X11 libraries location: /usr/X11R7/lib" 1>&6
# X11R6 in /usr/X11R6/lib ?
elif [ -r "/usr/X11R6/lib" ]; then
LFLAGS="$LFLAGS -L/usr/X11R6/lib"
INCS="-I/usr/X11R6/include"
echo " X11 libraries location: /usr/X11R6/lib" 1>&6
# X11R5 in /usr/X11R5/lib ?
elif [ -r "/usr/X11R5/lib" ]; then
LFLAGS="$LFLAGS -L/usr/X11R5/lib"
INCS="-I/usr/X11R5/include"
echo " X11 libraries location: /usr/X11R5/lib" 1>&6
# X11R6 in /opt/X11R6/lib (e.g. QNX)?
elif [ -r "/opt/X11R6/lib" ]; then
LFLAGS="$LFLAGS -L/opt/X11R6/lib"
INCS="-I/opt/X11R6/include"
echo " X11 libraries location: /opt/X11R6/lib" 1>&6
# X11R6 in /usr/X/lib ?
elif [ -r "/usr/X/lib" ]; then
LFLAGS="$LFLAGS -L/usr/X/lib"
INCS="-I/usr/X/include"
echo " X11 libraries location: /usr/X/lib" 1>&6
else
# TODO: Detect and report X11R7 in /usr/lib
echo " X11 libraries location: Unknown (assuming linker will find them)" 1>&6
fi
echo " " 1>&6
CFLAGS="$CFLAGS $INCS"
##########################################################################
# Check if we are using GNU C
##########################################################################
echo "Checking whether we are using GNU C... " 1>&6
echo "$config_script: checking whether we are using GNU C" >&5
cat > conftest.c <<EOF
#ifdef __GNUC__
yes;
#endif
EOF
if { ac_try='$CC -E conftest.c'; { (eval echo $config_script: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
use_gcc=yes
else
use_gcc=no
fi
rm -f conftest*
echo " Using GNU C: ""$use_gcc" 1>&6
if [ "x$use_gcc" = xyes ]; then
CC=gcc
fi
echo " " 1>&6
##########################################################################
# Check for X11 RandR availability
##########################################################################
echo "Checking for X11 RandR support... " 1>&6
echo "$config_script: Checking for X11 RandR support" >&5
has_xrandr=no
cat > conftest.c <<EOF
#include <X11/Xlib.h>
#include <X11/extensions/Xrandr.h>
int main() {; return 0;}
EOF
if { (eval echo $config_script: \"$compile\") 1>&5; (eval $compile) 2>&5; }; then
rm -rf conftest*
has_xrandr=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
rm -f conftest*
echo " X11 RandR extension: ""$has_xrandr" 1>&6
if [ "x$has_xrandr" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_XRANDR"
LIBS="$LIBS -lXrandr"
fi
echo " " 1>&6
##########################################################################
# Check for X11 VidMode availability
##########################################################################
if [ "x$has_xrandr" != xyes ]; then
echo "Checking for X11 VidMode support... " 1>&6
echo "$config_script: Checking for X11 VidMode support" >&5
has_xf86vm=no
cat > conftest.c <<EOF
#include <X11/Xlib.h>
#include <X11/extensions/xf86vmode.h>
#if defined(__APPLE_CC__)
#error Not supported under Mac OS X
#endif
int main() {; return 0;}
EOF
if { (eval echo $config_script: \"$compile\") 1>&5; (eval $compile) 2>&5; }; then
rm -rf conftest*
has_xf86vm=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
rm -f conftest*
echo " X11 VidMode extension: ""$has_xf86vm" 1>&6
if [ "x$has_xf86vm" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_XF86VIDMODE"
LIBS="$LIBS -lXxf86vm -lXext"
fi
echo " " 1>&6
fi
##########################################################################
# Check for pthread support
##########################################################################
echo "Checking for pthread support... " 1>&6
echo "$config_script: Checking for pthread support" >&5
has_pthread=no
cat > conftest.c <<EOF
#include <pthread.h>
int main() {pthread_t posixID; posixID=pthread_self(); return 0;}
EOF
# Try -pthread (most systems)
CFLAGS_THREAD="-pthread"
CFLAGS_OLD="$CFLAGS"
CFLAGS="$CFLAGS $CFLAGS_THREAD"
LIBS_OLD="$LIBS"
LIBS="$LIBS -pthread"
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_pthread=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
# Try -lpthread
if [ "x$has_pthread" = xno ]; then
CFLAGS_THREAD="-D_REENTRANT"
CFLAGS="$CFLAGS_OLD $CFLAGS_THREAD"
LIBS="$LIBS_OLD -lpthread"
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_pthread=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
fi
# Try -lsocket (e.g. QNX)
if [ "x$has_pthread" = xno ]; then
CFLAGS="$CFLAGS_OLD"
LIBS="$LIBS_OLD -lsocket"
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_pthread=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
fi
echo " pthread support: ""$has_pthread" 1>&6
if [ "x$has_pthread" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_PTHREAD"
else
LIBS="$LIBS_OLD"
fi
echo " " 1>&6
##########################################################################
# Check for sched_yield support
##########################################################################
if [ "x$has_pthread" = xyes ]; then
echo "Checking for sched_yield support... " 1>&6
echo "$config_script: Checking for sched_yield support" >&5
has_sched_yield=no
LIBS_OLD="$LIBS"
cat > conftest.c <<EOF
#include <pthread.h>
int main() {sched_yield(); return 0;}
EOF
if { (eval echo $config_script: \"$compile\") 1>&5; (eval $compile) 2>&5; }; then
has_sched_yield=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
if [ "x$has_sched_yield" = xno ]; then
LIBS="$LIBS_OLD -lrt"
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
has_sched_yield=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
LIBS="$LIBS_OLD"
fi
fi
rm -f conftest*
echo " sched_yield: ""$has_sched_yield" 1>&6
if [ "x$has_sched_yield" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_SCHED_YIELD"
fi
echo " " 1>&6
fi
##########################################################################
# Check for glXGetProcAddressXXX availability
##########################################################################
echo "Checking for glXGetProcAddress support... " 1>&6
echo "$config_script: Checking for glXGetProcAddress support" >&5
has_glXGetProcAddress=no
has_glXGetProcAddressARB=no
has_glXGetProcAddressEXT=no
# glXGetProcAddress check
cat > conftest.c <<EOF
#include <X11/Xlib.h>
#include <GL/glx.h>
#include <GL/gl.h>
int main() {void *ptr=(void*)glXGetProcAddress("glFun"); return 0;}
EOF
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_glXGetProcAddress=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
rm -f conftest*
# glXGetProcAddressARB check
cat > conftest.c <<EOF
#include <X11/Xlib.h>
#include <GL/glx.h>
#include <GL/gl.h>
int main() {void *ptr=(void*)glXGetProcAddressARB("glFun"); return 0;}
EOF
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_glXGetProcAddressARB=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
rm -f conftest*
# glXGetProcAddressEXT check
cat > conftest.c <<EOF
#include <X11/Xlib.h>
#include <GL/glx.h>
#include <GL/gl.h>
int main() {void *ptr=(void*)glXGetProcAddressEXT("glFun"); return 0;}
EOF
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_glXGetProcAddressEXT=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
rm -f conftest*
echo " glXGetProcAddress extension: ""$has_glXGetProcAddress" 1>&6
echo " glXGetProcAddressARB extension: ""$has_glXGetProcAddressARB" 1>&6
echo " glXGetProcAddressEXT extension: ""$has_glXGetProcAddressEXT" 1>&6
if [ "x$has_glXGetProcAddress" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_GLXGETPROCADDRESS"
fi
if [ "x$has_glXGetProcAddressARB" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_GLXGETPROCADDRESSARB"
fi
if [ "x$has_glXGetProcAddressEXT" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_GLXGETPROCADDRESSEXT"
fi
echo " " 1>&6
##########################################################################
# Check for dlopen support
##########################################################################
echo "Checking for dlopen support... " 1>&6
echo "$config_script: Checking for dlopen support" >&5
has_dlopen=no
cat > conftest.c <<EOF
#include <dlfcn.h>
int main() {void *l=dlopen("libGL.so",RTLD_LAZY|RTLD_GLOBAL); return 0;}
EOF
# First try without -ldl
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_dlopen=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
# Now try with -ldl if the previous attempt failed
if [ "x$has_dlopen" = xno ]; then
LIBS_OLD="$LIBS"
LIBS="$LIBS -ldl"
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_dlopen=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
if [ "x$has_dlopen" = xno ]; then
LIBS="$LIBS_OLD"
fi
fi
rm -f conftest*
echo " dlopen support: ""$has_dlopen" 1>&6
if [ "x$has_dlopen" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_DLOPEN"
fi
echo " " 1>&6
##########################################################################
# Check for sysconf support
##########################################################################
echo "Checking for sysconf support... " 1>&6
echo "$config_script: Checking for sysconf support" >&5
has_sysconf=no
cat > conftest.c <<EOF
#include <unistd.h>
#ifndef _SC_NPROCESSORS_ONLN
#ifndef _SC_NPROC_ONLN
#error Neither _SC_NPROCESSORS_ONLN nor _SC_NPROC_ONLN available
#endif
#endif
int main() {long x=sysconf(_SC_ARG_MAX); return 0; }
EOF
if { (eval echo $config_script: \"$link\") 1>&5; (eval $link) 2>&5; }; then
rm -rf conftest*
has_sysconf=yes
else
echo "$config_script: failed program was:" >&5
cat conftest.c >&5
fi
rm -f conftest*
echo " sysconf support: ""$has_sysconf" 1>&6
if [ "x$has_sysconf" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_SYSCONF"
fi
echo " " 1>&6
##########################################################################
# Check for sysctl support
##########################################################################
echo "Checking for sysctl support... " 1>&6
echo "$config_script: Checking for sysctl support" >&5
has_sysctl=no
cat > conftest.c <<EOF
#include <sys/types.h>
#include <sys/sysctl.h>
#ifdef CTL_HW
#ifdef HW_NCPU
yes;
#endif
#endif
EOF
if { ac_try='$CC -E conftest.c'; { (eval echo $config_script: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
has_sysctl=yes
fi
rm -f conftest*
echo " sysctl support: ""$has_sysctl" 1>&6
if [ "x$has_sysctl" = xyes ]; then
CFLAGS="$CFLAGS -D_GLFW_HAS_SYSCTL"
fi
echo " " 1>&6
##########################################################################
# Post fixups
##########################################################################
if [ "x$use_gcc" = xyes ]; then
CFLAGS_SPEED="-c -I. -I.. $CFLAGS -O3 -ffast-math -Wall"
CFLAGS="-c -I. -I.. $CFLAGS -Os -Wall"
CFLAGS_LINK="$INCS -O3 -ffast-math -Wall"
else
CFLAGS_SPEED="-c -I. -I.. $CFLAGS -O"
CFLAGS="-c -I. -I.. $CFLAGS -O"
CFLAGS_LINK="$INCS -O"
fi
CFLAGS_LINK="-I../include $CFLAGS_LINK"
LFLAGS_LINK="../lib/x11/libglfw.a $LFLAGS -lGLU $LIBS -lm"
##########################################################################
# Create Makefiles
##########################################################################
# ./lib/x11/Makefile.x11
MKNAME='./lib/x11/Makefile.x11'
echo "Creating ""$MKNAME""..." 1>&6
echo " " 1>&6
echo "$config_script: Creating ""$MKNAME""..." >&5
echo "##########################################################################" >$MKNAME
echo "# Automatically generated Makefile for GLFW" >>$MKNAME
echo "##########################################################################" >>$MKNAME
echo "CC = $CC" >>$MKNAME
echo "CFLAGS = $CFLAGS" >>$MKNAME
echo "CFLAGS_SPEED = $CFLAGS_SPEED" >>$MKNAME
echo "LDFLAGS = $LDFLAGS" >>$MKNAME
echo "LFLAGS = $LFLAGS" >>$MKNAME
echo "LIBS = $LIBS" >>$MKNAME
echo " " >>$MKNAME
cat './lib/x11/Makefile.x11.in' >>$MKNAME
# ./examples/Makefile.x11
MKNAME='./examples/Makefile.x11'
echo "Creating ""$MKNAME""..." 1>&6
echo " " 1>&6
echo "$config_script: Creating ""$MKNAME""..." >&5
echo "##########################################################################" >$MKNAME
echo "# Automatically generated Makefile for GLFW" >>$MKNAME
echo "##########################################################################" >>$MKNAME
echo "CC = $CC" >>$MKNAME
echo "CFLAGS = $CFLAGS_LINK" >>$MKNAME
echo "LFLAGS = $LFLAGS_LINK" >>$MKNAME
echo " " >>$MKNAME
cat './examples/Makefile.x11.in' >>$MKNAME
##########################################################################
# Create pkg-config template file
##########################################################################
# ./lib/x11/libglfw.pc.in
MKNAME="./lib/x11/libglfw.pc.in"
echo "Creating ""$MKNAME""..." 1>&6
echo " " 1>&6
echo "$config_script: Creating ""$MKNAME""..." >&5
cat > "$MKNAME" <<EOF
prefix=@PREFIX@
exec_prefix=@PREFIX@
libdir=@PREFIX@/lib
includedir=@PREFIX@/include
Name: GLFW
Description: A portable framework for OpenGL development
Version: 2.6.0
URL: http://glfw.sourceforge.net/
Libs: -L\${libdir} -lglfw $LFLAGS $LIBS -lm
Cflags: -I\${includedir} $CFLAGS_THREAD
EOF
|
DavidMutchler/kiss
|
external/src/glfw/compile.sh
|
Shell
|
gpl-2.0
| 16,929 |
#!/bin/bash
# required env vars:
# HASH
# HASH_ORIG
# KERNELDIR
# KERNEL_RELEASE
# OUTPUT
# PROBE_DEVICE_NAME
# PROBE_NAME
# PROBE_VERSION
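# Example invocation (hypothetical values; volume mounts omitted):
#   docker run --rm \
#       -e HASH=<hash> -e HASH_ORIG=<orig-hash> \
#       -e KERNELDIR=/build/kernel -e KERNEL_RELEASE="$(uname -r)" \
#       -e OUTPUT=/out -e PROBE_DEVICE_NAME=sysdig \
#       -e PROBE_NAME=sysdig-probe -e PROBE_VERSION=0.1.0 \
#       <builder-image>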
set -euo pipefail
ARCH=$(uname -m)
if [[ -f "${KERNELDIR}/scripts/gcc-plugins/stackleak_plugin.so" ]]; then
echo "Rebuilding gcc plugins for ${KERNELDIR}"
(cd "${KERNELDIR}" && make gcc-plugins)
fi
echo Building $PROBE_NAME-$PROBE_VERSION-$ARCH-$KERNEL_RELEASE-$HASH.ko
mkdir -p /build/sysdig
cd /build/sysdig
cmake -DCMAKE_BUILD_TYPE=Release -DPROBE_NAME=$PROBE_NAME -DPROBE_VERSION=$PROBE_VERSION -DPROBE_DEVICE_NAME=$PROBE_DEVICE_NAME -DCREATE_TEST_TARGETS=OFF /build/probe/sysdig
make driver
strip -g driver/$PROBE_NAME.ko
KO_VERSION=$(/sbin/modinfo driver/$PROBE_NAME.ko | grep vermagic | tr -s " " | cut -d " " -f 2)
if [ "$KO_VERSION" != "$KERNEL_RELEASE" ]; then
echo "Corrupted probe, KO_VERSION " $KO_VERSION ", KERNEL_RELEASE " $KERNEL_RELEASE
exit 1
fi
cp driver/$PROBE_NAME.ko $OUTPUT/$PROBE_NAME-$PROBE_VERSION-$ARCH-$KERNEL_RELEASE-$HASH.ko
cp driver/$PROBE_NAME.ko $OUTPUT/$PROBE_NAME-$PROBE_VERSION-$ARCH-$KERNEL_RELEASE-$HASH_ORIG.ko
|
hlieberman/sysdig
|
probe-builder/builder-entrypoint.sh
|
Shell
|
gpl-2.0
| 1,104 |
#!/bin/sh
export DRACUT_SYSTEMD
export NEWROOT
if [ -n "$NEWROOT" ]; then
[ -d $NEWROOT ] || mkdir -p -m 0755 $NEWROOT
fi
if ! [ -d /run/initramfs ]; then
mkdir -p -m 0755 /run/initramfs/log
ln -sfn /run/initramfs/log /var/log
fi
[ -d /run/lock ] || mkdir -p -m 0755 /run/lock
[ -d /run/log ] || mkdir -p -m 0755 /run/log
debug_off() {
set +x
}
debug_on() {
[ "$RD_DEBUG" = "yes" ] && set -x
}
# returns OK if $1 contains literal string $2 (and isn't empty)
strstr() {
[ "${1##*"$2"*}" != "$1" ]
}
# returns OK if $1 matches (completely) glob pattern $2
# An empty $1 will not be considered matched, even if $2 is * which technically
# matches; as it would match anything, it's not an interesting case.
strglob() {
[ -n "$1" -a -z "${1##$2}" ]
}
# returns OK if $1 contains (anywhere) a match of glob pattern $2
# An empty $1 will not be considered matched, even if $2 is * which technically
# matches; as it would match anything, it's not an interesting case.
strglobin() {
[ -n "$1" -a -z "${1##*$2*}" ]
}
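# Illustrative results for the three matchers above:
#   strstr "foobar" "oba"     # true: literal substring
#   strglob "foobar" "f*r"    # true: whole string matches the glob
#   strglobin "foobar" "o*a"  # true: glob matches somewhere inside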
# returns OK if $1 contains literal string $2 at the beginning, and isn't empty
str_starts() {
[ "${1#"$2"*}" != "$1" ]
}
# returns OK if $1 contains literal string $2 at the end, and isn't empty
str_ends() {
[ "${1%*"$2"}" != "$1" ]
}
trim() {
local var="$*"
var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters
var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters
printf "%s" "$var"
}
if [ -z "$DRACUT_SYSTEMD" ]; then
warn() {
check_quiet
echo "<28>dracut Warning: $*" > /dev/kmsg
echo "dracut Warning: $*" >&2
}
info() {
check_quiet
echo "<30>dracut: $*" > /dev/kmsg
[ "$DRACUT_QUIET" != "yes" ] && \
echo "dracut: $*" >&2
}
else
warn() {
echo "Warning: $*" >&2
}
info() {
echo "$*"
}
fi
vwarn() {
while read line || [ -n "$line" ]; do
warn $line;
done
}
vinfo() {
while read line || [ -n "$line" ]; do
info $line;
done
}
# replaces all occurrences of 'search' in 'str' with 'replacement'
#
# str_replace str search replacement
#
# example:
# str_replace ' one two three ' ' ' '_'
str_replace() {
local in="$1"; local s="$2"; local r="$3"
local out=''
while strstr "${in}" "$s"; do
chop="${in%%"$s"*}"
out="${out}${chop}$r"
in="${in#*"$s"}"
done
echo "${out}${in}"
}
killall_proc_mountpoint() {
local _pid
local _t
for _pid in /proc/*; do
_pid=${_pid##/proc/}
case $_pid in
*[!0-9]*) continue;;
esac
[ -e "/proc/$_pid/exe" ] || continue
[ -e "/proc/$_pid/root" ] || continue
strstr "$(ls -l -- "/proc/$_pid" "/proc/$_pid/fd" 2>/dev/null)" "$1" && kill -9 "$_pid"
done
}
getcmdline() {
local _line
local _i
local CMDLINE_ETC_D
local CMDLINE_ETC
local CMDLINE_PROC
unset _line
if [ -e /etc/cmdline ]; then
while read -r _line || [ -n "$_line" ]; do
CMDLINE_ETC="$CMDLINE_ETC $_line";
done </etc/cmdline;
fi
for _i in /etc/cmdline.d/*.conf; do
[ -e "$_i" ] || continue
while read -r _line || [ -n "$_line" ]; do
CMDLINE_ETC_D="$CMDLINE_ETC_D $_line";
done <"$_i";
done
if [ -e /proc/cmdline ]; then
while read -r _line || [ -n "$_line" ]; do
CMDLINE_PROC="$CMDLINE_PROC $_line"
done </proc/cmdline;
fi
CMDLINE="$CMDLINE_ETC_D $CMDLINE_ETC $CMDLINE_PROC"
printf "%s" "$CMDLINE"
}
_dogetarg() {
local _o _val _doecho
unset _val
unset _o
unset _doecho
CMDLINE=$(getcmdline)
for _o in $CMDLINE; do
if [ "${_o%%=*}" = "${1%%=*}" ]; then
if [ -n "${1#*=}" -a "${1#*=*}" != "${1}" ]; then
# if $1 has a "=<value>", we want the exact match
if [ "$_o" = "$1" ]; then
_val="1";
unset _doecho
fi
continue
fi
if [ "${_o#*=}" = "$_o" ]; then
# if cmdline argument has no "=<value>", we assume "=1"
_val="1";
unset _doecho
continue
fi
_val="${_o#*=}"
_doecho=1
fi
done
if [ -n "$_val" ]; then
[ "x$_doecho" != "x" ] && echo "$_val";
return 0;
fi
return 1;
}
getarg() {
debug_off
local _deprecated _newoption
while [ $# -gt 0 ]; do
case $1 in
-d) _deprecated=1; shift;;
-y) if _dogetarg $2 >/dev/null; then
if [ "$_deprecated" = "1" ]; then
[ -n "$_newoption" ] && warn "Kernel command line option '$2' is deprecated, use '$_newoption' instead." || warn "Option '$2' is deprecated."
fi
echo 1
debug_on
return 0
fi
_deprecated=0
shift 2;;
-n) if _dogetarg $2 >/dev/null; then
echo 0;
if [ "$_deprecated" = "1" ]; then
[ -n "$_newoption" ] && warn "Kernel command line option '$2' is deprecated, use '$_newoption=0' instead." || warn "Option '$2' is deprecated."
fi
debug_on
return 1
fi
_deprecated=0
shift 2;;
*) if [ -z "$_newoption" ]; then
_newoption="$1"
fi
if _dogetarg $1; then
if [ "$_deprecated" = "1" ]; then
[ -n "$_newoption" ] && warn "Kernel command line option '$1' is deprecated, use '$_newoption' instead." || warn "Option '$1' is deprecated."
fi
debug_on
return 0;
fi
_deprecated=0
shift;;
esac
done
debug_on
return 1
}
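# example (illustrative):
#   ip=$(getarg ip=)                  # print the value of ip=..., fail if absent
#   getarg rd.info && info "rd.info"  # test for a flag-style option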
# getargbool <defaultval> <args...>
# False if "getarg <args...>" returns "0", "no", or "off".
# True if getarg returns any other non-empty string.
# If not found, assumes <defaultval> - usually 0 for false, 1 for true.
# example: getargbool 0 rd.info
# true: rd.info, rd.info=1, rd.info=xxx
# false: rd.info=0, rd.info=off, rd.info not present (default val is 0)
getargbool() {
local _b
unset _b
local _default
_default="$1"; shift
_b=$(getarg "$@")
[ $? -ne 0 -a -z "$_b" ] && _b="$_default"
if [ -n "$_b" ]; then
[ $_b = "0" ] && return 1
[ $_b = "no" ] && return 1
[ $_b = "off" ] && return 1
fi
return 0
}
isdigit() {
case "$1" in
*[!0-9]*|"") return 1;;
esac
return 0
}
# getargnum <defaultval> <minval> <maxval> <arg>
# Will echo the arg if it's in range [minval - maxval].
# If it's not set or it's not valid, will set it <defaultval>.
# Note all values are required to be >= 0 here.
# <defaultval> should be within [minval - maxval].
getargnum() {
local _b
unset _b
local _default _min _max
_default="$1"; shift
_min="$1"; shift
_max="$1"; shift
_b=$(getarg "$1")
[ $? -ne 0 -a -z "$_b" ] && _b=$_default
if [ -n "$_b" ]; then
isdigit "$_b" && _b=$(($_b)) && \
[ $_b -ge $_min ] && [ $_b -le $_max ] && echo $_b && return
fi
echo $_default
}
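# example (illustrative):
#   getargnum 3 0 9 rd.retry   # echoes the rd.retry value if in 0..9, else 3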
_dogetargs() {
debug_off
local _o _found _key
unset _o
unset _found
CMDLINE=$(getcmdline)
_key="$1"
set --
for _o in $CMDLINE; do
if [ "$_o" = "$_key" ]; then
_found=1;
elif [ "${_o%%=*}" = "${_key%=}" ]; then
[ -n "${_o%%=*}" ] && set -- "$@" "${_o#*=}";
_found=1;
fi
done
if [ -n "$_found" ]; then
[ $# -gt 0 ] && printf '%s' "$*"
return 0
fi
return 1;
}
getargs() {
debug_off
local _val _i _args _gfound _deprecated
unset _val
unset _gfound
_newoption="$1"
_args="$@"
set --
for _i in $_args; do
if [ "$_i" = "-d" ]; then
_deprecated=1
continue
fi
_val="$(_dogetargs $_i)"
if [ $? -eq 0 ]; then
if [ "$_deprecated" = "1" ]; then
[ -n "$_newoption" ] && warn "Option '$_i' is deprecated, use '$_newoption' instead." || warn "Option $_i is deprecated!"
fi
_gfound=1
fi
[ -n "$_val" ] && set -- "$@" "$_val"
_deprecated=0
done
if [ -n "$_gfound" ]; then
if [ $# -gt 0 ]; then
printf '%s' "$*"
fi
debug_on
return 0
fi
debug_on
return 1;
}
# Prints the value of the given option. If the option is a flag and it is
# present, it just returns 0. Otherwise 1 is returned.
# $1 = options separated by commas
# $2 = option we are interested in
#
# Example:
# $1 = cipher=aes-cbc-essiv:sha256,hash=sha256,verify
# $2 = hash
# Output:
# sha256
getoptcomma() {
local line=",$1,"; local opt="$2"; local tmp
case "${line}" in
*,${opt}=*,*)
tmp="${line#*,${opt}=}"
echo "${tmp%%,*}"
return 0
;;
*,${opt},*) return 0;;
esac
return 1
}
# Splits given string 'str' with separator 'sep' into variables 'var1', 'var2',
# 'varN'. If number of fields is less than number of variables, remaining are
# not set. If number of fields is greater than number of variables, the last
# variable takes the remaining fields. In short, it acts similarly to 'read'.
#
# splitsep sep str var1 var2 varN
#
# example:
# splitsep ':' 'foo:bar:baz' v1 v2
# in result:
# v1='foo', v2='bar:baz'
#
# TODO: ':' inside fields.
splitsep() {
debug_off
local sep="$1"; local str="$2"; shift 2
local tmp
while [ -n "$str" -a "$#" -gt 1 ]; do
tmp="${str%%$sep*}"
eval "$1='${tmp}'"
str="${str#"$tmp"}"
str="${str#$sep}"
shift
done
[ -n "$str" -a -n "$1" ] && eval "$1='$str'"
debug_on
return 0
}
setdebug() {
[ -f /usr/lib/initrd-release ] || return
if [ -z "$RD_DEBUG" ]; then
if [ -e /proc/cmdline ]; then
RD_DEBUG=no
if getargbool 0 rd.debug -d -y rdinitdebug -d -y rdnetdebug; then
RD_DEBUG=yes
[ -n "$BASH" ] && \
export PS4='${BASH_SOURCE}@${LINENO}(${FUNCNAME[0]}): ';
fi
fi
export RD_DEBUG
fi
debug_on
}
setdebug
source_all() {
local f
local _dir
_dir=$1; shift
[ "$_dir" ] && [ -d "/$_dir" ] || return
for f in "/$_dir"/*.sh; do [ -e "$f" ] && . "$f" "$@"; done
}
hookdir=/lib/dracut/hooks
export hookdir
source_hook() {
local _dir
_dir=$1; shift
source_all "/lib/dracut/hooks/$_dir" "$@"
}
check_finished() {
local f
for f in $hookdir/initqueue/finished/*.sh; do
[ "$f" = "$hookdir/initqueue/finished/*.sh" ] && return 0
{ [ -e "$f" ] && ( . "$f" ) ; } || return 1
done
return 0
}
source_conf() {
local f
[ "$1" ] && [ -d "/$1" ] || return
for f in "/$1"/*.conf; do [ -e "$f" ] && . "$f"; done
}
die() {
{
echo "<24>dracut: FATAL: $*";
echo "<24>dracut: Refusing to continue";
} > /dev/kmsg
{
echo "warn dracut: FATAL: \"$*\"";
echo "warn dracut: Refusing to continue";
} >> $hookdir/emergency/01-die.sh
[ -d /run/initramfs ] || mkdir -p -- /run/initramfs
> /run/initramfs/.die
if getargbool 0 "rd.shell"; then
emergency_shell
else
source_hook "shutdown-emergency"
fi
if [ -n "$DRACUT_SYSTEMD" ]; then
systemctl --no-block --force halt
fi
exit 1
}
check_quiet() {
if [ -z "$DRACUT_QUIET" ]; then
DRACUT_QUIET="yes"
getargbool 0 rd.info -d -y rdinfo && DRACUT_QUIET="no"
getargbool 0 rd.debug -d -y rdinitdebug && DRACUT_QUIET="no"
getarg quiet || DRACUT_QUIET="yes"
a=$(getarg loglevel=)
[ -n "$a" ] && [ $a -ge 28 ] && DRACUT_QUIET="yes"
export DRACUT_QUIET
fi
}
check_occurances() {
# Count the number of times the character $ch occurs in $str
# Return 0 if the count matches the expected number, 1 otherwise
local str="$1"
local ch="$2"
local expected="$3"
local count=0
while [ "${str#*$ch}" != "${str}" ]; do
str="${str#*$ch}"
count=$(( $count + 1 ))
done
[ $count -eq $expected ]
}
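# example (illustrative):
#   check_occurances "a:b:c" ":" 2   # true: ':' occurs exactly twice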
incol2() {
debug_off
local dummy check;
local file="$1";
local str="$2";
[ -z "$file" ] && return 1;
[ -z "$str" ] && return 1;
while read dummy check restofline || [ -n "$check" ]; do
if [ "$check" = "$str" ]; then
debug_on
return 0
fi
done < $file
debug_on
return 1
}
udevsettle() {
[ -z "$UDEVVERSION" ] && export UDEVVERSION=$(udevadm --version)
if [ $UDEVVERSION -ge 143 ]; then
udevadm settle --exit-if-exists=$hookdir/initqueue/work $settle_exit_if_exists
else
udevadm settle --timeout=30
fi
}
udevproperty() {
[ -z "$UDEVVERSION" ] && export UDEVVERSION=$(udevadm --version)
if [ $UDEVVERSION -ge 143 ]; then
for i in "$@"; do udevadm control --property=$i; done
else
for i in "$@"; do udevadm control --env=$i; done
fi
}
find_mount() {
local dev mnt etc wanted_dev
wanted_dev="$(readlink -e -q $1)"
while read dev mnt etc || [ -n "$dev" ]; do
[ "$dev" = "$wanted_dev" ] && echo "$dev" && return 0
done < /proc/mounts
return 1
}
# usage: ismounted <mountpoint>
# usage: ismounted /dev/<device>
if command -v findmnt >/dev/null; then
ismounted() {
findmnt "$1" > /dev/null 2>&1
}
else
ismounted() {
if [ -b "$1" ]; then
find_mount "$1" > /dev/null && return 0
return 1
fi
while read a m a || [ -n "$m" ]; do
[ "$m" = "$1" ] && return 0
done < /proc/mounts
return 1
}
fi
# root=nfs:[<server-ip>:]<root-dir>[:<nfs-options>]
# root=nfs4:[<server-ip>:]<root-dir>[:<nfs-options>]
nfsroot_to_var() {
# strip nfs[4]:
local arg="$@:"
nfs="${arg%%:*}"
arg="${arg##$nfs:}"
# check if we have a server
if strstr "$arg" ':/' ; then
server="${arg%%:/*}"
arg="/${arg##*:/}"
fi
path="${arg%%:*}"
# rest are options
options="${arg##$path}"
# strip leading ":"
options="${options##:}"
# strip ":"
options="${options%%:}"
# Does it really start with '/'?
[ -n "${path%%/*}" ] && path="error";
#Fix kernel legacy style separating path and options with ','
if [ "$path" != "${path#*,}" ] ; then
options=${path#*,}
path=${path%%,*}
fi
}
# Create udev rule match for a device with its device name, or the udev property
# ID_FS_UUID or ID_FS_LABEL
#
# example:
# udevmatch LABEL=boot
# prints:
# ENV{ID_FS_LABEL}="boot"
#
# TODO: symlinks
udevmatch() {
case "$1" in
UUID=????????-????-????-????-????????????|LABEL=*|PARTLABEL=*|PARTUUID=????????-????-????-????-????????????)
printf 'ENV{ID_FS_%s}=="%s"' "${1%%=*}" "${1#*=}"
;;
UUID=*)
printf 'ENV{ID_FS_UUID}=="%s*"' "${1#*=}"
;;
PARTUUID=*)
printf 'ENV{ID_FS_PARTUUID}=="%s*"' "${1#*=}"
;;
/dev/?*) printf -- 'KERNEL=="%s"' "${1#/dev/}" ;;
*) return 255 ;;
esac
}
# Prints a unique path for a potential file inside the specified directory. It
# consists of the specified directory, a prefix, and a number at the end which
# is incremented until a non-existing file is found.
#
# funiq dir prefix
#
# example:
# # ls /mnt
# cdrom0 cdrom1
#
# # funiq /mnt cdrom
# /mnt/cdrom2
funiq() {
local dir="$1"; local prefix="$2"
local i=0
[ -d "${dir}" ] || return 1
while [ -e "${dir}/${prefix}$i" ]; do
i=$(($i+1)) || return 1
done
echo "${dir}/${prefix}$i"
}
# Creates a unique directory and prints its path. It uses funiq to generate
# the path.
#
# mkuniqdir subdir new_dir_name
mkuniqdir() {
local dir="$1"; local prefix="$2"
local retdir; local retdir_new
[ -d "${dir}" ] || mkdir -m 0755 -p "${dir}" || return 1
retdir=$(funiq "${dir}" "${prefix}") || return 1
until mkdir -m 0755 "${retdir}" 2>/dev/null; do
retdir_new=$(funiq "${dir}" "${prefix}") || return 1
[ "$retdir_new" = "$retdir" ] && return 1
retdir="$retdir_new"
done
echo "${retdir}"
}
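# example (illustrative):
#   mntdir=$(mkuniqdir /mnt cdrom)   # e.g. /mnt/cdrom0, /mnt/cdrom1, ...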
# Copy the contents of SRC into DEST, merging the contents of existing
# directories (kinda like rsync, or cpio -p).
# Creates DEST if it doesn't exist. Overwrites files with the same names.
#
# copytree SRC DEST
copytree() {
local src="$1" dest="$2"
mkdir -p "$dest"; dest=$(readlink -e -q "$dest")
( cd "$src"; cp -af . -t "$dest" )
}
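# example (illustrative, paths hypothetical):
#   copytree /run/initramfs/state /sysroot/var/lib/state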
# Evaluates command for UUIDs either given as arguments for this function or all
# listed in /dev/disk/by-uuid. UUIDs don't have to be fully specified. If a
# beginning is given, it is expanded to all matching UUIDs. To pass the full
# UUID to your command, use '$___' as a placeholder. Remember to escape '$'!
#
# foreach_uuid_until [ -p prefix ] command UUIDs
#
# prefix - string to put just before $___
# command - command to be evaluated
# UUIDs - list of UUIDs separated by space
#
# The function returns after the *first successful evaluation* of the given
# command with status 0. If evaluation fails for every UUID, the function
# returns with status 1.
#
# Example:
# foreach_uuid_until "mount -U \$___ /mnt; echo OK; umount /mnt" \
# "01234 f512 a235567f-12a3-c123-a1b1-01234567abcb"
foreach_uuid_until() (
cd /dev/disk/by-uuid
[ "$1" = -p ] && local prefix="$2" && shift 2
local cmd="$1"; shift; local uuids_list="$*"
local uuid; local full_uuid; local ___
[ -n "${cmd}" ] || return 1
for uuid in ${uuids_list:-*}; do
for full_uuid in ${uuid}*; do
[ -e "${full_uuid}" ] || continue
___="${prefix}${full_uuid}"
eval ${cmd} && return 0
done
done
return 1
)
# Get the kernel name for a given device. The device may be the name itself
# (then the same is returned), a symlink (full path), a UUID (prefixed with
# "UUID=") or a label (prefixed with "LABEL="). If just the beginning of the
# UUID is specified (or even an empty string), the function prints all device
# names whose UUIDs match, one per line.
#
# NOTICE: The name starts with "/dev/".
#
# Example:
# devnames UUID=123
# May print:
# /dev/dm-1
# /dev/sdb1
# /dev/sdf3
devnames() {
local dev="$1"; local d; local names
case "$dev" in
UUID=*)
dev="$(foreach_uuid_until '! blkid -U $___' "${dev#UUID=}")" \
&& return 255
[ -z "$dev" ] && return 255
;;
LABEL=*) dev="$(blkid -L "${dev#LABEL=}")" || return 255 ;;
/dev/?*) ;;
*) return 255 ;;
esac
for d in $dev; do
names="$names
$(readlink -e -q "$d")" || return 255
done
echo "${names#
}"
}
usable_root() {
local _i
[ -d "$1" ] || return 1
for _i in "$1"/usr/lib*/ld-*.so "$1"/lib*/ld-*.so; do
[ -e "$_i" ] && return 0
done
for _i in proc sys dev; do
[ -e "$1"/$_i ] || return 1
done
return 0
}
inst_hook() {
local _hookname _unique _name _job _exe
while [ $# -gt 0 ]; do
case "$1" in
--hook)
_hookname="/$2";shift;;
--unique)
_unique="yes";;
--name)
_name="$2";shift;;
*)
break;;
esac
shift
done
if [ -z "$_unique" ]; then
_job="${_name}$$"
else
_job="${_name:-$1}"
_job=${_job##*/}
fi
_exe=$1
shift
[ -x "$_exe" ] || _exe=$(command -v $_exe)
if [ -n "$onetime" ]; then
{
echo '[ -e "$_job" ] && rm -f -- "$_job"'
echo "$_exe $@"
} > "/tmp/$$-${_job}.sh"
else
echo "$_exe $@" > "/tmp/$$-${_job}.sh"
fi
mv -f "/tmp/$$-${_job}.sh" "$hookdir/${_hookname}/${_job}.sh"
}
# inst_mount_hook <mountpoint> <prio> <name> <script>
#
# Install a mount hook with priority <prio>,
# which executes <script> as soon as <mountpoint> is mounted.
inst_mount_hook() {
local _prio="$2" _jobname="$3" _script="$4"
local _hookname="mount-$(str_replace "$1" '/' '\\x2f')"
[ -d "$hookdir/${_hookname}" ] || mkdir -p "$hookdir/${_hookname}"
inst_hook --hook "$_hookname" --unique --name "${_prio}-${_jobname}" "$_script"
}
# add_mount_point <dev> <mountpoint> <filesystem> <fsopts>
#
# Mount <dev> on <mountpoint> with <filesystem> and <fsopts>
# and call any mount hooks as soon as it is mounted
add_mount_point() {
local _dev="$1" _mp="$2" _fs="$3" _fsopts="$4"
local _hookname="mount-$(str_replace "$2" '/' '\\x2f')"
local _devname="dev-$(str_replace "$1" '/' '\\x2f')"
echo "$_dev $_mp $_fs $_fsopts 0 0" >> /etc/fstab
exec 7>/etc/udev/rules.d/99-mount-${_devname}.rules
echo 'SUBSYSTEM!="block", GOTO="mount_end"' >&7
echo 'ACTION!="add|change", GOTO="mount_end"' >&7
if [ -n "$_dev" ]; then
udevmatch "$_dev" >&7 || {
warn "add_mount_point dev=$_dev incorrect!"
continue
}
printf ', ' >&7
fi
{
printf -- 'RUN+="%s --unique --onetime ' $(command -v initqueue)
printf -- '--name mount-%%k '
printf -- '%s %s"\n' "$(command -v mount_hook)" "${_mp}"
} >&7
echo 'LABEL="mount_end"' >&7
exec 7>&-
}
# wait_for_mount <mountpoint>
#
# Installs an initqueue-finished script,
# which causes the main loop to exit
# only once <mountpoint> is mounted.
wait_for_mount()
{
local _name
_name="$(str_replace "$1" '/' '\\x2f')"
printf '. /lib/dracut-lib.sh\nismounted "%s"\n' $1 \
>> "$hookdir/initqueue/finished/ismounted-${_name}.sh"
{
printf 'ismounted "%s" || ' $1
printf 'warn "\"%s\" is not mounted"\n' $1
} >> "$hookdir/emergency/90-${_name}.sh"
}
# get a systemd-compatible unit name from a path
# (mimics unit_name_from_path_instance())
dev_unit_name()
{
local dev="$1"
if command -v systemd-escape >/dev/null; then
systemd-escape -p -- "$dev"
return
fi
if [ "$dev" = "/" -o -z "$dev" ]; then
printf -- "-"
exit 0
fi
dev="${1%%/}"
dev="${dev##/}"
dev="$(str_replace "$dev" '\' '\x5c')"
dev="$(str_replace "$dev" '-' '\x2d')"
if [ "${dev##.}" != "$dev" ]; then
dev="\x2e${dev##.}"
fi
dev="$(str_replace "$dev" '/' '-')"
printf -- "%s" "$dev"
}
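# example (illustrative):
#   dev_unit_name /dev/mapper/root   # prints "dev-mapper-root"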
# set_systemd_timeout_for_dev <dev>
# Set 'rd.timeout' as the systemd timeout for <dev>
set_systemd_timeout_for_dev()
{
local _name
local _needreload
local _noreload
local _timeout
if [ "$1" = "-n" ]; then
_noreload=1
shift
fi
_timeout=$(getarg rd.timeout)
_timeout=${_timeout:-0}
if [ -n "$DRACUT_SYSTEMD" ]; then
_name=$(dev_unit_name "$1")
if ! [ -L ${PREFIX}/etc/systemd/system/initrd.target.wants/${_name}.device ]; then
[ -d ${PREFIX}/etc/systemd/system/initrd.target.wants ] || mkdir -p ${PREFIX}/etc/systemd/system/initrd.target.wants
ln -s ../${_name}.device ${PREFIX}/etc/systemd/system/initrd.target.wants/${_name}.device
type mark_hostonly >/dev/null 2>&1 && mark_hostonly /etc/systemd/system/initrd.target.wants/${_name}.device
_needreload=1
fi
if ! [ -f ${PREFIX}/etc/systemd/system/${_name}.device.d/timeout.conf ]; then
mkdir -p ${PREFIX}/etc/systemd/system/${_name}.device.d
{
echo "[Unit]"
echo "JobTimeoutSec=$_timeout"
} > ${PREFIX}/etc/systemd/system/${_name}.device.d/timeout.conf
type mark_hostonly >/dev/null 2>&1 && mark_hostonly /etc/systemd/system/${_name}.device.d/timeout.conf
_needreload=1
fi
if [ -z "$PREFIX" ] && [ "$_needreload" = 1 ] && [ -z "$_noreload" ]; then
/sbin/initqueue --onetime --unique --name daemon-reload systemctl daemon-reload
fi
fi
}
# wait_for_dev <dev>
#
# Installs an initqueue-finished script,
# which causes the main loop to exit
# only once the device <dev> is recognized by the system.
wait_for_dev()
{
local _name
local _noreload
if [ "$1" = "-n" ]; then
_noreload=-n
shift
fi
_name="$(str_replace "$1" '/' '\x2f')"
type mark_hostonly >/dev/null 2>&1 && mark_hostonly "$hookdir/initqueue/finished/devexists-${_name}.sh"
[ -e "${PREFIX}$hookdir/initqueue/finished/devexists-${_name}.sh" ] && return 0
printf '[ -e "%s" ]\n' $1 \
>> "${PREFIX}$hookdir/initqueue/finished/devexists-${_name}.sh"
{
printf '[ -e "%s" ] || ' $1
printf 'warn "\"%s\" does not exist"\n' $1
} >> "${PREFIX}$hookdir/emergency/80-${_name}.sh"
set_systemd_timeout_for_dev $_noreload $1
}
cancel_wait_for_dev()
{
local _name
_name="$(str_replace "$1" '/' '\x2f')"
rm -f -- "$hookdir/initqueue/finished/devexists-${_name}.sh"
rm -f -- "$hookdir/emergency/80-${_name}.sh"
if [ -n "$DRACUT_SYSTEMD" ]; then
_name=$(dev_unit_name "$1")
rm -f -- ${PREFIX}/etc/systemd/system/initrd.target.wants/${_name}.device
rm -f -- ${PREFIX}/etc/systemd/system/${_name}.device.d/timeout.conf
/sbin/initqueue --onetime --unique --name daemon-reload systemctl daemon-reload
fi
}
killproc() {
debug_off
local _exe="$(command -v $1)"
local _sig=$2
local _i
[ -x "$_exe" ] || return 1
for _i in /proc/[0-9]*; do
[ "$_i" = "/proc/1" ] && continue
if [ -e "$_i"/_exe ] && [ "$_i/_exe" -ef "$_exe" ] ; then
kill $_sig ${_i##*/}
fi
done
debug_on
}
need_shutdown() {
>/run/initramfs/.need_shutdown
}
wait_for_loginit()
{
[ "$RD_DEBUG" = "yes" ] || return
[ -e /run/initramfs/loginit.pipe ] || return
debug_off
echo "DRACUT_LOG_END"
exec 0<>/dev/console 1<>/dev/console 2<>/dev/console
# wait for loginit
i=0
while [ $i -lt 10 ]; do
if [ ! -e /run/initramfs/loginit.pipe ]; then
j=$(jobs)
[ -z "$j" ] && break
[ -z "${j##*Running*}" ] || break
fi
sleep 0.1
i=$(($i+1))
done
if [ $i -eq 10 ]; then
kill %1 >/dev/null 2>&1
kill $(while read line || [ -n "$line" ];do echo $line;done</run/initramfs/loginit.pid)
fi
setdebug
rm -f -- /run/initramfs/loginit.pipe /run/initramfs/loginit.pid
}
# pidof version for root
if ! command -v pidof >/dev/null 2>/dev/null; then
pidof() {
debug_off
local _cmd
local _exe
local _rl
local _ret=1
local i
_cmd="$1"
if [ -z "$_cmd" ]; then
debug_on
return 1
fi
_exe=$(type -P "$1")
for i in /proc/*/exe; do
[ -e "$i" ] || continue
if [ -n "$_exe" ]; then
[ "$i" -ef "$_exe" ] || continue
else
_rl=$(readlink -f "$i");
[ "${_rl%/$_cmd}" != "$_rl" ] || continue
fi
i=${i%/exe}
echo ${i##/proc/}
_ret=0
done
debug_on
return $_ret
}
fi
_emergency_shell()
{
local _name="$1"
if [ -n "$DRACUT_SYSTEMD" ]; then
> /.console_lock
echo "PS1=\"$_name:\\\${PWD}# \"" >/etc/profile
systemctl start dracut-emergency.service
rm -f -- /etc/profile
rm -f -- /.console_lock
else
debug_off
echo
/sbin/rdsosreport
echo 'You might want to save "/run/initramfs/rdsosreport.txt" to a USB stick or /boot'
echo 'after mounting them and attach it to a bug report.'
if ! RD_DEBUG= getargbool 0 rd.debug -d -y rdinitdebug -d -y rdnetdebug; then
echo
echo 'To get more debug information in the report,'
echo 'reboot with "rd.debug" added to the kernel command line.'
fi
echo
echo 'Dropping to debug shell.'
echo
export PS1="$_name:\${PWD}# "
[ -e /.profile ] || >/.profile
_ctty="$(RD_DEBUG= getarg rd.ctty=)" && _ctty="/dev/${_ctty##*/}"
if [ -z "$_ctty" ]; then
_ctty=console
while [ -f /sys/class/tty/$_ctty/active ]; do
_ctty=$(cat /sys/class/tty/$_ctty/active)
_ctty=${_ctty##* } # last one in the list
done
_ctty=/dev/$_ctty
fi
[ -c "$_ctty" ] || _ctty=/dev/tty1
case "$(/usr/bin/setsid --help 2>&1)" in *--ctty*) CTTY="--ctty";; esac
setsid $CTTY /bin/sh -i -l 0<>$_ctty 1<>$_ctty 2<>$_ctty
fi
}
emergency_shell()
{
local _ctty
set +e
local _rdshell_name="dracut" action="Boot" hook="emergency"
local _emergency_action
if [ "$1" = "-n" ]; then
_rdshell_name=$2
shift 2
elif [ "$1" = "--shutdown" ]; then
_rdshell_name=$2; action="Shutdown"; hook="shutdown-emergency"
if type plymouth >/dev/null 2>&1; then
plymouth --hide-splash
elif [ -x /oldroot/bin/plymouth ]; then
/oldroot/bin/plymouth --hide-splash
fi
shift 2
fi
echo ; echo
warn "$*"
source_hook "$hook"
echo
_emergency_action=$(getarg rd.emergency)
[ -z "$_emergency_action" ] \
&& [ -e /run/initramfs/.die ] \
&& _emergency_action=halt
if getargbool 1 rd.shell -d -y rdshell || getarg rd.break -d rdbreak; then
_emergency_shell $_rdshell_name
else
warn "$action has failed. To debug this issue add \"rd.shell rd.debug\" to the kernel command line."
[ -z "$_emergency_action" ] && _emergency_action=halt
fi
case "$_emergency_action" in
reboot)
reboot || exit 1;;
poweroff)
poweroff || exit 1;;
halt)
halt || exit 1;;
esac
}
# Retain the values of these variables but ensure that they are unexported
# This is a POSIX-compliant equivalent of bash's "export -n"
export_n()
{
local var
local val
for var in "$@"; do
eval val=\$$var
unset $var
[ -n "$val" ] && eval $var=\"$val\"
done
}
# returns OK if list1 contains all elements of list2, i.e. checks if list2 is a
# sublist of list1. Order and duplication don't matter.
#
# $1 = separator
# $2 = list1
# $3 = list2
# $4 = ignore values, separated by $1
listlist() {
local _sep="$1"
local _list="${_sep}${2}${_sep}"
local _sublist="$3"
[ -n "$4" ] && local _iglist="${_sep}${4}${_sep}"
local IFS="$_sep"
local _v
[ "$_list" = "$_sublist" ] && return 0
for _v in $_sublist; do
if [ -n "$_v" ] && ! ( [ -n "$_iglist" ] && strstr "$_iglist" "$_v" )
then
strstr "$_list" "$_v" || return 1
fi
done
return 0
}
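# example (illustrative):
#   listlist "," "a,b,c" "c,a"   # true: list2 is a sublist
#   listlist "," "a,b" "a,d"     # false: "d" is missing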
# returns OK if both lists contain the same values. Order and duplication
# don't matter.
#
# $1 = separator
# $2 = list1
# $3 = list2
# $4 = ignore values, separated by $1
are_lists_eq() {
listlist "$1" "$2" "$3" "$4" && listlist "$1" "$3" "$2" "$4"
}
setmemdebug() {
if [ -z "$DEBUG_MEM_LEVEL" ]; then
export DEBUG_MEM_LEVEL=$(getargnum 0 0 4 rd.memdebug)
fi
}
setmemdebug
cleanup_trace_mem()
{
# tracekomem based on kernel trace needs cleanup after use.
if [ "$DEBUG_MEM_LEVEL" -eq 4 ]; then
tracekomem --cleanup
fi
}
# parameters: msg [trace_level:trace]...
make_trace_mem()
{
local msg
msg="$1"
shift
if [ -n "$DEBUG_MEM_LEVEL" ] && [ "$DEBUG_MEM_LEVEL" -gt 0 ]; then
make_trace show_memstats $DEBUG_MEM_LEVEL "[debug_mem]" "$msg" "$@" >&2
fi
}
# parameters: func log_level prefix msg [trace_level:trace]...
make_trace()
{
local func log_level prefix msg msg_printed
local trace trace_level trace_in_higher_levels insert_trace
func=$1
shift
log_level=$1
shift
prefix=$1
shift
msg=$1
shift
if [ -z "$log_level" ]; then
return
fi
msg=$(echo $msg)
msg_printed=0
while [ $# -gt 0 ]; do
trace=${1%%:*}
trace_level=${trace%%+}
[ "$trace" != "$trace_level" ] && trace_in_higher_levels="yes"
trace=${1##*:}
if [ -z "$trace_level" ]; then
trace_level=0
fi
insert_trace=0
if [ -n "$trace_in_higher_levels" ]; then
if [ "$log_level" -ge "$trace_level" ]; then
insert_trace=1
fi
else
if [ "$log_level" -eq "$trace_level" ]; then
insert_trace=1
fi
fi
if [ $insert_trace -eq 1 ]; then
if [ $msg_printed -eq 0 ]; then
echo "$prefix $msg"
msg_printed=1
fi
$func $trace
fi
shift
done
}
# parameters: type
show_memstats()
{
case $1 in
shortmem)
cat /proc/meminfo | grep -e "^MemFree" -e "^Cached" -e "^Slab"
;;
mem)
cat /proc/meminfo
;;
slab)
cat /proc/slabinfo
;;
iomem)
cat /proc/iomem
;;
komem)
tracekomem
;;
esac
}
remove_hostonly_files() {
rm -fr /etc/cmdline /etc/cmdline.d/*.conf "$hookdir/initqueue/finished"
if [ -f /lib/dracut/hostonly-files ]; then
while read line || [ -n "$line" ]; do
[ -e "$line" ] || [ -h "$line" ] || continue
rm -f "$line"
done < /lib/dracut/hostonly-files
fi
}
|
xlpang/dracut
|
modules.d/99base/dracut-lib.sh
|
Shell
|
gpl-2.0
| 33,981 |
#!/bin/bash
runthis(){
## print the command to the logfile
echo "$@"
    ## run the command (its output goes wherever the caller redirects it)
eval "$@"
}
printf "\n\033[1;34m******************************Start cloud_write_read_nameHash.sh script**********************************\033[0m\n"
printf "\033[1;34mThis script writes 10 blocks of BS=4K to cloud store with nameHash flag. Reads back the written data and compares if done correctly.\033[0m\n"
printf "\n\033[1;34mclean up cloud store bucket first\033[0m\n"
runthis "../cloudbacker --accessFile=/home/build/.s3backer_passwd_gcs --size=10M --blockSize=4K --nameHash gs://gcs-nearline-sujatha --erase --force"
runthis "../cloudbacker --accessFile=/home/build/.s3backer_passwd_gcs --size=10M --blockSize=4K gs://gcs-nearline-sujatha --erase --force"
printf "\n\033[1;34mmount and write blocks to cloudstore\033[0m\n"
runthis "../cloudbacker --accessFile=/home/build/.s3backer_passwd_gcs --size=10M --blockSize=4K --nameHash gs://gcs-nearline-sujatha /mnt/source/ --listBlocks"
printf "\n\033[1;34mwrite blocks to cloudstore\033[0m\n"
runthis "dd if=writeTocloud.txt of=/mnt/source/file bs=4096 count=10"
printf "\n\033[1;34munmount file system\033[0m\n"
runthis "umount /mnt/source"
printf "\n\033[1;34mwaiting for cloudbacker to exit\033[0m\n"
ps axho comm| grep cloudbacker > /dev/null
result=$?
while [ "${result}" -eq "0" ]; do
sleep 1
ps axho comm| grep cloudbacker > /dev/null
result=$?
done
printf "\n\033[1;34munmount done\033[0m\n"
printf "\n\033[1;34mremount file system\033[0m\n"
runthis "../cloudbacker --accessFile=/home/build/.s3backer_passwd_gcs --size=10M --blockSize=4K --nameHash gs://gcs-nearline-sujatha /mnt/source/ --listBlocks"
printf "\n\033[1;34mread blocks written earlier\033[0m\n"
runthis "dd of=readFromcloud.txt if=/mnt/source/file bs=4096 count=10"
printf "\n\033[1;34mcompare writeTocloud.txt and readFromcloud.txt to confirm if read write worked correctly.\033[0m\n"
if diff writeTocloud.txt readFromcloud.txt >/dev/null ; then
printf "\n\033[1;32mboth files are same\033[0m\n"
printf "\n\033[1;32m****************** TEST 1 :: PASS ********************\033[0m\n"
else
printf "\n\033[1;31mfiles are different\033[0m\n"
printf "\n\033[1;31m****************** TEST 1 :: FAIL ********************\033[0m\n"
fi
printf "\n\033[1;34munmount file system\033[0m\n"
runthis "umount /mnt/source"
printf "\n\033[1;34mwaiting for cloudbacker to exit\033[0m\n"
ps axho comm| grep cloudbacker > /dev/null
result=$?
while [ "${result}" -eq "0" ]; do
sleep 1
ps axho comm| grep cloudbacker > /dev/null
result=$?
done
printf "\n\033[1;34munmount done\033[0m\n"
printf "\n\033[1;34mremount file system without nameHash flag -- should mount as auto detection is enabled\033[0m\n"
runthis "../cloudbacker --accessFile=/home/build/.s3backer_passwd_gcs --size=10M --blockSize=4K gs://gcs-nearline-sujatha /mnt/source/ --listBlocks"
if [ $? -eq 0 ]; then
printf "\n\033[1;32mfile system mounted with auto-detection\033[0m\n"
printf "\n\033[1;34mread blocks written earlier\033[0m\n"
runthis "dd of=readFromcloud.txt if=/mnt/source/file bs=4096 count=10"
printf "\n\033[1;34mcompare writeTocloud.txt and readFromcloud.txt to confirm if read write worked correctly.\033[0m\n"
if diff writeTocloud.txt readFromcloud.txt >/dev/null ; then
printf "\n\033[1;32mboth files are same\033[0m\n"
printf "\n\033[1;32m****************** TEST 2 :: PASS ********************\033[0m\n"
else
printf "\n\033[1;31mfiles are different\033[0m\n"
printf "\n\033[1;31m****************** TEST 2 :: FAIL ********************\033[0m\n"
fi
printf "\n\033[1;34munmount file system\033[0m\n"
runthis "umount /mnt/source"
printf "\n\033[1;34mwaiting for cloudbacker to exit\033[0m\n"
ps axho comm| grep cloudbacker > /dev/null
result=$?
while [ "${result}" -eq "0" ]; do
sleep 1
ps axho comm| grep cloudbacker > /dev/null
result=$?
done
printf "\n\033[1;34munmount done\033[0m\n"
else
printf "\n\033[1;31mfile system mounting failed\033[0m\n"
printf "\n\033[1;31m****************** TEST 2 :: FAIL ********************\033[0m\n"
fi
printf "\n\033[1;31mNOTE :: cleanup read log files using command 'rm -f readFrom*.txt', if required\033[0m\n"
printf "\n\033[1;34m************************************End cloud_write_read_nameHash.sh script*******************************************\033[0m\n"
|
bprotopopov/s3backer
|
Testing_GCS/cloud_write_read_nameHash.sh
|
Shell
|
gpl-2.0
| 4,523 |
#!/usr/bin/env sh
# Martin Kersner, [email protected]
# 2016/03/10
# TODO store init model and DeepLab-LargeFOV files in one directory
NET_ID=DeepLab-LargeFOV
INIT_PATH=init
MODEL_PATH=exper/voc12/model/${NET_ID}
CONFIG_PATH=exper/voc12/config/${NET_ID}
mkdir -p ${INIT_PATH}
mkdir -p ${MODEL_PATH}
mkdir -p ${CONFIG_PATH}
cd ${INIT_PATH}
wget -nc http://ccvl.stat.ucla.edu/ccvl/init_models/vgg16_20M.caffemodel
cd ../${MODEL_PATH}
ln -s ../../../../${INIT_PATH}/vgg16_20M.caffemodel init.caffemodel
cd ../../config/${NET_ID}
ln -s ../../../../${NET_ID}/solver.prototxt solver.prototxt
ln -s ../../../../${NET_ID}/solver2.prototxt solver2.prototxt
ln -s ../../../../${NET_ID}/train.prototxt train.prototxt
ln -s ../../../../${NET_ID}/test.prototxt test.prototxt
cd ../../../../
|
sadjadasghari/deeplab4a2d
|
get_DeepLab_LargeFOV_voc12_data.sh
|
Shell
|
gpl-3.0
| 788 |
#!/bin/bash
# "export" is a shell builtin, so the helper gets its own name
function export_png {
inkscape --export-area-page --export-dpi $1 --export-png=$2 $3
echo
}
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <svg file>"
exit 1
fi
outdir="../src/main/res/mipmap"
dpi=90
outfile=ic_launcher.png
export_png $dpi $outdir-mdpi/$outfile $1
export_png $(echo "scale=2; $dpi*1.5" | bc) $outdir-hdpi/$outfile $1
export_png $(echo "scale=2; $dpi*2" | bc) $outdir-xhdpi/$outfile $1
export_png $(echo "scale=2; $dpi*3" | bc) $outdir-xxhdpi/$outfile $1
export_png $(echo "scale=2; $dpi*4" | bc) $outdir-xxxhdpi/$outfile $1
|
jvalue/hochwasser-app
|
app/images/export_launcher.sh
|
Shell
|
agpl-3.0
| 529 |
#!/bin/bash
# get directory of script
DIR="$( cd "$( dirname "$0" )" && pwd )"
# assuming a local guacamole version is located properly
LOCAL_GUACAMOLE="$DIR/../../../guacamole"
LOCAL_AVANGO="$DIR/../../../avango"
# if not, this path will be used
GUACAMOLE=/opt/guacamole/master
AVANGO=/opt/avango/master
# third party libs
export LD_LIBRARY_PATH=/opt/boost/boost_1_55_0/lib:/opt/zmq/current/lib
# schism
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/schism/current/lib/linux_x86:/opt/Awesomium/lib
# avango
export LD_LIBRARY_PATH="$LOCAL_AVANGO/lib":$AVANGO/lib:$LD_LIBRARY_PATH:/opt/pbr/inst_cb/lib
export PYTHONPATH="$LOCAL_AVANGO/lib/python3.4":"$LOCAL_AVANGO/examples":$AVANGO/lib/python3.4:$AVANGO/examples
# guacamole
export LD_LIBRARY_PATH="$LOCAL_GUACAMOLE/lib":$GUACAMOLE/lib:$LD_LIBRARY_PATH
# run daemon
if [ -f "$LOCAL_AVANGO/examples/examples_common/daemon.py" ]
then
python3 $LOCAL_AVANGO/examples/examples_common/daemon.py > /dev/null &
else
python3 $AVANGO/examples/examples_common/daemon.py > /dev/null &
fi
# run program
cd "$DIR" && python3 ./main.py
# kill daemon
kill %1
|
yaroslav-tarasov/avango
|
examples/volume/start.sh
|
Shell
|
lgpl-3.0
| 1,109 |
#!/usr/bin/env bash
set -e
[[ -z $DEBUG ]] || set -x
cur_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
repo_root=$(git rev-parse --show-toplevel 2> /dev/null)
docker_compose_dir="${repo_root}"
(
cd $docker_compose_dir
docker-compose -f ${docker_compose_dir}/docker-compose.release.yml config > ${cur_dir}/docker-compose.yml
)
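# Usage note: the script can be run from anywhere inside the repo (the root
# is found via git rev-parse); setting DEBUG to any value, e.g.
# "DEBUG=1 ./production/generate-docker-compose.sh", enables xtrace via the
# check at the top.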
|
myambition/ambition
|
production/generate-docker-compose.sh
|
Shell
|
unlicense
| 342 |
#!/bin/bash
set -euo pipefail
set -o xtrace
if ! type innoextract >/dev/null 2>&1; then
mkdir -p _deps
pushd _deps
curl -fLO http://constexpr.org/innoextract/files/innoextract-1.6-windows.zip
7z x innoextract-1.6-windows.zip -obin innoextract.exe
innoextract --version
rm innoextract-1.6-windows.zip
popd
fi
if ! type ninja >/dev/null 2>&1; then
mkdir -p _deps
pushd _deps
curl -fLO https://github.com/ninja-build/ninja/releases/download/v1.7.1/ninja-win.zip
7z x ninja-win.zip -obin ninja.exe
ninja --version
rm ninja-win.zip
popd
fi
if [[ $COMPILER =~ msvc-16 && ! -d "_deps/boost_1_56_0-msvc-10.0-64" ]]; then
mkdir -p _deps
pushd _deps
curl -fLO 'http://netix.dl.sourceforge.net/project/boost/boost-binaries/1.56.0/boost_1_56_0-msvc-10.0-64.exe'
# Only extract headers and libraries so it completes quicker. (It's not
  # perfect, because innoextract doesn't appear to let you only extract
# particular directories. But it does seem to cut down on the amount of
# unwanted docs/source files we waste time extracting.)
innoextract boost_1_56_0-msvc-10.0-64.exe -I boost -I lib64-msvc-10.0 -s -p
rm boost_1_56_0-msvc-10.0-64.exe
# innoextract writes out a folder called 'app'. Rename it to something useful.
mv app boost_1_56_0-msvc-10.0-64
# Try to keep under Appveyor cache limit by removing anything we don't need.
# - documentation and source files
# - dlls and import libs
# - remove libs linked against static CRT
pushd boost_1_56_0-msvc-10.0-64
rm -fr doc libs tools
rm -fr lib64-msvc-10.0/boost*
rm -fr lib64-msvc-10.0/*vc100-{s,mt-s,sgd,mt-sgd}-*
popd
popd
fi
|
nickhutchinson/benchmark
|
.appveyor_setup.sh
|
Shell
|
apache-2.0
| 1,715 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This contains util code for testing kubectl.
set -o errexit
set -o nounset
set -o pipefail
# Set locale to ensure English responses from kubectl commands
export LANG=C
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following to have already been done by whatever sources this script:
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
SECURE_API_PORT=${SECURE_API_PORT:-6443}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="gcr.io/google-containers/perl"
IMAGE_PAUSE_V2="gcr.io/google-containers/pause:2.0"
IMAGE_DAEMONSET_R2="gcr.io/google-containers/pause:latest"
IMAGE_DAEMONSET_R2_2="gcr.io/google-containers/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="gcr.io/google_containers/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="gcr.io/google_containers/nginx-slim:0.8"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
selfsubjectaccessreviews="selfsubjectaccessreviews"
customresourcedefinitions="customresourcedefinitions"
daemonsets="daemonsets"
controllerrevisions="controllerrevisions"
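# These are presumably referenced as "${pods}", "${services}", etc. in the
# assertions below; with "set -o nounset" in effect, a misspelled variable
# name fails at expansion time instead of silently querying a bogus resource.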
# include shell2junit library
sh2ju="${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
if [[ -f "${sh2ju}" ]]; then
source "${sh2ju}"
else
echo "failed to find third_party/forked/shell2junit/sh2ju.sh"
exit 1
fi
# record_command runs the command and records its output/error messages in junit format
# it expects the first argument to be the name of the command
# Example:
# record_command run_kubectl_tests
#
# WARNING: Variable changes in the command will NOT be effective after record_command returns.
# This is because the command runs in subshell.
function record_command() {
set +o nounset
set +o errexit
local name="$1"
local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "Recording: ${name}"
echo "Running command: $@"
juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
if [[ $? -ne 0 ]]; then
echo "Error when running ${name}"
foundError="True"
fi
set -o nounset
set -o errexit
}
# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
[[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
[[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
PROXY_PID=
PROXY_PORT=
PROXY_PORT_FILE=
}
# Starts "kubect proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
stop-proxy
PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"
if [ $# -eq 0 ]; then
kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
else
kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
fi
PROXY_PID=$!
PROXY_PORT=
local attempts=0
while [[ -z ${PROXY_PORT} ]]; do
if (( ${attempts} > 9 )); then
kill "${PROXY_PID}"
kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
fi
sleep .5
kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
attempts=$((attempts+1))
done
kube::log::status "kubectl proxy running on port ${PROXY_PORT}"
# We try checking kubectl proxy 30 times with 1s delays to avoid occasional
# failures.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}
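# Illustrative usage: "start-proxy" alone health-checks the root /healthz
# endpoint; "start-proxy <prefix>" passes the prefix to --api-prefix and
# waits on http://127.0.0.1:${PROXY_PORT}/<prefix>/healthz instead.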
function cleanup()
{
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy
kube::etcd::cleanup
rm -rf "${KUBE_TEMP}"
local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "junit report dir:" ${junit_dir}
kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
local status
local -r address=$1
local -r desired=$2
local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
if [ "${status}" == "${desired}" ]; then
return 0
fi
echo "For address ${full_address}, got ${status} but wanted ${desired}"
return 1
}
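# Example (path and status are illustrative):
#   check-curl-proxy-code "/healthz" 200
# returns non-zero with a diagnostic if the proxy answers anything but 200.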
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
preserve_err_file=${PRESERVE_ERR_FILE-false}
for count in {0..3}; do
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
if [ "$preserve_err_file" != true ] ; then
rm "${ERROR_FILE}"
fi
break
fi
done
}
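# Example: kubectl-with-retry patch pod valid-pod -p='{"spec":{}}' makes up
# to four attempts with exponential backoff (1s, 2s, 4s, 8s) whenever the
# server reports "the object has been modified".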
# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
local i
for i in $(seq 1 10); do
kubeout=`kubectl get po -l $1 --template '{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
if [[ $kubeout = $2 ]]; then
return
fi
echo Waiting for pods: $2, found $kubeout
sleep $i
done
kube::log::error_exit "Timeout waiting for pods with label $1"
}
# Code to be run before running the tests.
setup() {
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"
kube::etcd::start
# Find a standard sed instance for use with edit scripts
SED=sed
if which gsed &>/dev/null; then
SED=gsed
fi
if ! ($SED --version 2>&1 | grep -q GNU); then
echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'."
exit 1
fi
kube::log::status "Building kubectl"
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"
# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"
# TODO: we need to note down the current default namespace and set back to this
# namespace after the tests are done.
kubectl config view
CONTEXT="test"
kubectl config set-context "${CONTEXT}"
kubectl config use-context "${CONTEXT}"
kube::log::status "Setup complete"
}
########################################################
# Kubectl version (--short, --client, --output) #
########################################################
run_kubectl_version_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl version"
TEMP="${KUBE_TEMP}"
kubectl get "${kube_flags[@]}" --raw /version
# create version files, one for the client, one for the server.
# these are the files we will use to ensure that the remainder output is correct
kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"
kube::log::status "Testing kubectl version: check client only output matches expected output"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"
kube::log::status "Testing kubectl version: verify json output"
kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"
kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"
kube::log::status "Testing kubectl version: compare json output using additional --short flag"
kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"
kube::log::status "Testing kubectl version: compare json output with yaml output"
kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"
set +o nounset
set +o errexit
}
# Runs all pod related tests.
run_pod_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:pods)"
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
# Describe command should print detailed information
kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_object_events_assert pods 'valid-pod'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert pods 'valid-pod' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert pods 'valid-pod' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert pods
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert pods false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert pods true
### Validate Export ###
kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")
### Delete POD valid-pod by id
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --now
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --now
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --grace-period=0
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command succeeds without --force by waiting
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from dumped YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
echo "${output_pod}" | $SED '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod from JSON
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create POD valid-pod from YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Deleting PODs with no parameter mustn't kill everything
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete pods "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with --all and a label selector is not permitted
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete all PODs
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all remove all the pods
# Post-condition: no POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
# Detailed tests for describe pod output
### Create a new namespace
# Pre-condition: the test-kubectl-describe-pod namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-kubectl-describe-pod
# Post-condition: namespace 'test-kubectl-describe-pod' is created.
kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'
### Create a generic secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type'
### Create a generic configmap
# Pre-condition: no CONFIGMAP exists
kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'
### Create a pod disruption budget with minAvailable
# Command
kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-1 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '2'
# Command
kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
### Create a pod disruption budget with maxUnavailable
# Command
kubectl create pdb test-pdb-3 --selector=app=rails --max-unavailable=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-3 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '2'
# Command
kubectl create pdb test-pdb-4 --selector=app=rails --max-unavailable=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'
### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod
# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod
kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Describe command (resource only) should print detailed information about environment variables
kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Clean-up
kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
kubectl delete namespace test-kubectl-describe-pod
### Create two PODs
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/redis/redis-master.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-master PODs are created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
### Delete multiple PODs at once
# Pre-condition: valid-pod and redis-master PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
# Command
kubectl delete pods valid-pod redis-master "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
# Post-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
# Post-condition: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
### Label the valid-pod POD with empty label value
# Pre-condition: valid-pod does not have label "emptylabel"
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
# Command
kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptylabel" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''
### Annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '<no value>'
# Command
kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptyannotation" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
### Record label change
# Pre-condition: valid-pod does not have record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
# Command
kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod has record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Do not record label change
# Command
kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation still contains command with --record=true
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Record label change with unspecified flag and previous change already recorded
# Command
kubectl label pods valid-pod new-record-change=true "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation contains new change
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create pod-with-precision POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'
## Patch preserves precision
# Command
kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
# Post-condition: pod-with-precision POD has patched annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
# Command
kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has label
kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
# Command
kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Cleanup
kubectl delete pod pod-with-precision "${kube_flags[@]}"
### Annotate POD YAML file locally without affecting the live pod.
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Command
kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"
# Pre-condition: annotationkey is annotationvalue
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Command
output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: annotationkey is still annotationvalue in the live pod, but command output is the new value
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
kube::test::if_has_string "${output_message}" "localvalue"
# Cleanup
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
### Create valid-pod POD
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl create --edit can update the label field of multiple resources. tmp-editor.sh is a fake editor
TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
echo -e "#!/bin/bash\n$SED -i \"s/mock/modified/g\" \$1" > ${TEMP}
chmod +x ${TEMP}
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
rm ${TEMP}
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
## kubectl create --edit won't create anything if user makes no changes
[ "$(EDITOR=cat kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1 | grep 'Edit cancelled')" ]
## Create valid-pod POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## Patch can modify a local object
kubectl patch --local -f pkg/kubectl/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never"
## Patch fails with error message "not patched" and exit code 1
output_message=$(! kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"replicas":7}}' 2>&1)
kube::test::if_has_string "${output_message}" 'not patched'
## Patch pod can change image
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# Post-condition: valid-pod has the record annotation
kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}"
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}'
# Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:'
## If resourceVersion is specified in the patch, it is treated as a precondition: if it differs from the resourceVersion stored in the server, the patch is rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
# Command
# Needs to retry because other party may change the resource.
for count in {0..3}; do
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
break
fi
done
## If the resourceVersion differs from the one stored in the server, the patch will be rejected.
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
((resourceVersion+=100))
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the conflict
if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
## --force replace pod can change other field, e.g., spec.container.name
# Command
kubectl get "${kube_flags[@]}" pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
# Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
## check replace --grace-period requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
## check replace --timeout requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'
#cleaning
rm /tmp/tmp-valid-pod.json
## replace of a cluster scoped resource can succeed
# Pre-condition: a node exists
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test"
}
}
__EOF__
kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test",
"annotations": {"a":"b"},
"resourceVersion": "0"
}
}
__EOF__
# Post-condition: the node command succeeds
kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
kubectl delete node node-v1-test "${kube_flags[@]}"
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
# Pre-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
[[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]]
# Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
# cleaning
rm /tmp/tmp-editor.sh
## kubectl edit should work on Windows
[ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
[ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
[ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
[ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
[ "$(EDITOR=cat kubectl edit ns | grep 'kind: List')" ]
### Label POD YAML file locally without affecting the live pod.
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: name is still valid-pod in the live pod, but command output is the new value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
kube::test::if_has_string "${output_message}" "localonlyvalue"
### Overwriting an existing label is not permitted
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
### --overwrite must be used to overwrite existing label, can be applied to all resources
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is valid-pod-super-sayan
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs from 1 yaml file
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: redis-master and valid-pod PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
### Delete two PODs from 1 yaml file
# Pre-condition: redis-master and valid-pod PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: no PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply should update configuration annotations only if apply is already called
## 1. kubectl create doesn't set the annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is applied
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
## 4. kubectl replace updates an existing annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
! diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# runs specific kubectl create tests
run_create_secret_tests() {
set -o nounset
set -o errexit
### Create generic secret with explicit namespace
# Pre-condition: secret 'mysecret' does not exist
output_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'secrets "mysecret" not found'
# Command
output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run --from-literal=foo=bar -o jsonpath='{.metadata.namespace}' --namespace=user-specified)
# Post-condition: mysecret still not created since --dry-run was used
# Output from 'create' command should contain the specified --namespace value
failure_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${failure_message}" 'secrets "mysecret" not found'
kube::test::if_has_string "${output_message}" 'user-specified'
# Command
output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run --from-literal=foo=bar -o jsonpath='{.metadata.namespace}')
# Post-condition: jsonpath for .metadata.namespace should be empty for object since --namespace was not explicitly specified
kube::test::if_empty_string "${output_message}"
kubectl create configmap tester-create-cm -o json --dry-run | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f -
kubectl delete -ndefault "${kube_flags[@]}" configmap tester-create-cm
set +o nounset
set +o errexit
}
# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply"
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl apply should be able to clear defaulted fields.
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
# Post-Condition: deployment "test-deployment-retainkeys" created
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
# Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Command: apply a deployment "test-deployment-retainkeys" should clear
# defaulted fields and successfully update the deployment
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"
## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
## kubectl apply --prune
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "a" not found'
# cleanup
kubectl delete pods b
# same thing without prune for a sanity check
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check both pods exist
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# cleanup
kubectl delete pod/a pod/b
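# Note (illustrative): --prune deletes objects that match the label selector
# and were created by a previous apply but are absent from the current file
# set. Assuming a.yaml and b.yaml each define one labeled pod, the lifecycle
# exercised above is essentially:
#
#   kubectl apply --prune -l prune-group=true -f a.yaml   # creates pod a
#   kubectl apply --prune -l prune-group=true -f b.yaml   # creates b, prunes a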
## kubectl apply --prune requires a --all flag to select everything
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" \
'all resources selected for prune without explicitly passing --all'
# should apply everything
kubectl apply --all --prune -f hack/testdata/prune
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
kubectl delete pod/a pod/b
## kubectl apply --prune should fall back to delete for non-reapable types
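# Note: at this kubectl version, "reapable" types have a registered reaper
# that performs a graceful teardown (e.g. scaling a controller down before
# deleting it); for types without one, such as the PersistentVolumeClaims
# below, prune issues a plain delete instead.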
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
## kubectl apply --prune --prune-whitelist
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and don't prune pod a by overwriting whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and prune pod a with default whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# cleanup
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
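# Note (illustrative): --prune-whitelist entries use the form
# <group>/<version>/<Kind>, with "core" standing in for the legacy API group:
#
#   kubectl apply --prune -l k=v -f svc.yaml --prune-whitelist core/v1/Service
#
# Passing the flag replaces the default whitelist entirely, which is why pod
# "a" survives the Service-only apply above but is pruned by the later apply
# that uses the defaults.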
set +o nounset
set +o errexit
}
# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl create filter"
## kubectl create -f with label selector should only create matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# create
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
set +o nounset
set +o errexit
}
run_kubectl_apply_deployments_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply deployments"
## kubectl apply should propagate user defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check the right labels exist
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# check the right labels exist
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
# Runs tests for --save-config.
run_save_config_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --save-config"
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
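# Note (illustrative): the annotation written by --save-config can be read
# back directly; it holds the JSON of the last applied object and is what
# later "kubectl apply" calls diff against:
#
#   kubectl get pod test-pod \
#     -o jsonpath='{.metadata.annotations.kubectl\.kubernetes\.io/last-applied-configuration}'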
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod"
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# tests kubectl group prefix matching
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_run_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl run"
## kubectl run should create deployments, jobs, or cronjobs
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By"
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Post-condition: no pods exist.
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx-extensions "--image=$IMAGE_NGINX" "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
# and the old generator was used, i.e. old defaults are applied
output_message=$(kubectl get deployment.extensions/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-extensions "${kube_flags[@]}"
# Command
kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
# and the new generator was used, i.e. new defaults are applied
output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-apps "${kube_flags[@]}"
# Pre-Condition: no CronJob exists
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: CronJob "pi" is created
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Clean up
kubectl delete cronjobs pi "${kube_flags[@]}"
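# For reference (a sketch of this kubectl version's behavior): when no
# --generator is given, kubectl run picks one from the flags -- roughly
# --restart=Always -> deployment, --restart=OnFailure -> job,
# --restart=OnFailure plus --schedule -> cronjob, --restart=Never -> pod:
#
#   kubectl run pi --restart=OnFailure "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)'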
set +o nounset
set +o errexit
}
run_kubectl_get_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl get"
### Test retrieval of non-existing pods
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of non-existing POD with output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of pods when none exist with non-human readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
### Test retrieval of pods when none exist, with human-readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test retrieval of non-existing POD with json output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
# Post-condition: make sure we don't display an empty List
if kube::test::if_has_string "${output_message}" 'List'; then
echo 'Unexpected List output'
echo "${LINENO} $(basename $0)"
exit 1
fi
### Test kubectl get all
output_message=$(kubectl --v=6 --namespace default get all 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get 200 OK from all the url(s)
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1beta1/namespaces/default/statefulsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
### Test --allow-missing-template-keys
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --allow-missing-template-keys defaults to true for jsonpath templates
kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
## check --allow-missing-template-keys defaults to true for go templates
kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'missing is not found'
## check --allow-missing-template-keys=false results in an error for a missing key with go
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
### Test kubectl get watch
output_message=$(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'STATUS' # headers
kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'pods/valid-pod' # resource name
output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
output_message=$(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" '"invalid-pod" not found'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: PODs redis-master and valid-pod exist
# Check that all items in the list are printed
output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "redis-master valid-pod"
# cleanup
kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_request_timeout_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl request timeout"
### Test global request timeout option
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --request-timeout on 'get pod'
output_message=$(kubectl get pod valid-pod --request-timeout=1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout on 'get pod' with --watch
output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'
## check --request-timeout value with no time unit
output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout value with invalid time unit
output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
kube::test::if_has_string "${output_message}" 'Invalid timeout value'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_crd_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl crd"
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "foos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"names": {
"plural": "foos",
"kind": "Foo"
}
}
}
__EOF__
# Post-Condition: the CustomResourceDefinition exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'foos.company.com:'
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "bars.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"names": {
"plural": "bars",
"kind": "Bar"
}
}
}
__EOF__
# Post-Condition: the CustomResourceDefinition exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:'
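# Note (illustrative): with both definitions registered, the server exposes
# the discovery and list endpoints polled by kube::util::non_native_resources
# below (e.g. /apis/company.com/v1/foos). The Foo instance used by the tests
# is presumably shaped along these lines (a sketch based on the assertions,
# not the verbatim testdata):
#
#   apiVersion: company.com/v1
#   kind: Foo
#   metadata:
#     name: test
#   someField: field1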
run_non_native_resource_tests
# teardown
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
kubectl delete customresourcedefinitions/bars.company.com "${kube_flags_with_token[@]}"
set +o nounset
set +o errexit
}
kube::util::non_native_resources() {
local times
local wait
local failed
times=30
wait=10
local i
for i in $(seq 1 $times); do
failed=""
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1' || failed=true
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/foos' || failed=true
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/bars' || failed=true
if [ -z "${failed}" ]; then
return 0
fi
sleep ${wait}
done
kube::log::error "Timed out waiting for non-native-resources; tried ${times} waiting ${wait}s between each"
return 1
}
run_non_native_resource_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl non-native resources"
kube::util::non_native_resources
# Test that we can list this new CustomResource (foos)
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new CustomResource (bars)
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Foo
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}"
# Test that we can list this new custom resource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test alternate forms
kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test all printers, with lists and individual items
kube::log::status "Testing CustomResource printing"
kubectl "${kube_flags[@]}" get foos
kubectl "${kube_flags[@]}" get foos/test
kubectl "${kube_flags[@]}" get foos -o name
kubectl "${kube_flags[@]}" get foos/test -o name
kubectl "${kube_flags[@]}" get foos -o wide
kubectl "${kube_flags[@]}" get foos/test -o wide
kubectl "${kube_flags[@]}" get foos -o json
kubectl "${kube_flags[@]}" get foos/test -o json
kubectl "${kube_flags[@]}" get foos -o yaml
kubectl "${kube_flags[@]}" get foos/test -o yaml
kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
output_message=$(kubectl "${kube_flags[@]}" get foos/test -o name)
kube::test::if_has_string "${output_message}" 'foos/test'
# Test patching
kube::log::status "Testing CustomResource patching"
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
# Get local version
CRD_RESOURCE_FILE="${KUBE_TEMP}/crd-foos-test.json"
kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
# cannot apply strategic patch locally
CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat ${CRD_PATCH_ERROR_FILE})"
else
kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${CRD_PATCH_ERROR_FILE})"
exit 1
fi
# can apply merge patch locally
kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
# can apply merge patch remotely
kubectl "${kube_flags[@]}" patch --record -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
rm "${CRD_RESOURCE_FILE}"
rm "${CRD_PATCH_ERROR_FILE}"
# Test labeling
kube::log::status "Testing CustomResource labeling"
kubectl "${kube_flags[@]}" label foos --all listlabel=true
kubectl "${kube_flags[@]}" label foo/test itemlabel=true
# Test annotating
kube::log::status "Testing CustomResource annotating"
kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
# Test describing
kube::log::status "Testing CustomResource describing"
kubectl "${kube_flags[@]}" describe foos
kubectl "${kube_flags[@]}" describe foos/test
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
# Delete the resource with cascade.
kubectl "${kube_flags[@]}" delete foos test --cascade=true
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Bar
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}"
# Test that we can list this new custom resource
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that we can watch the resource.
# Start watcher in background with process substitution,
# so we can read from stdout asynchronously.
kube::log::status "Testing CustomResource watching"
exec 3< <(kubectl "${kube_flags[@]}" get bars --request-timeout=1m --watch-only -o name & echo $! ; wait)
local watch_pid
read <&3 watch_pid
# We can't be sure when the watch gets established,
# so keep triggering events (in the background) until something comes through.
local tries=0
while [ ${tries} -lt 10 ]; do
tries=$((tries+1))
kubectl "${kube_flags[@]}" patch bars/test -p "{\"patched\":\"${tries}\"}" --type=merge
sleep 1
done &
local patch_pid=$!
# Wait up to 30s for a complete line of output.
local watch_output
read <&3 -t 30 watch_output
# Stop the watcher and the patch loop.
kill -9 ${watch_pid}
kill -9 ${patch_pid}
kube::test::if_has_string "${watch_output}" 'bars/test'
# Delete the resource without cascade.
kubectl "${kube_flags[@]}" delete bars test --cascade=false
# Make sure it's gone
kube::test::wait_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create single item via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
# Test that we have created a foo named test
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that applying the same config (an effectively empty patch) doesn't change fields
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that the nested subfield still has the expected value
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
# Update a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-updated-subfield.yaml
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
# Delete a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-deleted-subfield.yaml
# Test that apply has deleted the field
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
# Test that the field does not exist
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-added-subfield.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/foo.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create list via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
# Test that we have created a foo and a bar from a list
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that re-applying a list doesn't change anything
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Update fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-updated-field.yaml
# Test that apply has updated the fields
kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
# Delete fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-deleted-field.yaml
# Test that apply has deleted the fields
kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
# Test that the new fields do not exist yet
kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-added-field.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/multi-crd-list.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply --prune
# Test that no foos or bars exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on foo.yaml that has foo/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# check that the right custom resources exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on bar.yaml that has bar/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# check that the right custom resources exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/bar.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test 'kubectl create' with namespace, and namespace cleanup.
kubectl "${kube_flags[@]}" create namespace non-native-resources
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml --namespace=non-native-resources
kube::test::get_object_assert bars '{{len .items}}' '1' --namespace=non-native-resources
kubectl "${kube_flags[@]}" delete namespace non-native-resources
# Make sure objects go away.
kube::test::wait_object_assert bars '{{len .items}}' '0' --namespace=non-native-resources
# Make sure namespace goes away.
local tries=0
while kubectl "${kube_flags[@]}" get namespace non-native-resources && [ ${tries} -lt 10 ]; do
tries=$((tries+1))
sleep ${tries}
done
set +o nounset
set +o errexit
}
run_recursive_resources_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing recursive resources"
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Edit multiple busybox PODs recursively from a directory of YAML files, updating the image field. tmp-editor.sh is a fake editor
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
# The reason why busybox0 & busybox1 PODs are not edited is because the editor tries to load all objects in
# a list but since it contains invalid objects, it will never open.
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# cleaning
rm /tmp/tmp-editor.sh
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
### Convert deployment YAML file locally without affecting the live deployment.
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Command
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
kube::test::if_has_string "${output_message}" "apps/v1beta1"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controller recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: busybox0 & busybox1 replication controllers are created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Rollout on multiple deployments recursively
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Roll back the deployments to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: the rollback for nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Pause the deployments recursively
PRESERVE_ERR_FILE=true
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Resume the deployments recursively
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Retrieve the rollout history of the deployments recursively
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
kube::test::if_has_string "${output_message}" "nginx0-deployment"
kube::test::if_has_string "${output_message}" "nginx1-deployment"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to roll back the replication controllers to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Attempt to pause the replication controllers recursively
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
## Attempt to resume the replication controllers recursively
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
# Clean up
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
set +o nounset
set +o errexit
}
run_namespace_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:namespaces)"
### Create a new namespace
# Pre-condition: only the "default" namespace exists
# The Pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with the same name in the test.
# kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Clean up
kubectl delete namespace my-namespace
######################
# Pods in Namespaces #
######################
if kube::test::if_supports_resource "${pods}" ; then
### Create a new namespace
# Pre-condition: the other namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace other
# Post-condition: namespace 'other' is created.
kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
### Create POD valid-pod in specific namespace
# Pre-condition: no POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: a resource cannot be retrieved by name across all namespaces
output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
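# Note: fetching a single object by name requires a concrete namespace; across
# all namespaces only list/selector queries are valid, e.g. (illustrative, not
# part of the assertions):
#   kubectl get pods --all-namespaces
#   kubectl get pods --all-namespaces -l name=valid-pod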
### Delete POD valid-pod in specific namespace
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other
fi
set +o nounset
set +o errexit
}
run_secrets_test() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing secrets"
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-secrets
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
### Create a generic secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a docker-registry secret in a specific namespace
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='[email protected]' --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a tls secret
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
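# A kubernetes.io/tls secret keeps the PEM material under the fixed keys
# tls.crt and tls.key. Illustrative check (not asserted on):
#   kubectl get secret/test-secret --namespace=test-secrets -o jsonpath='{.data.tls\.crt}' | base64 --decode | head -n1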
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Create a secret using stringData
kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": "secret-string-data"
},
"data": {
"k1":"djE=",
"k2":""
},
"stringData": {
"k2":"v2"
}
}
__EOF__
# Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
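# stringData is a write-only convenience: its entries are base64-encoded and
# merged into data, overriding colliding keys, so k2 ends up as "djI=":
#   echo -n v2 | base64   # djI=   (illustrative, not run by the test)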
# Clean up
kubectl delete secret secret-string-data --namespace=test-secrets
### Create a secret using output flags
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no secret exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
[[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
## Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Clean up
kubectl delete namespace test-secrets
set +o nounset
set +o errexit
}
run_configmap_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing configmaps"
kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
kubectl delete configmap test-configmap "${kube_flags[@]}"
### Create a new namespace
# Pre-condition: the test-configmaps namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-configmaps
# Post-condition: namespace 'test-configmaps' is created.
kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
### Create a generic configmap in a specific namespace
# Pre-condition: no configmap exists in the test-configmaps namespace
kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
[[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete namespace test-configmaps
set +o nounset
set +o errexit
}
run_service_tests() {
set -o nounset
set -o errexit
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:services)"
### Create redis-master service from JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Describe command should print detailed information
kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_object_events_assert services 'redis-master'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert services 'redis-master' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert services 'redis-master' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert services
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert services false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert services true
### set selector
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Set selector of a local file without talking to the server
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
! kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
# Set command to change the selector.
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
# Set command to reset the selector back to the original one.
kubectl set selector -f examples/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}"
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
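# Note: --local renders the change purely from a file passed via -f and never
# contacts the server, so pairing it with a live resource name fails, while
# --dry-run resolves the live object and only skips persisting the change.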
### Dump current redis-master service
output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
### Delete redis-master-service by id
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create redis-master-service from dumped JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: redis-master service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
### Create redis-master-v1-test service
# Pre-condition: redis-master-service service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "service-v1-test"
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
__EOF__
# Post-condition: service-v1-test service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
### Identity
kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
### Delete services by id
# Pre-condition: service-v1-test exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
kubectl delete service "service-v1-test" "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create two services
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-slave services are created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
### Custom columns can be specified
# Pre-condition: generate output using custom columns
output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
# Post-condition: should contain name column
kube::test::if_has_string "${output_message}" 'redis-master'
### Delete multiple services at once
# Pre-condition: redis-master and redis-slave services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
# Command
kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create an ExternalName service
# Pre-condition: Only the default kubernetes service exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create service externalname beep-boop --external-name bar.com
# Post-condition: beep-boop service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
### Delete beep-boop service by id
# Pre-condition: beep-boop service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
# Command
kubectl delete service beep-boop "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
set +o nounset
set +o errexit
}
run_rc_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicationcontrollers)"
### Create and stop a controller, make sure it doesn't leak pods
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rc 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rc 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rc 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rc "Name:" "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rc
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rc false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rc true
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
### Scale replication controller from JSON with replicas only
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Clean-up
kubectl delete rc frontend "${kube_flags[@]}"
### Scale multiple replication controllers
kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
# Clean-up
kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
### Scale a job
kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml "${kube_flags[@]}"
# Command
kubectl scale --replicas=2 job/pi
# Post-condition: 2 replicas for pi
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Expose a deployment as a service
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
# Command
kubectl expose deployment/nginx-deployment
# Post-condition: service exists and exposes deployment port (80)
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl expose rc frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Command
kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
# Create a service using service/v1 generator
kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
# Verify that expose service works without specifying a port.
kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
# Post-condition: service exists with the same port as the original service.
kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
# Cleanup services
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
### Expose negative invalid resource test
# Pre-condition: none needed
# Command
output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
# Post-condition: the error message has "cannot expose" string
kube::test::if_has_string "${output_message}" 'cannot expose'
### Try to generate a service with invalid name (exceeding maximum valid size)
# Command: request an over-long service name via the --name flag
output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: should fail due to invalid name
kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
# Command: run without the --name flag; this should succeed by truncating the name inherited from the pod
output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: inherited name from pod has been truncated
kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostname-testing-sixty-three-characters-in-len\" exposed'
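# Service names are DNS labels capped at 63 characters, so the name inherited
# from the pod is truncated to exactly 63 characters instead of being rejected.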
# Clean-up
kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
### Expose multiport object as a new service
# Command: expose without the --port flag
output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
# Post-condition: expose succeeded
kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
# Post-condition: generated service has both ports from the exposed pod
kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 2379'
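# With --port omitted, expose copies every container port from the exposed
# object, which is why both named etcd ports show up on the generated service.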
# Clean-up
kubectl delete svc etcd-server "${kube_flags[@]}"
### Delete replication controller with id
# Pre-condition: frontend replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replication controllers
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple controllers at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Auto scale replication controller
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, rc specified by file
kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, rc specified by name
kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
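# With --cpu-percent omitted, the autoscaler falls back to its default target
# of 80% CPU utilization, which is the value asserted above.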
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rc frontend "${kube_flags[@]}"
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
## Set resource limits/request of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's cpu limits
kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Setting resources of a non-existing container should fail
! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set limits/requests of a deployment specified by a file
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show dry-run works on running deployments
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Clean up
kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_deployment_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing deployments"
# Test kubectl create deployment (using the default, i.e. the old generator)
kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and the old generator was used, i.e. old defaults are applied (no revisionHistoryLimit is set)
output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1beta1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and the new generator was used, i.e. new defaults are applied (revisionHistoryLimit is set to 2)
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1beta1'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
### Test kubectl create deployment should not fail validation
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
# Post-Condition: Deployment "deployment-with-unixuserid" is created.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
# Clean up
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted, then verify that the rs was not deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
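# --cascade=false orphans dependents: the deployment's ownerReference is
# expected to be stripped from the rs instead of the rs being deleted.
# Illustrative check (not asserted on):
#   kubectl get rs -o jsonpath='{.items[0].metadata.ownerReferences}'   # empty once orphaned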
# Cleanup
# Find the name of the rs to be deleted.
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
kubectl delete rs ${output_message} "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
cat hack/testdata/deployment-revision1.yaml | $SED "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Setting the image of a non-existing container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
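# --local computes the change from the file alone and never contacts the
# server, so both live images are expected to be unchanged above.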
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Setting the image of all containers again to the same, unchanged image should be a no-op
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
### Set env of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:'
kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
# Set env of deployments for all containers
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
# Set env of deployments for a specific container
kubectl set env deployment nginx-deployment env=prod -c=nginx "${kube_flags[@]}"
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
# Set env of deployments by secret
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
# Remove specific env of deployment
kubectl set env deployment nginx-deployment env-
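# A trailing dash removes a variable, so "env-" strips the env=prod entry set
# earlier. Several variables can be removed at once (illustrative, the names
# below are hypothetical):
#   kubectl set env deployment nginx-deployment env- other-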
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"
### Delete a deployment with an initializer
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web'
# Delete a deployment
kubectl delete deployment web "${kube_flags[@]}"
# Check Deployment web doesn't exist
output_message=$(! kubectl get deployment web 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"web" not found'
set +o nounset
set +o errexit
}
run_rs_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Set up three deployments; two of them share the same label (run=hello)
kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --selector
kubectl scale deploy --replicas=2 -l run=hello
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --all
kubectl scale deploy --replicas=3 --all
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
# Test set commands
# Pre-condition: frontend replica set exists at generation 1
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
kubectl set image rs/frontend "${kube_flags[@]}" "*=gcr.io/google-containers/pause:test-cmd"
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'
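# Each mutating "set" command bumps metadata.generation by one, which the
# assertions above track from 1 through 4.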
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete an rs with an initializer
# Pre-condition: no rs exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a rs
kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx'
# Delete a rs
kubectl delete rs nginx "${kube_flags[@]}"
# Check that rs nginx doesn't exist
output_message=$(! kubectl get rs nginx 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"nginx" not found'
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
set +o nounset
set +o errexit
}
run_daemonset_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets)"
### Create a rolling update DaemonSet
# Pre-condition: no DaemonSet exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
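# Re-applying an unchanged manifest is a no-op for the pod template, so
# spec.templateGeneration is expected to stay at 1.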
# Test set commands
kubectl set image daemonsets/bind "${kube_flags[@]}" "*=gcr.io/google-containers/pause:test-cmd"
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_daemonset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"
### Test rolling back a DaemonSet
# Pre-condition: no DaemonSet or its pods exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a DaemonSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
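# --record stores the kubectl invocation in the kubernetes.io/change-cause
# annotation; each controllerrevision snapshot carries it, which is the string
# the wait assertion above matches.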
# Rollback to revision 1 - should be no-op
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the DaemonSet (revision 2)
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo daemonset "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
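# Hypothetical helper (not called anywhere) showing how the revision history that
# `kubectl rollout undo --to-revision` consults can be listed for inspection:
list_daemonset_revisions() {
  # Prints "name=revision:" pairs for every ControllerRevision in the namespace.
  kubectl get controllerrevisions "${kube_flags[@]}" \
    -o go-template='{{range.items}}{{.metadata.name}}={{.revision}}:{{end}}'
}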
run_statefulset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"
### Test rolling back a StatefulSet
# Pre-condition: no statefulset or its pods exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a StatefulSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the statefulset (revision 2)
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo statefulset "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up - delete newest configuration
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
run_multi_resources_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:multiple resources)"
FILES="hack/testdata/multi-resource-yaml
hack/testdata/multi-resource-list
hack/testdata/multi-resource-json
hack/testdata/multi-resource-rclist
hack/testdata/multi-resource-svclist"
YAML=".yaml"
JSON=".json"
for file in $FILES; do
    if [ -f "${file}${YAML}" ]
    then
      file="${file}${YAML}"
      replace_file="${file%.yaml}-modify.yaml"
    else
      file="${file}${JSON}"
      replace_file="${file%.json}-modify.json"
    fi
has_svc=true
has_rc=true
two_rcs=false
two_svcs=false
if [[ "${file}" == *rclist* ]]; then
has_svc=false
two_rcs=true
fi
if [[ "${file}" == *svclist* ]]; then
has_rc=false
two_svcs=true
fi
### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files:
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
echo "Testing with file ${file} and replace with file ${replace_file}"
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f "${file}" "${kube_flags[@]}"
# Post-condition: mock service (and mock2) exists
if [ "$has_svc" = true ]; then
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Post-condition: mock rc (and mock2) exists
if [ "$has_rc" = true ]; then
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Command
kubectl get -f "${file}" "${kube_flags[@]}"
# Command: watching multiple resources should return "not supported" error
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1"
fi
kubectl describe -f "${file}" "${kube_flags[@]}"
# Command
    kubectl replace -f "${replace_file}" --force --cascade "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
# Command: kubectl edit multiple resources
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
# cleaning
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
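    # For example, a hypothetical second attempt without --overwrite:
    #   kubectl label -f "${file}" labeled=true
    # would be rejected on already-labeled resources with an error along the lines of
    # "'labeled' already has a value (true), and --overwrite is false".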
    kubectl-with-retry label -f "${file}" labeled=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service and mock rc (and mock2) are labeled
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
    # Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
    kubectl-with-retry annotate -f "${file}" annotated=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
# Cleanup resources created
kubectl delete -f "${file}" "${kube_flags[@]}"
done
#############################
# Multiple Resources via URL#
#############################
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: service(mock) and rc(mock) exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
# Clean up
kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_kubectl_config_set_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:config set)"
kubectl config set-cluster test-cluster --server="https://does-not-work"
  # Get the api cert and prepend a comment line so the raw data (which starts with
  # "-----BEGIN") is not mistaken for a flag during argument parsing
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
  r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
  encoded=$(echo -n "$cert_data" | base64)
  kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
  e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
  test "$e_written" == "$r_written"
set +o nounset
set +o errexit
}
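# A minimal sketch (assumption: a base64 binary with --decode is available) of the
# round-trip property the test above relies on: --set-raw-bytes base64-encodes raw
# input, so writing pre-encoded data must produce the identical stored value.
verify_base64_roundtrip() {
  local raw="example-data"
  local encoded
  encoded=$(echo -n "${raw}" | base64)
  # Decoding the encoded form must return the original bytes.
  test "$(echo -n "${encoded}" | base64 --decode)" == "${raw}"
}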
run_kubectl_local_proxy_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl local proxy"
# Make sure the UI can be proxied
start-proxy
check-curl-proxy-code /ui 307
check-curl-proxy-code /api/ui 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/ui 307
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy
set +o nounset
set +o errexit
}
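# Presumed shape of the check-curl-proxy-code helper used above (the real definition
# lives elsewhere in this script); a sketch assuming curl is available and PROXY_PORT
# holds the port that start-proxy bound:
check_curl_proxy_code_sketch() {
  local path="$1" expected="$2"
  local code
  # Fetch only the HTTP status code for the proxied path.
  code=$(curl -s -o /dev/null -w '%{http_code}' "http://127.0.0.1:${PROXY_PORT}${path}")
  test "${code}" == "${expected}"
}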
run_RESTMapper_evaluation_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing RESTMapper"
RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
  ### Non-existent resource type should give a recognizable error
# Pre-condition: None
# Command
kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
else
kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
exit 1
fi
rm "${RESTMAPPER_ERROR_FILE}"
# Post-condition: None
set +o nounset
set +o errexit
}
run_clusterroles_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing clusterroles"
# make sure the server was properly bootstrapped with clusterroles and bindings
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
kubectl create "${kube_flags[@]}" clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/*
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
# test `kubectl create clusterrolebinding`
# test `kubectl set subject clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
kube::test::get_object_assert clusterrolebinding/multi-users "{{range.subjects}}{{.name}}:{{end}}" 'user-1:user-2:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-group --group=foo
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-groups --clusterrole=admin --group=group-1 --group=group-2
kube::test::get_object_assert clusterrolebinding/multi-groups "{{range.subjects}}{{.name}}:{{end}}" 'group-1:group-2:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-sa --serviceaccount=otherfoo:foo
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
# test `kubectl create rolebinding`
# test `kubectl set subject rolebinding`
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl set subject "${kube_flags[@]}" rolebinding admin --user=foo
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:'
kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group
kube::test::get_object_assert rolebinding/localrole "{{.roleRef.kind}}" 'Role'
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" rolebinding localrole --group=foo
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" rolebinding sarole --serviceaccount=otherfoo:foo
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
set +o nounset
set +o errexit
}
run_role_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing role"
# Create Role from command (only resource)
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-pod-admin --verb=* --resource=invalid-resource 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"invalid-resource\""
# Create Role from command (resource + group)
kubectl create "${kube_flags[@]}" role group-reader --verb=get,list --resource=deployments.extensions
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'deployments:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=deployments.invalid-group 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"deployments\" in group \"invalid-group\""
# Create Role from command (resource / subresource)
kubectl create "${kube_flags[@]}" role subresource-reader --verb=get,list --resource=pods/status
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Create Role from command (resource + group / subresource)
kubectl create "${kube_flags[@]}" role group-subresource-reader --verb=get,list --resource=replicasets.extensions/scale
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'replicasets/scale:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=rs.invalid-group/scale 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"rs\" in group \"invalid-group\""
# Create Role from command (resource + resourcename)
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# Create Role from command (multi-resources)
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods/status,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
set +o nounset
set +o errexit
}
run_assert_short_name_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing assert short name"
kube::log::status "Testing propagation of short names for resources"
output_message=$(kubectl get --raw=/api/v1)
## test if a short name is exported during discovery
kube::test::if_has_string "${output_message}" '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
set +o nounset
set +o errexit
}
run_assert_categories_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing propagation of categories for resources"
output_message=$(kubectl get --raw=/api/v1 | grep -Po '"name":"pods".*?}')
kube::test::if_has_string "${output_message}" '"categories":\["all"\]'
set +o nounset
set +o errexit
}
run_kubectl_create_error_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl create with error"
# Passing no arguments to create is an error
! kubectl create
## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the empty string
if grep -q "unknown object type \"nil\" in ReplicationController" "${ERROR_FILE}"; then
kube::log::status "\"kubectl create with empty string list returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl create with empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
# Posting a pod to namespaces should fail. Also tests --raw forcing the post location
[ "$( kubectl convert -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f - --v=8 2>&1 | grep 'cannot be handled as a Namespace: converting (v1.Pod)')" ]
[ "$( kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --edit 2>&1 | grep 'raw and --edit are mutually exclusive')" ]
set +o nounset
set +o errexit
}
run_cmd_with_img_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing cmd with image"
  # Test that a valid image reference is accepted as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment "test1" created'
kubectl delete deployments test1
# test invalid image name
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
set +o nounset
set +o errexit
}
run_client_config_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing client config"
# Command
# Pre-condition: kubeconfig "missing" is not a file or directory
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: kubeconfig "missing" is not a file or directory
# Command
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
# Post-condition: --user contains a valid / empty value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Command
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
# Post-condition: --cluster contains a "valid" value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: context "missing-context" does not exist
# Command
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
kube::test::if_has_string "${output_message}" 'context "missing-context" does not exist'
# Post-condition: invalid or missing context returns error
# Pre-condition: cluster "missing-cluster" does not exist
# Command
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
kube::test::if_has_string "${output_message}" 'cluster "missing-cluster" does not exist'
# Post-condition: invalid or missing cluster returns error
# Pre-condition: user "missing-user" does not exist
# Command
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
# Post-condition: invalid or missing user returns error
# test invalid config
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
kube::test::if_has_string "${output_message}" "Error loading config file"
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'
set +o nounset
set +o errexit
}
run_service_accounts_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing service accounts"
### Create a new namespace
# Pre-condition: the test-service-accounts namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-service-accounts
# Post-condition: namespace 'test-service-accounts' is created.
kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
### Create a service account in a specific namespace
# Command
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
  # Post-condition: the service account exists and has the expected name
kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
# Clean-up
kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
# Clean up
kubectl delete namespace test-service-accounts
set +o nounset
set +o errexit
}
run_pod_templates_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing pod templates"
### Create PODTEMPLATE
# Pre-condition: no PODTEMPLATE
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
# Post-condition: nginx PODTEMPLATE is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
### Printing pod templates works
kubectl get podtemplates "${kube_flags[@]}"
[[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist
kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_stateful_set_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets)"
### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
# Command: Scale up
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
  # Typically we'd wait and confirm that N>1 replicas are up, but this framework
  # doesn't start the scheduler, so nginx-0 will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
### Clean up
kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
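# Presumed behavior of the wait-for-pods-with-label helper used above (defined
# elsewhere in this script): poll until the pod names matching the label equal the
# expected string. A minimal sketch under that assumption:
wait_for_pods_with_label_sketch() {
  local label="$1" expected="$2"
  local current
  for _ in $(seq 1 30); do
    # Concatenate the names of all pods carrying the label.
    current=$(kubectl get pods -l "${label}" -o go-template='{{range.items}}{{.metadata.name}}{{end}}' "${kube_flags[@]}")
    if [ "${current}" == "${expected}" ]; then
      return 0
    fi
    sleep 1
  done
  return 1
}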
run_lists_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:lists)"
### Create a List with objects from multiple versions
# Command
kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"
### Delete the List with objects from multiple versions
# Command
kubectl delete service/list-service-test deployment/list-deployment-test
set +o nounset
set +o errexit
}
run_persistent_volumes_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing persistent volumes"
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_persistent_volume_claims_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing persistent volumes claims"
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_storage_class_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing storage class"
### Create and delete storage class
# Pre-condition: no storage classes currently exist
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "StorageClass",
"apiVersion": "storage.k8s.io/v1",
"metadata": {
"name": "storage-class-name"
},
"provisioner": "kubernetes.io/fake-provisioner-type",
"parameters": {
"zone":"us-east-1b",
"type":"ssd"
}
}
__EOF__
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kubectl delete storageclass storage-class-name "${kube_flags[@]}"
# Post-condition: no storage classes
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_nodes_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:nodes)"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_object_events_assert nodes "127.0.0.1"
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert nodes "127.0.0.1" false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert nodes "127.0.0.1" true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert nodes
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert nodes false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert nodes true
### kubectl patch update can mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
# Post-condition: node is unschedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
# Post-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  # check webhook token authentication endpoint; kubectl doesn't actually display the returned object so this isn't super useful,
  # but it proves that the endpoint works
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false
set +o nounset
set +o errexit
}
run_authorization_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing authorization"
  # check remote authorization endpoint; kubectl doesn't actually display the returned object so this isn't super useful,
  # but it proves that the endpoint works
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1beta1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
set +o nounset
set +o errexit
}
run_retrieve_multiple_tests() {
set -o nounset
set -o errexit
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:multiget)"
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
set +o nounset
set +o errexit
}
run_resource_aliasing_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing resource aliasing"
kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}"
object="all -l'app=cassandra'"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
  # all 4 cassandra resources might not appear in the response immediately...
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'
kubectl delete all -l app=cassandra "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_explain_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:explain)"
kubectl explain pods
# shortcuts work
kubectl explain po
kubectl explain po.status.message
set +o nounset
set +o errexit
}
run_swagger_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing swagger"
# Verify schema
file="${KUBE_TEMP}/schema-v1.json"
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
[[ "$(grep "list of returned" "${file}")" ]]
[[ "$(grep "List of services" "${file}")" ]]
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
set +o nounset
set +o errexit
}
run_kubectl_sort_by_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --sort-by"
### sort-by should not panic if no pod exists
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl get pods --sort-by="{metadata.name}"
kubectl get pods --sort-by="{metadata.creationTimestamp}"
  ### sort-by should work if a pod exists
# Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Check output of sort-by
output_message=$(kubectl get pods --sort-by="{metadata.name}")
kube::test::if_has_string "${output_message}" "valid-pod"
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  ### sort-by should work when sorting by name
# Create three PODs
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod1.yaml
# Post-condition: sorted-pod1 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:'
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod2.yaml
  # Post-condition: sorted-pod2 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:'
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod3.yaml
  # Post-condition: sorted-pod3 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
# Check output of sort-by '{metadata.name}'
output_message=$(kubectl get pods --sort-by="{metadata.name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
# Check output of sort-by '{metadata.labels.name}'
output_message=$(kubectl get pods --sort-by="{metadata.labels.name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod3:sorted-pod2:sorted-pod1:"
### Clean up
  # Pre-condition: the three sorted-pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
# Command
kubectl delete "${kube_flags[@]}" pod --grace-period=0 --force --all
  # Post-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_kubectl_all_namespace_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --all-namespace"
# Pre-condition: the "default" namespace exists
kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
### Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Verify a specific namespace is ignored when all-namespaces is provided
# Command
kubectl get pods --all-namespaces --namespace=default
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_certificates_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing certificates"
# approve
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# deny
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
set +o nounset
set +o errexit
}
run_cluster_management_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing cluster-management commands"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl cordon "127.0.0.1" --dry-run
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl drain "127.0.0.1" --dry-run
# Post-condition: node still exists, node is still schedulable
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl uncordon update with --dry-run is a no-op
# Pre-condition: node is already schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
response=$(kubectl uncordon "127.0.0.1" --dry-run)
kube::test::if_has_string "${response}" 'already uncordoned'
# Post-condition: node is still schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain command fails when both --selector and a node argument are given
# Pre-condition: node exists and contains label test=label
kubectl label node "127.0.0.1" "test=label"
kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
kube::test::if_has_string "${response}" 'cannot specify both a node name'
### kubectl cordon command fails when no arguments are passed
# Pre-condition: node exists
response=$(! kubectl cordon 2>&1)
kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'
### kubectl cordon selects no nodes with an empty --selector=
# Pre-condition: node "127.0.0.1" is uncordoned
kubectl uncordon "127.0.0.1"
response=$(! kubectl cordon --selector= 2>&1)
kube::test::if_has_string "${response}" 'must provide one or more resources'
# test=label matches our node
response=$(kubectl cordon --selector test=label)
kube::test::if_has_string "${response}" 'node "127.0.0.1" cordoned'
# invalid=label does not match any nodes
response=$(kubectl cordon --selector invalid=label)
kube::test::if_has_not_string "${response}" 'cordoned'
# Post-condition: node "127.0.0.1" is cordoned
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
set +o nounset
set +o errexit
}
run_plugins_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl plugins"
# top-level plugin command
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl -h 2>&1)
kube::test::if_has_string "${output_message}" 'plugin\s\+Runs a command-line plugin'
# no plugins
output_message=$(! kubectl plugin 2>&1)
kube::test::if_has_string "${output_message}" 'no plugins installed'
# single plugins path
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin 2>&1)
kube::test::if_has_string "${output_message}" 'echo\s\+Echoes for test-cmd'
kube::test::if_has_string "${output_message}" 'get\s\+The wonderful new plugin-based get!'
kube::test::if_has_string "${output_message}" 'error\s\+The tremendous plugin that always fails!'
kube::test::if_has_not_string "${output_message}" 'The hello plugin'
kube::test::if_has_not_string "${output_message}" 'Incomplete plugin'
kube::test::if_has_not_string "${output_message}" 'no plugins installed'
# multiple plugins path
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin -h 2>&1)
kube::test::if_has_string "${output_message}" 'echo\s\+Echoes for test-cmd'
kube::test::if_has_string "${output_message}" 'get\s\+The wonderful new plugin-based get!'
kube::test::if_has_string "${output_message}" 'error\s\+The tremendous plugin that always fails!'
kube::test::if_has_string "${output_message}" 'hello\s\+The hello plugin'
kube::test::if_has_not_string "${output_message}" 'Incomplete plugin'
# don't override existing commands
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl get -h 2>&1)
kube::test::if_has_string "${output_message}" 'Display one or many resources'
kube::test::if_has_not_string "$output_message{output_message}" 'The wonderful new plugin-based get'
# plugin help
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello -h 2>&1)
kube::test::if_has_string "${output_message}" 'The hello plugin is a new plugin used by test-cmd to test multiple plugin locations.'
kube::test::if_has_string "${output_message}" 'Usage:'
# run plugin
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello 2>&1)
kube::test::if_has_string "${output_message}" '#hello#'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin echo 2>&1)
kube::test::if_has_string "${output_message}" 'This plugin works!'
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin hello 2>&1)
kube::test::if_has_string "${output_message}" 'unknown command'
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin error 2>&1)
kube::test::if_has_string "${output_message}" 'error: exit status 1'
# plugin tree
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree 2>&1)
kube::test::if_has_string "${output_message}" 'Plugin with a tree of commands'
kube::test::if_has_string "${output_message}" 'child1\s\+The first child of a tree'
kube::test::if_has_string "${output_message}" 'child2\s\+The second child of a tree'
kube::test::if_has_string "${output_message}" 'child3\s\+The third child of a tree'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 --help 2>&1)
kube::test::if_has_string "${output_message}" 'The first child of a tree'
kube::test::if_has_not_string "${output_message}" 'The second child'
kube::test::if_has_not_string "${output_message}" 'child2'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 2>&1)
kube::test::if_has_string "${output_message}" 'child one'
kube::test::if_has_not_string "${output_message}" 'child1'
kube::test::if_has_not_string "${output_message}" 'The first child'
# plugin env
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env -h 2>&1)
kube::test::if_has_string "${output_message}" "This is a flag 1"
kube::test::if_has_string "${output_message}" "This is a flag 2"
kube::test::if_has_string "${output_message}" "This is a flag 3"
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env --test1=value1 -t value2 2>&1)
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CURRENT_NAMESPACE'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CALLER'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_COMMAND=./env.sh'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_SHORT_DESC=The plugin envs plugin'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT=0'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST1=value1'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST2=value2'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST3=default'
set +o nounset
set +o errexit
}
run_impersonation_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing impersonation"
output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
kube::test::if_has_string "${output_message}" 'without impersonating a user'
if kube::test::if_supports_resource "${csr}" ; then
# --as
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1
kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
# --as-group
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
fi
set +o nounset
set +o errexit
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
foundError="False"
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version
  # Use a timestamp as the namespace name: incrementing a counter inside a
  # subshell would not be visible outside the subshell.
create_and_use_new_namespace() {
namespace_number=$(date +%s%N)
kube::log::status "Creating namespace namespace${namespace_number}"
kubectl create namespace "namespace${namespace_number}"
kubectl config set-context "${CONTEXT}" --namespace="namespace${namespace_number}"
}
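  # Illustrative sketch (comments only, not executed): a counter bumped in a
  # subshell is lost when the subshell exits, e.g.
  #   n=0; ( n=$((n+1)) ); echo "$n"   # prints 0, not 1
  # hence the timestamp-based namespace names above.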
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)
# token defined in hack/testdata/auth-tokens.csv
kube_flags_with_token=(
-s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
)
if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
kube_flags_with_token+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
pdb_max_unavailable=".spec.maxUnavailable"
generation_field=".metadata.generation"
template_generation_field=".spec.templateGeneration"
container_len="(len .spec.template.spec.containers)"
image_field0="(index .spec.template.spec.containers 0).image"
image_field1="(index .spec.template.spec.containers 1).image"
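  # Usage sketch for these go-template fields (object names hypothetical):
  #   kube::test::get_object_assert pod/valid-pod "{{${id_field}}}" 'valid-pod'
  # asserts that .metadata.name of pod/valid-pod renders as 'valid-pod'.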
# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
    if ! grep -q "default" <<< "${output_message}"; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi
# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi
#########################
# Kubectl version #
#########################
record_command run_kubectl_version_tests
#######################
# kubectl config set #
#######################
record_command run_kubectl_config_set_tests
#######################
# kubectl local proxy #
#######################
record_command run_kubectl_local_proxy_tests
#########################
# RESTMapper evaluation #
#########################
record_command run_RESTMapper_evaluation_tests
################
# Cluster Role #
################
if kube::test::if_supports_resource "${clusterroles}" ; then
record_command run_clusterroles_tests
fi
########
# Role #
########
if kube::test::if_supports_resource "${roles}" ; then
record_command run_role_tests
fi
#########################
# Assert short name #
#########################
record_command run_assert_short_name_tests
#########################
# Assert categories #
#########################
## test if a category is exported during discovery
if kube::test::if_supports_resource "${pods}" ; then
record_command run_assert_categories_tests
fi
###########################
# POD creation / deletion #
###########################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_pod_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_save_config_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_create_error_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_apply_tests
record_command run_kubectl_run_tests
record_command run_kubectl_create_filter_tests
fi
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_kubectl_apply_deployments_tests
fi
###############
# Kubectl get #
###############
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_get_tests
fi
######################
# Create #
######################
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_create_secret_tests
fi
##################
# Global timeout #
##################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_request_timeout_tests
fi
#####################################
# CustomResourceDefinitions #
#####################################
# customresourcedefinitions cleanup after themselves.
if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
record_command run_crd_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_cmd_with_img_tests
fi
#####################################
# Recursive Resources via directory #
#####################################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_recursive_resources_tests
fi
##############
# Namespaces #
##############
if kube::test::if_supports_resource "${namespaces}" ; then
record_command run_namespace_tests
fi
###########
# Secrets #
###########
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_secrets_test
fi
fi
######################
# ConfigMap #
######################
if kube::test::if_supports_resource "${namespaces}"; then
if kube::test::if_supports_resource "${configmaps}" ; then
record_command run_configmap_tests
fi
fi
####################
# Client Config #
####################
record_command run_client_config_tests
####################
# Service Accounts #
####################
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
record_command run_service_accounts_tests
fi
#################
# Pod templates #
#################
if kube::test::if_supports_resource "${podtemplates}" ; then
record_command run_pod_templates_tests
fi
############
# Services #
############
if kube::test::if_supports_resource "${services}" ; then
record_command run_service_tests
fi
##################
# DaemonSets #
##################
if kube::test::if_supports_resource "${daemonsets}" ; then
record_command run_daemonset_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_daemonset_history_tests
fi
fi
###########################
# Replication controllers #
###########################
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_rc_tests
fi
fi
######################
# Deployments #
######################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_deployment_tests
fi
######################
# Replica Sets #
######################
if kube::test::if_supports_resource "${replicasets}" ; then
record_command run_rs_tests
fi
#################
# Stateful Sets #
#################
if kube::test::if_supports_resource "${statefulsets}" ; then
record_command run_stateful_set_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_statefulset_history_tests
fi
fi
######################
# Lists #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_lists_tests
fi
fi
######################
# Multiple Resources #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_multi_resources_tests
fi
fi
######################
# Persistent Volumes #
######################
if kube::test::if_supports_resource "${persistentvolumes}" ; then
record_command run_persistent_volumes_tests
fi
############################
# Persistent Volume Claims #
############################
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
record_command run_persistent_volume_claims_tests
fi
############################
# Storage Classes #
############################
if kube::test::if_supports_resource "${storageclass}" ; then
record_command run_storage_class_tests
fi
#########
# Nodes #
#########
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_nodes_tests
fi
########################
# authorization.k8s.io #
########################
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
record_command run_authorization_tests
fi
# kubectl auth can-i
# kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
output_message=$(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type"
output_message=$(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "subresource can not be used with NonResourceURL"
output_message=$(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
kube::test::if_empty_string "${output_message}"
fi
# kubectl auth reconcile
if kube::test::if_supports_resource "${clusterroles}" ; then
kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml
kube::test::get_object_assert 'rolebindings -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-RB:'
kube::test::get_object_assert 'roles -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-R:'
kube::test::get_object_assert 'clusterrolebindings -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CRB:'
kube::test::get_object_assert 'clusterroles -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CR:'
kubectl delete "${kube_flags[@]}" rolebindings,role,clusterroles,clusterrolebindings -n some-other-random -l test-cmd=auth
fi
#####################
# Retrieve multiple #
#####################
if kube::test::if_supports_resource "${nodes}" ; then
if kube::test::if_supports_resource "${services}" ; then
record_command run_retrieve_multiple_tests
fi
fi
#####################
# Resource aliasing #
#####################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_resource_aliasing_tests
fi
fi
###########
# Explain #
###########
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_explain_tests
fi
###########
# Swagger #
###########
record_command run_swagger_tests
#####################
# Kubectl --sort-by #
#####################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_sort_by_tests
fi
############################
# Kubectl --all-namespaces #
############################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_all_namespace_tests
fi
################
# Certificates #
################
if kube::test::if_supports_resource "${csr}" ; then
record_command run_certificates_tests
fi
######################
# Cluster Management #
######################
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_cluster_management_tests
fi
###########
# Plugins #
###########
record_command run_plugins_tests
#################
# Impersonation #
#################
record_command run_impersonation_tests
kube::test::clear_all
if [ "$foundError" == "True" ]; then
echo "TEST FAILED"
exit 1
fi
}
run_initializer_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing --include-uninitialized"
### Create a deployment
kubectl create --request-timeout=1 -f hack/testdata/initializer-deployments.yaml 2>&1 "${kube_flags[@]}" || true
### Test kubectl get --include-uninitialized
# Command
output_message=$(kubectl get deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: the deployment name "web" should be part of the output
kube::test::if_has_string "${output_message}" 'web'
# Command
output_message=$(kubectl get deployments web 2>&1 "${kube_flags[@]}")
  # Post-condition: the deployment name "web" should be part of the output
kube::test::if_has_string "${output_message}" 'web'
# Command
output_message=$(kubectl get deployments --show-all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test kubectl describe --include-uninitialized
# Command
output_message=$(kubectl describe deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl describe deployments web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments web --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
### Test kubectl label --include-uninitialized
# Command
output_message=$(kubectl label deployments labelkey1=labelvalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey1}}" 'labelvalue1'
# Command
output_message=$(kubectl label deployments labelkey2=labelvalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey3=labelvalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey4=labelvalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey4}}" 'labelvalue4'
# Command
output_message=$(kubectl label deployments labelkey5=labelvalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey6=labelvalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey6}}" 'labelvalue6'
# Command
output_message=$(kubectl label deployments web labelkey7=labelvalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey7}}" 'labelvalue7'
# Found All Labels
kube::test::get_object_assert 'deployments web' "{{${labels_field}}}" 'map[labelkey1:labelvalue1 labelkey4:labelvalue4 labelkey6:labelvalue6 labelkey7:labelvalue7 run:web]'
### Test kubectl annotate --include-uninitialized
# Command
output_message=$(kubectl annotate deployments annotatekey1=annotatevalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey1}}" 'annotatevalue1'
# Command
output_message=$(kubectl annotate deployments annotatekey2=annotatevalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey3=annotatevalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey4=annotatevalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey4}}" 'annotatevalue4'
# Command
output_message=$(kubectl annotate deployments annotatekey5=annotatevalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey6=annotatevalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey6}}" 'annotatevalue6'
# Command
output_message=$(kubectl annotate deployments web annotatekey7=annotatevalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey7}}" 'annotatevalue7'
### Test kubectl edit --include-uninitialized
[ "$(EDITOR=cat kubectl edit deployments 2>&1 "${kube_flags[@]}" | grep 'edit cancelled, no objects found')" ]
[ "$(EDITOR=cat kubectl edit deployments --include-uninitialized 2>&1 "${kube_flags[@]}" | grep 'Edit cancelled, no changes made.')" ]
### Test kubectl set image --include-uninitialized
# Command
  output_message=$(kubectl set image deployments '*=nginx:1.11' --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
  output_message=$(kubectl set image deployments '*=nginx:1.11' --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
  output_message=$(kubectl set image deployments '*=nginx:1.11' -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
  output_message=$(kubectl set image deployments '*=nginx:1.12' -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
  output_message=$(kubectl set image deployments '*=nginx:1.13' -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
### Test kubectl set resources --include-uninitialized
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=200m,memory=256Mi -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=512Mi -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
### Test kubectl set selector --include-uninitialized
# Create a service with initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-redis-master-service.yaml 2>&1 "${kube_flags[@]}" || true
# Command
output_message=$(kubectl set selector services role=padawan --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "selector updated" should be part of the output
kube::test::if_has_string "${output_message}" 'selector updated'
# Command
output_message=$(kubectl set selector services role=padawan --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl set subject --include-uninitialized
  # Create a clusterrolebinding with an initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-clusterrolebinding.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
### Test kubectl set serviceaccount --include-uninitialized
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "serviceaccount updated" should be part of the output
kube::test::if_has_string "${output_message}" 'serviceaccount updated'
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl delete --include-uninitialized
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl delete clusterrolebinding --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl delete clusterrolebinding --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "deleted" should be part of the output
kube::test::if_has_string "${output_message}" 'deleted'
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.items}}{{$id_field}}:{{end}}" ''
### Test kubectl apply --include-uninitialized
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune --request-timeout=20 --include-uninitialized=false --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
  # apply pod a with the default --include-uninitialized behavior
kubectl apply --prune --request-timeout=20 --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
  # apply pod a with --include-uninitialized pruning enabled
kubectl apply --prune --request-timeout=20 --include-uninitialized --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
kubectl delete --request-timeout=1 deploy web
kubectl delete --request-timeout=1 service redis-master
set +o nounset
set +o errexit
}
|
rajatchopra/kubernetes
|
hack/make-rules/test-cmd-util.sh
|
Shell
|
apache-2.0
| 260,770 |
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
apt-get update && apt-get dist-upgrade -y
clean-install \
openjdk-8-jre-headless \
libjemalloc1 \
localepurge \
dumb-init \
wget
CASSANDRA_PATH="cassandra/${CASSANDRA_VERSION}/apache-cassandra-${CASSANDRA_VERSION}-bin.tar.gz"
CASSANDRA_DOWNLOAD="http://www.apache.org/dyn/closer.cgi?path=/${CASSANDRA_PATH}&as_json=1"
CASSANDRA_MIRROR=$(wget -q -O - ${CASSANDRA_DOWNLOAD} | grep -oP "(?<=\"preferred\": \")[^\"]+")
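# The closer.cgi JSON payload looks roughly like (abridged, values illustrative):
#   {"preferred": "http://apache.mirror.example/", "path_info": "...", ...}
# so the grep above extracts the preferred mirror base URL.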
echo "Downloading Apache Cassandra from $CASSANDRA_MIRROR$CASSANDRA_PATH..."
wget -q -O - $CASSANDRA_MIRROR$CASSANDRA_PATH \
| tar -xzf - -C /usr/local
mkdir -p /cassandra_data/data
mkdir -p /etc/cassandra
mv /logback.xml /cassandra.yaml /jvm.options /etc/cassandra/
mv /usr/local/apache-cassandra-${CASSANDRA_VERSION}/conf/cassandra-env.sh /etc/cassandra/
adduser --disabled-password --no-create-home --gecos '' --disabled-login cassandra
chmod +x /ready-probe.sh
chown cassandra: /ready-probe.sh
DEV_IMAGE=${DEV_CONTAINER:-}
if [ -n "$DEV_IMAGE" ]; then
  clean-install python;
else
  # CASSANDRA_HOME is not set in this script; it is expected to come from
  # the image environment (e.g. the Dockerfile).
  rm -rf $CASSANDRA_HOME/pylib;
fi
mv /kubernetes-cassandra.jar /usr/local/apache-cassandra-${CASSANDRA_VERSION}/lib
mv /cassandra-seed.so /etc/cassandra/
mv /cassandra-seed.h /usr/local/lib/include
apt-get -y purge localepurge
apt-get -y autoremove
apt-get clean
rm -rf \
$CASSANDRA_HOME/*.txt \
$CASSANDRA_HOME/doc \
$CASSANDRA_HOME/javadoc \
$CASSANDRA_HOME/tools/*.yaml \
$CASSANDRA_HOME/tools/bin/*.bat \
$CASSANDRA_HOME/bin/*.bat \
doc \
man \
info \
locale \
common-licenses \
~/.bashrc \
/var/lib/apt/lists/* \
/var/log/* \
/var/cache/debconf/* \
/etc/systemd \
/lib/lsb \
/lib/udev \
/usr/share/doc/ \
/usr/share/doc-base/ \
/usr/share/man/ \
/tmp/* \
/usr/lib/jvm/java-8-openjdk-amd64/jre/plugin \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/javaws \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/jjs \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/orbd \
/usr/lib/jvm/java-8-openjdk-amd64/bin/pack200 \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/policytool \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/rmid \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/rmiregistry \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/servertool \
/usr/lib/jvm/java-8-openjdk-amd64/bin/tnameserv \
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/unpack200 \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/javaws.jar \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/deploy* \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/desktop \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/*javafx* \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/*jfx* \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libdecora_sse.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libprism_*.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libfxplugins.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libglass.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libgstreamer-lite.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libjavafx*.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/amd64/libjfx*.so \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/ext/jfxrt.jar \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/ext/nashorn.jar \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/oblique-fonts \
/usr/lib/jvm/java-8-openjdk-amd64/jre/lib/plugin.jar \
/usr/lib/jvm/java-8-openjdk-amd64/man
|
kubernetes/examples
|
cassandra/image/files/build.sh
|
Shell
|
apache-2.0
| 4,178 |
#!/bin/bash
./LAProcess -H 9001 -I localhost -P 9000
#LAProcess.exe -H 9001 -I localhost -P 9000 2&> la_log &
|
izenecloud/izenelib
|
test/net/message-framework-testbed/bin/la.sh
|
Shell
|
apache-2.0
| 111 |
#!/bin/bash
path=`dirname $0`
OUTPUT_DIR=$1
TARGET_NAME=plugin_library
OUTPUT_SUFFIX=dylib
CONFIG=Release
#
# Checks exit value for error
#
checkError() {
if [ $? -ne 0 ]
then
echo "Exiting due to errors (above)"
    exit 1
fi
}
#
# Canonicalize relative paths to absolute paths
#
pushd $path > /dev/null
dir=`pwd`
path=$dir
popd > /dev/null
if [ -z "$OUTPUT_DIR" ]
then
OUTPUT_DIR=.
fi
pushd $OUTPUT_DIR > /dev/null
dir=`pwd`
OUTPUT_DIR=$dir
popd > /dev/null
echo "OUTPUT_DIR: $OUTPUT_DIR"
xcodebuild -project "$path/Plugin.xcodeproj" -configuration $CONFIG clean
checkError
xcodebuild -project "$path/Plugin.xcodeproj" -configuration $CONFIG
checkError
cp "$path/build/Release/$TARGET_NAME.$OUTPUT_SUFFIX" "$OUTPUT_DIR"
|
scoreflex/scoreflex-corona-plugin
|
mac/build.sh
|
Shell
|
apache-2.0
| 764 |
#!/usr/bin/env bash
export PYTHONPATH=..:$PYTHONPATH
|
jurcicek/ndm
|
setup.sh
|
Shell
|
apache-2.0
| 55 |
#!/bin/bash
# (c) Copyright 2019 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
WORK_DIR=${WORK_DIR:-$PWD}
AUTOMATION_DIR=${AUTOMATION_DIR:-"$(git rev-parse --show-toplevel)"}
ANSIBLE_VENV=${ANSIBLE_VENV:-"$WORK_DIR/ansible-venv"}
ARDANA_INPUT=${ARDANA_INPUT:-"$WORK_DIR/input.yml"}
MITOGEN_URL=${MITOGEN_URL:-"https://github.com/dw/mitogen/archive/master.tar.gz"}
ANSIBLE_CFG_ARDANA=${ANSIBLE_CFG_ARDANA:-"$AUTOMATION_DIR/scripts/jenkins/cloud/ansible/ansible.cfg"}
ANSIBLE_CFG_SES=${ANSIBLE_CFG_SES:-"$AUTOMATION_DIR/scripts/jenkins/ses/ansible/ansible.cfg"}
# determine python command to use, preferring compatible python3 over
# python2 but ensuring that we select a python version that supports
# creating an ansible-venv with all the modules specified in the
# requirements.txt installed.
function determine_python_bin {
declare -g python_bin
[[ -n "${python_bin}" ]] && return
local pycmd pybin pyver pyminver
for pycmd in python3 python
do
pybin=$(command -v ${pycmd}) || continue
# check if it is a compatible python version - Ansible needs Python >=3.5, or >=2.6
pyver=$(${pybin} --version 2>&1 | awk '{print $2}')
case "${pyver}" in
2.* )
pyminver=2.6
;;
3.* )
pyminver=3.5
;;
* )
continue
;;
esac
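        # The version-sort check below picks the larger version string, e.g.
        #   printf '%s\n' 3.5 3.6.8 | sort --version-sort | tail -1   # -> 3.6.8
        # so the branch is taken when ${pyver} >= ${pyminver}.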
if [[ "$(printf '%s\n' ${pyminver} ${pyver} | sort --version-sort | tail -1)" == "${pyver}" ]]; then
python_bin=${pybin}
break
fi
done
if [[ -z "${python_bin}" ]]; then
echo 1>&2 "ERROR: No compatible python version detected"
return 1
fi
}
function get_from_input {
echo $(grep -v "^#" $ARDANA_INPUT | grep "^${1}:" | cut -d: -f2- | tr -d "'" | tr -d '"')
}
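# Usage sketch (input.yml contents hypothetical): with a line such as
#   cloud_env: 'qe101'
# in input.yml, `get_from_input cloud_env` prints qe101.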
function is_defined {
value=$(get_from_input $1)
if [[ ! -z "${value// }" ]]; then
true
else
false
fi
}
function setup_ansible_venv {
if [ ! -d "$ANSIBLE_VENV" ]; then
determine_python_bin || return 1
virtualenv --python=${python_bin} $ANSIBLE_VENV
# some versions of virtualenv may not ensure that setuptools and
# wheel are installed/upgraded to latest versions which can cause
# issues when trying to install some of the modules specified in
# requirements.txt, so we need to explicitly specify them here.
$ANSIBLE_VENV/bin/pip install --upgrade pip setuptools wheel
$ANSIBLE_VENV/bin/pip install -r $WORK_DIR/requirements.txt
fi
}
function mitogen_enable {
if [[ "${NO_MITOGEN:+true}" == "true" ]]; then
mitogen_disable
return 0
fi
if [ ! -d "mitogen" ]; then
wget -qO- $MITOGEN_URL | tar -xz
mv mitogen-* mitogen
fi
for cfg in "${ANSIBLE_CFG_ARDANA}" "${ANSIBLE_CFG_SES}"; do
if ! grep -Fq "strategy_plugins=" ${cfg}; then
sed -i "/^\[defaults\]/a\strategy_plugins=$WORK_DIR/mitogen/ansible_mitogen/plugins/strategy" \
$cfg
fi
if ! grep -Fxq "strategy=mitogen_linear" $cfg; then
sed -i "/^\[defaults\]/a\strategy=mitogen_linear" $cfg
fi
done
}
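# After mitogen_enable, each ansible.cfg gains lines of this shape
# (path illustrative):
#   [defaults]
#   strategy=mitogen_linear
#   strategy_plugins=<WORK_DIR>/mitogen/ansible_mitogen/plugins/strategy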
function mitogen_disable {
if [ -d "mitogen" ]; then
rm -rf mitogen
fi
for cfg in "${ANSIBLE_CFG_ARDANA}" "${ANSIBLE_CFG_SES}"; do
if grep -Fq "strategy_plugins=" ${cfg}; then
sed -i "/strategy_plugins=/d" \
$cfg
fi
if grep -Fxq "strategy=mitogen_linear" $cfg; then
sed -i "/strategy=mitogen_linear/d" $cfg
fi
done
}
function ansible_playbook {
if ! is_defined cloud_env; then
echo "ERROR: cloud_env must be defined - please check all variables on input.yml"
return 1
else
source $ANSIBLE_VENV/bin/activate
if [[ "$PWD" != *scripts/jenkins/cloud/ansible ]]; then
pushd $AUTOMATION_DIR/scripts/jenkins/cloud/ansible
fi
echo "Running: ansible-playbook -e @$ARDANA_INPUT ${@}"
ansible-playbook ${ANSIBLE_VERBOSE:-} --extra-vars @$ARDANA_INPUT "${@}"
popd
fi
}
function ansible_playbook_ses {
if ! is_defined cloud_env; then
echo "ERROR: cloud_env must be defined - please check all variables on input.yml"
return 1
else
source $ANSIBLE_VENV/bin/activate
if [[ "$PWD" != *scripts/jenkins/ses/ansible ]]; then
pushd $AUTOMATION_DIR/scripts/jenkins/ses/ansible
fi
echo "Running: ansible-playbook ${@}"
ansible-playbook "${@}"
popd
fi
}
# this wrapper will prefer python3 over python2 when running a python script,
# which should be the same version used for creating the ansible-venv, and
# thus should leverage the Python environment provided by the ansible-venv,
# if we have activated the virtualenv, which will have all of the modules
# specified in requirements.txt installed.
function run_python_script {
set +x
determine_python_bin || return 1
$python_bin "${@}"
}
function is_physical_deploy {
cloud_env=$(get_from_input cloud_env)
[[ $cloud_env == qe* ]] || [[ $cloud_env == pcloud* ]] || [[ $cloud_env == hw-* ]]
}
function get_cloud_product {
echo $(get_from_input cloud_product)
}
function get_deployer_ip {
grep -oP "^$(get_from_input cloud_env)\\s+ansible_host=\\K[0-9\\.]+" \
$AUTOMATION_DIR/scripts/jenkins/cloud/ansible/inventory
}
function get_ses_ip {
grep -oP "^$(get_from_input cloud_env)-ses\\s+ansible_host=\\K[0-9\\.]+" \
$AUTOMATION_DIR/scripts/jenkins/ses/ansible/inventory
}
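# The two lookups above match inventory lines of the form (values hypothetical):
#   qe101        ansible_host=10.84.1.2
#   qe101-ses    ansible_host=10.84.1.3
# and print only the IP, thanks to grep's \K match reset.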
function delete_stack {
if ! is_physical_deploy; then
ansible_playbook heat-stack.yml -e heat_action=delete
fi
}
function prepare_input_model {
if is_defined scenario_name; then
ansible_playbook generate-cloud.yml
else
ansible_playbook clone-input-model.yml
fi
}
function prepare_heat_template {
ansible_playbook generate-heat-template.yml
}
function deploy_heat_template {
ansible_playbook heat-stack.yml
}
function prepare_infra {
if is_physical_deploy; then
ansible_playbook start-deployer-vm.yml
else
prepare_heat_template
delete_stack
deploy_heat_template
fi
}
function build_test_packages_for_stage {
    local homeproject=${1} stage=${2} change_ids=${3}
    if [ -z "${change_ids}" ]; then
        return
    fi
    pushd $AUTOMATION_DIR/scripts/jenkins/cloud/gerrit
    source $ANSIBLE_VENV/bin/activate
    GERRIT_VERIFY=0 PYTHONWARNINGS="ignore:Unverified HTTPS request" \
        run_python_script -u build_test_package.py --buildnumber ${stage} \
        --homeproject ${homeproject} -c ${change_ids//,/ -c }
popd
}
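# e.g. change_ids="12345,67890" expands via ${change_ids//,/ -c } to
#   -c 12345 -c 67890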
function build_test_packages {
local homeproject
if ! is_defined homeproject; then
echo "ERROR: homeproject must be defined - please check all variables on input.yml"
return 1
fi
homeproject=$(get_from_input homeproject)
build_test_packages_for_stage ${homeproject} deploy "$(get_from_input gerrit_change_ids)"
build_test_packages_for_stage ${homeproject} update "$(get_from_input update_gerrit_change_ids)"
build_test_packages_for_stage ${homeproject} upgrade "$(get_from_input upgrade_gerrit_change_ids)"
}
function bootstrap_clm {
test_repo_url=""
if is_defined gerrit_change_ids; then
homeproject=$(get_from_input homeproject)
test_repo_url="http://download.suse.de/ibs/$(sed 's#\b:\b#&/#' <<< $homeproject):/ardana-ci-deploy/standard"
fi
extra_repos=$(sed -e "s/^,//" -e "s/,$//" <<< "$(get_from_input extra_repos),${test_repo_url}")
ansible_playbook bootstrap-clm.yml -e extra_repos="${extra_repos}"
}
function bootstrap_crowbar {
test_repo_url=""
extra_repos=$(sed -e "s/^,//" -e "s/,$//" <<< "$(get_from_input extra_repos),${test_repo_url}")
ansible_playbook bootstrap-crowbar.yml -e extra_repos="${extra_repos}"
}
function deploy_ses_vcloud {
if ! is_physical_deploy && $(get_from_input ses_enabled); then
ses_id=$(get_from_input cloud_env)
os_cloud=$(get_from_input os_cloud)
os_project_name=$(get_from_input os_project_name)
[[ -n $os_project_name ]] && os_project_option="os_project_name=$os_project_name"
deploy_ses_using="network="${ses_id}-cloud_management_net""
# For crowbar SES is deployed on its own network and crowbar accesses it through the
# external router. This is required to prevent the crowbar DHCP from affecting the SES
# cluster.
if [[ "$(get_cloud_product)" == "crowbar" ]]; then
deploy_ses_using="router=${ses_id}-cloud_router_ext"
fi
ansible_playbook_ses ses-heat-stack.yml -e "ses_id=$ses_id $deploy_ses_using os_cloud=$os_cloud $os_project_option"
ansible_playbook_ses bootstrap-ses-node.yml -e ses_id=$ses_id
for i in {1..3}; do
ansible_playbook_ses ses-deploy.yml -e ses_id=$ses_id && break || sleep 5
done
fi
}
function bootstrap_nodes {
if [[ "$(get_cloud_product)" == "crowbar" ]]; then
ansible_playbook bootstrap-crowbar-nodes.yml
elif is_physical_deploy; then
ansible_playbook bootstrap-pcloud-nodes.yml
else
ansible_playbook bootstrap-vcloud-nodes.yml
fi
}
function prepare_install_crowbar {
ansible_playbook prepare-install-crowbar.yml
}
function install_crowbar {
ansible_playbook install-crowbar.yml
}
function register_crowbar_nodes {
ansible_playbook register-crowbar-nodes.yml
}
function deploy_cloud {
if [ "$(get_cloud_product)" == "crowbar" ]; then
ansible_playbook deploy-crowbar.yml
elif $(get_from_input deploy_cloud); then
ansible_playbook deploy-cloud.yml
fi
}
function deploy_ardana_but_dont_run_site_yml {
if $(get_from_input deploy_cloud); then
ansible_playbook deploy-cloud.yml -e "skip_running_site_yml=True"
fi
}
function update_cloud {
if $(get_from_input deploy_cloud) && $(get_from_input update_after_deploy); then
if [ "$(get_cloud_product)" == "crowbar" ]; then
ansible_playbook crowbar-update.yml
else
local update_to_cloudsource maint_updates test_repo_url homeproject extra_repos
update_to_cloudsource="$(get_from_input update_to_cloudsource)"
maint_updates="$(get_from_input maint_updates)"
test_repo_url=""
if is_defined update_gerrit_change_ids; then
homeproject=$(get_from_input homeproject)
test_repo_url="http://download.suse.de/ibs/$(sed 's#\b:\b#&/#' <<< $homeproject):/ardana-ci-update/standard"
fi
extra_repos=$(sed -e "s/^,//" -e "s/,$//" <<< "$(get_from_input update_extra_repos),${test_repo_url}")
if [[ -n "${update_to_cloudsource}" ]] || [[ -n "${maint_updates}" ]]; then
ansible_playbook ardana-update.yml \
${update_to_cloudsource:+-e cloudsource="${update_to_cloudsource}"} \
${extra_repos:+ -e extra_repos="${extra_repos}"}
fi
fi
fi
}
function upgrade_cloud {
if $(get_from_input deploy_cloud) && $(get_from_input update_after_deploy) && [[ -n "$(get_from_input upgrade_cloudsource)" ]]; then
if [ "$(get_cloud_product)" == "crowbar" ]; then
ansible_playbook crowbar-upgrade.yml
else
            local test_repo_url homeproject extra_repos
            test_repo_url=""
if is_defined upgrade_gerrit_change_ids; then
homeproject=$(get_from_input homeproject)
test_repo_url="http://download.suse.de/ibs/$(sed 's#\b:\b#&/#' <<< $homeproject):/ardana-ci-upgrade/standard"
fi
extra_repos=$(sed -e "s/^,//" -e "s/,$//" <<< "$(get_from_input upgrade_extra_repos),${test_repo_url}")
ansible_playbook ardana-disable-repos.yml
ansible_playbook ardana-upgrade.yml \
-e cloudsource="$(get_from_input upgrade_cloudsource)" \
${extra_repos:+ -e extra_repos="${extra_repos}"}
fi
fi
}
function run_tempest {
    if is_defined tempest_filter_list; then
if [ "$(get_cloud_product)" == "crowbar" ]; then
playbook_name=run-tempest-crowbar
else
playbook_name=run-tempest-ardana
fi
tempest_filter_list=($(echo "$(get_from_input tempest_filter_list)" | tr ',' '\n'))
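        # e.g. tempest_filter_list="smoke,neutron" becomes the array (smoke neutron)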
for filter in "${tempest_filter_list[@]}"; do
ansible_playbook ${playbook_name}.yml -e tempest_run_filter=$filter
done
fi
}
function run_qa_tests {
    if is_defined qa_test_list; then
qa_test_list=($(echo "$(get_from_input qa_test_list)" | tr ',' '\n'))
for qa_test in "${qa_test_list[@]}"; do
test_args_name="${qa_test^^}_ARGS"
test_args=($(echo "$(get_from_input ${test_args_name})"))
if [ -z "${!test_args_name}" ] && (( ${#test_args[@]} > 0 ))
then
export ${test_args_name}="${test_args[*]}"
fi
ansible_playbook run-ardana-qe-tests.yml -e test_name=$qa_test
done
fi
}
function run_crowbar_tests {
if [[ "$(get_cloud_product)" == "crowbar" ]]; then
ansible_playbook run-crowbar-tests.yml
fi
}
function validate_input {
if ! is_defined cloud_env; then
echo "ERROR: cloud_env must be defined - please check all variables on input.yml"
return 1
elif [[ "${NO_CONFIRM:+true}" == "true" ]]; then
return 0
else
echo "
*****************************************************************************************
** Ardana will be deployed using the following config:
$(cat input.yml| grep -v "^#\|''\|^[[:space:]]*$" | sed -e 's/^/ ** /')
*****************************************************************************************
"
read -p "Continue (y/n)?" choice
case "$choice" in
y|Y ) return 0;;
* ) return 1;;
esac
fi
}
function exit_msg {
DEPLOYER_IP=$(get_deployer_ip)
if ! is_physical_deploy && $(get_from_input ses_enabled); then
SES_IP=$(get_ses_ip)
echo "
*****************************************************************************************
** The '$(get_from_input cloud_env)' SES environment is reachable at:
**
** ssh root@${SES_IP}
**
** Please delete the '$(get_from_input cloud_env)-ses' stack when you're done,
** by logging into the ECP at https://engcloud.prv.suse.net/project/stacks/
** and deleting the heat stack.
*****************************************************************************************
"
fi
echo "
*****************************************************************************************
** The deployer for the '$(get_from_input cloud_env)' environment is reachable at:
**
** ssh ardana@${DEPLOYER_IP}
** or
** ssh root@${DEPLOYER_IP}
**
** Please delete the '$(get_from_input cloud_env)-cloud' stack when you're done,
** by using one of the following methods:
**
** 1. log into the ECP at https://engcloud.prv.suse.net/project/stacks/
** and delete the stack manually, or
**
** 2. call the delete_stack function from the script library:
** $ source lib.sh
** $ delete_stack
*****************************************************************************************
"
}
|
SUSE-Cloud/automation
|
scripts/jenkins/cloud/manual/lib.sh
|
Shell
|
apache-2.0
| 16,121 |
java -Dfile.encoding=utf-8 -jar run-me.jar
|
cowthan/ImageRepo
|
start.sh
|
Shell
|
apache-2.0
| 42 |
#!/bin/bash
for i in {2..999}; do
printf -v j "Class_%03d" $i
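  # printf -v stores the formatted value in $j, e.g. i=7 -> j=Class_007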
echo "generating $j"
cp Class_001.java $j.java
sed -i "s/Class_001/$j/g" $j.java
done
|
lesaint/experimenting-annotation-processing
|
experimenting-rounds/massive-count-of-annotated-classes/src/main/java/fr/javatronic/blog/massive/annotation2/generate_classes.sh
|
Shell
|
apache-2.0
| 157 |
#!/bin/sh -e
for F in mtc*.swift
do
echo Running $F
swift-t $F
done
|
basheersubei/swift-t
|
stc/docs/gallery/mtc/run.sh
|
Shell
|
apache-2.0
| 73 |
#!/bin/bash
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
GO_CONDUCTOR_DIR=$GOPATH/src/conductor
$CURR_DIR/install.sh
go run $GO_CONDUCTOR_DIR/startclient/startclient.go
|
d3sw/conductor
|
client/go/install_and_run.sh
|
Shell
|
apache-2.0
| 188 |
#!/bin/bash
# REQUIRED:
# ELASTICSEARCH_PORT_9200_TCP_ADDR
# ELASTICSEARCH_PORT_9200_TCP_PORT
synapse /opt/kibana/config/kibana.yml.tmpl
exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
|
samsara/samsara
|
docker-images/kibana/configure-and-start.sh
|
Shell
|
apache-2.0
| 205 |
/bin/echo "postremove script started [$1]"
if [ "$1" = 0 ]
then
/usr/sbin/userdel -r {{lower-name}} 2> /dev/null || :
/bin/rm -rf /usr/local/{{lower-name}}
fi
/bin/echo "postremove script finished"
exit 0
|
mixradio/mr-clojure
|
src/leiningen/new/mr_clojure/postremove.sh
|
Shell
|
bsd-3-clause
| 211 |
#!/usr/bin/env bash
# run_pytest.bash: Run pytest on cclib with coverage checking. Requires
# `pytest` and `pytest-cov`.
set -euxo pipefail
python -m pytest -v --capture=no --cov=cclib --cov-report=term --cov-report=xml:coverage-unit.xml --terse test -k "not test_method"
pushd data
bash ./regression_download.sh
popd
python -m pytest -v --capture=no --cov=cclib --cov-report=term --cov-report=xml:coverage-regression.xml --cov-append -k test_regression test/regression.py
|
cclib/cclib
|
.github/scripts/run_pytest.bash
|
Shell
|
bsd-3-clause
| 476 |
#!/bin/bash
# This script is meant to be called by the "install" step defined in
# .travis.yml. See http://docs.travis-ci.com/ for more details.
# The behavior of the script is controlled by environment variables defined
# in the .travis.yml in the top level folder of the project.
# License: 3-clause BSD
set -e
# Fix the compilers to workaround avoid having the Python 3.4 build
# lookup for g++44 unexpectedly.
export CC=gcc
export CXX=g++
echo 'List files from cached directories'
echo 'pip:'
ls $HOME/.cache/pip
if [[ -d $HOME/download ]]; then
echo 'download'
ls $HOME/download
fi
# Deactivate the travis-provided virtual environment and setup a
# conda-based environment instead
deactivate
# Use the miniconda installer for faster download / install of conda
# itself
pushd .
cd
mkdir -p download
cd download
echo "Cached in $HOME/download :"
ls -l
echo
if [[ ! -f miniconda.sh ]]
then
wget http://repo.continuum.io/miniconda/Miniconda-3.6.0-Linux-x86_64.sh \
-O miniconda.sh
fi
chmod +x miniconda.sh && ./miniconda.sh -b
cd ..
echo $(ls /home/travis/m*)
export PATH=/home/travis/miniconda/bin:$PATH
conda update --yes conda
popd
THEANO_COMMIT=bd168a5663ad582b24cee45d284432027e1d790b
# Configure the conda environment and put it in the path using the
# provided versions
conda create -n testenv --yes python=$PYTHON_VERSION pip nose \
numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION pytables
source activate testenv
pip install git+git://github.com/Theano/Theano.git@$THEANO_COMMIT
if [[ "$INSTALL_MKL" == "true" ]]; then
# Make sure that MKL is used
conda install --yes mkl
else
# Make sure that MKL is not used
conda remove --yes --features mkl || echo "MKL not installed"
fi
if [[ "$COVERAGE" == "true" ]]; then
pip install coverage coveralls
fi
# Build scikit-learn in the install.sh script to collapse the verbose
# build output in the travis output when it succeeds.
python --version
python -c "import numpy; print('numpy %s' % numpy.__version__)"
python -c "import scipy; print('scipy %s' % scipy.__version__)"
python -c "import theano; print('theano %s' % theano.__version__)"
python setup.py build_ext --inplace
|
dagbldr/dagbldr
|
continuous_integration/install.sh
|
Shell
|
bsd-3-clause
| 2,186 |
#!/usr/bin/env bash
set -eux
if [[ ! -f convert ]]
then
curl https://s3.amazonaws.com/michael.snoyman.com/convert-old-stackage-22f85f4829da949df601f2facf2d9b8c794232cf.bz2 > convert.bz2
chmod +x convert.bz2
bunzip2 convert.bz2
fi
cd $(dirname ${BASH_SOURCE[0]})
for d in lts-haskell stackage-nightly stackage-snapshots
do
if [[ ! -d "$d" ]]
then
git clone https://github.com/commercialhaskell/$d
else
(cd "$d" && git pull || echo "Git pull failed, ignoring")
fi
done
./convert
cd stackage-snapshots
git add lts nightly
git diff-index --quiet HEAD && echo No changes && exit 0
git config user.name "Stackage build server"
git config user.email "[email protected]"
git commit -m "More conversions $(date)"
GIT_SSH_COMMAND='ssh -i ../../ssh-lts/id_rsa' git push [email protected]:commercialhaskell/stackage-snapshots master
|
mgajda/stackage
|
automated/new-stackage-format/convert.sh
|
Shell
|
mit
| 870 |
#!/bin/bash
source "${EJABBERD_HOME}/scripts/lib/base_config.sh"
source "${EJABBERD_HOME}/scripts/lib/config.sh"
source "${EJABBERD_HOME}/scripts/lib/base_functions.sh"
source "${EJABBERD_HOME}/scripts/lib/functions.sh"
# Do not exit if users already registered
set +e
randpw() {
< /dev/urandom tr -dc A-Z-a-z-0-9 | head -c ${1:-16};
echo;
}
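# Usage sketch: `randpw` prints 16 random characters, `randpw 24` prints 24.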
register_user() {
local user=$1
local domain=$2
local password=$3
${EJABBERDCTL} register ${user} ${domain} ${password}
return $?
}
register_all_users() {
    # Register users from the environment variable $EJABBERD_USERS with the
    # given password, or with a random password written to stdout. Use
    # whitespace to separate users.
#
# sample:
# - add a user with an given password:
# -e "[email protected]:adminSecret"
# - add a user with a random password:
# -e "[email protected]"
# - set password for admin and use random for user1:
# -e "[email protected]:adminSecret [email protected]"
for user in ${EJABBERD_USERS} ; do
local jid=${user%%:*}
local password=${user#*:}
local username=${jid%%@*}
local domain=${jid#*@}
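        # e.g. user="[email protected]:secret" yields jid=admin@example.com,
        # password=secret, username=admin, domain=example.com; when no
        # password part is given, password equals jid and is randomized below.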
[[ "${password}" == "${jid}" ]] \
&& password=$(randpw)
register_user ${username} ${domain} ${password}
local retval=$?
[[ ${retval} -eq 0 ]] \
&& echo "Password for user ${username}@${domain} is ${password}"
done
}
file_exist ${FIRST_START_DONE_FILE} \
&& exit 0
file_exist ${CLUSTER_NODE_FILE} \
&& exit 0
is_set ${EJABBERD_USERS} \
&& register_all_users
##################################
## Keep for backward compatibility
register_all_ejabberd_admins() {
    # add all admins from environment $EJABBERD_ADMINS with the passwords from
    # environment $EJABBERD_ADMIN_PWD.
local passwords
local IFS=' '
read -a passwords <<< "${EJABBERD_ADMIN_PWD}"
for admin in ${EJABBERD_ADMINS} ; do
local user=${admin%%@*}
local domain=${admin#*@}
local password=${passwords[0]}
passwords=("${passwords[@]:1}")
register_user ${user} ${domain} ${password}
done
}
register_all_ejabberd_admins_randpw() {
# add all admins from environment $EJABBERD_ADMINS with a random
# password and write the password to stdout.
for admin in ${EJABBERD_ADMINS} ; do
local user=${admin%%@*}
local domain=${admin#*@}
local password=$(randpw)
register_user ${user} ${domain} ${password}
local retval=$?
[[ ${retval} -eq 0 ]] \
&& echo "Password for user ${user}@${domain} is ${password}"
done
}
is_set ${EJABBERD_ADMIN_PWD} \
&& register_all_ejabberd_admins
is_true ${EJABBERD_ADMIN_RANDPWD} \
&& register_all_ejabberd_admins_randpw
exit 0
|
sulphur/docker-ejabberd
|
scripts/post/20_ejabberd_register_users.sh
|
Shell
|
mit
| 2,855 |
#cd spec/dummy
#export BUNDLE_GEMFILE=$PWD/Gemfile
#bundle install
|
ldodds/capybara-ng
|
travis/setup.sh
|
Shell
|
mit
| 67 |
#! /bin/sh
. ../../testenv.sh
analyze 585748_deb.vhd
elab_simulate_failure tb_test
clean
echo "Test successful"
|
tgingold/ghdl
|
testsuite/gna/deb585748/testsuite.sh
|
Shell
|
gpl-2.0
| 116 |
#!/bin/sh
ALL_CONFIGS=$*
GREP="grep"
cat <<- EOF > lib/lufa/Bootloaders/DFU/Keyboard.h
#pragma once
$($GREP "MANUFACTURER[ \t]" $ALL_CONFIGS -h | tail -1)
$($GREP "PRODUCT[ \t]" $ALL_CONFIGS -h | tail -1 | tr -d '\r') Bootloader
$($GREP "QMK_ESC_OUTPUT[ \t]" $ALL_CONFIGS -h | tail -1)
$($GREP "QMK_ESC_INPUT[ \t]" $ALL_CONFIGS -h | tail -1)
$($GREP "QMK_LED[ \t]" $ALL_CONFIGS -h | tail -1)
$($GREP "QMK_SPEAKER[ \t]" $ALL_CONFIGS -h | tail -1)
EOF
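# For reference, the generated Keyboard.h comes out roughly like this
# (illustrative values; the real defines are pulled from the configs passed in):
#   #pragma once
#   #define MANUFACTURER    SomeVendor
#   #define PRODUCT         SomeKeyboard Bootloader
#   #define QMK_ESC_OUTPUT  F0
#   #define QMK_ESC_INPUT   F1
#   #define QMK_LED         E6
#   #define QMK_SPEAKER     C6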
|
kmtoki/qmk_firmware
|
tmk_core/make_dfu_header.sh
|
Shell
|
gpl-2.0
| 452 |
#!/usr/bin/env sh
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
pydocstyle invenio_collections && \
isort -rc -c -df **/*.py && \
check-manifest --ignore ".travis-*" && \
sphinx-build -qnNW docs docs/_build/html && \
python setup.py test && \
sphinx-build -qnNW -b doctest docs docs/_build/doctest
|
tiborsimko/invenio-collections
|
run-tests.sh
|
Shell
|
gpl-2.0
| 1,231 |
#! /bin/sh
# Copyright (C) 2006-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure file extensions are matched correctly in the code
# parsing texi files for @setfilename declarations.
# Report from Eric Dorland.
. test-init.sh
echo info_TEXINFOS = bar.texi >Makefile.am
cat >bar.texi <<EOF
@setfilename bar-1.9.info
EOF
$ACLOCAL
$AUTOMAKE --add-missing
$EGREP '[ /]bar-1\.9\.info( |$)' Makefile.in
:
|
kuym/openocd
|
tools/automake-1.15/t/txinfo-setfilename-suffix-strip.sh
|
Shell
|
gpl-2.0
| 1,025 |
#!/bin/sh
#### FOR DEVELOPMENT ONLY: DOES NOT CONTAIN THE 99kernel INIT SCRIPT TO CONFIGURE THE KERNEL. ASSUMES YOU ARE DOING A DIRTY FLASH ####
export SRC_ROOT=`readlink -f ../../..`
## time start ##
time_start=$(date +%s.%N)
# Number of parallel make jobs (one per CPU core, as reported by /proc/cpuinfo)
MAKE=$(grep -c 'processor' /proc/cpuinfo)
## Set compiler location to compile with linaro cortex a8
echo "Setting compiler location..."
export ARCH=arm
export CROSS_COMPILE=$SRC_ROOT/prebuilt/linux-x86/toolchain/linaro-arm-cortex-a8/bin/arm-cortex_a8-linux-gnueabi-
## Build kernel using holiday_defconfig
make holiday_defconfig
make -j"${MAKE}" ARCH=arm
sleep 1
# Post compile tasks
echo "Copying compiled kernel and modules to Packages/out/"
echo "and building flashable zip"
sleep 1
mkdir -p Packages/
mkdir -p Packages/out/
mkdir -p Packages/out/system/lib/modules/
mkdir -p Packages/out/kernel/
mkdir -p Packages/out/META-INF/
cp -a $(find . -name '*.ko' -print | grep -v initramfs) Packages/out/system/lib/modules/
cp -rf prebuilt-scripts/META-INF/ Packages/out/
cp -rf prebuilt-scripts/kernel_dir/* Packages/out/kernel/
cp arch/arm/boot/zImage Packages/out/kernel/
# build flashable zip
export curdate=`date "+%m-%d-%Y"`
cd Packages/out/
zip -r ../pyramid-kernel-dev-$curdate.zip .
echo "Deleting Temp files and folders...."
cd ../../
rm -rf Packages/out/
echo "Build Complete, Check Packages directory for flashable zip"
time_end=$(date +%s.%N)
echo "Total time elapsed: $(echo "($time_end - $time_start) / 60" | bc) minutes ($(echo "$time_end - $time_start" | bc) seconds)"
|
DirtyUnicorns/android_kernel_htc_msm8660-caf
|
build-holiday.sh
|
Shell
|
gpl-2.0
| 1,731 |
#!/bin/sh
set -e
./bootstrap.sh
./configure --enable-maintainer-mode --enable-examples-build --enable-tests-build "$@"
|
pbatard/libusbx
|
autogen.sh
|
Shell
|
lgpl-2.1
| 121 |
#!/bin/bash
BUILD_DIR="../../../../../../../build/android/gradle/apps/atw_opengl/"
./gradlew --project-cache-dir ${BUILD_DIR}.gradle build
jar -tf ${BUILD_DIR}outputs/apk/atw_opengl-all-debug.apk
adb install -r ${BUILD_DIR}outputs/apk/atw_opengl-all-debug.apk
adb shell am start -n com.vulkansamples.atw_opengl/android.app.NativeActivity
|
brenwill/Vulkan-Samples
|
samples/apps/atw/projects/android/gradle/atw_opengl/build.sh
|
Shell
|
apache-2.0
| 342 |
#!/bin/sh
#Move to the folder where ep-lite is installed
cd `dirname $0`
#Was this script started in the bin folder? If so, move out of it
if [ -d "../bin" ]; then
cd "../"
fi
ignoreRoot=0
for ARG in $*
do
if [ "$ARG" = "--root" ]; then
ignoreRoot=1
fi
done
#Stop the script if it's started as root
if [ "$(id -u)" -eq 0 ] && [ $ignoreRoot -eq 0 ]; then
echo "You shouldn't start Etherpad as root!"
echo "Please type 'Etherpad rocks my socks' or supply the '--root' argument if you still want to start it as root"
read rocks
if [ ! "$rocks" == "Etherpad rocks my socks" ]
then
echo "Your input was incorrect"
exit 1
fi
fi
#Prepare the environment
bin/installDeps.sh $* || exit 1
#Move to the node folder and start
echo "Started Etherpad..."
SCRIPTPATH=`pwd -P`
exec node "$SCRIPTPATH/node_modules/ep_etherpad-lite/node/server.js" $*
|
lpagliari/etherpad-lite
|
bin/run.sh
|
Shell
|
apache-2.0
| 865 |
#~/code/code_bash_shell/BatchCopyOrMoveFiles.sh 380_exp_files.txt
while read -r line; do
  f="$line"
  echo "$f"
  cp "$f" ~/DataFromVahid/Data4EachTrialAfterQC/.
done < "$1"
|
solgenomics/zeabase
|
code_bash_shell/BatchCopyOrMoveFiles.sh
|
Shell
|
artistic-2.0
| 173 |
#!/bin/bash
FN="yeastGSData_0.28.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/experiment/src/contrib/yeastGSData_0.28.0.tar.gz"
"https://bioarchive.galaxyproject.org/yeastGSData_0.28.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-yeastgsdata/bioconductor-yeastgsdata_0.28.0_src_all.tar.gz"
)
MD5="b23b5a8c2896b2c614069d45bc4c533a"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in "${URLS[@]}"; do
  curl "$URL" > "$TARBALL"
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
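  # A portable alternative (sketch, assuming openssl is available) that would
  # avoid the Linux/Darwin branching below:
  #   if [[ $(openssl md5 -r "$TARBALL" | cut -d " " -f1) == "$MD5" ]]; then
  #     SUCCESS=1
  #     break
  #   fi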
if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
blankenberg/bioconda-recipes
|
recipes/bioconductor-yeastgsdata/post-link.sh
|
Shell
|
mit
| 1,307 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile -size 70x46 ${SRCDIR}/input_truecolor_70x46.miff YUV
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwfile_YUV_truecolor_70x46.sh
|
Shell
|
gpl-2.0
| 382 |
#!/bin/sh
# This is what we would do if we needed something more:
export OPAMYES=1 OPAMVERBOSE=1
echo System OCaml version
ocaml -version
echo OPAM versions
opam --version
opam --git-version
opam init
opam switch $OCAML_VERSION
|
OCamlPro/typerex-lint
|
autoconf/travis-install.sh
|
Shell
|
gpl-3.0
| 232 |
#! /bin/sh
if test z"$srcdir" = "z"; then
srcdir=.
fi
command=run_parser_all.sh
one_test_logs_dir=test_log
diffs_dir=diffs
if test "z$LONG_TESTS" != z"yes" && test "z$ALL_TESTS" != z"yes"; then
echo "Skipping long tests that take a lot of time to run"
exit 77
fi
if test "z$TEX_HTML_TESTS" = z"yes"; then
echo "Skipping long tests, only doing HTML TeX tests"
exit 77
fi
dir=contents
arg='contents_and_parts'
name='contents_and_parts'
[ -d "$dir" ] || mkdir $dir
srcdir_test=$dir; export srcdir_test;
cd "$dir" || exit 99
../"$srcdir"/"$command" -dir $dir $arg
exit_status=$?
cat $one_test_logs_dir/$name.log
if test -f $diffs_dir/$name.diff; then
echo
cat $diffs_dir/$name.diff
fi
exit $exit_status
|
samdmarshall/info
|
tp/tests/test_scripts/contents_contents_and_parts.sh
|
Shell
|
gpl-3.0
| 723 |
if test -n "$DATRIE_BASEPATH"; then
case `uname -a` in
CYGWIN*)
DATRIE_BASEPATH=`echo $DATRIE_BASEPATH | sed -e 's/\"//g'`
DATRIE_BASEPATH=`cygpath $DATRIE_BASEPATH`;;
MINGW32*)
DATRIE_BASEPATH=`echo $DATRIE_BASEPATH | sed -e 's/\"//g' -e 's/\\\\/\\//g' -e 's/^\\([a-zA-Z]\\):/\\/\\1/g'`
esac
export DATRIE_BASEPATH=$DATRIE_BASEPATH
export PATH=$DATRIE_BASEPATH/bin:$PATH
# export ACLOCAL_FLAGS="-I $DATRIE_BASEPATH/share/aclocal $ACLOCAL_FLAGS"
if test "x$C_INCLUDE_PATH" = x; then
APPEND=
else
APPEND=":$C_INCLUDE_PATH"
fi
export C_INCLUDE_PATH=$DATRIE_BASEPATH/include$APPEND
if test "x$LIBRARY_PATH" = x; then
APPEND=
else
APPEND=":$LIBRARY_PATH"
fi
export LIBRARY_PATH=$DATRIE_BASEPATH/lib:/lib/w32api$APPEND
if test "x$PKG_CONFIG_PATH" = x; then
APPEND=
else
APPEND=":$PKG_CONFIG_PATH"
fi
export PKG_CONFIG_PATH=$DATRIE_BASEPATH/lib/pkgconfig$APPEND
if test "x$MANPATH" = x; then
APPEND=
else
APPEND=":$MANPATH"
fi
export MANPATH=$DATRIE_BASEPATH/share/man$APPEND
fi
|
omeid/libdatrie
|
nsis/contrib/libdatrienv.sh
|
Shell
|
lgpl-2.1
| 1,037 |
#!/bin/bash
vagrant ssh weave-gs-01 -c "weave launch -iprange 10.2.0.1/16"
vagrant ssh weave-gs-02 -c "weave launch -iprange 10.2.0.1/16 172.17.8.101"
vagrant ssh weave-gs-01 -c "weave launch-dns; weave launch-proxy"
vagrant ssh weave-gs-02 -c "weave launch-dns; weave launch-proxy"
|
spacediver/guides
|
weave-loadbalance/setup-weave.sh
|
Shell
|
apache-2.0
| 284 |
#!/bin/bash
docker-compose stop
docker rm $(docker ps -a -f name=mobsosqueryvisualization_ -q)
|
Gordin/Layers-Dockerfiles
|
mobsos-query-visualization/clean.sh
|
Shell
|
apache-2.0
| 96 |
cd build-test-data && grails test-app && cd ../testOptionalJars && grails test-app && cd ../bookStore && grails test-app && cd ../baseTests && grails test-app && cd ..
|
domurtag/build-test-data
|
bin/testAll.sh
|
Shell
|
apache-2.0
| 168 |
#!/bin/bash
# note: expects to run from top directory
./mono/latest-mono-stable.sh
./fcs/build.sh NuGet
|
dsyme/FSharp.Compiler.Service
|
fcs/cibuild.sh
|
Shell
|
mit
| 105 |
#!/bin/sh
TEST_SCRIPT=./VMake/executableTester.sh
until test -r ${TEST_SCRIPT} ; do
TEST_SCRIPT=../${TEST_SCRIPT}
done
. ${TEST_SCRIPT}
runAndHandleSystemTest "testMPIStream 2 0" "$0" "$@"
|
bmi-forum/bmi-pyre
|
StGermain/Base/IO/tests/testMPIStream.0of2.sh
|
Shell
|
gpl-2.0
| 199 |
#!/bin/bash
# added 2016-04-13 by singh.janmejay
# This file is part of the rsyslog project, released under ASL 2.0
echo ===============================================================================
echo \[dynstats_prevent_premature_eviction-vg.sh\]: test for ensuring metrics are not evicted before unused-ttl with valgrind
. $srcdir/diag.sh init
. $srcdir/diag.sh startup-vg dynstats_reset.conf
. $srcdir/diag.sh wait-for-stats-flush 'rsyslog.out.stats.log'
. $srcdir/diag.sh block-stats-flush
. $srcdir/diag.sh injectmsg-litteral $srcdir/testsuites/dynstats_input_1
. $srcdir/diag.sh allow-single-stats-flush-after-block-and-wait-for-it
. $srcdir/diag.sh injectmsg-litteral $srcdir/testsuites/dynstats_input_2
. $srcdir/diag.sh allow-single-stats-flush-after-block-and-wait-for-it
. $srcdir/diag.sh injectmsg-litteral $srcdir/testsuites/dynstats_input_3
. $srcdir/diag.sh await-stats-flush-after-block
. $srcdir/diag.sh wait-queueempty
. $srcdir/diag.sh wait-for-stats-flush 'rsyslog.out.stats.log'
. $srcdir/diag.sh content-check "foo 001 0"
. $srcdir/diag.sh content-check "foo 006 0"
echo doing shutdown
. $srcdir/diag.sh shutdown-when-empty
echo wait on shutdown
. $srcdir/diag.sh wait-shutdown-vg
. $srcdir/diag.sh check-exit-vg
# because dyn-accumulators for existing metrics were posted to within a second, they should not have been evicted
. $srcdir/diag.sh custom-content-check 'baz=2' 'rsyslog.out.stats.log'
. $srcdir/diag.sh custom-content-check 'bar=1' 'rsyslog.out.stats.log'
. $srcdir/diag.sh custom-content-check 'foo=3' 'rsyslog.out.stats.log'
# sum is high because accumulators were never reset, and we expect them to last a specific number of cycles (when we posted before ttl expiry)
. $srcdir/diag.sh first-column-sum-check 's/.*foo=\([0-9]\+\)/\1/g' 'foo=' 'rsyslog.out.stats.log' 6
. $srcdir/diag.sh first-column-sum-check 's/.*bar=\([0-9]\+\)/\1/g' 'bar=' 'rsyslog.out.stats.log' 1
. $srcdir/diag.sh first-column-sum-check 's/.*baz=\([0-9]\+\)/\1/g' 'baz=' 'rsyslog.out.stats.log' 3
. $srcdir/diag.sh first-column-sum-check 's/.*new_metric_add=\([0-9]\+\)/\1/g' 'new_metric_add=' 'rsyslog.out.stats.log' 3
. $srcdir/diag.sh first-column-sum-check 's/.*ops_overflow=\([0-9]\+\)/\1/g' 'ops_overflow=' 'rsyslog.out.stats.log' 0
. $srcdir/diag.sh first-column-sum-check 's/.*no_metric=\([0-9]\+\)/\1/g' 'no_metric=' 'rsyslog.out.stats.log' 0
. $srcdir/diag.sh exit
|
sematext/rsyslog
|
tests/dynstats_prevent_premature_eviction-vg.sh
|
Shell
|
gpl-3.0
| 2,387 |
#!/bin/bash
# Direct solver to generate initial data
./fsitimedependent -input "./input/turek_FSI2.neu" -n_timesteps 200 -autosave_time_interval 200 -nlevel 1 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "preonly" -max_outer_solver_iter 1 -nrefinement 3 -std_output stdOutput.txt > stdOutput.txt
###########################################
# Direct solver for time info
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_3_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "preonly" -max_outer_solver_iter 1 -nrefinement 3 -nlevel 1 -std_output fsi2_3_preonly.txt > fsi2_3_preonly.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_4_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "preonly" -max_outer_solver_iter 1 -nrefinement 4 -nlevel 1 -std_output fsi2_4_preonly.txt > fsi2_4_preonly.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_5_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "preonly" -max_outer_solver_iter 1 -nrefinement 5 -nlevel 1 -std_output fsi2_5_preonly.txt > fsi2_5_preonly.txt
# Direct solver for convergence and MUMPS info
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_3_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 1 -ksp_monitor_true_residual -ksp_view -mat_mumps_icntl_11 1 -nrefinement 3 -nlevel 1 -std_output fsi2_3_gmres.txt > fsi2_3_gmres.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_4_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 1 -ksp_monitor_true_residual -ksp_view -mat_mumps_icntl_11 1 -nrefinement 4 -nlevel 1 -std_output fsi2_4_gmres.txt > fsi2_4_gmres.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_5_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 1 -ksp_monitor_true_residual -ksp_view -mat_mumps_icntl_11 1 -nrefinement 5 -nlevel 1 -std_output fsi2_5_gmres.txt > fsi2_5_gmres.txt
############################################
# Multigrid solver for time info
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_3_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 60 -nrefinement 3 -nlevel 3 -std_output fsi2_3_gmres_time.txt > fsi2_3_gmres_time.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_4_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 60 -nrefinement 4 -nlevel 4 -std_output fsi2_4_gmres_time.txt > fsi2_4_gmres_time.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_5_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 60 -nrefinement 5 -nlevel 5 -std_output fsi2_5_gmres_time.txt > fsi2_5_gmres_time.txt
# Multigrid solver for convergence and memory info
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_3_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 60 -ksp_monitor_true_residual -mem_infos 1 -nrefinement 3 -nlevel 3 -std_output fsi2_3_gmres_meminfo.txt > fsi2_3_gmres_meminfo.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_4_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 60 -ksp_monitor_true_residual -mem_infos 1 -nrefinement 4 -nlevel 4 -std_output fsi2_4_gmres_meminfo.txt > fsi2_4_gmres_meminfo.txt
./fsitimedependent -input "./input/turek_FSI2.neu" -restart_file_name http://www.math.ttu.edu/~eaulisa/Benchmarks/runs/save/FSI2/turek_FSI2_5_time10.775000 -n_timesteps 10 -autosave_time_interval 40 -rhof 1000 -muf 1 -rhos 10000 -E 1500000 -ni 0.5 -ic_bdc "../../../../lib64/libfsi2_td_2d_turek_hron_benchmark_bdc.so" -outer_ksp_solver "gmres" -max_outer_solver_iter 60 -ksp_monitor_true_residual -mem_infos 1 -nrefinement 5 -nlevel 5 -std_output fsi2_5_gmres_meminfo.txt > fsi2_5_gmres_meminfo.txt
|
FeMTTU/femus
|
applications/FSI/TimeDependent/Benchmarks/run_fsi2_turek.sh
|
Shell
|
lgpl-2.1
| 6,353 |
#!/bin/bash
source ./setenv.sh
FILENAME=libtheora-svn
svn co http://svn.xiph.org/trunk/theora $FILENAME
cd $FILENAME
# remove call to ./configure from the script
head --lines=-1 autogen.sh > autogenmod.sh
chmod +x ./autogenmod.sh
./autogenmod.sh
rm ./config.sub
rm ./config.guess
wget http://git.savannah.gnu.org/cgit/config.git/plain/config.sub -O config.sub
wget http://git.savannah.gnu.org/cgit/config.git/plain/config.guess -O config.guess
wget https://raw.github.com/gabriel/ffmpeg-iphone-build/master/gas-preprocessor.pl -O lib/arm/gas-preprocessor.pl --no-check-certificate
chmod +x lib/arm/gas-preprocessor.pl
patch -p0 < ../../../patches/armv7/libtheora-svn.patch
export CCAS="perl ./arm/gas-preprocessor.pl $DEVROOT/usr/bin/arm-apple-darwin10-gcc-4.2.1"
export OGG_LIBS="-L$IOS_ADDITIONAL_LIBRARY_PATH/lib -logg"
export VORBIS_LIBS="-L$IOS_ADDITIONAL_LIBRARY_PATH/lib -lm -lvorbis -logg"
export LDFLAGS="-Wl,-L$SDKROOT/usr/lib,-L$IOS_ADDITIONAL_LIBRARY_PATH/lib"
export CFLAGS="-std=c99 -arch armv7 -mcpu=cortex-a8 -pipe -no-cpp-precomp -isysroot $SDKROOT -miphoneos-version-min=$IOS_DEPLOY_TGT -I$IOS_ADDITIONAL_LIBRARY_PATH/include -DIOS_ARMV7"
./configure --host=$IOS_HOST_NAME --prefix=$IOS_ADDITIONAL_LIBRARY_PATH --disable-examples
make install
|
humble/ags-geminirue
|
iOS/buildlibs/armv7/theora-svn.sh
|
Shell
|
artistic-2.0
| 1,274 |
#!/usr/bin/env bash
set -exuo pipefail
# Rename arrow.dll to lib_arrow.dll to avoid conflicts with the arrow-cpp arrow.dll
sed -i -e 's/void R_init_arrow/__declspec(dllexport) void R_init_lib_arrow/g' r/src/arrowExports.cpp
sed -i -e 's/useDynLib(arrow/useDynLib(lib_arrow/g' r/NAMESPACE
|
cpcloud/arrow
|
dev/tasks/conda-recipes/r-arrow/build_win.sh
|
Shell
|
apache-2.0
| 291 |
#!/bin/bash
# Based on the relocatable vmlinux file and offset create the
# new vmlinux and System.map files. The new vmlinux and System.map are
# intended to be used by debugging tools to retrieve the actual
# addresses of symbols in the kernel.
#
# Usage
# mksysmap vmlinux-old offset
# Author : Jia Ma ([email protected])
# Created on : 21 Sep 2015
# Copyright (c) Samsung Electronics 2015
if test $# -ne 2; then
echo "Usage: $0 vmlinux offset"
exit 1
fi
vmlinux=$1
offset=$2
if [[ -z "$offset" || -z "$vmlinux" ]]; then
echo "$0 : variable not set"
exit 1
fi
if [[ ! -f $vmlinux ]]; then
echo "$vmlinux file not exist!"
exit 1
fi
ARM_TOOLCHAIN=/home/jiama/SRA_DPI_TASK_MSM8996_LUCKYQLTE-JIAMA/android/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android
NM=$ARM_TOOLCHAIN-nm
OBJCOPY=$ARM_TOOLCHAIN-objcopy
if [[ -z "$ARM_TOOLCHAIN" ]]; then
echo "Please specify ARM toolchain"
exit 1
fi
echo "+Patching System.map --> System.map.patched"
### generate runtime System.map file ###
$OBJCOPY --adjust-vma $offset $vmlinux vmlinux.tmp 2>/dev/null
$NM -n vmlinux.tmp | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)' > System.map.patched
rm -f vmlinux.tmp
echo "+Patching $vmlinux -->vmlinux.patched"
# the following simply changes the vmlinux from DYN type to EXEC type
# to keep the JTag tool from loading the dynamic symbols as well: e.g. there would be 2 start_kernel entries in the JTag symbol list, 1 from the SYMBOL table and 1 from the RELO section
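# To confirm the edit took effect: `readelf -h vmlinux.patched` should then
# report "Type: EXEC (Executable file)" instead of "Type: DYN (Shared object file)".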
if [[ ! -f "elfedit" ]]; then
echo "Can find elfedit"
exit 1
fi
cp vmlinux vmlinux.patched
elfedit --output-type exec vmlinux.patched
echo "+Done"
|
Jovy23/N930TUVU1APGC_Kernel
|
kaslr_patch.sh
|
Shell
|
gpl-2.0
| 1,664 |
#! /bin/bash
# Copyright (C) 2013 Red Hat, Inc.
# This file is part of elfutils.
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# elfutils is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. $srcdir/backtrace-subr.sh
# This test really cannot be run under valgrind, it tries to introspect
# itself through ptrace and will find bits and pieces of valgrind.
# On top of that valgrind also tries to read all the unwind info and
# will warn and complain about various opcodes it doesn't understand...
unset VALGRIND_CMD
tempfiles dwarf.{bt,err}
(set +ex; testrun ${abs_builddir}/backtrace-dwarf 1>dwarf.bt 2>dwarf.err; true)
cat dwarf.{bt,err}
check_native_unsupported dwarf.err dwarf
check_main dwarf.bt dwarf
|
MonkeyZZZZ/platform_external_elfutils
|
tests/run-backtrace-dwarf.sh
|
Shell
|
gpl-3.0
| 1,231 |
#!/bin/bash
dir=`dirname "$0"`
cd $dir/../data
# download
if ! [ -e rcv1_train.binary ]; then
wget http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_train.binary.bz2
bunzip2 rcv1_train.binary.bz2
fi
if ! [ -e rcv1_test.binary ]; then
wget http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/rcv1_test.binary.bz2
bunzip2 rcv1_test.binary.bz2
fi
name=(train test)
for t in "${name[@]}"
do
echo $t
# shuffle
rnd=rcv1_${t}_rand
shuf rcv1_${t}.binary >$rnd
# split
mkdir -p rcv1/${t}
rm -f rcv1/${t}/*
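# -n l/8 produces 8 line-aligned chunks (no line is split across parts);
# with the suffix options the outputs are named part-001 .. part-008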
split -n l/8 --numeric-suffixes=1 --suffix-length=3 $rnd rcv1/${t}/part-
rm $rnd
done
# swap train and test
mv rcv1/train tmp
mv rcv1/test rcv1/train
mv tmp rcv1/test
|
yipeiw/pserver
|
example/linear/rcv1/download.sh
|
Shell
|
apache-2.0
| 750 |
#!/bin/sh
#
# From: [email protected] (H.D. Elbers)
# Date: Thu, 12 Dec 1996 12:16:46 +0100 (MET)
# Cc: [email protected] (H.D. Elbers)
#
# Hello Marc,
#
# Since it wasn't possible to play the recorded messages from my Elsa modem
# on my soundblaster with the pvftools used in the "listen" script I hacked it
# a bit to use an external microphone which is connected to the soundblaster.
# I also externded the script with a "played" directory containing compressed
# already played messages.
# Perhaps you can use it for the next vgetty-distribution?
#
# Greetings, Henk.
#
VOICEDIR=/usr/spool/voice/incoming
VM="vm play -s"
MIXER=~/sound/sndkit/mixer
DIALOG=dialog
FLAG=.flag
FILES=va*.rmd
ZFILES=va*.rmd.gz
play_msg()
{
$DIALOG </dev/tty --title "PLAYING FILE" --infobox \
"Playing $choice\npress [space] to skip" 5 51
trap "" SIGINT
stty quit ' '
$MIXER vol 100 > /dev/null 2>&1
$MIXER mic 100 > /dev/null 2>&1
rm -f /var/lock/LCK..ttyS1
$VM $choice
echo $$ > /var/lock/LCK..ttyS1
$MIXER mic 0 > /dev/null 2>&1
$MIXER vol 80 > /dev/null 2>&1
stty sane
trap SIGINT
}
if [ -f /var/lock/LCK..ttyS1 ]
then
echo "modem is locked..."
exit 1
fi
echo $$ > /var/lock/LCK..ttyS1
cd $VOICEDIR
DONE=no
while [ $DONE = "no" ]
do
if $DIALOG </dev/tty --clear --title "PLAY VOICE" \
--menu "Pick a voice file to play" 20 51 14 \
`ls -lt $FILES 2>/dev/null \
| awk '{ printf("%s %s-%s-%s-(%dk)\n",$9,$6,$7,$8,$5/1024) }'` \
played 'already played messages' 2> /tmp/menu.tmp.$$;\
then
choice=`cat /tmp/menu.tmp.$$`
if [ $choice = "played" ]
then
cd $VOICEDIR/played
P_DONE=no
while [ $P_DONE = "no" ]
do
if $DIALOG </dev/tty --clear --title "PLAY VOICE" \
--menu "Pick a voice file to play" 20 51 14 \
`ls -lt $ZFILES 2>/dev/null | awk \
'{ printf("%s %s-%s-%s-(%dk)\n",$9,$6,$7,$8,$5/1024) }'` \
2> /tmp/menu.tmp.$$;
then
choice=`cat /tmp/menu.tmp.$$`
gunzip < $choice > /tmp/menu.rmd.$$
choice=/tmp/menu.rmd.$$
play_msg
rm $choice
else
P_DONE=yes
fi
done
cd $VOICEDIR
else
play_msg
if $DIALOG </dev/tty --clear --title "DELETE FILE" \
--menu $choice 10 60 3 \
1 "keep message" \
2 "move message to $VOICEDIR/played" \
3 "delete message" 2> /tmp/menu.tmp.$$
then
ans=`cat /tmp/menu.tmp.$$`
if [ $ans -eq 2 ];then mv $choice played;gzip played/$choice;fi
if [ $ans -eq 3 ];then rm $choice;fi
fi
fi
else
$DIALOG --clear
DONE=yes
rm -f $FLAG
fi
rm -f /tmp/menu.tmp.$$
done
rm -f /var/lock/LCK..ttyS1
|
Distrotech/mgetty
|
voice/scripts/listen.sh
|
Shell
|
gpl-2.0
| 2,892 |
#!/bin/bash
if ! type chkconfig &> /dev/null; then
update-rc.d kairosdb defaults
else
chkconfig --add kairosdb
chkconfig kairosdb on
fi
/etc/init.d/kairosdb start
|
panqingcui/kairosdb-1
|
src/scripts/install/post_install.sh
|
Shell
|
apache-2.0
| 167 |
#!/usr/bin/env bash
set -e
cd "$(dirname "$BASH_SOURCE")/.."
rm -rf vendor/
source 'hack/.vendor-helpers.sh'
# the following lines are in sorted order, FYI
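# clone is defined in hack/.vendor-helpers.sh; judging from the calls below it
# takes: <vcs> <import path> <ref/sha> [<alternate clone URL>]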
clone git github.com/Sirupsen/logrus v0.8.2 # logrus is a common dependency among multiple deps
clone git github.com/docker/libtrust 230dfd18c232
clone git github.com/go-check/check 64131543e7896d5bcc6bd5a76287eb75ea96c673
clone git github.com/gorilla/context 14f550f51a
clone git github.com/gorilla/mux e444e69cbd
clone git github.com/kr/pty 5cf931ef8f
clone git github.com/microsoft/hcsshim 2f540b26beafc3d4aded4fc9799af261a1a91352
clone git github.com/mistifyio/go-zfs v2.1.1
clone git github.com/natefinch/npipe 0938d701e50e580f5925c773055eb6d6b32a0cbc
clone git github.com/tchap/go-patricia v2.1.0
clone git golang.org/x/net 3cffabab72adf04f8e3b01c5baf775361837b5fe https://github.com/golang/net.git
clone hg code.google.com/p/gosqlite 74691fb6f837
#get libnetwork packages
clone git github.com/docker/libnetwork 0517ceae7dea82ded435b99af810efa27b56de73
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
clone git github.com/hashicorp/serf 7151adcef72687bf95f451a2e0ba15cb19412bf2
clone git github.com/docker/libkv 60c7c881345b3c67defc7f93a8297debf041d43c
clone git github.com/vishvananda/netns 493029407eeb434d0c2d44e02ea072ff2488d322
clone git github.com/vishvananda/netlink 20397a138846e4d6590e01783ed023ed7e1c38a6
clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
clone git github.com/coreos/go-etcd v2.0.0
clone git github.com/hashicorp/consul v0.5.2
# get distribution packages
clone git github.com/docker/distribution 419bbc2da637d9b2a812be78ef8436df7caac70d
clone git github.com/docker/libcontainer v2.2.1
# libcontainer deps (see src/github.com/docker/libcontainer/update-vendor.sh)
clone git github.com/coreos/go-systemd v2
clone git github.com/godbus/dbus v2
clone git github.com/syndtr/gocapability 66ef2aa7a23ba682594e2b6f74cf40c0692b49fb
clone git github.com/golang/protobuf 655cdfa588ea
clone git github.com/Graylog2/go-gelf 6c62a85f1d47a67f2a5144c0e745b325889a8120
clone git github.com/fluent/fluent-logger-golang v1.0.0
# fluent-logger-golang deps
clone git github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa
clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
clean
|
sivabizruntime/dockerfile
|
hack/vendor.sh
|
Shell
|
apache-2.0
| 2,595 |
#!/bin/sh
PROJECT_DIR=/usr/local/apache/www.alhem.net/htdocs/Sockets
PROJECT_NAME=sockets
/usr/local/bin/dp \
-D_RUN_DP \
-DUSE_SCTP -D_THREADSAFE_SOCKETS \
-DHAVE_OPENSSL -DENABLE_POOL -DENABLE_RECONNECT -DENABLE_SOCKS4 \
-DENABLE_IPV6 -DENABLE_DETACH -DENABLE_RESOLVER -DENABLE_TRIGGERS \
-DENABLE_XML \
-fontsize 12 \
-lr -dot -cluster | grep -v std > x.dot
dot -Tpng -o $PROJECT_DIR/sockets.png x.dot
dot -Tcmapx -o $PROJECT_DIR/sockets.cmap x.dot
/usr/local/bin/mkjs $PROJECT_DIR/$PROJECT_NAME.cmap > $PROJECT_DIR/$PROJECT_NAME.js
|
miiimooo/greylist
|
src/Sockets-2.3.5/mkdot.sh
|
Shell
|
gpl-2.0
| 544 |
#!/bin/bash
# BUG(rjeczalik): -ngrokdebug is required; when it's off, ngrok stalls on
# process pipes, causing the test to hang; ioutil.Discard should be
# plugged in somewhere to fix this
# ngrok v1 authtoken, see ./ngrokProxy
export E2ETEST_NGROKTOKEN=${E2ETEST_NGROKTOKEN:-}
# aws creds with access to dev.koding.io Route53 hosted zone
export E2ETEST_ACCESSKEY=${E2ETEST_ACCESSKEY:-}
export E2ETEST_SECRETKEY=${E2ETEST_SECRETKEY:-}
run=${1:-}
if [[ ! -z "$run" ]]; then
run="-run $run"
fi
# TODO(rjeczalik): enable after fixing TMS-3077
exit 0
# NOTE(rjeczalik): -noclean is used to keep DNS records, sometimes
# handy for development when AWS is utterly slow.
go test -v koding/kites/e2etest $run -- -debug -ngrokdebug -noclean
# For more options see
#
# go test -v koding/kites/e2etest -- -help
#
|
jack89129/koding
|
go/src/koding/kites/e2etest/e2etest.sh
|
Shell
|
apache-2.0
| 807 |
#!/bin/sh
test_description='diff.*.textconv tests'
. ./test-lib.sh
find_diff() {
sed '1,/^index /d' | sed '/^-- $/,$d'
}
cat >expect.binary <<'EOF'
Binary files a/file and b/file differ
EOF
cat >expect.text <<'EOF'
--- a/file
+++ b/file
@@ -1 +1,2 @@
0
+1
EOF
cat >hexdump <<'EOF'
#!/bin/sh
"$PERL_PATH" -e '$/ = undef; $_ = <>; s/./ord($&)/ge; print $_' < "$1"
EOF
chmod +x hexdump
test_expect_success 'setup binary file with history' '
printf "\\0\\n" >file &&
git add file &&
git commit -m one &&
printf "\\01\\n" >>file &&
git add file &&
git commit -m two
'
test_expect_success 'file is considered binary by porcelain' '
git diff HEAD^ HEAD >diff &&
find_diff <diff >actual &&
test_cmp expect.binary actual
'
test_expect_success 'file is considered binary by plumbing' '
git diff-tree -p HEAD^ HEAD >diff &&
find_diff <diff >actual &&
test_cmp expect.binary actual
'
test_expect_success 'setup textconv filters' '
echo file diff=foo >.gitattributes &&
git config diff.foo.textconv "\"$(pwd)\""/hexdump &&
git config diff.fail.textconv false
'
test_expect_success 'diff produces text' '
git diff HEAD^ HEAD >diff &&
find_diff <diff >actual &&
test_cmp expect.text actual
'
test_expect_success 'diff-tree produces binary' '
git diff-tree -p HEAD^ HEAD >diff &&
find_diff <diff >actual &&
test_cmp expect.binary actual
'
test_expect_success 'log produces text' '
git log -1 -p >log &&
find_diff <log >actual &&
test_cmp expect.text actual
'
test_expect_success 'format-patch produces binary' '
git format-patch --no-binary --stdout HEAD^ >patch &&
find_diff <patch >actual &&
test_cmp expect.binary actual
'
test_expect_success 'status -v produces text' '
git reset --soft HEAD^ &&
git status -v >diff &&
find_diff <diff >actual &&
test_cmp expect.text actual &&
git reset --soft HEAD@{1}
'
test_expect_success 'grep-diff (-G) operates on textconv data (add)' '
echo one >expect &&
git log --root --format=%s -G0 >actual &&
test_cmp expect actual
'
test_expect_success 'grep-diff (-G) operates on textconv data (modification)' '
echo two >expect &&
git log --root --format=%s -G1 >actual &&
test_cmp expect actual
'
test_expect_success 'pickaxe (-S) operates on textconv data (add)' '
echo one >expect &&
git log --root --format=%s -S0 >actual &&
test_cmp expect actual
'
test_expect_success 'pickaxe (-S) operates on textconv data (modification)' '
echo two >expect &&
git log --root --format=%s -S1 >actual &&
test_cmp expect actual
'
cat >expect.stat <<'EOF'
file | Bin 2 -> 4 bytes
1 file changed, 0 insertions(+), 0 deletions(-)
EOF
test_expect_success 'diffstat does not run textconv' '
echo file diff=fail >.gitattributes &&
git diff --stat HEAD^ HEAD >actual &&
test_i18ncmp expect.stat actual &&
head -n1 <expect.stat >expect.line1 &&
head -n1 <actual >actual.line1 &&
test_cmp expect.line1 actual.line1
'
# restore working setup
echo file diff=foo >.gitattributes
cat >expect.typechange <<'EOF'
--- a/file
+++ /dev/null
@@ -1,2 +0,0 @@
-0
-1
diff --git a/file b/file
new file mode 120000
index 0000000..67be421
--- /dev/null
+++ b/file
@@ -0,0 +1 @@
+frotz
\ No newline at end of file
EOF
# make a symlink the hard way that works on symlink-challenged file systems
test_expect_success 'textconv does not act on symlinks' '
printf frotz > file &&
git add file &&
git ls-files -s | sed -e s/100644/120000/ |
git update-index --index-info &&
git commit -m typechange &&
git show >diff &&
find_diff <diff >actual &&
test_cmp expect.typechange actual
'
test_done
|
velorientc/git_test3
|
t/t4030-diff-textconv.sh
|
Shell
|
gpl-2.0
| 3,555 |