code | repo_name | path | language | license | size
---|---|---|---|---|---|
#!/bin/bash
set -e
# A POSIX variable
OPTIND=1 # Reset in case getopts has been used previously in the shell.
function show_help {
echo "Pharo Build Script
==================
This script assumes the existence of a new uninitialized image. Then it proceeds to its initialization and \"growing\".
* Step 1:
- Initialize the image
- output: core.image
* Step 2:
- Bootstrap Monticello local repositories
- output: monticello_bootstrap.image and changes file
* Step 3:
- Load Monticello remote repositories
- output: monticello.image and changes file
* Step 4:
- Load Metacello
- output: metacello.image and changes file
* Step 5:
- Load the rest of the image using BaselineOfIDE
- output: Pharo.image and changes file
Script arguments
================
-a
Architecture of the image. Indicates whether the image will be a 32-bit or 64-bit artifact.
Expected values: 32, 64
-h -?
Prints this help message
"
exit 0
}
# Initialize our own variables:
ARCH_DESCRIPTION=${BOOTSTRAP_ARCH}
# Use -gt 1 to consume two arguments per pass in the loop (e.g. each
# argument has a corresponding value to go with it).
# Use -gt 0 to consume one or more arguments per pass in the loop (e.g.
# some arguments don't have a corresponding value to go with it such
# as in the --default example).
# note: if this is set to -gt 0 the /etc/hosts part is not recognized ( may be a bug )
while getopts "h?a:d" opt; do
case "${opt}" in
a)
if [[ "${OPTARG}" != "32" && "${OPTARG}" != "64" ]]; then
echo "Invalid Option ${OPTARG}: expected architecture values are 32 or 64";
exit 1;
fi
ARCH_DESCRIPTION=${OPTARG};
;;
d)
DESCRIBE=1;
;;
h|\?)
show_help;
exit 0;
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
if [ -z "${ARCH_DESCRIPTION}" ]; then
echo "No architecture specified. Please set the BOOTSTRAP_ARCH environment variable or use the -a argument";
exit 1;
fi
GIT_DESCRIBE=`git describe --always`
SUFFIX=${ARCH_DESCRIPTION}bit-${GIT_DESCRIBE}
if [[ ${DESCRIBE} -eq "1" ]]; then
echo ${SUFFIX}
exit 0
fi
SUFFIX=-$SUFFIX
#Get inside the bootstrap-cache folder. Pharo interprets relative paths as relative to the image, not to the working directory
cd bootstrap-cache
#Prepare
echo "Prepare Bootstrap files"
cp bootstrap.image core.image
../bootstrap/scripts/download_vm.sh
echo "Prepare fonts"
unzip ../resources/fonts/BitmapDejaVuSans.fuel -d .
echo "Prepare icons"
mkdir icon-packs
cd icon-packs
wget http://github.com/pharo-project/pharo-icon-packs/archive/idea11.zip
cd ..
#Required for metacello baselines and unicode initialization to work correctly
ln -s .. pharo-core
#Bootstrap Initialization: Class and RPackage initialization
echo "[Core] Class and RPackage initialization"
./vm/pharo core.image st ../bootstrap/scripts/01-initialization/01-init.st --save --quit
./vm/pharo core.image st ../bootstrap/scripts/01-initialization/02-initRPackageOrganizer.st --save --quit
./vm/pharo core.image st ../bootstrap/scripts/01-initialization/03-initUnicode.st --save --quit
zip core$SUFFIX.zip core.image
#Bootstrap Monticello Part 1: Core and Local repositories
echo "[Monticello] Bootstrap Monticello Core and Local repositories"
./vm/pharo core.image save monticello_bootstrap
./vm/pharo monticello_bootstrap.image st st-cache/Monticello.st --save --quit
./vm/pharo monticello_bootstrap.image st ../bootstrap/scripts/02-monticello-bootstrap/01-fixLocalMonticello.st --save --quit
./vm/pharo monticello_bootstrap.image st ../bootstrap/scripts/02-monticello-bootstrap/02-bootstrapMonticello.st --save --quit
zip monticello_bootstrap$SUFFIX.zip monticello_bootstrap.*
#Bootstrap Monticello Part 2: Networking Packages and Remote Repositories
echo "[Monticello] Loading Networking Packages and Remote Repositories"
./vm/pharo monticello_bootstrap.image save monticello
./vm/pharo monticello.image st ../bootstrap/scripts/02-monticello-bootstrap/03-bootstrapMonticelloRemote.st --save --quit
zip monticello$SUFFIX.zip monticello.*
#Bootstrap Metacello
echo "[Metacello] Bootstrapping Metacello"
./vm/pharo monticello.image save metacello
./vm/pharo metacello.image st ../bootstrap/scripts/03-metacello-bootstrap/01-loadMetacello.st --save --quit
zip metacello$SUFFIX.zip metacello.*
echo "[Pharo] Reloading rest of packages"
./vm/pharo metacello.image save Pharo
./vm/pharo Pharo.image eval --save "Metacello new baseline: 'IDE';repository: 'filetree://../src'; load"
./vm/pharo Pharo.image clean --release
# fix the display size in the image header (position 40 [zero based], 24 for 32-bit image)
# in older versions we must use octal representation
printf "\231\002\320\003" > displaySize.bin
if [[ ${ARCH_DESCRIPTION} -eq "32" ]]; then
SEEK=24
else
SEEK=40
fi
dd if="displaySize.bin" of="Pharo.image" bs=1 seek=$SEEK count=4 conv=notrunc
zip Pharo$SUFFIX.zip Pharo.*
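# Illustrative usage (added note; flag values below are examples, not part of the upstream script):
#   BOOTSTRAP_ARCH=64 ./build.sh      # architecture taken from the environment
#   ./build.sh -a 32                  # architecture passed explicitly
#   ./build.sh -a 64 -d               # only print the artifact suffix, e.g. 64bit-<git-describe>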
| vineetreddyrajula/pharo | bootstrap/scripts/build.sh | Shell | mit | 4,988 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies the dSYM of a vendored framework
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
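# For illustration (hypothetical framework name and architectures): for a fat binary,
# `lipo -info Foo.framework/Foo` prints something like
#   Architectures in the fat file: Foo.framework/Foo are: x86_64 arm64
# so with ARCHS="arm64" the loop above would run
#   lipo -remove x86_64 -output Foo.framework/Foo Foo.framework/Foo
# leaving only the architectures the current build actually targets.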
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/TTGSnackbar/TTGSnackbar.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/TTGSnackbar/TTGSnackbar.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| klwoon/TTGSnackbar | ExampleObjc/TTGSnackbarObjcExample/Pods/Target Support Files/Pods-TTGSnackbarObjcExample/Pods-TTGSnackbarObjcExample-frameworks.sh | Shell | mit | 4,674 |
#!/usr/bin/env bash
set -e
cd `dirname $0`
TEST_DIR=$PWD
export PATH=`dirname $TEST_DIR`:$PATH
# Reset the direnv loading if any
export DIRENV_CONFIG=$PWD
unset DIRENV_BASH
unset DIRENV_DIR
unset DIRENV_MTIME
unset DIRENV_DIFF
direnv_eval() {
eval `direnv export bash`
}
test_start() {
cd "$TEST_DIR/scenarios/$1"
direnv allow
echo "## Testing $1 ##"
}
test_stop() {
cd $TEST_DIR
direnv_eval
}
### RUN ###
direnv allow || true
direnv_eval
test_start base
direnv_eval
test "$HELLO" = "world"
MTIME=$DIRENV_MTIME
direnv_eval
test "$MTIME" = "$DIRENV_MTIME"
sleep 1
touch .envrc
direnv_eval
test "$MTIME" != "$DIRENV_MTIME"
cd ..
direnv_eval
echo "${HELLO}"
test -z "${HELLO}"
test_stop
test_start inherit
direnv_eval
test "$HELLO" = "world"
test_stop
test_start "ruby-layout"
direnv_eval
test "$GEM_HOME" != ""
test_stop
# Make sure directories with spaces are fine
test_start "space dir"
direnv_eval
test "$SPACE_DIR" = "true"
test_stop
test_start "child-env"
direnv_eval
test "$PARENT_PRE" = "1"
test "$CHILD" = "1"
test "$PARENT_POST" = "1"
test -z "$REMOVE_ME"
test_stop
test_start "special-vars"
export DIRENV_BASH=`which bash`
export DIRENV_CONFIG=foobar
direnv_eval || true
test -n "$DIRENV_BASH"
test "$DIRENV_CONFIG" = "foobar"
unset DIRENV_BASH
unset DIRENV_CONFIG
test_stop
test_start "dump"
direnv_eval
test "$LS_COLORS" = "*.ogg=38;5;45:*.wav=38;5;45"
test "$THREE_BACKSLASHES" = '\\\'
test "$LESSOPEN" = "||/usr/bin/lesspipe.sh %s"
test_stop
test_start "empty-var"
direnv_eval
test "${FOO-unset}" != "unset"
test "${FOO}" = ""
test_stop
test_start "empty-var-unset"
export FOO=""
direnv_eval
test "${FOO-unset}" == "unset"
unset FOO
test_stop
test_start "missing-file-source-env"
direnv_eval
test_stop
# Context: foo/bar is a symlink to ../baz. foo/ contains an .envrc file
# BUG: foo/bar is resolved in the .envrc execution context and so can't find
# the .envrc file.
#
# Apparently, the CHDIR syscall does that so I don't know how to work around
# the issue.
#
# test_start "symlink-bug"
# cd foo/bar
# direnv_eval
# test_stop
# Pending: test that the mtime is looked on the original file
# test_start "utils"
# LINK_TIME=`direnv file-mtime link-to-somefile`
# touch somefile
# NEW_LINK_TIME=`direnv file-mtime link-to-somefile`
# test "$LINK_TIME" = "$NEW_LINK_TIME"
# test_stop
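# For reference, the "base" scenario above presumably relies on an .envrc along the lines of
# (hypothetical reconstruction, not included in this dump):
#   export HELLO=world
# which is what the `test "$HELLO" = "world"` assertions check.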
| camelpunch/direnv | test/direnv-test.sh | Shell | mit | 2,435 |
yasm -O2 -f win32 -DPREFIX -DHAVE_ALIGNED_STACK=1 -DHIGH_BIT_DEPTH=0 -DBIT_DEPTH=8 -DARCH_X86_64=0 -o cpu-a.obj cpu-a.asm
| lordmulder/CPUFeaturesLib | Prerequisites/cpu-a/build.sh | Shell | gpl-2.0 | 122 |
#!/bin/bash
if [ $(hostname) = ximera-1.asc.ohio-state.edu ]; then
echo On the deployment machine.
echo Pulling latest version from github, protecting our dotenv...
mv -f .env .env.backup
git pull
mv -f .env.backup .env
echo Updating npm...
npm install
echo Running gulp...
node ./node_modules/gulp/bin/gulp.js js
node ./node_modules/gulp/bin/gulp.js service-worker
node ./node_modules/gulp/bin/gulp.js css
echo Stopping old copies of app.js...
pm2 stop ximera
echo Starting a new copy of app.js...
pm2 start ecosystem.config.js --env production
else
echo not on the deployment machine...
echo copying environment and keys to deployment machine...
rsync -avz -f"- .git/" private_key.pem .env ximera:/var/www/apps/ximera
ssh ximera "cd /var/www/apps/ximera ; source deploy.sh"
fi
| kisonecat/ximera | deploy.sh | Shell | gpl-2.0 | 859 |
version=\
(
'4.8'
)
url=\
(
"http://downloads.sourceforge.net/strace/4.8/strace-$version.tar.xz"
)
md5=\
(
'c575ef43829586801f514fd91bfe7575'
)
maintainer=\
(
'Ricardo Martins <[email protected]>'
)
configure()
{
"../strace-$version/configure" \
--prefix="$cfg_dir_toolchain_sysroot/usr" \
--target="$cfg_target_canonical" \
--host="$cfg_target_canonical" \
--build="$cfg_host_canonical" \
--disable-static \
--enable-shared
}
build()
{
$cmd_make
}
host_install()
{
$cmd_make install
}
target_install()
{
$cmd_target_strip strace -o $cfg_dir_rootfs/usr/bin/strace
}
| LSTS/glued | rules/strace/default.bash | Shell | gpl-2.0 | 649 |
#!/bin/bash
set -e
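# NOTE (added for clarity): DESTDIR and VERSION are not set here; they are assumed to be
# exported by the calling build tool before the final `mv` at the bottom of this script.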
cd tools/android
# Build C code
~/android-ndk-r9d/ndk-build
# Build Jar
ant release
# Sign with the release key
jarsigner \
-verbose \
-sigalg SHA1withRSA \
-digestalg SHA1 \
-keystore ~/android-keys/chaoticrage-release-key.keystore \
-storepass `cat ~/android-keys/password` \
bin/ChaoticRage-release-unsigned.apk \
chaoticrage
# Align (reduces ram usage)
rm -f bin/ChaoticRage-release.apk
~/android-sdk-linux/tools/zipalign \
-v 4 \
bin/ChaoticRage-release-unsigned.apk \
bin/ChaoticRage-release.apk
# Move it where it belongs
mv "bin/ChaoticRage-release.apk" "$DESTDIR/chaoticrage-android-$VERSION.apk"
| enjgine/chaotic-rage | tools/buildtool/android_apk.sh | Shell | gpl-2.0 | 640 |
#! /bin/sh
. ../../testenv.sh
analyze rng1.vhdl
elab_simulate sliding_index
clean
echo "Test successful"
| tgingold/ghdl | testsuite/gna/bug049/testsuite.sh | Shell | gpl-2.0 | 109 |
#!/bin/bash
#
# Copyright (C) 2008 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License, version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 021110-1307, USA.
#
# vi: set ts=8 sw=8 autoindent noexpandtab :
#
# File : xattr-multi-run.sh
#
# Description: The wrapper script helps to run the multi-node xattr test with
# various settings, to perform the utility, functional, and stress tests.
# NOTE: requires openmpi to be configured and passwordless rsh/ssh
# access.
#
# Author: Tristan Ye, [email protected]
#
# History: 6 July 2008
#
################################################################################
# Global Variables
################################################################################
PATH=$PATH:/sbin # Add /sbin to the path for ocfs2 tools
export PATH=$PATH:.
. ./config.sh
RM="`which rm`"
MKDIR="`which mkdir`"
RSH_BIN="`which rsh`"
SSH_BIN="`which ssh`"
REMOTE_SH_BIN=${SSH_BIN}
USERNAME=`id -nu`
GROUPNAME=`id -gn`
SUDO="`which sudo` -u root"
AWK_BIN="`which awk`"
TOUCH_BIN="`which touch`"
MOUNT_BIN="`which sudo` -u root `which mount`"
REMOTE_MOUNT_BIN="${BINDIR}/remote_mount.py"
UMOUNT_BIN="`which sudo` -u root `which umount`"
REMOTE_UMOUNT_BIN="${BINDIR}/remote_umount.py"
MKFS_BIN="`which sudo` -u root `which mkfs.ocfs2`"
CHMOD_BIN="`which sudo` -u root `which chmod`"
CHOWN_BIN="`which sudo` -u root `which chown`"
IFCONFIG_BIN="`which sudo` -u root `which ifconfig`"
XATTR_TEST_BIN="${BINDIR}/xattr-multi-test"
LABEL="ocfs2-xattr-multi-test-`uname -m`"
SLOTS=2
DEFAULT_LOG="multiple-xattr-test-logs"
LOG_OUT_DIR=
LOG_FILE=
RUN_LOG_FILE=
MOUNT_POINT=
OCFS2_DEVICE=
SLOTS=
BLOCKSIZE=
CLUSTERSIZE=
BLOCKNUMS=
WORKPLACE=
TMP_DIR=/tmp
DEFAULT_RANKS=4
declare -i MPI_RANKS
MPI_HOSTS=
MPI_ACCESS_METHOD="ssh"
MPI_PLS_AGENT_ARG="-mca plm_rsh_agent ssh:rsh"
MPI_MCA_BTL="-mca btl tcp,self"
MPI_MCA_BTL_IF=""
TEST_NO=0
TEST_PASS=0
###for success/failure print
BOOTUP=color
RES_COL=80
MOVE_TO_COL="echo -en \\033[${RES_COL}G"
SETCOLOR_SUCCESS="echo -en \\033[1;32m"
SETCOLOR_FAILURE="echo -en \\033[1;31m"
SETCOLOR_WARNING="echo -en \\033[1;33m"
SETCOLOR_NORMAL="echo -en \\033[0;39m"
LOGLEVEL=1
echo_success() {
[ "$BOOTUP" = "color" ] && $MOVE_TO_COL
echo -n "["
[ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS
echo -n $" PASS "
[ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
echo -n "]"
return 0
}
echo_failure() {
[ "$BOOTUP" = "color" ] && $MOVE_TO_COL
echo -n "["
[ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE
echo -n $"FAILED"
[ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
echo -n "]"
return 1
}
echo_status()
{
if [ "${1}" == "0" ];then
echo_success
echo
else
echo_failure
echo
exit 1
fi
}
exit_or_not()
{
if [ "${1}" != "0" ];then
exit 1;
fi
}
################################################################################
# Utility Functions
################################################################################
f_usage()
{
echo "usage: `basename ${0}` [-r MPI_Ranks] <-f MPI_Hosts> [-a access method] [-o output] [-i interface] <-d <device>> <mountpoint path>"
echo " -r number of MPI ranks"
echo " -a access method for process propagation; should be ssh or rsh, ssh is used as the default when omitted."
echo " -f MPI hosts list, separated by commas, e.g. -f node1.us.oracle.com,node2.us.oracle.com."
echo " -o output directory for the logs"
echo " -i Network Interface name to be used for MPI messaging."
echo " -d specify the device which has been formatted as an ocfs2 volume."
echo " <mountpoint path> path of mountpoint where the ocfs2 volume will be mounted on."
exit 1;
}
f_getoptions()
{
if [ $# -eq 0 ]; then
f_usage;
exit 1
fi
while getopts "o:d:r:f:a:h:i:" options; do
case $options in
r ) MPI_RANKS="$OPTARG";;
f ) MPI_HOSTS="$OPTARG";;
o ) LOG_OUT_DIR="$OPTARG";;
d ) OCFS2_DEVICE="$OPTARG";;
a ) MPI_ACCESS_METHOD="$OPTARG";;
i ) INTERFACE="$OPTARG";;
h ) f_usage
exit 1;;
* ) f_usage
exit 1;;
esac
done
shift $(($OPTIND -1))
MOUNT_POINT=${1}
}
f_setup()
{
if [ "${UID}" = "0" ];then
echo "Should not run tests as root."
exit 1
fi
f_getoptions $*
if [ "$MPI_ACCESS_METHOD" = "rsh" ];then
MPI_PLS_AGENT_ARG="-mca plm_rsh_agent rsh:ssh"
REMOTE_SH_BIN=${RSH_BIN}
fi
if [ ! -z "${INTERFACE}" ]; then
${IFCONFIG_BIN} ${INTERFACE} >/dev/null 2>&1 || {
echo "Invalid NIC";
f_usage;
}
MPI_MCA_BTL_IF="-mca btl_tcp_if_include ${INTERFACE}"
fi;
if [ -z "${MOUNT_POINT}" ];then
f_usage
else
if [ ! -d ${MOUNT_POINT} ]; then
echo "Mount point ${MOUNT_POINT} does not exist."
exit 1
else
#To ensure that the mount point will not end with a trailing '/'
if [ "`dirname ${MOUNT_POINT}`" = "/" ]; then
MOUNT_POINT="`dirname ${MOUNT_POINT}``basename ${MOUNT_POINT}`"
else
MOUNT_POINT="`dirname ${MOUNT_POINT}`/`basename ${MOUNT_POINT}`"
fi
fi
fi
MPI_RANKS=${MPI_RANKS:-$DEFAULT_RANKS}
LOG_OUT_DIR=${LOG_OUT_DIR:-$DEFAULT_LOG}
${MKDIR} -p ${LOG_OUT_DIR} || exit 1
LOG_SUFIX=$(date +%F-%H-%M-%S)
LOG_FILE="`dirname ${LOG_OUT_DIR}`/`basename ${LOG_OUT_DIR}`/xattr-multiple-test-log-`uname -m`-${LOG_SUFIX}.log"
RUN_LOG_FILE="`dirname ${LOG_OUT_DIR}`/`basename ${LOG_OUT_DIR}`/xattr-multiple-test-log-run-`uname -m`-${LOG_SUFIX}.log"
if [ -z "$MPI_HOSTS" ];then
f_usage
fi
# Use number of testing nodes as the default slot number.
if [ -z "${SLOTS}" ];then
echo $MPI_HOSTS|sed -e 's/,/\n/g' >/tmp/$$
SLOTS=`cat /tmp/$$ |wc -l`
rm -f /tmp/$$
fi
${CHMOD_BIN} -R 777 ${MOUNT_POINT}
${CHOWN_BIN} -R ${USERNAME}:${GROUPNAME} ${MOUNT_POINT}
WORKPLACE="`dirname ${MOUNT_POINT}`/`basename ${MOUNT_POINT}`/multi_xattr_test_place"
}
f_do_mkfs_and_mount()
{
echo -n "Mkfsing device(-b ${BLOCKSIZE} -C ${CLUSTERSIZE}): "|tee -a ${RUN_LOG_FILE}
echo y|${MKFS_BIN} --fs-features=xattr -b ${BLOCKSIZE} -C ${CLUSTERSIZE} -N ${SLOTS} -L ${LABEL} ${OCFS2_DEVICE} ${BLOCKNUMS}>>${RUN_LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
echo -n "Mounting device ${OCFS2_DEVICE} to nodes(${MPI_HOSTS}):"|tee -a ${RUN_LOG_FILE}
${REMOTE_MOUNT_BIN} -l ${LABEL} -m ${MOUNT_POINT} -n ${MPI_HOSTS}>>${RUN_LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${SUDO} chown -R ${USERNAME}:${GROUPNAME} ${MOUNT_POINT}
${SUDO} chmod -R 777 ${MOUNT_POINT}
${MKDIR} -p ${WORKPLACE} || exit 1
}
f_do_umount()
{
echo -n "Umounting device ${OCFS2_DEVICE} from nodes(${MPI_HOSTS}):"|tee -a ${RUN_LOG_FILE}
${REMOTE_UMOUNT_BIN} -m ${MOUNT_POINT} -n ${MPI_HOSTS}>>${RUN_LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
}
f_runtest()
{
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Check Namespace&Filetype of Multinode Xattr on Ocfs2:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Check Namespace&Filetype of Multinode Xattr on Ocfs2:">>${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
for namespace in user trusted
do
for filetype in normal directory symlink
do
echo -e "Testing Binary:\t\t${SUDO} ${MPIRUN} --allow-run-as-root ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 20 -n ${namespace} -t ${filetype} -l 50 -s 200 ${WORKPLACE}">>${LOG_FILE}
echo "********${namespace} mode on ${filetype}********">>${LOG_FILE}
${SUDO} ${MPIRUN} --allow-run-as-root ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 20 -n ${namespace} -t ${filetype} -l 50 -s 200 ${WORKPLACE}>>${LOG_FILE} 2>&1
rc=$?
if [ "$rc" != "0" ];then
if [ "$namespace" == "user" -a "$filetype" == "symlink" ]; then
continue
else
rc=1
echo_failure | tee -a ${RUN_LOG_FILE}
echo | tee -a ${RUN_LOG_FILE}
exit 1
fi
fi
${RM} -rf ${WORKPLACE}/* || exit 1
done
if [ "$rc" != "0" ];then
if [ "$namespace" == "user" -a "$filetype" == "symlink" ]; then
continue
else
break
fi
fi
done
if [ "$rc" == "0" ];then
echo_success |tee -a ${RUN_LOG_FILE}
echo |tee -a ${RUN_LOG_FILE}
fi
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Check Utility of Multinode Xattr on Ocfs2:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Check Utility of Multinode Xattr on Ocfs2:">>${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
for((i=0;i<4;i++));do
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10 -n user -t normal -l 50 -s 100 ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10 -n user -t normal -l 50 -s 100 ${WORKPLACE}>>${LOG_FILE} 2>&1
rc=$?
if [ ! "$rc" == "0" ];then
echo_failure |tee -a ${RUN_LOG_FILE}
echo | tee -a ${RUN_LOG_FILE}
break
fi
done
if [ "$rc" == "0" ];then
echo_success |tee -a ${RUN_LOG_FILE}
echo | tee -a ${RUN_LOG_FILE}
fi
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Check Max Multinode Xattr EA_Name_Length:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Check Max Multinode Xattr EA_Name_Length:">> ${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 4 -n user -t normal -l 255 -s 300 ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 4 -n user -t normal -l 255 -s 300 ${WORKPLACE}>>${LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Check Max Multinode Xattr EA_Size:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Check Max Multinode Xattr EA_Size:">> ${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1 -n user -t normal -l 50 -s 65536 ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1 -n user -t normal -l 50 -s 65536 ${WORKPLACE}>>${LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Check Huge Multinode Xattr EA_Entry_Nums:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Check Huge Multinode Xattr EA_Entry_Nums:">> ${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10000 -n user -t normal -l 100 -s 200 ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 10000 -n user -t normal -l 100 -s 200 ${WORKPLACE}>>${LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Check All Max Multinode Xattr Arguments Together:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Check All Max Multinode Xattr Arguments Together:">> ${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 65536 ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 65536 ${WORKPLACE}>>${LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Launch Concurrent Adding Test:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Launch Concurrent Adding Test:">> ${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 5000 -o -r -k ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 1000 -n user -t normal -l 255 -s 5000 -o -r -k ${WORKPLACE}>>${LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
((TEST_NO++))
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -ne "[${TEST_NO}] Launch MultiNode Xattr Stress Test:"|tee -a ${RUN_LOG_FILE}
echo -ne "[${TEST_NO}] Launch MultiNode Xattr Stress Test:">> ${LOG_FILE}
echo >>${LOG_FILE}
echo "==========================================================">>${LOG_FILE}
echo -e "Testing Binary:\t\t${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 2000 -n user -t normal -l 255 -s 5000 -r -k ${WORKPLACE}">>${LOG_FILE}
${MPIRUN} ${MPI_PLS_AGENT_ARG} ${MPI_MCA_BTL} ${MPI_MCA_BTL_IF} -np ${MPI_RANKS} --host ${MPI_HOSTS} ${XATTR_TEST_BIN} -i 1 -x 2000 -n user -t normal -l 255 -s 5000 -r -k ${WORKPLACE}>>${LOG_FILE} 2>&1
RET=$?
echo_status ${RET} |tee -a ${RUN_LOG_FILE}
exit_or_not ${RET}
${RM} -rf ${WORKPLACE}/* || exit 1
((TEST_PASS++))
}
f_cleanup()
{
if [ -f "$TMP_FILE" ];then
${RM} -rf $TMP_FILE
fi
}
################################################################################
# Main Entry
################################################################################
trap 'echo -ne "\n\n">>${RUN_LOG_FILE};echo "Interrupted by Ctrl+C, cleaning up... "|tee -a ${RUN_LOG_FILE}; f_cleanup;exit 1' SIGINT
trap ' : ' SIGTERM
f_setup $*
START_TIME=${SECONDS}
echo "=====================Multiple nodes xattr testing starts: `date`=====================" |tee -a ${RUN_LOG_FILE}
echo "=====================Multiple nodes xattr testing starts: `date`=====================" >> ${LOG_FILE}
for BLOCKSIZE in 512 4096
do
for CLUSTERSIZE in 4096 1048576
do
echo "++++++++++xattr tests with \"-b ${BLOCKSIZE} -C ${CLUSTERSIZE}\"++++++++++" |tee -a ${RUN_LOG_FILE}
echo "++++++++++xattr tests with \"-b ${BLOCKSIZE} -C ${CLUSTERSIZE}\"++++++++++">>${LOG_FILE}
echo "======================================================================================="
f_do_mkfs_and_mount
f_runtest
f_do_umount
echo "======================================================================================="
echo -e "\n\n\n">>${LOG_FILE}
done
done
f_cleanup
END_TIME=${SECONDS}
echo "=====================Multiple nodes xattr testing ends: `date`=====================" |tee -a ${RUN_LOG_FILE}
echo "=====================Multiple nodes xattr testing ends: `date`=====================" >> ${LOG_FILE}
echo "Time elapsed(s): $((${END_TIME}-${START_TIME}))" |tee -a ${RUN_LOG_FILE}
echo "Tests total: ${TEST_NO}" |tee -a ${RUN_LOG_FILE}
echo "Tests passed: ${TEST_PASS}" |tee -a ${RUN_LOG_FILE}
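# Illustrative invocation (hostnames, device and paths are placeholders, per f_usage above):
#   ./xattr-multi-run.sh -r 4 -a ssh -f node1.us.oracle.com,node2.us.oracle.com \
#       -o ./logs -d /dev/sdb1 /mnt/ocfs2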
| markfasheh/ocfs2-test | programs/xattr_tests/xattr-multi-run.sh | Shell | gpl-2.0 | 18,550 |
#!/bin/sh
# Check for VMWare
isVMWare
noVMWare=$?
isVPC
noVPC=$?
grep -q "QEMU Virtual CPU" /proc/cpuinfo
noQEMU=$?
while [ 1 ]
do
sleep `expr $1 \* 60`
myip=`cat /tmp/myip`
# For each instance
instance=1
while [ -d /etc/folding/$instance ]
do
cd /etc/folding/$instance
if [ -f latest.$myip.$instance ]
then
last=`cat latest.$myip.$instance`
if [ "$last" = "B" ]
then
new=A
else
new=B
fi
else
new=A
fi
tar cf backup.$myip.$new.$instance machinedependent.dat queue.dat work > /dev/null 2>&1
rm -f latest.$myip.$instance
echo $new > latest.$myip.$instance
# If tftpserverip exists then do a TFTP backup
serverip=`cat /tmp/tftpserverip`
if [ "$serverip" != "" ]
then
tftp -p $serverip -l backup.$myip.$new.$instance -r backup.$myip.$new.$instance > /dev/null 2>&1
tftp -p $serverip -l latest.$myip.$instance -r latest.$myip.$instance > /dev/null 2>&1
fi
# Save a copy for the webpage link
mv backup.$myip.$new.$instance /etc/folding/$instance/backup.tar
# Backup to USB drive A if present
mount -n -t vfat /dev/sda1 /usba > /dev/null 2>&1
if [ $? -eq 0 ]
then
# Make any directories necessary
mkdir -p /usba/folding/$instance/work
# Copy across any files that are newer than what is on the USB drive
for name in machinedependent.dat queue.dat `find work -type f`
do
if [ ! -f /usba/folding/$instance/$name ]
then
cp /etc/folding/$instance/$name /usba/folding/$instance/$name
elif [ /etc/folding/$instance/$name -nt /usba/folding/$instance/$name ]
then
cp /etc/folding/$instance/$name /usba/folding/$instance/$name
fi
done
# Clean up any stale files in the work directory
slot=0
while [ "$slot" -lt "10" ]
do
state=`queueinfo /usba/folding/$instance/queue.dat $slot`
if [ "$state" -eq "0" ]
then
rm -f /usba/folding/$instance/work/*_0$slot*
fi
slot=`expr $slot + 1`
done
umount /usba
fi
# Backup to USB drive B if present
mount -n -t vfat /dev/sdb1 /usbb > /dev/null 2>&1
if [ $? -eq 0 ]
then
# Make any directories necessary
mkdir -p /usbb/folding/$instance/work
# Copy across any files that are newer than what is on the USB drive
for name in machinedependent.dat queue.dat `find work -type f`
do
if [ ! -f /usbb/folding/$instance/$name ]
then
cp /etc/folding/$instance/$name /usbb/folding/$instance/$name
elif [ /etc/folding/$instance/$name -nt /usbb/folding/$instance/$name ]
then
cp /etc/folding/$instance/$name /usbb/folding/$instance/$name
fi
done
# Clean up any stale files in the work directory
slot=0
while [ "$slot" -lt "10" ]
do
state=`queueinfo /usbb/folding/$instance/queue.dat $slot`
if [ "$state" -eq "0" ]
then
rm -f /usbb/folding/$instance/work/*_0$slot*
fi
slot=`expr $slot + 1`
done
umount /usbb
fi
# Backup to hard drive image if VMWare and booted by syslinux
if [ $noVMWare -eq 0 -o $noQEMU -eq 0 -o $noVPC -eq 0 ]
then
if [ "`cat /proc/sys/kernel/bootloader_type`" = "49" ]
then
mount -n -t vfat /dev/hda1 /hda
if [ $? -eq 0 ]
then
# Make any directories necessary
mkdir -p /hda/folding/$instance/work
# Copy across any files that are newer than what is on the hard drive image
for name in machinedependent.dat queue.dat `find work -type f`
do
if [ ! -f /hda/folding/$instance/$name ]
then
cp /etc/folding/$instance/$name /hda/folding/$instance/$name
elif [ /etc/folding/$instance/$name -nt /hda/folding/$instance/$name ]
then
cp /etc/folding/$instance/$name /hda/folding/$instance/$name
fi
done
# Clean up any stale files in the work directory
slot=0
while [ "$slot" -lt "10" ]
do
state=`queueinfo /hda/folding/$instance/queue.dat $slot`
if [ "$state" -eq "0" ]
then
rm -f /hda/folding/$instance/work/*_0$slot*
fi
slot=`expr $slot + 1`
done
umount /hda
fi
else
echo "Hard drive image is corrupted, not backing up"
fi
fi
# Next instance
instance=`expr $instance + 1`
done
# If we were hiding the backup links on the webpage, make them available now
if [ -f /etc/folding/index.backup ]
then
mv /etc/folding/index.backup /etc/folding/index.html
fi
done
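# Illustrative start-up (the first argument is the backup interval in minutes, consumed by the
# `sleep` at the top of the loop; the exact caller shown here is an assumption):
#   /bin/backup.sh 30 &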
| thwpike/foldingathome | initrd_dir/bin/backup.sh | Shell | gpl-2.0 | 4,811 |
#! /bin/bash
for d in `ls */doit.sh | cut -d/ -f1`; do
echo '[ '$d' ]'
cd ./$d >/dev/null
test -f "xlt.c" && exit 1
base=`ls rawcounts.* | head -n1 | cut -d. -f2`
rm -vf *.c *.pair
ls *.base *.xbase | grep -v '^'$base'\.' | xargs rm -vf
cd .. >/dev/null
done
rm -rf letters
rm *~ 2>/dev/null
| ShiftMediaProject/enca | data/clean.sh | Shell | gpl-2.0 | 306 |
#!/bin/sh
set -e
# first arg is `-f` or `--some-option`
if [ "--risky" = "$1" ]; then
sed -i "s/enable = 0/enable = 1/g" /root/.config/torrench/config.ini
shift
torrench "$@"
exit 0
elif [ "${1#-}" != "$1" ]; then
set -- torrench "$@"
fi
exec "$@"
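# Illustrative container usage (image name and torrench options are placeholders):
#   docker run --rm -it torrench-image --risky <torrench-args>   # flip enable=0 to 1 in config.ini, then run torrench
#   docker run --rm -it torrench-image <torrench-args>           # args starting with '-' are forwarded to torrench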
| kryptxy/torrench | docker/entrypoint.sh | Shell | gpl-3.0 | 271 |
#!/usr/bin/bash
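# Added explanatory note (behaviour inferred from the gettext tools used below):
#  1. msgcat --use-first merges each .po with template.pot, preferring the existing translations;
#  2. msgattrib --set-obsolete --ignore-file=template.pot marks entries no longer in the template as obsolete;
#  3. msgattrib --no-obsolete then drops those obsolete entries.
# Net effect: every .po file is kept in sync with template.pot.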
for f in *.po; do
msgcat --use-first $f template.pot -o $f
msgattrib --set-obsolete --ignore-file=template.pot -o $f $f
msgattrib --no-obsolete -o $f $f
done
| KoKuToru/gTox | src/i18n/update.sh | Shell | gpl-3.0 | 177 |
#!/bin/bash
STORAGE_DIRECTORY=`dirname "$(readlink -f "$0")"`
exec "${STORAGE_DIRECTORY}/htd_main" --input gr --output td --strategy challenge --opt width --iterations 0 --print-opt-progress "$@" <&0
| yannponty/RNARedPrint | lib/htd-master/scripts/htd_gr2td_exhaustive.sh | Shell | gpl-3.0 | 202 |
#!/bin/sh
download() {
cd tgz
wget -N -nd -c -e robots=off -A tgz,html -r -np \
http://www.repository.voxforge1.org/downloads/es/Trunk/Audio/Main/16kHz_16bit
# http://www.repository.voxforge1.org/downloads/SpeechCorpus/Trunk/Audio/Main/8kHz_16bit
cd ..
}
unpack() {
for f in tgz/*.tgz; do
tar xf $f -C wav
done
}
convert_flac() {
find wav -name "*flac*" -type d | while read file; do
outdir=${file//flac/wav}
mkdir -p $outdir
done
find wav -name "*.flac" | while read f; do
outfile=${f//flac/wav}
flac -s -d $f -o $outfile
done
}
collect_prompts() {
mkdir etc
> etc/allprompts
find wav -name PROMPTS | while read f; do
echo $f
cat $f >> etc/allprompts
done
#find wav -name prompts | while read f; do
# echo $f
# cat $f >> etc/allprompts
#done
}
#FIXME
make_prompts() {
cat etc/allprompts | sort | sed 's/mfc/wav/g' > allprompts.tmp
mv allprompts.tmp etc/allprompts
cat etc/allprompts | awk '{
if (system("test -f wav/" $1 ".wav") == 0) {
printf ("<s> ");
for (i=2;i<=NF;i++)
printf ("%s ", tolower($i));
printf ("</s> (%s)\n", $1);
}
}
' > etc/voxforge_es_sphinx.transcription
./traintest.sh etc/voxforge_es_sphinx.transcription
./build_fileids.py etc/voxforge_es_sphinx.transcription.train > etc/voxforge_es_sphinx.fileids.train
./build_fileids.py etc/voxforge_es_sphinx.transcription.test > etc/voxforge_es_sphinx.fileids.test
}
#download
#unpack
#collect_prompts
#make_prompts
#sphinxtrain run
| ejmalfatti/VoiceControl-RaspberryPi | voxforge-es-0.2/scripts/build.sh | Shell | gpl-3.0 | 1,450 |
sudo apt-get update && sudo apt-get -y upgrade
sudo apt-get install vim rsync git zip unzip
wget -qO- https://get.docker.com/ | sh
sudo usermod -aG docker ubuntu
curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` | sudo tee /usr/local/bin/docker-compose > /dev/null
sudo chmod +x /usr/local/bin/docker-compose
| mingchuno/golden-cache | init.sh | Shell | gpl-3.0 | 345 |
#!/bin/bash
PREF='matchem'
python MatrixChem.py > ${PREF}.gr
header=`head -1 ${PREF}.gr | cut -f 2-`
pltline='plot '
sep=''
i=1
for clm in $header ; do
i=$((i + 1))
pltline="$pltline$sep\"${PREF}.gr\" using 1:$i w l lw 2 title \"$clm\""
sep=', '
done
gnuplot <<EOF
set terminal postscript eps color 16
set ylabel 'Molecule count'
set xlabel 'time (s)'
set output "${PREF}.eps"
$pltline
EOF
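# Assumed input format (not verified against MatrixChem.py): ${PREF}.gr is a tab-separated
# table whose first line is a header such as
#   time<TAB>A<TAB>B<TAB>AB
# column 1 is the x axis (time) and each remaining column becomes one curve in the plot above.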
| laryamamoto/PyCellChemistry | src/scripts/matchem.sh | Shell | gpl-3.0 | 399 |
cd $(dirname "${0}")
cd ..
yarn install || exit 1
yarn install_deps || exit 1
| eloello/kakakaka | bin/build-dev.sh | Shell | gpl-3.0 | 78 |
#####
##### Elec: Supervised training using three types of tv-embedding
#####
##### Step 1. Generate input files.
##### Step 2. Training.
#####
##### NOTE1: To run this script, download unlab_data.tar.gz and decompress it at test/,
##### so that the directory test/unlab_data will be created.
#####
##### NOTE2: For your convenience, the files of the results of tv-embedding
##### learning (*.layer0) are provided. They are endian sensitive
##### and were generated with Little Endian (Intel convention).
##### If they are not usable in your system, you need to generate them
##### in your system using the provided scripts (see the comments
##### on s_fn0, s_fn1, and s_fn2 below) and set sdir=output below.
#####
gpu=-1 # <= Change this to, e.g., "gpu=0" to use a specific GPU.
dim=100 # <= Change this to change dimensionality of tv-embedding.
sdir=unlab_data # <= Change this to the directory where tv-embedding files are.
# <= Provided at unlab_data/ (unlab_data.tar.gz).
# sdir=output # <= output/ if generated by semisup-elec-{unsup|unsup3|parsup}-tv.sh.
prep=../bin/prepText
cnet=../bin/conText
options="LowerCase UTF8"
txt_ext=.txt.tok
z=4 # to avoid name conflict with other scripts
pch_sz=5
s_fn0=${sdir}/elec-uns-p${pch_sz}.dim${dim}.ite10.layer0 # generated by semisup-elec-unsup-tv.sh
s_fn1=${sdir}/elec-parsup-p3p${pch_sz}.dim${dim}.ite10.layer0 # generated by semisup-elec-parsup-tv.sh
s_fn2=${sdir}/elec-unsx3-p${pch_sz}.dim${dim}.ite10.layer0 # generated by semisup-elec-unsup3-tv.sh
#--- Step 1. Generate input files.
xvoc1=data/elec${z}-25k-trn.vocab
$cnet $gpu write_word_mapping layer0_fn=$s_fn0 word_map_fn=$xvoc1 # extract word mapping from the tv-embedding file.
xvoc3=data/elec${z}-25k-trn-123gram.vocab
$cnet $gpu write_word_mapping layer0_fn=$s_fn2 word_map_fn=$xvoc3 # extract word mapping from the tv-embedding file.
for set in 25k-train test; do
opt=AllowZeroRegion
#--- dataset#0: for the main layer (seq, same as semisup-elec-{unsup|unsup3|parsup}-tv.sh)
rnm=data/elec${z}-${set}-p${pch_sz}seq
$prep gen_regions $opt \
region_fn_stem=$rnm input_fn=data/elec-${set} vocab_fn=$xvoc1 \
$options text_fn_ext=$txt_ext label_fn_ext=.cat \
label_dic_fn=data/elec_cat.dic \
patch_size=$pch_sz patch_stride=1 padding=$((pch_sz-1))
#--- dataset#1: for the side layer (bow, same as semisup-elec-{unsup|parsup}-tv.sh)
rnm=data/elec${z}-${set}-p${pch_sz}bow
$prep gen_regions $opt Bow \
region_fn_stem=$rnm input_fn=data/elec-${set} vocab_fn=$xvoc1 \
$options text_fn_ext=$txt_ext RegionOnly \
patch_size=$pch_sz patch_stride=1 padding=$((pch_sz-1))
#--- dataset#2: for the side layer (bag-of-1-3grams, same as semisup-elec-unsup3-tv.sh)
rnm=data/elec${z}-${set}-p${pch_sz}x3bow
$prep gen_regions $opt Bow \
region_fn_stem=$rnm input_fn=data/elec-${set} vocab_fn=$xvoc3 \
$options text_fn_ext=$txt_ext RegionOnly \
patch_size=$pch_sz patch_stride=1 padding=$((pch_sz-1))
done
#--- Step 2. Training.
gpumem=${gpu}:4 # pre-allocate 4GB GPU memory.
logfn=log_output/elec-semisup3-dim${dim}.log
perffn=perf/elec-semisup3-dim${dim}.csv
echo
echo Supervised training using 3 types of tv-embedding to produce additional input.
echo This takes a while. See $logfn and $perffn for progress and see param/semisup3.param for the rest of the parameters.
$cnet $gpumem cnn 0side0_fn=$s_fn0 0side1_fn=$s_fn1 0side2_fn=$s_fn2 \
trnname=elec${z}-25k-train-p${pch_sz} tstname=elec${z}-test-p${pch_sz} \
data_ext0=seq data_ext1=bow data_ext2=x3bow \
0side0_dsno=1 0side1_dsno=1 0side2_dsno=2 \
evaluation_fn=$perffn test_interval=25 LessVerbose \
reg_L2=1e-4 step_size=0.1 \
@param/semisup3.param > $logfn
| DeercoderCourse/NLP | conText-v2.00/test/semisup-elec-all-three.sh | Shell | gpl-3.0 | 4,000 |
#!/bin/sh
## live-build(7) - System Build Scripts
## Copyright (C) 2006-2013 Daniel Baumann <[email protected]>
##
## This program comes with ABSOLUTELY NO WARRANTY; for details see COPYING.
## This is free software, and you are welcome to redistribute it
## under certain conditions; see COPYING for details.
# Debian releases
RELEASE_squeeze="6.0.7"
RELEASE_wheezy="7.0.0"
RELEASE_jessie="8"
RELEASE_sid="unstable"
# Progress Linux releases
RELEASE_artax="1.0"
RELEASE_baureo="2.0"
RELEASE_charon="unstable"
# Ubuntu releases
RELEASE_hardy="8.04" # LTS: 2013-04-01
RELEASE_lucid="10.04" # LTS: 2015-04-01
RELEASE_oneiric="11.10" # 2013-04-01
RELEASE_precise="12.04" # LTS: 2017-04-01
RELEASE_quantal="12.10" # 2014-04-01
| linuxknow/livebuildlihuen | functions/releases.sh | Shell | gpl-3.0 | 733 |
#!/bin/bash -e
# The Texinfo package contains programs for reading, writing, and converting
# info pages.
. ../lfs.comm
build_src() {
version=6.5
srcfil=texinfo-$version.tar.xz
srcdir=texinfo-$version
tar -xf $LFSSRC/$srcfil && cd $srcdir
./configure --prefix=/tools
make $JOBS
make install
cd .. && rm -rf $srcdir
}
build
| fangxinmiao/projects | Architeture/OS/Linux/Distributions/LFS/build-scripts/lfs-8.4-systemd/1-build-tmp-system/30-Texinfo.sh | Shell | gpl-3.0 | 363 |
#!/bin/bash
#
# This is job test5_0
#
#
## Start of header for backend 'local'.
#
set -e
set -u
ENVIRONMENT_DIR='.'
#
# Variables declared in MOLGENIS Compute headers/footers always start with an MC_ prefix.
#
declare MC_jobScript="test5_0.sh"
declare MC_jobScriptSTDERR="test5_0.err"
declare MC_jobScriptSTDOUT="test5_0.out"
declare MC_failedFile="molgenis.pipeline.failed"
declare MC_singleSeperatorLine=$(head -c 120 /dev/zero | tr '\0' '-')
declare MC_doubleSeperatorLine=$(head -c 120 /dev/zero | tr '\0' '=')
declare MC_tmpFolder='tmpFolder'
declare MC_tmpFile='tmpFile'
declare MC_tmpFolderCreated=0
#
##
### Header functions.
##
#
function errorExitAndCleanUp() {
local _signal="${1}"
local _problematicLine="${2}"
local _exitStatus="${3:-$?}"
local _executionHost="$(hostname)"
local _format='INFO: Last 50 lines or less of %s:\n'
local _errorMessage="FATAL: Trapped ${_signal} signal in ${MC_jobScript} running on ${_executionHost}. Exit status code was ${_exitStatus}."
if [ "${_signal}" == 'ERR' ]; then
_errorMessage="FATAL: Trapped ${_signal} signal on line ${_problematicLine} in ${MC_jobScript} running on ${_executionHost}. Exit status code was ${_exitStatus}."
fi
_errorMessage=${4:-"${_errorMessage}"} # Optionally use custom error message as 4th argument.
echo "${_errorMessage}"
echo "${MC_doubleSeperatorLine}" > "${MC_failedFile}"
echo "${_errorMessage}" >> "${MC_failedFile}"
if [ -f "${MC_jobScriptSTDERR}" ]; then
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
printf "${_format}" "${MC_jobScriptSTDERR}" >> "${MC_failedFile}"
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
tail -50 "${MC_jobScriptSTDERR}" >> "${MC_failedFile}"
fi
if [ -f "${MC_jobScriptSTDOUT}" ]; then
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
printf "${_format}" "${MC_jobScriptSTDOUT}" >> "${MC_failedFile}"
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
tail -50 "${MC_jobScriptSTDOUT}" >> "${MC_failedFile}"
fi
echo "${MC_doubleSeperatorLine}" >> "${MC_failedFile}"
}
#
# Create tmp dir per script/job.
# To be called with either a file or folder as first and only argument.
# Defines two globally set variables:
# 1. MC_tmpFolder: a tmp dir for this job/script. When function is called multiple times MC_tmpFolder will always be the same.
# 2. MC_tmpFile: when the first argument was a folder, MC_tmpFile == MC_tmpFolder
# when the first argument was a file, MC_tmpFile will be a path to a tmp file inside MC_tmpFolder.
#
function makeTmpDir {
#
# Compile paths.
#
local _originalPath="${1}"
local _myMD5="$(md5sum ${MC_jobScript} | cut -d ' ' -f 1)"
local _tmpSubFolder="tmp_${MC_jobScript}_${_myMD5}"
local _dir
local _base
if [[ -d "${_originalPath}" ]]; then
_dir="${_originalPath}"
_base=''
else
_base=$(basename "${_originalPath}")
_dir=$(dirname "${_originalPath}")
fi
MC_tmpFolder="${_dir}/${_tmpSubFolder}/"
MC_tmpFile="${MC_tmpFolder}/${_base}"
echo "DEBUG ${MC_jobScript}::makeTmpDir: dir='${_dir}';base='${_base}';MC_tmpFile='${MC_tmpFile}'"
#
# Cleanup the previously created tmpFolder first if this script was resubmitted.
#
if [[ ${MC_tmpFolderCreated} -eq 0 && -d "${MC_tmpFolder}" ]]; then
rm -rf "${MC_tmpFolder}"
fi
#
# (Re-)create tmpFolder.
#
mkdir -p "${MC_tmpFolder}"
MC_tmpFolderCreated=1
}
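# Illustrative use of makeTmpDir inside a protocol (file names are hypothetical):
#   makeTmpDir "${rundir}/results/output.txt"
#   someTool > "${MC_tmpFile}"                            # write to the job-private tmp location first
#   mv "${MC_tmpFile}" "${rundir}/results/output.txt"     # then move the finished file into place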
trap 'errorExitAndCleanUp HUP NA $?' HUP
trap 'errorExitAndCleanUp INT NA $?' INT
trap 'errorExitAndCleanUp QUIT NA $?' QUIT
trap 'errorExitAndCleanUp TERM NA $?' TERM
trap 'errorExitAndCleanUp EXIT NA $?' EXIT
trap 'errorExitAndCleanUp ERR $LINENO $?' ERR
touch "${MC_jobScript}.started"
#
## End of header for backend 'local'
#
#
## Generated header
#
# Assign values to the parameters in this script
# Set taskId, which is the job name of this task
taskId="test5_0"
# Make compute.properties available
rundir="TEST_PROPERTY(project.basedir)/target/test/benchmark/run/testFoldingWeaving"
runid="testFoldingWeaving"
workflow="src/main/resources/workflows/testfolding/workflow.csv"
parameters="src/main/resources/workflows/testfolding/parameters.csv"
user="TEST_PROPERTY(user.name)"
database="none"
backend="localhost"
port="80"
interval="2000"
path="."
# Connect parameters to environment
local_chr="1"
concat[0]="prefix_1_a"
concat[1]="prefix_1_b"
concat[2]="prefix_1_c"
# Validate that each 'value' parameter has only identical values in its list
# We do that to protect you against parameter values that might not be correctly set at runtime.
if [[ ! $(IFS=$'\n' sort -u <<< "${local_chr[*]}" | wc -l | sed -e 's/^[[:space:]]*//') = 1 ]]; then echo "Error in Step 'test5': input parameter 'local_chr' is an array with different values. Maybe 'local_chr' is a runtime parameter with 'more variable' values than what was folded on generation-time?" >&2; exit 1; fi
#
## Start of your protocol template
#
#string local_chr
#list concat
for s in "prefix_1_a" "prefix_1_b" "prefix_1_c"
do
echo ${s}
echo 1
done
#
## End of your protocol template
#
# Save output in environment file: '$ENVIRONMENT_DIR/test5_0.env' with the output vars of this step
echo "" >> $ENVIRONMENT_DIR/test5_0.env
chmod 755 $ENVIRONMENT_DIR/test5_0.env
#
## Start of footer for backend 'local'.
#
if [ -d "${MC_tmpFolder:-}" ]; then
echo -n "INFO: Removing MC_tmpFolder ${MC_tmpFolder} ..."
rm -rf "${MC_tmpFolder}"
echo 'done.'
fi
tS=${SECONDS:-0}
tM=$((SECONDS / 60 ))
tH=$((SECONDS / 3600))
echo "On $(date +"%Y-%m-%d %T") ${MC_jobScript} finished successfully after ${tM} minutes." >> molgenis.bookkeeping.log
printf '%s:\t%d seconds\t%d minutes\t%d hours\n' "${MC_jobScript}" "${tS}" "${tM}" "${tH}" >> molgenis.bookkeeping.walltime
mv "${MC_jobScript}".{started,finished}
trap - EXIT
exit 0
| pneerincx/molgenis-compute | molgenis-compute-core/src/test/resources/expected/testFoldingWeaving/test5_0.sh | Shell | lgpl-3.0 | 5,888 |
#!/bin/bash -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT="$DIR/../.."
REPOSITORY_ROOT="$ROOT/.."
WORKSPACE_DIR="$ROOT/.workspace"
TMP_DIR="$WORKSPACE_DIR/tmpdir"
VERSION=$("$REPOSITORY_ROOT/git-version")
export VERSION
export TECTONIC_RELEASE_BUCKET=releases.tectonic.com
export TECTONIC_BINARY_BUCKET=tectonic-release
export TECTONIC_RELEASE="tectonic-$VERSION"
export TECTONIC_RELEASE_TARBALL_FILE="$TECTONIC_RELEASE.tar.gz"
export TECTONIC_RELEASE_TARBALL_URL="https://$TECTONIC_RELEASE_BUCKET/$TECTONIC_RELEASE_TARBALL_FILE"
export TECTONIC_RELEASE_DIR="$WORKSPACE_DIR/$VERSION"
export TECTONIC_RELEASE_TOP_DIR="$TECTONIC_RELEASE_DIR/tectonic"
export INSTALLER_RELEASE_DIR="$TECTONIC_RELEASE_TOP_DIR/tectonic-installer"
export TERRAFORM_BIN_TMP_DIR="$TMP_DIR/terraform-bin"
export TERRAFORM_BIN_VERSION=0.9.6
export TERRAFORM_BIN_BASE_URL="https://releases.hashicorp.com/terraform/${TERRAFORM_BIN_VERSION}/terraform_${TERRAFORM_BIN_VERSION}"
export TERRAFORM_LICENSE_URL="https://raw.githubusercontent.com/hashicorp/terraform/v${TERRAFORM_BIN_VERSION}/LICENSE"
export TERRAFORM_SOURCES=(
"${REPOSITORY_ROOT}/modules"
"${REPOSITORY_ROOT}/platforms"
"${REPOSITORY_ROOT}/config.tf"
"${REPOSITORY_ROOT}/terraformrc.example"
"${REPOSITORY_ROOT}/examples"
)
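# This file is intended to be sourced by the release scripts, e.g. (relative path assumed):
#   . "$(dirname "$0")/common.env.sh"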
| everett-toews/tectonic-installer | installer/scripts/release/common.env.sh | Shell | apache-2.0 | 1,289 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This contains util code for testing kubectl.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following has already been done by whatever sources this script
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_PORT=${KUBELET_PORT:-10250}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="gcr.io/google-containers/perl"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
thirdpartyresources="thirdpartyresources"
# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
[[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
[[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
PROXY_PID=
PROXY_PORT=
PROXY_PORT_FILE=
}
# Starts "kubectl proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
stop-proxy
PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"
if [ $# -eq 0 ]; then
kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
else
kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
fi
PROXY_PID=$!
PROXY_PORT=
local attempts=0
while [[ -z ${PROXY_PORT} ]]; do
if (( ${attempts} > 9 )); then
kill "${PROXY_PID}"
kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
fi
sleep .5
kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
attempts=$((attempts+1))
done
kube::log::status "kubectl proxy running on port ${PROXY_PORT}"
# We try checking kubectl proxy 30 times with 1s delays to avoid occasional
# failures.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}
function cleanup()
{
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy
kube::etcd::cleanup
rm -rf "${KUBE_TEMP}"
kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
local status
local -r address=$1
local -r desired=$2
local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
if [ "${status}" == "${desired}" ]; then
return 0
fi
echo "For address ${full_address}, got ${status} but wanted ${desired}"
return 1
}
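# Illustrative call (path and expected status code are examples only):
#   check-curl-proxy-code "/api/v1/namespaces/default/pods" 200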
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
preserve_err_file=${PRESERVE_ERR_FILE-false}
for count in {0..3}; do
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
if [ "$preserve_err_file" != true ] ; then
rm "${ERROR_FILE}"
fi
break
fi
done
}
# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
for i in $(seq 1 10); do
kubeout=`kubectl get po -l $1 --template '{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
if [[ $kubeout = $2 ]]; then
return
fi
echo Waiting for pods: $2, found $kubeout
sleep $i
done
kube::log::error_exit "Timeout waiting for pods with label $1"
}
# Code to be run before running the tests.
setup() {
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"
kube::etcd::start
# Find a standard sed instance for use with edit scripts
SED=sed
if which gsed &>/dev/null; then
SED=gsed
fi
if ! ($SED --version 2>&1 | grep -q GNU); then
echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'."
exit 1
fi
kube::log::status "Building kubectl"
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"
# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"
# TODO: we need to note down the current default namespace and set back to this
# namespace after the tests are done.
kubectl config view
CONTEXT="test"
kubectl config set-context "${CONTEXT}"
kubectl config use-context "${CONTEXT}"
kube::log::status "Setup complete"
}
# Runs all pod related tests.
run_pod_tests() {
kube::log::status "Testing kubectl(v1:pods)"
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
# Describe command should print detailed information
kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:" "Controllers"
# Describe command should print events information by default
kube::test::describe_object_events_assert pods 'valid-pod'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert pods 'valid-pod' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert pods 'valid-pod' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controllers"
# Describe command should print events information by default
kube::test::describe_resource_events_assert pods
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert pods false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert pods true
### Validate Export ###
kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")
### Delete POD valid-pod by id
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --now
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --now
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --grace-period=0
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command succeeds without --force by waiting
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from dumped YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
echo "${output_pod}" | $SED '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod from JSON
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create POD valid-pod from YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Deleting PODs with no parameter mustn't kill everything
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete pods "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Deleting PODs with both --all and a label selector is not permitted
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete all PODs
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all removes all the pods
# Post-condition: no POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
# Detailed tests for describe pod output
### Create a new namespace
# Pre-condition: the test-kubectl-describe-pod namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-kubectl-describe-pod
# Post-condition: namespace 'test-kubectl-describe-pod' is created.
kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'
### Create a generic secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type'
### Create a generic configmap
# Pre-condition: no CONFIGMAP exists
kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'
### Create a pod disruption budget
# Command
kubectl create pdb test-pdb --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '2'
# Command
kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod
kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Describe command (resource only) should print detailed information about environment variables
kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Clean-up
kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
kubectl delete pdb/test-pdb pdb/test-pdb-2 --namespace=test-kubectl-describe-pod
kubectl delete namespace test-kubectl-describe-pod
### Create two PODs
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/redis/redis-proxy.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-proxy PODs are created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
### Delete multiple PODs at once
# Pre-condition: valid-pod and redis-proxy PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
# Command
kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
# Post-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
# Post-condition: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
### Label the valid-pod POD with empty label value
# Pre-condition: valid-pod does not have label "emptylabel"
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
# Command
kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptylabel" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''
### Annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '<no value>'
# Command
kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptyannotation" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
### Record label change
# Pre-condition: valid-pod does not have record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
# Command
kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod has record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Do not record label change
# Command
kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation still contains command with --record=true
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Record label change with unspecified flag and previous change already recorded
# Command
kubectl label pods valid-pod new-record-change=true "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation contains new change
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create pod-with-precision POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
# Post-condition: pod-with-precision POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'
## Patch preserves precision
# Command
kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
# Post-condition: pod-with-precision POD has patched annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
# Command
kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has label
kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
# Command
kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Cleanup
kubectl delete pod pod-with-precision "${kube_flags[@]}"
### Annotate POD YAML file locally without affecting the live pod.
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Command
kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"
# Pre-condition: annotatekey is annotatevalue
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Command
output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: annotatekey is still annotatevalue in the live pod, but command output is the new value
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
kube::test::if_has_string "${output_message}" "localvalue"
# Cleanup
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
### Create valid-pod POD
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl create --edit can update the label field of multiple resources. A temporary tmp-editor-*.sh script serves as a fake editor
TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
echo -e "#!/bin/bash\n$SED -i \"s/mock/modified/g\" \$1" > ${TEMP}
chmod +x ${TEMP}
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
rm ${TEMP}
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
## kubectl create --edit won't create anything if user makes no changes
[ "$(EDITOR=cat kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1 | grep 'Edit cancelled')" ]
## Create valid-pod POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## Patch can modify a local object
kubectl patch --local -f pkg/api/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never"
## Patch pod can change image
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# Post-condition: valid-pod has the record annotation
kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}"
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
# Post-condition: valid-pod POD has image nginx2
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
# Post-condition: valid-pod POD has image changed-with-yaml
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}'
# Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:'
## If resourceVersion is specified in the patch, it is treated as a precondition, i.e., if the resourceVersion differs from the one stored in the server, the patch is rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
# Command
# Needs to retry because another party may change the resource.
for count in {0..3}; do
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
break
fi
done
## If the resourceVersion differs from the one stored in the server, the patch will be rejected.
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
((resourceVersion+=100))
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the conflict
if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
## --force replace pod can change other fields, e.g., the container name
# Command
kubectl get "${kube_flags[@]}" pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
# Post-condition: spec.containers[0].name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
## check replace --grace-period requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
## check replace --timeout requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'
# cleanup
rm /tmp/tmp-valid-pod.json
## replace of a cluster scoped resource can succeed
# Pre-condition: a node exists
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test"
}
}
__EOF__
kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test",
"annotations": {"a":"b"}
}
}
__EOF__
# Post-condition: the node command succeeds
kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
kubectl delete node node-v1-test "${kube_flags[@]}"
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
# Pre-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod
# Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
# cleaning
rm /tmp/tmp-editor.sh
## kubectl edit should work on Windows
[ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
[ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
[ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
[ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
[ "$(EDITOR=cat kubectl edit ns | grep 'kind: List')" ]
### Label POD YAML file locally without affecting the live pod.
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: name is still valid-pod in the live pod, but command output is the new value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
kube::test::if_has_string "${output_message}" "localonlyvalue"
### Overwriting an existing label is not permitted
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
### --overwrite must be used to overwrite an existing label; it can be applied to all resources
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is valid-pod-super-sayan
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs from 1 yaml file
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-proxy PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
### Delete two PODs from 1 yaml file
# Pre-condition: redis-master and redis-proxy PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
# Command
kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: no PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply should update configuration annotations only if apply has already been called
## 1. kubectl create doesn't set the annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is applied
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
## 4. kubectl replace updates an existing annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
! diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
}
# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
## kubectl apply --prune
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "a" not found'
# cleanup
kubectl delete pods b
# same thing without prune for a sanity check
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check both pods exist
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
# cleanup
kubectl delete pod/a pod/b
## kubectl apply --prune requires a --all flag to select everything
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" \
'all resources selected for prune without explicitly passing --all'
# should apply everything
kubectl apply --all --prune -f hack/testdata/prune
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
kubectl delete pod/a pod/b
## kubectl apply --prune should fall back to delete for non-reapable types
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
## kubectl apply --prune --prune-whitelist
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and don't prune pod a by overwriting whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and prune pod a with default whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# cleanup
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
}
# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
## kubectl create -f with label selector should only create matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# create
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
}
run_kubectl_apply_deployments_tests() {
## kubectl apply should propagate user-defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check that the right labels exist
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# check that the right labels exist
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
}
# Runs tests for --save-config tests.
run_save_config_tests() {
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod"
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# tests kubectl group prefix matching
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
}
run_kubectl_run_tests() {
## kubectl run should create deployments or jobs
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Post-condition: no pods exist.
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx "--image=$IMAGE_NGINX" --generator=deployment/v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
}
run_kubectl_get_tests() {
### Test retrieval of non-existing pods
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of non-existing POD with output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of pods when none exist with non-human readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
### Test retrieval of pods when none exist, with human-readable output format flag specified
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test retrieval of non-existing POD with json output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
# Post-condition: make sure we don't display an empty List
if kube::test::if_has_string "${output_message}" 'List'; then
echo 'Unexpected List output'
echo "${LINENO} $(basename $0)"
exit 1
fi
### Test kubectl get all
output_message=$(kubectl --v=6 --namespace default get all 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get 200 OK from all the url(s)
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1beta1/namespaces/default/statefulsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
### Test --allow-missing-template-keys
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --allow-missing-template-keys defaults to true for jsonpath templates
kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
## check --allow-missing-template-keys defaults to true for go templates
kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'missing is not found'
## check --allow-missing-template-keys=false results in an error for a missing key with go
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: PODs redis-master and redis-proxy exist
# Check that all items in the list are printed
output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "redis-master redis-proxy"
# cleanup
kubectl delete pods redis-master redis-proxy "${kube_flags[@]}"
}
run_kubectl_request_timeout_tests() {
### Test global request timeout option
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --request-timeout on 'get pod'
output_message=$(kubectl get pod valid-pod --request-timeout=1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout on 'get pod' with --watch
output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'
## check --request-timeout value with no time unit
output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout value with invalid time unit
output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
kube::test::if_has_string "${output_message}" 'Invalid timeout value'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
}
run_tpr_tests() {
create_and_use_new_namespace
kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "ThirdPartyResource",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "foo.company.com"
},
"versions": [
{
"name": "v1"
}
]
}
__EOF__
# Post-Condition: the foo.company.com ThirdPartyResource exists
kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'foo.company.com:'
kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "ThirdPartyResource",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "bar.company.com"
},
"versions": [
{
"name": "v1"
}
]
}
__EOF__
# Post-Condition: both bar.company.com and foo.company.com ThirdPartyResources exist
kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'bar.company.com:foo.company.com:'
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1" "third party api"
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1/foos" "third party api Foo"
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1/bars" "third party api Bar"
# Test that we can list this new third party resource (foos)
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new third party resource (bars)
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Foo
kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/foo.yaml "${kube_flags[@]}"
# Test that we can list this new third party resource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test alternate forms
kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test all printers, with lists and individual items
kube::log::status "Testing ThirdPartyResource printing"
kubectl "${kube_flags[@]}" get foos
kubectl "${kube_flags[@]}" get foos/test
kubectl "${kube_flags[@]}" get foos -o name
kubectl "${kube_flags[@]}" get foos/test -o name
kubectl "${kube_flags[@]}" get foos -o wide
kubectl "${kube_flags[@]}" get foos/test -o wide
kubectl "${kube_flags[@]}" get foos -o json
kubectl "${kube_flags[@]}" get foos/test -o json
kubectl "${kube_flags[@]}" get foos -o yaml
kubectl "${kube_flags[@]}" get foos/test -o yaml
kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
# Test patching
kube::log::status "Testing ThirdPartyResource patching"
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
# Get local version
TPR_RESOURCE_FILE="${KUBE_TEMP}/tpr-foos-test.json"
kubectl "${kube_flags[@]}" get foos/test -o json > "${TPR_RESOURCE_FILE}"
# cannot apply strategic patch locally
TPR_PATCH_ERROR_FILE="${KUBE_TEMP}/tpr-foos-test-error"
! kubectl "${kube_flags[@]}" patch --local -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${TPR_PATCH_ERROR_FILE}"
if grep -q "try --type merge" "${TPR_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for ThirdPartyResource: $(cat ${TPR_PATCH_ERROR_FILE})"
else
kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${TPR_PATCH_ERROR_FILE})"
exit 1
fi
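# Note: a strategic merge patch needs a registered type schema to know how fields and
# lists merge; TPR objects are unstructured, so kubectl cannot compute one and suggests
# --type merge instead, which is the message the grep above looks for.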
# can apply merge patch locally
kubectl "${kube_flags[@]}" patch --local -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
# can apply merge patch remotely
kubectl "${kube_flags[@]}" patch --record -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
rm "${TPR_RESOURCE_FILE}"
rm "${TPR_PATCH_ERROR_FILE}"
# Test labeling
kube::log::status "Testing ThirdPartyResource labeling"
kubectl "${kube_flags[@]}" label foos --all listlabel=true
kubectl "${kube_flags[@]}" label foo/test itemlabel=true
# Test annotating
kube::log::status "Testing ThirdPartyResource annotating"
kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
# Test describing
kube::log::status "Testing ThirdPartyResource describing"
kubectl "${kube_flags[@]}" describe foos
kubectl "${kube_flags[@]}" describe foos/test
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
# Delete the resource
kubectl "${kube_flags[@]}" delete foos test
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Bar
kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/bar.yaml "${kube_flags[@]}"
# Test that we can list this new third party resource
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Delete the resource
kubectl "${kube_flags[@]}" delete bars test
# Make sure it's gone
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create single item via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo.yaml
# Test that we have created a foo named test
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that applying the same config (an empty patch) doesn't change fields
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
# Update a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-updated-subfield.yaml
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
# Delete a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-deleted-subfield.yaml
# Test that apply has deleted the field
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
# Test that the field does not exist
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-added-subfield.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/foo.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create list via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list.yaml
# Test that we have created a foo and a bar from the list
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that re-applying the list doesn't change anything
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Update fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-updated-field.yaml
# Test that apply has updated the fields
kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
# Delete fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-deleted-field.yaml
# Test that apply has deleted the fields
kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
# Test that the fields do not exist
kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-added-field.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/multi-tpr-list.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply --prune
# Test that no foo or bar exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on foo.yaml that has foo/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# Check that only the expected TPR instances exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on bar.yaml that has bar/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# Check that only the expected TPR instances exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/bar.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# teardown
kubectl delete thirdpartyresources foo.company.com "${kube_flags[@]}"
kubectl delete thirdpartyresources bar.company.com "${kube_flags[@]}"
}
run_recursive_resources_tests() {
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
# The reason why busybox0 & busybox1 PODs are not edited is because the editor tries to load all objects in
# a list but since it contains invalid objects, it will never open.
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# cleaning
rm /tmp/tmp-editor.sh
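# Note (illustration only): 'kubectl edit' writes the object(s) to a temp file and invokes
# $EDITOR with that file as its argument; the fake editor above rewrites the image field in
# place, and kubectl then applies whatever the editor saved. A hypothetical no-op editor for
# experiments would simply be a script that exits 0 without modifying "$1".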
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controllers recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: busybox0 & busybox1 replication controllers are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Rollout on multiple deployments recursively
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Rollback the deployments to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Pause the deployments recursively
PRESERVE_ERR_FILE=true
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Resume the deployments recursively
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Retrieve the rollout history of the deployments recursively
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
kube::test::if_has_string "${output_message}" "nginx0-deployment"
kube::test::if_has_string "${output_message}" "nginx1-deployment"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to rollback the replication controllers to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Attempt to pause the replication controllers recursively
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
## Attempt to resume the replication controllers recursively
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
# Clean up
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
}
run_namespace_tests() {
kube::log::status "Testing kubectl(v1:namespaces)"
### Create a new namespace
# Pre-condition: only the "default" namespace exists
# This pre-condition no longer holds once the test creates and switches namespaces to create pods with the same name, so the assertion below is commented out.
# kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Clean up
kubectl delete namespace my-namespace
######################
# Pods in Namespaces #
######################
if kube::test::if_supports_resource "${pods}" ; then
### Create a new namespace
# Pre-condition: the other namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace other
# Post-condition: namespace 'other' is created.
kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
### Create POD valid-pod in specific namespace
# Pre-condition: no POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: a resource cannot be retrieved by name across all namespaces
output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
### Delete POD valid-pod in specific namespace
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other
fi
}
run_secrets_test() {
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-secrets
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
### Create a generic secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a docker-registry secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='[email protected]' --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a tls secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Create a secret using stringData
kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": "secret-string-data"
},
"data": {
"k1":"djE=",
"k2":""
},
"stringData": {
"k2":"v2"
}
}
__EOF__
# Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
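# Note: stringData is a write-only convenience field: the API server base64-encodes it
# ('v2' -> 'djI=', just as 'v1' -> 'djE=') and merges it into data, overriding conflicting
# keys and clearing stringData afterwards, which is why k2 above ends up as djI= rather
# than the empty value supplied under data.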
# Clean up
kubectl delete secret secret-string-data --namespace=test-secrets
### Create a secret using output flags
# Pre-condition: no secret exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
[[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
## Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Clean up
kubectl delete namespace test-secrets
}
run_configmap_tests() {
kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
kubectl delete configmap test-configmap "${kube_flags[@]}"
### Create a new namespace
# Pre-condition: the test-configmaps namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-configmaps
# Post-condition: namespace 'test-configmaps' is created.
kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
### Create a generic configmap in a specific namespace
# Pre-condition: no configmap exists in the test-configmaps namespace
kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
[[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete namespace test-configmaps
}
run_service_tests() {
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:services)"
### Create redis-master service from JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Describe command should print detailed information
kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_object_events_assert services 'redis-master'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert services 'redis-master' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert services 'redis-master' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert services
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert services false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert services true
### set selector
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Set command to change the selector.
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
# Set command to reset the selector back to the original one.
kubectl set selector -f examples/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
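# Note: 'kubectl set selector' replaces the service's selector wholesale rather than merging
# into it, which is why the padawan step above leaves only "padawan:" and the reset step has
# to restate all three labels.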
### Dump current redis-master service
output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
### Delete redis-master-service by id
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create redis-master-service from dumped JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: redis-master service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
### Create service-v1-test service
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "service-v1-test"
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
__EOF__
# Post-condition: service-v1-test service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
### Identity
kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
### Delete services by id
# Pre-condition: service-v1-test exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
kubectl delete service "service-v1-test" "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create two services
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-slave services are created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
### Custom columns can be specified
# Command: generate output using custom columns
output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
# Post-condition: should contain name column
kube::test::if_has_string "${output_message}" 'redis-master'
### Delete multiple services at once
# Pre-condition: redis-master and redis-slave services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
# Command
kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create an ExternalName service
# Pre-condition: Only the default kubernetes service exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create service externalname beep-boop --external-name bar.com
# Post-condition: beep-boop service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
### Delete beep-boop service by id
# Pre-condition: beep-boop service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
# Command
kubectl delete service beep-boop "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
}
run_rc_tests() {
kube::log::status "Testing kubectl(v1:replicationcontrollers)"
### Create and delete a controller, make sure it doesn't leak pods
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rc 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rc 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rc 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rc "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rc
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rc false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rc true
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
### Scale replication controller from JSON with replicas only
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Clean-up
kubectl delete rc frontend "${kube_flags[@]}"
### Scale multiple replication controllers
kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
# Clean-up
kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
### Scale a job
kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml "${kube_flags[@]}"
# Command
kubectl scale --replicas=2 job/pi
# Post-condition: 2 replicas for pi
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Expose a deployment as a service
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
# Command
kubectl expose deployment/nginx-deployment
# Post-condition: service exists and exposes deployment port (80)
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl expose rc frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Command
kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
# Create a service using service/v1 generator
kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
# Verify that expose service works without specifying a port.
kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
# Post-condition: service exists with the same port as the original service.
kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
# Cleanup services
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
### Expose negative invalid resource test
# Pre-condition: none needed
# Command
output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
# Post-condition: the error message has "cannot expose" string
kube::test::if_has_string "${output_message}" 'cannot expose'
### Try to generate a service with invalid name (exceeding maximum valid size)
# Pre-condition: use --name flag
output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: should fail due to invalid name
kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
# Pre-condition: default run without --name flag; should succeed by truncating the inherited name
output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: inherited name from pod has been truncated
kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostname-testing-sixty-three-characters-in-len\" exposed'
# Clean-up
kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
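# Note: service names must be valid DNS labels of at most 63 characters, which is why the
# explicit --name above is rejected with 'metadata.name: Invalid value' while the name
# inherited from the pod is truncated to 63 characters and accepted.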
### Expose multiport object as a new service
# Pre-condition: don't use --port flag
output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
# Post-condition: expose succeeded
kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
# Post-condition: generated service has both ports from the exposed pod
kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 2379'
# Clean-up
kubectl delete svc etcd-server "${kube_flags[@]}"
### Delete replication controller with id
# Pre-condition: frontend replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replication controllers
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave replication controllers exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple controllers at once
# Pre-condition: frontend and redis-slave replication controllers exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Auto scale replication controller
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, rc specified by file
kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, rc specified by name
kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rc frontend "${kube_flags[@]}"
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
## Set resource limits/request of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's cpu limits
kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Setting resources on a non-existent container should fail
! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set limits/requests of a deployment specified by a file
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show that dry-run works on running deployments
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Clean up
kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
}
run_deployment_tests() {
# Test kubectl create deployment
kubectl create deployment test-nginx --image=gcr.io/google-containers/nginx:test-cmd
# Post-condition: the deployment is created and its container is named nginx
kube::test::get_object_assert 'deploy test-nginx' "{{$container_name_field}}" 'nginx'
# Clean up
kubectl delete deployment test-nginx "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted and then verify that rs is not
# deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Cleanup
# Find the name of the rs to be deleted.
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
kubectl delete rs ${output_message} "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
cat hack/testdata/deployment-revision1.yaml | $SED "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Newest deployment should be marked as overlapping
kubectl get deployment nginx2 -o yaml "${kube_flags[@]}" | grep "deployment.kubernetes.io/error-selector-overlapping-with"
# Oldest deployment should not be marked as overlapping
! kubectl get deployment nginx -o yaml "${kube_flags[@]}" | grep "deployment.kubernetes.io/error-selector-overlapping-with"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Setting a non-existent container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
}
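# Exercises replica set create/delete (with and without cascade), describe, scale, expose, and autoscale.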
run_rs_tests() {
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
}
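# Exercises create, get, describe, replace, label, annotate, edit, and delete against multi-resource manifests supplied as files and via URL.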
run_multi_resources_tests() {
kube::log::status "Testing kubectl(v1:multiple resources)"
FILES="hack/testdata/multi-resource-yaml
hack/testdata/multi-resource-list
hack/testdata/multi-resource-json
hack/testdata/multi-resource-rclist
hack/testdata/multi-resource-svclist"
YAML=".yaml"
JSON=".json"
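# Each base name above exists as either a .yaml or a .json file; a matching
# "-modify" variant of the same file is used later by "kubectl replace".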
for file in $FILES; do
if [ -f $file$YAML ]
then
file=$file$YAML
replace_file="${file%.yaml}-modify.yaml"
else
file=$file$JSON
replace_file="${file%.json}-modify.json"
fi
has_svc=true
has_rc=true
two_rcs=false
two_svcs=false
if [[ "${file}" == *rclist* ]]; then
has_svc=false
two_rcs=true
fi
if [[ "${file}" == *svclist* ]]; then
has_rc=false
two_svcs=true
fi
### Create, get, describe, replace, label, annotate, and then delete the mock service and mock replication controller from 5 types of files:
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
echo "Testing with file ${file} and replace with file ${replace_file}"
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f "${file}" "${kube_flags[@]}"
# Post-condition: mock service (and mock2) exists
if [ "$has_svc" = true ]; then
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:mock2:'
else
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:'
fi
fi
# Post-condition: mock rc (and mock2) exists
if [ "$has_rc" = true ]; then
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Command
kubectl get -f "${file}" "${kube_flags[@]}"
# Command: watching multiple resources should return "not supported" error
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1"
fi
kubectl describe -f "${file}" "${kube_flags[@]}"
# Command
kubectl replace -f $replace_file --force --cascade "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
# Command: kubectl edit multiple resources
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
# cleaning
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service and mock rc (and mock2) are labeled
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
# Cleanup resources created
kubectl delete -f "${file}" "${kube_flags[@]}"
done
##############################
# Multiple Resources via URL #
##############################
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: service(mock) and rc(mock) exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
# Clean up
kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version
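# Counter and helper for creating fresh per-test namespaces and switching the
# kubectl context to them.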
i=0
create_and_use_new_namespace() {
i=$(($i+1))
kube::log::status "Creating namespace namespace${i}"
kubectl create namespace "namespace${i}"
kubectl config set-context "${CONTEXT}" --namespace="namespace${i}"
}
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)
if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi
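# Go template field paths used by the get_object_assert checks throughout these tests.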
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
statefulset_replicas_field=".spec.replicas"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
deployment_image_field="(index .spec.template.spec.containers 0).image"
deployment_second_image_field="(index .spec.template.spec.containers 1).image"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
if [[ ! $(echo "${output_message}" | grep "default") ]]; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi
# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi
# Passing no arguments to create is an error
! kubectl create
#######################
# kubectl config set #
#######################
kube::log::status "Testing kubectl(v1:config set)"
kubectl config set-cluster test-cluster --server="https://does-not-work"
# Get the api cert and add a comment to avoid flag parsing problems
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
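# Writing the raw bytes and writing the pre-encoded base64 form should store
# the same certificate-authority-data.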
r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
encoded=$(echo -n "$cert_data" | base64)
kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
test "$e_written" == "$r_written"
#######################
# kubectl local proxy #
#######################
# Make sure the UI can be proxied
start-proxy
check-curl-proxy-code /ui 301
check-curl-proxy-code /api/ui 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/ui 301
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy
#########################
# RESTMapper evaluation #
#########################
kube::log::status "Testing RESTMapper"
RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
### Non-existent resource type should give a recognizable error
# Pre-condition: None
# Command
kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
else
kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
exit 1
fi
rm "${RESTMAPPER_ERROR_FILE}"
# Post-condition: None
kubectl get "${kube_flags[@]}" --raw /version
if kube::test::if_supports_resource "${clusterroles}" ; then
# make sure the server was properly bootstrapped with clusterroles and bindings
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# test `kubectl create clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin -n default
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group -n default
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name -n default
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
fi
if kube::test::if_supports_resource "${roles}" ; then
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
fi
#########################
# Assert short name #
#########################
kube::log::status "Testing propagation of short names for resources"
output_message=$(kubectl get --raw=/api/v1)
## test if a short name is exported during discovery
kube::test::if_has_string "${output_message}" '{"name":"configmaps","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
###########################
# POD creation / deletion #
###########################
if kube::test::if_supports_resource "${pods}" ; then
run_pod_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
run_save_config_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the empty string
if grep -q "unexpected nil value for field" "${ERROR_FILE}"; then
kube::log::status "\"kubectl create with empty string list returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl create with empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
fi
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move apply tests to run on rs instead of pods so that they can be
# run for federation apiserver as well.
run_kubectl_apply_tests
run_kubectl_run_tests
run_kubectl_create_filter_tests
fi
if kube::test::if_supports_resource "${deployments}" ; then
run_kubectl_apply_deployments_tests
fi
###############
# Kubectl get #
###############
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move get tests to run on rs instead of pods so that they can be
# run for federation apiserver as well.
run_kubectl_get_tests
fi
##################
# Global timeout #
##################
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move request timeout tests to run on rs instead of pods so that they
# can be run for federation apiserver as well.
run_kubectl_request_timeout_tests
fi
#####################################
# Third Party Resources #
#####################################
if kube::test::if_supports_resource "${thirdpartyresources}" ; then
run_tpr_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment "test1" created'
kubectl delete deployments test1
# test invalid image name
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
fi
#####################################
# Recursive Resources via directory #
#####################################
if kube::test::if_supports_resource "${pods}" ; then
run_recursive_resources_tests
fi
##############
# Namespaces #
##############
if kube::test::if_supports_resource "${namespaces}" ; then
run_namespace_tests
fi
###########
# Secrets #
###########
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${secrets}" ; then
run_secrets_test
fi
fi
######################
# ConfigMap #
######################
if kube::test::if_supports_resource "${namespaces}"; then
if kube::test::if_supports_resource "${configmaps}" ; then
run_configmap_tests
fi
fi
####################
# Client Config #
####################
# Command
# Pre-condition: kubeconfig "missing" is not a file or directory
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: kubeconfig "missing" is not a file or directory
# Command
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
# Post-condition: --user contains a valid / empty value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Command
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
# Post-condition: --cluster contains a "valid" value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: context "missing-context" does not exist
# Command
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
kube::test::if_has_string "${output_message}" 'context "missing-context" does not exist'
# Post-condition: invalid or missing context returns error
# Pre-condition: cluster "missing-cluster" does not exist
# Command
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
kube::test::if_has_string "${output_message}" 'cluster "missing-cluster" does not exist'
# Post-condition: invalid or missing cluster returns error
# Pre-condition: user "missing-user" does not exist
# Command
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
# Post-condition: invalid or missing user returns error
# test invalid config
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
kube::test::if_has_string "${output_message}" "Error loading config file"
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'
####################
# Service Accounts #
####################
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
### Create a new namespace
# Pre-condition: the test-service-accounts namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-service-accounts
# Post-condition: namespace 'test-service-accounts' is created.
kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
### Create a service account in a specific namespace
# Command
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
# Clean-up
kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
# Clean up
kubectl delete namespace test-service-accounts
fi
#################
# Pod templates #
#################
if kube::test::if_supports_resource "${podtemplates}" ; then
### Create PODTEMPLATE
# Pre-condition: no PODTEMPLATE
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
# Post-condition: nginx PODTEMPLATE is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
### Printing pod templates works
kubectl get podtemplates "${kube_flags[@]}"
[[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist
kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
fi
############
# Services #
############
if kube::test::if_supports_resource "${services}" ; then
run_service_tests
fi
###########################
# Replication controllers #
###########################
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
run_rc_tests
fi
fi
######################
# Deployments #
######################
if kube::test::if_supports_resource "${deployments}" ; then
run_deployment_tests
fi
######################
# Replica Sets #
######################
if kube::test::if_supports_resource "${replicasets}" ; then
run_rs_tests
fi
#################
# Stateful Sets #
#################
if kube::test::if_supports_resource "${statefulsets}" ; then
kube::log::status "Testing kubectl(v1:statefulsets)"
### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/nginx-statefulset.yaml "${kube_flags[@]}"
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
# Command: Scale up
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
# Typically we'd wait and confirm that N>1 replicas are up, but this framework
# doesn't start the scheduler, so nginx-0 will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
### Clean up
kubectl delete -f hack/testdata/nginx-statefulset.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
fi
######################
# Lists #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${deployments}" ; then
kube::log::status "Testing kubectl(v1:lists)"
### Create a List with objects from multiple versions
# Command
kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"
### Delete the List with objects from multiple versions
# Command
kubectl delete service/list-service-test deployment/list-deployment-test
fi
fi
######################
# Multiple Resources #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
run_multi_resources_tests
fi
fi
######################
# Persistent Volumes #
######################
if kube::test::if_supports_resource "${persistentvolumes}" ; then
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
fi
############################
# Persistent Volume Claims #
############################
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
fi
############################
# Storage Classes #
############################
if kube::test::if_supports_resource "${storageclass}" ; then
### Create and delete storage class
# Pre-condition: no storage classes currently exist
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "StorageClass",
"apiVersion": "storage.k8s.io/v1beta1",
"metadata": {
"name": "storage-class-name"
},
"provisioner": "kubernetes.io/fake-provisioner-type",
"parameters": {
"zone":"us-east-1b",
"type":"ssd"
}
}
__EOF__
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kubectl delete storageclass storage-class-name "${kube_flags[@]}"
# Post-condition: no storage classes
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
fi
#########
# Nodes #
#########
if kube::test::if_supports_resource "${nodes}" ; then
kube::log::status "Testing kubectl(v1:nodes)"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_object_events_assert nodes "127.0.0.1"
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert nodes "127.0.0.1" false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert nodes "127.0.0.1" true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert nodes
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert nodes false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert nodes true
### kubectl patch update can mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
# Post-condition: node is unschedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
# Post-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# Check the webhook token authentication endpoint. kubectl doesn't actually display the returned object, so this isn't super useful,
# but it proves that the endpoint works.
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false
fi
########################
# authorization.k8s.io #
########################
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
# Check the remote authorization endpoint. kubectl doesn't actually display the returned object, so this isn't super useful,
# but it proves that the endpoint works.
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1beta1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1.json > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
fi
#####################
# Retrieve multiple #
#####################
if kube::test::if_supports_resource "${nodes}" ; then
if kube::test::if_supports_resource "${services}" ; then
kube::log::status "Testing kubectl(v1:multiget)"
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
fi
fi
#####################
# Resource aliasing #
#####################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
kube::log::status "Testing resource aliasing"
kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}"
object="all -l'app=cassandra'"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
# all 4 cassandra's might not be in the request immediately...
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'
kubectl delete all -l app=cassandra "${kube_flags[@]}"
fi
fi
###########
# Explain #
###########
if kube::test::if_supports_resource "${pods}" ; then
kube::log::status "Testing kubectl(v1:explain)"
kubectl explain pods
# shortcuts work
kubectl explain po
kubectl explain po.status.message
fi
###########
# Swagger #
###########
# Verify schema
file="${KUBE_TEMP}/schema-v1.json"
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
[[ "$(grep "list of returned" "${file}")" ]]
[[ "$(grep "List of services" "${file}")" ]]
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
#####################
# Kubectl --sort-by #
#####################
if kube::test::if_supports_resource "${pods}" ; then
### sort-by should not panic if no pod exists
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl get pods --sort-by="{metadata.name}"
kubectl get pods --sort-by="{metadata.creationTimestamp}"
fi
############################
# Kubectl --all-namespaces #
############################
if kube::test::if_supports_resource "${pods}" ; then
# Pre-condition: the "default" namespace exists
kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
### Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Verify a specific namespace is ignored when all-namespaces is provided
# Command
kubectl get pods --all-namespaces --namespace=default
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
fi
################
# Certificates #
################
if kube::test::if_supports_resource "${csr}" ; then
# approve
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# deny
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
fi
kube::test::clear_all
}
| iTagir/kubernetes | hack/make-rules/test-cmd-util.sh | Shell | apache-2.0 | 182,384 |
#!/usr/bin/env bash
set -e
# Create a temp dir and clean it up on exit
TEMPDIR=`mktemp -d -t maya-test.XXX`
trap "rm -rf $TEMPDIR" EXIT HUP INT QUIT TERM
# Build the Maya binary for the tests
echo "--> Building maya"
go build -o $TEMPDIR/maya || exit 1
# Run the tests
echo "--> Running tests"
GOBIN="`which go`"
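# Put the freshly built maya binary first on PATH and run "go test" over every
# package except vendored ones.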
PATH=$TEMPDIR:$PATH \
$GOBIN test ${GOTEST_FLAGS:--cover -timeout=900s} $($GOBIN list ./... | grep -v /vendor/)
| gkGaneshR/maya | buildscripts/test.sh | Shell | apache-2.0 | 433 |
#!/bin/sh
which setxkbmap
if test $? -ne 0
then
echo "error, setxkbmap not found"
exit 1
fi
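# For each layout below: switch the X keymap with setxkbmap, then dump it to
# the matching km-<layout-id>.ini file used by xrdp.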
# English - US 'en-us' 0x00000409
setxkbmap -model pc104 -layout us
./xrdp-genkeymap ../instfiles/km-00000409.ini
# English - UK 'en-GB' 0x00000809
setxkbmap -model pc105 -layout gb
./xrdp-genkeymap ../instfiles/km-00000809.ini
# German 'de' 0x00000407
setxkbmap -model pc104 -layout de
./xrdp-genkeymap ../instfiles/km-00000407.ini
# Italian 'it' 0x00000410
setxkbmap -model pc104 -layout it
./xrdp-genkeymap ../instfiles/km-00000410.ini
# Japanese 'jp' 0x00000411
setxkbmap -model pc105 -layout jp -variant OADG109A
./xrdp-genkeymap ../instfiles/km-00000411.ini
# Polish 'pl' 0x00000415
setxkbmap -model pc104 -layout pl
./xrdp-genkeymap ../instfiles/km-00000415.ini
# Russian 'ru' 0x00000419
setxkbmap -model pc104 -layout ru
./xrdp-genkeymap ../instfiles/km-00000419.ini
# Swedish 'se' 0x0000041d
setxkbmap -model pc104 -layout se
./xrdp-genkeymap ../instfiles/km-0000041d.ini
# Portuguese -PT 'pt-pt' 0x00000816
setxkbmap -model pc104 -layout pt
./xrdp-genkeymap ../instfiles/km-00000816.ini
# set back to en-us
setxkbmap -model pc104 -layout us
| cocoon/xrdp | genkeymap/dump-keymaps.sh | Shell | apache-2.0 | 1,156 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# shellcheck source=scripts/ci/libraries/_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/../libraries/_script_init.sh"
if [[ ${GITHUB_REF} == 'refs/heads/master' ]]; then
echo "::set-output name=branch::constraints-master"
elif [[ ${GITHUB_REF} == 'refs/heads/v1-10-test' ]]; then
echo "::set-output name=branch::constraints-1-10"
elif [[ ${GITHUB_REF} == 'refs/heads/v2-0-test' ]]; then
echo "::set-output name=branch::constraints-2-0"
else
echo
echo "Unexpected ref ${GITHUB_REF}. Exiting!"
echo
exit 1
fi
|
DinoCow/airflow
|
scripts/ci/constraints/ci_branch_constraints.sh
|
Shell
|
apache-2.0
| 1,332 |
wget https://dist.thingsboard.io/thingsboard-2.5.1pe.rpm
|
thingsboard/thingsboard.github.io
|
docs/user-guide/install/pe/resources/2.5.1pe/thingsboard-centos-download.sh
|
Shell
|
apache-2.0
| 57 |
#!/bin/sh
# This runs a series of multiple command tests (without comments)
../bin/rshell < multi_command_test
|
kchen068/rshell
|
tests/multi_command.sh
|
Shell
|
bsd-3-clause
| 105 |
#!/bin/bash
if [ "$TRAVIS_OS_NAME" == "linux" ]
then
# Add repositories
sudo add-apt-repository -y 'deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-3.9 main'
wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
sudo apt-get update -qq
# Remove existing LLVM
sudo apt-get remove llvm
# Install Clang + LLVM
sudo apt-get install -y llvm-3.9-dev libclang-3.9-dev clang-3.9
sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-3.9 20
sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-3.9 20
sudo rm -f /usr/local/clang-3.5.0/bin/clang
sudo rm -f /usr/local/clang-3.5.0/bin/clang++
# Other dependencies
sudo apt-get install -y libedit-dev
elif [ "$TRAVIS_OS_NAME" == "osx" ]
then
brew update
brew install -v llvm --with-clang
fi
|
mpflanzer/Oclgrind
|
.travis-deps.sh
|
Shell
|
bsd-3-clause
| 863 |
#!/bin/bash
# Copyright (c) 2014 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Replacement script for 'grub-install' which does not detect drives
# properly when partitions are mounted via individual loopback devices.
SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
. "${SCRIPT_ROOT}/common.sh" || exit 1
# We're invoked only by build_image, which runs in the chroot
assert_inside_chroot
# Flags.
DEFINE_string target "" \
"The GRUB target to install such as i386-pc or x86_64-efi"
DEFINE_string esp_dir "" \
"Path to EFI System partition mount point."
DEFINE_string disk_image "" \
"The disk image containing the EFI System partition."
# Parse flags
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
switch_to_strict_mode
# Our GRUB lives under coreos/grub so new pygrub versions cannot find grub.cfg
GRUB_DIR="coreos/grub/${FLAGS_target}"
# Modules required to boot a standard CoreOS configuration
CORE_MODULES=( normal search test fat part_gpt search_fs_uuid gzio search_part_label terminal gptprio configfile memdisk tar echo )
# Name of the core image, depends on target
CORE_NAME=
case "${FLAGS_target}" in
i386-pc)
CORE_MODULES+=( biosdisk serial )
CORE_NAME="core.img"
;;
x86_64-efi)
CORE_MODULES+=( serial linuxefi efi_gop getenv )
CORE_NAME="core.efi"
;;
x86_64-xen)
CORE_NAME="core.elf"
;;
*)
die_notrace "Unknown GRUB target ${FLAGS_target}"
;;
esac
# In order for grub-setup-bios to properly detect the layout of the disk
# image it expects a normal partitioned block device. For most of the build
# disk_util maps individual loop devices to each partition in the image so
# the kernel can automatically detach the loop devices on unmount. When
# using a single loop device with partitions there is no such cleanup.
# That's the story of why this script has all this goo for loop and mount.
ESP_DIR=
LOOP_DEV=
cleanup() {
if [[ -d "${ESP_DIR}" ]]; then
if mountpoint -q "${ESP_DIR}"; then
sudo umount "${ESP_DIR}"
fi
rm -rf "${ESP_DIR}"
fi
if [[ -b "${LOOP_DEV}" ]]; then
sudo losetup --detach "${LOOP_DEV}"
fi
}
trap cleanup EXIT
info "Installing GRUB ${FLAGS_target} in ${FLAGS_disk_image##*/}"
LOOP_DEV=$(sudo losetup --find --show --partscan "${FLAGS_disk_image}")
ESP_DIR=$(mktemp --directory)
# work around slow/buggy udev, make sure the node is there before mounting
if [[ ! -b "${LOOP_DEV}p1" ]]; then
# sleep a little just in case udev is ok but just not finished yet
warn "loopback device node ${LOOP_DEV}p1 missing, waiting on udev..."
sleep 0.5
for (( i=0; i<5; i++ )); do
if [[ -b "${LOOP_DEV}p1" ]]; then
break
fi
warn "looback device node still ${LOOP_DEV}p1 missing, reprobing..."
sudo blockdev --rereadpt ${LOOP_DEV}
sleep 0.5
done
if [[ ! -b "${LOOP_DEV}p1" ]]; then
failboat "${LOOP_DEV}p1 where art thou? udev has forsaken us!"
fi
fi
sudo mount -t vfat "${LOOP_DEV}p1" "${ESP_DIR}"
sudo mkdir -p "${ESP_DIR}/${GRUB_DIR}"
info "Compressing modules in ${GRUB_DIR}"
for file in "/usr/lib/grub/${FLAGS_target}"/*{.lst,.mod}; do
out="${ESP_DIR}/${GRUB_DIR}/${file##*/}"
gzip --best --stdout "${file}" | sudo_clobber "${out}"
done
info "Generating ${GRUB_DIR}/load.cfg"
# Include a small initial config in the core image to search for the ESP
# by filesystem ID in case the platform doesn't provide the boot disk.
# The existing $root value is given as a hint so it is searched first.
ESP_FSID=$(sudo grub-probe -t fs_uuid -d "${LOOP_DEV}p1")
sudo_clobber "${ESP_DIR}/${GRUB_DIR}/load.cfg" <<EOF
search.fs_uuid ${ESP_FSID} root \$root
set prefix=(memdisk)
set
EOF
if [[ ! -f "${ESP_DIR}/coreos/grub/grub.cfg.tar" ]]; then
info "Generating grub.cfg memdisk"
sudo tar cf "${ESP_DIR}/coreos/grub/grub.cfg.tar" \
-C "${BUILD_LIBRARY_DIR}" "grub.cfg"
fi
info "Generating ${GRUB_DIR}/${CORE_NAME}"
sudo grub-mkimage \
--compression=auto \
--format "${FLAGS_target}" \
--prefix "(,gpt1)/coreos/grub" \
--config "${ESP_DIR}/${GRUB_DIR}/load.cfg" \
--memdisk "${ESP_DIR}/coreos/grub/grub.cfg.tar" \
--output "${ESP_DIR}/${GRUB_DIR}/${CORE_NAME}" \
"${CORE_MODULES[@]}"
# Now target specific steps to make the system bootable
case "${FLAGS_target}" in
i386-pc)
info "Installing MBR and the BIOS Boot partition."
sudo cp "/usr/lib/grub/i386-pc/boot.img" "${ESP_DIR}/${GRUB_DIR}"
sudo grub-bios-setup --device-map=/dev/null \
--directory="${ESP_DIR}/${GRUB_DIR}" "${LOOP_DEV}"
;;
x86_64-efi)
info "Installing default x86_64 UEFI bootloader."
sudo mkdir -p "${ESP_DIR}/EFI/boot"
# Use the test keys for signing unofficial builds
if [[ ${COREOS_OFFICIAL:-0} -ne 1 ]]; then
sudo sbsign --key /usr/share/sb_keys/DB.key \
--cert /usr/share/sb_keys/DB.crt \
"${ESP_DIR}/${GRUB_DIR}/${CORE_NAME}"
sudo cp "${ESP_DIR}/${GRUB_DIR}/${CORE_NAME}.signed" \
"${ESP_DIR}/EFI/boot/grub.efi"
sudo sbsign --key /usr/share/sb_keys/DB.key \
--cert /usr/share/sb_keys/DB.crt \
--output "${ESP_DIR}/EFI/boot/bootx64.efi" \
"/usr/lib/shim/shim.efi"
else
sudo cp "${ESP_DIR}/${GRUB_DIR}/${CORE_NAME}" \
"${ESP_DIR}/EFI/boot/bootx64.efi"
fi
;;
x86_64-xen)
info "Installing default x86_64 Xen bootloader."
sudo mkdir -p "${ESP_DIR}/xen" "${ESP_DIR}/boot/grub"
sudo cp "${ESP_DIR}/${GRUB_DIR}/${CORE_NAME}" \
"${ESP_DIR}/xen/pvboot-x86_64.elf"
sudo cp "${BUILD_LIBRARY_DIR}/menu.lst" \
"${ESP_DIR}/boot/grub/menu.lst"
;;
esac
cleanup
trap - EXIT
command_completed
|
BugRoger/scripts
|
build_library/grub_install.sh
|
Shell
|
bsd-3-clause
| 5,967 |
#!/bin/bash
npm run build
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
for d in ${DIR}/../packages/*/ ; do (cd "$d" && npm publish); done
cd ${DIR}/../ && npm publish
|
clausejs/clausejs
|
scripts/publish_all.sh
|
Shell
|
mit
| 182 |
#!/bin/bash
NOW=$(date +%s)
LIMIT=$(expr $NOW + 3600 \* 4 )
# Args check: an optional first argument gives the limit in minutes
if [ $# -eq 1 ]
then
LIMIT=$(expr $1 \* 60 + $NOW)
else
echo "**************************************"
echo "You haven't specified any limit for the stress test. I will set 4 hours as limit. Good luck!"
echo "**************************************"
fi
echo "Each run will have 50 parallel requests."
RUN=0
while [ $(date +%s) -lt $LIMIT ]
do
echo "Run number $RUN"
RES=`bash stress-test.sh info.json 50`
if [ $RES -ne 50 ]
then
echo "ABORTED: RUN number $RUN has failed."
exit -1
else
RUN=$(expr $RUN + 1)
fi
sleep 5
done
|
diegorusso/ictf-framework
|
services/poipoi/test/longrun.sh
|
Shell
|
gpl-2.0
| 610 |
#!/bin/bash
COUCH_URL="https://www.apache.org/dist/couchdb/source/1.6.1/"
COUCH_CONFIG_DIR="/usr/local/etc/couchdb/"
COUCH_NAME="apache-couchdb-1.6.1"
COUCH_TAR="${COUCH_NAME}.tar.gz"
COUCH_DIR="${COUCH_NAME}"
COUCH_ASC="${COUCH_TAR}.asc"
COUCH_DEP="wget make autoconf autoconf-archive automake libtool perl-Test-Harness erlang libicu-devel js-devel curl-devel gcc-c++"
CLEANUP_PKGS="wget make"
# Install epel
yum -y install epel-release;
# Install couchdb dependencies
yum -y install ${COUCH_DEP} && yum clean all
# Download and install couchdb
# * Get the package and its gpgkey
wget "${COUCH_URL}${COUCH_TAR}";
wget "${COUCH_URL}${COUCH_ASC}"
# * Verify the key
echo "*******Verifying the gpgkey*******"
gpg --keyserver pgpkeys.mit.edu --recv-key 04F4EE9B
gpg --verify ${COUCH_ASC} ${COUCH_TAR}
rc=$?
if [ $rc -ne 0 ]; then
exit $rc;
fi
# * Extract and install
tar -xzf ${COUCH_TAR};
cd ${COUCH_DIR};
/bin/sh ./configure --with-erlang=/usr/lib64/erlang/usr/include;
make && make install;
# Add couchdb user and proper file ownerships and permissions
adduser -r --home /usr/local/var/lib/couchdb -M --shell /bin/bash --comment "CouchDB Administrator" couchdb;
chown -R couchdb:couchdb /usr/local/etc/couchdb;
chown -R couchdb:couchdb /usr/local/var/lib/couchdb;
chown -R couchdb:couchdb /usr/local/var/log/couchdb;
chown -R couchdb:couchdb /usr/local/var/run/couchdb;
chmod 0770 /usr/local/etc/couchdb;
chmod 0770 /usr/local/var/lib/couchdb;
chmod 0770 /usr/local/var/log/couchdb;
chmod 0770 /usr/local/var/run/couchdb;
# Configure couchdb to listen at 0.0.0.0
sed -e 's/^bind_address = .*$/bind_address = 0.0.0.0/' -i "${COUCH_CONFIG_DIR}default.ini"
# Cleanup unnecessary stuff
yum -y remove ${CLEANUP_PKGS} && yum clean all;
cd ..;
rm -rf ${COUCH_NAME}*
|
CentOS/CentOS-Dockerfiles
|
couchdb/centos7/install.sh
|
Shell
|
gpl-2.0
| 1,759 |
#!/bin/bash
# Before installing, check the DB settings in:
# judge.conf
# hustoj-read-only/web/include/db_info.inc.php
# and in the defaults below.
# Run this script as root.
#CENTOS/REDHAT/FEDORA WEBBASE=/var/www/html APACHEUSER=apache
WEBBASE=/var/www/html
APACHEUSER=www-data
DBUSER=root
DBPASS=root
printf "Input Database(MySQL) Username:"
read tmp
if test -n "$tmp"
then
DBUSER="$tmp"
fi
printf "Input Database(MySQL) Password:"
read tmp
if test -n "$tmp"
then
DBPASS="$tmp"
fi
#try install tools
if uname -a|grep 'Ubuntu\|Debian'
then
sudo apt-get install make flex g++ clang libmysql++-dev php5 apache2 mysql-server php5-mysql php5-gd php5-cli mono-gmcs subversion
sudo /etc/init.d/mysql start
HTTP_START="sudo /etc/init.d/apache2 restart"
else
sudo yum -y update
sudo yum -y install php httpd php-mysql mysql-server php-xml php-gd gcc-c++ mysql-devel php-mbstring glibc-static flex
sudo /etc/init.d/mysqld start
WEBBASE=/var/www/html
APACHEUSER=apache
HTTP_START="sudo /etc/init.d/httpd restart"
echo "/usr/bin/judged" > judged
fi
sudo svn checkout https://github.com/zhblue/hustoj/trunk/trunk hustoj-read-only
#create user and homedir
sudo /usr/sbin/useradd -m -u 1536 judge
#compile and install the core
cd hustoj-read-only/core/
sudo ./make.sh
cd ../..
#install web and db
sudo cp -R hustoj-read-only/web $WEBBASE/JudgeOnline
sudo chmod -R 771 $WEBBASE/JudgeOnline
sudo chown -R $APACHEUSER $WEBBASE/JudgeOnline
sudo mysql -h localhost -u$DBUSER -p$DBPASS < db.sql
#create work dir set default conf
sudo mkdir /home/judge
sudo mkdir /home/judge/etc
sudo mkdir /home/judge/data
sudo mkdir /home/judge/log
sudo mkdir /home/judge/run0
sudo mkdir /home/judge/run1
sudo mkdir /home/judge/run2
sudo mkdir /home/judge/run3
sudo cp java0.policy judge.conf /home/judge/etc
sudo chown -R judge /home/judge
sudo chgrp -R $APACHEUSER /home/judge/data
sudo chgrp -R root /home/judge/etc /home/judge/run?
sudo chmod 775 /home/judge /home/judge/data /home/judge/etc /home/judge/run?
#update database account
SED_CMD="s/OJ_USER_NAME=root/OJ_USER_NAME=$DBUSER/g"
SED_CMD2="s/OJ_PASSWORD=root/OJ_PASSWORD=$DBPASS/g"
sed $SED_CMD judge.conf|sed $SED_CMD2 >/home/judge/etc/judge.conf
SED_CMD="s/DB_USER=\\\"root\\\"/DB_USER=\\\"$DBUSER\\\"/g"
SED_CMD2="s/DB_PASS=\\\"root\\\"/DB_PASS=\\\"$DBPASS\\\"/g"
sed $SED_CMD hustoj-read-only/web/include/db_info.inc.php|sed $SED_CMD2 >$WEBBASE/JudgeOnline/include/db_info.inc.php
#boot up judged
sudo cp judged /etc/init.d/judged
sudo chmod +x /etc/init.d/judged
sudo ln -s /etc/init.d/judged /etc/rc3.d/S93judged
sudo ln -s /etc/init.d/judged /etc/rc2.d/S93judged
sudo /etc/init.d/judged start
$HTTP_START
if uname -a | grep 'Ubuntu\|Debian'
then
echo "Debian is better :P"
else
chcon -R -t httpd_sys_content_t /home/judge/
chcon -R -t httpd_sys_content_t /var/www/html/
fi
echo "Browse http://127.0.0.1/JudgeOnline to check if the installation is working"
|
wlx65003/HZNUOJ
|
judger/install/install-interactive.sh
|
Shell
|
gpl-3.0
| 2,931 |
# global and local variable scope test
# --output:start
# a: local func
# b: global func
# a: local test
# b: global func
# start func2
# x local
# y global
# x call in func2
# y call in func2
# start func1
# x local
# y call in func2
# x call in func1
# y call in func1
# end func1
# x call in func2
# y call in func1
# end func2
# x local
# y call in func1
# --output:end
a = "local test"
global b = "global test"
func test() {
b = "global func"
a = "local func"
print("a: ", a)
print("b: ", b)
}
test()
print("a: ", a)
print("b: ", b)
global y = "y global"
x = "x local"
func func1() {
print("start func1")
print(x)
print(y)
y = "y call in func1"
x = "x call in func1"
print(x)
print(y)
print("end func1")
}
func func2() {
print("start func2")
print(x)
print(y)
y = "y call in func2"
x = "x call in func2"
print(x)
print(y)
func1()
print(x)
print(y)
print("end func2")
}
func2()
print(x)
print(y)
|
alexst07/seti
|
test/interpreter/lang_basic/global.sh
|
Shell
|
apache-2.0
| 930 |
#!/usr/bin/env bash
set -ex
for f in $(git ls-files -- *.thrift); do
/usr/local/bin/thrift --gen java -out src-gen/ $f
done
|
ilya-klyuchnikov/buck
|
third-party/java/thrift/gen.sh
|
Shell
|
apache-2.0
| 122 |
#!/bin/sh
. /lib/functions.sh
. $dir/functions.sh
# Set dnsmasq config
handle_dhcp() {
if [ -z "${1/cfg[0-9a-fA-F]*/}" ]; then
section_rename dhcp $1 dnsmasq
fi
}
config_load dhcp
config_foreach handle_dhcp dnsmasq
uci batch << EOF
set dhcp.dnsmasq.local="/$profile_suffix/"
set dhcp.dnsmasq.domain="$profile_suffix"
EOF
config_get addnhosts dnsmasq addnhosts
if [ -z "${addnhosts/\var\/etc\/hosts.olsr/}" ]; then
uci add_list dhcp.dnsmasq.addnhosts="/var/etc/hosts.olsr"
if [ "$profile_ipv6" = 1 ]; then
uci add_list dhcp.dnsmasq.addnhosts="/var/etc/hosts.olsr.ipv6"
fi
fi
uci_commitverbose "Setup dnsmasq" dhcp
|
saraedum/luci-packages-old
|
contrib/package/meshwizard/files/usr/bin/meshwizard/helpers/setup_dnsmasq.sh
|
Shell
|
apache-2.0
| 630 |
#!/usr/bin/zsh
setopt extended_glob
# TARGET must be an output directory name or '.'
TARGET=gerbers
PROJNAME=$1
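# Example invocation (hypothetical project name; gerber files are picked up
# from the current directory and moved into ./gerbers):
#   ./rename_gerbers.sh my_board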
# Valid: ITEAD, SEEED, OSHPARK
# Or set the values manually after the conditional
MANUF=OSHPARK
# Manufacturers' file extensions
exts_oshpark=(
'*F_Cu.g*' .GTL
'*F_Mask.g*' .GTS
'*F_SilkS.g*' .GTO
'*In1_Cu.g*' .G2L
'*In2_Cu.g*' .G3L
'*B_Cu.g*' .GBL
'*B_Mask.g*' .GBS
'*B_SilkS.g*' .GBO
'*Edge_Cuts.g*' .GKO
'*Eco1_User.g*' .GKO
'*.drl' .XLN
'*-NPTH.drl' -NPTH.XLN )
exts_itead_seeed=(
'*F_Cu.g*' .GTL
'*F_Mask.g*' .GTS
'*F_SilkS.g*' .GTO
'*In1_Cu.g*' .GL2
'*In2_Cu.g*' .GL3
'*B_Cu.g*' .GBL
'*B_Mask.g*' .GBS
'*B_SilkS.g*' .GBO
'*Edge_Cuts.g*' .GML
'*Eco1_User.g*' .GML
'*.drl' .TXT
'*-NPTH.drl' -NPTH.TXT )
case "$MANUF" in
OSHPARK)
exts=($exts_oshpark)
;;
ITEAD)
exts=($exts_itead_seeed)
;;
SEEED)
exts=($exts_itead_seeed)
;;
esac
mkdir -p "${TARGET}"
for pat ext in $exts; do
file=( $~pat(N) )
if [ -z $file ]; then continue; fi
mv -vf $file ${TARGET}/${PROJNAME}$ext
done
|
WCP52/input_frontend
|
pcb/scripts/rename_gerbers.sh
|
Shell
|
bsd-3-clause
| 1,230 |
#!/bin/bash
FN="TENxBUSData_1.0.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/experiment/src/contrib/TENxBUSData_1.0.0.tar.gz"
"https://bioarchive.galaxyproject.org/TENxBUSData_1.0.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-tenxbusdata/bioconductor-tenxbusdata_1.0.0_src_all.tar.gz"
)
MD5="4b8aaf62fb5eeced06e16140d52f1a1c"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-tenxbusdata/post-link.sh
|
Shell
|
mit
| 1,303 |
#! /bin/sh
# Test nl_langinfo.
# Copyright (C) 2000, 2001, 2003, 2007, 2008 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
common_objpfx=$1
run_program_prefix=$2
# Run the test program.
cat <<"EOF" |
# Only decimal numerical escape sequences allowed in strings.
C ABDAY_1 Sun
C ABDAY_2 Mon
C ABDAY_3 Tue
C ABDAY_4 Wed
C ABDAY_5 Thu
C ABDAY_6 Fri
C ABDAY_7 Sat
C DAY_1 Sunday
C DAY_2 Monday
C DAY_3 Tuesday
C DAY_4 Wednesday
C DAY_5 Thursday
C DAY_6 Friday
C DAY_7 Saturday
C ABMON_1 Jan
C ABMON_2 Feb
C ABMON_3 Mar
C ABMON_4 Apr
C ABMON_5 May
C ABMON_6 Jun
C ABMON_7 Jul
C ABMON_8 Aug
C ABMON_9 Sep
C ABMON_10 Oct
C ABMON_11 Nov
C ABMON_12 Dec
C MON_1 January
C MON_2 February
C MON_3 March
C MON_4 April
C MON_5 May
C MON_6 June
C MON_7 July
C MON_8 August
C MON_9 September
C MON_10 October
C MON_11 November
C MON_12 December
C AM_STR AM
C PM_STR PM
C D_T_FMT "%a %b %e %H:%M:%S %Y"
C D_FMT "%m/%d/%y"
C T_FMT "%H:%M:%S"
C T_FMT_AMPM "%I:%M:%S %p"
C ABDAY_1 Sun
C ABDAY_2 Mon
C ABDAY_3 Tue
C ABDAY_4 Wed
C ABDAY_5 Thu
C ABDAY_6 Fri
C ABDAY_7 Sat
C DAY_1 Sunday
C DAY_2 Monday
C DAY_3 Tuesday
C DAY_4 Wednesday
C DAY_5 Thursday
C DAY_6 Friday
C DAY_7 Saturday
C RADIXCHAR .
C THOUSEP ""
C YESEXPR ^[yY]
C NOEXPR ^[nN]
en_US.ANSI_X3.4-1968 ABMON_1 Jan
en_US.ANSI_X3.4-1968 ABMON_2 Feb
en_US.ANSI_X3.4-1968 ABMON_3 Mar
en_US.ANSI_X3.4-1968 ABMON_4 Apr
en_US.ANSI_X3.4-1968 ABMON_5 May
en_US.ANSI_X3.4-1968 ABMON_6 Jun
en_US.ANSI_X3.4-1968 ABMON_7 Jul
en_US.ANSI_X3.4-1968 ABMON_8 Aug
en_US.ANSI_X3.4-1968 ABMON_9 Sep
en_US.ANSI_X3.4-1968 ABMON_10 Oct
en_US.ANSI_X3.4-1968 ABMON_11 Nov
en_US.ANSI_X3.4-1968 ABMON_12 Dec
en_US.ANSI_X3.4-1968 MON_1 January
en_US.ANSI_X3.4-1968 MON_2 February
en_US.ANSI_X3.4-1968 MON_3 March
en_US.ANSI_X3.4-1968 MON_4 April
en_US.ANSI_X3.4-1968 MON_5 May
en_US.ANSI_X3.4-1968 MON_6 June
en_US.ANSI_X3.4-1968 MON_7 July
en_US.ANSI_X3.4-1968 MON_8 August
en_US.ANSI_X3.4-1968 MON_9 September
en_US.ANSI_X3.4-1968 MON_10 October
en_US.ANSI_X3.4-1968 MON_11 November
en_US.ANSI_X3.4-1968 MON_12 December
en_US.ANSI_X3.4-1968 AM_STR AM
en_US.ANSI_X3.4-1968 PM_STR PM
en_US.ANSI_X3.4-1968 D_T_FMT "%a %d %b %Y %r %Z"
en_US.ANSI_X3.4-1968 D_FMT "%m/%d/%Y"
en_US.ANSI_X3.4-1968 T_FMT "%r"
en_US.ANSI_X3.4-1968 T_FMT_AMPM "%I:%M:%S %p"
en_US.ANSI_X3.4-1968 RADIXCHAR .
en_US.ANSI_X3.4-1968 THOUSEP ,
en_US.ANSI_X3.4-1968 YESEXPR ^[yY].*
en_US.ANSI_X3.4-1968 NOEXPR ^[nN].*
en_US.ISO-8859-1 ABMON_1 Jan
en_US.ISO-8859-1 ABMON_2 Feb
en_US.ISO-8859-1 ABMON_3 Mar
en_US.ISO-8859-1 ABMON_4 Apr
en_US.ISO-8859-1 ABMON_5 May
en_US.ISO-8859-1 ABMON_6 Jun
en_US.ISO-8859-1 ABMON_7 Jul
en_US.ISO-8859-1 ABMON_8 Aug
en_US.ISO-8859-1 ABMON_9 Sep
en_US.ISO-8859-1 ABMON_10 Oct
en_US.ISO-8859-1 ABMON_11 Nov
en_US.ISO-8859-1 ABMON_12 Dec
en_US.ISO-8859-1 MON_1 January
en_US.ISO-8859-1 MON_2 February
en_US.ISO-8859-1 MON_3 March
en_US.ISO-8859-1 MON_4 April
en_US.ISO-8859-1 MON_5 May
en_US.ISO-8859-1 MON_6 June
en_US.ISO-8859-1 MON_7 July
en_US.ISO-8859-1 MON_8 August
en_US.ISO-8859-1 MON_9 September
en_US.ISO-8859-1 MON_10 October
en_US.ISO-8859-1 MON_11 November
en_US.ISO-8859-1 MON_12 December
en_US.ISO-8859-1 AM_STR AM
en_US.ISO-8859-1 PM_STR PM
en_US.ISO-8859-1 D_T_FMT "%a %d %b %Y %r %Z"
en_US.ISO-8859-1 D_FMT "%m/%d/%Y"
en_US.ISO-8859-1 T_FMT "%r"
en_US.ISO-8859-1 T_FMT_AMPM "%I:%M:%S %p"
en_US.ISO-8859-1 RADIXCHAR .
en_US.ISO-8859-1 THOUSEP ,
en_US.ISO-8859-1 YESEXPR ^[yY].*
en_US.ISO-8859-1 NOEXPR ^[nN].*
de_DE.ISO-8859-1 ABDAY_1 So
de_DE.ISO-8859-1 ABDAY_2 Mo
de_DE.ISO-8859-1 ABDAY_3 Di
de_DE.ISO-8859-1 ABDAY_4 Mi
de_DE.ISO-8859-1 ABDAY_5 Do
de_DE.ISO-8859-1 ABDAY_6 Fr
de_DE.ISO-8859-1 ABDAY_7 Sa
de_DE.ISO-8859-1 DAY_1 Sonntag
de_DE.ISO-8859-1 DAY_2 Montag
de_DE.ISO-8859-1 DAY_3 Dienstag
de_DE.ISO-8859-1 DAY_4 Mittwoch
de_DE.ISO-8859-1 DAY_5 Donnerstag
de_DE.ISO-8859-1 DAY_6 Freitag
de_DE.ISO-8859-1 DAY_7 Samstag
de_DE.ISO-8859-1 ABMON_1 Jan
de_DE.ISO-8859-1 ABMON_2 Feb
de_DE.ISO-8859-1 ABMON_3 Mär
de_DE.ISO-8859-1 ABMON_4 Apr
de_DE.ISO-8859-1 ABMON_5 Mai
de_DE.ISO-8859-1 ABMON_6 Jun
de_DE.ISO-8859-1 ABMON_7 Jul
de_DE.ISO-8859-1 ABMON_8 Aug
de_DE.ISO-8859-1 ABMON_9 Sep
de_DE.ISO-8859-1 ABMON_10 Okt
de_DE.ISO-8859-1 ABMON_11 Nov
de_DE.ISO-8859-1 ABMON_12 Dez
de_DE.ISO-8859-1 MON_1 Januar
de_DE.ISO-8859-1 MON_2 Februar
de_DE.ISO-8859-1 MON_3 März
de_DE.ISO-8859-1 MON_4 April
de_DE.ISO-8859-1 MON_5 Mai
de_DE.ISO-8859-1 MON_6 Juni
de_DE.ISO-8859-1 MON_7 Juli
de_DE.ISO-8859-1 MON_8 August
de_DE.ISO-8859-1 MON_9 September
de_DE.ISO-8859-1 MON_10 Oktober
de_DE.ISO-8859-1 MON_11 November
de_DE.ISO-8859-1 MON_12 Dezember
de_DE.ISO-8859-1 D_T_FMT "%a %d %b %Y %T %Z"
de_DE.ISO-8859-1 D_FMT "%d.%m.%Y"
de_DE.ISO-8859-1 T_FMT "%T"
de_DE.ISO-8859-1 RADIXCHAR ,
de_DE.ISO-8859-1 THOUSEP .
de_DE.ISO-8859-1 YESEXPR ^[jJyY].*
de_DE.ISO-8859-1 NOEXPR ^[nN].*
de_DE.UTF-8 ABDAY_1 So
de_DE.UTF-8 ABDAY_2 Mo
de_DE.UTF-8 ABDAY_3 Di
de_DE.UTF-8 ABDAY_4 Mi
de_DE.UTF-8 ABDAY_5 Do
de_DE.UTF-8 ABDAY_6 Fr
de_DE.UTF-8 ABDAY_7 Sa
de_DE.UTF-8 DAY_1 Sonntag
de_DE.UTF-8 DAY_2 Montag
de_DE.UTF-8 DAY_3 Dienstag
de_DE.UTF-8 DAY_4 Mittwoch
de_DE.UTF-8 DAY_5 Donnerstag
de_DE.UTF-8 DAY_6 Freitag
de_DE.UTF-8 DAY_7 Samstag
de_DE.UTF-8 ABMON_1 Jan
de_DE.UTF-8 ABMON_2 Feb
de_DE.UTF-8 ABMON_3 Mär
de_DE.UTF-8 ABMON_4 Apr
de_DE.UTF-8 ABMON_5 Mai
de_DE.UTF-8 ABMON_6 Jun
de_DE.UTF-8 ABMON_7 Jul
de_DE.UTF-8 ABMON_8 Aug
de_DE.UTF-8 ABMON_9 Sep
de_DE.UTF-8 ABMON_10 Okt
de_DE.UTF-8 ABMON_11 Nov
de_DE.UTF-8 ABMON_12 Dez
de_DE.UTF-8 MON_1 Januar
de_DE.UTF-8 MON_2 Februar
de_DE.UTF-8 MON_3 März
de_DE.UTF-8 MON_4 April
de_DE.UTF-8 MON_5 Mai
de_DE.UTF-8 MON_6 Juni
de_DE.UTF-8 MON_7 Juli
de_DE.UTF-8 MON_8 August
de_DE.UTF-8 MON_9 September
de_DE.UTF-8 MON_10 Oktober
de_DE.UTF-8 MON_11 November
de_DE.UTF-8 MON_12 Dezember
de_DE.UTF-8 D_T_FMT "%a %d %b %Y %T %Z"
de_DE.UTF-8 D_FMT "%d.%m.%Y"
de_DE.UTF-8 T_FMT "%T"
de_DE.UTF-8 RADIXCHAR ,
de_DE.UTF-8 THOUSEP .
de_DE.UTF-8 YESEXPR ^[jJyY].*
de_DE.UTF-8 NOEXPR ^[nN].*
fr_FR.ISO-8859-1 ABDAY_1 dim.
fr_FR.ISO-8859-1 ABDAY_2 lun.
fr_FR.ISO-8859-1 ABDAY_3 mar.
fr_FR.ISO-8859-1 ABDAY_4 mer.
fr_FR.ISO-8859-1 ABDAY_5 jeu.
fr_FR.ISO-8859-1 ABDAY_6 ven.
fr_FR.ISO-8859-1 ABDAY_7 sam.
fr_FR.ISO-8859-1 DAY_1 dimanche
fr_FR.ISO-8859-1 DAY_2 lundi
fr_FR.ISO-8859-1 DAY_3 mardi
fr_FR.ISO-8859-1 DAY_4 mercredi
fr_FR.ISO-8859-1 DAY_5 jeudi
fr_FR.ISO-8859-1 DAY_6 vendredi
fr_FR.ISO-8859-1 DAY_7 samedi
fr_FR.ISO-8859-1 ABMON_1 janv.
fr_FR.ISO-8859-1 ABMON_2 févr.
fr_FR.ISO-8859-1 ABMON_3 mars
fr_FR.ISO-8859-1 ABMON_4 avril
fr_FR.ISO-8859-1 ABMON_5 mai
fr_FR.ISO-8859-1 ABMON_6 juin
fr_FR.ISO-8859-1 ABMON_7 juil.
fr_FR.ISO-8859-1 ABMON_8 août
fr_FR.ISO-8859-1 ABMON_9 sept.
fr_FR.ISO-8859-1 ABMON_10 oct.
fr_FR.ISO-8859-1 ABMON_11 nov.
fr_FR.ISO-8859-1 ABMON_12 déc.
fr_FR.ISO-8859-1 MON_1 janvier
fr_FR.ISO-8859-1 MON_2 février
fr_FR.ISO-8859-1 MON_3 mars
fr_FR.ISO-8859-1 MON_4 avril
fr_FR.ISO-8859-1 MON_5 mai
fr_FR.ISO-8859-1 MON_6 juin
fr_FR.ISO-8859-1 MON_7 juillet
fr_FR.ISO-8859-1 MON_8 août
fr_FR.ISO-8859-1 MON_9 septembre
fr_FR.ISO-8859-1 MON_10 octobre
fr_FR.ISO-8859-1 MON_11 novembre
fr_FR.ISO-8859-1 MON_12 décembre
fr_FR.ISO-8859-1 D_T_FMT "%a %d %b %Y %T %Z"
fr_FR.ISO-8859-1 D_FMT "%d/%m/%Y"
fr_FR.ISO-8859-1 T_FMT "%T"
fr_FR.ISO-8859-1 RADIXCHAR ,
fr_FR.ISO-8859-1 THOUSEP " "
fr_FR.ISO-8859-1 YESEXPR ^[oOyY].*
fr_FR.ISO-8859-1 NOEXPR ^[nN].*
ja_JP.EUC-JP ABDAY_1 Æü
ja_JP.EUC-JP ABDAY_2 ·î
ja_JP.EUC-JP ABDAY_3 ²Ð
ja_JP.EUC-JP ABDAY_4 ¿å
ja_JP.EUC-JP ABDAY_5 ÌÚ
ja_JP.EUC-JP ABDAY_6 ¶â
ja_JP.EUC-JP ABDAY_7 ÅÚ
ja_JP.EUC-JP DAY_1 ÆüÍËÆü
ja_JP.EUC-JP DAY_2 ·îÍËÆü
ja_JP.EUC-JP DAY_3 ²ÐÍËÆü
ja_JP.EUC-JP DAY_4 ¿åÍËÆü
ja_JP.EUC-JP DAY_5 ÌÚÍËÆü
ja_JP.EUC-JP DAY_6 ¶âÍËÆü
ja_JP.EUC-JP DAY_7 ÅÚÍËÆü
ja_JP.EUC-JP ABMON_1 " 1·î"
ja_JP.EUC-JP ABMON_2 " 2·î"
ja_JP.EUC-JP ABMON_3 " 3·î"
ja_JP.EUC-JP ABMON_4 " 4·î"
ja_JP.EUC-JP ABMON_5 " 5·î"
ja_JP.EUC-JP ABMON_6 " 6·î"
ja_JP.EUC-JP ABMON_7 " 7·î"
ja_JP.EUC-JP ABMON_8 " 8·î"
ja_JP.EUC-JP ABMON_9 " 9·î"
ja_JP.EUC-JP ABMON_10 "10·î"
ja_JP.EUC-JP ABMON_11 "11·î"
ja_JP.EUC-JP ABMON_12 "12·î"
ja_JP.EUC-JP MON_1 "1·î"
ja_JP.EUC-JP MON_2 "2·î"
ja_JP.EUC-JP MON_3 "3·î"
ja_JP.EUC-JP MON_4 "4·î"
ja_JP.EUC-JP MON_5 "5·î"
ja_JP.EUC-JP MON_6 "6·î"
ja_JP.EUC-JP MON_7 "7·î"
ja_JP.EUC-JP MON_8 "8·î"
ja_JP.EUC-JP MON_9 "9·î"
ja_JP.EUC-JP MON_10 "10·î"
ja_JP.EUC-JP MON_11 "11·î"
ja_JP.EUC-JP MON_12 "12·î"
ja_JP.EUC-JP T_FMT_AMPM "%p%I»þ%Mʬ%SÉÃ"
ja_JP.EUC-JP ERA_D_FMT "%EY%m·î%dÆü"
ja_JP.EUC-JP ERA_D_T_FMT "%EY%m·î%dÆü %H»þ%Mʬ%SÉÃ"
ja_JP.EUC-JP RADIXCHAR .
ja_JP.EUC-JP THOUSEP ,
ja_JP.EUC-JP YESEXPR ^([yY£ù£Ù]|¤Ï¤¤|¥Ï¥¤)
ja_JP.EUC-JP NOEXPR ^([nN£î£Î]|¤¤¤¤¤¨|¥¤¥¤¥¨)
# Is CRNCYSTR supposed to be the national or international sign?
# ja_JP.EUC-JP CRNCYSTR JPY
ja_JP.EUC-JP CODESET EUC-JP
EOF
LOCPATH=${common_objpfx}localedata GCONV_PATH=${common_objpfx}iconvdata \
LC_ALL=tt_TT ${run_program_prefix} \
${common_objpfx}localedata/tst-langinfo \
> ${common_objpfx}localedata/tst-langinfo.out
exit $?
|
stephenR/glibc-fpp
|
localedata/tst-langinfo.sh
|
Shell
|
gpl-2.0
| 13,539 |
#!/bin/sh
python manage.py collectstatic --noinput --settings=dkobo.settings
grunt build_all
npm install yuglify
python manage.py compress --settings=dkobo.settings
mkdir -p jsapp/CACHE
cp -R jsapp/components/fontawesome/fonts jsapp/CACHE/fonts
python manage.py collectstatic --noinput --settings=dkobo.settings
|
jomolinare/dkobo
|
scripts/collectstatic.sh
|
Shell
|
agpl-3.0
| 312 |
#!/bin/bash -x
set -euo pipefail
IFS=$'\n\t'
# Test scripts run with PWD=tests/..
# The test harness exports some variables into the environment during
# testing: PYTHONPATH (python module import path)
# WORK_DIR (a directory that is safe to modify)
# DOCKER (the docker executable location)
# ATOMIC (an invocation of 'atomic' which measures code coverage)
# SECRET (a generated sha256 hash inserted into test containers)
# In addition, the test harness creates some images for use in testing.
# See tests/test-images/
IMAGE="atomic-test-4"
ID=`${DOCKER} inspect ${IMAGE} | grep '"Id"' | cut -f4 --delimiter=\"`
setup () {
# Perform setup routines here.
${DOCKER} tag ${IMAGE} foobar/${IMAGE}:latest
}
teardown () {
# Cleanup your test data.
set +e
${DOCKER} rmi foobar/${IMAGE}:latest
set -e
}
# Utilize exit traps for cleanup wherever possible. Additional cleanup
# logic can be added to a "cleanup stack", by cascading function calls
# within traps. See tests/integration/test_mount.sh for an example.
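# Illustration only (hypothetical, not used by this test): such a cleanup
# stack can be built by wrapping the existing teardown in another function
# and pointing the trap at the wrapper, e.g.
#   cleanup_extra () { rm -f "${WORK_DIR}/scratch-file"; }
#   cleanup_all ()   { cleanup_extra; teardown; }
#   trap cleanup_all EXIT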
trap teardown EXIT
setup
rc=0
${ATOMIC} verify ${ID} 1>/dev/null || rc=$?
if [[ ${rc} != 1 ]]; then
# Test failed
echo "This test should result in a return code of 1"
exit 1
fi
|
rh-atomic-bot/atomic
|
tests/integration/test_verify.sh
|
Shell
|
lgpl-2.1
| 1,283 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export TERM=${TERM:-dumb}
export PAGER=cat
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$SCRIPTDIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
export GEODE_BUILD=~/geode
export CALLSTACKS_DIR=${GEODE_BUILD}/callstacks
#SLEEP_TIME is in seconds
PARALLEL_DUNIT=${1}
SLEEP_TIME=${2}
COUNT=3
STACK_INTERVAL=5
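# Example invocation (hypothetical argument values): any non-empty first
# argument enables the parallel-dunit/docker path, the second argument is
# the delay in seconds before stacks are captured:
#   ./capture-call-stacks.sh true 300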
if [[ -z "${PARALLEL_DUNIT}" ]]; then
echo "PARALLEL_DUNIT must be set. exiting..."
exit 1
fi
if [[ -z "${SLEEP_TIME}" ]]; then
echo "SLEEP_TIME must be set. exiting..."
exit 1
fi
mkdir -p ${CALLSTACKS_DIR}
sleep ${SLEEP_TIME}
echo "Capturing call stacks"
for (( h=0; h<${COUNT}; h++)); do
today=`date +%Y-%m-%d-%H-%M-%S`
logfile=${CALLSTACKS_DIR}/callstacks-${today}.txt
if [ -n "${PARALLEL_DUNIT}" ]; then
mapfile -t containers < <(docker ps --format '{{.Names}}')
for (( i=0; i<${#containers[@]}; i++ )); do
echo "Container: ${containers[i]}" | tee -a ${logfile};
[ -x $JAVA_HOME/bin/jps ] && JPS=$JAVA_HOME/bin/jps || JPS=jps
mapfile -t processes < <(docker exec ${containers[i]} ${JPS} | cut -d ' ' -f 1)
echo "Got past processes."
for ((j=0; j<${#processes[@]}; j++ )); do
echo "********* Dumping stack for process ${processes[j]}:" | tee -a ${logfile}
docker exec ${containers[i]} /bin/bash -c '[ -x $JAVA_HOME/bin/jstack ] && JSTACK=$JAVA_HOME/bin/jstack || JSTACK=jstack; $JSTACK -l '"${processes[j]}" >> ${logfile}
done
done
else
mapfile -t processes < <(jps | cut -d ' ' -f 1)
echo "Got past processes."
[ -x $JAVA_HOME/bin/jstack ] && JSTACK=$JAVA_HOME/bin/jstack || JSTACK=jstack
for ((j=0; j<${#processes[@]}; j++ )); do
echo "********* Dumping stack for process ${processes[j]}:" | tee -a ${logfile}
$JSTACK -l ${processes[j]} >> ${logfile}
done
fi
sleep ${STACK_INTERVAL}
done
echo "Checking progress files:"
if [ -n "${PARALLEL_DUNIT}" ]; then
mapfile -t progressfiles < <(find ${GEODE_BUILD} -name "*-progress.txt")
for (( i=0; i<${#progressfiles[@]}; i++)); do
echo "Checking progress file: ${progressfiles[i]}"
/usr/local/bin/dunit-progress hang ${progressfiles[i]} | tee -a ${CALLSTACKS_DIR}/dunit-hangs.txt
done
fi
|
davebarnes97/geode
|
ci/scripts/capture-call-stacks.sh
|
Shell
|
apache-2.0
| 3,456 |
#!/bin/bash
########################################################################
#
# Linux on Hyper-V and Azure Test Code, ver. 1.0.0
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
########################################################################
# Description:
# Basic SR-IOV test that checks if VF can send and receive multicast packets
#
# Steps:
# Use Ping
# On the 2nd VM: ping -I eth1 224.0.0.1 -c 11 > out.client &
# On the TEST VM: ping -I eth1 224.0.0.1 -c 11 > out.client
# Check results:
# On the TEST VM: cat out.client | grep 0%
# On the 2nd VM: cat out.client | grep 0%
# If both have 0% packet loss, test passed
################################################################################
# Convert eol
dos2unix SR-IOV_Utils.sh
# Source SR-IOV_Utils.sh. This is the script that contains all the
# SR-IOV basic functions (checking drivers, checking VFs, assigning IPs)
. SR-IOV_Utils.sh || {
echo "ERROR: unable to source SR-IOV_Utils.sh!"
echo "TestAborted" > state.txt
exit 2
}
# Check the parameters in constants.sh
Check_SRIOV_Parameters
if [ $? -ne 0 ]; then
msg="ERROR: The necessary parameters are not present in constants.sh. Please check the xml test file"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
# Check if the SR-IOV driver is in use
VerifyVF
if [ $? -ne 0 ]; then
msg="ERROR: VF is not loaded! Make sure you are using compatible hardware"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
UpdateSummary "VF is present on VM!"
# Set static IP to the VF
ConfigureVF
if [ $? -ne 0 ]; then
msg="ERROR: Could not set a static IP to eth1!"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
LogMsg "INFO: All configuration completed successfully. Will proceed with the testing"
# Configure VM1
#ifconfig eth1 allmulti
ip link set dev eth1 allmulticast on
if [ $? -ne 0 ]; then
msg="ERROR: Could not enable ALLMULTI option on VM1!"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateAborted
exit 1
fi
# Configure VM2
ssh -i "$HOME"/.ssh/"$sshKey" -o StrictHostKeyChecking=no "$REMOTE_USER"@"$VF_IP2" "ip link set dev eth1 allmulticast on"
if [ $? -ne 0 ]; then
msg="ERROR: Could not enable ALLMULTI option on VM2!"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateAborted
exit 1
fi
ssh -i "$HOME"/.ssh/"$sshKey" -o StrictHostKeyChecking=no "$REMOTE_USER"@"$VF_IP2" "echo '1' > /proc/sys/net/ipv4/ip_forward"
if [ $? -ne 0 ]; then
msg="ERROR: Could not enable IP Forwarding on VM2!"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
ssh -i "$HOME"/.ssh/"$sshKey" -o StrictHostKeyChecking=no "$REMOTE_USER"@"$VF_IP2" "ip route add 224.0.0.0/4 dev eth1"
if [ $? -ne 0 ]; then
msg="ERROR: Could not add new route to Routing Table on VM2!"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
ssh -i "$HOME"/.ssh/"$sshKey" -o StrictHostKeyChecking=no "$REMOTE_USER"@"$VF_IP2" "echo '0' > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts"
if [ $? -ne 0 ]; then
msg="ERROR: Could not enable broadcast listening on VM2!"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
# Multicast testing
ssh -i "$HOME"/.ssh/"$sshKey" -o StrictHostKeyChecking=no "$REMOTE_USER"@"$VF_IP2" "ping -I eth1 224.0.0.1 -c 11 > out.client &"
if [ $? -ne 0 ]; then
msg="ERROR: Could not start ping on VM2 (VF_IP: ${VF_IP2})"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
ping -I eth1 224.0.0.1 -c 11 > out.client
if [ $? -ne 0 ]; then
msg="ERROR: Could not start ping on VM1 (VF_IP: ${VF_IP1})"
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
exit 1
fi
LogMsg "INFO: Ping was started on both VMs. Results will be checked in a few seconds"
sleep 5
# Check results - Summary must show a 0% loss of packets
multicastSummary=$(cat out.client | grep 0%)
if [ $? -ne 0 ]; then
msg="ERROR: VM1 shows that packets were lost!"
LogMsg "$msg"
LogMsg "${multicastSummary}"
UpdateSummary "$msg"
UpdateSummary "${multicastSummary}"
SetTestStateFailed
fi
LogMsg "Multicast summary"
LogMsg "${multicastSummary}"
msg="Multicast packets were successfully sent, 0% loss"
LogMsg $msg
UpdateSummary "$msg"
SetTestStateCompleted
|
adriansuhov/lis-test
|
WS2012R2/lisa/remote-scripts/ica/SR-IOV_Multicast.sh
|
Shell
|
apache-2.0
| 5,017 |
# Script to generate the documentation.
ln -s ../src/inc/liblogger
ln -s ../src/testapp
doxygen
|
Kudo/liblogger
|
docs/gendoc.sh
|
Shell
|
apache-2.0
| 96 |
#!/bin/bash
# assumes this script (config.sh) lives in "${JAMR_HOME}/scripts/"
export JAMR_HOME="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." > /dev/null && pwd )"
export CLASSPATH=".:${JAMR_HOME}/target/scala-2.10/jamr-assembly-0.1-SNAPSHOT.jar"
# change the following environment variables for your configuration
export CDEC="${JAMR_HOME}/tools/cdec"
export ILLINOIS_NER="${JAMR_HOME}/tools/IllinoisNerExtended"
export ILLINOIS_NER_JAR="${ILLINOIS_NER}/target/IllinoisNerExtended-2.7.jar"
export WNHOME="${JAMR_HOME}/tools/WordNet-3.0"
export SCALA="${JAMR_HOME}/tools/scala-2.11.2/bin/scala"
export SMATCH="${JAMR_HOME}/scripts/smatch_v1_0/smatch_modified.py"
export TRAIN_FILE="${JAMR_HOME}/data/LDC2014E41_DEFT_Phase_1_AMR_Annotation_R4/data/split/training/training.txt"
export DEV_FILE="${JAMR_HOME}/data/LDC2014E41_DEFT_Phase_1_AMR_Annotation_R4/data/split/dev/dev.txt"
export TEST_FILE="${JAMR_HOME}/data/LDC2014E41_DEFT_Phase_1_AMR_Annotation_R4/data/split/test/test.txt"
export MODEL_DIR="${JAMR_HOME}/models/ACL2014_LDC2014E41" # ideally keep this the same as the config_SOMETHING.sh
# The options specified below will override any options specified in the scripts
# CONCEPT_ID_TRAINING_OPTIONS and RELATION_ID_TRAINING_OPTIONS will override PARSER_OPTIONS
export PARSER_OPTIONS="
--stage1-features bias,length,fromNERTagger,conceptGivenPhrase
--stage2-decoder LR
--stage2-features rootConcept,rootDependencyPathv1,bias,typeBias,self,fragHead,edgeCount,distance,logDistance,posPathv3,dependencyPathv4,conceptBigram
--stage2-labelset ${JAMR_HOME}/resources/labelset-r4
--output-format AMR,nodes,edges,root
--ignore-parser-errors
--print-stack-trace-on-errors
"
export CONCEPT_ID_TRAINING_OPTIONS="
--training-optimizer Adagrad
--training-passes 10
--training-stepsize 1
-v 1
"
export RELATION_ID_TRAINING_OPTIONS="
--training-optimizer Adagrad
--training-passes 7
--training-save-interval 1
"
|
hopshackle/wordAlignment
|
scripts/config_ACL2014_LDC2014E41.sh
|
Shell
|
bsd-2-clause
| 1,968 |
#!/bin/bash
FN="BSgenome.Dmelanogaster.UCSC.dm2.masked_1.3.99.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.9/data/annotation/src/contrib/BSgenome.Dmelanogaster.UCSC.dm2.masked_1.3.99.tar.gz"
"https://bioarchive.galaxyproject.org/BSgenome.Dmelanogaster.UCSC.dm2.masked_1.3.99.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bsgenome.dmelanogaster.ucsc.dm2.masked/bioconductor-bsgenome.dmelanogaster.ucsc.dm2.masked_1.3.99_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bsgenome.dmelanogaster.ucsc.dm2.masked/bioconductor-bsgenome.dmelanogaster.ucsc.dm2.masked_1.3.99_src_all.tar.gz"
)
MD5="ae7ca3053e97a0d6bd20bc2b9635ecce"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
mdehollander/bioconda-recipes
|
recipes/bioconductor-bsgenome.dmelanogaster.ucsc.dm2.masked/post-link.sh
|
Shell
|
mit
| 1,612 |
#!/bin/sh
# Setup script for OSSEC UI
# Author: Daniel B. Cid <[email protected]>
# Finding my location
LOCAL=`dirname $0`;
cd $LOCAL
PWD=`pwd`
ERRORS=0;
# Looking for echo -n
ECHO="echo -n"
hs=`echo -n "a"`
if [ ! "X$hs" = "Xa" ]; then
ls "/usr/ucb/echo" > /dev/null 2>&1
if [ $? = 0 ]; then
ECHO="/usr/ucb/echo -n"
else
ECHO=echo
fi
fi
# Looking for htpasswd
HTPWDCMD="htpasswd"
ls "`which $HTPWDCMD`" > /dev/null 2>&1
if [ ! $? = 0 ]; then
HTPWDCMD="htpasswd2"
ls "`which $HTPWDCMD`" > /dev/null 2>&1
if [ ! $? = 0 ]; then
HTPWDCMD=""
fi
fi
# Default options
HT_DIR_ACCESS="deny from all"
HT_FLZ_ACCESS="AuthUserFile $PWD/.htpasswd"
HT_DEFAULT="htaccess_def.txt"
echo "Setting up ossec ui..."
echo ""
ls $HT_DEFAULT > /dev/null 2>&1
if [ ! $? = 0 ]; then
echo "** ERROR: Could not find '$HT_DEFAULT'. Unable to continue."
ERRORS=1;
fi
# 1- Create .htaccess blocking access to private directories.
PRIV_DIRS="site lib tmp"
mkdir tmp >/dev/null 2>&1
chmod 777 tmp
for i in $PRIV_DIRS; do
echo $HT_DIR_ACCESS > ./$i/.htaccess;
done
# 2- Create .htaccess blocking access to .sh and config files.
echo $HT_FLZ_ACCESS > ./.htaccess;
echo "" >> ./.htaccess;
cat $HT_DEFAULT >> ./.htaccess;
# 3- Create password
while [ 1 ]; do
if [ "X$MY_USER" = "X" ]; then
$ECHO "Username: "
read MY_USER;
else
break;
fi
done
if [ "X$HTPWDCMD" = "X" ]; then
echo "** ERROR: Could not find htpasswd. No password set."
ERRORS=1;
else
$HTPWDCMD -c $PWD/.htpasswd $MY_USER
if [ ! $? = 0 ]; then
ERRORS=1;
fi
fi
if [ $ERRORS = 0 ]; then
echo ""
echo "Setup completed successfuly."
else
echo ""
echo "Setup failed to complete."
fi
|
TheDarren/ossecweb-stanford
|
webui/setup.sh
|
Shell
|
mit
| 1,824 |
#!/bin/bash
FN="IlluminaHumanMethylation450kprobe_2.0.6.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.8/data/annotation/src/contrib/IlluminaHumanMethylation450kprobe_2.0.6.tar.gz"
"https://bioarchive.galaxyproject.org/IlluminaHumanMethylation450kprobe_2.0.6.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-illuminahumanmethylation450kprobe/bioconductor-illuminahumanmethylation450kprobe_2.0.6_src_all.tar.gz"
)
MD5="84c31861fcbaddbf2a9c500b8d8d767d"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
bioconda/recipes
|
recipes/bioconductor-illuminahumanmethylation450kprobe/post-link.sh
|
Shell
|
mit
| 1,419 |
#!/bin/sh
srcdir=${srcdir:-.}
oIFS="$IFS"
IFS='
'
secno=0
testno=0
comment=0
ecode=0
test -f ${srcdir}/cql2xcqlsample || exit 1
test -d cql || mkdir cql
for f in `cat ${srcdir}/cql2xcqlsample`; do
if echo $f | grep '^#' >/dev/null; then
comment=1
else
if test "$comment" = "1"; then
secno=`expr $secno + 1`
testno=0
fi
comment=0
testno=`expr $testno + 1`
OUT1=${srcdir}/cql/$secno.$testno.out
ERR1=${srcdir}/cql/$secno.$testno.err
OUT2=cql/$secno.$testno.out.tmp
ERR2=cql/$secno.$testno.err.tmp
DIFF=cql/$secno.$testno.diff
../util/cql2xcql "$f" >$OUT2 2>$ERR2
if test -f $OUT1 -a -f $ERR1; then
if diff $OUT1 $OUT2 >$DIFF; then
rm $DIFF
rm $OUT2
else
echo "diff out $secno $testno $f"
cat $DIFF
ecode=1
fi
if diff $ERR1 $ERR2 >$DIFF; then
rm $DIFF
rm $ERR2
else
echo "diff err $secno $testno $f"
cat $DIFF
ecode=1
fi
else
echo "making test $secno $testno $f"
mv $OUT2 $OUT1
mv $ERR2 $ERR1
ecode=1
fi
fi
done
IFS="$oIFS"
exit $ecode
|
ysuarez/Evergreen-Customizations
|
yaz-4.2.32/test/test_cql2xcql.sh
|
Shell
|
gpl-2.0
| 1,044 |
#!/bin/bash
prog="valgrind -q ../src/pedis"
samples=../support_files/samples/*
n=0
err=0
for sample in $samples; do
echo -e "\n$sample"
func=$(../src/readpe -f csv -h optional "$sample" | grep Entry | cut -d, -f2)
for format in text csv xml html; do
$prog -f $format -F $func $sample || let err++
done
let n++
done
echo "$n samples analyzed. $err errors." > /dev/fd/2
|
diogoleal/pev
|
tests/valgrind_pedis.sh
|
Shell
|
gpl-3.0
| 383 |
#!/bin/sh
# Ensure that md5sum prints each checksum atomically
# so that concurrent md5sums don't intersperse their output
# Copyright (C) 2009-2015 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ md5sum
(mkdir tmp && cd tmp && seq 500 | xargs touch)
# This will output at least 16KiB per process
# and start 3 processes, with 2 running concurrently,
# which triggers often on Fedora 11 at least.
(find tmp tmp tmp -type f | xargs -n500 -P2 md5sum) |
sed -n '/[0-9a-f]\{32\} /!p' |
grep . > /dev/null && fail=1
Exit $fail
|
andreas-gruenbacher/coreutils
|
tests/misc/md5sum-parallel.sh
|
Shell
|
gpl-3.0
| 1,206 |
#!/bin/bash
# Copyright (C) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
if [[ "$*" == "id" ]]; then
# Have a way to determine the uid under which volume mounts get mounted
stat -c "%u" /minishift-docs
elif [ "$(id -u)" = '0' ]; then
echo "Image cannot be run as root. Use -u option of 'docker run' to specify a uid."
exit -1
else
BUNDLER_HOME="/tmp/bundler/home"
mkdir -p $BUNDLER_HOME
cd $DOCS_CONTENT && HOME="$BUNDLER_HOME" /usr/local/bin/rake "$@"
fi
|
jimmidyson/minishift
|
docs/docker-entrypoint.sh
|
Shell
|
apache-2.0
| 999 |
#!/bin/bash
# Copyright © 2016-2017 Zetok Zalbavar <[email protected]>
# Copyright © 2019 by The qTox Project Contributors
#
# This file is part of qTox, a Qt-based graphical interface for Tox.
# qTox is libre software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qTox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qTox. If not, see <http://www.gnu.org/licenses/>
# script to change qTox version in `.nsi` files to supplied one
#
# requires:
# * files `qtox.nsi` and `qtox64.nsi` in working dir
# * GNU sed
# usage:
#
# ./$script $version
#
# $version has to be composed of at least one number/dot
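#
# for example (with a hypothetical version number, not tied to any release):
#
#   ./qtox-nsi-version.sh 1.2.3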
set -eu -o pipefail
# change version in .nsi files in the right line
change_version() {
for nsi in *.nsi
do
sed -i -r "/DisplayVersion/ s/\"[0-9\\.]+\"$/\"$@\"/" "$nsi"
done
}
# exit if supplied arg is not a version
is_version() {
if [[ ! $@ =~ [0-9\\.]+ ]]
then
echo "Not a version: $@"
exit 1
fi
}
main() {
is_version "$@"
change_version "$@"
}
main "$@"
|
sudden6/qTox
|
windows/qtox-nsi-version.sh
|
Shell
|
gpl-3.0
| 1,497 |
#!/usr/bin/env bash
if [[ ${RUN_PHPCS} == 1 ]]; then
CHANGED_FILES=`git diff --name-only --diff-filter=ACMR $TRAVIS_COMMIT_RANGE | grep \\\\.php | awk '{print}' ORS=' '`
IGNORE="tests/cli/,apigen/,includes/gateways/simplify-commerce/includes/,includes/libraries/,includes/api/legacy/"
if [ "$CHANGED_FILES" != "" ]; then
echo "Running Code Sniffer."
./vendor/bin/phpcs --ignore=$IGNORE --encoding=utf-8 -n -p $CHANGED_FILES
fi
fi
|
DevinWalker/woocommerce
|
tests/bin/phpcs.sh
|
Shell
|
gpl-3.0
| 440 |
#!/usr/bin/env bash
# find voltdb binaries
if [ -e ../../bin/voltdb ]; then
# assume this is the examples folder for a kit
VOLTDB_BIN="$(dirname $(dirname $(pwd)))/bin"
elif [ -n "$(which voltdb 2> /dev/null)" ]; then
# assume we're using voltdb from the path
VOLTDB_BIN=$(dirname "$(which voltdb)")
else
echo "Unable to find VoltDB installation."
echo "Please add VoltDB's bin directory to your path."
exit -1
fi
# call script to set up paths, including
# java classpaths and binary paths
source $VOLTDB_BIN/voltenv
# leader host for startup purposes only
# (once running, all nodes are the same -- no leaders)
STARTUPLEADERHOST="localhost"
# list of cluster nodes separated by commas in host:[port] format
SERVERS="localhost"
# remove binaries, logs, runtime artifacts, etc... but keep the jars
function clean() {
rm -rf voltdbroot log procedures/geospatial/*.class client/geospatial/*.class
}
# remove everything from "clean" as well as the jarfiles
function cleanall() {
clean
rm -rf geospatial-procs.jar geospatial-client.jar
}
# compile the source code for procedures and the client into jarfiles
function jars() {
# compile java source
javac -classpath $APPCLASSPATH procedures/geospatial/*.java
javac -classpath $CLIENTCLASSPATH client/geospatial/*.java
# build procedure and client jars
jar cf geospatial-procs.jar -C procedures geospatial
jar cf geospatial-client.jar -C client geospatial
# remove compiled .class files
rm -rf procedures/geospatial/*.class client/geospatial/*.class
}
# compile the procedure and client jarfiles if they don't exist
function jars-ifneeded() {
if [ ! -e geospatial-procs.jar ] || [ ! -e geospatial-client.jar ]; then
jars;
fi
}
# load schema, procedures, and static data
function init() {
jars-ifneeded
sqlcmd < ddl.sql
csvloader -f advertisers.csv advertisers
}
# Init to directory voltdbroot
function voltinit-ifneeded() {
voltdb init --force
}
# run the voltdb server locally
function server() {
voltinit-ifneeded
voltdb start -H $STARTUPLEADERHOST
}
# run this target to see what command line options the client offers
function client-help() {
jars-ifneeded
java -classpath geospatial-client.jar:$CLIENTCLASSPATH \
geospatial.AdBrokerBenchmark --help
}
# run the client that drives the example
function client() {
jars-ifneeded
java -classpath geospatial-client.jar:$CLIENTCLASSPATH geospatial.AdBrokerBenchmark
}
function help() {
echo "Usage: ./run.sh {clean|cleanall|jars|server|init|client|client-help}"
}
# Run the targets passed on the command line
# If no first arg, run server
if [ $# -eq 0 ]; then server; exit; fi
for arg in "$@"
do
echo "${0}: Performing $arg..."
$arg
done
|
deerwalk/voltdb
|
examples/geospatial/run.sh
|
Shell
|
agpl-3.0
| 2,791 |
#! /bin/sh
#
# Copyright (c) 2015 Red Hat.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Displays stats from the shell ping Performance Co-Pilot domain agent.
#
. $PCP_DIR/etc/pcp.env
sts=2
progname=`basename $0`
tmp=`mktemp -d /tmp/pcp.XXXXXXXXX` || exit 1
trap "rm -rf $tmp; exit \$sts" 0 1 2 3 15
_usage()
{
[ ! -z "$@" ] && echo $@ 1>&2
pmgetopt --progname=$progname --usage --config=$tmp/usage
exit 1
}
cat > $tmp/usage << EOF
# Usage: [options] tag...
Options:
-l,--tags report the list of shell-ping command tags
-c,--status report shell-ping service check control variables
--help
# end
EOF
cflag=false
lflag=false
ARGS=`pmgetopt --progname=$progname --config=$tmp/usage -- "$@"`
[ $? != 0 ] && exit 1
eval set -- "$ARGS"
while [ $# -gt 0 ]
do
case "$1" in
-c)
cflag=true
;;
-l)
lflag=true
;;
-\?)
_usage ""
;;
--) # end of options, start of arguments
shift
while [ $# -gt 0 ]
do
echo \"$1\" >> $tmp/tags
shift
done
break
;;
esac
shift # finished with this option now, next!
done
pmprobe -I shping.cmd | grep -v 'Note: timezone' > $tmp/cmd 2> $tmp/err
if grep "^pmprobe:" $tmp/err > /dev/null 2>&1
then
$PCP_ECHO_PROG $PCP_ECHO_N "$progname: ""$PCP_ECHO_C"
sed < $tmp/err -e 's/^pmprobe: //g'
sts=1
exit
fi
set `cat $tmp/cmd`
if [ "$2" -eq "-12357" ] # Unknown metric name
then
$PCP_ECHO_PROG "$progname: pmdashping(1) is not installed"
sts=1
exit
elif [ "$2" -lt 0 ] # Some other PMAPI error
then
shift && shift
$PCP_ECHO_PROG "$progname: $*"
sts=1
exit
elif [ "$2" -eq 0 ] # No instances?
then
$PCP_ECHO_PROG "$progname: no shell ping command instances"
sts=1
exit
fi
shift && shift # skip over name and error/count
if $lflag
then
$PCP_ECHO_PROG "$progname: $*"
sts=0
exit
elif $cflag
then
$PCP_ECHO_PROG $PCP_ECHO_N "Duty cycles: ""$PCP_ECHO_C"
pmprobe -v shping.control.cycles | $PCP_AWK_PROG '{ print $3 }'
$PCP_ECHO_PROG $PCP_ECHO_N "Refresh interval: ""$PCP_ECHO_C"
pmprobe -v shping.control.cycletime | $PCP_AWK_PROG '{ print $3, "secs" }'
$PCP_ECHO_PROG $PCP_ECHO_N "Check timeout: ""$PCP_ECHO_C"
pmprobe -v shping.control.timeout | $PCP_AWK_PROG '{ print $3, "secs" }'
$PCP_ECHO_PROG
$PCP_ECHO_PROG "Tags and command lines:"
pminfo -f shping.cmd | \
sed -e '/^$/d' -e '/^shping/d' \
-e 's/"] value/]/' -e 's/^ *inst//' \
-e 's/[0-9][0-9]* or "//g'
sts=0
exit
fi
# positional args now hold shping metric instance names
while [ $# -gt 0 ]
do
if [ -s $tmp/tags ]
then
if ! grep -q "^$1$" $tmp/tags
then
shift
continue
fi
fi
# build up a pmie configuration file
cmd=`echo $1 | tr -d '"'`
echo "'sts_$cmd' = shping.status #'$cmd';" >> $tmp/pmie
echo "'time_$cmd' = shping.time.real #'$cmd';" >> $tmp/pmie
shift
done
if [ ! -s $tmp/pmie ]
then
$PCP_ECHO_PROG "$progname: no matching command tags found"
sts=1
exit
fi
cat $tmp/pmie | pmie -v -e -q | pmie2col -p 3 -w 10
sts=0
exit
|
aeg-aeg/pcpfans
|
src/pcp/shping/pcp-shping.sh
|
Shell
|
lgpl-2.1
| 3,534 |
#!/bin/bash
set -e
################################################################################
# A script for building all the examples.
################################################################################
# run all the example sbt builds
for directory in $( ls -d */ ); do
cd $directory && sbt clean test; cd -
done
# also run the example mvn build in hello-world
cd hello-world && mvn clean install; cd -
|
joecwu/finatra
|
examples/build-all.sh
|
Shell
|
apache-2.0
| 430 |
#!/bin/bash
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
ITERS=$1
if [ -z "$ITERS" ]; then
ITERS=7
fi
NASHORN_JAR=dist/nashorn.jar
JVM_FLAGS="-Djava.ext.dirs=`dirname $0`/../dist:$JAVA_HOME/jre/lib/ext -XX:+UnlockDiagnosticVMOptions -Dnashorn.unstable.relink.threshold=8 -Xms2G -Xmx2G -XX:+TieredCompilation -server -jar ${NASHORN_JAR}"
JVM_FLAGS7="-Xbootclasspath/p:${NASHORN_JAR} ${JVM_FLAGS}"
OCTANE_ARGS="--verbose --iterations ${ITERS}"
BENCHMARKS=( "box2d.js" "code-load.js" "crypto.js" "deltablue.js" "earley-boyer.js" "gbemu.js" "navier-stokes.js" "pdfjs.js" "raytrace.js" "regexp.js" "richards.js" "splay.js" )
# TODO mandreel.js has metaspace issues
if [ ! -z $JAVA7_HOME ]; then
echo "running ${ITERS} iterations with java7 using JAVA_HOME=${JAVA7_HOME}..."
for BENCHMARK in "${BENCHMARKS[@]}"
do
CMD="${JAVA7_HOME}/bin/java ${JVM_FLAGS} test/script/basic/run-octane.js -- test/script/external/octane/${BENCHMARK} ${OCTANE_ARGS}"
$CMD
done
else
echo "no JAVA7_HOME set. skipping java7"
fi
if [ ! -z $JAVA8_HOME ]; then
echo "running ${ITERS} iterations with java8 using JAVA_HOME=${JAVA8_HOME}..."
for BENCHMARK in "${BENCHMARKS[@]}"
do
CMD="${JAVA8_HOME}/bin/java ${JVM_FLAGS} test/script/basic/run-octane.js -- test/script/external/octane/${BENCHMARK} ${OCTANE_ARGS}"
$CMD
done
else
echo "no JAVA8_HOME set."
fi
echo "Done"
|
RedlineResearch/OpenJDK8
|
nashorn/bin/verbose_octane.sh
|
Shell
|
gpl-2.0
| 2,387 |
#!/bin/bash
source ../test_base
export DEV_GRAPHICS="-vga std"
make SERVICE=Test FILES=vga.cpp
start Test.img "VGA: Verify that the service starts test"
make SERVICE=Test FILES=vga.cpp clean
|
ingve/IncludeOS
|
test/hw/integration/vga/test.sh
|
Shell
|
apache-2.0
| 192 |
#!/usr/bin/env bash
set -e
export INTEGRATION_ROOT=./integration-tmp
export TMPC_ROOT=./integration-tmp/tmpc
declare -A cmap
trap "cleanup_containers" EXIT SIGINT
function cleanup_containers() {
for c in "${!cmap[@]}";
do
docker stop $c 1>>${INTEGRATION_ROOT}/test.log 2>&1 || true
if [ -z "$CIRCLECI" ]; then
docker rm -f $c 1>>${INTEGRATION_ROOT}/test.log 2>&1 || true
fi
done
unset cmap
}
function run_bridge_tests() {
## Setup
start_dnet 1 bridge 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-bridge]=dnet-1-bridge
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/bridge.bats
## Teardown
stop_dnet 1 bridge 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-bridge]
}
function run_overlay_consul_tests() {
## Test overlay network with consul
## Setup
start_dnet 1 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-consul]=dnet-1-consul
start_dnet 2 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-consul]=dnet-2-consul
start_dnet 3 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-consul]=dnet-3-consul
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/overlay-consul.bats
## Teardown
stop_dnet 1 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-consul]
stop_dnet 2 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-consul]
stop_dnet 3 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-consul]
}
function run_overlay_consul_host_tests() {
export _OVERLAY_HOST_MODE="true"
## Setup
start_dnet 1 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-consul]=dnet-1-consul
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/overlay-consul-host.bats
## Teardown
stop_dnet 1 consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-consul]
unset _OVERLAY_HOST_MODE
}
function run_overlay_zk_tests() {
## Test overlay network with zookeeper
start_dnet 1 zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-zookeeper]=dnet-1-zookeeper
start_dnet 2 zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-zookeeper]=dnet-2-zookeeper
start_dnet 3 zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-zookeeper]=dnet-3-zookeeper
./integration-tmp/bin/bats ./test/integration/dnet/overlay-zookeeper.bats
stop_dnet 1 zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-zookeeper]
stop_dnet 2 zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-zookeeper]
stop_dnet 3 zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-zookeeper]
}
function run_overlay_etcd_tests() {
## Test overlay network with etcd
start_dnet 1 etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-etcd]=dnet-1-etcd
start_dnet 2 etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-etcd]=dnet-2-etcd
start_dnet 3 etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-etcd]=dnet-3-etcd
./integration-tmp/bin/bats ./test/integration/dnet/overlay-etcd.bats
stop_dnet 1 etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-etcd]
stop_dnet 2 etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-etcd]
stop_dnet 3 etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-etcd]
}
function run_dnet_tests() {
# Test dnet configuration options
./integration-tmp/bin/bats ./test/integration/dnet/dnet.bats
}
function run_simple_consul_tests() {
# Test a single node configuration with a global scope test driver
## Setup
start_dnet 1 simple 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-simple]=dnet-1-simple
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/simple.bats
## Teardown
stop_dnet 1 simple 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-simple]
}
function run_multi_consul_tests() {
# Test multi node configuration with a global scope test driver backed by consul
## Setup
start_dnet 1 multi_consul consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-multi_consul]=dnet-1-multi_consul
start_dnet 2 multi_consul consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-multi_consul]=dnet-2-multi_consul
start_dnet 3 multi_consul consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-multi_consul]=dnet-3-multi_consul
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/multi.bats
## Teardown
stop_dnet 1 multi_consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-multi_consul]
stop_dnet 2 multi_consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-multi_consul]
stop_dnet 3 multi_consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-multi_consul]
}
function run_multi_zk_tests() {
# Test multi node configuration with a global scope test driver backed by zookeeper
## Setup
start_dnet 1 multi_zk zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-multi_zk]=dnet-1-multi_zk
start_dnet 2 multi_zk zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-multi_zk]=dnet-2-multi_zk
start_dnet 3 multi_zk zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-multi_zk]=dnet-3-multi_zk
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/multi.bats
## Teardown
stop_dnet 1 multi_zk 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-multi_zk]
stop_dnet 2 multi_zk 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-multi_zk]
stop_dnet 3 multi_zk 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-multi_zk]
}
function run_multi_etcd_tests() {
# Test multi node configuration with a global scope test driver backed by etcd
## Setup
start_dnet 1 multi_etcd etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-multi_etcd]=dnet-1-multi_etcd
start_dnet 2 multi_etcd etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-multi_etcd]=dnet-2-multi_etcd
start_dnet 3 multi_etcd etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-multi_etcd]=dnet-3-multi_etcd
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/multi.bats
## Teardown
stop_dnet 1 multi_etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-multi_etcd]
stop_dnet 2 multi_etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-multi_etcd]
stop_dnet 3 multi_etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-multi_etcd]
}
source ./test/integration/dnet/helpers.bash
if [ ! -d ${INTEGRATION_ROOT} ]; then
mkdir -p ${INTEGRATION_ROOT}
git clone https://github.com/sstephenson/bats.git ${INTEGRATION_ROOT}/bats
./integration-tmp/bats/install.sh ./integration-tmp
fi
if [ ! -d ${TMPC_ROOT} ]; then
mkdir -p ${TMPC_ROOT}
docker pull busybox:ubuntu
docker export $(docker create busybox:ubuntu) > ${TMPC_ROOT}/busybox.tar
mkdir -p ${TMPC_ROOT}/rootfs
tar -C ${TMPC_ROOT}/rootfs -xf ${TMPC_ROOT}/busybox.tar
fi
# Suite setup
if [ -z "$SUITES" ]; then
if [ -n "$CIRCLECI" ]
then
# We can only run a limited list of suites in circleci because of the
# old kernel and limited docker environment.
suites="dnet simple_consul multi_consul multi_zk multi_etcd"
else
suites="dnet simple_consul multi_consul multi_zk multi_etcd bridge overlay_consul overlay_consul_host overlay_zk overlay_etcd"
fi
else
suites="$SUITES"
fi
if [[ ( "$suites" =~ .*consul.* ) || ( "$suites" =~ .*bridge.* ) ]]; then
echo "Starting consul ..."
start_consul 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[pr_consul]=pr_consul
fi
if [[ "$suites" =~ .*zk.* ]]; then
echo "Starting zookeeper ..."
start_zookeeper 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[zookeeper_server]=zookeeper_server
fi
if [[ "$suites" =~ .*etcd.* ]]; then
echo "Starting etcd ..."
start_etcd 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dn_etcd]=dn_etcd
fi
echo ""
for suite in ${suites};
do
suite_func=run_${suite}_tests
echo "Running ${suite}_tests ..."
declare -F $suite_func >/dev/null && $suite_func
echo ""
done
|
pyakpyak/libnetwork
|
test/integration/dnet/run-integration-tests.sh
|
Shell
|
apache-2.0
| 8,333 |
if [[ $('uname') == 'Linux' ]]; then
local _sublime_linux_paths > /dev/null 2>&1
_sublime_linux_paths=(
"$HOME/bin/sublime_text"
"/opt/sublime_text/sublime_text"
"/opt/sublime_text_3/sublime_text"
"/usr/bin/sublime_text"
"/usr/local/bin/sublime_text"
"/usr/bin/subl"
"/opt/sublime_text_3/sublime_text"
"/usr/bin/subl3"
)
for _sublime_path in $_sublime_linux_paths; do
if [[ -a $_sublime_path ]]; then
st_run() { $_sublime_path $@ >/dev/null 2>&1 &| }
st_run_sudo() {sudo $_sublime_path $@ >/dev/null 2>&1}
alias sst=st_run_sudo
alias st=st_run
break
fi
done
elif [[ "$OSTYPE" = darwin* ]]; then
local _sublime_darwin_paths > /dev/null 2>&1
_sublime_darwin_paths=(
"/usr/local/bin/subl"
"/Applications/Sublime Text.app/Contents/SharedSupport/bin/subl"
"/Applications/Sublime Text 3.app/Contents/SharedSupport/bin/subl"
"/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl"
"$HOME/Applications/Sublime Text.app/Contents/SharedSupport/bin/subl"
"$HOME/Applications/Sublime Text 3.app/Contents/SharedSupport/bin/subl"
"$HOME/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl"
)
for _sublime_path in $_sublime_darwin_paths; do
if [[ -a $_sublime_path ]]; then
subl () { "$_sublime_path" $* }
alias st=subl
break
fi
done
elif [[ "$OSTYPE" = 'cygwin' ]]; then
local _sublime_cygwin_paths > /dev/null 2>&1
_sublime_cygwin_paths=(
"$(cygpath $ProgramW6432/Sublime\ Text\ 2)/sublime_text.exe"
"$(cygpath $ProgramW6432/Sublime\ Text\ 3)/sublime_text.exe"
)
for _sublime_path in $_sublime_cygwin_paths; do
if [[ -a $_sublime_path ]]; then
subl () { "$_sublime_path" $* }
alias st=subl
break
fi
done
fi
alias stt='st .'
|
linjk/oh-my-zsh
|
plugins/sublime/sublime.plugin.zsh
|
Shell
|
mit
| 2,014 |
#!/bin/sh
# Rooms and Floors example
CEP=localhost:8080
. ../common.sh
# Send an updateContext request with Room temp
function sendRoomTemp() #(url, floor, room, value)
{
payload='{
"contextElements": [
{
"type": "Room",
"isPattern": "false",
"id": "'$3'",
"attributes": [
{
"name": "temperature",
"type": "double",
"value": '$4'
},
{
"name": "floor",
"type": "string",
"value": "'$2'"
}
]
}
],
"updateAction": "APPEND"
}'
send $1 "v1/updateContext" "$payload"
}
echo "#1 First update CEP with RoomsAndFloors configuration"
CONFIG=`cat config.json`
updateConfig $CEP "$CONFIG"
echo ""
echo "#2 Then send T° of all the rooms to the CEP every 5 sec"
for temp in 12 14 18 20 24 19; do
echo ""
echo "# Wait between temperatures updates..."
echo ""
sleep 5
for room in 1 2 3 4; do
for floor in 1 2 3; do
# compute a unique temp for each room
t=$(($temp + (2*$floor) + $room))
echo " - Send updateContext for Room$floor$room with T°=$t"
out=$(sendRoomTemp $CEP "Floor$floor" "Room$floor$room" "$t")
echo " $out"
done
done
done
|
IOTHUB-F4I/IoThub
|
fiware-cepheus/doc/examples/RoomsAndFloorsExample/run.sh
|
Shell
|
gpl-3.0
| 1,165 |
#!/bin/zsh
setopt extended_glob
echo '
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
' > /tmp/copyright.txt
for i in (^vendor/)#*.go # or whatever other pattern...
do
if ! grep -q Copyright $i
then
echo "cat /tmp/copyright.txt $i >$i.new && mv $i.new $i"
fi
done
rm /tmp/copyright.txt
|
Dataman-Cloud/drone
|
vendor/github.com/go-swagger/go-swagger/hack/add-license.sh
|
Shell
|
apache-2.0
| 849 |
sed -i.bak 's/INCLUDEPATHS= \\/INCLUDEPATHS?= \\/g' $ROOTPATHSH/src/makefile.mingw
sed -i.bak 's/LIBPATHS= \\/LIBPATHS?= \\/g' $ROOTPATHSH/src/makefile.mingw
sed -i.bak 's/USE_UPNP:=-/USE_UPNP?=-/g' $ROOTPATHSH/src/makefile.mingw
sed -i.bak 's,#include <miniupnpc/miniwget.h>,#include <miniwget.h>,g' $ROOTPATHSH/src/net.cpp
sed -i.bak 's,#include <miniupnpc/miniupnpc.h>,#include <miniupnpc.h>,g' $ROOTPATHSH/src/net.cpp
sed -i.bak 's,#include <miniupnpc/upnpcommands.h>,#include <upnpcommands.h>,g' $ROOTPATHSH/src/net.cpp
sed -i.bak 's,#include <miniupnpc/upnperrors.h>,#include <upnperrors.h>,g' $ROOTPATHSH/src/net.cpp
sed -i.bak 's/\$(CC) -enable-stdcall-fixup/\$(CC) -Wl,-enable-stdcall-fixup/g' $ROOTPATHSH/${EWBLIBS}/${MINIUPNPC}/Makefile.mingw # workaround, see http://stackoverflow.com/questions/13227354/warning-cannot-find-entry-symbol-nable-stdcall-fixup-defaulting
sed -i.bak 's/all: init upnpc-static upnpc-shared testminixml libminiupnpc.a miniupnpc.dll/all: init upnpc-static/g' $ROOTPATHSH/${EWBLIBS}/${MINIUPNPC}/Makefile.mingw # only need static, rest is not compiling
# += does not work on windows defined variables
sed -i.bak 's/CFLAGS=-mthreads/CFLAGS=${ADDITIONALCCFLAGS} -mthreads/g' $ROOTPATHSH/src/makefile.mingw
sed -i.bak 's/CC = gcc/CC=gcc ${ADDITIONALCCFLAGS} -Wall/g' $ROOTPATHSH/${EWBLIBS}/${MINIUPNPC}/Makefile.mingw
|
gridcoin/Gridcoin-stake
|
contrib/easywinbuilder/patch_files.sh
|
Shell
|
mit
| 1,358 |
#!/usr/bin/env bash
VOLTDB_BIN="$(pwd)/../../../bin"
VOLTDB_BASE=$(dirname "$VOLTDB_BIN")
VOLTDB_LIB="$VOLTDB_BASE/lib"
VOLTDB_VOLTDB="$VOLTDB_BASE/voltdb"
LOG4J="$VOLTDB_VOLTDB/log4j.xml"
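# Build the Java classpath: list the VoltDB jar plus every jar under lib/ and
# lib/extension/, then join the paths with ':' using paste.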
CLASSPATH=$({ \
\ls -1 "$VOLTDB_VOLTDB"/voltdb-*.jar; \
\ls -1 "$VOLTDB_LIB"/*.jar; \
\ls -1 "$VOLTDB_LIB"/extension/*.jar; \
} 2> /dev/null | paste -sd ':' - )
VOTER_BASE=$VOLTDB_BASE/examples/voter
FILES="\
src
ddl.sql
deployment.xml"
function clean() {
rm -Rf $FILES
}
function copyvoter() {
#Get all the files from real voter
for F in $FILES
do
cp -pR $VOTER_BASE/$F .
done
}
function copyrunsh(){
#Get the run.sh and copy to another name
cp -p $VOTER_BASE/run.sh runtest.sh
#Change the paths to work from tests/test_apps
sed 's#\.\./#../../#' runtest.sh > runexample.sh
}
#Copy the run.sh from voter and modify to be called
copyrunsh
# If too many args show help, show help and exit
if [ $# -gt 1 ]; then bash runexample.sh help; exit; fi
# If clean then clean the run.sh stuff, then call clean
if [ "$1" = "clean" ]
then
clean
bash runexample.sh clean
rm runexample.sh runtest.sh
exit
fi
#Otherwise - copy the rest of voter
copyvoter
#Copy the AdHocBenchmark.java from her to src
cp -p AdHocBenchmark.java src/voter/
#If adhoc, run it otherwise call into the original run.sh
if [ "$1" = "adhoc" ]
then
(cat adhoc-quiet.input | sqlcmd > /dev/null) || exit 1
bash runexample.sh srccompile
java -classpath obj:$CLASSPATH:obj -Dlog4j.configuration=file://$LOG4J \
voter.AdHocBenchmark \
--displayinterval=5 \
--warmup=5 \
--duration=12 \
--servers=localhost:21212 \
--contestants=6 \
--maxvotes=2
elif [ "$1" = "adhoc-logged" ]
then
(cat adhoc-noisy.input | sqlcmd > /dev/null) || exit 1
bash runexample.sh srccompile
java -classpath obj:$CLASSPATH:obj -Dlog4j.configuration=file://$LOG4J \
voter.AdHocBenchmark \
--displayinterval=5 \
--warmup=5 \
--duration=12 \
--servers=localhost:21212 \
--contestants=6 \
--maxvotes=2
else
bash runexample.sh $1
fi
|
anhnv-3991/VoltDB
|
tests/test_apps/voter-adhoc/run.sh
|
Shell
|
agpl-3.0
| 2,171 |
#!/bin/sh
set -ex
mkdir /usr/local/mips-linux-musl
# originally from
# https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/
# OpenWrt-Toolchain-ar71xx-generic_gcc-5.3.0_musl-1.1.16.Linux-x86_64.tar.bz2
URL="https://ci-mirrors.rust-lang.org/rustc"
FILE="OpenWrt-Toolchain-ar71xx-generic_gcc-5.3.0_musl-1.1.16.Linux-x86_64.tar.bz2"
curl -L "$URL/$FILE" | tar xjf - -C /usr/local/mips-linux-musl --strip-components=2
for file in /usr/local/mips-linux-musl/bin/mips-openwrt-linux-*; do
ln -s $file /usr/local/bin/`basename $file`
done
|
aidancully/rust
|
src/ci/docker/host-x86_64/dist-various-1/install-mips-musl.sh
|
Shell
|
apache-2.0
| 545 |
#!/usr/bin/env bash
#
# Zinc, the bare metal stack for rust.
# Copyright 2014 Matt Coffin <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
for e in $EXAMPLES; do
EXAMPLE_NAME=$e make build
done
|
phil-opp/zinc
|
support/build-examples.sh
|
Shell
|
apache-2.0
| 722 |
source_sh ${srcdir}/emulparams/elf32ppc.sh
source_sh ${srcdir}/emulparams/elf_fbsd.sh
OUTPUT_FORMAT="elf32-powerpc-freebsd"
|
mattstock/binutils-bexkat1
|
ld/emulparams/elf32ppc_fbsd.sh
|
Shell
|
gpl-2.0
| 126 |
#!/bin/sh
# Copyright (C) 1989-2016 Free Software Foundation, Inc.
# This file is part of GDB.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Create version.c from version.in.
# Usage:
# create-version.sh PATH-TO-GDB-SRCDIR HOST_ALIAS \
# TARGET_ALIAS OUTPUT-FILE-NAME
srcdir="$1"
host_alias="$2"
target_alias="$3"
output="$4"
rm -f version.c-tmp $output version.tmp
date=`sed -n -e 's/^.* BFD_VERSION_DATE \(.*\)$/\1/p' $srcdir/../bfd/version.h`
sed -e "s/DATE/$date/" < $srcdir/version.in > version.tmp
echo '#include "version.h"' >> version.c-tmp
echo 'const char version[] = "'"`sed q version.tmp`"'";' >> version.c-tmp
echo 'const char host_name[] = "'"$host_alias"'";' >> version.c-tmp
echo 'const char target_name[] = "'"$target_alias"'";' >> version.c-tmp
mv version.c-tmp $output
rm -f version.tmp
|
freak97/binutils
|
gdb/common/create-version.sh
|
Shell
|
gpl-2.0
| 1,412 |
#!/usr/bin/env bash
#TODO: move this logic into release.hs.
set -xe
BUILD_DIR="$PWD"
cd "$(dirname "$0")/../.."
(cd etc/scripts && stack --install-ghc build)
RELEASE_SCRIPT="$(cd etc/scripts && stack exec which stack-release-script)"
cd "$BUILD_DIR"
"$RELEASE_SCRIPT" --no-test-haddocks release
|
AndreasPK/stack
|
etc/scripts/linux-armv7-release.sh
|
Shell
|
bsd-3-clause
| 295 |
#!/bin/sh
#
# Copyright (c) 2015 Matt Bell
# MIT Licensed; see the LICENSE file in this repository.
#
test_description="Test get command"
. lib/test-lib.sh
test_init_ipfs
test_ipfs_get_flag() {
ext="$1"; shift
tar_flag="$1"; shift
flag="$@"
test_expect_success "ipfs get $flag succeeds" '
ipfs get "$HASH" '"$flag"' >actual
'
test_expect_success "ipfs get $flag output looks good" '
printf "%s\n\n" "Saving archive to $HASH$ext" >expected &&
test_cmp expected actual
'
test_expect_success "ipfs get $flag archive output is valid" '
tar "$tar_flag" "$HASH$ext" &&
test_cmp "$HASH" data &&
rm "$HASH$ext" &&
rm "$HASH"
'
}
# we use a function so that we can run it both offline + online
test_get_cmd() {
test_expect_success "'ipfs get --help' succeeds" '
ipfs get --help >actual
'
test_expect_success "'ipfs get --help' output looks good" '
egrep "ipfs get.*<ipfs-path>" actual >/dev/null ||
test_fsh cat actual
'
test_expect_success "ipfs get succeeds" '
echo "Hello Worlds!" >data &&
HASH=`ipfs add -q data` &&
ipfs get "$HASH" >actual
'
test_expect_success "ipfs get output looks good" '
printf "%s\n\n" "Saving file(s) to $HASH" >expected &&
test_cmp expected actual
'
test_expect_success "ipfs get file output looks good" '
test_cmp "$HASH" data
'
test_expect_success "ipfs get DOES NOT error when trying to overwrite a file" '
ipfs get "$HASH" >actual &&
rm "$HASH"
'
test_ipfs_get_flag ".tar" "-xf" -a
test_ipfs_get_flag ".tar.gz" "-zxf" -a -C
test_ipfs_get_flag ".tar.gz" "-zxf" -a -C -l 9
test_expect_success "ipfs get succeeds (directory)" '
mkdir -p dir &&
touch dir/a &&
mkdir -p dir/b &&
echo "Hello, Worlds!" >dir/b/c &&
HASH2=`ipfs add -r -q dir | tail -n 1` &&
ipfs get "$HASH2" >actual
'
test_expect_success "ipfs get output looks good (directory)" '
printf "%s\n\n" "Saving file(s) to $HASH2" >expected &&
test_cmp expected actual
'
test_expect_success "ipfs get output is valid (directory)" '
test_cmp dir/a "$HASH2"/a &&
test_cmp dir/b/c "$HASH2"/b/c &&
rm -r "$HASH2"
'
test_expect_success "ipfs get -a -C succeeds (directory)" '
ipfs get "$HASH2" -a -C >actual
'
test_expect_success "ipfs get -a -C output looks good (directory)" '
printf "%s\n\n" "Saving archive to $HASH2.tar.gz" >expected &&
test_cmp expected actual
'
test_expect_success "gzipped tar archive output is valid (directory)" '
tar -zxf "$HASH2".tar.gz &&
test_cmp dir/a "$HASH2"/a &&
test_cmp dir/b/c "$HASH2"/b/c &&
rm -r "$HASH2"
'
test_expect_success "ipfs get ../.. should fail" '
echo "Error: invalid ipfs ref path" >expected &&
test_must_fail ipfs get ../.. 2>actual &&
test_cmp expected actual
'
}
# should work offline
test_get_cmd
# should work online
test_launch_ipfs_daemon
test_get_cmd
test_kill_ipfs_daemon
test_done
|
ForrestWeston/go-ipfs
|
test/sharness/t0090-get.sh
|
Shell
|
mit
| 2,896 |
# The folder containing this script
export TOOLS=`dirname $0`
export TRUNK=`cd $TOOLS/..; pwd`
# Global variables needed later
export MODE=release
export DEBUGGABLE_FLAG=false
export DEBUG_FLAG=0
export ANDROID_NDK=$TRUNK/sdks/android-ndk
export ANDROID_SDK=$TRUNK/sdks/android-sdk
# Local variables
export SRCROOT=$1
export JAVA_SDK=`/usr/libexec/java_home`
export JCOUNT=20
export EXTERNAL_NAME=$2
# Path variables
export CLASSPATH="$ANDROID_SDK/platforms/android-8/android.jar"
export NDKBUILD=$ANDROID_NDK/ndk-build
export DSTROOT=$SRCROOT/_build/android/$MODE
export RAWROOT=$SRCROOT/_build/android/$MODE/assets
export JAVAC=$JAVA_SDK/bin/javac
export JAR=$JAVA_SDK/bin/jar
export DX=$ANDROID_SDK/platforms/android-8/tools/dx
export AAPT=$ANDROID_SDK/platforms/android-8/tools/aapt
export APKBUILDER=$ANDROID_SDK/tools/apkbuilder
export ZIPALIGN=$ANDROID_SDK/tools/zipalign
export ADB=$ANDROID_SDK/platform-tools/adb
export AIDL=$ANDROID_SDK/platform-tools/aidl
##########
# Build the native code components
export NDK_PROJECT_PATH=$DSTROOT
echo "Building native code components..."
$NDKBUILD NDK_DEBUG=$DEBUG_FLAG NDK_APP_DEBUGGABLE=$DEBUGGABLE_FLAG NDK_APPLICATION_MK=$SRCROOT/Application.mk -j $JCOUNT -s
BUILD_STATUS=$?
if [ $BUILD_STATUS != 0 ]; then
exit $BUILD_STATUS
fi
mkdir -p "$SRCROOT/../binaries"
cp "$DSTROOT/libs/armeabi/lib$EXTERNAL_NAME.so" "$SRCROOT/../binaries"
mkdir -p "$SRCROOT/../binaries/Android"
cp "$DSTROOT/libs/armeabi/lib$EXTERNAL_NAME.so" "$SRCROOT/../binaries/Android/External-armeabi"
cd "$SRCROOT/../binaries"
zip -Rm "$EXTERNAL_NAME.lcext" "Android/*"
cd "$TOOLS"
|
thierrydouez/livecode
|
tools/build-extension-android.sh
|
Shell
|
gpl-3.0
| 1,575 |
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 tag" >/dev/stderr
exit 1
fi
TAG=$1
docker push quay.io/coreos/flannel:$TAG
|
kelcecil/flannel
|
dist/publish.sh
|
Shell
|
apache-2.0
| 130 |
#!/bin/sh
#~ Copyright 2002-2005 Rene Rivera.
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or copy at
#~ http://www.boost.org/LICENSE_1_0.txt)
# Reset the toolset.
BOOST_JAM_TOOLSET=
# Run a command, and echo before doing so. Also checks the exit status and quits
# if there was an error.
echo_run ()
{
echo "$@"
$@
r=$?
if test $r -ne 0 ; then
exit $r
fi
}
# Print an error message, and exit with a status of 1.
error_exit ()
{
echo "###"
echo "###" "$@"
echo "###"
echo "### You can specify the toolset as the argument, i.e.:"
echo "### ./build.sh gcc"
echo "###"
echo "### Toolsets supported by this script are:"
echo "### acc, como, darwin, gcc, intel-darwin, intel-linux, kcc, kylix,"
echo "### mipspro, mingw(msys), pathscale, pgi, qcc, sun, sunpro, tru64cxx, vacpp"
echo "###"
echo "### A special toolset; cc, is available which is used as a fallback"
echo "### when a more specific toolset is not found and the cc command is"
echo "### detected. The 'cc' toolset will use the CC, CFLAGS, and LIBS"
echo "### environment variables, if present."
echo "###"
exit 1
}
# Check that a command is in the PATH.
test_path ()
{
if `command -v command 1>/dev/null 2>/dev/null`; then
command -v $1 1>/dev/null 2>/dev/null
else
hash $1 1>/dev/null 2>/dev/null
fi
}
# Check that the OS name, as returned by "uname", is as given.
test_uname ()
{
if test_path uname; then
test `uname` = $*
fi
}
# Try and guess the toolset to bootstrap the build with...
Guess_Toolset ()
{
if test -r /mingw/bin/gcc ; then
BOOST_JAM_TOOLSET=mingw
BOOST_JAM_TOOLSET_ROOT=/mingw/
elif test_uname Darwin ; then BOOST_JAM_TOOLSET=darwin
elif test_uname IRIX ; then BOOST_JAM_TOOLSET=mipspro
elif test_uname IRIX64 ; then BOOST_JAM_TOOLSET=mipspro
elif test_uname OSF1 ; then BOOST_JAM_TOOLSET=tru64cxx
elif test_uname QNX && test_path qcc ; then BOOST_JAM_TOOLSET=qcc
elif test_path gcc ; then BOOST_JAM_TOOLSET=gcc
elif test_path icc ; then BOOST_JAM_TOOLSET=intel-linux
elif test -r /opt/intel/cc/9.0/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET=intel-linux
BOOST_JAM_TOOLSET_ROOT=/opt/intel/cc/9.0
elif test -r /opt/intel_cc_80/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET=intel-linux
BOOST_JAM_TOOLSET_ROOT=/opt/intel_cc_80
elif test -r /opt/intel/compiler70/ia32/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET=intel-linux
BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler70/ia32/
elif test -r /opt/intel/compiler60/ia32/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET=intel-linux
BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler60/ia32/
elif test -r /opt/intel/compiler50/ia32/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET=intel-linux
BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler50/ia32/
elif test_path pgcc ; then BOOST_JAM_TOOLSET=pgi
elif test_path pathcc ; then BOOST_JAM_TOOLSET=pathscale
elif test_path xlc ; then BOOST_JAM_TOOLSET=vacpp
elif test_path como ; then BOOST_JAM_TOOLSET=como
elif test_path KCC ; then BOOST_JAM_TOOLSET=kcc
elif test_path bc++ ; then BOOST_JAM_TOOLSET=kylix
elif test_path aCC ; then BOOST_JAM_TOOLSET=acc
elif test_uname HP-UX ; then BOOST_JAM_TOOLSET=acc
elif test -r /opt/SUNWspro/bin/cc ; then
BOOST_JAM_TOOLSET=sunpro
BOOST_JAM_TOOLSET_ROOT=/opt/SUNWspro/
# Test for "cc" as the default fallback.
elif test_path $CC ; then BOOST_JAM_TOOLSET=cc
elif test_path cc ; then
BOOST_JAM_TOOLSET=cc
CC=cc
fi
if test "$BOOST_JAM_TOOLSET" = "" ; then
error_exit "Could not find a suitable toolset."
fi
}
# The one option we support in the invocation
# is the name of the toolset to force building
# with.
case "$1" in
--guess-toolset) Guess_Toolset ; echo "$BOOST_JAM_TOOLSET" ; exit 1 ;;
-*) Guess_Toolset ;;
?*) BOOST_JAM_TOOLSET=$1 ; shift ;;
*) Guess_Toolset ;;
esac
BOOST_JAM_OPT_JAM="-o bootstrap/jam0"
BOOST_JAM_OPT_MKJAMBASE="-o bootstrap/mkjambase0"
BOOST_JAM_OPT_YYACC="-o bootstrap/yyacc0"
case $BOOST_JAM_TOOLSET in
mingw)
if test -r ${BOOST_JAM_TOOLSET_ROOT}bin/gcc ; then
export PATH=${BOOST_JAM_TOOLSET_ROOT}bin:$PATH
fi
BOOST_JAM_CC="gcc -DNT"
;;
gcc)
BOOST_JAM_CC=gcc
;;
darwin)
BOOST_JAM_CC=cc
;;
intel-darwin)
BOOST_JAM_CC=icc
;;
intel-linux)
if test -r /opt/intel/cc/9.0/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET_ROOT=/opt/intel/cc/9.0/
elif test -r /opt/intel_cc_80/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET_ROOT=/opt/intel_cc_80/
elif test -r /opt/intel/compiler70/ia32/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler70/ia32/
elif test -r /opt/intel/compiler60/ia32/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler60/ia32/
elif test -r /opt/intel/compiler50/ia32/bin/iccvars.sh ; then
BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler50/ia32/
fi
if test -r ${BOOST_JAM_TOOLSET_ROOT}bin/iccvars.sh ; then
# iccvars does not change LD_RUN_PATH. We adjust LD_RUN_PATH here in
# order not to have to rely on ld.so.conf knowing the icc library
# directory. We do this before running iccvars.sh in order to allow a
# user to add modifications to LD_RUN_PATH in iccvars.sh.
if test -z "${LD_RUN_PATH}"; then
LD_RUN_PATH="${BOOST_JAM_TOOLSET_ROOT}lib"
else
LD_RUN_PATH="${BOOST_JAM_TOOLSET_ROOT}lib:${LD_RUN_PATH}"
fi
export LD_RUN_PATH
. ${BOOST_JAM_TOOLSET_ROOT}bin/iccvars.sh
fi
BOOST_JAM_CC=icc
;;
vacpp)
BOOST_JAM_CC=xlc
;;
como)
BOOST_JAM_CC="como --c"
;;
kcc)
BOOST_JAM_CC=KCC
;;
kylix)
BOOST_JAM_CC=bc++
;;
mipspro)
BOOST_JAM_CC=cc
;;
pathscale)
BOOST_JAM_CC=pathcc
;;
pgi)
BOOST_JAM_CC=pgcc
;;
sun*)
if test -z "${BOOST_JAM_TOOLSET_ROOT}" -a -r /opt/SUNWspro/bin/cc ; then
BOOST_JAM_TOOLSET_ROOT=/opt/SUNWspro/
fi
if test -r "${BOOST_JAM_TOOLSET_ROOT}bin/cc" ; then
PATH=${BOOST_JAM_TOOLSET_ROOT}bin:${PATH}
export PATH
fi
BOOST_JAM_CC=cc
;;
clang*)
BOOST_JAM_CC="clang -Wno-unused -Wno-format"
BOOST_JAM_TOOLSET=clang
;;
tru64cxx)
BOOST_JAM_CC=cc
;;
acc)
BOOST_JAM_CC="cc -Ae"
;;
cc)
if test -z "$CC" ; then CC=cc ; fi
BOOST_JAM_CC=$CC
BOOST_JAM_OPT_JAM="$BOOST_JAM_OPT_JAM $CFLAGS $LIBS"
BOOST_JAM_OPT_MKJAMBASE="$BOOST_JAM_OPT_MKJAMBASE $CFLAGS $LIBS"
BOOST_JAM_OPT_YYACC="$BOOST_JAM_OPT_YYACC $CFLAGS $LIBS"
;;
qcc)
BOOST_JAM_CC=qcc
;;
*)
error_exit "Unknown toolset: $BOOST_JAM_TOOLSET"
;;
esac
echo "###"
echo "### Using '$BOOST_JAM_TOOLSET' toolset."
echo "###"
YYACC_SOURCES="yyacc.c"
MKJAMBASE_SOURCES="mkjambase.c"
BJAM_SOURCES="\
command.c compile.c constants.c debug.c execcmd.c frames.c function.c glob.c\
hash.c hdrmacro.c headers.c jam.c jambase.c jamgram.c lists.c make.c make1.c\
object.c option.c output.c parse.c pathsys.c regexp.c rules.c\
scan.c search.c subst.c timestamp.c variable.c modules.c strings.c filesys.c\
builtins.c class.c cwd.c native.c md5.c w32_getreg.c modules/set.c\
modules/path.c modules/regex.c modules/property-set.c modules/sequence.c\
modules/order.c"
case $BOOST_JAM_TOOLSET in
mingw)
BJAM_SOURCES="${BJAM_SOURCES} execnt.c filent.c pathnt.c"
;;
*)
BJAM_SOURCES="${BJAM_SOURCES} execunix.c fileunix.c pathunix.c"
;;
esac
BJAM_UPDATE=
if test "$1" = "--update" -o "$2" = "--update" -o "$3" = "--update" -o "$4" = "--update" ; then
BJAM_UPDATE="update"
fi
if test "${BJAM_UPDATE}" = "update" -a ! -x "./bootstrap/jam0" ; then
BJAM_UPDATE=
fi
if test "${BJAM_UPDATE}" != "update" ; then
echo_run rm -rf bootstrap
echo_run mkdir bootstrap
if test ! -r jamgram.y -o ! -r jamgramtab.h ; then
echo_run ${BOOST_JAM_CC} ${BOOST_JAM_OPT_YYACC} ${YYACC_SOURCES}
if test -x "./bootstrap/yyacc0" ; then
echo_run ./bootstrap/yyacc0 jamgram.y jamgramtab.h jamgram.yy
fi
fi
if test ! -r jamgram.c -o ! -r jamgram.h ; then
if test_path yacc ; then YACC="yacc -d"
elif test_path bison ; then YACC="bison -y -d --yacc"
fi
echo_run $YACC jamgram.y
mv -f y.tab.c jamgram.c
mv -f y.tab.h jamgram.h
fi
if test ! -r jambase.c ; then
echo_run ${BOOST_JAM_CC} ${BOOST_JAM_OPT_MKJAMBASE} ${MKJAMBASE_SOURCES}
if test -x "./bootstrap/mkjambase0" ; then
echo_run ./bootstrap/mkjambase0 jambase.c Jambase
fi
fi
echo_run ${BOOST_JAM_CC} ${BOOST_JAM_OPT_JAM} ${BJAM_SOURCES}
fi
if test -x "./bootstrap/jam0" ; then
if test "${BJAM_UPDATE}" != "update" ; then
echo_run ./bootstrap/jam0 -f build.jam --toolset=$BOOST_JAM_TOOLSET "--toolset-root=$BOOST_JAM_TOOLSET_ROOT" "$@" clean
fi
echo_run ./bootstrap/jam0 -f build.jam --toolset=$BOOST_JAM_TOOLSET "--toolset-root=$BOOST_JAM_TOOLSET_ROOT" "$@"
fi
|
NixaSoftware/CVis
|
venv/bin/tools/build/v2/engine/build.sh
|
Shell
|
apache-2.0
| 9,306 |
#!/usr/bin/webif-page
<?
. /usr/lib/webif/webif.sh
timeout=60
if empty "$FORM_reboot"; then
reboot_msg="<form method=\"post\" action=\"$SCRIPT_NAME\"><input type=\"submit\" value=\" @TR<<Yes, really reboot now>> \" name=\"reboot\" /></form>"
else
uci_load "network"
router_ip="$CONFIG_lan_ipaddr"
[ -n "$SERVER_PORT" ] && [ "$SERVER_PORT" != "80" ] && router_ip="$router_ip:$SERVER_PORT"
header_inject_head="<meta http-equiv=\"refresh\" content=\"$timeout;http://$router_ip\" />"
reboot_msg="@TR<<Rebooting now>>...
<br/><br/>
@TR<<reboot_wait#Please wait about>> $timeout @TR<<reboot_seconds#seconds.>> @TR<<reboot_reload#The webif² should automatically reload.>>
<br/><br/>
<center>
<script type=\"text/javascript\">
<!--
var bar1=createBar(350,15,'white',1,'black','blue',85,7,3,'');
-->
</script>
</center>"
fi
header "System" "Reboot" ""
?>
<br/><br/><br/>
<table width="90%" border="0" cellpadding="2" cellspacing="2" align="center">
<tr>
<td><script type="text/javascript" src="/js/progress.js"></script><? echo -n "$reboot_msg" ?><br/><br/><br/></td>
</tr>
</table>
<? footer ?>
<?
! empty "$FORM_reboot" && {
reboot &
exit
}
?>
<!--
##WEBIF:name:System:910:Reboot
-->
|
gwlim/openwrt_mr3420_8M
|
feeds/xwrt/webif/files/www/cgi-bin/webif/reboot.sh
|
Shell
|
gpl-2.0
| 1,193 |
#!/bin/sh
NET=/tmp/net
LOG=$NET/log
SRV_USR=$NET/users
SRV_IPS=$NET/srv_ip
CLI_IPS=$NET/cli_ip
PATH=/sbin:/usr/sbin:$PATH
export PATH
[ -d $NET ] || mkdir -p $NET
rm_entry() {
grep -v "^$1 " $2 > $2.new
mv $2.new $2
}
clear_dev() {
for file in `grep -l "^$1 " $NET/*`; do
rm_entry $1 $file
done
}
# net_auth_up ppp $DEV $TTY $USER
net_auth_up() {
echo "authup: $@" >> $LOG
rm_entry $2 $SRV_USR.$1
echo "$2 $3 $4 "`date` >> $SRV_USR.$1
}
# net_auth_down ppp $DEV $TTY $USER
net_auth_down() {
echo "authdown: $@" >> $LOG
clean_dev $2
}
# $DEV $TTY $GATEWAY $TUNNELIP $CLIENTIP (when pptpd)
# net_ip_up ppp ppp0 /dev/pts/0 192.168.200.1 192.168.200.10 192.168.10.25
# net_ip_up ppp ppp0 192.168.200.10 192.168.200.1 pptp (when pptp)
# net_ip_up openvpn tun0 $TUNNELIP $GATEWAY openvpn
net_ip_up() {
echo "ipup: $@" >> $LOG
if [ "$6" = "" ]; then
ext=$5
[ "$1" != "ppp" ] && ext=$1
rm_entry $2 $CLI_IPS.$ext
echo "$2 $3 $4" >> $CLI_IPS.$ext
else
rm_entry $2 $SRV_IPS.pptp
echo "$2 $3 $4 $5 $6" >> $SRV_IPS.pptp
fi
}
# net_ip_down ppp $TTY
# net_ip_down openvpn $TUN
net_ip_down() {
echo "ipdown: $@" >> $LOG
clear_dev $2
}
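# ifconfig_info: parse `ifconfig -a` into one record per interface: name, IP,
# mask, encapsulation, link/peer, the connection type looked up from the
# /tmp/net/???_ip.* files, and RX/TX byte counters. With "raw" it prints plain
# columns, otherwise shell variable assignments plus an overall "ifaces" list.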
ifconfig_info() {
ifconfig -a | awk -v "opt=$1" '
BEGIN {
ifaces=""
ptp=0
}
($0 ~ /^[^ \t]/) {
iface=$1
gsub(/\.[0-9]+$/,"",iface)
}
($3 ~ /^encap:/) {
encap=$3
gsub(/encap:/,"",encap)
if (encap == "Ethernet") {
link=$5
}
next
}
($2 ~ /^addr:/) {
ip=$2
gsub(/addr:/,"",ip)
if ($3 ~ /P-t-P:/) {
ptp=1
link=substr($3,7)
}
mask=$4
gsub(/Mask:/,"",mask)
}
($1 == "RX" && $5 == "TX") {
rx_bytes=$2
rx_desc=$3$4
tx_bytes=$6
tx_desc=$7$8
gsub(/bytes:/,"",rx_bytes)
gsub(/bytes:/,"",tx_bytes)
}
($0 ~ /^[ \t]*$/) {
clifile=""
"grep -l \"^" iface " \" / /tmp/net/???_ip.* 2>/dev/null | tail -n 1" | getline clifile
if (clifile != "") {
"basename " clifile | getline clifile
gsub(/..._ip./,"",clifile)
}
if (encap != "" && encap != "Local" && (encap != "UNSPEC" || ptp == 1)) {
if (link == "") { link="-"; }
if (ip == "") { ip="-"; }
if (mask == "") { mask="-"; }
if (clifile == "") { clifile="-"; }
if (encap == "UNSPEC") { encap="Pnt-to-Pnt"; }
if (opt == "raw") {
print iface " " ip " " mask " " encap " " link " " clifile " " rx_bytes " " rx_desc " " tx_bytes " " tx_desc
} else {
print iface "=\"" iface " " ip " " mask " " encap " " link " " clifile " " rx_bytes " " rx_desc " " tx_bytes " " tx_desc "\""
}
ifaces=ifaces " " iface
}
link=""
encap=""
ip=""
mask=""
ptp=0
}
END {
print "ifaces=\"" substr(ifaces,2) "\""
}
'
}
brctrl_ifaces() {
brctl show | awk '
BEGIN {
ifaces=""
}
(NF == 4) {
iface=$4
gsub(/\.[0-9]+$/,"",iface)
ifaces=ifaces " " iface
}
(NF == 1) {
iface=$1
gsub(/\.[0-9]+$/,"",iface)
ifaces=ifaces " " iface
}
END {
print "br_ifaces=\"" substr(ifaces,2) "\""
}
'
}
ip2int() {
set $(echo $1 | tr '\.' ' ');
echo $(($1<<24|$2<<16|$3<<8|$4));
}
int2ip() {
echo $(($1>>24&255)).$(($1>>16&255)).$(($1>>8&255)).$(($1&255))
}
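# Example: ip2int 192.168.1.10 -> 3232235786, int2ip 3232235786 -> 192.168.1.10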
#pptpd
USERS=/etc/ppp/users
PEERS=/etc/ppp/peers
# users.pptpd format:
# username password ip-address
# users.pptp format:
# peername username password ip-address
# peers.pptp format:
# peername host-name username
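# e.g. (illustrative values only):
#   users.pptpd: alice secret42 192.168.200.10
#   peers.pptp:  office vpn.example.com alice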
ppp_del_user() {
rm_entry "$2" $USERS.$1
}
ppp_add_user() {
ppp_del_user "$1" "$2"
echo "$2 $3 $4 $5" >> $USERS.$1
}
ppp_del_peer() {
rm_entry "$2" $USERS.$1
rm_entry "$2" $PEERS.$1
}
ppp_add_peer() {
ppp_add_user "$1" "$2" "$4" "$5" "$6"
rm_entry "$2" $PEERS.$1
echo "$2 $3 $4" >> $PEERS.$1
}
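# build_chap_secrets: regenerate /etc/ppp/chap-secrets from users.pptpd and
# users.pptp, and emit one pppd peers file per entry in peers.pptp.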
build_chap_secrets() {
mkdir /etc/ppp/peers 2>&-
touch /etc/ppp/users.pptpd /etc/ppp/users.pptp /etc/ppp/peers.pptp
grep -v pptp /etc/ppp/chap-secrets > /tmp/chap-secrets
awk '{print $1 " pptpd " $2 " " $3}' /etc/ppp/users.pptpd >> /tmp/chap-secrets
awk '{print $2 " pptp:" $1 " " $3 " " $4}' /etc/ppp/users.pptp >> /tmp/chap-secrets
awk '{
peer="/etc/ppp/peers/pptp:" $1
print "pty \"pptp " $2 " --nolaunchpppd\"" > peer
print "mppe required,stateless" >> peer
print "name " $3 >> peer
print "remotename pptp:" $1 >> peer
print "file /etc/ppp/options.pptp" >> peer
print "ipparam pptp:" $1 >> peer
}' /etc/ppp/peers.pptp
rm /etc/ppp/chap-secrets
mv /tmp/chap-secrets /etc/ppp/chap-secrets
chmod 700 /etc/ppp/chap-secrets
chmod 600 /etc/ppp/peers/* 2>&-
}
|
sxx1314/openwrt-test
|
feeds/xwrt/webif-vpn/files/etc/functions-net.sh
|
Shell
|
gpl-2.0
| 4,691 |
#!/bin/sh
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
if [ "${TESTSRC}" = "" ]
then
echo "TESTSRC not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTSRC=${TESTSRC}"
if [ "${TESTJAVA}" = "" ]
then
echo "TESTJAVA not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTJAVA=${TESTJAVA}"
if [ "${TESTCLASSES}" = "" ]
then
echo "TESTCLASSES not set. Test cannot execute. Failed."
exit 1
fi
JAVAC="${TESTJAVA}/bin/javac"
JAR="${TESTJAVA}/bin/jar"
rm -rf ${TESTCLASSES}/test3
mkdir -p ${TESTCLASSES}/test3
echo "Hello world" > ${TESTCLASSES}/test3/hello.txt
echo "Bye world" > ${TESTCLASSES}/test3/bye.txt
cp ${TESTSRC}/test1/com/foo/TestClass.java ${TESTCLASSES}/test3
cd ${TESTCLASSES}/test3
${JAVAC} -d . TestClass.java
${JAR} cvf foo.jar hello.txt bye.txt com/foo/TestClass.class
rm -f ../foo.jar
mv foo.jar ..
|
FauxFaux/jdk9-jdk
|
test/java/net/URLClassLoader/closetest/build2.sh
|
Shell
|
gpl-2.0
| 1,844 |
#!/bin/bash
# Script that requires one parameter to run
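# Absolute directory containing this script (pwd -P resolves symlinks).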
BASE=$(cd $(dirname $0); pwd -P)
error() {
cat <<< "$@" 1>&2
exit 1
}
[[ $# == 0 ]] && error "Usage: $(basename $0) <something>"
printf "Running with %s.\n" "$1"
|
jotham/bash-fragments
|
simple-template.sh
|
Shell
|
isc
| 229 |
#!/bin/sh
coq_compile()
{
echo "coqc: $1" 1>&2
coqc -opt "$1"
if [ $? -ne 0 ]
then
exit 1
fi
}
grep -n 'Admitted' *.v
case $? in
0) echo "warning: Some Admissions remain" 1>&2 ;;
1) ;;
*) exit 1
esac
grep -n 'admit' *.v
case $? in
0) echo "warning: Some proofs completed with 'admit'" 1>&2 ;;
1) ;;
*) exit 1
esac
coq_compile ListAux.v
coq_compile StringAux.v
coq_compile MapWeak.v
coq_compile MapWeakAux.v
coq_compile SetWeak.v
coq_compile NonEmptyStack.v
coq_compile Stack.v
coq_compile ListMapWeak.v
coq_compile ListNonEmptyStack.v
coq_compile ListSetWeak.v
coq_compile ListStack.v
coq_compile Error.v
coq_compile Size.v
coq_compile Time.v
coq_compile Names.v
coq_compile PathVirtual.v
coq_compile PathReal.v
coq_compile FilesystemRef.v
coq_compile FilesystemOp.v
coq_compile InputStream.v
coq_compile Archive.v
coq_compile ArchiveHandler.v
coq_compile Filesystem.v
ocamllex coq2html.mll || exit 1
ocamlopt -o coq2html str.cmxa coq2html.ml || exit 1
rm -rf doc || exit 1
mkdir doc || exit 1
./coq2html -o 'doc/%.html' *.glob *.v || exit 1
cp *.css doc || exit 1
cp *.js doc || exit 1
cp *.v doc || exit 1
cp index.html doc || exit 1
|
io7m/jvvfs-model2
|
build.sh
|
Shell
|
isc
| 1,345 |
#!/bin/bash
set +h # disable hashall
shopt -s -o pipefail
set -e # Exit on error
PKG_NAME="perl"
PKG_VERSION="5.22.1"
TARBALL="${PKG_NAME}-${PKG_VERSION}.tar.bz2"
SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
function prepare() {
ln -sv "../../source/$TARBALL" "$TARBALL"
}
function unpack() {
tar xf ${TARBALL}
}
function build() {
sh Configure -des -Dprefix=/tools -Dlibs=-lm
make $MAKE_PARALLEL
}
function check() {
echo " "
}
function instal() {
cp -v perl cpan/podlators/pod2man /tools/bin
mkdir -pv /tools/lib/perl5/$PKG_VERSION
cp -Rv lib/* /tools/lib/perl5/$PKG_VERSION
}
function clean() {
rm -rf "${SRC_DIR}" "$TARBALL"
}
clean;prepare;unpack;pushd ${SRC_DIR};build;[[ $MAKE_CHECK = TRUE ]] && check;instal;popd;clean
|
PandaLinux/pandaOS
|
phase1/perl/build.sh
|
Shell
|
mit
| 766 |
#!/bin/sh -xe
mount /dev/mmcblk0p1 /boot
if [ ! -f /boot/uEnv.txt.orig ]; then
cp -p /boot/uEnv.txt /boot/uEnv.txt.orig
fi
echo "optargs=capemgr.disable_partno=BB-BONELT-HDMI,BB-BONELT-HDMIN" >>/boot/uEnv.txt
umount /boot
cp BB-* /lib/firmware/
ln -sf $PWD/../rc.local /etc/
sudo ln -sf $PWD/lcd-ifupdown.sh /etc/network/if-up.d/lcd
sudo ln -sf $PWD/lcd-ifupdown.sh /etc/network/if-post-down.d/lcd
sudo ln -sf $PWD/../lcd-message /usr/bin/
sudo ln -sf $PWD/../asic /usr/bin/
sudo ln -sf $PWD/../../spi-test/spi-test /usr/bin/
sudo ln -sf $PWD/../waas/waas /usr/bin/
sudo ln -sf $PWD/../knc-serial /usr/bin
sudo ln -sf $PWD/../knc-led /usr/bin
sudo mkdir -p /config
#cp rc-local.service /etc/systemd/system/
#systemctl enable rc-local
|
KnCMiner/knc-asic
|
system/install.sh
|
Shell
|
mit
| 735 |
#!/bin/bash
source ${TESTS_DIR}/functions.sh
CURRENT_REPOSITORIES_JSON=${TESTS_DIR}/files/repositories.json
cp ${CURRENT_REPOSITORIES_JSON} settings/repositories.json
trash *7z
touch afisync.log
xvfb-run ./AFISync &
while [ ! -f *7z ]; do
sleep 2
done
kill_and_wait
if [ -f core* ]; then
echo -e "\e[31m$1Core file detected\e[0m"
exit 1
fi
exit 0
|
haikion/afi-sync
|
afi-sync-tests/bash-tests/log-rotate.sh
|
Shell
|
mit
| 361 |
#!/bin/bash
alias g='git'
alias gb='git branch -vv'
alias gbr='git branch -vv -r'
alias gs='g status --untracked-files=no'
alias gsa='g status'
alias gsaa='g status --untracked-files=all'
alias gd='git diff'
alias gfa='g fetch --all'
alias gfat='gfa --tags'
alias gfap='gfa --prune --tags'
alias gp='git pull'
alias gr='git remote -v'
alias gpum='git pull upstream main'
|
BenWhitehead/.dotfiles
|
fs/home/user/.bash/alias/git.bash
|
Shell
|
mit
| 373 |
#!/bin/sh
# run codepainter on source files
# https://github.com/jedmao/codepainter
# then hand edit { spaces_inside_curly_brackets }
cd ../src
codepaint xform -j ../build/glif-codepainter.json "**/*.js"
cd ../build
|
gschorno/glif
|
build/pretty.sh
|
Shell
|
mit
| 220 |
#!/usr/bin/env bash
upload_url='https://uploads.github.com/repos/dirkraft/ash/releases/3238640/assets'
asset_file=$1
asset_name=$(basename $asset_file)
curl --silent \
--header "Authorization: token ${GITHUB_TOKEN}" \
--header "Content-Type: application/octet-stream" \
"${upload_url}?name=${asset_name}" \
--data-binary "@${asset_file}" | jq .
|
dirkraft/ash
|
scripts/publish-dev.sh
|
Shell
|
mit
| 355 |
#!/bin/bash
set -e
export CFLAGS="-I${SANDBOX}/include/libxml2 ${CFLAGS}"
export CPPFLAGS="-I${SANDBOX}/include/libxml2 ${CPPFLAGS}"
${SRCDIR}/configure --build=x86_64-unknown-linux-gnu --host=${PREFIX} --prefix=${OUTPUT} \
--enable-static --with-libxml-prefix=${SANDBOX} \
--with-libxml-include-prefix=${SANDBOX}/include/libxml2 \
--with-libxml-libs-prefix=${SANDBOX}/lib \
--with-crypto=no
make -j8
make install
|
ericwlange/hemroid
|
packages/libxslt/build_android.sh
|
Shell
|
mit
| 434 |
# Only difference between this and testsuite.sh is the timeout time is very large here.
allPassed=1
cd "examples"
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
for file in $(ls -1 *.tex); do
printf "$file"
out=( $( ( /usr/bin/timeout 100 /usr/bin/time -f %e pdflatex --quiet $file 2>&1; echo $? ) | tr -d '\0' ) )
if [ -z ${out[0]+x} ]; then
allPassed=0
printf " ${RED}timed out${NC}\n"
else
if ((${out[-1]} > 0)); then
printf " ${RED}failed${NC}\n"
allPassed=0
else
printf " ${GREEN}passed${NC} in ${out[-2]}s\n"
fi
fi
done
if [ $allPassed == 1 ]; then
printf "${GREEN}All tests passed.${NC}\n"
else
exit 1
fi
|
sanathdevalapurkar/algtop-notes
|
compileexamples.sh
|
Shell
|
mit
| 698 |
#SQL:
#select id from article_live where
#big_picture_id is null and picture_id is null and
#id in (
# SELECT tlc.CONTENT_ID
# FROM tag_to_live_content tlc
# WHERE tlc.TAG_ID = 24364
# INTERSECT
# SELECT tlc.CONTENT_ID
# FROM tag_to_live_content tlc
# WHERE tlc.TAG_ID = 19805
#)
#and id not in (
#select article_id from IN_BODY_ELEMENT_LIVE
#)
echo "Migrating 100 articles "
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/355926947"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4531108"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4562075"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/5329682"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4234816"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/5098906"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/330369634"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/332276819"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/329601844"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/333308078"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4735622"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4985410"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/332049827"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/335929612"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/329501411"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/331475059"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4058459"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/377770433"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/5127008"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4042239"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/374666315"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/5355220"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4571219"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/5205392"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/366769552"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4487144"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/336567005"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/4968057"
echo
sleep 3
curl -X POST "http://flexcontentmigrator.gutools.co.uk/migrate/article/3937207"
|
guardian/flex-content-migrator
|
scripts/article/first100_stageReviews.sh
|
Shell
|
mit
| 3,090 |
#!/usr/bin/env bash
# Waking up herokuapp
curl $BACKOFFICE_URL > /dev/null
if [ "$CI_BRANCH" == "master" ]; then
gradle clean bintrayUpload
fi
# ARTIFACTS:
# ARTIFACT build/libs
#
# ENVIRONMENT VARIABLES:
# BACKOFFICE_URL https://backoffice-qa.kushkipagos.com/
|
Kushki/kushki-java
|
pipeline/03-distribute.sh
|
Shell
|
mit
| 266 |
#!/usr/bin/env bash
# Keep my .ssh stuff private.
chmod 700 "${HOMESICK}/bashrc/home/.ssh"
case ${PLATFORM} in
darwin)
# Setup ssh stuff.
if [[ -z ${SSH_AUTH_SOCK} ]]; then
ssh-add -K
else
echo "SSH agent using key in OSX keychain."
fi
;;
*)
# Setup ssh agent.
if [[ -z ${SSH_AUTH_SOCK} ]]; then
export SSH_ENV=${HOME}/.ssh/env-${HOSTNAME}
export SSH_CONFIG=${HOME}/.ssh/config
ssh-login
else
echo "SSH agent already active from another session or host."
fi
;;
esac
|
dougborg/bashrc
|
home/.bashrc.d/topics/ssh.sh
|
Shell
|
mit
| 547 |
#!/bin/sh
erlc proxy.erl && erl -noshell -s proxy $@
|
itplanes/proxy
|
proxy.sh
|
Shell
|
mit
| 54 |
#!/usr/bin/env bash
shopt -s -o pipefail
set -e # Exit on error
PKG_NAME="diffutils"
PKG_VERSION="3.7"
TARBALL="${PKG_NAME}-${PKG_VERSION}.tar.xz"
SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
LINK="http://ftp.gnu.org/gnu/$PKG_NAME/$TARBALL"
function showHelp() {
echo -e "--------------------------------------------------------------------------------------------------------------"
echo -e "Description: The Diffutils package contains programs that show the differences between files or directories."
echo -e "--------------------------------------------------------------------------------------------------------------"
echo -e ""
}
function prepare() {
echo -e "Downloading $TARBALL from $LINK"
wget "$LINK" -O "$TARBALL"
}
function unpack() {
echo -e "Unpacking $TARBALL"
tar xf ${TARBALL}
}
function build() {
echo -e "Configuring $PKG_NAME"
./configure --prefix=/tools
make "$MAKE_PARALLEL"
}
function instal() {
echo -e "Installing $PKG_NAME"
make "${MAKE_PARALLEL}" install
}
function clean() {
echo -e "Cleaning up..."
rm -rf ${SRC_DIR} ${TARBALL}
}
# Run the installation procedure
time {
showHelp
clean
prepare
unpack
pushd ${SRC_DIR}
build
instal
popd
clean
}
|
PandaLinux/base-64
|
temp-system/diffutils/build.sh
|
Shell
|
mit
| 1,225 |
exec file -m ../magic data/* | sort | diff -u -w - <(sort data.out)
|
enlnt/kdb-magic
|
test/check.sh
|
Shell
|
mit
| 68 |
#header "Adjusting file descriptors limit, if necessary"
#FILE_DESCRIPTOR_LIMIT=$( ulimit -n )
#if [ $FILE_DESCRIPTOR_LIMIT -lt 512 ]
#then
# info "Increasing file description limit to 512"
# ulimit -n 512
#fi
dotnet --info
dotnet restore
dotnet test test/MR.Augmenter.Tests/MR.Augmenter.Tests.csproj -f netcoreapp2.0
dotnet test test/MR.Augmenter.AspNetCore.Tests/MR.Augmenter.AspNetCore.Tests.csproj -f netcoreapp2.0
|
mrahhal/MR.Augmenter
|
build.sh
|
Shell
|
mit
| 420 |
#!/bin/bash
VERSION=0.0.1
usage () {
echo "usehowdy [-hV]"
echo
echo "Options:"
echo " -h|--help Print this help dialogue and exit"
echo " -V|--version Print the current version and exit"
}
require() {
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${SCRIPTDIR}/deps/$1.sh
}
require howdy/howdy
usehowdy () {
for opt in "${@}"; do
case "$opt" in
-h|--help)
usage
return 0
;;
-V|--version)
echo "$VERSION"
return 0
;;
esac
done
## your code here
howdy Troy green
}
if [[ ${BASH_SOURCE[0]} != $0 ]]; then
export -f usehowdy
else
usehowdy "${@}"
exit 0
fi
|
thoward/usehowdy
|
usehowdy.sh
|
Shell
|
mit
| 687 |
#!/bin/bash
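# Poll with `kill -0` (signal 0 only tests that the PID still exists) until the
# process has exited.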
waitpid(){
pid=$1
while kill -0 "$pid" 2> /dev/null; do
sleep 0.5
done
}
BREAK_TIME=4
ACK_TIME=2
for mask in "raspi_mon_sys/MainMonitoringSystem.py" "raspi_mon_sys/OpenEnergyMonitor.py" "raspi_mon_sys/PlugwiseMonitor.py" "raspi_mon_sys/MailLoggerServer.py"; do
pid=$(pgrep -n -f "python $mask")
if [[ ! -z $pid ]]; then
kill -2 $pid
echo "Sleeping $BREAK_TIME seconds to allow process termination"
sleep $BREAK_TIME
kill -9 $pid 2> /dev/null
waitpid $pid
echo "Sleeping $ACK_TIME seconds to allow other process acknowledgement"
sleep $ACK_TIME
fi
done
|
ESAI-CEU-UCH/raspi-monitoring-system
|
stop.sh
|
Shell
|
mit
| 654 |
export PORT=8088
./aurora-demo
|
gernest/aurora-demo
|
run.sh
|
Shell
|
mit
| 31 |
#!/bin/bash
set -euo pipefail
HELM_DOCS_VERSION="1.5.0"
# install helm-docs
curl --silent --show-error --fail --location --output /tmp/helm-docs.tar.gz https://github.com/norwoodj/helm-docs/releases/download/v"${HELM_DOCS_VERSION}"/helm-docs_"${HELM_DOCS_VERSION}"_Linux_x86_64.tar.gz
tar -xf /tmp/helm-docs.tar.gz helm-docs
# validate docs
./helm-docs
git diff --exit-code
|
joeferner/redis-commander
|
.github/helm-docs.sh
|
Shell
|
mit
| 377 |