column      type            range
code        stringlengths   2 – 1.05M
repo_name   stringlengths   5 – 110
path        stringlengths   3 – 922
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           2 – 1.05M
#!/usr/bin/env bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

MYNAME="${BASH_SOURCE-$0}"

function hadoop_usage
{
  hadoop_add_subcommand "run" "Start kms in the current window"
  hadoop_add_subcommand "run -security" "Start in the current window with security manager"
  hadoop_add_subcommand "start" "Start kms in a separate window"
  hadoop_add_subcommand "start -security" "Start in a separate window with security manager"
  hadoop_add_subcommand "status" "Return the LSB compliant status"
  hadoop_add_subcommand "stop" "Stop kms, waiting up to 5 seconds for the process to end"
  hadoop_add_subcommand "stop n" "Stop kms, waiting up to n seconds for the process to end"
  hadoop_add_subcommand "stop -force" "Stop kms, wait up to 5 seconds and then use kill -KILL if still running"
  hadoop_add_subcommand "stop n -force" "Stop kms, wait up to n seconds and then use kill -KILL if still running"
  hadoop_generate_usage "${MYNAME}" false
}

# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/kms-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/kms-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/kms-config.sh." >&2
  exit 1
fi

# The Java system property 'kms.http.port' is not used by KMS itself;
# it is used in Tomcat's server.xml configuration file.
#
# Mask the trustStorePassword
# shellcheck disable=SC2086
CATALINA_OPTS_DISP="$(echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ ]*/trustStorePassword=***/')"

hadoop_debug "Using CATALINA_OPTS: ${CATALINA_OPTS_DISP}"

# We're using hadoop-common, so set up some stuff it might need:
hadoop_finalize

hadoop_verify_logdir

if [[ $# = 0 ]]; then
  case "${HADOOP_DAEMON_MODE}" in
    status)
      hadoop_status_daemon "${CATALINA_PID}"
      exit
    ;;
    start)
      set -- "start"
    ;;
    stop)
      set -- "stop"
    ;;
  esac
fi

hadoop_finalize_catalina_opts
export CATALINA_OPTS

# A bug in the catalina.sh script means it does not use CATALINA_OPTS for
# stopping the server.
if [[ "${1}" = "stop" ]]; then
  export JAVA_OPTS=${CATALINA_OPTS}
fi

# If ssl, then populate the passwords into ssl-server.xml before starting tomcat.
#
# KMS_SSL_KEYSTORE_PASS is a bit odd:
#   if undefined, the if test will not enable ssl on its own
#   if "", set it to "password"
#   if custom, use the provided password
if [[ -f "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" ]]; then
  if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]] || [[ -n "${KMS_SSL_TRUSTSTORE_PASS}" ]]; then
    export KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
    sed -e 's/_kms_ssl_keystore_pass_/'${KMS_SSL_KEYSTORE_PASS}'/g' \
        -e 's/_kms_ssl_truststore_pass_/'${KMS_SSL_TRUSTSTORE_PASS}'/g' \
      "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" \
      > "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml"
    chmod 700 "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml" >/dev/null 2>&1
  fi
fi

exec "${HADOOP_CATALINA_HOME}/bin/catalina.sh" "$@"
NJUJYB/disYarn
hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
Shell
apache-2.0
3,779
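The SSL-password branch in kms.sh above hinges on two bash parameter expansions that are easy to confuse. A minimal standalone sketch (not part of the Hadoop script; the check function is illustrative) of the three KMS_SSL_KEYSTORE_PASS cases the comment describes:

#!/usr/bin/env bash
# ${VAR+x} expands non-empty only when VAR is set (even if set to "");
# ${VAR:-default} substitutes "default" when VAR is unset OR empty.
check() {
  if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]]; then
    echo "set -> keystore pass '${KMS_SSL_KEYSTORE_PASS:-password}'"
  else
    echo "unset -> ssl not enabled by this variable"
  fi
}
unset KMS_SSL_KEYSTORE_PASS; check    # unset -> ssl not enabled by this variable
KMS_SSL_KEYSTORE_PASS=""; check       # set -> keystore pass 'password'
KMS_SSL_KEYSTORE_PASS=custom; check   # set -> keystore pass 'custom'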
#!/bin/bash

files=(bash_profile gitignore inputrc pylintrc screenrc tmux.conf vimrc)

install-vim-deps() {
    # pathogen
    mkdir -p ~/.vim/autoload ~/.vim/bundle && \
        curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
    # solarized
    cd ~/.vim/bundle
    git clone git://github.com/altercation/vim-colors-solarized.git
    git clone --depth=1 https://github.com/vim-syntastic/syntastic.git
    git clone https://github.com/vim-airline/vim-airline
    git clone https://github.com/elzr/vim-json.git
}

for f in "${files[@]}"; do
    cp -v "${f}" ~/."${f}"
done

if [ ! -d ~/.vim/bundle ] && [ ! -d ~/.vim/autoload ]; then
    install-vim-deps
fi
csuttles/utils
home/linux-install.sh
Shell
apache-2.0
644
find src \( -name '*.java' -o -name '*.xml' -o -name '*.js' -o -name '*.html' \
    -o -name '*.css' -o -name '*.properties' -o -name '*.jsp' \) -exec cat {} \; | wc -l
alexript/balas
calc_src_lines.sh
Shell
apache-2.0
149
#!/bin/bash
cd /var/shopping-plaza.ru/common/cron
php -f ./news.php
rootree/shopping-plaza
common-files/cron/news.sh
Shell
apache-2.0
69
#!/bin/bash

exec 1> >(logger -s -t $(basename $0)) 2>&1

function shutdown() {
    date
    echo "Shutting down Tomcat"
    ${CATALINA_HOME}/bin/catalina.sh stop
}

# Allow any signal which would kill a process to stop Tomcat
trap shutdown HUP INT QUIT ABRT KILL ALRM TERM TSTP

if [ ! -f ${CATALINA_HOME}/scripts/.tomcat_admin_created ]; then
    ${CATALINA_HOME}/scripts/create_admin.sh
fi

date
echo "Starting Tomcat"
export CATALINA_PID=/tmp/$$
exec ${CATALINA_HOME}/bin/catalina.sh run

echo "Waiting for `cat $CATALINA_PID`"
wait `cat $CATALINA_PID`
CALlanoR/virtual_environments
ansible/centos7_tomcat8/scripts/tomcat.sh
Shell
apache-2.0
554
#!/bin/bash

if [ $ANT_HOME ] ; then
    EXE_ANT=$ANT_HOME/bin/ant
else
    export ANT_HOME=$(pwd)/apache-ant-1.9.6
fi

echo ANT_HOME: $ANT_HOME

export MRUN_OPTS="$1"

if [ "e$2" != "e" ] ; then
    export MRUN_OPTS="$MRUN_OPTS $2"
fi
if [ "e$3" != "e" ] ; then
    export MRUN_OPTS="$MRUN_OPTS $3"
fi

echo ant run $MRUN_OPTS
$ANT_HOME/bin/ant -buildfile $MRUN_OPTS
OpenSourceConsulting/athena-meerkat
agent/runtarget.sh
Shell
apache-2.0
378
#!/bin/bash

for time in `cat hdfs.incon | awk '{print $1}' | uniq`
do
    let min=100
    for val in `cat hdfs.incon | grep "^$time " | awk '{print $2}'`
    do
        if [ $min -gt $val ]; then
            min=$val
        fi
    done
    echo $time" "$min
done
songweijia/hdfsrs
experiments/precision/ana/hdfs.sh
Shell
apache-2.0
240
#!/bin/bash

###############################################
# Function to clean TMP files
###############################################
cleanup() {
  rm -rf "${TMP_SUBMIT_SCRIPT}".*
}

###############################################
# Function to submit the script
###############################################
ERROR_SUBMIT="Error submitting script to queue system"

submit() {
  # Submit the job to the queue
  # shellcheck disable=SC2086
  eval ${SUBMISSION_CMD} ${SUBMISSION_PIPE}${TMP_SUBMIT_SCRIPT}
  result=$?

  # Check if submission failed
  if [ $result -ne 0 ]; then
    submit_err=$(cat "${TMP_SUBMIT_SCRIPT}.err")
    echo "${ERROR_SUBMIT}${submit_err}"
    exit 1
  fi
}

#---------------------------------------------------
# MAIN EXECUTION
#---------------------------------------------------
if [ -z "${COMPSS_HOME}" ]; then
  SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
  COMPSS_HOME=${SCRIPT_DIR}/../../../../
else
  SCRIPT_DIR="${COMPSS_HOME}/Runtime/scripts/queues/commons"
fi

# shellcheck source=common.sh
# shellcheck disable=SC1091
source "${SCRIPT_DIR}"/common.sh

# Get command args (loads args from commons.sh, especially sc_cfg)
get_args "$@"

# Load specific queue system variables
# shellcheck source=../supercomputers/default.cfg
# shellcheck disable=SC1091
# shellcheck disable=SC2154
source "${SCRIPT_DIR}/../supercomputers/${sc_cfg}"

# Check parameters
check_args

# Load specific queue system flags
# shellcheck source=../queue_systems/slurm.cfg
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../queue_systems/${QUEUE_SYSTEM}.cfg"

# Set wall clock time
set_time

# Log received arguments
log_args

# Create TMP submit script
create_normal_tmp_submit

# Trap cleanup
trap cleanup EXIT

# Submit
submit
mF2C/COMPSs
compss/runtime/scripts/queues/commons/submit.sh
Shell
apache-2.0
1,834
#!/bin/sh
bash -c "
mkdir -p components-extras;
cd components-extras;
rm -rf scala-2.*.tgz;
rm -rf jdk*;
rm -rf OpenJDK*;
wget https://downloads.lightbend.com/scala/2.12.8/scala-2.12.8.tgz;
wget https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u212-b04/OpenJDK8U-jdk_x64_linux_hotspot_8u212b04.tar.gz
cd -"
thomsonreuters/CM-Well
server/cmwell-cons/app/get-3rd-components.sh
Shell
apache-2.0
332
#!/bin/bash
#
# Wrapper script needs improvement!
#
# @author Andre van Hoorn

BINDIR=$(cd "$(dirname "$0")"; pwd)/
JAVAARGS="-Dkieker.common.logging.Log=JDK -Djava.util.logging.config.file=${BINDIR}/logging.properties -Xms56m -Xmx1024m"
MAINCLASSNAME=kieker.tools.KaxViz

#echo java ${JAVAARGS} -cp "${CLASSPATH}" ${MAINCLASSNAME} "$@"
time java ${JAVAARGS} -cp "${BINDIR}/../lib/*":"${BINDIR}/../build/libs/*":"${BINDIR}" ${MAINCLASSNAME} "$@"
HaStr/kieker
bin/kax-viz.sh
Shell
apache-2.0
448
curl \
    --cacert ca.crt \
    --key client.key \
    --cert client.crt \
    https://localhost:3000
trammell/test
mtls/client.sh
Shell
artistic-2.0
95
#!/bin/bash

for i in `seq 1 5`; do
    mysql -h 127.0.0.1 -u root --password=1234 -e 'show databases;' && break
    echo "[*] Waiting for mysql to start..."
    sleep 5
done

echo "[*] Loading MySQL schema..."
mysql -h 127.0.0.1 -u root --password=1234 < ./db/schema.v0.sql
echo "[*] Loading MySQL dummy data..."
mysql -h 127.0.0.1 -u root --password=1234 -o oncall < ./db/dummy_data.sql
echo "[*] Tables created for database oncall:"
mysql -h 127.0.0.1 -u root --password=1234 -o oncall -e 'show tables;'
diegocepedaw/oncall
.ci/setup_mysql.sh
Shell
bsd-2-clause
508
#!/bin/bash

printf "Checking average fishing mortality in \""`pwd`"\" ... "

declare -r FILENAME=`head -n 1 file_nam.tmp`

rm -f gmon.out
rm -f $FILENAME-p*.log
rm -f $FILENAME.ppp
rm -f $FILENAME-p*.par
rm -f variance
rm -f $FILENAME.p0*
rm -f fmin.log
rm -f $FILENAME-p*.bar
rm -f $FILENAME-p*.rep
rm -f $FILENAME-p*-tagest.log
rm -f eigv.rpt

../../../../../tpl/tagest -est &> /dev/null

declare -r LINE=`grep "average_fishing_mortality =" $FILENAME-p01.rep`
#declare -r EXPECTED="average_fishing_mortality = 0.000243605 0.000270474 0.0147825 0.0156899 0.00192086 0.00187844 0.010441 0.0105243 0.00157244 0.00249184"
declare -r EXPECTED="average_fishing_mortality = 0.000451781 6.69695e-05 1.26857e-05 0.000221534 0.00225643 0.000204055 0.000673397 0.00030166 0.000336638 0.00294695 0.0100434 0.00427859"

if [ ! "$LINE" = "$EXPECTED" ]; then
    printf "FAILED\n"
    printf "Error: expected \"$EXPECTED\" \n got \"$LINE\"\n"
    exit 0
fi

rm -f gmon.out
rm -f $FILENAME-p01.log
rm -f $FILENAME.ppp
rm -f $FILENAME-p01.par
rm -f variance
rm -f $FILENAME.p01
rm -f $FILENAME.tmp
rm -f fmin.log
rm -f $FILENAME-p01.bar
rm -f $FILENAME-p01.rep
rm -f $FILENAME-p01-tagest.log
rm -f eigv.rpt

printf "OK\n"
exit 0
johnrsibert/tagest
25mt/test/bin/testAverageFishingMortality.sh
Shell
bsd-2-clause
1,217
#!/bin/sh
sbt 'project scalaFilter' 'docker:publishLocal'
sbt 'project generator' 'docker:publishLocal'
Zeta-Project/zeta
api/createDockerImages.sh
Shell
bsd-2-clause
105
#!/bin/sh

MYSELF=`which "$0" 2>/dev/null`
[ $? -gt 0 -a -f "$0" ] && MYSELF="./$0"

java=java
if test -n "$JAVA_HOME"; then
    java="$JAVA_HOME/bin/java"
fi

exec "$java" $java_args -cp $MYSELF org.openscience.cdk.nfp.SmiToFps "$@"
exit 1
johnmay/efficient-bits
fp-idx/src/main/resources/smi2fps-stub.sh
Shell
bsd-2-clause
237
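The stub above is the classic self-executing-jar trick: java -cp can read a jar even when shell-script bytes are prepended, because a zip's index lives at the end of the file. A hypothetical build step (the file names are placeholders, not from this repo):

#!/bin/sh
# Concatenate the stub and the jar; the result is simultaneously a shell
# script (read from the top) and a jar (read from the end).
cat smi2fps-stub.sh smi2fps.jar > smi2fps
chmod +x smi2fps
./smi2fps molecules.smi    # runs org.openscience.cdk.nfp.SmiToFps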
#!/bin/sh
set -e

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

install_framework() {
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # use filter instead of exclude so missing patterns don't throw errors
  echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}

# Signs a framework with the provided identity
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
    /usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
  fi
}

# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current file
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}

if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "Pods-LVForgivingAssert_Tests/LVForgivingAssert.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "Pods-LVForgivingAssert_Tests/LVForgivingAssert.framework"
fi
lovoo/LVForgivingAssert
Example/Pods/Target Support Files/Pods-LVForgivingAssert_Tests/Pods-LVForgivingAssert_Tests-frameworks.sh
Shell
bsd-3-clause
3,584
#!/bin/sh

DCS_HOST=http://localhost:8080/dcs/rest
RESULT=clusters-from-raw-post.xml

curl $DCS_HOST -# \
    --data-binary @clustering-with-raw-post-data.txt \
    -H "Content-Type: multipart/form-data; boundary=---------------------------191691572411478" \
    -H "Content-Length: 44389" \
    -o $RESULT

echo Results saved to $RESULT
arnaudsj/carrot2
applications/carrot2-dcs/examples/curl/clustering-with-raw-post.sh
Shell
bsd-3-clause
340
#!/usr/bin/env sh

VG="valgrind --leak-check=full --error-exitcode=1"
SABIR=./sabir

./sabir-train dump test/data/* > test/model.tmp
$VG ./sabir -mtest/model.tmp test/bad_utf8.txt
rm test/model.tmp

exit 0
michaelnmmeyer/sabir
test/bad_utf8.sh
Shell
bsd-3-clause
207
#!/bin/bash

SCRIPTDIR=$(readlink -f $(dirname $0))
RISCV_TEST_DIR=$SCRIPTDIR/../software/riscv-tests

set -e
set -o pipefail

(
    cd $(dirname $RISCV_TEST_DIR)
    git submodule update --init --recursive $(basename ${RISCV_TEST_DIR})
    cd ${RISCV_TEST_DIR}
    sed -i 's/. = 0x80000000/. = 0x00000000/' env/p/link.ld
    sed -i 's/.tohost.*$//' env/p/link.ld
    ./configure --with-xlen=32 2>&1
    make clean &>/dev/null
    make -k isa -j10 >/dev/null 2>&1 || true
)

TEST_DIR=${RISCV_TEST_DIR}/isa
FILES=$(ls ${TEST_DIR}/rv32[um][im]-p-* | grep -v dump | grep -v hex)

PREFIX=riscv32-unknown-elf
OBJDUMP=$PREFIX-objdump
OBJCOPY=$PREFIX-objcopy

mkdir -p test

# MEM files are for lattice boards, the hex files are for altera boards
for f in $FILES
do
    BIN_FILE=test/$(basename $f).bin
    QEX_FILE=test/$(basename $f).qex
    MEM_FILE=test/$(basename $f).mem
    MIF_FILE=test/$(basename $f).mif
    SPLIT_FILE=test/$(basename $f).split2
    echo "$f > $QEX_FILE"
    (
        cp $f test/
        $OBJCOPY -O binary $f $BIN_FILE
        $OBJDUMP --disassemble-all -Mnumeric,no-aliases $f > test/$(basename $f).dump
        python $SCRIPTDIR/bin2hex.py $BIN_FILE -a 0x0 > $QEX_FILE || exit -1
        sed -e 's/://' -e 's/\(..\)/\1 /g' $QEX_FILE >$SPLIT_FILE
        awk '{if (NF == 9) print $5$6$7$8}' $SPLIT_FILE > $MEM_FILE
        # rm -f $MIF_FILE $SPLIT_FILE
    ) &
done
wait

(
    # cleanup edits
    cd ${RISCV_TEST_DIR}/env
    git checkout .
)
VectorBlox/risc-v
tools/generate_hex_files.sh
Shell
bsd-3-clause
1,403
#!/bin/bash

cur_dir=`old=\`pwd\`; cd \`dirname $0\`; echo \`pwd\`; cd $old;`
prj=`basename $cur_dir | sed s/\.dep\..*$//`

fpm=php-fpm
fpm_config=$cur_dir/app/config/php-fpm.conf
fpm_pidfile=/var/run/php-fpm/$prj-php-fpm.pid
nginx=/usr/sbin/nginx

start_fpm(){
    printf "starting php-fpm..."
    $fpm -y $fpm_config -g $fpm_pidfile
    if [ -f "$fpm_pidfile" ]; then
        echo ""
        echo " php-fpm started"
    else
        echo ""
        echo " php-fpm failed!"
    fi
}

stop_fpm(){
    printf "stopping php-fpm"
    i=0
    while [ "$i" -le 99999 ]; do
        if [ $((i%10)) = 0 ]; then
            printf "."
        fi
        if [ -f "$fpm_pidfile" ]; then
            kill `cat $fpm_pidfile`
        else
            echo ""
            echo " stopped"
            break
        fi
        i=$((i+1))
        sleep 0.1;
    done
}

ask_restart_fpm(){
    echo ""
    /bin/echo -n "restart php-fpm?(y/n)[n] "
    read yn
    if [ "$yn" = "y" ] ; then
        :
    else
        echo "skip php-fpm"
        return
    fi
    stop_fpm
    start_fpm
}

case "$1" in
'start')
    stop_fpm
    start_fpm
    ;;
'stop')
    stop_fpm
    ;;
'restart')
    ask_restart_fpm
    $nginx -s reload
    ;;
*)
    echo "Usage: $0 {start|stop|restart}"
    exit 1
    ;;
esac
ideawu/iphp
tools/new_app_files/server.sh
Shell
bsd-3-clause
1,071
#!/bin/bash
#
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# Created By Mike Dodge(http://fb.me/mikedodge04):
#
# Code_Sync
#   This function will download a ssh key from your defined web server as
#   $KEY_SERVER. It will use the downloaded ssh key to do an rsync from the
#   defined $RSYNC_SERVER to the local code/lib. It will overwrite any local
#   changes with what is on the $RSYNC_SERVER.
#
# $KEY_SERVER:
#   This is a HTTPS server where you will store your ssh key for download
# $KEY_PATH:
#   This is the rest of the web address used to download the key
# $RSYNC_SERVER:
#   Server host name on which you are storing the /code/lib directory

#########################################################
###                   Global Vars                     ###
#########################################################

# If the Global var is empty && assign
# Server Paths
[[ -z "$KEY_SERVER" ]] && KEY_SERVER="http://WEB_SERVER.com/"
[[ -z "$KEY_PATH" ]] && KEY_PATH="DIR/rsync_key"
[[ -z "$RSYNC_SERVER" ]] && RSYNC_SERVER="CODE_SYNC_SERVER"

# Client Paths
[[ -z "$code" ]] && code="/Library/code"
[[ -z "$key" ]] && key="$code/key/rsync_key"
[[ -z "$lib" ]] && lib="$code/lib"
[[ -z "$modules" ]] && modules="$lib/modules"
[[ -z "$scripts" ]] && scripts="$lib/scripts"

#########################################################

function code_sync () {
  # Usage: code_sync EXIT
  # Downloads latest changes to /Library/code/lib,
  # and deletes any files not on the server
  # EXIT should contain "exit" to exit on sync fail

  # if you pass exit as $1 it will quit on sync fail
  exit_on_failure="noexit"
  [[ "$1" = "exit" ]] && exit_on_failure="exit"

  download_key
  maintain_ssh_known_hosts
  rsync_lib "$exit_on_failure"
  code_sync_finish
}

#########################################################
###               Supplement Functions                ###
#########################################################

function dl_logger () {
  # Usage: dl_logger FAIL_MSG
  # Writes FAIL_MSG to syslog with the "code_sync" tag. Useful for Splunk
  # or other logging tools
  FAIL_MSG="$1"
  dl_logger_tag="code_sync"
  logger -t $dl_logger_tag "$FAIL_MSG"
}

function download_key () {
  # Usage: download_key
  # Downloads the ssh key used to do an rsync.
  mkdir -p $code/key &> /dev/null
  curl -s "$KEY_SERVER/$KEY_PATH" -o "$key" &>/dev/null
}

function code_sync_abort () {
  # Usage: abort MESSAGE EXIT
  # Echoes MESSAGE
  # EXIT must contain "exit" or "noexit"; "exit" exits the script.
  echo "$1" >&2 || echo ""
  if [[ "$2" = "exit" ]] ; then
    . $modules/finish.sh; finish
    exit 1
  fi
  return 1
}

function maintain_ssh_known_hosts (){
  # Usage: maintain_ssh_known_hosts
  # Ensures the /etc/ssh_known_hosts file is up to date with the version in codelib
  CODE_KNOWN_HOSTS="$lib/conf/ssh_known_hosts"
  KNOWN_HOSTS='/etc/ssh_known_hosts'
  . $modules/diff_replace.sh; diff_replace "$KNOWN_HOSTS" "$CODE_KNOWN_HOSTS"
}

function rsync_lib () {
  # Usage: rsync_lib EXIT
  # Input $1= exit, (optional). Exits on failure
  # Syncs the code library from your Rsync server down to the localhost
  exit_on_failure="noexit"
  [[ "$1" = "exit" ]] && exit_on_failure="exit"

  # Change permissions on the rsync key and pull down the code base with
  # an rsync using the key. Delete any changes on the localhost that aren't
  # on the server. Note, we are assuming that there is a 'util' account on
  # your codesync server that has access to the code library on the server.
  chmod 700 $code/key/rsync_key
  rsync -av --delete -e "ssh -i $code/key/rsync_key" \
    util@"$RSYNC_SERVER":/code/lib $code/ &>/dev/null

  # Check for failure and log to syslog
  if [[ $? -ne 0 ]]; then
    dl_logger "Failed to rsync"
    msg="Code_Sync failed!"
    code_sync_abort "$msg" "$exit_on_failure"
  fi
}

function create_code_directories {
  # Create the dirs needed for code. This is where we like to keep all of the
  # necessary dirs for later.
  mkdir -p $code/key
  # Create the Logs dir
  mkdir -p $code/logs
  # Make sure the Log file exists
  touch $code/logs/log.txt
  # Create the tags dir
  mkdir -p $code/tags
  # Create the var dir
  mkdir -p $code/var
  # expecting errors, so I'm returning 0
  return 0
}

function set_code_permissions {
  # Set lib only to root access
  chown -R root:root $lib &>/dev/null
  # Allow bin to be used by admin
  # Set the correct permissions for the code dir
  chmod -R 755 $lib &> /dev/null
  # expecting errors, so I'm returning 0
  return 0
}

function code_sync_finish () {
  # Creates code dirs, and sets permissions.
  create_code_directories
  set_code_permissions
}
rtrouton/IT-CPE
code/lib/modules/code_sync.sh
Shell
bsd-3-clause
4,916
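A hypothetical invocation of the module above; the host names are illustrative overrides for the script's own placeholder defaults:

#!/bin/bash
# Override the defaults before sourcing, then sync; "exit" aborts on rsync failure.
KEY_SERVER="https://configserver.example.com"   # illustrative host
KEY_PATH="keys/rsync_key"
RSYNC_SERVER="codesync.example.com"             # illustrative host
. /Library/code/lib/modules/code_sync.sh
code_sync exit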
#!/bin/sh

# Package
PACKAGE="horde"
DNAME="Horde"

# Others
INSTALL_DIR="/usr/local/${PACKAGE}"
WEB_DIR="/var/services/web"
PATH="${INSTALL_DIR}/bin:${PATH}"
USER="nobody"
HORDE="${INSTALL_DIR}/bin/horde.sh"
PID_FILE="${INSTALL_DIR}/var/horde.pid"

start_daemon ()
{
    start-stop-daemon -S -q -m -b -N 10 -x ${HORDE} -c ${USER} -u ${USER} -p ${PID_FILE} > /dev/null
}

stop_daemon ()
{
    start-stop-daemon -K -q -u ${USER} -p ${PID_FILE}
    wait_for_status 1 20 || start-stop-daemon -K -s 9 -q -p ${PID_FILE}
}

daemon_status ()
{
    start-stop-daemon -K -q -t -u ${USER} -p ${PID_FILE}
}

wait_for_status ()
{
    counter=$2
    while [ ${counter} -gt 0 ]; do
        daemon_status
        [ $? -eq $1 ] && return
        let counter=counter-1
        sleep 1
    done
    return 1
}

case $1 in
    start)
        if daemon_status; then
            echo ${DNAME} is already running
        else
            echo Starting ${DNAME} ...
            start_daemon
        fi
        ;;
    stop)
        if daemon_status; then
            echo Stopping ${DNAME} ...
            stop_daemon
        else
            echo ${DNAME} is not running
        fi
        ;;
    status)
        if daemon_status; then
            echo ${DNAME} is running
            exit 0
        else
            echo ${DNAME} is not running
            exit 1
        fi
        ;;
    log)
        exit 1
        ;;
    *)
        exit 1
        ;;
esac
momiji/spksrc
spk/horde/src/dsm-control.sh
Shell
bsd-3-clause
1,348
#!/bin/bash

dir=/home/mingfus/data/repositories/scalloptest/
bin=$dir/programs
datadir=$dir/data/encode65
list=$dir/data/encode65.list
result=$dir/plots/encode65/collect.B759/cov
scripts=./scripts

mkdir -p $result
rm -rf $scripts

for k in `cat $list | cut -f 1 -d ":"`
do
    echo $k
    nohup $bin/bamkit $datadir/$k.bam > $result/$k &
    #echo "$bin/bamkit $datadir/$k.bam > $result/$k" >> $scripts
done

#cat $scripts | xargs -L 1 -I CMD -P 30 bash -c CMD
Kingsford-Group/scalloptest
plots/collect.encode65.cov.sh
Shell
bsd-3-clause
454
#!/bin/bash

FN="pasilla_1.18.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.12/data/experiment/src/contrib/pasilla_1.18.0.tar.gz"
  "https://bioarchive.galaxyproject.org/pasilla_1.18.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-pasilla/bioconductor-pasilla_1.18.0_src_all.tar.gz"
)
MD5="03ef56df3c4df1bd5d6d839bfcb06a04"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN

SUCCESS=0
for URL in ${URLS[@]}; do
  curl $URL > $TARBALL
  [[ $? == 0 ]] || continue

  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  else
    if [[ $(uname -s) == "Darwin" ]]; then
      if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
        SUCCESS=1
        break
      fi
    fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
blankenberg/bioconda-recipes
recipes/bioconductor-pasilla/post-link.sh
Shell
mit
1,287
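The Linux/Darwin branch above can be factored into a single helper; a sketch assuming GNU md5sum on Linux and BSD md5 on macOS, the same tools the original relies on:

#!/bin/bash
md5_matches() {
  local expected="$1" file="$2" actual
  case "$(uname -s)" in
    Linux)  actual=$(md5sum "$file" | cut -d ' ' -f 1) ;;
    Darwin) actual=$(md5 -q "$file") ;;   # -q prints the bare digest
    *)      return 1 ;;
  esac
  [[ "$actual" == "$expected" ]]
}

md5_matches "03ef56df3c4df1bd5d6d839bfcb06a04" "$TARBALL" && echo "checksum OK"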
#!/bin/bash

python generateProject.py

result=${PWD##*/}
EV3=".ev3"
fn=$result$EV3

rm ../$fn
zip ../$fn -x"README.md" -x"*.ev3" -x"*.svg" -x"*.py" -x"*.sh" *
cp ../$fn robot.ev3
alan412/AnimalAllies_447
makeBin.sh
Shell
mit
178
# Function for setting a variable to a default value if not already set.
#
# @author Oktay Acikalin <[email protected]>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)

function var.default () {
    [ -z "${!1}" ] && declare -rg "${1}=$2"
}
oktayacikalin/project-service
lib/var.default.sh
Shell
mit
261
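Example usage (values are illustrative): the variable name is passed as a string and resolved through ${!1} indirection, and declare -rg makes the result read-only and global:

#!/bin/bash
source lib/var.default.sh            # path as in this repo

var.default SERVICE_PORT 8080        # unset -> becomes 8080 (read-only)
SERVICE_HOST=db.internal
var.default SERVICE_HOST localhost   # already set -> keeps db.internal
echo "$SERVICE_HOST:$SERVICE_PORT"   # prints db.internal:8080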
/usr/bin/bcompare "$6" "$7" -title1="$3" -title2="$5"
exit 0
yun-percy/Android_Commond_Tools
config/bcdiff.sh
Shell
mit
62
#!/bin/bash
# Setup script for configuring Ubuntu 14.04 EC2 instance

# Install curl if not installed
sudo apt-get install -y curl

# Install git
sudo apt-get install -y git

# Yeoman scaffolding tool and angular-fullstack code generator
# http://yeoman.io/
#npm install -g yo generator-angular-fullstack

# Need libfontconfig package to run PhantomJS
#sudo apt-get install -y libfontconfig

# Install heroku toolbelt
# https://toolbelt.heroku.com/debian
wget -O- https://toolbelt.heroku.com/install-ubuntu.sh | sh

# Create repositories directory
cd $HOME
if [ -d ./repositories/ ]; then
    mv repositories repositories.old
fi
mkdir $HOME/repositories

# Install and configure docker
# https://docs.docker.com/linux/step_one/
curl -fsSL https://get.docker.com/ | sh

# Run docker without sudo (need to re-login to apply changes)
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
sudo usermod -aG docker $USER

# TODO: Add configuration as well
# Have a look at https://github.com/startup-class/setup

# Add screen configuration
cd $HOME
wget raw.github.com/startup-class/dotfiles/master/.screenrc -O .screenrc
pavel-r/dev-env-setup
setup.sh
Shell
mit
1,124
#!/bin/sh
#
# Copyright (c) 2010, Will Palmer
# Copyright (c) 2011, Alexey Shumkin (+ non-UTF-8 commit encoding tests)
#

test_description='Test pretty formats'
. ./test-lib.sh

sample_utf8_part=$(printf "f\303\244ng")

commit_msg () {
    # String "initial. initial" partly in German
    # (translated with Google Translate),
    # encoded in UTF-8, used as a commit log message below.
    msg="initial. an${sample_utf8_part}lich\n"
    if test -n "$1"
    then
        printf "$msg" | iconv -f utf-8 -t "$1"
    else
        printf "$msg"
    fi
}

test_expect_success 'set up basic repos' '
    >foo &&
    >bar &&
    git add foo &&
    test_tick &&
    git config i18n.commitEncoding iso8859-1 &&
    commit_msg iso8859-1 > commit_msg &&
    git commit --file commit_msg &&
    git add bar &&
    test_tick &&
    git commit -m "add bar" &&
    git config --unset i18n.commitEncoding
'

test_expect_success 'alias builtin format' '
    git log --pretty=oneline >expected &&
    git config pretty.test-alias oneline &&
    git log --pretty=test-alias >actual &&
    test_cmp expected actual
'

test_expect_success 'alias masking builtin format' '
    git log --pretty=oneline >expected &&
    git config pretty.oneline "%H" &&
    git log --pretty=oneline >actual &&
    test_cmp expected actual
'

test_expect_success 'alias user-defined format' '
    git log --pretty="format:%h" >expected &&
    git config pretty.test-alias "format:%h" &&
    git log --pretty=test-alias >actual &&
    test_cmp expected actual
'

test_expect_success 'alias user-defined tformat with %s (iso8859-1 encoding)' '
    git config i18n.logOutputEncoding iso8859-1 &&
    git log --oneline >expected-s &&
    git log --pretty="tformat:%h %s" >actual-s &&
    git config --unset i18n.logOutputEncoding &&
    test_cmp expected-s actual-s
'

test_expect_success 'alias user-defined tformat with %s (utf-8 encoding)' '
    git log --oneline >expected-s &&
    git log --pretty="tformat:%h %s" >actual-s &&
    test_cmp expected-s actual-s
'

test_expect_success 'alias user-defined tformat' '
    git log --pretty="tformat:%h" >expected &&
    git config pretty.test-alias "tformat:%h" &&
    git log --pretty=test-alias >actual &&
    test_cmp expected actual
'

test_expect_success 'alias non-existent format' '
    git config pretty.test-alias format-that-will-never-exist &&
    test_must_fail git log --pretty=test-alias
'

test_expect_success 'alias of an alias' '
    git log --pretty="tformat:%h" >expected &&
    git config pretty.test-foo "tformat:%h" &&
    git config pretty.test-bar test-foo &&
    git log --pretty=test-bar >actual &&
    test_cmp expected actual
'

test_expect_success 'alias masking an alias' '
    git log --pretty=format:"Two %H" >expected &&
    git config pretty.duplicate "format:One %H" &&
    git config --add pretty.duplicate "format:Two %H" &&
    git log --pretty=duplicate >actual &&
    test_cmp expected actual
'

test_expect_success 'alias loop' '
    git config pretty.test-foo test-bar &&
    git config pretty.test-bar test-foo &&
    test_must_fail git log --pretty=test-foo
'

test_expect_success 'NUL separation' '
    printf "add bar\0$(commit_msg)" >expected &&
    git log -z --pretty="format:%s" >actual &&
    test_cmp expected actual
'

test_expect_success 'NUL termination' '
    printf "add bar\0$(commit_msg)\0" >expected &&
    git log -z --pretty="tformat:%s" >actual &&
    test_cmp expected actual
'

test_expect_success 'NUL separation with --stat' '
    stat0_part=$(git diff --stat HEAD^ HEAD) &&
    stat1_part=$(git diff-tree --no-commit-id --stat --root HEAD^) &&
    printf "add bar\n$stat0_part\n\0$(commit_msg)\n$stat1_part\n" >expected &&
    git log -z --stat --pretty="format:%s" >actual &&
    test_i18ncmp expected actual
'

test_expect_failure 'NUL termination with --stat' '
    stat0_part=$(git diff --stat HEAD^ HEAD) &&
    stat1_part=$(git diff-tree --no-commit-id --stat --root HEAD^) &&
    printf "add bar\n$stat0_part\n\0$(commit_msg)\n$stat1_part\n\0" >expected &&
    git log -z --stat --pretty="tformat:%s" >actual &&
    test_i18ncmp expected actual
'

test_expect_success 'setup more commits' '
    test_commit "message one" one one message-one &&
    test_commit "message two" two two message-two &&
    head1=$(git rev-parse --verify --short HEAD~0) &&
    head2=$(git rev-parse --verify --short HEAD~1) &&
    head3=$(git rev-parse --verify --short HEAD~2) &&
    head4=$(git rev-parse --verify --short HEAD~3)
'

test_expect_success 'left alignment formatting' '
    git log --pretty="format:%<(40)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
message two                            Z
message one                            Z
add bar                                Z
$(commit_msg)                    Z
EOF
    test_cmp expected actual
'

test_expect_success 'left alignment formatting at the nth column' '
    git log --pretty="format:%h %<|(40)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
$head1 message two                    Z
$head2 message one                    Z
$head3 add bar                        Z
$head4 $(commit_msg)            Z
EOF
    test_cmp expected actual
'

test_expect_success 'left alignment formatting with no padding' '
    git log --pretty="format:%<(1)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    cat <<EOF >expected &&
message two
message one
add bar
$(commit_msg)
EOF
    test_cmp expected actual
'

test_expect_success 'left alignment formatting with trunc' '
    git log --pretty="format:%<(10,trunc)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
message ..
message ..
add bar  Z
initial...
EOF
    test_cmp expected actual
'

test_expect_success 'left alignment formatting with ltrunc' '
    git log --pretty="format:%<(10,ltrunc)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
..sage two
..sage one
add bar  Z
..${sample_utf8_part}lich
EOF
    test_cmp expected actual
'

test_expect_success 'left alignment formatting with mtrunc' '
    git log --pretty="format:%<(10,mtrunc)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
mess.. two
mess.. one
add bar  Z
init..lich
EOF
    test_cmp expected actual
'

test_expect_success 'right alignment formatting' '
    git log --pretty="format:%>(40)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
Z                            message two
Z                            message one
Z                                add bar
Z                    $(commit_msg)
EOF
    test_cmp expected actual
'

test_expect_success 'right alignment formatting at the nth column' '
    git log --pretty="format:%h %>|(40)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
$head1                      message two
$head2                      message one
$head3                          add bar
$head4              $(commit_msg)
EOF
    test_cmp expected actual
'

test_expect_success 'right alignment formatting with no padding' '
    git log --pretty="format:%>(1)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    cat <<EOF >expected &&
message two
message one
add bar
$(commit_msg)
EOF
    test_cmp expected actual
'

test_expect_success 'center alignment formatting' '
    git log --pretty="format:%><(40)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
Z             message two              Z
Z             message one              Z
Z               add bar                Z
Z         $(commit_msg)          Z
EOF
    test_cmp expected actual
'

test_expect_success 'center alignment formatting at the nth column' '
    git log --pretty="format:%h %><|(40)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    qz_to_tab_space <<EOF >expected &&
$head1           message two          Z
$head2           message one          Z
$head3             add bar            Z
$head4       $(commit_msg)      Z
EOF
    test_cmp expected actual
'

test_expect_success 'center alignment formatting with no padding' '
    git log --pretty="format:%><(1)%s" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    cat <<EOF >expected &&
message two
message one
add bar
$(commit_msg)
EOF
    test_cmp expected actual
'

test_expect_success 'left/right alignment formatting with stealing' '
    git commit --amend -m short --author "long long long <[email protected]>" &&
    git log --pretty="format:%<(10,trunc)%s%>>(10,ltrunc)% an" >actual &&
    # complete the incomplete line at the end
    echo >>actual &&
    cat <<EOF >expected &&
short long long long
message ..  A U Thor
add bar     A U Thor
initial...  A U Thor
EOF
    test_cmp expected actual
'

test_done
spritetong/msysgit-git-utf8
t/t4205-log-pretty-formats.sh
Shell
gpl-2.0
8,756
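For reference, the padding placeholders the alignment tests above exercise can be tried directly; these one-liners are illustrative and not part of the test suite:

# %<(N[,trunc|ltrunc|mtrunc]) left-aligns (and optionally truncates) the next
# placeholder in N columns; %>(N) right-aligns, %><(N) centers, and %<|(N)
# pads out to absolute column N.
git log --pretty="format:%h %<(30,trunc)%s %an"
git log --pretty="format:%>(12)%an %s"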
#!/bin/sh /etc/rc.common

user_name=admin
user_home=/tmp/greendownload
green_dl_path="$(/bin/config get green_download_path)"
green_volume_uuid="$(/bin/config get green_volume_uuid)"
mount_point=/tmp/jffs2_green_download
work_dir=$user_home/work_dir
ftp_work_dir=$work_dir/ftp
bt_work_dir=$work_dir/bt
emule_work_dir=$work_dir/emule
statfifo_work_dir=$user_home/statfifo
swap_dir=$work_dir/swap
green_download_swap=$swap_dir/swap_file
green_download_swap_size=64
green_download_filename=greendownload_config_donot_delete
green_dl_uprate="$(/bin/config get green_download_max_uprate)"
green_dl_downrate="$(/bin/config get green_download_max_downrate)"
green_dl_max_tasks_run="$(/bin/config get green_download_max_tasks_run)"
green_dl_max_tasks_all="$(/bin/config get green_download_max_tasks_all)"
green_download_status_test="$(/bin/config get green_download_status)"
start_from_GUI=1

is_prru() {
    local firmware_region=`cat /tmp/firmware_region | awk '{print $1}'`
    local gui_region=$($CONFIG get GUI_Region)
    if [ "$firmware_region" = "" -o "$firmware_region" = "WW" ]; then
        if [ "$gui_region" = "Russian" -o "$gui_region" = "Chinese" ]; then
            echo "0"
        else
            echo "1"
        fi
    elif [ "$firmware_region" = "RU" -o "$firmware_region" = "PR" ]; then
        echo "0"
    else
        echo "1"
    fi
}

is_dafake(){
    local curr_uuid
    [ "x$(ls /tmp/mnt)" = "x" ] && echo "No USB Drive for greendownload, exit" && exit
    sd=$(echo $1 | awk -F"/" '{print $3}')
    curr_uuid=$(vol_id -u /dev/$sd 2>/dev/null)
    # both Drive and mount point not changed
    if [ "x$curr_uuid" = "x$2" ]; then
        [ -d $1 ] && return 0
    fi
    # modify download path
    [ -d $1 -a "x$curr_uuid" != "x" ] && green_volume_uuid=$curr_uuid && return 0
    for sdx in $(ls /tmp/mnt)
    do
        curr_uuid=$(vol_id -u /dev/$sdx 2>/dev/null)
        [ "x$curr_uuid" = "x" ] && continue
        # Drive not changed, but mount point changed
        if [ "x$2" = "x$curr_uuid" ]; then
            folder=$(echo $1 | cut -d'/' -f4-)
            green_dl_path="/mnt/$sdx/$folder"
            [ -d $green_dl_path ] && return 0
        fi
    done
    return 1
}

start() {
    local download_state="$(/bin/config get green_download_enable)"
    [ "x$download_state" != "x1" ] && exit
    #[ `is_prru` = "1" ] && exit
    # flush block buff
    [ "$green_download_status_test" -eq "-1" ] && start_from_GUI=0
    /bin/config set green_download_status=0
    #/bin/config set green_download_enable=0
    sync
    #echo 3 > /proc/sys/vm/drop_caches
    if [ "x$green_volume_uuid" = "x" ]; then
        [ "x$green_dl_path" = "x" ] && green_dl_path="/mnt/sda1"
        if [ -d $green_dl_path -a "x$(vol_id -u /dev/sda1 2>/dev/null)" != "x" ]; then
            green_volume_uuid=$(vol_id -u /dev/sda1 2>/dev/null)
            /bin/config set green_download_path=$green_dl_path
            /bin/config set green_volume_uuid=$green_volume_uuid
        else
            [ "x$(ls /tmp/mnt)" = "x" ] && echo "No USB Drive for greendownload, exit" && exit
            for sdx in $(ls /tmp/mnt)
            do
                [ ! -d "$sdx" ] && continue
                current_uuid=$(vol_id -u /dev/$sdx 2>/dev/null)
                [ "x$current_uuid" = "x" ] && continue
                green_dl_path="/mnt/$sdx"
                green_volume_uuid=$current_uuid
                /bin/config set green_download_path=$green_dl_path
                /bin/config set green_volume_uuid=$green_volume_uuid
                break
            done
        fi
    else
        if is_dafake $green_dl_path $green_volume_uuid ; then
            /bin/config set green_download_path=$green_dl_path
            /bin/config set green_volume_uuid=$green_volume_uuid
        fi
    fi

    dev=$green_dl_path

    # prepare download directory and check that it is accessible
    [ ! -d "$green_dl_path" ] && {
        #[ $start_from_GUI -eq 1 ] && /bin/config set green_download_status=2
        [ "x$(ls /tmp/mnt)" = "x" ] && echo "No USB Drive for greendownload, exit" && exit
        for sdx in $(ls /tmp/mnt)
        do
            [ ! -d "/mnt/$sdx" ] && continue
            current_uuid=$(vol_id -u /dev/$sdx 2>/dev/null)
            [ "x$current_uuid" = "x" ] && continue
            green_dl_path="/mnt/$sdx"
            green_volume_uuid=$current_uuid
            /bin/config set green_download_path=$green_dl_path
            /bin/config set green_volume_uuid=$green_volume_uuid
            /bin/config set green_disk_lable="U:/"
            break
        done
        dev=$green_dl_path
    }

    # test filesystem can write?
    /bin/touch "$green_dl_path/gl"
    [ ! -f "$green_dl_path/gl" ] && {
        echo "Filesystem can't write, try to remount..." > /dev/console
        mount -o remount rw $dev
        # test filesystem can write?
        /bin/touch "$green_dl_path/gl"
        [ ! -f "$green_dl_path/gl" ] && {
            [ $start_from_GUI -eq 1 ] && /bin/config set green_download_status=4
            echo "Filesystem can't write, exit..." > /dev/console && /bin/config commit && exit
        }
        /bin/rm "$green_dl_path/gl"
    }
    /bin/rm "$green_dl_path/gl"

    # prepare work directory
    if [ "x$1" != "x" ]; then
        echo "do reload, copy download information from $1/$green_download_filename" > /dev/console
        [ -d "$dev/$green_download_filename" ] && rm -rf "$dev/$green_download_filename"
        rm -rf "$1/$green_download_filename/emule/Temp"
        rm -rf "$1/$green_download_filename/emule/Incoming"
        mv "$1/$green_download_filename" "$dev"
    fi
    [ ! -d "$dev/$green_download_filename" ] && mkdir -p "$dev/$green_download_filename"
    [ ! -d "$dev/$green_download_filename/ftp" ] && mkdir -p "$dev/$green_download_filename/ftp"
    [ ! -d "$dev/$green_download_filename/bt" ] && mkdir -p "$dev/$green_download_filename/bt"
    [ ! -d "$dev/$green_download_filename/emule" ] && mkdir -p "$dev/$green_download_filename/emule"
    [ ! -d "$dev/$green_download_filename/swap" ] && mkdir -p "$dev/$green_download_filename/swap"

    # copy the status file for GUI.
    if [ ! -s "$dev/$green_download_filename/status" ] && [ -s "$dev/$green_download_filename/status.bak" ]; then
        cp "$dev/$green_download_filename/status.bak" "$dev/$green_download_filename/status"
    fi
    [ -f "$dev/$green_download_filename/status" ] && cp "$dev/$green_download_filename/status" /tmp/dl_status
    [ -f "$dev/$green_download_filename/status" ] && cp "$dev/$green_download_filename/status" $work_dir/status
    [ -f "$dev/$green_download_filename/downloaded_total" ] && tail -n 10 "$dev/$green_download_filename/downloaded_total" > /tmp/dl_downloaded
    sync && sleep 1

    if [ ! -d "$dev/$green_download_filename" -o ! -d "$dev/$green_download_filename/ftp" -o ! -d "$dev/$green_download_filename/bt" -o ! -d "$dev/$green_download_filename/emule" -o ! -d "$dev/$green_download_filename/swap" ]; then
        [ $start_from_GUI -eq 1 ] && /bin/config set green_download_status=4
        echo "Cannot create work dir on device for app, exit ..." && /bin/config commit && exit
    fi

    mkdir -p $statfifo_work_dir
    rm -rf $work_dir
    ln -s "$dev/$green_download_filename" $work_dir
    if [ ! -d $work_dir ]; then
        [ $start_from_GUI -eq 1 ] && /bin/config set green_download_status=4
        echo "Cannot create work dir link for app, exit ..." && /bin/config commit && exit
    fi

    # check upload speed rate & download speed rate, value 0 means no limit
    if [ -n "$green_dl_uprate" -a "$green_dl_uprate" -ge "0" ]; then
        echo "Upload limit speed is $green_dl_uprate KB/s"
    else
        green_dl_uprate=0 && /bin/config set green_download_max_uprate=0
    fi
    if [ -n "$green_dl_downrate" -a "$green_dl_downrate" -ge "0" ]; then
        echo "Download limit speed is $green_dl_downrate KB/s"
    else
        green_dl_downrate=0 && /bin/config set green_download_max_downrate=0
    fi

    # prepare update files
    #/bin/cp $mount_point/* / -rf #&& chmod 777 /etc/aMule/ -R

    # check swap
    #if [ ! -f /proc/swaps ]; then
    #    echo "Kernel do not support swap.."
    #else
    #    # prepare swap partition
    #    swapoff $green_download_swap
    #    [ ! -f $green_download_swap ] && {
    #        dd if=/dev/zero of=$green_download_swap bs=1M count=$green_download_swap_size
    #        mkswap $green_download_swap
    #    }
    #    swapon $green_download_swap
    #    sleep 1
    #    cat /proc/swaps | grep swap_file || echo "Enable swap failed.."
    #fi

    # add firewall rule as well as net-wall, this should be added before starting the app
    # start app
    # start amuled daemon
    #/etc/aMule/amule.sh restart $emule_work_dir
    # wget doesn't need to start
    # run transmission-daemon
    #/usr/bin/transbt.sh start
    # start greendownload daemon
    sync
    sleep 1
    # flush block buff
    #echo 3 > /proc/sys/vm/drop_caches
    /usr/sbin/net-wall rule
    /usr/sbin/net-wall start
    echo "Start greendownload core..."
    # Since "sync" was removed from the greendownload code, set this proc value to 100.
    echo 100 > /proc/sys/vm/dirty_writeback_centisecs
    /bin/config set previous_green_download_path=$green_dl_path
    # delete last status file.
    rm -rf /tmp/emule_tasks
    rm -rf /tmp/transbt_list
    greendownload -w $work_dir -s $statfifo_work_dir -u $green_dl_uprate -d $green_dl_downrate -r $green_dl_max_tasks_run -a $green_dl_max_tasks_all
}

do_stop() {
    # Since "sync" was removed from the greendownload code, set this proc value back to the default 500.
    echo 500 > /proc/sys/vm/dirty_writeback_centisecs
    sync
    sync
    killall greendownload
    sleep 3
    #swapoff $green_download_swap
    /bin/rm -rf $statfifo_work_dir
    /bin/rm -rf $work_dir
    /bin/rm -rf $user_home
    # stop transmission-daemon
    /usr/bin/transbt.sh stop
    # stop amuled
    /etc/aMule/amule.sh stop
    # stop wget
    killall wget
    #/bin/config set green_download_enable=0
    /bin/config set green_download_status=0
    /bin/config commit
    rm -f /tmp/dl_status
}

stop() {
    #[ `is_prru` = "1" ] && exit
    if [ $# = 2 ]; then
        dev=`echo $green_dl_path | cut -d "/" -f3`
        if echo $dev | grep -q $2; then
            do_stop
        fi
    else
        do_stop
    fi
}

reload() {
    local last_path="$(/bin/config get previous_green_download_path)"
    local new_path="$(/bin/config get green_download_path)"
    if [ "$last_path" = "$new_path" ]; then
        local run=`ps | grep -c greendownload`
        if [ "x$run" = "x0" ]; then
            start
        else
            echo "save path not changed, return" > /dev/console
        fi
        exit
    fi
    killall -SIGUSR1 greendownload
    start "$last_path"
}

restart() {
    stop
    start
}
paul-chambers/netgear-r7800
package/green-download/files/green_download.sh
Shell
gpl-2.0
9,820
#!/bin/bash

if [ -z "$TERMINUS_TOKEN" ]; then
    echo "TERMINUS_TOKEN environment variable missing; assuming unauthenticated build"
    exit 0
fi

# Exit immediately, but don't expose $TERMINUS_TOKEN
set -e
set +x

git clone --branch master https://github.com/pantheon-systems/terminus.git ~/terminus
cd ~/terminus && composer install

terminus auth:login --machine-token=$TERMINUS_TOKEN
pantheon-systems/solr-for-wordpress
bin/install-terminus.sh
Shell
gpl-2.0
384
set -x -e -o pipefail

GH_MYMAKE_ARGS="-fPIC"

HYPERROGUE_USE_GLEW=$GH_HYP_GLEW
export HYPERROGUE_USE_GLEW=${HYPERROGUE_USE_GLEW: -1}
HYPERROGUE_USE_PNG=$GH_HYP_PNG
export HYPERROGUE_USE_PNG=${HYPERROGUE_USE_PNG: -1}
HYPERROGUE_USE_ROGUEVIZ=$GH_HYP_RVIZ
export HYPERROGUE_USE_ROGUEVIZ=${HYPERROGUE_USE_ROGUEVIZ: -1}

if [[ "$GH_HYP_RVIZ" == "rviz_1" ]]; then
    GH_MYMAKE_ARGS+=" -std=c++17 -rv"
fi

export CC=$GH_COMPILER
export CXX=${CC%cc}++

if [[ "$GH_BUILDSYS" == "makefile" ]]; then
    make
elif [[ "$GH_BUILDSYS" == "mymake" ]]; then
    make mymake
    ./mymake $GH_MYMAKE_ARGS
    mv hyper hyperrogue
else
    echo 'unknown build system'
    exit 1
fi
zenorogue/hyperrogue
.github/workflows/build.sh
Shell
gpl-2.0
651
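A sketch of the two parameter expansions the build script above relies on (the variable values are illustrative): "${var: -1}" takes the last character (the space keeps it distinct from ${var:-default}), and "${CC%cc}++" maps a C compiler name to its C++ counterpart.

#!/bin/bash
GH_HYP_GLEW="glew_1"
echo "${GH_HYP_GLEW: -1}"   # -> 1 (last character of the matrix variable)

CC=gcc
echo "${CC%cc}++"           # -> g++ ("cc" suffix stripped)
CC=clang
echo "${CC%cc}++"           # -> clang++ (pattern doesn't match, name kept)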
#
# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

# @test
# @bug 6528083
# @summary Test RMI Bootstrap with SSL
#
# @library /lib/testlibrary
# @modules java.management/sun.management
#          java.management/sun.management.jmxremote
# @build jdk.testlibrary.* TestLogger Utils RmiBootstrapTest
# @run shell/timeout=300 RmiSslBootstrapTest.sh

# Define the Java class test name
TESTCLASS="RmiBootstrapTest"
export TESTCLASS

# Source in utility shell script to generate and remove .properties and .acl files
. ${TESTSRC}/GeneratePropertyPassword.sh

generatePropertyPasswordFiles `ls ${TESTSRC}/*_ssltest*.in`

rm -rf ${TESTCLASSES}/ssl
mkdir -p ${TESTCLASSES}/ssl
cp -rf ${TESTSRC}/ssl/*store ${TESTCLASSES}/ssl
chmod -R 777 ${TESTCLASSES}/ssl

DEBUGOPTIONS=""
export DEBUGOPTIONS

EXTRAOPTIONS="--add-exports java.management/sun.management=ALL-UNNAMED \
 --add-exports java.management/sun.management.jmxremote=ALL-UNNAMED"
export EXTRAOPTIONS

# Call the common generic test
#
echo -------------------------------------------------------------
echo Launching test for `basename $0 .sh`
echo -------------------------------------------------------------
sh ${TESTSRC}/../RunTest.sh ${DEBUGOPTIONS} ${EXTRAOPTIONS} ${TESTCLASS} \
    ${TESTCLASSES}/management_ssltest*.properties
result=$?

restoreFilePermissions `ls ${TESTSRC}/*_ssltest*.in`

exit $result
FauxFaux/jdk9-jdk
test/sun/management/jmxremote/bootstrap/RmiSslBootstrapTest.sh
Shell
gpl-2.0
2,351
#!/bin/bash

if [ $# -eq 0 ]
then
    tests=($(ls -d test*))
else
    tests=()
    for arg in "$@"
    do
        if [ -d "$arg" ]
        then
            tests+=($arg)
        fi
    done
fi

echo "The pdf for the following tests will be created:"
echo " ${tests[@]}"
echo ""

if [ ! -d pdf ]
then
    mkdir pdf
fi

rm pdf/*

for test in "${tests[@]}"
do
    echo "creating pdf for $test"
    root -l "macroPdf.cpp(\"${test//\/}\")"
    mv pdf/out.pdf pdf/${test//\/}.pdf
    echo "created pdf for $test"
done
trianam/tkLayoutTests
TestRoutingOuter/createPdf.sh
Shell
gpl-2.0
514
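A sketch of the "${var//\/}" expansion used twice above (the value is illustrative): it deletes every "/" so a directory name such as "test3/" is safe to embed in a file name.

#!/bin/bash
test="test3/"
echo "${test//\/}"          # -> test3
echo "pdf/${test//\/}.pdf"  # -> pdf/test3.pdf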
# *******************************************************************************
# *******************************************************************************
# **                                                                           **
# **                                                                           **
# **  Copyright 2015-2017 JK Technosoft                                        **
# **  http://www.jktech.com                                                    **
# **                                                                           **
# **  ProActio is free software; you can redistribute it and/or modify it     **
# **  under the terms of the GNU General Public License (GPL) as published    **
# **  by the Free Software Foundation; either version 2 of the License, or    **
# **  (at your option) any later version.                                     **
# **                                                                           **
# **  ProActio is distributed in the hope that it will be useful, but WITHOUT **
# **  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or   **
# **  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License    **
# **  for more details.                                                       **
# **                                                                           **
# **  See TNC.TXT for more information regarding the Terms and Conditions     **
# **  of use and alternative licensing options for this software.             **
# **                                                                           **
# **  A copy of the GPL is in GPL.TXT which was provided with this package.   **
# **                                                                           **
# **  See http://www.fsf.org for more information about the GPL.              **
# **                                                                           **
# **                                                                           **
# *******************************************************************************
# *******************************************************************************
#
# Author:
#
#   JK Technosoft
#   http://www.jktech.com
#   August 11, 2015
#
# History:
#

# Source in Proactio environment variables
. `sh /getpath.sh CONFIG`/proactio.profile

SCRIPT_NAME=atransaction_drv.p

echo "${DATE}: Long Running Transactions alert script started at ${HOSTNAME}" >> ${LOGDIR}/atransaction.log

RESULT=`$DLC/bin/_progres -b -p ${PROGDIR}/${SCRIPT_NAME}`
STATUS=$?
echo ${RESULT} >> ${LOGDIR}/atransaction.log

if [ $STATUS -eq "0" ]
then
    echo "${DATE}: Long Running Transactions alert script executed successfully on ${HOSTNAME}" >> ${LOGDIR}/atransaction.log
else
    ERRMSG="${DATE}: Errors occurred while executing Long Running Transactions alert script on hostname = ${HOSTNAME} : script name = ${SCRIPT_NAME}"
    echo $ERRMSG >> ${LOGDIR}/atransaction.log
    echo $ERRMSG | mailx -s "Proactio Notification Mail" ${EMAIL_LIST}
fi
JKT-OSSCLUB/ProActio
Proactio/cron/atransaction.sh
Shell
gpl-2.0
3,046
#!/bin/sh
#
# srecord - manipulate eprom load files
# Copyright (C) 1999, 2003, 2006-2008 Peter Miller
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
TEST_SUBJECT="WindowsNT binary"
. test_prelude

cat > test.in << 'fubar'
S00600004844521B
S11A0000546869732069730A61206E617374790D0A746573742E0D85
S5030001FB
fubar
if test $? -ne 0; then no_result; fi

srec_cat test.in -o test.int -bin > log 2>&1
if test $? -ne 0; then cat log; fail; fi

srec_cat test.int -bin -o test.out -header HDR > log 2>&1
if test $? -ne 0; then cat log; fail; fi

diff test.in test.out
if test $? -ne 0; then fail; fi

#
# The things tested here, worked.
# No other guarantees are made.
#
pass
freyc/SRecord
test/00/t0030a.sh
Shell
gpl-3.0
1,340
#./run_parser.sh <parser_name> <db_name> <version> <input_file_name>(default value: "") <is_promiscous>(default 0, 1:promiscous) <default_attribute>(if applicable)
#./run_parser.sh uniprot uniprot_sprot Feb_5_09 uniprot_sprot.dat.gz 0
#./run_parser.sh uniprot uniprot_trembl Feb_5_09 uniprot_trembl.dat.gz 0
#./run_parser.sh taxonomy taxonomy Feb_4_09 "" 0
#./run_parser.sh hgnc hgnc Feb_5_09 hgnc.txt 0
#./run_parser.sh go_obo go Feb_12_09 gene_ontology_edit.obo 0
#./run_parser.sh psi_mi_obo psi_mi_obo Feb13 psi-mi25.obo 0
#./run_parser.sh psi_mi_2.5 intact Jan13 "" 1 intact
#./run_parser.sh psi_mi_2.5 biogrid Jan13 "" 1 accessionnumber
#./run_parser.sh psi_mi_2.5 hprd Apr10 "" 1 hprd
#./run_parser.sh psi_mi_2.5 mint Dec11 "" 1 mint
#./run_parser.sh psi_mi_2.5 dip Jan13 "" 1 dip
#./run_parser.sh psi_mi_2.5 mpact Oct08 "" 1 cygd
#./run_parser.sh psi_mi_2.5 bind Unk05 "" 0 name # not in psi_mi_2.5 format (in psi_mi_2 format)
./run_parser.sh biopax_level_2 reactome Dec12 biopax.zip 1 reactome
#./run_parser.sh string string v7.1 "" 1
#./run_parser.sh kegg_ko kegg_ko Feb_5_09 ko 1
#./run_parser.sh kegg_ligand kegg_ligand Feb_5_09 "" 0
#./run_parser.sh kegg_gene kegg_gene Feb_5_09 genes.tar.gz 0
#./run_parser.sh ipi ipi Feb_12_09 "" 0
#./run_parser.sh cog cog Mar_5_03 "" 1
#./run_parser.sh scop scop 1.73 "" 1
#./run_parser.sh pfam pfam Feb_12_09 "" 1
#./run_parser.sh ncbi_genpept genpept v1 "" 0
#./run_parser.sh nr nr v1 "" 0
#./run_parser.sh generic input_database 1 input_database4_v2.txt 0 name
emreg00/biana
scripts/parsing/parse_external_dbs.sh
Shell
gpl-3.0
1,516
#! /bin/bash
g09 < $1 > $1.out
grep "^ Energy= " $1.out | sed s/" Energy= "/""/ | sed s/"NIter=.*"/""/
matthiaslein/WellFARe-FF
run-gauss.bash
Shell
gpl-3.0
106
#!/bin/sh -e

BEATS_VERSION="1.2.3"

wget -q "https://download.elastic.co/beats/winlogbeat/winlogbeat-${BEATS_VERSION}-windows.zip"
unzip "winlogbeat-${BEATS_VERSION}-windows.zip"
Graylog2/graylog-plugin-beats
vagrant_winlogbeat/download.sh
Shell
gpl-3.0
179
#!/bin/sh

HOME=$DEBUG_REAL_HOME steam steam://install/400
echo $? > ~/install-exit-status

unzip -o pts-portal-1.zip
mv pts-portal-1.dem $DEBUG_REAL_HOME/.steam/steam/steamapps/common/Portal/portal

echo "#!/bin/bash
. steam-env-vars.sh
cd \$DEBUG_REAL_HOME/.steam/steam/steamapps/common/Portal
./hl2_linux -game portal +con_logfile \$LOG_FILE +cl_showfps 1 +timedemoquit pts-portal-1 -novid -fullscreen \$@" > portal
chmod +x portal
phoronix-test-suite/phoronix-test-suite
ob-cache/test-profiles/pts/portal-1.1.2/install.sh
Shell
gpl-3.0
436
#!/bin/bash
set -e -o pipefail

help() {
    echo
    echo 'Usage ./setup.sh ~/path/to/MANTA_PRIVATE_KEY'
    echo
    echo 'Checks that your Triton and Docker environment is sane and configures'
    echo 'an environment file to use.'
    echo
    echo 'MANTA_PRIVATE_KEY is the filesystem path to an SSH private key'
    echo 'used to connect to Manta for the database backups.'
    echo
    echo 'Additional details must be configured in the _env file, but this script will properly'
    echo 'encode the SSH key details for use with this MySQL image.'
    echo
}

# populated by `check` function whenever we're using Triton
TRITON_USER=
TRITON_DC=

# ---------------------------------------------------
# Top-level commands

# Check for correct configuration and setup _env file
envcheck() {
    if [ -z "$1" ]; then
        tput rev  # reverse
        tput bold # bold
        echo 'Please provide a path to a SSH private key to access Manta.'
        tput sgr0 # clear
        help
        exit 1
    fi
    if [ ! -f "$1" ]; then
        tput rev  # reverse
        tput bold # bold
        echo 'SSH private key for Manta is unreadable.'
        tput sgr0 # clear
        help
        exit 1
    fi

    # Assign args to named vars
    MANTA_PRIVATE_KEY_PATH=$1

    command -v docker >/dev/null 2>&1 || {
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Docker is required, but does not appear to be installed.'
        tput sgr0 # clear
        echo 'See https://docs.joyent.com/public-cloud/api-access/docker'
        exit 1
    }
    command -v json >/dev/null 2>&1 || {
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! JSON CLI tool is required, but does not appear to be installed.'
        tput sgr0 # clear
        echo 'See https://apidocs.joyent.com/cloudapi/#getting-started'
        exit 1
    }
    command -v triton >/dev/null 2>&1 || {
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! Joyent Triton CLI is required, but does not appear to be installed.'
        tput sgr0 # clear
        echo 'See https://www.joyent.com/blog/introducing-the-triton-command-line-tool'
        exit 1
    }

    # make sure Docker client is pointed to the same place as the Triton client
    local docker_user=$(docker info 2>&1 | awk -F": " '/SDCAccount:/{print $2}')
    local docker_dc=$(echo $DOCKER_HOST | awk -F"/" '{print $3}' | awk -F'.' '{print $1}')
    TRITON_USER=$(triton profile get | awk -F": " '/account:/{print $2}')
    TRITON_DC=$(triton profile get | awk -F"/" '/url:/{print $3}' | awk -F'.' '{print $1}')
    if [ ! "$docker_user" = "$TRITON_USER" ] || [ ! "$docker_dc" = "$TRITON_DC" ]; then
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! The Triton CLI configuration does not match the Docker CLI configuration.'
        tput sgr0 # clear
        echo
        echo "Docker user: ${docker_user}"
        echo "Triton user: ${TRITON_USER}"
        echo "Docker data center: ${docker_dc}"
        echo "Triton data center: ${TRITON_DC}"
        exit 1
    fi

    local triton_cns_enabled=$(triton account get | awk -F": " '/cns/{print $2}')
    if [ ! "true" == "$triton_cns_enabled" ]; then
        echo
        tput rev  # reverse
        tput bold # bold
        echo 'Error! Triton CNS is required and not enabled.'
        tput sgr0 # clear
        echo
        exit 1
    fi

    # setup environment file
    if [ ! -f "_env" ]; then
        echo '# Environment variables for MySQL service' > _env
        echo 'MYSQL_USER=dbuser' >> _env
        echo 'MYSQL_PASSWORD='$(cat /dev/urandom | LC_ALL=C tr -dc 'A-Za-z0-9' | head -c 7) >> _env
        echo 'MYSQL_REPL_USER=repluser' >> _env
        echo 'MYSQL_REPL_PASSWORD='$(cat /dev/urandom | LC_ALL=C tr -dc 'A-Za-z0-9' | head -c 7) >> _env
        echo 'MYSQL_DATABASE=demodb' >> _env
        echo >> _env
        echo '# Environment variables for backups to Manta' >> _env
        echo 'MANTA_URL=https://us-east.manta.joyent.com' >> _env
        echo 'MANTA_BUCKET= # an existing Manta bucket' >> _env
        echo 'MANTA_USER= # a user with access to that bucket' >> _env
        echo 'MANTA_SUBUSER=' >> _env
        echo 'MANTA_ROLE=' >> _env

        # MANTA_KEY_ID must be the md5 formatted key fingerprint. A SHA256 will result in errors.
        set +o pipefail
        # The -E option was added to ssh-keygen recently; if it doesn't work, then
        # assume we're using an older version of ssh-keygen that only outputs MD5 fingerprints
        ssh-keygen -yl -E md5 -f ${MANTA_PRIVATE_KEY_PATH} > /dev/null 2>&1
        if [ $? -eq 0 ]; then
            echo MANTA_KEY_ID=$(ssh-keygen -yl -E md5 -f ${MANTA_PRIVATE_KEY_PATH} | awk '{print substr($2,5)}') >> _env
        else
            echo MANTA_KEY_ID=$(ssh-keygen -yl -f ${MANTA_PRIVATE_KEY_PATH} | awk '{print $2}') >> _env
        fi
        set -o pipefail

        # munge the private key so that we can pass it into an env var sanely
        # and then unmunge it in our startup script
        echo MANTA_PRIVATE_KEY=$(cat ${MANTA_PRIVATE_KEY_PATH} | tr '\n' '#') >> _env
        echo >> _env
        echo 'Edit the _env file with your desired MYSQL_* and MANTA_* config'
    else
        echo 'Existing _env file found, exiting'
        exit
    fi
}

get_root_password() {
    echo $(docker logs ${COMPOSE_PROJECT_NAME:-mysql}_mysql_1 2>&1 | \
        awk '/Generated root password/{print $NF}' | \
        awk '{$1=$1};1'
    ) | pbcopy
}

# ---------------------------------------------------
# parse arguments

# Get function list
funcs=($(declare -F -p | cut -d " " -f 3))

until
    if [ ! -z "$1" ]; then
        # check if the first arg is a function in this file, or use a default
        if [[ " ${funcs[@]} " =~ " $1 " ]]; then
            cmd=$1
            shift 1
        else
            cmd="envcheck"
        fi

        $cmd "$@"
        if [ $? == 127 ]; then
            help
        fi

        exit
    else
        help
    fi
do
    echo
done
tgross/triton-mysql
examples/triton/setup.sh
Shell
mpl-2.0
6,099
#!/bin/bash

source shell_functions.sh

function ventclient () {
    for i in {1..4}
    do
        ./ventclient &
    done
    wait
}

function ventserver () {
    (sleep 2 ; echo) | ./ventserver
}

SERVER=ventserver
CLIENT=ventclient

run
zeromq/f77_zmq
examples/vent.sh
Shell
lgpl-2.1
228
# Usage: ./update.sh <ogg_src_directory>
#
# Copies the needed files from a directory containing the original
# libogg source that we need for the Mozilla HTML5 media support.

cp $1/include/ogg/config_types.h ./include/ogg/config_types.h
cp $1/include/ogg/ogg.h ./include/ogg/ogg.h
cp $1/include/ogg/os_types.h ./include/ogg/os_types.h
cp $1/CHANGES ./CHANGES
cp $1/COPYING ./COPYING
cp $1/README ./README
cp $1/src/bitwise.c ./src/ogg_bitwise.c
cp $1/src/framing.c ./src/ogg_framing.c
cp $1/AUTHORS ./AUTHORS
patch -p0 < solaris-types.patch
sergecodd/FireFox-OS
B2G/gecko/media/libogg/update.sh
Shell
apache-2.0
542
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Build a whl and container with Intel(R) MKL support
# Usage: build-dev-container.sh

DEBUG=1
DOCKER_BINARY="docker"
TMP_DIR=$(pwd)

# Helper function to traverse directories up until given file is found.
function upsearch () {
  test / == "$PWD" && return || \
      test -e "$1" && echo "$PWD" && return || \
      cd .. && upsearch "$1"
}

function debug() {
  if [[ ${DEBUG} == 1 ]] ; then
    echo $1
  fi
}

function die() {
  echo $1
  exit 1
}

# Set up WORKSPACE.
WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"

ROOT_CONTAINER=${ROOT_CONTAINER:-tensorflow/tensorflow}
TF_ROOT_CONTAINER_TAG=${ROOT_CONTAINER_TAG:-devel}

# TF_BUILD_VERSION can be either a tag, branch, commit ID or PR number.
# For a PR, set TF_BUILD_VERSION_IS_PR="yes"
TF_BUILD_VERSION=${TF_DOCKER_BUILD_DEVEL_BRANCH:-master}
TF_BUILD_VERSION_IS_PR=${TF_DOCKER_BUILD_DEVEL_BRANCH_IS_PR:-no}
TF_REPO=${TF_REPO:-https://github.com/tensorflow/tensorflow}

FINAL_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME:-intel-mkl/tensorflow}
TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION:-nightly}
BUILD_AVX_CONTAINERS=${BUILD_AVX_CONTAINERS:-no}
BUILD_AVX2_CONTAINERS=${BUILD_AVX2_CONTAINERS:-no}
BUILD_SKX_CONTAINERS=${BUILD_SKX_CONTAINERS:-no}
BUILD_CLX_CONTAINERS=${BUILD_CLX_CONTAINERS:-no}
CONTAINER_PORT=${TF_DOCKER_BUILD_PORT:-8888}
BUILD_TF_V2_CONTAINERS=${BUILD_TF_V2_CONTAINERS:-yes}
BUILD_TF_BFLOAT16_CONTAINERS=${BUILD_TF_BFLOAT16_CONTAINERS:-no}
ENABLE_SECURE_BUILD=${ENABLE_SECURE_BUILD:-no}
BAZEL_VERSION=${BAZEL_VERSION}
BUILD_PY2_CONTAINERS=${BUILD_PY2_CONTAINERS:-no}
ENABLE_DNNL1=${ENABLE_DNNL1:-no}

debug "ROOT_CONTAINER=${ROOT_CONTAINER}"
debug "TF_ROOT_CONTAINER_TAG=${TF_ROOT_CONTAINER_TAG}"
debug "TF_BUILD_VERSION=${TF_BUILD_VERSION}"
debug "TF_BUILD_VERSION_IS_PR=${TF_BUILD_VERSION_IS_PR}"
debug "FINAL_IMAGE_NAME=${FINAL_IMAGE_NAME}"
debug "TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION}"
debug "BUILD_AVX_CONTAINERS=${BUILD_AVX_CONTAINERS}"
debug "BUILD_AVX2_CONTAINERS=${BUILD_AVX2_CONTAINERS}"
debug "BUILD_SKX_CONTAINERS=${BUILD_SKX_CONTAINERS}"
debug "BUILD_CLX_CONTAINERS=${BUILD_CLX_CONTAINERS}"
debug "BUILD_TF_V2_CONTAINERS=${BUILD_TF_V2_CONTAINERS}"
debug "BUILD_TF_BFLOAT16_CONTAINERS=${BUILD_TF_BFLOAT16_CONTAINERS}"
debug "ENABLE_SECURE_BUILD=${ENABLE_SECURE_BUILD}"
debug "TMP_DIR=${TMP_DIR}"
debug "BAZEL_VERSION=${BAZEL_VERSION}"
debug "BUILD_PY2_CONTAINERS=${BUILD_PY2_CONTAINERS}"
debug "ENABLE_DNNL1=${ENABLE_DNNL1}"

function build_container() {
  if [[ $# -lt 2 ]]; then
    die "Usage: build_container <TEMP_IMAGE_NAME> <TF_DOCKER_BUILD_ARGS>."
  fi
  TEMP_IMAGE_NAME=${1}
  debug "TEMP_IMAGE_NAME=${TEMP_IMAGE_NAME}"
  shift
  TF_DOCKER_BUILD_ARGS=("${@}")

  # Add the proxy info build args. This will be later on passed to docker as
  # --build-arg so that users behind corporate proxy can build the images
  TF_DOCKER_BUILD_ARGS+=("--build-arg http_proxy=${http_proxy}")
  TF_DOCKER_BUILD_ARGS+=("--build-arg https_proxy=${https_proxy}")
  TF_DOCKER_BUILD_ARGS+=("--build-arg socks_proxy=${socks_proxy}")
  TF_DOCKER_BUILD_ARGS+=("--build-arg no_proxy=${no_proxy}")

  # In general having uppercase proxies is a good idea because different
  # applications running inside Docker may only honor uppercase proxies
  TF_DOCKER_BUILD_ARGS+=("--build-arg HTTP_PROXY=${HTTP_PROXY}")
  TF_DOCKER_BUILD_ARGS+=("--build-arg HTTPS_PROXY=${HTTPS_PROXY}")
  TF_DOCKER_BUILD_ARGS+=("--build-arg SOCKS_PROXY=${SOCKS_PROXY}")
  TF_DOCKER_BUILD_ARGS+=("--build-arg NO_PROXY=${NO_PROXY}")

  #Add --config=v2 build arg for TF v2
  if [[ ${BUILD_TF_V2_CONTAINERS} == "no" ]]; then
    TF_DOCKER_BUILD_ARGS+=("--build-arg CONFIG_V2_DISABLE=--disable-v2")
  fi

  #Add build arg for bfloat16 build
  if [[ ${BUILD_TF_BFLOAT16_CONTAINERS} == "yes" ]]; then
    TF_DOCKER_BUILD_ARGS+=("--build-arg CONFIG_BFLOAT16_BUILD=--enable-bfloat16")
  fi

  #Add build arg for Secure Build
  if [[ ${ENABLE_SECURE_BUILD} == "yes" ]]; then
    TF_DOCKER_BUILD_ARGS+=("--build-arg ENABLE_SECURE_BUILD=--secure-build")
  fi

  # Add build arg for DNNL1
  if [[ ${ENABLE_DNNL1} == "yes" ]]; then
    TF_DOCKER_BUILD_ARGS+=("--build-arg ENABLE_DNNL1=--enable-dnnl1")
  fi

  # BAZEL Version
  if [[ ${BAZEL_VERSION} != "" ]]; then
    TF_DOCKER_BUILD_ARGS+=("--build-arg BAZEL_VERSION=${BAZEL_VERSION}")
  fi

  # Perform docker build
  debug "Building docker image with image name and tag: ${TEMP_IMAGE_NAME}"
  CMD="${DOCKER_BINARY} build ${TF_DOCKER_BUILD_ARGS[@]} --no-cache --pull -t ${TEMP_IMAGE_NAME} -f Dockerfile.devel-mkl ."
  debug "CMD=${CMD}"
  ${CMD}

  if [[ $? == "0" ]]; then
    debug "${DOCKER_BINARY} build of ${TEMP_IMAGE_NAME} succeeded"
  else
    die "FAIL: ${DOCKER_BINARY} build of ${TEMP_IMAGE_NAME} failed"
  fi
}

function test_container() {
  if [[ "$#" != "1" ]]; then
    die "Usage: ${FUNCNAME} <TEMP_IMAGE_NAME>"
  fi

  TEMP_IMAGE_NAME=${1}

  # Make sure that there is no other containers of the same image running
  if "${DOCKER_BINARY}" ps | grep -q "${TEMP_IMAGE_NAME}"; then
    die "ERROR: It appears that there are docker containers of the image "\
"${TEMP_IMAGE_NAME} running. Please stop them before proceeding"
  fi

  # Start a docker container from the newly-built docker image
  DOCKER_RUN_LOG="${TMP_DIR}/docker_run.log"
  debug " Log file is at: ${DOCKER_RUN_LOG}"

  debug "Running docker container from image ${TEMP_IMAGE_NAME}..."
  RUN_CMD="${DOCKER_BINARY} run --rm -d -p ${CONTAINER_PORT}:${CONTAINER_PORT} ${TEMP_IMAGE_NAME} tail -f /dev/null 2>&1 > ${DOCKER_RUN_LOG}"
  debug "RUN_CMD=${RUN_CMD}"
  ${RUN_CMD}

  # Get the container ID
  CONTAINER_ID=""
  while [[ -z ${CONTAINER_ID} ]]; do
    sleep 1
    debug "Polling for container ID..."
    CONTAINER_ID=$("${DOCKER_BINARY}" ps | grep "${TEMP_IMAGE_NAME}" | awk '{print $1}')
  done

  debug "ID of the running docker container: ${CONTAINER_ID}"

  debug "Performing basic sanity checks on the running container..."
  TEST_CMD_1=$(${DOCKER_BINARY} exec ${CONTAINER_ID} bash -c "${PYTHON} -c 'from tensorflow.python import _pywrap_util_port; print(_pywrap_util_port.IsMklEnabled())'")
  # Make TEST_CMD backward compatible with older code
  TEST_CMD_2=$(${DOCKER_BINARY} exec ${CONTAINER_ID} bash -c "${PYTHON} -c 'from tensorflow.python import pywrap_tensorflow; print(pywrap_tensorflow.IsMklEnabled())'")

  if [ "${TEST_CMD_1}" = "True" -o "${TEST_CMD_2}" = "True" ] ; then
    echo "PASS: MKL enabled test in ${TEMP_IMAGE_NAME}"
  else
    die "FAIL: MKL enabled test in ${TEMP_IMAGE_NAME}"
  fi

  # Stop the running docker container
  sleep 1
  "${DOCKER_BINARY}" stop --time=0 ${CONTAINER_ID}
}

function checkout_tensorflow() {
  if [[ "$#" != "3" ]]; then
    die "Usage: ${FUNCNAME} <REPO_URL> <BRANCH/TAG/COMMIT-ID/PR-ID> <TF_BUILD_VERSION_IS_PR>"
  fi

  TF_REPO="${1}"
  TF_BUILD_VERSION="${2}"
  TF_BUILD_VERSION_IS_PR="${3}"
  TENSORFLOW_DIR="tensorflow"

  debug "Checking out ${TF_REPO}:${TF_BUILD_VERSION} into ${TENSORFLOW_DIR}"

  # Clean any existing tensorflow sources
  rm -rf "${TENSORFLOW_DIR}"
  git clone ${TF_REPO} ${TENSORFLOW_DIR}
  cd ${TENSORFLOW_DIR}
  if [[ "${TF_BUILD_VERSION_IS_PR}" == "yes" ]]; then
    # If TF_BUILD_VERSION is a PR number, then fetch first
    git fetch origin pull/${TF_BUILD_VERSION}/head:pr-${TF_BUILD_VERSION}
    git checkout pr-${TF_BUILD_VERSION}
  else
    git checkout ${TF_BUILD_VERSION}
  fi
  if [ $? -ne 0 ]; then
    die "Unable to find ${TF_BUILD_VERSION} on ${TF_REPO}"
  fi
  cd ..
}

function tag_container() {
  # Apply the final image name and tag
  TEMP_IMAGE_NAME="${1}"
  FINAL_IMG="${2}"

  DOCKER_VER=$("${DOCKER_BINARY}" version | grep Version | head -1 | awk '{print $NF}')
  if [[ -z "${DOCKER_VER}" ]]; then
    die "ERROR: Failed to determine ${DOCKER_BINARY} version"
  fi
  DOCKER_MAJOR_VER=$(echo "${DOCKER_VER}" | cut -d. -f 1)
  DOCKER_MINOR_VER=$(echo "${DOCKER_VER}" | cut -d. -f 2)

  FORCE_TAG=""
  if [[ "${DOCKER_MAJOR_VER}" -le 1 ]] && \
     [[ "${DOCKER_MINOR_VER}" -le 9 ]]; then
    FORCE_TAG="--force"
  fi

  "${DOCKER_BINARY}" tag ${FORCE_TAG} "${TEMP_IMAGE_NAME}" "${FINAL_IMG}" || \
      die "Failed to tag intermediate docker image ${TEMP_IMAGE_NAME} as ${FINAL_IMG}"

  debug "Successfully tagged docker image: ${FINAL_IMG}"
}

PYTHON_VERSIONS=("python3")
if [[ ${BUILD_PY2_CONTAINERS} == "yes" ]]; then
  PYTHON_VERSIONS+=("python")
fi

PLATFORMS=()
if [[ ${BUILD_AVX_CONTAINERS} == "yes" ]]; then
  PLATFORMS+=("sandybridge")
fi

if [[ ${BUILD_AVX2_CONTAINERS} == "yes" ]]; then
  PLATFORMS+=("haswell")
fi

if [[ ${BUILD_SKX_CONTAINERS} == "yes" ]]; then
  PLATFORMS+=("skylake")
fi

if [[ ${BUILD_CLX_CONTAINERS} == "yes" ]]; then
  PLATFORMS+=("icelake")
fi

# Checking out sources needs to be done only once
checkout_tensorflow "${TF_REPO}" "${TF_BUILD_VERSION}" "${TF_BUILD_VERSION_IS_PR}"

for PLATFORM in "${PLATFORMS[@]}"
do
  for PYTHON in "${PYTHON_VERSIONS[@]}"
  do
    # Clear the build args array
    TF_DOCKER_BUILD_ARGS=("--build-arg TARGET_PLATFORM=${PLATFORM}")
    TF_DOCKER_BUILD_ARGS+=("--build-arg ROOT_CONTAINER=${ROOT_CONTAINER}")
    FINAL_TAG="${TF_DOCKER_BUILD_VERSION}"
    ROOT_CONTAINER_TAG="${TF_ROOT_CONTAINER_TAG}"

    if [[ ${PLATFORM} == "haswell" ]]; then
      FINAL_TAG="${FINAL_TAG}-avx2"
    fi

    if [[ ${PLATFORM} == "skylake" ]]; then
      FINAL_TAG="${FINAL_TAG}-avx512"
    fi

    if [[ ${PLATFORM} == "icelake" ]]; then
      FINAL_TAG="${FINAL_TAG}-avx512-VNNI"
    fi

    # Add -devel-mkl to the image tag
    FINAL_TAG="${FINAL_TAG}-devel-mkl"

    if [[ "${PYTHON}" == "python3" ]]; then
      TF_DOCKER_BUILD_ARGS+=("--build-arg WHL_DIR=/tmp/pip3")
      TF_DOCKER_BUILD_ARGS+=("--build-arg PIP=pip3")
      FINAL_TAG="${FINAL_TAG}-py3"
      ROOT_CONTAINER_TAG="${ROOT_CONTAINER_TAG}-py3"
    fi

    TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON=${PYTHON}")
    TF_DOCKER_BUILD_ARGS+=("--build-arg ROOT_CONTAINER_TAG=${ROOT_CONTAINER_TAG}")

    # Intermediate image name with tag
    TEMP_IMAGE_NAME="${USER}/tensorflow:${FINAL_TAG}"
    build_container "${TEMP_IMAGE_NAME}" "${TF_DOCKER_BUILD_ARGS[@]}"
    test_container "${TEMP_IMAGE_NAME}"
    tag_container "${TEMP_IMAGE_NAME}" "${FINAL_IMAGE_NAME}:${FINAL_TAG}"
  done
done
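
# Example invocation (illustrative values only; every variable named here is
# one of the environment knobs defined near the top of this script):
#
#   BUILD_AVX2_CONTAINERS=yes BUILD_SKX_CONTAINERS=yes \
#   TF_DOCKER_BUILD_VERSION=nightly ENABLE_SECURE_BUILD=yes \
#   ./build-dev-container.sh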
renyi533/tensorflow
tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
Shell
apache-2.0
10,960
#!/bin/sh
#
# Initialize the db with some data for testing
#

# reset the db
echo "Resetting the db....."
./surfapi.sh drop --db

MONGOLAB_TEST=mongodb://localhost/test

./sajavadoc.sh $MONGOLAB_TEST /java/jdk/1.7 -sourcepath /fox/tmp/javadoc/src-jdk7 \
    -subpackages java \
    -subpackages javax \
    -subpackages org.omg \
    -subpackages org.w3c \
    -subpackages org.xml

./sajavadoc.sh $MONGOLAB_TEST /java/com.surfapi/1.0 -sourcepath src/test/java com.surfapi.test
./sajavadoc.sh $MONGOLAB_TEST /java/com.surfapi/1.0 -sourcepath src/main/java com.surfapi.proc
./sajavadoc.sh $MONGOLAB_TEST /java/com.surfapi/0.9 -sourcepath src/test/java com.surfapi.test

./sajavadoc.sh $MONGOLAB_TEST /java/mongo-java-driver/2.9.3 -sourcepath /fox/tmp/surfapi-heroku/mongo-java-driver-2.9.3 -subpackages com -subpackages org
./sajavadoc.sh $MONGOLAB_TEST /java/javax.json/1.0.2 -sourcepath /fox/tmp/surfapi-heroku/javax.json-1.0.2 -subpackages javax
./sajavadoc.sh $MONGOLAB_TEST /java/javax.enterprise.concurrent/1.0 -sourcepath /fox/tmp/surfapi-heroku/javax.enterprise.concurrent-1.0 -subpackages javax
./sajavadoc.sh $MONGOLAB_TEST /java/javax.batch/1.0 -sourcepath /fox/tmp/surfapi-heroku/javax.batch-api-1.0 -subpackages javax
./sajavadoc.sh $MONGOLAB_TEST /java/jaxrs/2.3.1 -sourcepath /fox/tmp/surfapi-heroku/jaxrs-api-2.3.1 -subpackages javax
./sajavadoc.sh $MONGOLAB_TEST /java/org.osgi.core/5.0.0 -sourcepath /fox/tmp/surfapi-heroku/org.osgi.core-5.0.0 -subpackages org

export JAVADOC_CP=/fox/tmp/surfapi-heroku/org.osgi.core-5.0.0.jar; ./sajavadoc.sh $MONGOLAB_TEST /java/org.osgi.enterprise/5.0.0 -sourcepath /fox/tmp/surfapi-heroku/org.osgi.enterprise-5.0.0 -subpackages org; export JAVADOC_CP=

./sajavadoc.sh $MONGOLAB_TEST /java/apache-commons-lang3/3.3.2 -sourcepath /fox/tmp/surfapi-heroku/commons-lang3-3.3.2-src/src/main/java -subpackages org
./sajavadoc.sh $MONGOLAB_TEST /java/apache-commons-io/2.4 -sourcepath /fox/tmp/surfapi-heroku/commons-io-2.4-src/src/main/java -subpackages org
./sajavadoc.sh $MONGOLAB_TEST /java/apache-commons-net/3.3 -sourcepath /fox/tmp/surfapi-heroku/commons-net-3.3-src/src/main/java -subpackages org
./sajavadoc.sh $MONGOLAB_TEST /java/apache-commons-collections4/4.0 -sourcepath /fox/tmp/surfapi-heroku/commons-collections4-4.0-src/src/main/java -subpackages org
./sajavadoc.sh $MONGOLAB_TEST /java/htmlunit/2.15 -J-Dfile.encoding=UTF-8 -sourcepath /fox/tmp/surfapi-heroku/htmlunit-2.15 -subpackages com -subpackages netscape

# DONE: not all source files have javadoc. v7 has them (downloaded from maven repo)
# ./sajavadoc.sh $MONGOLAB_TEST /java/javaee/6.0 -sourcepath /fox/tmp/surfapi-heroku/javaee-api-6.0 -subpackages javax
./sajavadoc.sh $MONGOLAB_TEST /java/javaee/7.0 -sourcepath /fox/tmp/surfapi-heroku/javaee-api-7.0 -subpackages javax

./sajavadoc.sh $MONGOLAB_TEST /java/junit/4.11 \
    -sourcepath "/fox/tmp/surfapi-heroku/junit/src/main/java;/fox/tmp/surfapi-heroku/hamcrest-all-1.3" \
    -subpackages org.junit.experimental \
    -subpackages org.junit.matchers \
    -subpackages org.junit.rules \
    -subpackages org.junit.runner \
    -subpackages org.junit.runners \
    org.junit

./sajavadoc.sh $MONGOLAB_TEST /java/hamcrest/1.3 -sourcepath "/fox/tmp/surfapi-heroku/hamcrest-all-1.3" \
    -subpackages org.hamcrest.beans \
    -subpackages org.hamcrest.collection \
    -subpackages org.hamcrest.core \
    -subpackages org.hamcrest.integration \
    -subpackages org.hamcrest.internal \
    -subpackages org.hamcrest.number \
    -subpackages org.hamcrest.object \
    -subpackages org.hamcrest.text \
    -subpackages org.hamcrest.xml \
    org.hamcrest

./sajavadoc.sh $MONGOLAB_TEST /java/gson/2.3.1 -sourcepath "/fox/tmp/surfapi-heroku/gson-2.3.1" -subpackages com
rga78/surfapi-heroku
initdb.sh
Shell
apache-2.0
3,755
#!/usr/bin/env bash
# Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

PROJECT_ROOT=$(cd "$(dirname "$0")/../.."; pwd)
TARGET=${PROJECT_ROOT}/services/storm/lib

JACKSON_VERSION=2.9.3

CLI="mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:copy"

${CLI} -Dartifact=com.fasterxml.jackson.core:jackson-core:${JACKSON_VERSION}:jar -DoutputDirectory=${TARGET}
${CLI} -Dartifact=com.fasterxml.jackson.core:jackson-databind:${JACKSON_VERSION}:jar -DoutputDirectory=${TARGET}
${CLI} -Dartifact=com.fasterxml.jackson.core:jackson-annotations:${JACKSON_VERSION}:jar -DoutputDirectory=${TARGET}
carmine/open-kilda
base/hacks/shorm.requirements.download.sh
Shell
apache-2.0
1,148
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

home="$(cd "$(dirname $0)"; pwd)"

java -Dmysql.servers=${MYSQL_SERVERS} -Dskywalking.plugin.springtransaction.simplify_transaction_definition_name=true -jar ${agent_opts} ${home}/../libs/spring-tx-scenario.jar &
wu-sheng/sky-walking
test/plugin/scenarios/spring-tx-scenario/bin/startup.sh
Shell
apache-2.0
1,011
function f () {
    <ref>a=1
}

a=1
jansorg/BashSupport
testData/psi/resolve/varDef/GlobalVarDefFromFunction.bash
Shell
apache-2.0
35
#!/bin/bash

# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Select a prefix for the instance names in your cluster
readonly CLUSTER_PREFIX=my-grid

# Name of GCE network to add instances to
readonly CLUSTER_NETWORK=default

# By default, instance and disk names will be of the form:
#   Master:   my-grid-mm
#     boot:   my-grid-mm
#     data:   my-grid-mm-data
#   Workers:  my-grid-ww-<number>
#     boot:   my-grid-ww-<number>
#     data:   my-grid-ww-<number>-data
#
readonly MASTER_NODE_NAME_PATTERN="${CLUSTER_PREFIX}-mm"
readonly MASTER_BOOT_DISK_PATTERN="${CLUSTER_PREFIX}-mm"
readonly MASTER_DATA_DISK_PATTERN="${CLUSTER_PREFIX}-mm-data"

readonly WORKER_NODE_NAME_PATTERN="${CLUSTER_PREFIX}-ww-%d"
readonly WORKER_BOOT_DISK_PATTERN="${CLUSTER_PREFIX}-ww-%d"
readonly WORKER_DATA_DISK_PATTERN="${CLUSTER_PREFIX}-ww-%d-data"

# By default all hosts will be 4 core standard instances
# in the zone us-central1-a, running debian-7
readonly MASTER_NODE_MACHINE_TYPE=n1-standard-4
readonly MASTER_NODE_ZONE=us-central1-a
readonly MASTER_NODE_IMAGE=debian-7
readonly MASTER_NODE_DISK_SIZE=500GB
readonly MASTER_NODE_SCOPE=

readonly WORKER_NODE_MACHINE_TYPE=n1-standard-4
readonly WORKER_NODE_ZONE=us-central1-a
readonly WORKER_NODE_IMAGE=debian-7
readonly WORKER_NODE_DISK_SIZE=500GB
readonly WORKER_NODE_SCOPE=

# Specify the number of each node type
readonly MASTER_NODE_COUNT=1
readonly WORKER_NODE_COUNT=2

# Output file on the local workstation for logs
readonly SCRIPT_LOG_DIR=/tmp
mbookman/solutions-google-compute-engine-cluster-for-grid-engine
cluster_properties.sh
Shell
apache-2.0
2,054
#!/bin/bash

# A simple script triggered by keepalived when VIPs are moved
# around. When VIPs are moved to this node, explicit route for
# each VIP is added - this assures that any connection to VIP
# will use by default local-ipv4 as source address. Without this
# explicit route VIP address is used which causes issues when VIP
# moved to another node.
# https://bugs.launchpad.net/tripleo/+bug/1376200
#
# When VIP is moved from this node, this explicit route is removed
# to allow proper routing from this node to new VIP node.

set -eu

logger "Started $0 $@"

PATH="$PATH:/bin:/sbin"

LOCAL_IP=$(os-apply-config --key local-ipv4 --type netaddress)

if [ "$3" = "MASTER" ]; then
    for ip in $(cat /etc/keepalived/virtual_ips); do
        logger "adding explicit route for $ip"
        ip ro replace local $ip dev lo src $LOCAL_IP
    done
else
    for ip in $(cat /etc/keepalived/virtual_ips); do
        logger "removing explicit route for $ip"
        ip ro del local $ip
    done
fi
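
# Illustration (example addresses, not taken from this element): with
# LOCAL_IP=10.0.0.5 and a VIP of 10.0.0.100, the MASTER branch above runs
#
#   ip ro replace local 10.0.0.100 dev lo src 10.0.0.5
#
# so connections originating on this node and aimed at the VIP keep
# 10.0.0.5 as their source address, per the header comment.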
radez/tripleo-image-elements
elements/keepalived/bin/keepalived_vip_change.sh
Shell
apache-2.0
992
#! /bin/bash

COMMON_PATH=../common
WORK_PATH=.

. $COMMON_PATH/start-kata.sh
ssledz/gitsvnkata
kata-3-checkout-svn-project-5-last-commits/start-kata.sh
Shell
apache-2.0
77
add-apt-repository ppa:openjdk-r/ppa
apt-get update
apt-get install openjdk-8-jdk -y
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64

version="4.10.4"
wget http://archive.apache.org/dist/lucene/solr/$version/solr-$version.tgz
tar xzf solr-$version.tgz
chmod 777 solr-4.10.4/example/solr/collection1/conf/schema.xml
chmod 777 solr-4.10.4/example/solr/solr.xml
solr-$version/bin/solr start
juliandunn/chef-server-1
dev/scripts/provision-solr.sh
Shell
apache-2.0
401
#!/bin/bash
set -e

if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
    echo "Pull requests do not affect the public Serenity/JS Handbook"
    npm run prebook:publish
    exit 0
fi

if [[ $TRAVIS_BRANCH != 'master' ]]; then
    echo "Building from a branch does not affect the public Serenity/JS Handbook"
    npm run prebook:publish
    exit 0
fi

if [[ $TRAVIS_BRANCH == 'master' ]]; then
    npm run book:publish
fi
InvictusMB/serenity-js
scripts/travis/after_script.sh
Shell
apache-2.0
403
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

#set -x

usage() {
    printf "Usage: %s [uuid of this host] [uuid of the sr to place the heartbeat] [ add , true/false]\n" $(basename $0)
}

if [ -z $1 ]; then
    usage
    echo "#1# no uuid of host"
    exit 0
fi

if [ -z $2 ]; then
    usage
    echo "#2# no uuid of sr"
    exit 0
fi

if [ -z $3 ]; then
    usage
    echo "#21# no add parameter"
    exit 0
fi

if [ `xe host-list | grep $1 | wc -l` -ne 1 ]; then
    echo "#3# Unable to find the host uuid: $1"
    exit 0
fi

if [ `xe sr-list uuid=$2 | wc -l` -eq 0 ]; then
    echo "#4# Unable to find SR with uuid: $2"
    exit 0
fi

if [ `xe pbd-list sr-uuid=$2 | grep -B 1 $1 | wc -l` -eq 0 ]; then
    echo "#5# Unable to find a pbd for the SR: $2"
    exit 0
fi

hbfile=/opt/xensource/bin/heartbeat

if [ "$3" = "true" ]; then
    srtype=`xe sr-param-get param-name=type uuid=$2`
    if [ "$srtype" = "nfs" ]; then
        dir=/var/run/sr-mount/$2
        filename=$dir/hb-$1
        if [ ! -f "$filename" ]; then
            echo "#6# heartbeat file $filename doesn't exist"
            exit 0
        fi
    else
        dir=/dev/VG_XenStorage-$2
        link=$dir/hb-$1
        lvchange -ay $link
        if [ $? -ne 0 ]; then
            echo "#7# Unable to make the heartbeat $link active"
            exit 0
        fi
    fi
    if [ -f $hbfile ]; then
        grep $dir $hbfile >/dev/null
        if [ $? -gt 0 ]
        then
            echo $dir >> $hbfile
        fi
    else
        echo $dir >> $hbfile
    fi
else
    if [ -f $hbfile ]; then
        sed -i /$2/d $hbfile
    fi
fi

echo "#0#DONE"
exit 0
argv0/cloudstack
scripts/vm/hypervisor/xenserver/setup_heartbeat_file.sh
Shell
apache-2.0
2,258
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script finds, caches, and prints a list of all directories that hold
# *.go files.  If any directory is newer than the cache, re-find everything and
# update the cache.  Otherwise use the cached file.

set -o errexit
set -o nounset
set -o pipefail

if [[ -z "${1:-}" ]]; then
    echo "usage: $0 <cache-file>"
    exit 1
fi
CACHE="$1"; shift

trap "rm -f '${CACHE}'" HUP INT TERM ERR

# This is a partial 'find' command.  The caller is expected to pass the
# remaining arguments.
#
# Example:
#   kfind -type f -name foobar.go
function kfind() {
    # include the "special" vendor directories which are actually part
    # of the Kubernetes source tree - generators will use these for
    # including certain core API concepts.
    find -H . ./vendor/k8s.io/apimachinery ./vendor/k8s.io/apiserver ./vendor/k8s.io/kube-aggregator ./vendor/k8s.io/sample-apiserver ./vendor/k8s.io/metrics \
        \( \
          -not \( \
            \( \
              -path ./vendor -o \
              -path ./staging -o \
              -path ./_\* -o \
              -path ./.\* -o \
              -path ./docs -o \
              -path ./examples \
            \) -prune \
          \) \
        \) \
        "$@"
}

NEED_FIND=true
# It's *significantly* faster to check whether any directories are newer than
# the cache than to blindly rebuild it.
if [[ -f "${CACHE}" ]]; then
    N=$(kfind -type d -newer "${CACHE}" -print -quit | wc -l)
    [[ "${N}" == 0 ]] && NEED_FIND=false
fi

mkdir -p $(dirname "${CACHE}")
if $("${NEED_FIND}"); then
    kfind -type f -name \*.go \
        | sed 's|/[^/]*$||' \
        | sed 's|^./||' \
        | LC_ALL=C sort -u \
        > "${CACHE}"
fi
cat "${CACHE}"
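
# Example invocation (the cache path is hypothetical; any writable file works):
#
#   hack/make-rules/helpers/cache_go_dirs.sh /tmp/kube-go-dirs.cache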
fabianofranz/kubernetes
hack/make-rules/helpers/cache_go_dirs.sh
Shell
apache-2.0
2,453
#!/bin/bash

#valgrind --tool=memcheck --leak-check=yes -v \
valgrind --tool=memcheck --leak-check=full --show-leak-kinds=all --undef-value-errors=no \
    applications/argonnite_kmer_counter/argonnite -k 43 -threads-per-node 1 ~/dropbox/mini.fastq &> valgrind.log
sebhtml/biosal
scripts/memory-management/run-valgrind.sh
Shell
bsd-2-clause
269
#!/bin/sh

# Test redux installation.

set -e

scriptdir=$(realpath $(dirname $0))

workdir=$(mktemp --tmpdir --directory) # avoid breaking inside the source dir
cd $workdir

trap "rm -rf $workdir" 0

expected=$(mktemp --tmpdir=$workdir redux-man-XXXXXX)
cat - > $expected <<EOS
redo.1
redo-ifchange.1
redo-ifcreate.1
redo-init.1
EOS

bindir=$workdir/bin
mkdir $bindir
bin=$bindir/redux

#build a binary
(cd $scriptdir && go build -o $bin)

# Run test and compares result to $expected
run () {
    dst=$1
    shift
    $@
    t=$(mktemp --tmpdir=$workdir redux-man-mandir-XXXXXX)
    find $dst -type f -name '*.1' -printf '%f\n' | sort > $t
    cmp --verbose $expected $t
}

dir=$(mktemp --directory --tmpdir=$workdir redux-install-XXXXXX)

# 0 use --mandir flag value if it exists
run $dir/manA "$bin install --mandir $dir/manA man"

# 1 use MANDIR if it is set
run $dir/manB "env MANDIR=$dir/manB $bin install man"

# 2 use first non-empty path in MANPATH if it is set.
# This is equivalent to: $(echo $MANPATH | cut -f1 -d:) except that we skip empty fields.
# There are a few variations on MANPATH
run $dir/manC "env MANPATH=:$dir/manC $bin install man"
run $dir/manD "env MANPATH=$dir/manD: $bin install man"
run $dir/manE "env MANPATH=$dir/manE::does-not-exist $bin install man"

# 3 use $(dirname redux)/../man if it is writable
# does not exist yet, but will be created
run $bindir/../man "$bin install man"

# exists but empty
rm -rf $bindir/../man/*
run $bindir/../man "$bin install man"

# 4 use first non-empty path in `manpath` if there's one.
# This is equivalent to: $(manpath 2>/dev/null | cut -f1 -d:) except that we skip empty fields.

# create a directory for manpath to return
alt_man_dir=$(mktemp --directory --tmpdir=$workdir redux-test-4-XXXXXX)
echo MANPATH_MAP $bindir $alt_man_dir > ~/.manpath

# make $bindir/../man a file so redux can't use it and, instead, calls manpath
rm -rf $bindir/../man
touch $bindir/../man

# manpath checks ~/.manpath entries against entries in $PATH, so we add it, temporarily.
OLDPATH=$PATH
PATH=$bindir:$PATH
run $alt_man_dir "$bin install man"
PATH=$OLDPATH

# 5 use '/usr/local/man'
# $bindir/../man is still unusable,
# now mock up a failing manpath
cat > $bindir/manpath <<EOS
#!/bin/sh
exit 1
EOS
chmod +x $bindir/manpath

# Since we cannot write to /usr/local/man, there's no point in doing a comparison.
# Just look for an error.
PATH=$bindir:$PATH

# don't exit on error
message=$($bin install man 2>&1) || :

echo "$message" | egrep -q "^Error:.+error.+permission denied"
if test "$?" != "0" ; then
    echo "Expected error message: $message" > /dev/stderr
fi

# cleanup
rm -f ~/.manpath
gyepisam/redux
redux/install-man-test.sh
Shell
bsd-2-clause
2,663
#!/usr/bin/env bash

set -e
set -x

CURRENT_BRANCH="6.x"

function split()
{
    SHA1=`./bin/splitsh-lite --prefix=$1`
    git push $2 "$SHA1:refs/heads/$CURRENT_BRANCH" -f
}

function remote()
{
    git remote add $1 $2 || true
}

git pull origin $CURRENT_BRANCH

remote auth git@github.com:illuminate/auth.git
remote broadcasting git@github.com:illuminate/broadcasting.git
remote bus git@github.com:illuminate/bus.git
remote cache git@github.com:illuminate/cache.git
remote config git@github.com:illuminate/config.git
remote console git@github.com:illuminate/console.git
remote container git@github.com:illuminate/container.git
remote contracts git@github.com:illuminate/contracts.git
remote cookie git@github.com:illuminate/cookie.git
remote database git@github.com:illuminate/database.git
remote encryption git@github.com:illuminate/encryption.git
remote events git@github.com:illuminate/events.git
remote filesystem git@github.com:illuminate/filesystem.git
remote hashing git@github.com:illuminate/hashing.git
remote http git@github.com:illuminate/http.git
remote log git@github.com:illuminate/log.git
remote mail git@github.com:illuminate/mail.git
remote notifications git@github.com:illuminate/notifications.git
remote pagination git@github.com:illuminate/pagination.git
remote pipeline git@github.com:illuminate/pipeline.git
remote queue git@github.com:illuminate/queue.git
remote redis git@github.com:illuminate/redis.git
remote routing git@github.com:illuminate/routing.git
remote session git@github.com:illuminate/session.git
remote support git@github.com:illuminate/support.git
remote translation git@github.com:illuminate/translation.git
remote validation git@github.com:illuminate/validation.git
remote view git@github.com:illuminate/view.git

split 'src/Illuminate/Auth' auth
split 'src/Illuminate/Broadcasting' broadcasting
split 'src/Illuminate/Bus' bus
split 'src/Illuminate/Cache' cache
split 'src/Illuminate/Config' config
split 'src/Illuminate/Console' console
split 'src/Illuminate/Container' container
split 'src/Illuminate/Contracts' contracts
split 'src/Illuminate/Cookie' cookie
split 'src/Illuminate/Database' database
split 'src/Illuminate/Encryption' encryption
split 'src/Illuminate/Events' events
split 'src/Illuminate/Filesystem' filesystem
split 'src/Illuminate/Hashing' hashing
split 'src/Illuminate/Http' http
split 'src/Illuminate/Log' log
split 'src/Illuminate/Mail' mail
split 'src/Illuminate/Notifications' notifications
split 'src/Illuminate/Pagination' pagination
split 'src/Illuminate/Pipeline' pipeline
split 'src/Illuminate/Queue' queue
split 'src/Illuminate/Redis' redis
split 'src/Illuminate/Routing' routing
split 'src/Illuminate/Session' session
split 'src/Illuminate/Support' support
split 'src/Illuminate/Translation' translation
split 'src/Illuminate/Validation' validation
split 'src/Illuminate/View' view
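
# To split an additional component, the pattern above extends naturally
# (the component name "foo" is a hypothetical example):
#
#   remote foo git@github.com:illuminate/foo.git
#   split 'src/Illuminate/Foo' foo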
jerguslejko/framework
bin/split.sh
Shell
mit
2,858
#! /bin/sh
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Nonexistent sources for AC_LIBOBJ should cause an Automake failure.

. test-init.sh

cat >> configure.ac << 'END'
AC_PROG_CC
AM_PROG_AR
AC_PROG_RANLIB
AC_LIBOBJ([foobar])
END

cat > Makefile.am << 'END'
noinst_LIBRARIES = libtu.a
libtu_a_SOURCES =
libtu_a_LIBADD = $(LIBOBJS)
END

: > ar-lib

$ACLOCAL
AUTOMAKE_fails
grep 'configure\.ac:.*required file.*foobar\.c.*' stderr

:
kuym/openocd
tools/automake-1.15/t/libobj15a.sh
Shell
gpl-2.0
1,069
#!/bin/bash
#--------------------------------------------------------------#
#                                                              #
# amber-jumps.sh                                               #
#                                                              #
# This file is part of the Amber project                       #
# http://www.opencores.org/project,amber                       #
#                                                              #
# Description                                                  #
# Parse the Amber disassembly file, amber.dis, and extract     #
# all the function jumps, using the test disassembly file      #
# to get the function names and addresses.                     #
#                                                              #
# Author(s):                                                   #
# - Conor Santifort, [email protected]                         #
#                                                              #
#//////////////////////////////////////////////////////////////#
#                                                              #
# Copyright (C) 2010 Authors and OPENCORES.ORG                 #
#                                                              #
# This source file may be used and distributed without         #
# restriction provided that this copyright statement is not    #
# removed from the file and that any derivative work contains  #
# the original copyright notice and the associated disclaimer. #
#                                                              #
# This source file is free software; you can redistribute it   #
# and/or modify it under the terms of the GNU Lesser General   #
# Public License as published by the Free Software Foundation; #
# either version 2.1 of the License, or (at your option) any   #
# later version.                                               #
#                                                              #
# This source is distributed in the hope that it will be       #
# useful, but WITHOUT ANY WARRANTY; without even the implied   #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      #
# PURPOSE.  See the GNU Lesser General Public License for more #
# details.                                                     #
#                                                              #
# You should have received a copy of the GNU Lesser General    #
# Public License along with this source; if not, download it   #
# from http://www.opencores.org/lgpl.shtml                     #
#                                                              #
#--------------------------------------------------------------#

show_usage() {
    echo " Usage:"
    echo "$0 <test-name>"
    exit
}

MINARGS=1

case $1 in
    ""|"-h"|"--help") show_usage ;;
esac

if [[ $# -lt $MINARGS ]] ; then
    echo "Too few arguments given (Minimum:$MINARGS)"
    echo
    show_usage
fi

#--------------------------------------------------------
# Find the test
#--------------------------------------------------------
TEST_NAME=$1

# First check if its an assembly test
if [ -f ../tests/${TEST_NAME}.S ]; then
    TEST_DIS=../tests/${TEST_NAME}.dis
elif [ -d ../../sw/${TEST_NAME} ]; then
    TEST_DIS=../../sw/${TEST_NAME}/${TEST_NAME}.dis
else
    echo "Test ${TEST_NAME} not found"
    exit
fi

grep jump amber.dis | awk '{print $1, $4, $6, $8, $10}' | sed 's/,//g' > /tmp/jumps
grep '>:' $TEST_DIS | sed 's/<//' | sed 's/>://' > /tmp/funcsx
sort /tmp/funcsx > /tmp/funcs

# Remove some very common linux function jumps
../../sw/tools/amber-func-jumps /tmp/jumps /tmp/funcs \
    | grep -v "cpu_idle -" \
    | grep -v "cpu_idle <" \
    | grep -v "default_idle -" \
    | grep -v "default_idle <"
grvmind/amber-cycloneiii
trunk/sw/tools/amber-jumps.sh
Shell
gpl-2.0
3,738
# usage ./update_windows_myRUN.bat_scripts.sh examples_list language_list
# language = C | Java | CS

if [ "$1" = "" ]; then
    echo "*** usage : ./update_windows_myRUN.bat_scripts.sh examples_list language_list"
    exit;
fi
if [ "$2" = "" ]; then
    echo "*** usage : ./update_windows_myRUN.bat_scripts.sh examples_list language_list"
    exit;
fi

LIST=$1
LANGUAGE_LIST=$2
DCPS=$PWD/../../examples/dcps
AUTOMATION_SCRIPTS=$PWD/../../build/scripts/overnight/example_automation_scripts

for LANG in `cat $LANGUAGE_LIST`; do
    echo === Lang=$LANG;
    for each in `cat $LIST`; do
        echo " Example= $each ===";
        if [ $LANG = "C" ] || [ $LANG = "CS" ]; then
            cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/$LANG/Standalone/VS2005/Bat/RUN.bat $DCPS/$each/$LANG/Standalone/VS2005/Bat/RUN.bat
            cp -f myRUN.bat $DCPS/$each/$LANG/Standalone/VS2005/Bat/myRUN.bat
        fi
        if [ $LANG = "C++" ]; then
            cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/$LANG/Standalone/VS2005/Bat/RUN.bat $DCPS/$each/$LANG/Standalone/VS2005/Bat/RUN.bat
            cp -f myRUN.bat $DCPS/$each/$LANG/Standalone/VS2005/Bat/myRUN.bat
            cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/$LANG/Corba/OpenFusion/VS2005/Bat/RUN.bat $DCPS/$each/C++/Corba/OpenFusion/VS2005/Bat/RUN.bat;
            cp -f myRUN.bat $DCPS/$each/C++/Corba/OpenFusion/VS2005/Bat/myRUN.bat;
            if [ $each = "Durability" ]; then
                cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/C++/Standalone/VS2005/Bat/start.bat $DCPS/$each/C++/Corba/OpenFusion/VS2005/Bat/start.bat;
            fi
        fi
        if [ $LANG = "Java" ]; then
            cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/$LANG/Standalone/Windows/Bat/RUN.bat $DCPS/$each/$LANG/Standalone/Windows/Bat/RUN.bat
            cp -f myRUN.bat $DCPS/$each/$LANG/Standalone/Windows/Bat/myRUN.bat
            cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/$LANG/Corba/JacORB/Windows/Bat/RUN.bat $DCPS/$each/$LANG/Corba/JacORB/Windows/Bat/RUN.bat;
            cp -f myRUN.bat $DCPS/$each/$LANG/Corba/JacORB/Windows/Bat/myRUN.bat;
            if [ $each = "Durability" ]; then
                cp -f $AUTOMATION_SCRIPTS/examples/dcps/$each/$LANG/Corba/JacORB/Windows/Bat/start.bat $DCPS/$each/$LANG/Corba/JacORB/Windows/Bat/start.bat;
            fi
        fi
    done
done
SanderMertens/opensplice
examples/Dev_scripts/update_windows_myRUN_scripts.sh
Shell
gpl-3.0
2,207
#!/bin/bash

VERSION=`grep '^version_number' launcher_version_number.py | cut -f 2 -d '"'`
ARCHITECTURE=`uname -m | sed s/x86_64/amd64/g | sed s/i686/i386/g`

./package_linux_version.sh $VERSION $ARCHITECTURE

TMP="tmp_debian_build"

alias sudo=""

sudo rm -fr $TMP
sudo rm -f *.deb

TARGET=$TMP/opt/Strudel
mkdir -p $TARGET
mkdir -p $TMP/usr/share/applications
cp Strudel.desktop $TMP/usr/share/applications/

cp -r dist/Strudel-${VERSION}_${ARCHITECTURE}/* $TARGET/

mkdir $TMP/DEBIAN
cp release/control $TMP/DEBIAN

# This is not necessary on Ubuntu 13.04 when building for Ubuntu
# cp release/postinst $TMP/DEBIAN

# Remove pango from the control file, not needed on Ubuntu 13
sed -i 's/libpango1.0-dev,//g' $TMP/DEBIAN/control

installedSize=`du -sx --exclude DEBIAN $TMP | awk '{print $1}'`

sed -i "s/VERSION/${VERSION}/g" $TMP/DEBIAN/control
sed -i "s/ARCHITECTURE/${ARCHITECTURE}/g" $TMP/DEBIAN/control
sed -i "s/XXINSTALLEDSIZE/${installedSize}/g" $TMP/DEBIAN/control

sudo chown -R root.root $TMP

sudo find $TMP/ -iname '*.so.*' -exec chmod a-x {} \;
sudo find $TMP/ -iname '*.so.*' -exec strip {} \;

mkdir -p $TMP/opt/Strudel/icons/
cp IconPngs/MASSIVElogoTransparent144x144.png $TMP/opt/Strudel/icons/MASSIVElogoTransparent144x144.png
cp Strudel.desktop $TMP/opt/Strudel/"Strudel.desktop"
sudo chmod a-x $TMP/opt/Strudel/icons/MASSIVElogoTransparent144x144.png
sudo chmod a-x $TMP/opt/Strudel/"Strudel.desktop"

DEB=strudel_UBUNTU_${VERSION}_${ARCHITECTURE}.deb
sudo dpkg -b $TMP $DEB

echo
echo
echo
ls -lh *.deb
echo
echo
monash-merc/cvl-fabric-launcher
package_ubuntu_version.sh
Shell
gpl-3.0
1,543
#!/bin/bash
# Ensures that all NetCore files in the repository are parsable.
cd `dirname $0`/..
find . -name "*.nc" | xargs ./src/Frenetic.d.byte -parse-only
similecat/freneticEx
tools/checksyntax.sh
Shell
lgpl-3.0
159
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests the examples provided in Bazel
#

# Load the test setup defined in the parent directory
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${CURRENT_DIR}/../integration_test_setup.sh" \
  || { echo "integration_test_setup.sh not found!" >&2; exit 1; }

function set_up() {
  copy_examples
  cat > WORKSPACE <<EOF
workspace(name = "io_bazel")
EOF
}

#
# Native rules
#
function test_cpp() {
  assert_build "//examples/cpp:hello-world"
  assert_bazel_run "//examples/cpp:hello-world foo" "Hello foo"
  assert_test_ok "//examples/cpp:hello-success_test"
  assert_test_fails "//examples/cpp:hello-fail_test"
}

# An assertion that execute a binary from a sub directory (to test runfiles)
function assert_binary_run_from_subdir() {
  ( # Needed to make execution from a different path work.
    export PATH=${bazel_javabase}/bin:"$PATH" &&
    mkdir -p x &&
    cd x &&
    unset JAVA_RUNFILES &&
    unset TEST_SRCDIR &&
    assert_binary_run "../$1" "$2" )
}

function test_java() {
  local java_pkg=examples/java-native/src/main/java/com/example/myproject

  assert_build_output ./bazel-bin/${java_pkg}/libhello-lib.jar ${java_pkg}:hello-lib
  assert_build_output ./bazel-bin/${java_pkg}/libcustom-greeting.jar ${java_pkg}:custom-greeting
  assert_build_output ./bazel-bin/${java_pkg}/hello-world ${java_pkg}:hello-world
  assert_build_output ./bazel-bin/${java_pkg}/hello-resources ${java_pkg}:hello-resources
  assert_binary_run_from_subdir "bazel-bin/${java_pkg}/hello-world foo" "Hello foo"
}

function test_java_test() {
  setup_javatest_support
  local java_native_tests=//examples/java-native/src/test/java/com/example/myproject
  local java_native_main=//examples/java-native/src/main/java/com/example/myproject

  assert_build "-- //examples/java-native/... -${java_native_main}:hello-error-prone"
  JAVA_VERSION="1.$(bazel query --output=build '@bazel_tools//tools/jdk:toolchain' | grep source_version | cut -d '"' -f 2)"
  if [ "${JAVA_VERSION}" -ne "1.7" ]; then
    assert_build_fails "${java_native_main}:hello-error-prone" \
      "Did you mean 'result = b == -1;'?"
  fi
  assert_test_ok "${java_native_tests}:hello"
  assert_test_ok "${java_native_tests}:custom"
  assert_test_fails "${java_native_tests}:fail"
  assert_test_fails "${java_native_tests}:resource-fail"
}

function test_java_test_with_junitrunner() {
  # Test with junitrunner.
  setup_javatest_support
  local java_native_tests=//examples/java-native/src/test/java/com/example/myproject
  assert_test_ok "${java_native_tests}:custom_with_test_class"
}

function test_genrule_and_genquery() {
  # The --javabase flag is to force the tools/jdk:jdk label to be used
  # so it appears in the dependency list.
  assert_build_output ./bazel-bin/examples/gen/genquery examples/gen:genquery --javabase=//tools/jdk
  local want=./bazel-genfiles/examples/gen/genrule.txt
  assert_build_output $want examples/gen:genrule --javabase=//tools/jdk

  diff $want ./bazel-bin/examples/gen/genquery \
    || fail "genrule and genquery output differs"

  grep -qE "^//tools/jdk:jdk$" $want || {
    cat $want
    fail "//tools/jdk:jdk not found in genquery output"
  }
}

function test_native_python() {
  assert_build //examples/py_native:bin --python2_path=python
  assert_test_ok //examples/py_native:test --python2_path=python
  assert_test_fails //examples/py_native:fail --python2_path=python
}

function test_native_python_with_zip() {
  assert_build //examples/py_native:bin --python2_path=python --build_python_zip
  # run the python package directly
  ./bazel-bin/examples/py_native/bin >& $TEST_log \
    || fail "//examples/py_native:bin execution failed"
  expect_log "Fib(5) == 8"
  # Using python <zipfile> to run the python package
  python ./bazel-bin/examples/py_native/bin >& $TEST_log \
    || fail "//examples/py_native:bin execution failed"
  expect_log "Fib(5) == 8"
  assert_test_ok //examples/py_native:test --python2_path=python --build_python_zip
  assert_test_fails //examples/py_native:fail --python2_path=python --build_python_zip
}

function test_shell() {
  assert_build "//examples/shell:bin"
  assert_bazel_run "//examples/shell:bin" "Hello Bazel!"
  assert_test_ok "//examples/shell:test"
}

#
# Skylark rules
#
function test_python() {
  assert_build "//examples/py:bin"

  ./bazel-bin/examples/py/bin >& $TEST_log \
    || fail "//examples/py:bin execution failed"
  expect_log "Fib(5)=8"

  # Mutate //examples/py:bin so that it needs to build again.
  echo "print('Hello')" > ./examples/py/bin.py
  # Ensure that we can rebuild //examples/py::bin without error.
  assert_build "//examples/py:bin"
  ./bazel-bin/examples/py/bin >& $TEST_log \
    || fail "//examples/py:bin 2nd build execution failed"
  expect_log "Hello"
}

function test_java_skylark() {
  local java_pkg=examples/java-skylark/src/main/java/com/example/myproject
  assert_build_output ./bazel-bin/${java_pkg}/libhello-lib.jar ${java_pkg}:hello-lib
  assert_build_output ./bazel-bin/${java_pkg}/hello-data ${java_pkg}:hello-data
  assert_build_output ./bazel-bin/${java_pkg}/hello-world ${java_pkg}:hello-world
  # we built hello-world but hello-data is still there.
  want=./bazel-bin/${java_pkg}/hello-data
  test -x $want || fail "executable $want not found"
  assert_binary_run_from_subdir "bazel-bin/${java_pkg}/hello-data foo" "Heyo foo"
}

function test_java_test_skylark() {
  setup_skylark_javatest_support
  javatests=examples/java-skylark/src/test/java/com/example/myproject
  assert_build //${javatests}:pass
  assert_test_ok //${javatests}:pass
  assert_test_fails //${javatests}:fail
}

run_suite "examples"
spxtr/bazel
src/test/shell/bazel/bazel_example_test.sh
Shell
apache-2.0
6,238
# Copyright 2015 The Kythe Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Define TEST_NAME, then include as part of an extractor test.
# TODO(zarko): lift this script out for use in other test suites.

BASE_DIR="$PWD/kythe/cxx/extractor/testdata"
OUT_DIR="$TEST_TMPDIR/out"
mkdir -p "${OUT_DIR}"

export KYTHE_EXCLUDE_EMPTY_DIRS=1
export KYTHE_EXCLUDE_AUTOCONFIGURATION_FILES=1
bzz/kythe
kythe/cxx/extractor/testdata/test_common.sh
Shell
apache-2.0
905
#!/bin/sh
#
# Copyright (c) 2006-2016 Varnish Software AS
# All rights reserved.
#
# Author: Poul-Henning Kamp <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.

set -e

#######################################################################
# Parameters

export MAKEFLAGS="${MAKEFLAGS:--j2}"

# This tempdirectory must not be used by anything else.
# Do *NOT* set this to /tmp
export TMPDIR=`pwd`/_vtest_tmp

# Message to be shown in result pages
# Max 10 char of [A-Za-z0-9/. _-]
MESSAGE="${MESSAGE:-}"

WAITPERIOD=60   # unit: Seconds

WAITGOOD=60     # unit: WAITPERIOD
WAITBAD=1       # unit: WAITPERIOD
MAXRUNS="${MAXRUNS:-0}"

#######################################################################
# NB: No User Serviceable Parts Beyond This Point
#######################################################################

enable_gcov=false

SSH_DST="-p 203 [email protected]"

export REPORTDIR=`pwd`/_report
export VTEST_REPORT="${REPORTDIR}/_log"

#######################################################################
# Establish TMPDIR

mkdir -p ${TMPDIR}
rm -rf ${TMPDIR}/*

# Try to make varnish own TMPDIR, in case we run as root
chown varnish ${TMPDIR} > /dev/null 2>&1 || true

#######################################################################
# Establish the SRCDIR we build/run/test

if ! (cd varnish-cache 2>/dev/null) ; then
    git clone \
        https://github.com/varnishcache/varnish-cache.git \
        varnish-cache
fi

export SRCDIR=`pwd`/varnish-cache

#######################################################################
# Submission of results

if [ ! -f vt_key.pub ] ; then
    ssh-keygen -t ed25519 -N "" -f vt_key
fi

pack () (
    cd ${REPORTDIR}
    tar czf - _log \
        `grep '^MANIFEST ' _log | sort -u | sed 's/^MANIFEST *//'` \
)

submit () (
    ssh \
        -T \
        -o StrictHostKeyChecking=no \
        -o PasswordAuthentication=no \
        -o NumberOfPasswordPrompts=0 \
        -o RequestTTY=no \
        -i vt_key \
        ${SSH_DST} \
        true \
        < ${1}
)

rm -f ${TMPDIR}/_report.tgz
touch ${TMPDIR}/_report.tgz

if ! submit ${TMPDIR}/_report.tgz; then
    echo "Test submit failed"
    echo
    echo "You probably need to email this VTEST specific ssh-key"
    echo "to [email protected]"
    echo
    sed 's/^/  /' vt_key.pub
    echo
    exit 2
fi

#######################################################################

autogen () (
    set -e
    cd "${SRCDIR}"
    nice make distclean > /dev/null 2>&1 || true
    nice sh "${SRCDIR}"/autogen.des
)

makedistcheck () (
    set -e
    cd "${SRCDIR}"
    nice make vtest-clean
    nice make distcheck
)

gcovtest () (
    set -x
    if [ `id -u` -eq 0 ] && su -m varnish -c 'true' ; then
        su -m varnish -c "make check" || exit 1
        cd bin/varnishtest
        ./varnishtest -i tests/[ab]0000?.vtc tests/j*.vtc || exit 1
    else
        make check || exit 1
    fi
)

makegcov () (
    set -x
    cd "${SRCDIR}"

    export CFLAGS="-fprofile-arcs -ftest-coverage -fstack-protector -DDONT_DLCLOSE_VMODS"
    CC=gcc49
    export MAKEFLAGS=-j1

    find . -name '*.gc??' -print | xargs rm -f

    sh autogen.des || exit 1
    make || exit 1

    if [ `id -u` -eq 0 ] ; then
        chown -R varnish . | true
    fi

    if gcovtest && make gcov_digest ; then
        retval=0
    else
        retval=1
    fi

    if [ `id -u` -eq 0 ] ; then
        chown -R root . || true
    fi
    exit ${retval}
)

failedtests () (
    set -e

    cd "${SRCDIR}"

    VERSION=`./configure --version | awk 'NR == 1 {print $NF}'`
    LOGDIR="varnish-$VERSION/_build/sub/bin/varnishtest/tests"
    VTCDIR=bin/varnishtest/tests

    # cope with older automake, remove the sub directory
    test ! -d $LOGDIR && LOGDIR="varnish-$VERSION/_build/bin/varnishtest/tests"

    grep -l ':test-result: FAIL' "$LOGDIR"/*.trs |
    while read trs
    do
        name=`basename $trs .trs`
        vtc="${name}.vtc"
        log="${name}.log"
        rev=`git log -n 1 --pretty=format:%H "${VTCDIR}/${vtc}"`
        cp "${LOGDIR}/${log}" "${REPORTDIR}/_${log}"
        echo "VTCGITREV ${name} ${rev}"
        echo "MANIFEST _${log}"
    done
)

orev=000
waitnext=${WAITBAD}
i=0
last_day=`date +%d`

while [ $MAXRUNS -eq 0 ] || [ $i -lt $MAXRUNS ]
do
    i=$((i + 1))

    (cd "${SRCDIR}" && git pull > /dev/null 2>&1 || true)
    rev=`cd "${SRCDIR}" && git show -s --pretty=format:%H`
    if [ "${waitnext}" -gt 0 -a "x${rev}" = "x${orev}" ] ; then
        sleep ${WAITPERIOD}
        waitnext=`expr ${waitnext} - 1 || true`
        continue
    fi

    waitnext=${WAITBAD}
    orev=${rev}

    if ! [ -d "${SRCDIR}" ] && ! mkdir -p "${SRCDIR}" ; then
        echo >&2 "could not create SRCDIR ${SRCDIR}"
        exit 2
    fi

    rm -rf "${REPORTDIR}"
    mkdir "${REPORTDIR}"

    if ! $enable_gcov ; then
        do_gcov=false
    elif [ -f _force_gcov ] ; then
        do_gcov=true
        rm -f _force_gcov
    elif [ `date +%d` == $last_day ] ; then
        do_gcov=false
    elif [ `date +%H` -lt 3 ] ; then
        do_gcov=false
    else
        do_gcov=true
    fi

    echo "VTEST 1.04" > ${VTEST_REPORT}
    echo "DATE `date +%s`" >> ${VTEST_REPORT}
    echo "BRANCH trunk" >> ${VTEST_REPORT}
    echo "HOST `hostname`" >> ${VTEST_REPORT}
    echo "UNAME `uname -a`" >> ${VTEST_REPORT}
    echo "UGID `id`" >> ${VTEST_REPORT}
    if [ -x /usr/bin/lsb_release ] ; then
        echo "LSB `lsb_release -d`" >> ${VTEST_REPORT}
    else
        echo "LSB none" >> ${VTEST_REPORT}
    fi
    echo "MESSAGE ${MESSAGE}" >> ${VTEST_REPORT}
    echo "GITREV $rev" >> ${VTEST_REPORT}

    if ! autogen >> ${REPORTDIR}/_autogen 2>&1 ; then
        echo "AUTOGEN BAD" >> ${VTEST_REPORT}
        echo "MANIFEST _autogen" >> ${VTEST_REPORT}
    else
        echo "AUTOGEN GOOD" >> ${VTEST_REPORT}
        if $do_gcov ; then
            last_day=`date +%d`
            if makegcov >> ${REPORTDIR}/_makegcov 2>&1 ; then
                mv ${SRCDIR}/_gcov ${REPORTDIR}/
                echo "MAKEGCOV GOOD" >> ${VTEST_REPORT}
                echo "MANIFEST _gcov" >> ${VTEST_REPORT}
            else
                echo "MAKEGCOV BAD" >> ${VTEST_REPORT}
                echo "MANIFEST _makegcov" >> ${VTEST_REPORT}
            fi
        elif ! makedistcheck >> ${REPORTDIR}/_makedistcheck 2>&1 ; then
            echo "MAKEDISTCHECK BAD" >> ${VTEST_REPORT}
            echo "MANIFEST _autogen" >> ${VTEST_REPORT}
            echo "MANIFEST _makedistcheck" >> ${VTEST_REPORT}
            failedtests >> ${VTEST_REPORT}
        else
            echo "MAKEDISTCHECK GOOD" >> ${VTEST_REPORT}
            waitnext=${WAITGOOD}
        fi
    fi
    echo "VTEST END" >> ${VTEST_REPORT}

    pack > ${TMPDIR}/_report.tgz
    submit ${TMPDIR}/_report.tgz
done
feld/Varnish-Cache
tools/vtest.sh
Shell
bsd-2-clause
7,308
# first arg is config file
if ! [[ "$1" ]]; then
    echo "error: no config file provided"
    exit 1
fi

# second (optional) arg is nstudies limit
if [ "$2" ]; then
    nstudies=$2
    echo $nstudies
fi

config=$1

if [ $nstudies ]; then
    python setup_db.py $config
    # load nexson files
    python load_nexson.py $config -n $nstudies
    # prepare taxonomy files
    python generate_taxonomy_files.py $config -n $nstudies
    # load taxonomy files
    python load_taxonomy_files.py $config .
    # run some simple tests
    python test_db_selects.py $config
    exit 0
fi

# clear existing tables
python setup_db.py $config

# load nexson files
python load_nexson.py $config

# load taxonomy
python generate_taxonomy_files.py $config

# load otu table
python load_taxonomy_files.py $config .

# run some simple tests
python test_db_selects.py $config
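
# Example invocations (the config filename is hypothetical): process every
# study, or cap the run at 100 studies via the optional second argument:
#
#   ./run_setup_scripts.sh development.ini
#   ./run_setup_scripts.sh development.ini 100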
OpenTreeOfLife/ottreeindex
otindex/scripts/run_setup_scripts.sh
Shell
bsd-2-clause
859
#!/bin/bash
set -ex
set -o pipefail

export SYSTEMD_PAGER=cat

dd if=/dev/urandom of=/var/tmp/testimage.raw bs=$((1024*1024+7)) count=5

# Test import
machinectl import-raw /var/tmp/testimage.raw
machinectl image-status testimage
test -f /var/lib/machines/testimage.raw
cmp /var/tmp/testimage.raw /var/lib/machines/testimage.raw

# Test export
machinectl export-raw testimage /var/tmp/testimage2.raw
cmp /var/tmp/testimage.raw /var/tmp/testimage2.raw
rm /var/tmp/testimage2.raw

# Test compressed export (gzip)
machinectl export-raw testimage /var/tmp/testimage2.raw.gz
gunzip /var/tmp/testimage2.raw.gz
cmp /var/tmp/testimage.raw /var/tmp/testimage2.raw
rm /var/tmp/testimage2.raw

# Test clone
machinectl clone testimage testimage3
test -f /var/lib/machines/testimage3.raw
machinectl image-status testimage3
test -f /var/lib/machines/testimage.raw
machinectl image-status testimage
cmp /var/tmp/testimage.raw /var/lib/machines/testimage.raw
cmp /var/tmp/testimage.raw /var/lib/machines/testimage3.raw

# Test removal
machinectl remove testimage
! test -f /var/lib/machines/testimage.raw
! machinectl image-status testimage

# Test export of clone
machinectl export-raw testimage3 /var/tmp/testimage3.raw
cmp /var/tmp/testimage.raw /var/tmp/testimage3.raw
rm /var/tmp/testimage3.raw

# Test rename
machinectl rename testimage3 testimage4
test -f /var/lib/machines/testimage4.raw
machinectl image-status testimage4
! test -f /var/lib/machines/testimage3.raw
! machinectl image-status testimage3
cmp /var/tmp/testimage.raw /var/lib/machines/testimage4.raw

# Test export of rename
machinectl export-raw testimage4 /var/tmp/testimage4.raw
cmp /var/tmp/testimage.raw /var/tmp/testimage4.raw
rm /var/tmp/testimage4.raw

# Test removal
machinectl remove testimage4
! test -f /var/lib/machines/testimage4.raw
! machinectl image-status testimage4

# → And now, let's test directory trees ←

# Set up a directory we can import
mkdir /var/tmp/scratch
mv /var/tmp/testimage.raw /var/tmp/scratch/
touch /var/tmp/scratch/anotherfile
mkdir /var/tmp/scratch/adirectory
echo "piep" > /var/tmp/scratch/adirectory/athirdfile

# Test import-fs
machinectl import-fs /var/tmp/scratch/
test -d /var/lib/machines/scratch
machinectl image-status scratch

# Test export-tar
machinectl export-tar scratch /var/tmp/scratch.tar.gz
test -f /var/tmp/scratch.tar.gz
mkdir /var/tmp/extract
(cd /var/tmp/extract ; tar xzf /var/tmp/scratch.tar.gz)
diff -r /var/tmp/scratch/ /var/tmp/extract/
rm -rf /var/tmp/extract

# Test import-tar
machinectl import-tar /var/tmp/scratch.tar.gz scratch2
test -d /var/lib/machines/scratch2
machinectl image-status scratch2
diff -r /var/tmp/scratch/ /var/lib/machines/scratch2

# Test removal
machinectl remove scratch
! test -f /var/lib/machines/scratch
! machinectl image-status scratch

# Test clone
machinectl clone scratch2 scratch3
test -d /var/lib/machines/scratch2
machinectl image-status scratch2
test -d /var/lib/machines/scratch3
machinectl image-status scratch3
diff -r /var/tmp/scratch/ /var/lib/machines/scratch3

# Test removal
machinectl remove scratch2
! test -f /var/lib/machines/scratch2
! machinectl image-status scratch2

# Test rename
machinectl rename scratch3 scratch4
test -d /var/lib/machines/scratch4
machinectl image-status scratch4
! test -f /var/lib/machines/scratch3
! machinectl image-status scratch3
diff -r /var/tmp/scratch/ /var/lib/machines/scratch4

# Test removal
machinectl remove scratch4
! test -f /var/lib/machines/scratch4
! machinectl image-status scratch4

rm -rf /var/tmp/scratch

echo OK > /testok

exit 0
heftig/systemd
test/TEST-25-IMPORT/testsuite.sh
Shell
gpl-2.0
3,560
#! /bin/sh

if test z"$srcdir" = "z"; then
  srcdir=.
fi

command=run_parser_all.sh
one_test_logs_dir=test_log
diffs_dir=diffs

if test "z$LONG_TESTS" != z"yes" && test "z$ALL_TESTS" != z"yes"; then
  echo "Skipping long tests that take a lot of time to run"
  exit 77
fi

if test "z$TEX_HTML_TESTS" = z"yes"; then
  echo "Skipping long tests, only doing HTML TeX tests"
  exit 77
fi

dir=indices
arg='index_entry_in_footnote'
name='index_entry_in_footnote'

[ -d "$dir" ] || mkdir $dir

srcdir_test=$dir; export srcdir_test;

cd "$dir" || exit 99

../"$srcdir"/"$command" -dir $dir $arg
exit_status=$?

cat $one_test_logs_dir/$name.log

if test -f $diffs_dir/$name.diff; then
  echo
  cat $diffs_dir/$name.diff
fi

exit $exit_status
mwcampbell/texinfo
tp/tests/test_scripts/indices_index_entry_in_footnote.sh
Shell
gpl-3.0
732
#!/bin/bash

if [ $# -lt 1 ]; then
    echo "usage: $0 <rev>"
    echo "error: must specify a version number"
    exit 1
fi

VERSION=$1
TOP_DIR=$(cd $(dirname $0)/../../ && pwd)
TMP_DIR=/tmp/build-libroxml-$(date +"%y%m%d%H%M%S")

LIBROXML_MAJOR=$(sed -n 's#.*MAJOR_VERSION.*, \(.*\))$#\1#p' $TOP_DIR/configure.ac)
LIBROXML_MINOR=$(sed -n 's#.*MINOR_VERSION.*, \(.*\))$#\1#p' $TOP_DIR/configure.ac)
LIBROXML_MICRO=$(sed -n 's#.*MICRO_VERSION.*, \(.*\))$#\1#p' $TOP_DIR/configure.ac)
LIBROXML_VERSION=$LIBROXML_MAJOR.$LIBROXML_MINOR.$LIBROXML_MICRO

mkdir -p $TMP_DIR
mkdir -p $TMP_DIR/logs

echo "== Exporting from GIT @$VERSION libroxml-$LIBROXML_VERSION =="
(cd $TOP_DIR && git archive --format=tar --prefix=libroxml-git-$VERSION/ $VERSION . > $TMP_DIR/libroxml-git-$VERSION.tar)
tar xf $TMP_DIR/libroxml-git-$VERSION.tar -C $TMP_DIR/ && rm -f $TMP_DIR/libroxml-git-$VERSION.tar

echo "== Generating upstream sources =="
(cd $TMP_DIR/libroxml-git-$VERSION/ && ./autogen.sh) &> $TMP_DIR/logs/upstream-source.txt
(cd $TMP_DIR/libroxml-git-$VERSION/ && ./configure) >> $TMP_DIR/logs/upstream-source.txt
(cd $TMP_DIR/libroxml-git-$VERSION/ && make dist-gzip && mv libroxml-$LIBROXML_VERSION.tar.gz ..) >> $TMP_DIR/logs/upstream-source.txt

echo "== Generating debian source package =="
(cd $TMP_DIR/ && tar zxf libroxml-$LIBROXML_VERSION.tar.gz)
cp -a $TMP_DIR/libroxml-$LIBROXML_VERSION.tar.gz $TMP_DIR/libroxml_$LIBROXML_VERSION.orig.tar.gz
cp -a $TMP_DIR/libroxml-git-$VERSION/debian $TMP_DIR/libroxml-$LIBROXML_VERSION

echo "== Build package =="
(cd $TMP_DIR/libroxml-$LIBROXML_VERSION/ && QUILT_PATCHES=debian/patches quilt push -a) > $TMP_DIR/logs/buildpackage.txt
(cd $TMP_DIR/libroxml-$LIBROXML_VERSION/ && dpkg-buildpackage -us -uc) &>> $TMP_DIR/logs/buildpackage.txt

echo "== Analyze package =="
(cd $TMP_DIR/ && lintian -vIiE --pedantic --color=auto *.changes *.deb *.dsc) > $TMP_DIR/logs/lintian.txt

echo "********************************************************************"
echo "Your package is built in '$TMP_DIR'"
echo "********************************************************************"
mahmudur85/libroxml
data/scripts/make_debian_source.sh
Shell
lgpl-2.1
2,100
#!/bin/sh
# SUMMARY: Test the wireguard example
# LABELS:

set -e

# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"

NAME=wireguard

clean_up() {
	rm -f ${NAME}*
}
trap clean_up EXIT

# Test code goes here
moby build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"

exit 0
furious-luke/linuxkit
test/cases/000_build/100_examples/070_wireguard/test.sh
Shell
apache-2.0
315
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x

source tensorflow/tools/ci_build/release/common.sh

install_ubuntu_16_pip_deps pip3.6

install_bazelisk

# Export required variables for running pip.sh
export OS_TYPE="UBUNTU"
export CONTAINER_TYPE="GPU"
export TF_PYTHON_VERSION='python3.6'

# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=1
export TF_CUDA_VERSION=10
export TF_CUDNN_VERSION=7
export TF_NEED_TENSORRT=1
export TENSORRT_INSTALL_PATH=/usr/local/tensorrt
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION})
export PROJECT_NAME="tensorflow_gpu"
export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib"
export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0

yes "" | "$PYTHON_BIN_PATH" configure.py

# Export optional variables for running pip.sh
export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py36'
export TF_BUILD_FLAGS="--config=opt --config=cuda --distinct_host_configuration=false \
--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain "
export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \
--distinct_host_configuration=false \
--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION \
--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \
--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute "
export TF_TEST_TARGETS="//tensorflow/python/... "
export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean"
export IS_NIGHTLY=0 # Not nightly
export TF_PROJECT_NAME=${PROJECT_NAME}
export TF_PIP_TEST_ROOT="pip_test"

# To build both tensorflow and tensorflow-gpu pip packages
export TF_BUILD_BOTH_GPU_PACKAGES=1

./tensorflow/tools/ci_build/builds/pip_new.sh
xzturn/tensorflow
tensorflow/tools/ci_build/release/ubuntu_16/gpu_py36_full/pip_v1.sh
Shell
apache-2.0
2,737
#!/usr/bin/env bash

if [[ $# -lt 1 ]]; then
    echo "Usage: $0 TRINO_VERSION" >&2
    exit 1
fi

set -euo pipefail

# Retrieve the script directory.
SCRIPT_DIR="${BASH_SOURCE%/*}"
cd ${SCRIPT_DIR}

TRINO_VERSION=$1
SERVER_LOCATION="https://repo1.maven.org/maven2/io/trino/trino-server/${TRINO_VERSION}/trino-server-${TRINO_VERSION}.tar.gz"
CLIENT_LOCATION="https://repo1.maven.org/maven2/io/trino/trino-cli/${TRINO_VERSION}/trino-cli-${TRINO_VERSION}-executable.jar"

WORK_DIR="$(mktemp -d)"
curl -o ${WORK_DIR}/trino-server-${TRINO_VERSION}.tar.gz ${SERVER_LOCATION}
tar -C ${WORK_DIR} -xzf ${WORK_DIR}/trino-server-${TRINO_VERSION}.tar.gz
rm ${WORK_DIR}/trino-server-${TRINO_VERSION}.tar.gz
cp -R bin ${WORK_DIR}/trino-server-${TRINO_VERSION}
cp -R default ${WORK_DIR}/

curl -o ${WORK_DIR}/trino-cli-${TRINO_VERSION}-executable.jar ${CLIENT_LOCATION}
chmod +x ${WORK_DIR}/trino-cli-${TRINO_VERSION}-executable.jar

CONTAINER="trino:${TRINO_VERSION}"

docker build ${WORK_DIR} --pull --platform linux/amd64 -f amd64.dockerfile -t ${CONTAINER}-amd64 --build-arg "TRINO_VERSION=${TRINO_VERSION}"
docker build ${WORK_DIR} --pull --platform linux/arm64 -f arm64.dockerfile -t ${CONTAINER}-arm64 --build-arg "TRINO_VERSION=${TRINO_VERSION}"

rm -r ${WORK_DIR}

# Source common testing functions
. container-test.sh

test_container ${CONTAINER}-amd64 linux/amd64
test_container ${CONTAINER}-arm64 linux/arm64

docker image inspect -f '🚀 Built {{.RepoTags}} {{.Id}}' ${CONTAINER}-amd64
docker image inspect -f '🚀 Built {{.RepoTags}} {{.Id}}' ${CONTAINER}-arm64
losipiuk/presto
core/docker/build-remote.sh
Shell
apache-2.0
1,564
#!/bin/bash FN="rgu34acdf_2.18.0.tar.gz" URLS=( "https://bioconductor.org/packages/3.8/data/annotation/src/contrib/rgu34acdf_2.18.0.tar.gz" "https://bioarchive.galaxyproject.org/rgu34acdf_2.18.0.tar.gz" "https://depot.galaxyproject.org/software/bioconductor-rgu34acdf/bioconductor-rgu34acdf_2.18.0_src_all.tar.gz" ) MD5="dcfa7ecce00e529f93809759ed837b8d" # Use a staging area in the conda dir rather than temp dirs, both to avoid # permission issues as well as to have things downloaded in a predictable # manner. STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM mkdir -p $STAGING TARBALL=$STAGING/$FN SUCCESS=0 for URL in ${URLS[@]}; do wget -O- -q $URL > $TARBALL [[ $? == 0 ]] || continue # Platform-specific md5sum checks. if [[ $(uname -s) == "Linux" ]]; then if md5sum -c <<<"$MD5 $TARBALL"; then SUCCESS=1 break fi else if [[ $(uname -s) == "Darwin" ]]; then if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then SUCCESS=1 break fi fi fi done if [[ $SUCCESS != 1 ]]; then echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:" printf '%s\n' "${URLS[@]}" exit 1 fi # Install and clean up R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL rm $TARBALL rmdir $STAGING
joachimwolff/bioconda-recipes
recipes/bioconductor-rgu34acdf/post-link.sh
Shell
mit
1,303
#!/bin/bash FN="signatureSearchData_1.6.0.tar.gz" URLS=( "https://bioconductor.org/packages/3.13/data/experiment/src/contrib/signatureSearchData_1.6.0.tar.gz" "https://bioarchive.galaxyproject.org/signatureSearchData_1.6.0.tar.gz" "https://depot.galaxyproject.org/software/bioconductor-signaturesearchdata/bioconductor-signaturesearchdata_1.6.0_src_all.tar.gz" ) MD5="9fd4567b666beff53f010872d26de3f4" # Use a staging area in the conda dir rather than temp dirs, both to avoid # permission issues as well as to have things downloaded in a predictable # manner. STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM mkdir -p $STAGING TARBALL=$STAGING/$FN SUCCESS=0 for URL in ${URLS[@]}; do curl $URL > $TARBALL [[ $? == 0 ]] || continue # Platform-specific md5sum checks. if [[ $(uname -s) == "Linux" ]]; then if md5sum -c <<<"$MD5 $TARBALL"; then SUCCESS=1 break fi else if [[ $(uname -s) == "Darwin" ]]; then if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then SUCCESS=1 break fi fi fi done if [[ $SUCCESS != 1 ]]; then echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:" printf '%s\n' "${URLS[@]}" exit 1 fi # Install and clean up R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL rm $TARBALL rmdir $STAGING
phac-nml/bioconda-recipes
recipes/bioconductor-signaturesearchdata/post-link.sh
Shell
mit
1,343
#!/bin/sh
java -cp ./axis.jar org.apache.axis.utils.tcpmon 8081 localhost 8080
marktriggs/nyu-sakai-10.4
webservices/axis/test/basic/tunnel.sh
Shell
apache-2.0
80
#!/usr/bin/env bash

# if the first `docker run` argument starts with `-`, the user is passing jenkins swarm launcher arguments
if [[ $# -lt 1 ]] || [[ "$1" == "-"* ]]; then

  # jenkins swarm slave
  JAR=`ls -1 /usr/share/jenkins/swarm-client-*.jar | tail -n 1`

  # Set master URL - jmaster name defined in docker-compose and available for reference in Docker network
  PARAMS="-master http://jmaster:8080"

  # Set default number of executors (2 by default)
  PARAMS="$PARAMS -executors ${NUM_OF_EXECUTORS:-2}"

  # Set mode for jobs execution - leave this machine for tied jobs only
  PARAMS="$PARAMS -mode exclusive "

  # Set labels to slave
  PARAMS="$PARAMS -labels \"linux\" -labels \"alpine\" -labels \"3.3\" -labels \"java\" -labels \"docker\" -labels \"swarm\" -labels \"utility-slave\""

  echo Running java $JAVA_OPTS -jar $JAR -fsroot $HOME $PARAMS "$@"
  exec java $JAVA_OPTS -jar $JAR -fsroot $HOME $PARAMS "$@"
fi

# The argument is not for the swarm client, so assume the user wants to run their own process,
# for example a `bash` shell to explore this image
exec "$@"
lynochka/JenkinsAsCodeReference
dockerizeit/slave/start.sh
Shell
bsd-3-clause
1,066
#!/bin/bash

# set ORIGIN to current git origin
ORIGIN=$(git remote -v | awk '$1=="origin" && $3=="(push)" {print $2}');
VERSION=$(cat package.json | grep version | head -1 | awk -F: '{ print $2 }' | sed 's/[",]//g');

# target folder: /dist/site, make it clean and step into
rm -fr dist
mkdir dist dist/site
cd dist/site

# init an empty git repo, checkout branch gh-pages
git init
git remote add origin $ORIGIN
git fetch
git checkout -t origin/gh-pages

# remove all existing files in the repo, run the site build script
rm *
npm run build

# commit and push to gh-pages
git add . -A
git commit -m "$VERSION"
git push
sam019/element-react
build/scripts/release.sh
Shell
mit
622
#!/bin/bash
sed -e "s@^#\!/usr/bin/python\$@#\!/usr/bin/python -Es@" -i $@
ignatenkobrain/firewalld
fix_python_shebang.sh
Shell
gpl-2.0
76
#!/bin/sh
#
# Copyright 2015 The SageTV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Build the external codecs
cd ../third_party/codecs/faac
./bootstrap || { echo "Build failed, exiting."; exit 1; }
./configure CFLAGS=-fno-common --enable-static --disable-shared --without-mp4v2 || { echo "Build failed, exiting."; exit 1; }
make -j32 || { echo "Build failed, exiting."; exit 1; }

cd ../faad2
./bootstrap || { echo "Build failed, exiting."; exit 1; }
./configure CFLAGS=-fno-common --without-xmms --without-mpeg4ip --without-drm || { echo "Build failed, exiting."; exit 1; }
make -j32 # || { echo "Build failed, exiting."; exit 1; }

cd ../x264
./configure "--extra-cflags=-fasm -fno-common -D_FILE_OFFSET_BITS=64" --disable-avis-input --disable-mp4-output --enable-pthread || { echo "Build failed, exiting."; exit 1; }
make -j32 || { echo "Build failed, exiting."; exit 1; }

cd ../xvidcore/build/generic
./bootstrap.sh || { echo "Build failed, exiting."; exit 1; }
./configure CFLAGS=-fno-common || { echo "Build failed, exiting."; exit 1; }
make -j32 || { echo "Build failed, exiting."; exit 1; }

# Build FFMPEG
cd ../../../../ffmpeg
make clean
./configure --disable-ffserver --disable-ffplay --enable-gpl --enable-pthreads --enable-nonfree --enable-libfaac --enable-libx264 --enable-libxvid --disable-devices --disable-demuxer=msnwc_tcp --enable-libfaad "--extra-cflags=-I. -I`readlink -f ../codecs/faad2/include` -I`readlink -f ../codecs/faac/include` -I`readlink -f ../codecs/x264` -I`readlink -f ../codecs/xvidcore/src`" "--extra-ldflags=-L`readlink -f ../codecs/faac/libfaac/.libs` -L`readlink -f ../codecs/faad2/libfaad/.libs` -L`readlink -f ../codecs/x264` -L`readlink -f ../codecs/xvidcore/build/generic/=build`" || { echo "Build failed, exiting."; exit 1; }
make -j32 || { echo "Build failed, exiting."; exit 1; }
cd ../../build

# Build mplayer (if MPLAYER_NEW=1 is set, then the newer mplayer will be built)
./buildmplayer.sh || { echo "Build MPLAYER failed, exiting."; exit 1; }

# Copy the files to the release folder
mkdir elf
cd elf
cp ../../third_party/ffmpeg/ffmpeg .
cp ../../third_party/codecs/jpeg-6b/jpegtran .
cd ..
JasOXIII/sagetv
build/build3rdparty.sh
Shell
apache-2.0
2,680
#!/bin/bash

rm -r autom4te.cache
aclocal -I ./m4/

echo "autoconf!"
autoconf configure.ac > test_conf

if [ $? -eq 0 ]
then
    echo "configure!"
    chmod +x ./test_conf
    # example on guinan, which has "normal" defaults for a workstation or server
    ./test_conf MPIFC=mpif90 FC=mpif90 CUDA_LIB="-L/usr/local/cuda/lib64/" MPI_INC="-I/usr/include/mpich2/" --with-cuda CUDA_INC="-I/usr/local/cuda/include"
fi
QuLogic/specfem3d
utils/remake_makefiles.sh
Shell
gpl-2.0
410
#!/bin/bash

REPO=${REPO:-docker.io}
SOURCE_DIR=$(dirname ${BASH_SOURCE})
DSN_PYTHON=${DSN_PYTHON:-$HOME/rDSN.Python}

function prepare_file()
{
    # wget https://github.com/mcfatealan/rDSN.Python/tree/master/release/linux/MonitorPack.7z
    tar -cvzf ${SOURCE_DIR}/MonitorPack.tar.gz -C ${DSN_PYTHON}/src apps/rDSN.monitor dev setup.py
    tar -cvzf ${SOURCE_DIR}/rdsn-release.tar.gz -C $DSN_ROOT include lib bin
}

prepare_file
docker build -t ${REPO}/rdsn ${SOURCE_DIR}
docker push ${REPO}/rdsn
imzhenyu/rDSN
deploy/docker/build-image.sh
Shell
mit
512
#!/bin/sh . "${TEST_SCRIPTS_DIR}/unit.sh" define_test "event scripts" shellcheck_test "${CTDB_SCRIPTS_BASE}/events.d"/[0-9][0-9].*
SVoxel/R7800
git_home/samba.git/ctdb/tests/shellcheck/event_scripts.sh
Shell
gpl-2.0
134
#!/bin/sh
set -e

# check to see if casperjs folder is empty
if [ ! -d "$HOME/casperjs-1.1.3/bin" ]; then
  wget https://github.com/n1k0/casperjs/archive/1.1.3.tar.gz -O $HOME/casper.tar.gz;
  tar -xvf $HOME/casper.tar.gz -C $HOME;
else
  echo 'Using cached directory for casperjs.';
fi
Mertiozys/thelia
tests/travis/install-casperjs.sh
Shell
lgpl-3.0
287
#!/bin/bash

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

export PYTHONPATH=./python/
export MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0
export MXNET_SUBGRAPH_VERBOSE=0
export DMLC_LOG_STACK_TRACE_DEPTH=10

test_kvstore() {
    test_args=(
        "-n 4 --launcher local python3 dist_device_sync_kvstore.py"
        "-n 4 --launcher local python3 dist_device_sync_kvstore_custom.py"
        "--p3 -n 4 --launcher local python3 dist_device_sync_kvstore_custom.py"
        "-n 4 --launcher local python3 dist_sync_kvstore.py --type=init_gpu"
    )
    for arg in "${test_args[@]}"; do
        python3 ../../tools/launch.py $arg
        # capture the launcher's exit status before the test clobbers $?
        ret=$?
        if [ $ret -ne 0 ]; then
            return $ret
        fi
    done
}

test_horovod() {
    echo "localhost slots=2" > hosts
    mpirun -np 2 --hostfile hosts --bind-to none --map-by slot -mca pml ob1 \
        -mca btl ^openib python3 dist_device_sync_kvstore_horovod.py
    ret=$?
    if [ $ret -ne 0 ]; then
        return $ret
    fi
}

errors=0
test_kvstore || errors=$?
test_horovod || errors=$?
exit $errors
szha/mxnet
tests/nightly/test_distributed_training-gpu.sh
Shell
apache-2.0
1,731
#!/bin/sh

# plugin_test_3.sh -- a test case for the plugin API.

# Copyright (C) 2008-2016 Free Software Foundation, Inc.
# Written by Cary Coutant <[email protected]>.

# This file is part of gold.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
# MA 02110-1301, USA.

# This file goes with plugin_test.c, a simple plug-in library that
# exercises the basic interfaces and prints out version numbers and
# options passed to the plugin.

check()
{
    if ! grep -q "$2" "$1"
    then
        echo "Did not find expected output in $1:"
        echo "  $2"
        echo ""
        echo "Actual output below:"
        cat "$1"
        exit 1
    fi
}

check plugin_test_3.err "API version:"
check plugin_test_3.err "gold version:"
check plugin_test_3.err "option: _Z4f13iv"
check plugin_test_3.err "two_file_test_main.o: claim file hook called"
check plugin_test_3.err "two_file_test_1.o.syms: claim file hook called"
check plugin_test_3.err "two_file_test_1b.o.syms: claim file hook called"
check plugin_test_3.err "two_file_test_2.o.syms: claim file hook called"
check plugin_test_3.err "two_file_test_1.o.syms: _Z4f13iv: PREVAILING_DEF_IRONLY_EXP"
check plugin_test_3.err "two_file_test_1.o.syms: _Z2t2v: PREVAILING_DEF_REG"
check plugin_test_3.err "two_file_test_1.o.syms: v2: RESOLVED_IR"
check plugin_test_3.err "two_file_test_1.o.syms: t17data: RESOLVED_IR"
check plugin_test_3.err "two_file_test_2.o.syms: _Z4f13iv: PREEMPTED_IR"
check plugin_test_3.err "two_file_test_1.o: adding new input file"
check plugin_test_3.err "two_file_test_1b.o: adding new input file"
check plugin_test_3.err "two_file_test_2.o: adding new input file"
check plugin_test_3.err "cleanup hook called"

exit 0
h4ck3rm1k3/binutils-gdb
gold/testsuite/plugin_test_3.sh
Shell
gpl-2.0
2,290
#!/bin/sh

export PARROT_ALLOW_SWITCHING_CVMFS_REPOSITORIES="yes"
export HTTP_PROXY=http://cache01.hep.wisc.edu:3128
export PARROT_CVMFS_REPO='*.cern.ch:pubkey=<BUILTIN-cern.ch.pub>,url=http://cvmfs-stratum-one.cern.ch/cvmfs/*.cern.ch;http://cernvmfs.gridpp.rl.ac.uk/cvmfs/*.cern.ch;http://cvmfs.racf.bnl.gov/cvmfs/*.cern.ch atlas-nightlies.cern.ch:url=http://cvmfs-atlas-nightlies.cern.ch/cvmfs/atlas-nightlies.cern.ch,pubkey=<BUILTIN-cern.ch.pub>'

parrot_run ./atlas.sh

# vim: set noexpandtab tabstop=4:
nhazekam/cctools
apps/parrot_atlas/parrot.atlas.sh
Shell
gpl-2.0
508
########################################################################
# Bug #729843: innobackupex logs plaintext password
########################################################################

. inc/common.sh

start_server

mkdir $topdir/backup

logfile=$topdir/backup/innobackupex_log

# Don't use run_cmd_* or innobackupex functions here to avoid logging
# the full command line (including the password in plaintext)
set +e
$IB_BIN $IB_ARGS --password=secretpassword $topdir/backup 2>&1 | tee $logfile
set -e

# Check that the password was not logged in plaintext
run_cmd_expect_failure grep -- "secretpassword" $logfile
metacloud/percona-xtrabackup
test/t/bug729843.sh
Shell
gpl-2.0
628
#!/bin/bash

MESON_VERSION="0.40.1"

fw_depends python3

fw_installed meson && return 0

pip3 install meson==$MESON_VERSION

touch $IROOT/meson.installed
saturday06/FrameworkBenchmarks
toolset/setup/linux/systools/meson.sh
Shell
bsd-3-clause
154
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# kubernetes-e2e-{gce, gke, gke-ci} jobs: This script is triggered by
# the kubernetes-build job, or runs every half hour. We abort this job
# if it takes more than 75m. As of initial commit, it typically runs
# in about half an hour.
#
# The "Workspace Cleanup Plugin" is installed and in use for this job,
# so the ${WORKSPACE} directory (the current directory) is currently
# empty.

set -o errexit
set -o nounset
set -o pipefail
set -o xtrace

# Join all args with |
#   Example: join_regex_allow_empty a b "c d" e  =>  a|b|c d|e
function join_regex_allow_empty() {
    local IFS="|"
    echo "$*"
}

# Join all args with |, but in case of an empty result prints "EMPTY\sSET" instead.
#   Example: join_regex_no_empty a b "c d" e  =>  a|b|c d|e
#            join_regex_no_empty  =>  EMPTY\sSET
function join_regex_no_empty() {
    local IFS="|"
    if [ -z "$*" ]; then
        echo "EMPTY\sSET"
    else
        echo "$*"
    fi
}

echo "--------------------------------------------------------------------------------"
echo "Initial Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"

if [[ "${CIRCLECI:-}" == "true" ]]; then
    JOB_NAME="circleci-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}"
    BUILD_NUMBER=${CIRCLE_BUILD_NUM}
    WORKSPACE=`pwd`
else
    # Jenkins?
    export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
fi

# Additional parameters that are passed to hack/e2e.go
E2E_OPT=${E2E_OPT:-""}

# Set environment variables shared for all of the GCE Jenkins projects.
if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
    KUBERNETES_PROVIDER="gce"
    : ${E2E_MIN_STARTUP_PODS:="1"}
    : ${E2E_ZONE:="us-central1-f"}
    : ${MASTER_SIZE:="n1-standard-2"}
    : ${MINION_SIZE:="n1-standard-2"}
    : ${NUM_MINIONS:="3"}
fi

if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
    if [[ "${PERFORMANCE:-}" == "true" ]]; then
        : ${MASTER_SIZE:="m3.xlarge"}
        : ${NUM_MINIONS:="100"}
        : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\ssuite\]"}
    else
        : ${MASTER_SIZE:="t2.small"}
        : ${NUM_MINIONS:="2"}
    fi
fi

# Specialized tests which should be skipped by default for projects.
GCE_DEFAULT_SKIP_TESTS=(
    "Skipped"
    "Reboot"
    "Restart"
    "Example"
)

# The following tests are known to be flaky, and are thus run only in their own
# -flaky- build variants.
GCE_FLAKY_TESTS=()

# Tests which are not able to be run in parallel.
GCE_PARALLEL_SKIP_TESTS=(
    ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}}
    "Etcd"
    "NetworkingNew"
    "Nodes\sNetwork"
    "Nodes\sResize"
    "MaxPods"
    "SchedulerPredicates"
    "Services.*restarting"
    "Shell.*services"
)

# Tests which are known to be flaky when run in parallel.
GCE_PARALLEL_FLAKY_TESTS=(
    "Elasticsearch"
    "PD"
    "ServiceAccounts"
    "Service\sendpoints\slatency"
    "Services.*change\sthe\stype"
    "Services.*functioning\sexternal\sload\sbalancer"
    "Services.*identically\snamed"
    "Services.*release.*load\sbalancer"
)

# Define environment variables based on the Jenkins project name.
case ${JOB_NAME} in

  # Runs all non-flaky tests on GCE, sequentially.
  kubernetes-e2e-gce)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e"}
    : ${E2E_DOWN:="false"}
    : ${E2E_NETWORK:="e2e-gce"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
          ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
    : ${PROJECT:="k8s-jkns-e2e-gce"}
    ;;

  # Runs only the examples tests on GCE.
  kubernetes-e2e-gce-examples)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-examples"}
    : ${E2E_DOWN:="false"}
    : ${E2E_NETWORK:="e2e-examples"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Example"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-examples"}
    : ${PROJECT:="kubernetes-jenkins"}
    ;;

  # Runs the flaky tests on GCE, sequentially.
  kubernetes-e2e-gce-flaky)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-flaky"}
    : ${E2E_DOWN:="false"}
    : ${E2E_NETWORK:="e2e-flaky"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
          ) --ginkgo.focus=$(join_regex_no_empty \
          ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flaky"}
    : ${PROJECT:="k8s-jkns-e2e-gce-flaky"}
    ;;

  # Runs all non-flaky tests on GCE in parallel.
  kubernetes-e2e-gce-parallel)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-parallel"}
    : ${E2E_NETWORK:="e2e-parallel"}
    : ${GINKGO_PARALLEL:="y"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
          ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
          ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-test-parallel"}
    : ${PROJECT:="kubernetes-jenkins"}
    # Override GCE defaults.
    NUM_MINIONS="6"
    ;;

  # Runs the flaky tests on GCE in parallel.
  kubernetes-e2e-gce-parallel-flaky)
    : ${E2E_CLUSTER_NAME:="parallel-flaky"}
    : ${E2E_NETWORK:="e2e-parallel-flaky"}
    : ${GINKGO_PARALLEL:="y"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
          ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
          ) --ginkgo.focus=$(join_regex_no_empty \
          ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="parallel-flaky"}
    : ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"}
    # Override GCE defaults.
    NUM_MINIONS="4"
    ;;

  # Runs only the reboot tests on GCE.
  kubernetes-e2e-gce-reboot)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-reboot"}
    : ${E2E_DOWN:="false"}
    : ${E2E_NETWORK:="e2e-reboot"}
    : ${GINKGO_TEST_ARGS:=" --ginkgo.focus=Reboot"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-reboot"}
    : ${PROJECT:="kubernetes-jenkins"}
    ;;

  # Runs the performance/scalability tests on GCE. A larger cluster is used.
  kubernetes-e2e-gce-scalability)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-scalability"}
    : ${E2E_NETWORK:="e2e-scalability"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Performance\ssuite"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-scalability"}
    : ${PROJECT:="kubernetes-jenkins"}
    # Override GCE defaults.
    MASTER_SIZE="n1-standard-4"
    MINION_SIZE="n1-standard-2"
    MINION_DISK_SIZE="50GB"
    NUM_MINIONS="100"
    ;;

  # Runs a subset of tests on GCE in parallel. Run against all pending PRs.
  kubernetes-pull-build-test-e2e-gce)
    : ${E2E_CLUSTER_NAME:="jenkins-pull-gce-e2e-${EXECUTOR_NUMBER}"}
    : ${E2E_NETWORK:="pull-e2e-parallel-${EXECUTOR_NUMBER}"}
    : ${GINKGO_PARALLEL:="y"}
    # This list should match the list in kubernetes-e2e-gce-parallel. It
    # currently also excludes a slow namespace test.
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
          ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
          ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX:="pull-e2e-${EXECUTOR_NUMBER}"}
    : ${KUBE_GCS_STAGING_PATH_SUFFIX:="-${EXECUTOR_NUMBER}"}
    : ${PROJECT:="kubernetes-jenkins-pull"}
    # Override GCE defaults.
    MASTER_SIZE="n1-standard-1"
    MINION_SIZE="n1-standard-1"
    NUM_MINIONS="2"
    ;;

  # Runs non-flaky tests on GCE on the release-latest branch,
  # sequentially. As a reminder, if you need to change the skip list
  # or flaky test list on the release branch, you'll need to propose a
  # pull request directly to the release branch itself.
  kubernetes-e2e-gce-release)
    : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-release"}
    : ${E2E_DOWN:="false"}
    : ${E2E_NETWORK:="e2e-gce-release"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
          ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
    : ${PROJECT:="k8s-jkns-e2e-gce-release"}
    ;;
esac

# AWS variables
export KUBE_AWS_INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_AWS_ZONE=${E2E_ZONE}

# GCE variables
export INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_GCE_ZONE=${E2E_ZONE}
export KUBE_GCE_NETWORK=${E2E_NETWORK}
export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-}
export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-}

# GKE variables
export CLUSTER_NAME=${E2E_CLUSTER_NAME}
export ZONE=${E2E_ZONE}
export KUBE_GKE_NETWORK=${E2E_NETWORK}

# Shared cluster variables
export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
export MASTER_SIZE=${MASTER_SIZE:-}
export MINION_SIZE=${MINION_SIZE:-}
export NUM_MINIONS=${NUM_MINIONS:-}
export PROJECT=${PROJECT:-}

export PATH=${PATH}:/usr/local/go/bin
export KUBE_SKIP_CONFIRMATIONS=y

# E2E Control Variables
export E2E_UP="${E2E_UP:-true}"
export E2E_TEST="${E2E_TEST:-true}"
export E2E_DOWN="${E2E_DOWN:-true}"

# Used by hack/ginkgo-e2e.sh to enable ginkgo's parallel test runner.
export GINKGO_PARALLEL=${GINKGO_PARALLEL:-}

echo "--------------------------------------------------------------------------------"
echo "Test Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"

# We get the Kubernetes tarballs on either cluster creation or when we want to
# replace existing ones in a multi-step job (e.g. a cluster upgrade).
if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then

    if [[ ${KUBE_RUN_FROM_OUTPUT:-} =~ ^[yY]$ ]]; then
        echo "Found KUBE_RUN_FROM_OUTPUT=y; will use binaries from _output"
        cp _output/release-tars/kubernetes*.tar.gz .
    else
        echo "Pulling binaries from GCS"
        # In a multi-step job, clean up just the kubernetes build files.
        # Otherwise, we want a completely empty directory.
        if [[ "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
            rm -rf kubernetes*
        elif [[ $(find . | wc -l) != 1 ]]; then
            echo $PWD not empty, bailing!
            exit 1
        fi

        # Tell kube-up.sh to skip the update, it doesn't lock. An internal
        # gcloud bug can cause racing component updates to stomp on each
        # other.
        export KUBE_SKIP_UPDATE=y
        sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true
        sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update preview -q" || true
        sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update alpha -q" || true
        sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update beta -q" || true

        if [[ ! -z ${JENKINS_EXPLICIT_VERSION:-} ]]; then
            # Use an explicit pinned version like "ci/v0.10.0-101-g6c814c4" or
            # "release/v0.19.1"
            IFS='/' read -a varr <<< "${JENKINS_EXPLICIT_VERSION}"
            bucket="${varr[0]}"
            githash="${varr[1]}"
            echo "$bucket / $githash"
        elif [[ ${JENKINS_USE_SERVER_VERSION:-} =~ ^[yY]$ ]]; then
            # for GKE we can use server default version.
            bucket="release"
            msg=$(gcloud ${CMD_GROUP} container get-server-config --project=${PROJECT} --zone=${ZONE} | grep defaultClusterVersion)
            # msg will look like "defaultClusterVersion: 1.0.1". Strip
            # everything up to, including ": "
            githash="v${msg##*: }"
        else
            # The "ci" bucket is for builds like "v0.15.0-468-gfa648c1"
            bucket="ci"
            # The "latest" version picks the most recent "ci" or "release" build.
            version_file="latest"
            if [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
                # The "release" bucket is for builds like "v0.15.0"
                bucket="release"
                if [[ ${JENKINS_USE_STABLE:-} =~ ^[yY]$ ]]; then
                    # The "stable" version picks the most recent "release" build.
                    version_file="stable"
                fi
            fi
            githash=$(gsutil cat gs://kubernetes-release/${bucket}/${version_file}.txt)
        fi

        # At this point, we want to have the following vars set:
        # - bucket
        # - githash
        gsutil -m cp gs://kubernetes-release/${bucket}/${githash}/kubernetes.tar.gz gs://kubernetes-release/${bucket}/${githash}/kubernetes-test.tar.gz .
    fi

    if [[ ! "${CIRCLECI:-}" == "true" ]]; then
        # Copy GCE keys so we don't keep cycling them.
        # To set this up, you must know the <project>, <zone>, and <instance>
        # on which your jenkins jobs are running. Then do:
        #
        # # SSH from your computer into the instance.
        # $ gcloud compute ssh --project="<prj>" ssh --zone="<zone>" <instance>
        #
        # # Generate a key by ssh'ing from the instance into itself, then exit.
        # $ gcloud compute ssh --project="<prj>" ssh --zone="<zone>" <instance>
        # $ ^D
        #
        # # Copy the keys to the desired location (e.g. /var/lib/jenkins/gce_keys/).
        # $ sudo mkdir -p /var/lib/jenkins/gce_keys/
        # $ sudo cp ~/.ssh/google_compute_engine /var/lib/jenkins/gce_keys/
        # $ sudo cp ~/.ssh/google_compute_engine.pub /var/lib/jenkins/gce_keys/
        #
        # # Move the permissions for the keys to Jenkins.
        # $ sudo chown -R jenkins /var/lib/jenkins/gce_keys/
        # $ sudo chgrp -R jenkins /var/lib/jenkins/gce_keys/
        if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
            echo "Skipping SSH key copying for AWS"
        else
            mkdir -p ${WORKSPACE}/.ssh/
            cp /var/lib/jenkins/gce_keys/google_compute_engine ${WORKSPACE}/.ssh/
            cp /var/lib/jenkins/gce_keys/google_compute_engine.pub ${WORKSPACE}/.ssh/
        fi
    fi

    md5sum kubernetes*.tar.gz
    tar -xzf kubernetes.tar.gz
    tar -xzf kubernetes-test.tar.gz

    # Set by GKE-CI to change the CLUSTER_API_VERSION to the git version
    if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then
        export CLUSTER_API_VERSION=$(echo ${githash} | cut -c 2-)
    elif [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
        release=$(gsutil cat gs://kubernetes-release/release/${version_file}.txt | cut -c 2-)
        export CLUSTER_API_VERSION=${release}
    fi
fi

cd kubernetes

# Have cmd/e2e run by goe2e.sh generate JUnit report in ${WORKSPACE}/junit*.xml
ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS}
export E2E_REPORT_DIR=${ARTIFACTS}

### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then
    go run ./hack/e2e.go ${E2E_OPT} -v --down
    go run ./hack/e2e.go ${E2E_OPT} -v --up
    go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
fi

### Run tests ###
# Jenkins will look at the junit*.xml files for test failures, so don't exit
# with a nonzero error code if it was only tests that failed.
if [[ "${E2E_TEST,,}" == "true" ]]; then
    go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}" && exitcode=0 || exitcode=$?
    if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" && ${exitcode} == 0 && -n ${githash:-} ]]; then
        echo "publish githash to ci/latest-green.txt: ${githash}"
        echo "${githash}" > ${WORKSPACE}/githash.txt
        gsutil cp ${WORKSPACE}/githash.txt gs://kubernetes-release/ci/latest-green.txt
    fi
fi

# TODO(zml): We have a bunch of legacy Jenkins configs that are
# expecting junit*.xml to be in ${WORKSPACE} root and it's Friday
# afternoon, so just put the junit report where it's expected.
# If link already exists, non-zero return code should not cause build to fail.
for junit in ${ARTIFACTS}/junit*.xml; do
    ln -s -f ${junit} ${WORKSPACE} || true
done

### Clean up ###
if [[ "${E2E_DOWN,,}" == "true" ]]; then
    # Sleep before deleting the cluster to give the controller manager time to
    # delete any cloudprovider resources still around from the last test.
    # This is calibrated to allow enough time for 3 attempts to delete the
    # resources. Each attempt is allocated 5 seconds for requests to the
    # cloudprovider plus the processingRetryInterval from servicecontroller.go
    # for the wait between attempts.
    sleep 30
    go run ./hack/e2e.go ${E2E_OPT} -v --down
fi
jgriffiths1993/kubernetes
hack/jenkins/e2e.sh
Shell
apache-2.0
17,308
#!/usr/bin/env bash

cd "$(dirname $0)"

export LC_ALL=C

data="../../assets"

echo "Checking for unused graphics..."
for dir in space exterior; do
   cd "$dir"
   echo -e "\n Unused planet $dir gfx"
   for img in *.png; do
      if ! cat ../${data}/*.xml | grep -qF "<$dir>$img"; then
         echo " $img"
      fi
   done
   cd ..
done

echo -e "\nChecking for overused graphics..."
for dir in space exterior; do
   cd "$dir"
   echo -e "\n Overused planet $dir gfx"
   for img in *.png; do
      count=$(cat ../${data}/*.xml | grep -cF "<$dir>$img")
      if [[ $count > 1 ]]; then
         echo " $img => $count times"
      fi
   done | sort -k3 -n -r
   cd ..
done
kjoenth/naev
dat/gfx/planet/check.sh
Shell
gpl-3.0
685
#!/bin/sh
## Copyright (C) 2005 Victorian Partnership for Advanced Computing (VPAC) Ltd
## 110 Victoria Street, Melbourne, 3053, Australia.
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

parsePackageConfigOptions $@

# Check if we're allowed to use GLIB2
if test "${NOGLIB2}" = "1" ; then
	return 0
fi

case ${SYSTEM} in
	Darwin)
		setValueWithDefault GLIB2_DIR '/sw/';;
	*)
		setValueWithDefault GLIB2_DIR '/usr/';;
esac

setValueWithDefault GLIB2_INCDIR '${GLIB2_DIR}/include/glib-2.0'

if test -r "${GLIB2_INCDIR}/glib.h" ; then
	setValueWithDefault GLIB2_INCLUDES '-I${GLIB2_INCDIR} -I${GLIB2_DIR}/lib/glib-2.0/include -DHAVE_GLIB2'
	setValueWithDefault GLIB2_LIBDIR '${GLIB2_DIR}/lib'
	setValueWithDefault GLIB2_LIBFILES '-lgobject-2.0 -lgmodule-2.0 -lglib-2.0'
	setValueWithDefault GLIB2_LIBS '-L${GLIB2_LIBDIR} ${GLIB2_LIBFILES}'
	if test "${RPATH_FLAG}x" != "x" ; then
		setValueWithDefault GLIB2_RPATH '${RPATH_FLAG}${GLIB2_LIBDIR}'
	fi
	setValueWithDefault HAVE_GLIB2 '1'
fi
bmi-forum/bmi-pyre
VMake/Config/glib2-config.sh
Shell
gpl-2.0
1,740
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

set -e
cd $(dirname $0)
source ./determine_extension_dir.sh
export GRPC_TEST_HOST=localhost:50051
php $extension_dir $(which phpunit) -v --debug --strict \
  ../tests/generated_code/GeneratedCodeTest.php
php $extension_dir $(which phpunit) -v --debug --strict \
  ../tests/generated_code/GeneratedCodeWithCallbackTest.php
wangyikai/grpc
src/php/bin/run_gen_code_test.sh
Shell
bsd-3-clause
1,863
ADMIN_USER_NAME=${ADMIN_USER_NAME:-"wsadmin"}
WSADMIN_PASS=$(cat /tmp/PASSWORD)

/opt/IBM/WebSphere/AppServer/bin/wsadmin.sh -user $ADMIN_USER_NAME -password $WSADMIN_PASS -lang jython -f $1 $2
WASdev/ci.docker.websphere-traditional
docker-build/9.0.5.x/scripts/run_py_script.sh
Shell
apache-2.0
193
#!/bin/bash
#
# Contains a simple fetcher to download a file from a remote URL and verify its
# SHA1 checksum.
#
# Usage: fetch.sh <remote URL> <SHA1 checksum>
set -e

# Pull the file from the remote URL
file=`basename $1`
echo "Downloading $1..."
wget -q $1

# Generate a desired checksum report and check against it
echo "$2  $file" > $file.sum
sha1sum -c $file.sum
rm $file.sum
christiangalsterer/httpbeat
vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image/base/fetch.sh
Shell
apache-2.0
381
#!/bin/sh

exec_mess() {
    /usr/libexec/mame/mess \
        -artpath "$HOME/.mess/artwork;artwork" \
        -ctrlrpath "$HOME/.mess/ctrlr;ctrlr" \
        -inipath $HOME/.mess/ini \
        -rompath $HOME/.mess/roms \
        -samplepath "$HOME/.mess/samples;samples" \
        -cfg_directory $HOME/.mess/cfg \
        -comment_directory $HOME/.mess/comments \
        -diff_directory $HOME/.mess/diff \
        -input_directory $HOME/.mess/inp \
        -nvram_directory $HOME/.mess/nvram \
        -snapshot_directory $HOME/.mess/snap \
        -state_directory $HOME/.mess/sta \
        -video opengl \
        -createconfig
}

if [ "$1" = "--newini" ]; then
    echo "Rebuilding the ini file at $HOME/.mess/mame.ini"
    echo "Modify this file for permanent changes to your MESS"
    echo "options and paths before running MESS again."
    cd $HOME/.mess
    if [ -e mame.ini ]; then
        echo "Your old ini file has been renamed to mame.ini.bak"
        mv mame.ini mame.ini.bak
    fi
    exec_mess
elif [ ! -e $HOME/.mess ]; then
    echo "Running MESS for the first time..."
    echo "Creating an ini file for MESS at $HOME/.mess/mame.ini"
    echo "Modify this file for permanent changes to your MAME"
    echo "options and paths before running MAME again."
    mkdir $HOME/.mess
    # create every directory referenced by the flags above (inp, not ip,
    # matches -input_directory)
    for f in artwork cfg comments ctrlr diff ini inp nvram \
        samples snap sta roms; do
        mkdir $HOME/.mess/${f}
    done
    cd $HOME/.mess && exec_mess
else
    cd /usr/share/mame
    /usr/libexec/mame/mess "$@"
fi
necrophcodr/void-packages
srcpkgs/mame/files/mess.sh
Shell
bsd-2-clause
1,414
#!/usr/bin/env bash if [ "$2" != "" ] then echo >&2 "Usage: $0 [<version>]" exit 1 fi MW_DIR=$(cd $(dirname $0)/../..; pwd) # e.g. mediawiki-core/ NPM_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mw-update-oojs'` # e.g. /tmp/mw-update-oojs.rI0I5Vir # Prepare MediaWiki working copy cd $MW_DIR git reset resources/lib/oojs/ && git checkout resources/lib/oojs/ && git fetch origin || exit 1 git checkout -B upstream-oojs origin/master || exit 1 # Fetch upstream version cd $NPM_DIR if [ "$1" != "" ] then npm install oojs@$1 || exit 1 else npm install oojs || exit 1 fi OOJS_VERSION=$(node -e 'console.log(JSON.parse(require("fs").readFileSync("./node_modules/oojs/package.json")).version);') if [ "$OOJS_VERSION" == "" ] then echo 'Could not find OOjs version' exit 1 fi # Copy file(s) mv ./node_modules/oojs/dist/* $MW_DIR/resources/lib/oojs/ || exit 1 # Generate commit cd $MW_DIR || exit 1 # Clean up temporary area rm -rf $NPM_DIR COMMITMSG=$(cat <<END Update OOjs to v$OOJS_VERSION Release notes: https://git.wikimedia.org/blob/oojs%2Fcore.git/v$OOJS_VERSION/History.md END ) git commit resources/lib/oojs/ -m "$COMMITMSG" || exit 1
kylethayer/bioladder
wiki/maintenance/resources/update-oojs.sh
Shell
gpl-3.0
1,155
#######################################
# Pacman                              #
#######################################

# Pacman - https://wiki.archlinux.org/index.php/Pacman_Tips
alias pacupg='sudo pacman -Syu'
alias pacin='sudo pacman -S'
alias paclean='sudo pacman -Sc'
alias pacins='sudo pacman -U'
alias paclr='sudo pacman -Scc'
alias pacre='sudo pacman -R'
alias pacrem='sudo pacman -Rns'
alias pacrep='pacman -Si'
alias pacreps='pacman -Ss'
alias pacloc='pacman -Qi'
alias paclocs='pacman -Qs'
alias pacinsd='sudo pacman -S --asdeps'
alias pacmir='sudo pacman -Syy'
alias paclsorphans='sudo pacman -Qdt'
alias pacrmorphans='sudo pacman -Rs $(pacman -Qtdq)'
alias pacfileupg='sudo pacman -Fy'
alias pacfiles='pacman -F'
alias pacls='pacman -Ql'
alias pacown='pacman -Qo'
alias pacupd="sudo pacman -Sy"
alias upgrade='sudo pacman -Syu'

function paclist() {
  # Based on https://bbs.archlinux.org/viewtopic.php?id=93683
  pacman -Qqe | \
    xargs -I '{}' \
    expac "${bold_color}% 20n ${fg_no_bold[white]}%d${reset_color}" '{}'
}

function pacdisowned() {
  local tmp db fs
  tmp=${TMPDIR-/tmp}/pacman-disowned-$UID-$$
  db=$tmp/db
  fs=$tmp/fs

  mkdir "$tmp"
  trap 'rm -rf "$tmp"' EXIT

  pacman -Qlq | sort -u > "$db"

  find /bin /etc /lib /sbin /usr ! -name lost+found \
    \( -type d -printf '%p/\n' -o -print \) | sort > "$fs"

  comm -23 "$fs" "$db"
}

alias pacmanallkeys='sudo pacman-key --refresh-keys'

function pacmansignkeys() {
  local key
  for key in $@; do
    sudo pacman-key --recv-keys $key
    sudo pacman-key --lsign-key $key
    printf 'trust\n3\n' | sudo gpg --homedir /etc/pacman.d/gnupg \
      --no-permission-warning --command-fd 0 --edit-key $key
  done
}

if (( $+commands[xdg-open] )); then
  function pacweb() {
    if [[ $# = 0 || "$1" =~ '--help|-h' ]]; then
      local underline_color="\e[${color[underline]}m"
      echo "$0 - open the website of an ArchLinux package"
      echo
      echo "Usage:"
      echo "  $bold_color$0$reset_color ${underline_color}target${reset_color}"
      return 1
    fi

    local pkg="$1"
    local infos="$(LANG=C pacman -Si "$pkg")"
    if [[ -z "$infos" ]]; then
      return
    fi
    local repo="$(grep -m 1 '^Repo' <<< "$infos" | grep -oP '[^ ]+$')"
    local arch="$(grep -m 1 '^Arch' <<< "$infos" | grep -oP '[^ ]+$')"
    xdg-open "https://www.archlinux.org/packages/$repo/$arch/$pkg/" &>/dev/null
  }
fi

#######################################
# AUR helpers                         #
#######################################

if (( $+commands[aura] )); then
  alias auin='sudo aura -S'
  alias aurin='sudo aura -A'
  alias auclean='sudo aura -Sc'
  alias auclr='sudo aura -Scc'
  alias auins='sudo aura -U'
  alias auinsd='sudo aura -S --asdeps'
  alias aurinsd='sudo aura -A --asdeps'
  alias auloc='aura -Qi'
  alias aulocs='aura -Qs'
  alias aulst='aura -Qe'
  alias aumir='sudo aura -Syy'
  alias aurph='sudo aura -Oj'
  alias aure='sudo aura -R'
  alias aurem='sudo aura -Rns'
  alias aurep='aura -Si'
  alias aurrep='aura -Ai'
  alias aureps='aura -As --both'
  alias auras='aura -As --both'
  alias auupd="sudo aura -Sy"
  alias auupg='sudo sh -c "aura -Syu && aura -Au"'
  alias ausu='sudo sh -c "aura -Syu --no-confirm && aura -Au --no-confirm"'
  alias upgrade='sudo aura -Syu'

  # extra bonus specially for aura
  alias auown="aura -Qqo"
  alias auls="aura -Qql"
  function auownloc() { aura -Qi $(aura -Qqo $@); }
  function auownls () { aura -Qql $(aura -Qqo $@); }
fi

if (( $+commands[pacaur] )); then
  alias pacclean='pacaur -Sc'
  alias pacclr='pacaur -Scc'
  alias paupg='pacaur -Syu'
  alias pasu='pacaur -Syu --noconfirm'
  alias pain='pacaur -S'
  alias pains='pacaur -U'
  alias pare='pacaur -R'
  alias parem='pacaur -Rns'
  alias parep='pacaur -Si'
  alias pareps='pacaur -Ss'
  alias paloc='pacaur -Qi'
  alias palocs='pacaur -Qs'
  alias palst='pacaur -Qe'
  alias paorph='pacaur -Qtd'
  alias painsd='pacaur -S --asdeps'
  alias pamir='pacaur -Syy'
  alias paupd="pacaur -Sy"
  alias upgrade='pacaur -Syu'
fi

if (( $+commands[trizen] )); then
  alias trconf='trizen -C'
  alias trupg='trizen -Syua'
  alias trsu='trizen -Syua --noconfirm'
  alias trin='trizen -S'
  alias trclean='trizen -Sc'
  alias trclr='trizen -Scc'
  alias trins='trizen -U'
  alias trre='trizen -R'
  alias trrem='trizen -Rns'
  alias trrep='trizen -Si'
  alias trreps='trizen -Ss'
  alias trloc='trizen -Qi'
  alias trlocs='trizen -Qs'
  alias trlst='trizen -Qe'
  alias trorph='trizen -Qtd'
  alias trinsd='trizen -S --asdeps'
  alias trmir='trizen -Syy'
  alias trupd="trizen -Sy"
  alias upgrade='trizen -Syu'
fi

if (( $+commands[yay] )); then
  alias yaconf='yay -Pg'
  alias yaclean='yay -Sc'
  alias yaclr='yay -Scc'
  alias yaupg='yay -Syu'
  alias yasu='yay -Syu --noconfirm'
  alias yain='yay -S'
  alias yains='yay -U'
  alias yare='yay -R'
  alias yarem='yay -Rns'
  alias yarep='yay -Si'
  alias yareps='yay -Ss'
  alias yaloc='yay -Qi'
  alias yalocs='yay -Qs'
  alias yalst='yay -Qe'
  alias yaorph='yay -Qtd'
  alias yainsd='yay -S --asdeps'
  alias yamir='yay -Syy'
  alias yaupd="yay -Sy"
  alias upgrade='yay -Syu'
fi
lstolowski/oh-my-zsh
plugins/archlinux/archlinux.plugin.zsh
Shell
mit
5,152
#!/bin/bash
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This script does some preparation before the build of instrumented xkbcommon.

# Do not warn about undefined sanitizer symbols in object files.
sed -i "s/\(-Wl,--no-undefined\|-Wl,-z,defs\)//g" ./Makefile.am

# Do not warn about uninstalled documentation.
sed -i "s/--fail-missing//g" ./debian/rules

# Do not warn about extra msan symbols.
sed -i "s/dh_makeshlibs -- -c4/dh_makeshlibs/g" ./debian/rules
chromium/chromium
third_party/instrumented_libraries/xenial/scripts/pre-build/xkbcommon.sh
Shell
bsd-3-clause
578
# copy the active line from the command line buffer
# onto the system clipboard (requires clipcopy plugin)
copybuffer () {
  if which clipcopy &>/dev/null; then
    echo $BUFFER | clipcopy
  else
    echo "clipcopy function not found. Please make sure you have Oh My Zsh installed correctly."
  fi
}

zle -N copybuffer

bindkey "^O" copybuffer
kristiankubik/dotfiles
zsh_plugins/copybuffer/copybuffer.plugin.zsh
Shell
mit
346