column     type           values
code       stringlengths  2 .. 1.05M
repo_name  stringlengths  5 .. 110
path       stringlengths  3 .. 922
language   stringclasses  1 value
license    stringclasses  15 values
size       int64          2 .. 1.05M
#!/bin/bash
#**********************************************************
#* CATEGORY    JARVIS HOME AUTOMATION
#* GROUP       RESTART SYSTEM
#* AUTHOR      LANCE HAYNIE <[email protected]>
#**********************************************************
#Jarvis Home Automation
#Copyright (C) 2017 Haynie Research & Development

#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License along
#with this program; if not, write to the Free Software Foundation, Inc.,
#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

sudo shutdown -r now
Haynie-Research-and-Development/jarvis
scripts/restart_system.sh
Shell
gpl-2.0
1,038
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2010-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# Try a rule where we use the output names to drive the foreach
. ./tup.sh
check_no_windows symlink

cat > Tupfile.lua << HERE
binaries = {'ar', 'vi'}
for k, v in pairs(binaries) do
tup.rule('busybox', 'ln -s %f %o', v)
end
HERE
touch busybox
update

tup_dep_exist . 'ln -s busybox ar' . ar
tup_dep_exist . 'ln -s busybox vi' . vi

eotup
gittup/tup
test/t2074-reverse-rule.sh
Shell
gpl-2.0
1,088
#!/usr/bin/env node
(require("../src/apg-conv/apg-conv.js"))();
ldthomas/apg-js
bin/apg-conv.sh
Shell
gpl-2.0
64
#!/bin/ksh

JVM="$JDK_HOME/bin/java $JAVA_RUN_OPTS"

#Operator add_rule Id \"expresion\" ior_file operation < operator.ior
#Operator replace_rule Id \"expresion\" ior_file operation <operator.ior
#$1 $2 "$3" $4 $5
#Operator insert_rule Id Position \"expresion\" ior_file operation<operator.ior
#$1 $2 $3 "$4" $5 $6

if [ $# -gt 0 ]
then
    if [ $1 = add_rule ]
    then
        $JVM es.tid.corba.TIDDistrib.tools.Operator $1 $2 "$3" $4 $5
    elif [ $1 = replace_rule ]
    then
        $JVM es.tid.corba.TIDDistrib.tools.Operator $1 $2 "$3" $4 $5
    elif [ $1 = insert_rule ]
    then
        $JVM es.tid.corba.TIDDistrib.tools.Operator $1 $2 $3 "$4" $5 $6
    else
        $JVM es.tid.corba.TIDDistrib.tools.Operator $*
    fi
else
    $JVM es.tid.corba.TIDDistrib.tools.Operator $*
fi
AlvaroVega/TIDNotifJ
tools/TIDDistrib/Operator.sh
Shell
gpl-2.0
759
#! /bin/bash
connectedDev=$(hcitool con 2>/dev/null)
conMac=${connectedDev:20:18}
conMac=$(echo "$conMac" 2>/dev/null|tr -d " " 2>/dev/null)
conname=$(hcitool name "$conMac" 2>/dev/null)
echo "$conname" 2>/dev/null
zoff99/ToxBlinkenwall
toxblinkenwall/scripts/rpi_zerow/get_bt_connected_name.sh
Shell
gpl-2.0
216
#!/bin/bash
#
# Copyright (C) 2017 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

set -eo pipefail
shopt -s nullglob

# if command starts with an option, prepend mysqld
if [ "${1:0:1}" = '-' ]; then
	set -- mysqld "$@"
fi

# skip setup if they want an option that stops mysqld
wantHelp=
for arg; do
	case "$arg" in
		-'?'|--help|--print-defaults|-V|--version)
			wantHelp=1
			break
			;;
	esac
done

# usage: file_env VAR [DEFAULT]
#    ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
#  "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
	local var="$1"
	local fileVar="${var}_FILE"
	local def="${2:-}"
	if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
		echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
		exit 1
	fi
	local val="$def"
	if [ "${!var:-}" ]; then
		val="${!var}"
	elif [ "${!fileVar:-}" ]; then
		val="$(< "${!fileVar}")"
	fi
	export "$var"="$val"
	unset "$fileVar"
}

_check_config() {
	toRun=( "$@" --verbose --help )
	if ! errors="$("${toRun[@]}" 2>&1 >/dev/null)"; then
		cat >&2 <<-EOM
			ERROR: mysqld failed while attempting to check config
			command was: "${toRun[*]}"
			$errors
		EOM
		exit 1
	fi
}

# Fetch value from server config
# We use mysqld --verbose --help instead of my_print_defaults because the
# latter only show values present in config files, and not server defaults
_get_config() {
	local conf="$1"; shift
	"$@" --verbose --help --log-bin-index="$(mktemp -u)" 2>/dev/null | awk '$1 == "'"$conf"'" { print $2; exit }'
}

# allow the container to be started with `--user`
if [ "$1" = 'mysqld' -a -z "$wantHelp" -a "$(id -u)" = '0' ]; then
	_check_config "$@"
	DATADIR="$(_get_config 'datadir' "$@")"
	mkdir -p "$DATADIR"
	chown -R mysql:mysql "$DATADIR"
	exec gosu mysql "$BASH_SOURCE" "$@"
fi

if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
	# still need to check config, container may have started with --user
	_check_config "$@"
	# Get config
	DATADIR="$(_get_config 'datadir' "$@")"

	if [ ! -d "$DATADIR/mysql" ]; then
		file_env 'MYSQL_ROOT_PASSWORD'
		if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then
			echo >&2 'error: database is uninitialized and password option is not specified '
			echo >&2 '  You need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ALLOW_EMPTY_PASSWORD and MYSQL_RANDOM_ROOT_PASSWORD'
			exit 1
		fi

		mkdir -p "$DATADIR"

		echo 'Initializing database'
		"$@" --initialize-insecure
		echo 'Database initialized'

		if command -v mysql_ssl_rsa_setup > /dev/null && [ ! -e "$DATADIR/server-key.pem" ]; then
			# https://github.com/mysql/mysql-server/blob/23032807537d8dd8ee4ec1c4d40f0633cd4e12f9/packaging/deb-in/extra/mysql-systemd-start#L81-L84
			echo 'Initializing certificates'
			mysql_ssl_rsa_setup --datadir="$DATADIR"
			echo 'Certificates initialized'
		fi

		SOCKET="$(_get_config 'socket' "$@")"
		"$@" --skip-networking --socket="${SOCKET}" &
		pid="$!"

		mysql=( mysql --protocol=socket -uroot -hlocalhost --socket="${SOCKET}" )

		for i in {30..0}; do
			if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then
				break
			fi
			echo 'MySQL init process in progress...'
			sleep 1
		done
		if [ "$i" = 0 ]; then
			echo >&2 'MySQL init process failed.'
			exit 1
		fi

		if [ -z "$MYSQL_INITDB_SKIP_TZINFO" ]; then
			# sed is for https://bugs.mysql.com/bug.php?id=20545
			mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
		fi

		if [ ! -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then
			export MYSQL_ROOT_PASSWORD="$(pwgen -1 32)"
			echo "GENERATED ROOT PASSWORD: $MYSQL_ROOT_PASSWORD"
		fi

		rootCreate=
		# default root to listen for connections from anywhere
		file_env 'MYSQL_ROOT_HOST' '%'
		if [ ! -z "$MYSQL_ROOT_HOST" -a "$MYSQL_ROOT_HOST" != 'localhost' ]; then
			# no, we don't care if read finds a terminating character in this heredoc
			# https://unix.stackexchange.com/questions/265149/why-is-set-o-errexit-breaking-this-read-heredoc-expression/265151#265151
			read -r -d '' rootCreate <<-EOSQL || true
				CREATE USER 'root'@'${MYSQL_ROOT_HOST}' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
				GRANT ALL ON *.* TO 'root'@'${MYSQL_ROOT_HOST}' WITH GRANT OPTION ;
			EOSQL
		fi

		"${mysql[@]}" <<-EOSQL
			-- What's done in this file shouldn't be replicated
			--  or products like mysql-fabric won't work
			SET @@SESSION.SQL_LOG_BIN=0;

			DELETE FROM mysql.user WHERE user NOT IN ('mysql.sys', 'mysqlxsys', 'root') OR host NOT IN ('localhost') ;
			SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;
			GRANT ALL ON *.* TO 'root'@'localhost' WITH GRANT OPTION ;
			${rootCreate}
			DROP DATABASE IF EXISTS test ;
			FLUSH PRIVILEGES ;
		EOSQL

		if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then
			mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
		fi

		file_env 'MYSQL_DATABASE'
		if [ "$MYSQL_DATABASE" ]; then
			echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}"
			mysql+=( "$MYSQL_DATABASE" )
		fi

		file_env 'MYSQL_USER'
		file_env 'MYSQL_PASSWORD'
		if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
			echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" | "${mysql[@]}"

			if [ "$MYSQL_DATABASE" ]; then
				echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" | "${mysql[@]}"
			fi

			echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}"
		fi

		echo
		for f in /docker-entrypoint-initdb.d/*; do
			case "$f" in
				*.sh)     echo "$0: running $f"; . "$f" ;;
				*.sql)    echo "$0: running $f"; "${mysql[@]}" < "$f"; echo ;;
				*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${mysql[@]}"; echo ;;
				*)        echo "$0: ignoring $f" ;;
			esac
			echo
		done

		if [ ! -z "$MYSQL_ONETIME_PASSWORD" ]; then
			"${mysql[@]}" <<-EOSQL
				ALTER USER 'root'@'%' PASSWORD EXPIRE;
			EOSQL
		fi
		if ! kill -s TERM "$pid" || ! wait "$pid"; then
			echo >&2 'MySQL init process failed.'
			exit 1
		fi

		echo
		echo 'MySQL init process done. Ready for start up.'
		echo
	fi
fi

exec "$@"
GoogleCloudPlatform/mysql-docker
5/debian9/5.7/docker-entrypoint.sh
Shell
gpl-2.0
6,774
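# A minimal sketch of the file_env secrets pattern from the entrypoint
# above, runnable once file_env is sourced; the /tmp path is illustrative,
# not part of the image, and the variable names come from the entrypoint's
# own usage comment:
printf 's3cret' > /tmp/xyz_pass
export XYZ_DB_PASSWORD_FILE=/tmp/xyz_pass
file_env 'XYZ_DB_PASSWORD' 'example'
echo "$XYZ_DB_PASSWORD"   # prints the file's contents, not the default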
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2009-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# We have a ghost Tuprules.tup node, and then move a file over it. The
# necessary directories should be re-parsed.

. ./tup.sh
check_monitor_supported
monitor

mkdir a
mkdir a/a2
cat > a/a2/Tupfile << HERE
include_rules
: |> echo \$(VAR) |>
HERE
echo 'VAR=3' > Tuprules.tup
echo 'VAR=4' > a/ok.txt
update

tup_dep_exist a Tuprules.tup a a2
tup_object_exist a/a2 'echo 3'

mv a/ok.txt a/Tuprules.tup
update

tup_dep_exist a Tuprules.tup a a2
tup_object_exist a/a2 'echo 4'

eotup
ppannuto/tup
test/t7030-move-over-ghost2.sh
Shell
gpl-2.0
1,227
#!/bin/bash

function displayNumberOfFiles() {
    files=$(( $( ls -l | wc -l ) - 1 ))
    echo "Number of files in this directory are: $files"
    echo ""
}

function displayNumberOfSubdirectories() {
    typeset -i subdirectories=0
    # Read from process substitution rather than a pipeline so the counter
    # is not lost in a subshell.
    while read filename filetype rest
    do
        if [ "$filetype" = "directory" ]
        then
            subdirectories+=1
            # echo "directory $subdirectories"
        fi
    done < <(file *)
    echo "Number of Subdirectories in this directory: $subdirectories"
    echo ""
}

function displayNumberOfLinesContainingChar() {
    char=$1
    echo -e "Enter the filename: \c"
    read file
    echo -e "Number of lines in $file containing $char: \c"
    grep -c $char $file
    echo ""
}

function follow() {
    choice=$1
    case $choice in
        1) displayNumberOfFiles
           showMenu
           ;;
        2) displayNumberOfSubdirectories
           showMenu
           ;;
        3) charToCount='y'
           displayNumberOfLinesContainingChar $charToCount
           showMenu
           ;;
        4) echo -e "Enter the filename: \c"
           read file
           head -1 $file
           tail -1 $file
           showMenu
           ;;
        5) exit 0
           ;;
        *) echo "WRONG CHOICE. TRY AGAIN!"
           showMenu
           ;;
    esac
}

function showMenu() {
    echo "1.Display Number of files."
    echo "2.Display Number of Subdirectories."
    echo "3.Display Number of lines in a file containing 'y'."
    echo "4.Display first and the last line of the file."
    echo "5.EXIT"
    echo "Enter your Choice"
    read choice
    follow $choice
}

showMenu
itch96/ShellScripts
16.MenuFileOp.sh
Shell
gpl-3.0
1,616
#! /bin/bash
#$ -cwd -V
#$ -j y
#$ -m e

set -e

res1=$(date +%s.%N)   # use to calculate whole run time of the job

echo $'\n'"["`date`"]: Job started."

## Add Modules ########################
module load $8
#######################################

SAMPLE_ID=$1
SAMPLE_PATH=$2
Q=$3
MaxD=$4
REF_BUILD=$5
REF_FILE=$6
BAMFILELIST=$7
GROUP=$9

INDIR="${SAMPLE_PATH}/${SAMPLE_ID}"
SAMVCF="${INDIR}/${SAMPLE_ID}_${GROUP}_SourceBio_Q${Q}_Bcftools13_${REF_BUILD}.vcf"

##Samtools-Bcftools Variant Calling
#samtools mpileup -v -d $MaxD -L $MaxD -q $Q -Q $Q -m 25 -t DP,AD,ADF,ADR,SP,INFO/AD,INFO/ADF,INFO/ADR -r chrM:0-16570 -f ${REF_FILE} -b ${BAMFILELIST} | bcftools call -v -m -f GQ,GP -O v -o $SAMVCF -

# -I no Indels called/SNPs only bcftools w/o '-v' calls at every position
samtools mpileup -vu -d $MaxD -q $Q -Q $Q -I -t DP,AD,ADF,ADR,SP -r chrM -f ${REF_FILE} -b ${BAMFILELIST} | bcftools call -m -f GQ,GP -O v -o $SAMVCF -

#samtools mpileup -vu -d $MaxD -q $Q -Q $Q -I -t DP,DP4,SP -r chrM:0-16570 -f ${REF_FILE} -b ${BAMFILELIST} | bcftools call -v -m -f GQ,GP -O v -o $SAMVCF -

echo $'\n'"["`date`"]: Variant Calling by Samtools-Bcftools is Complete!!"

# runtime calculation
res2=$(date +%s.%N)
dt=$(echo "$res2 - $res1" | bc)
dd=$(echo "$dt/86400" | bc)
dt2=$(echo "$dt-86400*$dd" | bc)
dh=$(echo "$dt2/3600" | bc)
dt3=$(echo "$dt2-3600*$dh" | bc)
dm=$(echo "$dt3/60" | bc)
ds=$(echo "$dt3-60*$dm" | bc)
printf "Total runtime: %d:%02d:%02d:%02.4f\n" $dd $dh $dm $ds

echo "exit status $?"
Helgriff/MitoCirco
BashScripts/Samtools1-3Bcftools.sh
Shell
gpl-3.0
1,492
#!/usr/bin/env bash

if [ "$#" -gt 2 ]; then
    echo "Usage: waitForFormat device_name [gpt|msdos]"
    exit 1
fi

LABEL=gpt
case "$2" in
    msdos)
        LABEL=msdos
        ;;
    *)
        LABEL=gpt
        ;;
esac

while true; do
    if [ `fdisk -l /dev/$1 | wc -l 2>/dev/null` -gt 0 ]; then
        parted -a optimal -s -- /dev/$1 mklabel $LABEL mkpart primary 0% 100%
        exit 0
    fi
done
EWS-Network/ews-cf-templates
compute/ebs/scripts/waitForFormat.sh
Shell
gpl-3.0
365
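# A hedged usage sketch for the script above: the device is named without
# the /dev/ prefix and the label argument defaults to gpt when omitted
# (xvdb and xvdc are hypothetical device names):
./waitForFormat.sh xvdb msdos   # msdos-label /dev/xvdb once it appears
./waitForFormat.sh xvdc         # same, with the default gpt label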
#!/bin/bash
#
# This file is part of Schumix.
#
# Copyright (C) 2010-2013 Megax <http://megax.yeahunter.hu/>
# Copyright (C) 2013-2015 Schumix Team <http://schumix.eu/>
#
# Schumix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Schumix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Schumix. If not, see <http://www.gnu.org/licenses/>.
#

main () {
    for f in $(find -iname "*.am" | grep -v "./_ReSharper" | grep -v "/obj/" | grep -v "./External")
    do
        file_update $f
    done

    configure_ac_update
    makefile_am_update
}

configure_ac_update () {
    f=configure.ac
    echo "Update $f"
    find $f -type f -exec sed -i 's/External\/YamlDotNet\/YamlDotNet\/Makefile/External\/YamlDotNet\/YamlDotNet\/Makefile\nPo\/Makefile/g' {} \;
}

makefile_am_update () {
    f=Makefile.am
    echo "Update $f"
    find $f -type f -exec sed -i 's/External\/YamlDotNet\/YamlDotNet /External\/YamlDotNet\/YamlDotNet Po /g' {} \;
}

file_update () {
    f=$1
    echo "Update file: $f"

    # IronPython.Modules.dll
    find $f -type f -exec sed -i 's/..\/..\/..\/..\/IronPython.Modules/..\/..\/Dependencies\/IronPython.Modules.dll/g' {} \;
    find $f -type f -exec sed -i 's/..\/..\/IronPython.Modules/Dependencies\/IronPython.Modules.dll/g' {} \;

    # IronPython.dll
    find $f -type f -exec sed -i 's/..\/..\/..\/..\/IronPython/..\/..\/Dependencies\/IronPython.dll/g' {} \;
    find $f -type f -exec sed -i 's/..\/..\/IronPython/Dependencies\/IronPython.dll/g' {} \;

    # Microsoft.Dynamic.dll
    find $f -type f -exec sed -i 's/..\/..\/..\/..\/Microsoft.Dynamic/..\/..\/Dependencies\/Microsoft.Dynamic.dll/g' {} \;
    find $f -type f -exec sed -i 's/..\/..\/Microsoft.Dynamic/Dependencies\/Microsoft.Dynamic.dll/g' {} \;

    # Microsoft.Scripting.Metadata.dll
    find $f -type f -exec sed -i 's/..\/..\/..\/..\/Microsoft.Scripting.Metadata/..\/..\/Dependencies\/Microsoft.Scripting.Metadata.dll/g' {} \;
    find $f -type f -exec sed -i 's/..\/..\/Microsoft.Scripting.Metadata/Dependencies\/Microsoft.Scripting.Metadata.dll/g' {} \;

    # Microsoft.Scripting.dll
    find $f -type f -exec sed -i 's/..\/..\/..\/..\/Microsoft.Scripting/..\/..\/Dependencies\/Microsoft.Scripting.dll/g' {} \;
    find $f -type f -exec sed -i 's/..\/..\/Microsoft.Scripting/Dependencies\/Microsoft.Scripting.dll/g' {} \;
}

main
Schumix/Schumix2
fixmakefile.sh
Shell
gpl-3.0
2,726
#!/bin/bash
g++-4.9 -std=c++11 $1.cpp -I../../.. -I../../../src/include -DNDEBUG -O3 -DZNN_CUBE_POOL_LOCKFREE -DZNN_USE_FLOATS -lpthread -lfftw3f -ljemalloc -o $1 -DZNN_DONT_CACHE_FFTS
seung-lab/znn-release
src/cpp/measurements/make_g++_mac.sh
Shell
gpl-3.0
185
#!/bin/bash
#
# Copyright (C) 2017.
# Author: Jesús Manuel Mager Hois
# e-mail: <[email protected]>
# Project website: http://turing.iimas.unam.mx/wix/

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

####################################
# Configuration variables.
# Substitute the path to the tools
####################################

work="$HOME/wixes"
base="$work/smtwixes"
moses="$HOME/mosesdecoder"
tereval=$moses/tools/ter/ter.jar
wareval=$moses/tools/wer/wer.py
wixnlp="$work/wixnlp"
corpus="$work/wixarikacorpora"
europarl="$work/europarl"

morph=0
seg=0
translate=0
hier=0

while getopts "h?mtesi:" opt; do
    case "$opt" in
    h|\?)
        echo "evalmodel.sh [-m | -s | -i | -t]"
        echo "     Default: no morphology"
        exit 0
        ;;
    m)  morph=1
        ;;
    s)  seg=1
        ;;
    i)  hier=1
        ;;
    t)  tags=1
        ;;
    esac
done

shift $((OPTIND-1))
[ "$1" = "--" ] && shift

cp $corpus/testset.wixes $base/corpus/test.wixes

echo "* Split test corpus"
python3 $wixnlp/tools/sep.py $base/corpus/test

echo "* Normalize test corpus"
python3 $wixnlp/normwix.py -a $base/corpus/test.wix $base/corpus/test.norm.wix

$moses/scripts/tokenizer/tokenizer.perl -l es < $base/corpus/test.es > $base/testing/test.tokens.es -threads 8
tr '[:upper:]' '[:lower:]' < $base/testing/test.tokens.es > $base/testing/test.norm.es
cp $base/testing/test.norm.es $base/testing/test.tokens.es

echo "##### Translate..."
if (( morph == 0 && seg == 0 && hier == 0))
then
    echo "No morphological translation"
    echo "Translating..."
    $moses/bin/moses \
        -f $base/wixsinmorph/model/moses.ini \
        < $base/corpus/test.norm.wix \
        > $base/testing/test.hyp.es
    cat $base/testing/test.hyp.es
    $moses/bin/moses \
        -f $base/eswixsinmorph/model/moses.ini \
        < $base/testing/test.tokens.es \
        > $base/testing/test.hyp.wix
    #cat $base/testing/test.hyp.wix
fi

if (( morph == 0 && seg == 1 && hier == 0))
then
    echo "With improved morphological translation"
    echo "Translating..."
    python3 seg.py
    $moses/bin/moses \
        -f $base/wixeswixnlp/model/moses.ini \
        < $base/corpus/test.seg.wix \
        > $base/testing/test.hyp.es
    $moses/bin/moses \
        -f $base/eswixwixnlp/model/moses.ini \
        < $base/testing/test.tokens.es \
        > $base/testing/test.hyp.wix
fi

if (( morph == 1 && seg == 0 && hier == 0))
then
    echo "Morphological Translation"
    echo "Translating..."
    $base/bin/segment.py -m $base/corpus/model.morph.bin -i $base/corpus/test.norm.wix -o $base/corpus/test.seg.wix
    $moses/bin/moses \
        -f $base/wixeswithmorph/model/moses.ini \
        < $base/corpus/test.seg.wix \
        > $base/testing/test.hyp.es
    $moses/bin/moses \
        -f $base/eswixwithmorph/model/moses.ini \
        < $base/testing/test.tokens.es \
        > $base/testing/test.hyp.wix
fi

if (( morph == 0 && seg == 0 && hier == 1))
then
    echo "Morphological Hierarchical Translation"
    python3 seg.py
    $moses/bin/moses \
        -f $base/wixeshier/model/moses.ini \
        < $base/corpus/test.seg.wix \
        > $base/testing/test.hyp.es
    $moses/bin/moses \
        -f $base/eswixhier/model/moses.ini \
        < $base/testing/test.tokens.es \
        > $base/testing/test.hyp.wix
fi

echo "##### Evaluation"

#corpus/wixmorph.py corpus/eval/prueba.norm.wix corpus/train.wix.morph.bin > corpus/eval/prueba.morph.wix
#morfessor-segment -L corpus/train.wix.morph.model corpus/eval/prueba.endl.wix -o corpus/eval/prueba.morph.wix
#tr '\n' ' ' < corpus/eval/prueba.morph.wix > corpus/eval/prueba.morph.endl.wix
#tr '@@@' '\n' < corpus/eval/prueba.morph.endl.wix > corpus/eval/prueba.morph.wix
#sed -i '/^[[:space:]]*$/d' corpus/eval/prueba.morph.wix

if (( morph == 0))
then
    echo "#TER"
    awk '{print $0, "(", NR, ")"}' $base/testing/test.hyp.es > $base/testing/test.hyp.ter.es
    awk '{print $0, "(", NR, ")"}' $base/testing/test.tokens.es > $base/testing/test.ter.es
    java -jar $tereval -r $base/testing/test.ter.es -h $base/testing/test.hyp.ter.es
    #echo "#WER"
    #python3 $wereval $base/corpus/test.es $base/testing/test.hyp.es

    echo "#TER"
    awk '{print $0, "(", NR, ")"}' $base/testing/test.hyp.wix > $base/testing/test.hyp.ter.wix
    awk '{print $0, "(", NR, ")"}' $base/corpus/test.wix > $base/testing/test.ter.wix
    java -jar $tereval -r $base/testing/test.ter.wix -h $base/testing/test.hyp.ter.wix
    #echo "#WER"

    echo "#BLEU"
    $moses/scripts/generic/multi-bleu.perl -lc $base/testing/test.tokens.es < $base/testing/test.hyp.es
    $moses/scripts/generic/multi-bleu.perl -lc $base/corpus/test.norm.wix < $base/testing/test.hyp.wix
else
    echo '######## Morphological translation'
    echo "#TER"
    awk '{print $0, "(", NR, ")"}' $base/testing/test.hyp.es > $base/testing/test.hyp.ter.es
    awk '{print $0, "(", NR, ")"}' $base/testing/test.tokens.es > $base/testing/test.ter.es
    java -jar $tereval -r $base/testing/test.ter.es -h $base/testing/test.hyp.ter.es

    awk '{print $0, "(", NR, ")"}' $base/testing/test.hyp.wix > $base/testing/test.hyp.ter.wix
    awk '{print $0, "(", NR, ")"}' $base/corpus/test.seg.wix > $base/testing/test.seg.ter.wix
    java -jar $tereval -N -r $base/testing/test.seg.ter.wix -h $base/testing/test.hyp.ter.wix

    echo "#BLEU"
    $moses/scripts/generic/multi-bleu.perl -lc $base/testing/test.tokens.es < $base/testing/test.hyp.es
    $moses/scripts/generic/multi-bleu.perl -lc $base/corpus/test.seg.wix < $base/testing/test.hyp.wix
fi
pywirrarika/smtwixes
evalmodels.sh
Shell
gpl-3.0
6,415
#!/bin/bash
source /opt/gc2/bin/activate
python /opt/gc2/gc2smwdaemon.py
JosefAssad/SeMaWi
scripts/syncgc2.sh
Shell
gpl-3.0
74
#!/usr/bin/env bash

. ./setdir.sh

NAME=nfi
TYPE_PARAMS="--marcVersion FENNICA --fixAlephseq --marcxml --emptyLargeCollectors"
MARC_DIR=${BASE_INPUT_DIR}/nfi
MASK=fennica.mrcx

. ./common-script

echo "DONE"
exit 0
pkiraly/metadata-qa-marc
catalogues/nfi.sh
Shell
gpl-3.0
215
#!/bin/bash

hemin=0.001
hemax=0.007
hestep=0.0005

himin=0.002
himax=0.008
histep=0.0005

for he in $(seq $hemin $hestep $hemax); do
    for hi in $(seq $himin $histep $himax); do
        #max=`echo $m + $pstep | bc`
        program="python RUM_Detektor_2Npp.py -he $he -hi $hi"
        echo $program
        bsub -q Batch24 $program
    done
done
TiKunze/CanMics
src/python/01_SingleChannel/2pop/Npp/RUM_Detect_2Npp_batch.sh
Shell
gpl-3.0
347
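# A dry-run sketch of the same fractional-step sweep as above, relying on
# GNU seq's float steps; it only prints the bsub submissions instead of
# running them (queue name and script taken from the record above):
for he in $(seq 0.001 0.0005 0.007); do
    for hi in $(seq 0.002 0.0005 0.008); do
        echo bsub -q Batch24 "python RUM_Detektor_2Npp.py -he $he -hi $hi"
    done
done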
#!/bin/bash
#Script looks at a process name and kills it.

#Declare Variables
PNumber="$(ps -f -u $USER | grep -i $1 | awk 'NR==1{print $2}')"   #Process number
#echo $PNumber

if [[ -z "$PNumber" ]]; then
    echo "No processes found, try another process name"
    exit 1
else
    echo "########################"
    echo "I found this process number " $PNumber " and belongs to " $USER
    echo "########################"
fi

#
#Menu starts with select
#
select ACT in Kill-it Show-it Quit; do
    if [[ -n "$ACT" ]]; then
        if [[ "$ACT" == "Kill-it" ]]; then
            echo "To avoid mistakes, here is the process information,"
            ps -f --pid $PNumber
            echo "..."
            echo Killing process $PNumber
            sleep 2
        elif [[ "$ACT" == "Show-it" ]]; then
            echo Showing process $PNumber
            ps -f --pid $PNumber
            echo Are you sure you want to kill this process
            read ACT
            echo $ACT
        elif [[ "$ACT" == "Quit" ]]; then
            echo Goodbye
            sleep 2
            break
        fi
    else
        echo I need an answer
    fi
done
#LinuxL263
#EOF
mgvv/learning-how-to-code
Learning_Bash_Advanced/mi_cominenzo/restar_process.sh
Shell
gpl-3.0
1,140
#!/bin/bash

XMIN=0.005

for ORD in NLO
#for ORD in LO NLO
do
#for Q2 in 1.0 2.0 4.0 10.0 20.0 100.0 200.0 1000.0
#for Q2 in 1.0 2.0 10.0 100.0
#for Q2 in 1.0
#for Q2 in 4.0
for Q2 in 10.0
#for Q2 in 100.0
#for Q2 in 10.0
do
#for iflav in -5 -4 -3 -2 -1 0 1 2 3 4 5
#for iflav in -4 -3 -2 -1 0 1 2 3 4
#for iflav in 0
#for iflav in -3 -2 -1 0 1 2 3 4
#for iflav in -3 -2 -1 0 1 2 3
#for iflav in -2 -1 0 1 2 3
#for iflav in -3 -2 -1 3
#for iflav in 1
for iflav in -2
do

#cat <<EOF | ./plotDST --sumplot --Q2=${Q2} --order=${ORD} \
#--DSSV --xmin=${XMIN} --gluon=0.8 --u=0.4 --d=0.8 --suffix=eps
#../browser/distout${ORD}DSSV_org.dat DSSV_evo
#../browser/distout${ORD}DSSV_grid.dat DSSV_grid
#EOF
#../browser/distout${ORD}DSSV_imazu.dat DSSV_imazu

#cat <<EOF | ./plotDST --sumplot --Q2=${Q2} --order=${ORD} \
#--MRST --xmin=${XMIN} --gluon=0.1 --skips-bar
#../browser/distout${ORD}MRST.dat MRST_evo
#../browser/distout${ORD}MRST_grid.dat MRST_grid
#EOF
#--suffix=eps

#cat <<EOF | ./plotDST --sumplot --Q2=${Q2} --order=${ORD} \
#--Kretzer --HAD=1 --xmin=${XMIN} --suffix=eps
#../browser/distout${ORD}KretzerFF_kret.dat Kretzer_evo
#../browser/distout${ORD}KretzerFF_grid.dat Kretzer_grid
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#--FFK --xmin=${XMIN}
#../browser/distout${ORD}FFK_werr_${Q2}.dat My_Calculation
#../browser/distout${ORD}KretzerFFK_kret.dat Kretzer00
#../browser/distout${ORD}DSSFFK.dat DSS07
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#--FFP --xmin=${XMIN}
#../browser/distout${ORD}FFP.dat My_Calculation
#../browser/distout${ORD}KretzerFFP_kret.dat Kretzer00
#../browser/distout${ORD}DSSFFP.dat DSS07
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --DSSV --xmin=${XMIN} --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001
#../browser/distout${ORD}DSSV_werr_kret.dat My_Calculation
#../browser/distoutNLOGRSV00.dat GRSV00
#../browser/distout${ORD}DSSV.dat DSSV08
#EOF
#  --ymax=0.3
#  --ymax=0.05 --ymin=-0.05
#  --scale
#../browser/distout${ORD}DSSV_r0.5_kret.dat rescale_0.5
#../browser/distout${ORD}DSSV_r2.0_kret.dat rescale_2.0
#../browser/distout${ORD}DSSV_kret.dat My_Calculation

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --DSSV --xmin=${XMIN} --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001 --scale
#../browser/distout${ORD}DSSV_werr_kret.dat My_Calculation
#../browser/distout${ORD}DSSV_r0.5_kret.dat rescale_0.5
#../browser/distout${ORD}DSSV_r2.0_kret.dat rescale_2.0
#../browser/distoutNLOGRSV00.dat GRSV00
#../browser/distout${ORD}DSSV.dat DSSV08
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --DSSV --xmin=${XMIN} --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001
#../browser/distout${ORD}DSSV_werr_kret.dat My_Calculation
#../browser/distout${ORD}DSSV_r0.5_kret.dat rescale_0.5
#../browser/distout${ORD}DSSV_r2.0_kret.dat rescale_2.0
#EOF
#  --ymax=0.3
#  --ymax=0.05 --ymin=-0.05
#  --scale
#../browser/distout${ORD}DSSV_r0.5_kret.dat rescale_0.5
#../browser/distout${ORD}DSSV_r2.0_kret.dat rescale_2.0
#../browser/distout${ORD}DSSV_kret.dat My_Calculation

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --DSSV --xmin=${XMIN} --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001 \
#  --nolegend --noxtitle --noytitle
#../browser/distout${ORD}DSSV_werr_kret.dat My_Analysis
#../browser/distoutNLOGRSV00.dat GRSV00
#../browser/distout${ORD}DSSV.dat DSSV08
#EOF
#  --shade1=3002 --shade2=3001 \
#  --shade1=3002 --shade2=3001 --ymax=0.05 --ymin=-0.05 \

cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
  --DSSV --xmin=${XMIN} --fill --color1=2 --color2=1 \
  --setLC2=1 --setLC4=4 --setLC5=1 --shade1=3001 --shade2=3002 --scale \
  --addflegend1="Stat. Err. (1#sigma)" \
  --addflegend2="Syst. Err. (FF)" \
  --noautolegend2 --scaleLG1 \
  --nolegend --noxtitle --noytitle --setxTick=0.07 --setyTick=0.05 \
  --xlabelsize=0.05 --ylabelsize=0.045 \
  --zeroline --frameWidth=1 \
  --ymax=0.035 --ymin=-0.045
../browser/distout${ORD}DSSV_werr_kret.dat Present_Analysis
../browser/distout${ORD}DSSV_FF_wg_scale1.dat ''
../browser/distout${ORD}DSSV_FF_wg_scale2.dat ''
EOF
#../browser/distoutNLOGRSV00.dat GRSV00
#../browser/distout${ORD}DSSV.dat DSSV08

# d-bar
#  --ymax=0.035 --ymin=-0.045
# u
#  --ymax=0.38 --ymin=-0.04
# w/ legend
#  --noxtitle --noytitle --setxTick=0.07 --setyTick=0.05 \
# w/o legend
#  --nolegend --noxtitle --noytitle --setxTick=0.07 --setyTick=0.05 \
#--flegend2 \
#Syst._Err._(FF)
#--setLC3=1 --setLC2=4 \
#--ymax=0.05 --ymin=-0.05
#--nolegend --noxtitle --noytitle
#--shade2=3001 \
#--scale \
#../browser/distout${ORD}DSSV_FF_wg_scale1.dat ''
#../browser/distout${ORD}DSSV_FF_wg_scale2.dat ''
#../browser/distout${ORD}DSSV_FF_scale1.dat ''
#../browser/distout${ORD}DSSV_FF_scale2.dat ''
#  --shade1=3002 --shade2=3001 --ymax=0.05 --ymin=-0.05 \
#  --shade1=3002 --shade2=3001 \
#../browser/distout${ORD}DSSV_FF.dat My_Calc_FF
#../browser/distout${ORD}DSSV_DSSFF_hfree.dat My_Calc_DSSFF
#../browser/distout${ORD}DSSV_KREFF_hfree.dat My_Calc_KREFF
#../browser/distout${ORD}DSSV_FF_hfree.dat My_Calc_FF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --KretzerFF --xmin=${XMIN} --ymax=1.2 --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001
#../browser/distout${ORD}FF_werr.dat My_Calculation
#../browser/distout${ORD}KretzerFF_kret.dat Kretzer00
#../browser/distout${ORD}DSSFF.dat DSS07
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --KretzerFF --xmin=${XMIN} --ymax=1.2 --fill --color1=2 --color2=1 \
#  --shade1=3001 --shade2=3002 --scale
#../browser/distout${ORD}FF_werr.dat My_Calculation
#../browser/distout${ORD}FF_r0.5.dat rescale_0.5
#../browser/distout${ORD}FF_r1.5.dat rescale_2.0
#../browser/distout${ORD}KretzerFF_kret.dat Kretzer00
#../browser/distout${ORD}DSSFF.dat DSS07
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --KretzerFF --xmin=${XMIN} --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001
#../browser/distout${ORD}FFK_werr.dat My_Calculation
#../browser/distout${ORD}KretzerFFK_kret.dat Kretzer00
#../browser/distout${ORD}DSSFFK.dat DSS07
#EOF
#../browser/distout${ORD}FFK_r0.5.dat rescale_0.5
#../browser/distout${ORD}FFK_r1.5.dat rescale_2.0

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --KretzerFF --xmin=${XMIN} --fill --color1=2 --color2=1 \
#  --shade1=3002 --shade2=3001
#../browser/distout${ORD}FFP_werr.dat My_Calculation
#../browser/distout${ORD}KretzerFFP_kret.dat Kretzer00
#../browser/distout${ORD}DSSFFP.dat DSS07
#EOF
#../browser/distout${ORD}FFP_r0.5.dat rescale_0.5
#../browser/distout${ORD}FFP_r1.5.dat rescale_2.0

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --KretzerFF --xmin=${XMIN} --fill --color1=2 --color2=1 --ymax=1.2 \
#  --shade1=3002 --shade2=3001
#../browser/distout${ORD}hadronFF_werr.dat My_Calculation
#../browser/distout${ORD}KretzerhadronFF_kret.dat Kretzer00
#../browser/distout${ORD}DSShadronFF.dat DSS07
#EOF
#--ymax=2.0 \
#../browser/distout${ORD}hadronFF.dat My_Calculation
#../browser/distout${ORD}FF_werr.dat My_Calculation
#../browser/distout${ORD}FF_r0.5.dat rescale_0.5
#../browser/distout${ORD}FF_r1.5.dat rescale_1.5
#../browser/distout${ORD}KretzerFF_kret.dat Kretzer00
#../browser/distout${ORD}DSSFF.dat DSS07
#../browser/distout${ORD}FF_r2.0.dat rescale_2.0
#../browser/distout${ORD}KretzerFF_werr.dat My_Calculation
#../browser/distout${ORD}KretzerFF_r0.5.dat rescale_0.5
#../browser/distout${ORD}KretzerFF_r2.0.dat rescale_2.0
#../browser/distout${ORD}FF_werr.dat My_Calculation
#../browser/distout${ORD}FF_r0.5.dat rescale_0.5
#../browser/distout${ORD}FF_r2.0.dat rescale_2.0

#cat <<EOF | ./plotDST --sumplot --Q2=${Q2} --order=${ORD} \
#--Kretzer --HAD=1 --xmin=${XMIN}
#../browser/distout${ORD}KretzerFF_kret.dat Kretzer_evo
#../browser/distout${ORD}KretzerFF_grid.dat Kretzer_grid
#EOF

#cat <<EOF | ./plotDST --sumplot --Q2=${Q2} --order=${ORD} \
#--DSSV --xmin=${XMIN} --gluon=0.8 --u=0.4 --d=0.8
#../browser/distout${ORD}DSSV.dat DSSV_evo
#../browser/distout${ORD}DSSV_grid.dat DSSV_grid
#EOF

#cat <<EOF | ./plotDST --sumplot --Q2=${Q2} --order=${ORD} \
#--MRST --xmin=${XMIN} --gluon=0.1
#../browser/distout${ORD}MRST.dat MRST_evo
#../browser/distout${ORD}MRST_grid.dat MRST_grid
#EOF

#cat <<EOF | ./plotDST --flavor=${iflav} --Q2=${Q2} --order=${ORD} \
#  --FF
#../browser/distout${ORD}KretzerFFK_grid.dat Kretzer_grid
#../browser/distout${ORD}KretzerFFK_kret.dat Kretzer_evo
#../browser/distout${ORD}KretzerFFK.dat Kretzer_fit
#EOF

done
done
done

# MRST
#--MRST
#../browser/distout${ORD}MRST.dat MRST_evo
#../browser/distout${ORD}MRST_grid.dat MRST_grid

# DSSV
#--
#../browser/distout${ORD}DSSV.dat DSSV_evo
#../browser/distout${ORD}DSSV_grid.dat DSSV_grid

# Kretzer
#  --FF
#../browser/distout${ORD}KretzerFF_werr.dat Kretzer_fit
#../browser/distout${ORD}KretzerFF_r0.25.dat Kretzer_fit_0.25
#../browser/distout${ORD}KretzerFF_r4.dat Kretzer_fit_4.0
#../browser/distout${ORD}KretzerFF_kret.dat Kretzer_evo
#../browser/distout${ORD}KretzerFF_grid.dat Kretzer_grid
#../browser/distout${ORD}KretzerFF.dat Kretzer_fit

#DSSFF
#../browser/distout${ORD}DSSFF.dat DSS_evo

# Kobayashi
#  --FF
#/home/ykobayashi/QCD/user_program/s_free.dat S_Free
#/home/ykobayashi/QCD/user_program/Kretzer.dat Kretzer

#Our Fit
#../browser/distout${ORD}FF.dat Our_Fit
miyachi-yu/Mellin
plotter/DSTplot.sh
Shell
gpl-3.0
9,490
#!/bin/bash
pushd test > /dev/null 2>&1
python test_check_datafiles.py
[ $? -ne 0 ] && exit 1
python test_size_to_int.py
[ $? -ne 0 ] && exit 1
popd > /dev/null 2>&1
dveeden/innodb-space
test/run_all.sh
Shell
gpl-3.0
174
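# An equivalent runner written as a loop, a sketch using only the test
# filenames that appear in the script above:
pushd test > /dev/null 2>&1
for t in test_check_datafiles.py test_size_to_int.py; do
    python "$t" || exit 1
done
popd > /dev/null 2>&1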
#!/bin/bash

CPU_FAMILY=$(grep "family" /proc/cpuinfo | sort -u | rev | cut -d ' ' -f1 | rev)
CPU_MODEL=$(grep "model" /proc/cpuinfo | grep -v "name" | sort -u | rev | cut -d ' ' -f1 | rev)
CPU_MODELNAME=$(grep "model name" /proc/cpuinfo | sort -u | cut -d ':' -f2 | xargs)

./startjob.py -M ${PBS_NODEFILE} -j ${PBS_JOBID} -f walltime=${PBS_WALLTIME} -f jobname=${PBS_JOBNAME} -f queue=${PBS_QUEUE} -f march=${CPU_FAMILY}_${CPU_MODEL} -f cpuname="${CPU_MODELNAME}"
RRZE-HPC/LMS
midware/influxdbrouter/examples/startPBSjob.sh
Shell
gpl-3.0
468
#!/bin/bash
set -eu

## A testing script to run every gold solution case in parallel.
## Failing test case names are displayed after all have finished.
## Any flags, e.g. --use-system-epsilon, will be propagated.

# Used to build and report failed cases
tempfile() { tempprefix=$(basename "$0"); mktemp "/tmp/${tempprefix}.XXXXXX"; }
FAILURES=$(tempfile)
SUCCESSES=$(tempfile)
trap 'rm -f "$SUCCESSES" "$FAILURES"' EXIT

# Default arguments to pass to scripts whenever none provided by user
defaultargs=--use-system-epsilon

# Scripts to test are every subdirectory with a check.sh
SCRIPTDIR="$( cd "$( echo "${BASH_SOURCE[0]%/*}" )"; pwd )"
for script in "$SCRIPTDIR"/*/check.sh
do
    (   "$SCRIPTDIR/neno" "$script" "${@-$defaultargs}"        \
         && echo "$script" "${@-$defaultargs}" >> "$SUCCESSES" \
         || echo "$script" "${@-$defaultargs}" >> "$FAILURES"  ) &
done

# Wait for all tests to finish
wait

# Output successes and failures
if test $# -lt 1; then
    echo "The following default options were provided to each case:"
else
    echo "The following options were provided to each case:"
fi
declare -i i=1
for arg in "${@-$defaultargs}"; do
    printf $'%6d %s\n' "$i" "$arg"
    i+=1
done
if test -s "$SUCCESSES"; then
    echo 'Successful cases were as follows: '
    sort "$SUCCESSES" | nl
fi
if test -s "$FAILURES"; then
    echo 'Failed cases were as follows: '
    sort "$FAILURES" | nl
fi

# Return success whenever $FAILURES is empty
! test -s "$FAILURES"
RhysU/suzerain
apps/perfect/manual.gold/all.sh
Shell
gpl-3.0
1,484
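# Usage sketch for the runner above: with no arguments every check.sh gets
# the script's built-in default flag; any explicit flags are propagated
# verbatim to each case:
./all.sh                        # runs each case with --use-system-epsilon
./all.sh --use-system-epsilon   # same flag, passed explicitly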
#!/bin/bash
set -e

pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"

. pegasus-lite-common.sh

pegasus_lite_init

# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT

echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir

echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package

echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-terminate_0-1.0

echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::terminate_0:1.0 -N ID0000016 -R condorpool -L example_workflow -T 2016-10-24T20:54:16+00:00 ./example_workflow-terminate_0-1.0
job_ec=$?
set -e
elainenaomi/sciwonc-dataflow-examples
dissertation2017/Experiment 1A/logs/w-08_0/20161024T205417+0000/00/00/terminate_0_ID0000016.sh
Shell
gpl-3.0
1,237
#! /bin/bash
# installs Grafana; it does an arch check to determine the proper .deb file to be used

echo "************************************"
echo "*******Installing Grafana***********"
echo "************************************"

. /etc/environment

LOCAL_DEB=''
case `uname -m` in
    x86_64)
        echo 'x86_64'
        LOCAL_DEB="$PI_GARDEN_ROOT/installers/grafana/grafana_4.1.2-1486989747_amd64.deb"
        ;;
    armv7l)
        echo 'armv7 (raspberry)'
        LOCAL_DEB="$PI_GARDEN_ROOT/installers/grafana/grafana_4.1.2-1487023783_armhf.deb"
        ;;
    *)
        echo 'Unknown architecture. Installer available only for x86_64 and armv7 (raspberry)'
        exit 1
        ;;
esac

dpkg -i $LOCAL_DEB
apt-get update
apt-get -fy install

sed -i "s/;http_port = 3000/http_port = 3002/g" /etc/grafana/grafana.ini

systemctl daemon-reload
systemctl start grafana-server
systemctl enable grafana-server.service
petre2dor/piGarden
scripts/updates/update-5.sh
Shell
gpl-3.0
921
#! /bin/bash
## Example workflows using EAGLE

CPU=8

## -- Germline Variant Calling --
# Directly after variant calling, run eagle to calculate variant log likelihood ratios
eagle -t $CPU -v variants.vcf -a align.bam -r reference.fa > eagle.txt
# Filter variants based on eagle output
python scripts/compile_likelihoods.py -p eagle.txt -minlr 5 -minaf 0.3 -mindepth 10 -seen > eagle.filter.txt
# Filter the VCF and add eagle score as an annotation
python scripts/combine_vcf_eagle.py -v variants.vcf -e eagle.filter.txt > variants.eagle.filter.vcf

## -- Somatic Variant Calling --
# Run variant calling on tumor samples,
# then run eagle to calculate log likelihood ratios for both normal and tumor samples
eagle -t $CPU -v tumor.vcf -a tumor.bam -r reference.fa > tumor.txt
eagle -t $CPU -v tumor.vcf -a normal.bam -r reference.fa > normal.txt
# Filter variants based on eagle outputs, where positive sample is tumor and negative sample is normal
python scripts/compile_likelihoods.py -p tumor.txt -n normal.txt -minlr 5 -minaf 0.05 -maxlr -2 -maxaf 0.04 -mindepth 10 -seen > somatic.filter.txt
# Filter the VCF and add eagle score as an annotation
python scripts/combine_vcf_eagle.py -v tumor.vcf -e somatic.filter.txt > variants.somatic.vcf

## -- Trio De Novo Variant Calling --
# Run variant calling on each of child and parents separately, then make a union vcf
python ~/scripts/tablize.py -i 0-4 child.vcf father.vcf mother.vcf | cut -f 1-5 > union.vcf
# Add headers to union.vcf [optional]
grep '^#' child.vcf | cat - union.vcf > tmp && mv tmp union.vcf
# then run eagle to calculate log likelihood ratios for all samples using union.vcf
eagle -t $CPU -v union.vcf -a child.bam -r reference.fa > child.txt
eagle -t $CPU -v union.vcf -a father.bam -r reference.fa > father.txt
eagle -t $CPU -v union.vcf -a mother.bam -r reference.fa > mother.txt
# Filter variants based on eagle outputs, where positive sample is child and negative sample is parent
python scripts/compile_likelihoods.py -p child.txt -n father.txt -minlr 5 -minaf 0.25 -maxlr -2 -maxaf 0.04 -mindepth 10 -seen > child.vs.father.txt
python scripts/compile_likelihoods.py -p child.txt -n mother.txt -minlr 5 -minaf 0.25 -maxlr -2 -maxaf 0.04 -mindepth 10 -seen > child.vs.mother.txt
# Get the variants which are present only in the child
python scripts/tablize.py -a -i 0-7 child.vs.father.txt child.vs.mother.txt > child.vs.parents.txt
# Filter the VCF and add eagle score as an annotation
python scripts/combine_vcf_eagle.py -v child.vcf -e child.vs.parents.txt > child.denovo.vcf

## -- Read Classification for RNA-Seq expression quantification with homeologs --
## Requires LAST (http://last.cbrc.jp/)
## Align the sample data to each origin specific reference separately

## From origin specific transcripts in an allopolyploid, find homeologs and their genotype differences
lastdb -uNEAR -R01 A_origin A_transcripts.fa
lastdb -uNEAR -R01 B_origin B_transcripts.fa
lastal A_origin -P$CPU B_transcripts.fa | last-map-probs -m 0.49 > A_origin.maf
lastal B_origin -P$CPU A_transcripts.fa | last-map-probs -m 0.49 > B_origin.maf

# [Note] check to make sure your transcripts are based on exons or CDS in the annotation.
# If CDS then change below, -f exon to -f CDS
python scripts/homeolog_genotypes.py -f exon -o Ref_A -g annotation_A.gtf A_origin.maf B_origin.maf
python scripts/homeolog_genotypes.py -f exon -o Ref_B -g annotation_B.gtf B_origin.maf A_origin.maf

eagle -t $CPU -a data_align2_A.bam -r A.reference.fa -v Ref_A.gtf.vcf --omega=1e-40 --mvh --splice --isc --verbose 1> Ref_A.sample.txt 2> Ref_A.sample.readinfo.txt
eagle-rc -a data_align2_A.bam --listonly -o Ref_A.sample Ref_A.sample.txt Ref_A.sample.readinfo.txt > Ref_A.sample.list

eagle -t $CPU -a data_align2_B.bam -r B.reference.fa -v Ref_B.gtf.vcf --omega=1e-40 --mvh --splice --isc --verbose 1> Ref_B.sample.txt 2> Ref_B.sample.readinfo.txt
eagle-rc -a data_align2_B.bam --listonly -o Ref_B.sample Ref_B.sample.txt Ref_B.sample.readinfo.txt > Ref_B.sample.list

## Find the consensus classification based on likelihood
python scripts/ref2_consensus.py -A Ref_A.sample.list -B Ref_B.sample.list -o sample

## Write bam files based on consensus list, using A as the reference
eagle-rc -a data_align2_A.bam -o sample.chrA --readlist sample.chrA.list
eagle-rc -a data_align2_B.bam -o sample.chrB --readlist sample.chrB.list

## Perform read counting as you prefer, for example:
featureCounts -T $CPU -t exon -g transcript_id -a annotation_A.gtf -o sample.A.counts.txt sample.ref.bam
featureCounts -T $CPU -t exon -g transcript_id -a annotation_A.gtf -o sample.B.counts.txt sample.alt.bam
tony-kuo/eagle
scripts/example.sh
Shell
gpl-3.0
4,650
# This file is part of the Aloofix project.
# Copyright (C) 2013 Kevin Johnson <[email protected]>
# Distributed under the terms of the GNU General Public License version 3,
# or (at your option) any later version.

name=tcpdump
version=4.4.0
sequence=1
description="Command-line packet analyzer"
site=http://www.tcpdump.org/release
depends="user-tcpdump libpcap-lib openssl-lib"
builddepends="libpcap-dev openssl-dev"
section=networking
license=BSD-3-Clause
license_files=LICENSE
extra_doc_files="CHANGES CREDITS README"
extra_configure_args="--enable-ipv6 --with-user=tcpdump --with-chroot=/var/lib/tcpdump --disable-smb --with-crypto"
install_target=install

preinstall_hook() {
    rm "$preinstalldir/usr/sbin/tcpdump.$version"
    strip "$preinstalldir/usr/sbin/tcpdump"
    install -d -m 700 "$preinstalldir/var/lib/tcpdump"
}

pkg_base_hook() {
    register_postinst "chown tcpdump:tcpdump /var/lib/tcpdump"
}
aloofschipperke/aloofix
specs/pkgs/tcpdump/spec.sh
Shell
gpl-3.0
930
#!/bin/bash
################################################################################
#   OpenAirInterface
#   Copyright(c) 1999 - 2014 Eurecom
#
#   OpenAirInterface is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   OpenAirInterface is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with OpenAirInterface. The full GNU General Public License is
#   included in this distribution in the file called "COPYING". If not,
#   see <http://www.gnu.org/licenses/>.
#
#   Contact Information
#   OpenAirInterface Admin: [email protected]
#   OpenAirInterface Tech : [email protected]
#   OpenAirInterface Dev  : [email protected]
#
#   Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
#
################################################################################
# file utils.bash
# brief
# author Lionel Gauthier
# company Eurecom
# email: [email protected]
#

cidr2mask() {
  local i mask=""
  local full_octets=$(($1/8))
  local partial_octet=$(($1%8))

  for ((i=0;i<4;i+=1)); do
    if [ $i -lt $full_octets ]; then
      mask+=255
    elif [ $i -eq $full_octets ]; then
      mask+=$((256 - 2**(8-$partial_octet)))
    else
      mask+=0
    fi
    test $i -lt 3 && mask+=.
  done

  echo $mask
}

black='\E[30m'
red='\E[31m'
green='\E[32m'
yellow='\E[33m'
blue='\E[34m'
magenta='\E[35m'
cyan='\E[36m'
white='\E[37m'
reset_color='\E[00m'

ROOT_UID=0
E_NOTROOT=67

HOSTNAME=$(hostname -f)

trim () {
  echo "$1" | sed -n '1h;1!H;${;g;s/^[ \t]*//g;s/[ \t]*$//g;p;}'
}

trim2() {
  local var=$@
  var="${var#"${var%%[![:space:]]*}"}"   # remove leading whitespace characters
  var="${var%"${var##*[![:space:]]}"}"   # remove trailing whitespace characters
  echo -n "$var"
}

cecho()   # Color-echo
          # arg1 = message
          # arg2 = color
{
  local default_msg="No Message."
  message=${1:-$default_msg}
  color=${2:-$green}
  echo -e -n "$color$message$reset_color"
  echo
  return
}

echo_error() {
  local my_string=""
  until [ -z "$1" ]
  do
    my_string="$my_string$1"
    shift
  done
  cecho "$my_string" $red
}

echo_warning() {
  local my_string=""
  until [ -z "$1" ]
  do
    my_string="$my_string$1"
    shift
  done
  cecho "$my_string" $yellow
}

echo_success() {
  local my_string=""
  until [ -z "$1" ]
  do
    my_string="$my_string$1"
    shift
  done
  cecho "$my_string" $green
}

bash_exec() {
  output=$($1 2>&1)
  result=$?
  if [ $result -eq 0 ]
  then
    echo_success "$1"
  else
    echo_error "$1: $output"
  fi
}

extract() {
  if [ -f $1 ] ; then
    case $1 in
      *.tar.bz2) tar xvjf $1 ;;
      *.tar.gz)  tar xvzf $1 ;;
      *.bz2)     bunzip2 $1 ;;
      *.rar)     unrar $1 ;;
      *.gz)      gunzip $1 ;;
      *.tar)     tar xvf $1 ;;
      *.tbz2)    tar xvjf $1 ;;
      *.tgz)     tar xvzf $1 ;;
      *.zip)     unzip $1 ;;
      *.Z)       uncompress $1 ;;
      *.7z)      7z x $1 ;;
      *)         echo_error "'$1' cannot be extracted via >extract<" ; return 1;;
    esac
  else
    echo_error "'$1' is not a valid file"
    return 1
  fi
  return 0
}

set_openair() {
  path=`pwd`
  declare -i length_path
  declare -i index
  length_path=${#path}

  for i in 'openair1' 'openair2' 'openair3' 'openair-cn' 'targets'
  do
    index=`echo $path | grep -b -o $i | cut -d: -f1`
    #echo ${path%$token*}
    if [[ $index -lt $length_path && index -gt 0 ]]
    then
      declare -x OPENAIR_DIR
      index=`expr $index - 1`
      openair_path=`echo $path | cut -c1-$index`
      #openair_path=`echo ${path:0:$index}`
      export OPENAIR_DIR=$openair_path
      export OPENAIR_HOME=$openair_path
      export OPENAIR1_DIR=$openair_path/openair1
      export OPENAIR2_DIR=$openair_path/openair2
      export OPENAIR3_DIR=$openair_path/openair3
      export OPENAIRCN_DIR=$openair_path/openair-cn
      export OPENAIR_TARGETS=$openair_path/targets
      return 0
    fi
  done
  return -1
}

test_install_asn1c_4_rrc_cellular() {
  if [ -d $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c/asn1c ]; then
    if [ -x $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c/asn1c/asn1c/asn1c ]; then
      if [ -x /usr/local/bin/asn1c ]; then
        diff /usr/local/bin/asn1c $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c/asn1c/asn1c/asn1c >/dev/null 2>&1
        if [ $? -eq 0 ]; then
          echo_success "asn1c for RRC cellular installed"
          return 0
        fi
      fi
      echo_warning "Installing asn1c for RRC cellular..."
      cd $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c/asn1c
      sudo make install
      return 0
    fi
  else
    echo_warning "asn1c for RRC cellular is not installed in $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c/. Installing it"
    cd $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c
    svn co https://asn1c.svn.sourceforge.net/svnroot/asn1c/trunk asn1c
  fi
  echo_warning "Configuring and building and installing asn1c for RRC cellular..."
  cd $OPENAIR2_DIR/RRC/LITE/MESSAGES/asn1c/asn1c
  ./configure
  make
  sudo make install
}

wait_process_started () {
  if [ -z "$1" ]
  then
    echo_error "WAITING FOR PROCESS START: NO PROCESS"
    return 1
  fi
  ps -C $1 > /dev/null 2>&1
  while [ $? -ne 0 ]; do
    echo_warning "WAITING FOR $1 START"
    sleep 2
    ps -C $1 > /dev/null 2>&1
  done
  echo_success "PROCESS $1 STARTED"
  return 0
}

is_process_started () {
  if [ -z "$1" ]
  then
    echo_error "WAITING FOR PROCESS START: ERROR NO PROCESS NAME IN ARGUMENT"
    return 1
  fi
  ps -C $1 > /dev/null 2>&1
  if [ $? -ne 0 ]
  then
    echo_success "PROCESS $1 NOT STARTED"
    return 1
  fi
  echo_success "PROCESS $1 STARTED"
  return 0
}

assert() {
  # If condition false
  # exit from script with error message
  E_PARAM_ERR=98
  E_PARAM_FAILED=99

  if [ -z "$2" ]   # Not enough parameters passed.
  then
    return $E_PARAM_ERR
  fi

  lineno=$2
  if [ ! $1 ]
  then
    echo "Assertion failed:  \"$1\""
    echo "File \"$0\", line $lineno"
    exit $E_ASSERT_FAILED
  fi
}

test_install_package() {
  # usage: test_install_package package_name_to_be_installed optional_option_to_apt_get_install
  dpkg --get-selections $1 | grep -i install > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    echo_warning "Package $1 is not installed. Installing it." >&2
    apt-get install $2 $1 -y
    dpkg --get-selections $1 | grep -i install > /dev/null 2>&1
    if [ $? -ne 0 ]; then
      exit 1
    fi
  else
    echo_success "$1 is installed"
  fi
  return 0
}

test_command_install_package() {
  # usage: test_command_install_package searched_binary package_to_be_installed_if_binary_not_found optional_option_to_apt_get_install
  if [ $# -eq 2 ]; then
    command -v $1 >/dev/null 2>&1 || { echo_warning "Program $1 is not installed. Trying installing it." >&2; apt-get install $2 -y; command -v $1 >/dev/null 2>&1 || { echo_error "Program $1 is not installed. Aborting." >&2; exit 1; };}
  else
    if [ $# -eq 3 ]; then
      command -v $1 >/dev/null 2>&1 || { echo_warning "Program $1 is not installed. Trying installing it (apt-get install $3 $2)." >&2; apt-get install $3 $2 -y; command -v $1 >/dev/null 2>&1 || { echo_error "Program $1 is not installed. Aborting." >&2; exit 1; };}
    else
      echo_success "test_command_install_package: BAD PARAMETER"
      exit 1
    fi
  fi
  echo_success "$1 available"
}

test_command_install_script() {
  # usage: test_command_install_script searched_binary script_to_be_invoked_if_binary_not_found
  command -v $1 >/dev/null 2>&1 || { echo_warning "Program $1 is not installed. Trying installing it." >&2; bash $2; command -v $1 >/dev/null 2>&1 || { echo_error "Program $1 is not installed. Aborting." >&2; exit 1; };}
  echo_success "$1 available"
}

start_openswitch_daemon() {
  rmmod -s bridge
  if [[ -e "/lib/modules/`uname -r`/extra/openvswitch.ko" ]] ; then
    bash_exec "insmod /lib/modules/`uname -r`/extra/openvswitch.ko"
  else
    echo_error "/lib/modules/`uname -r`/extra/openvswitch.ko not found, exiting"
    exit -1
  fi

  is_process_started "ovsdb-server"
  if [ $? -ne 0 ]
  then
    ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,manager_options --pidfile --detach
    wait_process_started "ovsdb-server"
  fi

  # To be done after installation
  # ovs-vsctl --no-wait init

  is_process_started "ovs-vswitchd"
  if [ $? -ne 0 ]
  then
    ovs-vswitchd --pidfile --detach
    wait_process_started "ovs-vswitchd"
  fi
}

check_enb_config() {
  if [ ! -f $OPENAIR3_DIR/OPENAIRMME/UTILS/CONF/enb_$HOSTNAME.conf ]
  then
    echo "Cannot find file $OPENAIR3_DIR/OPENAIRMME/UTILS/CONF/enb_$HOSTNAME.conf"
    echo "Please make sure to create one that fits your use (you can use mme_default.conf file as template)"
    exit -1
  fi
}

check_for_root_rights() {
  if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root" 1>&2
    exit -1
  fi
}

rotate_log_file () {
  if [ -f $1 ]; then
    TIMESTAMP=`date +%Y-%m-%d.%Hh_%Mm_%Ss`
    NEWLOGFILE=$1.$TIMESTAMP
    mv $1 $NEWLOGFILE
    cat /dev/null > $1
    sync
    nohup gzip -f -9 $NEWLOGFILE &
  fi
}

###########################################################
declare -x OPENAIR_DIR=""
declare -x OPENAIR_HOME=""
declare -x OPENAIR1_DIR=""
declare -x OPENAIR2_DIR=""
declare -x OPENAIR3_DIR=""
declare -x OPENAIRCN_DIR=""
declare -x OPENAIR_TARGETS=""
###########################################################

set_openair
cecho "OPENAIR_DIR     = $OPENAIR_DIR" $green
cecho "OPENAIR_HOME    = $OPENAIR_HOME" $green
cecho "OPENAIR1_DIR    = $OPENAIR1_DIR" $green
cecho "OPENAIR2_DIR    = $OPENAIR2_DIR" $green
cecho "OPENAIR3_DIR    = $OPENAIR3_DIR" $green
cecho "OPENAIRCN_DIR   = $OPENAIRCN_DIR" $green
cecho "OPENAIR_TARGETS = $OPENAIR_TARGETS" $green
mspublic/openair4G-mirror
targets/PROJECTS/SPECTRA/DEMO_SPECTRA/spectra_demo_src/common/openair_scripts/utils.bash
Shell
gpl-3.0
10,978
#!/usr/bin/env bash

find . -name hardware_header.h -exec rm {} \;
find . -name hardware_init.h -exec rm {} \;
find . -name protocol_header.h -exec rm {} \;
find . -name protocol_init.h -exec rm {} \;
find . -name operator_header.h -exec rm {} \;
find . -name operator_init.h -exec rm {} \;
find . -name action_header.h -exec rm {} \;
find . -name action_init.h -exec rm {} \;

git checkout master
git stash
git stash clear
git pull origin master
git fetch --tags

cp CMakeLists.txt CMakeLists.txt.original

GIT=$(git describe --always | sed -e 's/^v//g');
COMMIT=$(echo $GIT | cut -d'-' -f2);
SHA=$(echo $GIT | cut -d'-' -f3);
VERSION=$(echo $GIT | cut -d'-' -f1 | sed -e 's/^v//g');

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-amd64.cmake
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh amd64

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-amd64.cmake -DCMAKE_BUILD_TYPE=Debug
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh amd64 debug

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-i386.cmake
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh i386

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-i386.cmake -DCMAKE_BUILD_TYPE=Debug
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh i386 debug

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-arm.cmake -DCMAKE_C_FLAGS=-fomit-frame-pointer
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh armhf

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-arm.cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS=-fomit-frame-pointer
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh armhf debug

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-mipsel.cmake
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh mipsel

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-mipsel.cmake -DCMAKE_BUILD_TYPE=Debug
make -j4
cpack -G DEB
mv *.deb ../
cd ..
./gen_package.sh mipsel debug

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-aarch64.cmake
make -j4
cpack -G DEB
mv *.deb ../
cd ..
for i in $(find *.deb); do cp $i $i.cpy; done
./gen_package.sh arm64
for i in $(find *.cpy); do mv $i ${i/.cpy/}; done
./gen_package.sh aarch64

sed -e"" 's/MODULESPACK ON/MODULESPACK OFF/g' CMakeLists.txt > CMakeLists.txt.tmp
cp CMakeLists.txt.tmp CMakeLists.txt
./setup.sh
clear
mkdir -p build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=/home/pilight/tools/toolchain-aarch64.cmake -DCMAKE_BUILD_TYPE=Debug
make -j4
cpack -G DEB
mv *.deb ../
cd ..
for i in $(find *.deb); do cp $i $i.cpy; done
./gen_package.sh arm64 debug
for i in $(find *.cpy); do mv $i ${i/.cpy/}; done
./gen_package.sh aarch64 debug

mv CMakeLists.txt.original CMakeLists.txt
rm CMakeLists.txt.tmp
rm -r build/
pilight/pilight
compile_all.sh
Shell
gpl-3.0
4,237
#!/usr/bin/env bash # File : wds-lfs-chk.sh # Created : <2018-6-26 Tue 23:50:52 BST> # Modified : <2018-7-07 Sat 01:42:50 BST> #Rλatan # Author : #Rλatan # Synopsis : <Check all requirements for the host to buld LFS> set -e # use it for debug #set -x # ------------------------------------------------------------------------------ #+CHANGELOG # 0.0.1 :: <2018-6-26 Tue 23:50:52 BST> # + Iniit # ============================================================================== # ------------------------------------------------------------------------------ #+DEFAUL_CONFIG CMD_VER=0.1.0 CMDNAME=wds-lfs-chk.sh REQUIRE=( "bison" "cat" "chown" "cut" "diff" "md5sum" "wget" "mkdir" "mount" "find" "g++" "gawk" "mkfs" "gcc" "bash" "grep" "gzip" "sh" "head" "ld" "ldd" "m4" "tar" "make" "makeinfo" "patch" "perl" "readlink" "sed" "tar" "xz" "yacc" ) # Conditional symafors PAS=$'[ \033[32;1mPAS\033[0m ] ' ERR=$'[ \033[31;1mERR\033[0m ] ' INF="[ INF ] " # ------------------------------------------------------------------------------ #+UTILITIES_FUNCTIONS _err() { # All errors go to stder. printf "[%s]: %s\n" "$(date +%s.%3N)" "$1" } _msg() { # Default message fro stdout. printf "[%s]: %s\n" "$(date +%s.%3N)" "$1" } chk_require() { # Check that every required command is available. declare -a cmds declare -a warn cmds=(${1}) _msg "--- [ $FUNCNAME ] ---" [ "${#cmds}" -eq "0" ] && { _err "${ERR}No arguments provided."; return 1; } for c in ${cmds[@]}; do command -v "$c" &>/dev/null [ "$?" -eq "1" ] && warn+=("$c") done [ "${#warn}" -ne 0 ] && { _err "${ERR}Commands <${warn[*]}> are not available, install them."; return 1; } _msg "${PAS}verification of required commands completed" } chk_term() { # Check for ANSI terminal for color printing. local ansi_term _msg "--- [ $FUNCNAME ] ---" if [ -t 2 ]; then _msg "${INF}terminal is <${TERM}>" if [ "${TERM+set}" = 'set' ]; then case "$TERM" in xterm*|rxvt*|urxvt*|linux*|vt*|eterm*) ANSI_TERM=true ;; *) ANSI_TERM=false ERR="[ ERR ] " PAS="[ PAS ] " ;; esac fi fi } chk_version () { # Bullet prove checking of cmd version. declare -a cmds declare -a cmd_ver declare -a ver_re cmds=(${1}) ver_re="\b(\d+\.)?(\d+\.)?(\*|\d+)" _msg "--- [ $FUNCNAME ] ---" [ "${#cmds}" -eq "0" ] && { _err "${ERR}No arguments provided."; return 1; } for c in ${cmds[@]}; do if eval "${c} -V:version" &>/dev/null; then cmd_ver="$(eval ${c} -V:version | grep -m1 -oP "$ver_re" | head -n1)" _msg "${c} ${cmd_ver}" elif eval "${c} --version" &>/dev/null; then cmd_ver="$(eval ${c} --version | grep -m1 -oP "$ver_re" | head -n1)" _msg "${c} ${cmd_ver}" elif eval "${c} -V" &>/dev/null; then cmd_ver="$(eval ${c} -V | grep -m1 -oP "$ver_re" | head -n1)" _msg "${c} ${cmd_ver}" elif eval "${c} --help" &>/dev/null; then cmd_ver="$(eval ${c} --help | grep -m1 -oP "$ver_re" | head -n1)" _msg "${c} ${cmd_ver}" elif eval "${c} -version" &>/dev/null; then cmd_ver="$(eval ${c} -version &>1 | grep -m1 -oP "$ver_re" | head -n1)" _msg "${c} ${cmd_ver}" fi done } chk_link () { # Check if command is symlink to actual one. declare -a cmds declare -a cmd_path cmds=(${1}) _msg "--- [ $FUNCNAME ] ---" [ "${#cmds}" -eq "0" ] && { _err "${ERR}No arguments provided."; return 1; } for c in ${cmds[@]}; do cmd_path=$(command -v "$c") if [[ -h "$cmd_path" ]]; then _msg "${c} -> ${cmd_path} -> $(readlink -f "${cmd_path}")" fi done } chk_pkg () { # Check wich package command belong to. 
declare -a cmds cmds=(${1}) _msg "--- [ $FUNCNAME ] ---" [ "${#cmds}" -eq "0" ] && { _err "${ERR}No arguments provided."; return 1; } for c in ${cmds[@]}; do if command -v rpm &>/dev/null; then _msg "${c} -> $(rpm -qf $(which "$c"))" elif command -v dpkg-query &>/dev/null; then dpkg-query -S $(which "$c") fi done } chk_build_sys () { _msg "--- [ $FUNCNAME ] ---" _msg "your dinamik linker is:" echo readelf -l /bin/ls | grep interpreter echo _msg "linker search order PATH" echo ld --verbose | grep "SEARCH" echo echo 'int main(){}' > dummy.c && g++ -o dummy dummy.c if [ -x dummy ]; then _msg "${PAS}g++ compilation"; else _msg "${ERR}g++ compilation" fi rm -f dummy.c dummy echo 'int main(){}' > dummy.c && gcc -o dummy dummy.c if [ -x dummy ]; then _msg "${PAS}gcc compilation"; else _msg "${ERR}gcc compilation" fi rm -f dummy.c dummy } # ------------------------------------------------------------------------------ recomendations () { cat <<EOF Your host system should have the following software with the minimum versions indicated. Bash-3.2 (/bin/sh should be a symbolic or hard link to bash) Binutils-2.17 (Versions greater than 2.30 are not recommended as they have not been tested) Bison-2.3 (/usr/bin/yacc should be a link to bison or small script that executes bison) Bzip2-1.0.4 Coreutils-6.9 Diffutils-2.8.1 Findutils-4.2.31 Gawk-4.0.1 (/usr/bin/awk should be a link to gawk) GCC-4.7 Glibc-2.11 Grep-2.5.1a Gzip-1.3.12 Linux Kernel-3.2 M4-1.4.10 Make-3.81 Patch-2.5.4 Perl-5.8.8 Sed-4.1.5 Tar-1.22 Texinfo-4.7 Xz-5.0.0 EOF } main () { printf "Start %s v%s at %s\n\n" "$CMD_NAME" "$CMD_VER" "$(date)" chk_term chk_require "${REQUIRE[*]}" || exit 1 chk_version "${REQUIRE[*]}" chk_link "${REQUIRE[*]}" chk_pkg "${REQUIRE[*]}" chk_build_sys _msg "${INF}$(cat /proc/version)" recomendations } main "$@" # End of wds-lfs-chk.sh
Hellseher/wds
wds-hacks/wds-lfs-chk.sh
Shell
gpl-3.0
6,356
#!/bin/bash

# Copyright (C) 2018 Maciej Delmanowski <[email protected]>
# Copyright (C) 2018 DebOps <https://debops.org/>
# SPDX-License-Identifier: GPL-3.0-or-later

set -o nounset -o pipefail -o errexit

make clean-tests
make test-spdx test-pep8 test-yaml test-shell
make check
ganto/debops
lib/travis/syntax/script.sh
Shell
gpl-3.0
281
# pacman -Qo /bin/netstat -- to find packages
pacman -S libelf linux linux-headers \
    systemd gdb rsync net-tools strace openssh make bison flex
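
# Hypothetical example of the ownership lookup mentioned above
# (exact output wording varies by pacman version):
#   $ pacman -Qo /usr/bin/netstat
#   /usr/bin/netstat is owned by net-tools <version>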
mgood7123/UPM
Sources/dtrace-20160613/tools/get-deps-arch.sh
Shell
gpl-3.0
145
#!/bin/bash

ORG_ID=1
PRODUCT_NAME="OCP Docker images"

upstream_repos=( openshift3/ose-deployer \
                 openshift3/ose-docker-registry \
                 openshift3/registry-console \
                 openshift3/ose-pod \
                 openshift3/ose-docker-builder \
                 openshift3/ose-sti-builder \
                 openshift3/ose-haproxy-router \
                 openshift3/logging-elasticsearch \
                 openshift3/logging-kibana \
                 openshift3/logging-fluentd \
                 openshift3/logging-auth-proxy \
                 openshift3/metrics-hawkular-metrics \
                 openshift3/metrics-cassandra \
                 openshift3/metrics-heapster \
                 openshift3/ose \
                 openshift3/node \
                 openshift3/openvswitch \
                 rhel7/etcd \
                 openshift3/ose-keepalived-ipfailover )

xpaas_images=( redhat-openjdk-18/openjdk18-openshift \
               jboss-webserver-3/webserver30-tomcat8-openshift \
               jboss-eap-7/eap70-openshift \
               redhat-sso-7/sso70-openshift \
               rhscl/postgresql-95-rhel7 \
               rhscl/nodejs-4-rhel7 \
               rhscl/nodejs-6-rhel7 \
               rhscl/python-27-rhel7 \
               rhscl/python-35-rhel7 )

jenkins_images=( openshift3/jenkins-2-rhel7 \
                 openshift3/jenkins-slave-base-rhel7 \
                 openshift3/jenkins-slave-maven-rhel7 \
                 openshift3/jenkins-slave-nodejs-rhel7 )

hammer product create --name "$PRODUCT_NAME" --organization-id $ORG_ID

for i in ${upstream_repos[@]}; do
    hammer repository create --name "$i" --organization-id $ORG_ID --content-type docker --url "https://registry.access.redhat.com" --docker-upstream-name "$i" --product "$PRODUCT_NAME"
done

for i in ${xpaas_images[@]}; do
    hammer repository create --name "$i" --organization-id $ORG_ID --content-type docker --url "https://registry.access.redhat.com" --docker-upstream-name "$i" --product "$PRODUCT_NAME"
done

for i in ${jenkins_images[@]}; do
    hammer repository create --name "$i" --organization-id $ORG_ID --content-type docker --url "https://registry.access.redhat.com" --docker-upstream-name "$i" --product "$PRODUCT_NAME"
done
opuk/satellite
hammer/populate_ose/populate-docker.sh
Shell
gpl-3.0
2,072
#!/bin/bash
# Use-case: start integration branch from master
# (bash shebang: the script uses the bash-only `function` and `local` keywords)

function cleanup_branches {
    local branches=`git branch | grep -v master`
    for branch in $branches; do
        (git branch -D $branch) > /dev/null 2>&1
    done
}

function show_scenario {
    echo ""
    echo "Create an integration branch, which contains the following bugfixes:"
    git branch | grep bugfix
    echo ""
}

show_scenario
cleanup_branches
rm -Rf *.sh
Salamangkero/git-build
sandbox/exercises/exercise_00.sh
Shell
gpl-3.0
417
#!/bin/sh
set -e  # exit on first error

OPENCV_URL=https://github.com/Itseez/opencv/archive/3.0.0-alpha.zip

install_dependencies() {
    sudo apt-get -y install \
        libopencv-dev \
        build-essential \
        cmake \
        git \
        libgtk2.0-dev \
        pkg-config \
        python-dev \
        python-numpy \
        libdc1394-22 \
        libdc1394-22-dev \
        libjpeg-dev \
        libpng12-dev \
        libtiff4-dev \
        libjasper-dev \
        libavcodec-dev \
        libavformat-dev \
        libswscale-dev \
        libxine-dev \
        libgstreamer0.10-dev \
        libgstreamer-plugins-base0.10-dev \
        libv4l-dev \
        libtbb-dev \
        libqt4-dev \
        libfaac-dev \
        libmp3lame-dev \
        libopencore-amrnb-dev \
        libopencore-amrwb-dev \
        libtheora-dev \
        libvorbis-dev \
        libxvidcore-dev \
        x264 \
        v4l-utils \
        unzip
}

download_opencv() {
    mkdir -p /usr/local/src/opencv
    cd /usr/local/src/opencv
    wget $OPENCV_URL -O opencv-3.0.0-alpha.zip
    unzip opencv-3.0.0-alpha.zip
    cd -
}

install_opencv() {
    # compile and install opencv
    cd /usr/local/src/opencv
    cd opencv-3.0.0-alpha
    mkdir -p build
    cd build
    cmake \
        -D CMAKE_BUILD_TYPE=RELEASE \
        -D CMAKE_INSTALL_PREFIX=/usr/local \
        -D WITH_TBB=ON \
        -D WITH_V4L=ON \
        -D WITH_QT=ON \
        -D WITH_OPENGL=ON ..
    make -j 4
    # sudo make install
}

# MAIN
install_dependencies
#download_opencv
install_opencv
chutsu/slam
slam_vision/scripts/opencv3_install.sh
Shell
gpl-3.0
1,566
#!/bin/bash
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
mkdir -p $DIR/build

npm install
bower update
gulp build

cd $DIR/dist
touch $DIR/build/frontend.zip
rm $DIR/build/frontend.zip
zip -r $DIR/build/frontend.zip *
cd $DIR
rudywaltz/web2-frontend
build.sh
Shell
gpl-3.0
242
#!/bin/bash -x

#source /host/settings.sh

restore_data() {
    drush @qtr sql-query --file=$(pwd)/qtr.sql
    $(drush @qtr sql-connect --database=qtr_data) < $(pwd)/qtr_data.sql
}

restore_config() {
    # enable features
    while read feature; do
        drush --yes @qtr pm-enable $feature
        drush --yes @qtr features-revert $feature
    done < qtr_features.txt
    while read feature; do
        drush --yes @qtr_dev pm-enable $feature
        drush --yes @qtr_dev features-revert $feature
    done < qtr_dev_features.txt

    # restore private variables
    drush @qtr php-script $(pwd)/restore-private-vars-qtr.php
    drush @qtr_dev php-script $(pwd)/restore-private-vars-qtr-dev.php

    # twitter config
    if [[ -f trc ]]; then
        cp trc /home/twitter/.trc
        chown twitter: /home/twitter/.trc
    fi
}

restore_custom_scripts() {
    if [[ ! -f /host/backup.sh ]] && [[ -f backup.sh ]]; then
        cp backup.sh /host/
    fi
    if [[ ! -f /host/restore.sh ]] && [[ -f restore.sh ]]; then
        cp restore.sh /host/
    fi
    if [[ ! -d /host/cmd ]] && [[ -d cmd ]]; then
        cp -a cmd /host/
    fi
    if [[ ! -d /host/scripts ]] && [[ -d scripts ]]; then
        cp -a scripts /host/
    fi
}

# go to the backup directory
backup=$1
cd /host/$backup

# restore
restore_data
restore_config

# restore any custom scripts
restore_custom_scripts

# custom restore script
[[ -f /host/restore.sh ]] && source /host/restore.sh
Q-Translate/qtr_server
ds/scripts/restore.sh
Shell
gpl-3.0
1,462
#!/bin/sh
#TODO: handle paths better (so the script can be called from anywhere)
JARS=`/bin/ls lib/*.jar`
CLASSPATH=""
for JAR in $JARS ; do
    CLASSPATH=$CLASSPATH:$JAR
done
java -cp "$CLASSPATH" io.github.algorys.agshmne.Agshmne
algorys/agshmne
packager/src/main/script/agshmne.sh
Shell
gpl-3.0
248
atm_forcing=$1
sfc_forcing=$2
out=$3

python erai2icar.py $atm_forcing $sfc_forcing $out
johanneshorak/erai2icar
prep_for_icar.sh
Shell
gpl-3.0
88
#!/bin/bash echo "scale=2 ; $*" | sed -e "s:x:*:g" | sed -e "s:,::g" | bc #function math #{ # echo "scale=2 ; $*" | sed -e "s:x:*:g" | sed -e "s:,::g" | bc #}
KIAaze/bin_and_dotfiles_public
bins/public_bin/math.sh
Shell
gpl-3.0
167
#!/bin/bash

#/media/StorageOne/HTS/VirusMeta/SAM_BAM/circos_plot_cov/circos_pipeline.sh /media/StorageOne/HTS/Projects/2011_N17_Viraskin2-HiSeq /media/StorageOne/HTS/Projects/2011_N17_Viraskin2-HiSeq/anecto_virus.fasta /media/StorageOne/HTS/Projects/2011_N17_Viraskin2-HiSeq/Data/Intensities/BaseCalls/forward.fastq /media/StorageOne/HTS/Projects/2011_N17_Viraskin2-HiSeq/Data/Intensities/BaseCalls/reverse.fastq

##########################
export project_work_dir=$1
export path_htsa_dir=/media/StorageOne/HTS
##########################

cp -r $path_htsa_dir/VirusMeta/SAM_BAM/circos_plot_cov $1/circos_plot_cov

##
if [ -f $1/circos_plot_cov/data/histogram.txt ]; then
    rm $1/circos_plot_cov/data/histogram.txt
fi

if [ -f $1/circos_plot_cov/circos.png ]; then
    rm $1/circos_plot_cov/circos.png
fi

if [ -f $1/circos_plot_cov/circos.csv ]; then
    rm $1/circos_plot_cov/circos.csv
fi

if [ -f $1/circos_plot_cov/data/histogram.txt ]; then
    rm $1/circos_plot_cov/data/histogram.txt
fi

###
cd $project_work_dir/circos_plot_cov

#$path_htsa_dir/VirusMeta/SAM_BAM/BWA_NR.sh $1/b_actine_map $1/circos_plot_cov/b_actine.fasta $2 $3

##########################################################################################
# prepare files
##########################################################################################
export work_fasta=$(basename $2)
export work_fasta_map=work_fasta_map

if [ -d $1/$work_fasta_map ]; then
    rm -r $1/$work_fasta_map
fi
mkdir $1/$work_fasta_map

cp $2 $1/$work_fasta_map/$work_fasta #copy query fasta in the working directory

##########################################################################################
# perform BWA-MEM alignment and analyse the alignment
##########################################################################################
cd $1/$work_fasta_map

/usr/local/bin/bwa index $work_fasta
/usr/local/bin/samtools faidx $work_fasta
/usr/local/bin/bwa mem $work_fasta $3 $4 -t 70 -M > aln-pe.sam
#/usr/local/bin/samtools view -@ 70 -q 10 -b -S aln-pe.sam > aln-pe.bam
/usr/local/bin/samtools view -@ 70 -b -S aln-pe.sam > aln-pe.bam
/usr/local/bin/samtools sort -@ 70 aln-pe.bam aln-pe.sorted
#/usr/local/bin/samtools fillmd -b aln-pe.sorted.bam $db > aln-pe.sorted.md.bam
#/usr/local/bin/samtools view -@ 70 aln-pe.sorted.bam | cut -f1,2,3,4,8,5,9 > $work_fasta.txt
/usr/local/bin/samtools fillmd -b aln-pe.sorted.bam $work_fasta > aln-pe.sorted.md.bam
/usr/local/bin/samtools view -@ 70 aln-pe.sorted.md.bam | awk '{print $1,"\t",$2"\t",$3,"\t",$4,"\t",$8,"\t",$5,"\t",$9,"\t",length($10),"\t",$6,"\t",$15}' > $work_fasta.txt
#/usr/local/bin/samtools index aln-pe.sorted.md.bam
#/usr/local/bin/samtools mpileup -f $db aln-pe.sorted.bam > aln-pe.pileup
#/usr/local/bin/samtools idxstats aln-pe.sorted.md.bam > nr_ref_$work_fasta.idxstats.txt

###
python $path_htsa_dir/VirusMeta/SAM_BAM/translate_pysam.py $work_fasta.txt sam_final_$work_fasta.txt unmapped_$work_fasta.txt nr_ref_$work_fasta.txt

cat sam_final_$work_fasta.txt | awk -F"\t" '{if($12 == 0 && $13>=90 && $14>=75) {print $1}}' | sort -k1,1 -T $1 | awk '!x[$1]++' > $work_fasta.ID
awk 'NR==FNR{a[$1];next} !($1 in a) {print $1}' $work_fasta.ID $4 > NON_$work_fasta.ID
awk '{x++}END{ print x}' NON_$work_fasta.ID > nr_unmapped.txt
awk '{x++}END{ print x}' $work_fasta.ID > nr_ref_$work_fasta.awk.txt

cd $project_work_dir/circos_plot_cov
/usr/local/bin/samtools depth $1/$work_fasta_map/aln-pe.sorted.bam > position_coverage.txt

#rm $1/$work_fasta_map/$work_fasta*
#rm $1/$work_fasta_map/$work_fasta
rm $1/$work_fasta_map/*.sam
rm $1/$work_fasta_map/*.bam

##############################
#create circos karyotype file#
##############################
/usr/local/bin/getorf -sequence $2 -outseq ORF.pos -find 3 -minsize 240

#create tiles_orf_forward.txt
grep ">" ORF.pos | awk '{gsub(">","",$0); print $0}' | awk -F"\t" '{gsub("-","",$0); print $0}' | awk -F" " '{if($4 != "(REVERSE") print $0 }' | awk -F"[" '{print $1,$2,$3}' | awk -F"]" '{print $1,$2,$3}' | awk -F" " '{print $1,$2,$3}' | awk -F"_" '{print $1,$2}' | awk -F" " '{print $1,$3,$4}' > data/tiles_orf.txt

cat $2 | awk 'BEGIN{RS=">"}NR>1{sub("\n","\t"); gsub("\n",""); print RS$0}' | awk -F"\t" '{gsub(">","",$1); print "chr - "$1,$1,0,length($2),"chr12"}' > data/virus_genome.txt

########################
#Histogram for coverage#
########################
echo '
position_coverage<-read.table("position_coverage.txt")
#estimate relative coverage
position_coverage$percent_coverage<-position_coverage$V3/max(position_coverage$V3)
#create end position (the same as start)
position_coverage$end_pos<-position_coverage$V2
#arrange columns according to circos format
position_coverage<-position_coverage[,c("V1","V2","end_pos","percent_coverage")]
colnames(position_coverage)<-c("chr","start","end","value")
#now write the file histogram.txt
write.table(position_coverage,"data/histogram.txt", row.names=F, col.names=F, quote=F)
' > histogram_format.R

R CMD BATCH --no-save histogram_format.R

perl $path_htsa_dir/VirusMeta/public_programs/circos-0.64/bin/circos -conf etc/circos.conf
NIASC/VirusMeta
SAM_BAM/circos_plot_cov/circos_pipeline.sh
Shell
gpl-3.0
5,116
#!/bin/bash
cd classes

scala fos.FJ "
class A extends Object {
    Object x1;
    A(Object x1) {
        super();
        this.x1 = x1;
    }
    Object foo(A x1, B x2) {
        return x2;
    }
}
class B extends Object {
    A() {
        super();
    }
}
(new A(new B())).foo(new A(new Object()), new B())
"
sana/WorkAtEPFL
FeatherweightJava/tests/Computation-RInvk.sh
Shell
gpl-3.0
330
#!/bin/sh

set -xe

cd /tmp
mkdir build && cd build
cmake -DCMAKE_INSTALL_PREFIX=~/.local -DCMAKE_CXX_FLAGS="-Wall -Wextra" /io
make install
make tests
ctest --output-on-failure --timeout 100 ${MAKEFLAGS}
openturns/otmixmod
run_docker_build.sh
Shell
gpl-3.0
205
/home/share/user/user101/software/ngsTools/angsd/angsd -gl 1 -anc ancestral -dosaf 1 -only_proper_pairs 1 -uniqueOnly 1 -remove_bads 1 -C 50 -minMapQ 30 -minQ 20 -setMinDepth 35 -setMaxDepth 280 -out pda_e -bam file.list -ref ancestral -P 32 -sites /home/share/user/user101/projects/yangshu/16.angsd/01.regions_without_repeat/01.generate_sites.pl.txt
wk8910/bio_tools
36.reseq_pipeline/05.angsd/01.step1.onePop/01.dosaf.sh
Shell
mpl-2.0
351
#!/bin/bash

i=5
while test $i -gt 0
do
    echo ${i}...
    i=$((i-1))
    sleep 1
done
echo Liftoff.
Marsonge/scripts
script4.sh
Shell
mpl-2.0
97
#!/bin/sh

docker-compose exec -e RAILS_ENV=production imgshr rails c
nning/imgshr
examples/traefik/console.sh
Shell
agpl-3.0
69
#!/bin/bash
#
# packaging.sh  Copyright (C) 2019 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake.  If not, see <http://www.gnu.org/licenses/>

# shellcheck disable=SC1091
. .travis/common.sh

checkcmd pip find zipinfo diff

function finish {
    rm -Rf sources package "$tmp"
}
trap finish EXIT

tmp=$(mktemp -d)

pip wheel --no-deps -w "$tmp" .

find openquake/ -type f ! -name \*.pyc | grep -Ev 'openquake/__init__.py|/tests/|nrml_examples' | sort > sources
zipinfo -1 "$tmp"/openquake.engine*.whl | grep '^openquake/' | grep -v 'nrml_examples' | sort > package

diff -uN sources package
gem/oq-engine
.travis/packaging.sh
Shell
agpl-3.0
1,148
source common.sh

set +x

expect_trace() {
    expr="$1"
    expect="$2"
    actual=$(
        nix-instantiate \
            --trace-function-calls \
            --expr "$expr" 2>&1 \
            | grep "function-trace" \
            | sed -e 's/ [0-9]*$//'
    );

    echo -n "Tracing expression '$expr'"
    set +e
    msg=$(diff -swB \
             <(echo "$expect") \
             <(echo "$actual")
    );
    result=$?
    set -e

    if [ $result -eq 0 ]; then
        echo " ok."
    else
        echo " failed. difference:"
        echo "$msg"
        return $result
    fi
}

# failure inside a tryEval
expect_trace 'builtins.tryEval (throw "example")' "
function-trace entered (string):1:1 at
function-trace entered (string):1:19 at
function-trace exited (string):1:19 at
function-trace exited (string):1:1 at
"

# Missing argument to a formal function
expect_trace '({ x }: x) { }' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
"

# Too many arguments to a formal function
expect_trace '({ x }: x) { x = \"x\"; y = \"y\"; }' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
"

# Not enough arguments to a lambda
expect_trace '(x: y: x + y) 1' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
"

# Too many arguments to a lambda
expect_trace '(x: x) 1 2' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
"

# Not a function
expect_trace '1 2' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
"

set -e
NixOS/nix
tests/function-trace.sh
Shell
lgpl-2.1
1,565
#!/bin/sh case "$1" in dev) export mode="dev" ;; test) export mode="test" ;; *) export mode="prod" ;; esac grails ${mode} drop-postgis-database grails ${mode} create-postgis-database grails ${mode} schema-export ./ddl.sql grails ${mode} run-sql-file ddl.sql grails ${mode} run-sql-file geoms.sql grails ${mode} run-script ./scripts/defaults.groovy #grails ${mode} run-sql-file ../../plugins/omar-scheduler/scripts/tables_postgres.sql
radiantbluetechnologies/omar
apps/geodata-test/setupdb.sh
Shell
lgpl-2.1
437
#!/bin/sh -e

# local variables
CURRENT=$(pwd)
LIBGPG=$CURRENT"/dep/libgpg-error-1.12"
LIBGCRYPT=$CURRENT"/dep/libgcrypt-1.5.2"
LIBTASN=$CURRENT"/dep/libtasn1-3.3"

# check
rm -r tmp || true
rm -r lib || true
rm gcrypt.h || true
rm gcrypt-module.h || true
rm gpg-error.h || true
rm libtasn1.h || true
cd $LIBGPG
rm bin || true
cd $LIBGCRYPT
rm ./src/gpg-error.h || true
cd $CURRENT
mkdir tmp

# Compile libgpg-error
echo " "
echo "*************************************"
echo "* GLS Static library compilation... *"
echo "*************************************"
echo " "
echo " "
echo "#################################"
echo "# Compilation Libgpg-error 1.12 #"
echo "#################################"
cd $LIBGPG
./configure --enable-static=yes --enable-shared=no
make
cd src
cp libgpg_error*.o $CURRENT/tmp/
cd ..
ln -s src bin

# Compile libgcrypt
echo " "
echo "###############################"
echo "# Compilation Libgcrypt 1.5.2 #"
echo "###############################"
cd $LIBGCRYPT
ln -s $LIBGPG/src/gpg-error.h ./src/gpg-error.h
export LDFLAGS="$LDFLAGS -L$LIBGPG/src/.libs"
./configure --with-gpg-error-prefix=$LIBGPG/ --enable-static=yes --enable-shared=no
make
cd src/.libs
ar x libgcrypt.a
cp *.o $CURRENT/tmp/

# Compile libtasn1
echo " "
echo "############################"
echo "# Compilation Libtasn1 3.3 #"
echo "############################"
cd $LIBTASN
./configure --enable-static=yes --enable-shared=no
make
cd lib/gllib
cp *.o $CURRENT/tmp/
cd ..
cp *.o $CURRENT/tmp/

# Compile GLS
echo " "
echo "##########################"
echo "# Compilation GLS Alpha  #"
echo "##########################"
cd $CURRENT
ln -s $LIBGCRYPT/src/gcrypt.h gcrypt.h
ln -s $LIBGCRYPT/src/gcrypt-module.h gcrypt-module.h
ln -s $LIBGPG/src/gpg-error.h gpg-error.h
ln -s $LIBTASN/lib/libtasn1.h libtasn1.h
mkdir lib
gcc -DEAI_ADDRFAMILY=5001 -DEAI_NODATA=5002 -c GLSServer.c -o ./tmp/GLSServer.o
gcc -DEAI_ADDRFAMILY=5001 -DEAI_NODATA=5002 -c GLSSocket.c -o ./tmp/GLSSocket.o
gcc -c Crypto.c -o ./tmp/Crypto.o
gcc -c Certificate.c -o ./tmp/Certificate.o
ar rcs ./lib/libgls.a ./tmp/*.o
cp libgls.h ./lib/
cp $LIBGPG/src/gpg-error.h ./lib/
cp $LIBGCRYPT/src/gcrypt.h ./lib/
cp $LIBGCRYPT/src/gcrypt-module.h ./lib/

# Clean
echo " "
echo "#############"
echo "# Cleaning  #"
echo "#############"
rm -r tmp
rm gcrypt.h
rm gcrypt-module.h
rm gpg-error.h
rm libtasn1.h
cd $LIBGPG
rm bin
make clean
cd $LIBGCRYPT
rm ./src/gpg-error.h
make clean
cd $LIBTASN
make clean

# end
echo " "
echo "***********************************"
echo "* Done ! Your library is in ./lib *"
echo "***********************************"
alvarezgregory/GLS
compileStatic.sh
Shell
lgpl-2.1
2,605
#!/bin/sh -

GIT_ROOT=/opt/data

if [ $# -lt 1 ]; then
    echo "Usage: $0 TAG"
    exit 1
fi

if [ ! -w $GIT_ROOT ]; then
    echo "You don't have enough rights on $GIT_ROOT to execute this script.";
    exit 2
fi

# Note: 'exec' would replace the shell after 'git stash' alone and the
# rest of the chain would never run, so the commands are run directly.
git stash && git pull --all && git checkout $1 && git stash pop
nuxeo/nuxeo.io
tools/upgrade-sources.sh
Shell
lgpl-2.1
277
#!/bin/bash if [[ "$1" = "-h" ]]; then echo Usage: $0 /path/to/encoded/vts /path/to/patched/up/vts exit 0 fi mkdir -p ${2} for a in ${1}/*VOB; do name=$(basename $a) ./make_vob ${a} ${2}/${name} done
lu-zero/dvdtools
patchup.sh
Shell
lgpl-2.1
220
#!/bin/sh
# pancake script to sync a git repo from a mercurial one
# hg-git seems broken as long as git is more restrictive in author names
# so... i just rewrote it from scratch to push commits by blocks

GITDIR=radare2.git
GITPUSH=git+ssh://[email protected]/radare/${GITDIR}
GITPULL=git://github.com/radare/${GITDIR}

controlc() {
    echo "^C ignored.. wait or you will do nasty things"
}
trap controlc 2

getgittip() {
    cd ${GITDIR}
    git log -1|tail -n1 |awk -F 'r2:hg:' '{print $2}'
    cd ..
}

gethgtip() {
    echo $(hg tip | grep changeset: | cut -d : -f 2)
}

if [ ! -d "${GITDIR}" ]; then
    git config --global user.name pancake
    git config --global user.email [email protected]
    git clone ${GITPULL} ${GITDIR}
    [ ! $? = 0 ] && exit 1
else
    cd ${GITDIR}
    git pull ${GITPULL}
    cd ..
fi

GIT_HG_TIP=$(getgittip)
HG_TIP=$(gethgtip)
echo "GIT TIP: ${GIT_HG_TIP}"
echo "HG TIP: ${HG_TIP}"

if [ "${GIT_HG_TIP}" = "${HG_TIP}" ]; then
    echo "Nothing to push"
else
    echo "Preparing hg to git..."
    hg log -v -r ${HG_TIP} -r $((${GIT_HG_TIP}+1)) > /tmp/commitmsg
    echo >> /tmp/commitmsg
    echo "mk/gitpush.sh: imported from r2:hg:${HG_TIP}" >> /tmp/commitmsg
    cd ${GITDIR}
    rm -rf *
    hg clone .. tmpdir
    cp -rf tmpdir/* .
    rm -rf tmpdir
    DELETED=$(git status | grep deleted |cut -d : -f 2)
    git add *
    [ -n "${DELETED}" ] && git rm ${DELETED}
    git commit -F /tmp/commitmsg
    git push ${GITPUSH}
fi
glandium/radare2
mk/gitpush.sh
Shell
lgpl-3.0
1,388
#!/bin/bash

###
# This script builds the project and generates Lider distribution (Lider.tar.gz)
#
# Generated file can be found under /tmp/lider
###
set -e

pushd $(dirname $0) > /dev/null
PRJ_ROOT_PATH=$(dirname $(pwd -P))
popd > /dev/null
echo "Project path: $PRJ_ROOT_PATH"

# Generate Lider packages
echo "Generating Javadoc"
cd "$PRJ_ROOT_PATH"
mvn clean javadoc:aggregate
cd "$PRJ_ROOT_PATH"/target/site
tar -zcf lider-console-javadoc.tar.gz apidocs
cd "$PRJ_ROOT_PATH"
echo "Generated Javadoc"

EXPORT_PATH=/tmp/lider-console
echo "Export path: $EXPORT_PATH"

# Copy resulting files
echo "Copying generated Javadoc to $EXPORT_PATH..."
mkdir -p "$EXPORT_PATH"
mv -f "$PRJ_ROOT_PATH"/target/site/lider-console-javadoc.tar.gz "$EXPORT_PATH"
echo "Copied generated Javadoc."

echo "Operation finished successfully!"
echo "Files can be found under: $EXPORT_PATH"
Agem-Bilisim/lider-console
scripts/generate-javadoc.sh
Shell
lgpl-3.0
867
#!/bin/bash
#requires the dpid and resourceId {$1}/{$2}

echo "Mapping openflow switches to OpenNaaS resource."
response=$(curl -X GET http://admin:123456@localhost:8888/opennaas/vrf/routing/switchMapping)
echo $response
dana-i2cat/opennaas-routing-nfv
autoMapSwDevices.sh
Shell
lgpl-3.0
220
#!/usr/bin/env bash

cd dspot && mvn clean test jacoco:report coveralls:report
danzone/dspot
.travis/travis-coverage.sh
Shell
lgpl-3.0
79
#!/bin/bash

## Identify regions outside of the genic features
## including 5'UTR, 3'UTR, TSS (1kb region centered on 5'UTR start),
## fragments (unique exonic regions), and introns

## Get bed file of full genome (use chrom.sizes)
awk '{print $1"\t"0"\t"$2}' ${CHROM} > ${ROZ}/${SPECIES}.chrom.sizes.bed

## Combine coordinates of unique features (5'UTR,3'UTR,TSS1kbWindow,TSS300bpWindow,fragments,fusions,intronic)
cat ${FEATuniq}/*_unique.bed ${FRAGMENT} ${FUSION} ${INTRON} | \
    sort -k1,1 -k2,2n > ${FEATuniq}/${SPECIES}_all_features.bed

## Get intergenic regions using bedtools
bedtools subtract \
    -a ${ROZ}/${SPECIES}.chrom.sizes.bed \
    -b ${FEATuniq}/${SPECIES}_all_features.bed \
    > ${ROZ}/temp_${SPECIES}_intergenic.bed

## Add intergenic featureIDs
## Require a region length >50bp - regions <=50bp are placed in another file in roz
awk '$3-$2>50{print $1"\t"$2"\t"$3"\tintergenic_"$1"_"$2"_"$3}' \
    ${ROZ}/temp_${SPECIES}_intergenic.bed \
    > ${FEATuniq}/${SPECIES}_intergenic.bed

awk '$3-$2<=50{print $1"\t"$2"\t"$3"\tintergenic_"$1"_"$2"_"$3}' \
    ${ROZ}/temp_${SPECIES}_intergenic.bed \
    > ${ROZ}/${SPECIES}_intergenic_LE_50.bed

## Remove temporary files
rm ${ROZ}/temp_${SPECIES}_intergenic.bed
rm ${ROZ}/${SPECIES}.chrom.sizes.bed
McIntyre-Lab/papers
nanni_chip_rna_2022/scripts/annotation/intergenic_03avn.sh
Shell
lgpl-3.0
1,267
#!/bin/bash
# -*- coding:utf-8-unix; mode:sh; -*-

set -u
cd $(dirname "$0")

export PATH=/usr/local/bin:/usr/bin:/bin

BASE_DIR="/var/share/tv_recorded"
TMP="${TMP-/tmp/av-archive}"
SPLIT="./split-ts.sh"
CONVERT="./convert-mp4.sh"

main() {
    local m2ts
    for m2ts in "$BASE_DIR"{,/**}/*.m2ts; do
        if [[ ! -e "$m2ts" ]]
        then continue
        fi

        local new_m2ts="$(normalize_file_name "$m2ts")"
        if [[ "$m2ts" != "$new_m2ts" ]]; then
            mv -v "$m2ts" "$new_m2ts"
            m2ts="$new_m2ts"
        fi

        local mp4="$(get_mp4_name "$m2ts")"
        local log="$(get_log_name "$m2ts")"
        if [[ -e "$mp4" ]]
        then continue
        fi
        if [[ -e "$log" ]]
        then continue
        fi

        archive "$m2ts" 2>&1 | tee -a "$log"
        if [[ ${PIPESTATUS[0]} -ne 0 ]]
        then break
        fi
        rm "$log"
    done
    rm -frv "$TMP"
}

normalize_file_name() {
    sed 's/〜/~/g' <<<"$*" | \
        sed 's/⁉/!?/g' | \
        sed 's/‼/!!/g' | \
        sed 's/[♯#]/#/g' | \
        sed 's/♥//g'
}

archive() {
    echo "##"
    echo "## archive $1"
    echo "##"
    local m2ts="$1"
    local mp4="$(get_mp4_name "$m2ts")"
    local log="$(get_log_name "$m2ts")"
    local tmpdir="$TMP/$(head -c 7 /dev/urandom | xxd -p)"

    mkdir -pv "$tmpdir"

    # wait some micro secs each file read
    time "$SPLIT" -OUT "$tmpdir" -WAIT,3 "$m2ts"
    ls -S "$tmpdir"/* | tail -n +2 | xargs rm -fv

    local extracted="$(ls -S "$tmpdir"/* | head -n1)"
    time "$CONVERT" "$extracted" "$mp4"

    rm -frv "$tmpdir"
}

get_mp4_name() {
    printf "$(prefix "$1").mp4"
}

get_log_name() {
    printf "$(prefix "$1").log"
}

prefix() {
    printf "${1%.*}"
}

main
kui/dotfiles
bin/archive-ts.sh
Shell
unlicense
1,738
#!/bin/bash

readonly program="${1}"
shift

i=0
for arg in "${@}"; do
    if [[ -e "${arg}" ]]; then
        arg="$(cygpath -w "${arg}")"
    fi
    args["${i}"]="${arg}"
    ((i++))
done

"${program}" "${args[@]}"
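
# Usage sketch (hypothetical paths): any argument that names an existing
# file is converted to a Windows path via cygpath before the program
# sees it, e.g.:
#   ./cygcmd.sh notepad.exe /home/me/notes.txt
#   # runs roughly: notepad.exe "C:\cygwin\home\me\notes.txt"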
ntsuji/cygcmd
cygcmd.sh
Shell
unlicense
200
#! /bin/bash

function print_success {
    arg1=$1
    printf "%s \e[0;32m[✔]\e[0m\n" "${arg1}"
}

function print_success_bold {
    arg1=$1
    bold=$(tput bold)
    printf "\n\n${bold}\e[0;32m%s\e[0m" "${arg1}"
    printf "\e[0;32m..........................................\e[0m"
    printf "\e[0;32m[✔]\e[0m\n"
}

function fancy_print {
    arg1=$1
    bold=$(tput bold)
    printf "\n\n${bold}%s...............\n\n" "${arg1}"
}
lfiolhais/dotfiles
script/utilities.sh
Shell
unlicense
416
#!/bin/bash -x

#
# Generated - do not edit!
#

# Macros
TOP=`pwd`
CND_PLATFORM=MinGW-Windows
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dll
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libesx-reader.a
OUTPUT_BASENAME=libesx-reader.a
PACKAGE_TOP_DIR=ESx-Reader/

# Functions
function checkReturnCode
{
    rc=$?
    if [ $rc != 0 ]
    then
        exit $rc
    fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
    mkdir -p "$1"
    checkReturnCode
    if [ "$2" != "" ]
    then
        chmod $2 "$1"
        checkReturnCode
    fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
    cp "$1" "$2"
    checkReturnCode
    if [ "$3" != "" ]
    then
        chmod $3 "$2"
        checkReturnCode
    fi
}

# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}

# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/ESx-Reader/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644

# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/ESx-Reader.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/ESx-Reader.tar *
checkReturnCode

# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
Koncord/ESx-Reader
nbproject/Package-Release.bash
Shell
apache-2.0
1,474
#!/usr/bin/env bash

if [[ ! -x "../${PWD##*/}/gradlew" ]]; then
    echo "Please run me from the WebdavBulkDeleter Git repo"
    exit 1
fi

type -p java &>/dev/null
if (( $? != 0 )); then
    echo "No \"java\" command in path - please add the java JRE or JDK \"bin\" directory to your path variable"
    exit 1
fi

binary="build/libs/WebdavBulkDeleter-all.jar"
if [[ ! -f "$binary" ]]; then
    ./gradlew shadowJar
fi

java -Dlog4j.configuration=log4j.properties -jar "$binary" $@
ftclausen/WebdavBulkDeleter
webdavbulkdeleter.sh
Shell
apache-2.0
483
docker build -t tstromberg/chrome .
tstromberg/dockerfiles
chrome/build.sh
Shell
apache-2.0
36
#!/bin/sh

_LS_SOURCE=$(dirname "${0}")
if [[ "${_LS_SOURCE}" = "." ]]; then
    _LS_SOURCE=$(dirname "${PWD}")
elif [[ -z "$(echo ${_LS_SOURCE} | grep "^/" 2> /dev/null)" ]]; then
    _LS_SOURCE="${PWD}"
else
    _LS_SOURCE=$(dirname "${_LS_SOURCE}")
fi
_LS_SOURCE=$(dirname "${_LS_SOURCE}")
_LS_SOURCE=$(dirname "${_LS_SOURCE}")
LS_SOURCE=$(dirname "${_LS_SOURCE}")
export LS_SOURCE

## DISTRO SPECIFIC PART
LS_DISTRO="Solaris"
# lightstreamer is too long
LS_GROUP="lights"
LS_USER="lights"
LS_USERADD_HOME=/
export LS_DISTRO LS_GROUP LS_USER LS_USERADD_HOME
## END: DISTRO SPECIFIC PART

. "${LS_SOURCE}/bin/unix-like/install/common.inc"

# Override defaults in common.inc
SOURCE_INIT_DIR="${LS_SOURCE}/bin/unix-like/install/${LS_DISTRO}/init"
INIT_PATH="/lib/svc/method/http-lightstreamer"
MANIFEST_PATH="/lib/svc/manifest/network/http-lightstreamer.xml"
INIT_DIR=$(basename "${INIT_PATH}")

copy_init_script() {
    local manifest_name=$(basename "${MANIFEST_PATH}")
    local method_name=$(basename "${INIT_PATH}")
    cp -p "${SOURCE_INIT_DIR}/${manifest_name}" "${MANIFEST_PATH}" || return 1
    cp -p "${SOURCE_INIT_DIR}/${method_name}" "${INIT_PATH}" || return 1
}

setup_init_script_perms() {
    chmod 555 "${INIT_PATH}" || return 1
    chmod 444 "${MANIFEST_PATH}" || return 1
}

add_service() {
    if [ "${LS_ADD_SERVICE}" != "0" ]; then
        local ls_serv="svc:/network/http:lightstreamer"
        svcadm restart "svc:/system/manifest-import" "${ls_serv}" &> /dev/null
        sleep 2
        svcadm enable "${ls_serv}" &> /dev/null
        echo "Init script installed at: ${INIT_PATH}"
        echo "It is set to automatically start at boot (and is also now online)"
        echo "You can restart Lightstreamer Server using:"
        echo "    svcadm restart ${ls_serv}"
    fi
    return 0
}

show_intro || exit 1

echo "Installing to ${LS_DESTDIR}..."
setup_user_group && \
    copy_to_destdir && \
    setup_init_script && \
    add_service
asirnayeef23/fsm
bin/unix-like/install/Solaris/install.sh
Shell
apache-2.0
1,963
#!/bin/bash -e

RELEASE_DIR=`git rev-parse --show-toplevel`
cd $RELEASE_DIR

DEPLOYMENT=$1
if [[ -z $DEPLOYMENT ]]; then
    DEPLOYMENT=templates/bosh-lite/bosh-lite.yml
fi
echo "Using stub $DEPLOYMENT"

mkdir -p tmp

DEPLOYMENT_FILE=`basename $DEPLOYMENT`
DEPLOYMENT_ENV=`basename $(dirname $DEPLOYMENT)`
MANIFEST_FILE=tmp/${DEPLOYMENT_FILE/stub/$DEPLOYMENT_ENV}

./scripts/make_manifest_spiff.sh $DEPLOYMENT $MANIFEST_FILE

sed -i '' '/persistent_disk:/d' $MANIFEST_FILE
apoydence/logradile
scripts/make_manifest_bosh_lite.sh
Shell
apache-2.0
471
#!/bin/bash

# 1) Shutdown Solr.
/home/$USER/solr_tomcat/apache-tomcat-7.0.32/bin/shutdown.sh

# 2) Move the existing data and conf into a sub-dir
mkdir /home/$USER/solr_tomcat/apache-solr-config/stories
mv /home/$USER/solr_tomcat/apache-solr-config/conf /home/$USER/solr_tomcat/apache-solr-config/stories
mv /home/$USER/solr_tomcat/apache-solr-config/data /home/$USER/solr_tomcat/apache-solr-config/stories

# 3) Create new directory for collections core
mkdir -p /home/$USER/solr_tomcat/apache-solr-config/collections/data
mkdir -p /home/$USER/solr_tomcat/apache-solr-config/collections/conf

# 4) Pre-populate configuration with existing config
cp -r /home/$USER/solr_tomcat/apache-solr-config/stories/conf/* /home/$USER/solr_tomcat/apache-solr-config/collections/conf

# 5) Update the base solr.xml file
#    (paths use $USER for consistency with the steps above; the heredoc
#    delimiter is quoted, so the XML body itself is written verbatim)
cp /home/$USER/solr_tomcat/apache-solr-config/solr.xml /home/$USER/solr_tomcat/apache-solr-config/solr.xml.orig-single-core
cat << 'EOF' > /home/$USER/solr_tomcat/apache-solr-config/solr.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements.  See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License.  You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-->

<!--
 This is an example of a simple "solr.xml" file for configuring one or
 more Solr Cores, as well as allowing Cores to be added, removed, and
 reloaded via HTTP requests.

 More information about options available in this configuration file,
 and Solr Core administration can be found online:
 http://wiki.apache.org/solr/CoreAdmin
-->

<!--
 All (relative) paths are relative to the installation path

 persistent: Save changes made via the API to this file
 sharedLib: path to a lib directory that will be shared across all cores
-->
<solr persistent="false">

  <!--
  adminPath: RequestHandler path to manage cores.
  If 'null' (or absent), cores will not be manageable via request handler
  -->
  <cores adminPath="/admin/cores" defaultCoreName="collection1">
    <core name="stories" instanceDir="./stories" />
    <core name="collections" instanceDir="./collections" />
  </cores>
</solr>
EOF

# 6) Run the reindex to get the new core initialized
/home/$USER/main/tool/start_solr.sh
/home/$USER/main/tool/reindex.sh
stori-es/stori_es
tool/update_solr.sh
Shell
apache-2.0
2,856
#!/bin/sh

export DEPLIB=target/dependency
export JAVA_HOME=/Library/Java/JavaVirtualMachines/1.7.0.jdk/Contents/Home
export JVMFLAGS="-server -Dgameserver -XX:+HeapDumpOnOutOfMemoryError -XX:MaxPermSize=256m -mx1024m -Dusefakesender=false"
export JVMFLAGS="-server"
export GSBINDIR=/Users/wangqi/disk/projects/snsgames/babywar/server/deploy
export GSMAIN="com.xinqihd.sns.gameserver.bootstrap.Bootstrap"
export PORT=3443
export HTTPPORT=8080
export ZOO=192.168.0.77

#$JAVA_HOME/bin/java $JVMFLAGS -Dfile.encoding=utf8 -cp target/bootstrap.jar com.xinqihd.sns.gameserver.bootstrap.Bootstrap -t zoo -h 192.168.0.77 -p 3443 -http 8080 -httphost 192.168.0.77 -d ../deploy/data -s ../deploy/script -u ../babywarserver/target/classes
$JAVA_HOME/bin/java $JVMFLAGS -Duserdir=../deploy -Dconfigdir=. -Dfile.encoding=utf8 -cp target/bootstrap.jar com.xinqihd.sns.gameserver.bootstrap.Bootstrap -t zoo -h 192.168.0.77 -p 3443 -http 8080 -httphost 192.168.0.77 -d ../deploy/data -s ../deploy/script -u ../deploy/babywarserver.jar
wangqi/gameserver
bootstrap/startboot.sh
Shell
apache-2.0
1,021
# WARNING: This file is managed by Salt.
#          Modify sources to keep changes persistent.
###############################################################################

export EDITOR="vim"

###############################################################################

{% if grains['os_platform_type'].startswith('rhel5') %}
# Add `/sbin` and `/usr/sbin` to `PATH` for commands
# like `ip`, `service`, `tcpdump`, etc.
export PATH="${PATH}:/sbin:/usr/sbin"
{% endif %}

###############################################################################

# Set variable to indicate which pillar profile is used.
{% set profile_name = pillar['properties']['profile_name'] %}
export SALT_PROFILE_NAME="{{ profile_name }}"

###############################################################################

{% set proxy_config = pillar['system_features']['external_http_proxy'] %}
{% if proxy_config['feature_enabled'] %}
# Proxy settings:
# TODO: Use `secret_id` from `system_secrets` for `password_value`.
export http_proxy='{{ proxy_config['proxy_url_schema'] }}{{ proxy_config['proxy_username'] }}:{{ pillar['system_secrets'][ proxy_config['proxy_password_secret_id'] ] }}@{{ proxy_config['proxy_url_hostname'] }}:{{ proxy_config['proxy_url_port'] }}/'
export https_proxy="${http_proxy}"
{% endif %}

###############################################################################

{% if pillar['system_features']['assign_DISPLAY_environment_variable'] %}

# Use role's host (which should be part of DNS or any host resolution method).
# If current minion is among assigned hosts for `primary_console_role`,
# use only `:0.0`.
{% if grains['id'] in pillar['system_host_roles']['primary_console_role']['assigned_hosts'] %}
{% set x_display_server = '' %}
{% else %}
{% set x_display_server = pillar['system_host_roles']['primary_console_role']['hostname'] %}
{% endif %}

if [ -n "$DISPLAY" ]
then
    # If stdin is not tty, do not print anything.
    if [ -t 1 ]
    then
        # Avoid setting DISPLAY if you use SSH.
        # Use automatic X forwarding for SSH instead.
        echo -n "Reusing: DISPLAY=$DISPLAY " 1>&2
        echo "SSH X port forwarding." 1>&2
    fi
else
    export DISPLAY="{{ x_display_server }}:0.0"
    # If stdin is not tty, do not print anything.
    if [ -t 1 ]
    then
        echo -n "Setting: DISPLAY=$DISPLAY " 1>&2
        # Display hint only if `x_display_server` contains any hostname.
        if [ -n '{{ x_display_server }}' ]
        then
            echo "If \`{{ x_display_server }}\` is not resolvable, set IP address in \`/etc/hosts\`." 1>&2
        else
            echo "Using local graphical environment." 1>&2
        fi
    fi
fi

{% endif %}

###############################################################################

# Add timestamps to bash history.
export HISTTIMEFORMAT="%y-%m-%dT%T "

###############################################################################
# EOF
###############################################################################
uvsmtid/common-salt-states
states/common/shell/variables/common.custom.variables.sh
Shell
apache-2.0
3,023
#!/bin/bash -eux

yum -y erase gtk2 libX11 hicolor-icon-theme avahi freetype bitstream-vera-fonts
yum -y clean all
rm -rf VBoxGuestAdditions_*.iso
chef/basebox
.centos/cleanup.sh
Shell
apache-2.0
146
tclsh graph_size.tcl metis_flash_4GB_i26.log metis_15000_RAID1_4GB_i26.log kredit_RAID_32GB_i26.log pc990_2GB_i26.log appetizer
tclsh graph_size.tcl metis_flash_8GB_i26.log metis_flash_4GB_i26.log metis_flash_2GB_i26.log metis_15000_RAID1_8GB_i26.log metis_15000_RAID1_4GB_i26.log metis_15000_RAID1_2GB_i26.log SSD_RAM
tclsh graph_size.tcl metis_flash_8GB_i26.log metis_flash_4GB_i26.log metis_15000_RAID1_8GB_i26.log metis_15000_RAID1_4GB_i26.log pc990_2GB_i26.log appetizer

# Index-size is for 4GB RAM
gnuplot index_size.pl

tclsh graph_size.tcl metis_flash_2GB_i9_1.log metis_flash_2GB_i9_2.log metis_flash_2GB_i9_1s.log metis_flash_2GB_i9_2s.log SSD_Simultaneous

# Threading
tclsh graph_size.tcl metis_15000_RAID1_8GB_i37_t1.log metis_15000_RAID1_8GB_i37_t2.log metis_15000_RAID1_8GB_i37_t4.log harddisk_threading
tclsh graph_size.tcl metis_flash_RAID0_8GB_i37_t1.log metis_flash_RAID0_8GB_i37_t2.log metis_flash_RAID0_8GB_i37_t4.log ssd_threading

# Multiple searchers
tclsh graph_size.tcl -ymax 500 metis_flash_RAID0_8GB_i37_t1_l21.log metis_flash_RAID0_8GB_i37_t2_l21.log metis_flash_RAID0_8GB_i37_t3_l21.log metis_flash_RAID0_8GB_i37_t4_l21.log metis_15000_RAID1_8GB_i37_t1_l21.log metis_15000_RAID1_8GB_i37_t2_l21.log metis_15000_RAID1_8GB_i37_t3_l21.log metis_15000_RAID1_8GB_i37_t4_l21.log threads_shared_searcher_21
tclsh graph_size.tcl -ymax 500 metis_15000_RAID1_8GB_i37_t1_l21.log metis_15000_RAID1_8GB_i37_t2u_l21.log metis_15000_RAID1_8GB_i37_t3u_l21.log metis_15000_RAID1_8GB_i37_t4u_l21.log threads_harddisk_unique_21
tclsh graph_size.tcl -ymax 500 metis_flash_RAID0_8GB_i37_t1_l21.log metis_flash_RAID0_8GB_i37_t2u_l21.log metis_flash_RAID0_8GB_i37_t3u_l21.log metis_flash_RAID0_8GB_i37_t4u_l21.log threads_ssd_unique_21
tclsh graph_size.tcl -ymax 500 metis_flash_RAID0_8GB_i37_t1_l22.log metis_flash_RAID0_8GB_i37_t2_l22.log metis_flash_RAID0_8GB_i37_t3_l22.log metis_flash_RAID0_8GB_i37_t4_l22.log metis_15000_RAID1_8GB_i37_t1_l22.log metis_15000_RAID1_8GB_i37_t2_l22.log metis_15000_RAID1_8GB_i37_t3_l22.log metis_15000_RAID1_8GB_i37_t4_l22.log threads_shared_searcher_22
tclsh graph_size.tcl -ymax 500 metis_15000_RAID1_8GB_i37_t1_l22.log metis_15000_RAID1_8GB_i37_t2u_l22.log metis_15000_RAID1_8GB_i37_t3u_l22.log metis_15000_RAID1_8GB_i37_t4u_l22.log threads_harddisk_unique_22
tclsh graph_size.tcl -ymax 500 metis_flash_RAID0_8GB_i37_t1_l22.log metis_flash_RAID0_8GB_i37_t2u_l22.log metis_flash_RAID0_8GB_i37_t3u_l22.log metis_flash_RAID0_8GB_i37_t4u_l22.log threads_ssd_unique_22
tclsh graph_size.tcl -ymax 500 metis_flash_RAID0_8GB_i37_t1_l23.log metis_flash_RAID0_8GB_i37_t2_l23.log metis_flash_RAID0_8GB_i37_t3_l23.log metis_flash_RAID0_8GB_i37_t4_l23.log metis_15000_RAID1_8GB_i37_t1_l23.log metis_15000_RAID1_8GB_i37_t2_l23.log metis_15000_RAID1_8GB_i37_t3_l23.log metis_15000_RAID1_8GB_i37_t4_l23.log threads_shared_searcher_23
tclsh graph_size.tcl -ymax 500 metis_15000_RAID1_8GB_i37_t1_l23.log metis_15000_RAID1_8GB_i37_t2u_l23.log metis_15000_RAID1_8GB_i37_t3u_l23.log metis_15000_RAID1_8GB_i37_t4u_l23.log threads_harddisk_unique_23
tclsh graph_size.tcl -ymax 500 metis_flash_RAID0_8GB_i37_t1_l23.log metis_flash_RAID0_8GB_i37_t2u_l23.log metis_flash_RAID0_8GB_i37_t3u_l23.log metis_flash_RAID0_8GB_i37_t4u_l23.log threads_ssd_unique_23

# Lucene versions
tclsh graph_size.tcl -ymax 1000 metis_flash_RAID0_8GB_i37_t2_l21.log metis_flash_RAID0_8GB_i37_t2_l22.log metis_flash_RAID0_8GB_i37_t2_l23.log metis_flash_RAID0_8GB_i37_t3_l21.log metis_flash_RAID0_8GB_i37_t3_l22.log metis_flash_RAID0_8GB_i37_t3_l23.log metis_flash_RAID0_8GB_i37_t3u_l21.log metis_flash_RAID0_8GB_i37_t3u_l22.log metis_flash_RAID0_8GB_i37_t3u_l23.log versions_ssd
tclsh graph_size.tcl -ymax 1000 metis_15000_RAID1_8GB_i37_t2_l21.log metis_15000_RAID1_8GB_i37_t2_l22.log metis_15000_RAID1_8GB_i37_t2_l23.log metis_15000_RAID1_8GB_i37_t3_l21.log metis_15000_RAID1_8GB_i37_t3_l22.log metis_15000_RAID1_8GB_i37_t3_l23.log metis_15000_RAID1_8GB_i37_t3u_l21.log metis_15000_RAID1_8GB_i37_t3u_l22.log metis_15000_RAID1_8GB_i37_t3u_l23.log versions_harddisk

# Warmup
tclsh graph_size.tcl -xmin 1 -xmax 340000 -ymax 1000 -xlogscale -smooth false metis_15000_RAID1_8GB_i37_t2_l21.log metis_15000_RAID1_8GB_i37_t3u_l21.log metis_flash_RAID0_8GB_i37_t2_l21.log metis_flash_RAID0_8GB_i37_t3u_l21.log warming_21
tclsh graph_size.tcl -xmin 1 -xmax 340000 -ymax 1000 -xlogscale -smooth false metis_15000_RAID1_8GB_i37_t2_l23.log metis_15000_RAID1_8GB_i37_t3u_l23.log metis_flash_RAID0_8GB_i37_t2_l23.log metis_flash_RAID0_8GB_i37_t3u_l23.log warming_23
statsbiblioteket/summa
Core/scripts/performance/report_graphs.sh
Shell
apache-2.0
4,577
#!/bin/bash
export JAVA_OPTS="-XX:+CMSClassUnloadingEnabled -XX:MaxMetaspaceSize=1G -XX:MetaspaceSize=256M -Xms8G -Xmx8G"
sbt
dnvriend/activiti-test
helloworld/sbt-test.sh
Shell
apache-2.0
126
#!/bin/bash
# Hard Drive Burn-In Testing
# As seen here:
# https://forums.freenas.org/index.php?threads/how-to-hard-drive-burn-in-testing.21451/

# Isolate the available disks
camcontrol devlist

# Isolate the names of the disks
#TODO: extract the adax device
camcontrol devlist | awk '{print $10}'

# Run short tests
smartctl -t short /dev/ada0
smartctl -t short /dev/ada1
smartctl -t short /dev/ada2
smartctl -t short /dev/ada3
smartctl -t short /dev/ada4
smartctl -t short /dev/ada5

# Run conveyance tests
smartctl -t conveyance /dev/ada0
smartctl -t conveyance /dev/ada1
smartctl -t conveyance /dev/ada2
smartctl -t conveyance /dev/ada3
smartctl -t conveyance /dev/ada4
smartctl -t conveyance /dev/ada5

# Run long tests
smartctl -t long /dev/ada0
smartctl -t long /dev/ada1
smartctl -t long /dev/ada2
smartctl -t long /dev/ada3
smartctl -t long /dev/ada4
smartctl -t long /dev/ada5

# Check the results
smartctl -a /dev/ada0
smartctl -a /dev/ada1
smartctl -a /dev/ada2
smartctl -a /dev/ada3
smartctl -a /dev/ada4
smartctl -a /dev/ada5

# Enable kernel geometry flags
sysctl kern.geom.debugflags=0x10

# Run the badblocks test
badblocks -b 4096 -ws /dev/ada0
badblocks -b 4096 -ws /dev/ada1
badblocks -b 4096 -ws /dev/ada2
badblocks -b 4096 -ws /dev/ada3
badblocks -b 4096 -ws /dev/ada4
badblocks -b 4096 -ws /dev/ada5
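
# WARNING: badblocks -ws runs a destructive write-mode test and will
# erase everything on the listed drives; run it only on disks that do
# not yet hold any data.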
anonymuse/stowery
smart_check.sh
Shell
apache-2.0
1,328
#!/bin/bash
#
# -- Run as:  ./run_intensity_range_test_evenness_scaling.sh 1> test_output/stdout.txt 2> test_output/stderr.txt
#

errecho(){ >&2 echo $@; }

# Declare test ranges:
declare -a range_intensities=$(seq 0.25 0.25 5) #("1.25") #$(seq 3 90) #e.g. ("44" "45")

# Declare test output location:
result_set="test_output/Intensity_Results_RangeTest_"$(date -d "today" +"%Y-%m-%d_%H")"-00"
relative_result_path_from_run="exp/Evenness_Scaling/"

# Specify properties file location(s).
default_properties_filename="../../properties/default.properties"
pre_default_properties_filename=$default_properties_filename".pre-"${result_set//\//-}
post_default_properties_filename=$default_properties_filename".post-"${result_set//\//-}

function pre_test_file_organisation(){
    # Prep File Organisation & properties file backup:
    mkdir -p $result_set
    cp -n $default_properties_filename $pre_default_properties_filename
}

function post_test_file_organisation(){
    # Backup the finished properties file into $post_default_properties_filename. (ensure there's a backup)
    cp -b $default_properties_filename $post_default_properties_filename
    # Restore the original properties file: (force)
    cp -f $pre_default_properties_filename $default_properties_filename
}

function update_properties_file_output_directory(){
    # Update the run.py evaluation results directory, by modifying the default.properties file:
    python helpers/update_default-properties_file.py "DO_OVERWRITE" $default_properties_filename "results_file.results_output_file_path_prefix=" "$relative_result_path_from_run""$result_set""/"
}

function run_range_test(){
    for n in $range_intensities
    do
        ./run_intensity_evenness_scaling_trial.sh $n

        if [ $? -ne 0 ]; then
            errecho "";
            errecho " -------------- "
            errecho "run_intensity_evenness_scaling_trial.sh failed."
            errecho "Exiting."
            exit 1;
        fi
    done
}

function main(){
    pre_test_file_organisation
    update_properties_file_output_directory
    run_range_test
    post_test_file_organisation
}

main
LightStage-Aber/LightStage-Repo
exp/Evenness_Scaling/run_intensity_range_test_evenness_scaling.sh
Shell
apache-2.0
2,115
#!/bin/bash -eu
# file: mw.sh

function init {
    bundle
    rake db:schema:load
    rake db:migrate
    rake fake_data
}

function run {
    sv exit watson # no need for this on here
    exec rails server
}

command=$1

cd /usr/src/service

if [ "$command" = "init" ]; then
    init
elif [ "$command" = "run" ]; then
    run
else
    echo "unknown command (name: $command)"
    exit 1
fi
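
# Example (hypothetical container usage):
#   ./mw.sh init   # load the schema, migrate, and seed fake data
#   ./mw.sh run    # stop the watson service and exec the Rails server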
datawire/microwizard
src/lobsters/microwizard/mw.sh
Shell
apache-2.0
369
node pollerHost.js --site 9E7A --site C877
KRMAssociatesInc/eHMP
ehmp/product/production/osync/run-pollerHost.sh
Shell
apache-2.0
42
# Retrieve index settings
curl -XGET "http://localhost:9200/conference/_settings?pretty"

# Create index with single shard
curl -XPUT "http://localhost:9200/single_shard_index/" -d'
{
    "settings": {
        "number_of_shards": 1
    }
}'

# Adjust number of replicas
curl -XPUT "http://localhost:9200/single_shard_index/_settings" -d'
{
    "settings": {
        "number_of_replicas": 2
    }
}'

# Install Kopf (this is an older version)
bin/plugin --install lmenezes/elasticsearch-kopf/1.2

# Adjust number of replicas
curl -XPUT "http://localhost:9200/conference/_settings" -d'
{
    "settings": {
        "number_of_replicas": 2
    }
}'

# Reset number of replicas
curl -XPUT "http://localhost:9200/conference/_settings" -d'
{
    "settings": {
        "number_of_replicas": 1
    }
}'

# Wait for cluster health yellow
curl -XGET 'http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=60s'

# Show current recovery
curl -XGET "http://localhost:9200/_recovery?pretty=true"

# Add tag to index setting
curl -XPUT "http://localhost:9200/conference/_settings" -d'
{
    "index.routing.allocation.include.tag" : "strong-machine"
}'

# Prefer local shards
curl -XGET "http://localhost:9200/conference/talk/_search?q=elasticsearch&preference=_local"

# Add document with routing
curl -XPOST "http://localhost:9200/conference/talk/1?routing=Karlsruhe" -d'
{
    "title": "Search-Driven Applications",
    "conference": {
        "name": "Entwicklertag",
        "city": "Karlsruhe"
    }
}'

# Search with routing
curl -XPOST "http://localhost:9200/conference/talk/_search?routing=Karlsruhe" -d'
{
    "query": {
        "filtered": {
            "query": {
                "match": {
                    "title": "search"
                }
            },
            "filter": {
                "term": {
                    "conference.city": "karlsruhe"
                }
            }
        }
    }
}'

# Retrieve cluster state
curl -XGET "http://localhost:9200/_cluster/state?pretty"

# Update minimum master nodes
curl -XPUT "http://localhost:9200/_cluster/settings" -d'
{
    "persistent": {
        "discovery.zen.minimum_master_nodes": 3
    }
}'

# Index with consistency setting
curl -XPOST "http://localhost:9200/conference2/talk?consistency=one" -d'
{
    "title": "What to do when there are not enough shards"
}'
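
# Note: these commands target an Elasticsearch 1.x-era API; "filtered"
# queries, site plugins such as kopf, and the "consistency" parameter
# were replaced or removed in later major versions.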
fhopf/elasticsearch-book-examples
06.sh
Shell
apache-2.0
2,313
#!/bin/csh
# Author James Dempsey
# Date 27 Jun 2016

# Calibrate the data using
# 1934-638 as the flux and bandpass cal and
# 0727-115, 0823-500, 1049-53 as the phase cals.

# Process the flux and bandpass cal
mfcal vis=1934-638.1420 options=interpolate
gpcal vis=1934-638.1420 options=xyvary

# Process each phase cal
foreach calnam (0727-115 0823-500 1049-53)
#foreach srcnam (347.817+0.018 347.902+0.052 348.195+0.768)
    echo "##--## Processing phase cal ${calnam} ##--##"
    gpcopy vis=1934-638.1420 out=${calnam}.1420
    gpcal vis=${calnam}.1420 options=xyvary,qusolv
    gpboot vis=${calnam}.1420 cal=1934-638.1420
    mfboot vis=${calnam}.1420,1934-638.1420 "select=source(1934-638)"

    echo "#### Validation ####"
    uvflux vis=1934-638.1420 stokes=i,q,u,v
    uvflux vis=${calnam}.1420 stokes=i,q,u,v
    uvplt vis=${calnam}.1420 stokes=i,q,u,v axis=real,imag options=equal,nobase device=/xs
    echo "gpplt vis=${calnam}.1420 device=/xs yaxis=phase options=xygains"
end
jd-au/magmo-HI
prototypes/calibrate.sh
Shell
apache-2.0
971
#!/usr/bin/env bash

sudo docker cp ../generated/skysail.app.bookmarks.jar skysail-server:/home/skysail/products/demo/plugins/skysail.app.bookmarks.jar
evandor/skysail-core
skysail.app.bookmarks/deploy/deploy2skysail-server.sh
Shell
apache-2.0
152
#!/usr/bin/env bash
set -eu

dotnet restore
dotnet build

# Tests
r-ralph/Diamond
build.sh
Shell
apache-2.0
65
#!/bin/bash

function usage {
    echo "Deploy Queue application to cluster."
    echo "Usage: $0 -m host:port -t tag"
    echo "    host:port : Marathon REST API endpoint for discovery"
    echo "    tag       : Docker image tag"
}

host_port=""
tag=""

# Optional arguments
OPTIND=1
while getopts "m:t:h:" opt; do
    case $opt in
        m)
            host_port=$OPTARG
            ;;
        t)
            tag=$OPTARG
            ;;
        h)
            usage ; exit 0
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            usage ; exit 1
            ;;
    esac
done

# Remove optional arguments
shift $(($OPTIND - 1))

if [ -z "$host_port" ]; then
    echo "Marathon host:port not specified" >&2
    usage ; exit 1
fi

if [ -z "$tag" ]; then
    echo "Docker tag not specified" >&2
    usage ; exit 1
fi

sed "s/{{TAGNAME}}/$tag/" queue-app.json | \
    curl -X POST -H "Content-Type: application/json" "http://$host_port/v2/apps" -d@-
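
# Example invocation (hypothetical endpoint and tag):
#   ./deploy.sh -m marathon.example.com:8080 -t v1.2.3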
sirca/bdkd_datastore
subprojects/samples/laser/cluster_pe/components/queue/deploy.sh
Shell
apache-2.0
854
#!/bin/bash

set -e
# set -x

C="$1"
shift

docker=${docker:-docker}

EXIT=false
if ! $docker exec $C ipa-server-install -U -r EXAMPLE.TEST -p Secret123 -a Secret123 --setup-dns --no-forwarders --no-ntp ; then
    EXIT=true
fi

FAILED=$( $docker exec $C systemctl list-units --state=failed --no-pager -l --no-legend | tee /dev/stderr | sed 's/ .*//' | sort )
for s in $FAILED ; do
    $docker exec $C systemctl status $s --no-pager -l || :
done
if [ -n "$FAILED" ] ; then
    EXIT=true
fi
if $EXIT ; then
    exit 1
fi

$docker exec $C ls -la /var/log/ipaserver-install.log

MACHINE_ID=$( $docker exec $C cat /etc/machine-id )
if $docker exec $C test -d /data ; then
    $docker exec $C ls -la /var/log/journal/$MACHINE_ID/system.journal
else
    $docker exec $C ls -la /run/log/journal/$MACHINE_ID/system.journal
fi

echo OK $0.
freeipa/freeipa-container
tests/systemd-container-ipa-server-install.sh
Shell
apache-2.0
810
#!/bin/bash

# Define a bunch of functions and set a bunch of variables
TEST_DIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"` | grep -o '.*/oshinko-s2i/test/e2e')
source $TEST_DIR/common

SCRIPT_DIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"`)
source $SCRIPT_DIR/../../builddc

TEMPLATE_DIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"` | grep -o '.*/oshinko-s2i')/templates

set_template $TEMPLATE_DIR/sparklyrdc.json
set_worker_count $S2I_TEST_WORKERS

# Clear these flags
set_fixed_app_name

os::test::junit::declare_suite_start "$MY_SCRIPT"

# Make the S2I test image if it's not already in the project
set_git_uri https://github.com/tmckayus/r-openshift-ex.git
make_image $S2I_TEST_IMAGE_SPARKLYR $GIT_URI
set_image $TEST_IMAGE

echo "++ dc_test_no_app_name"
dc_test_no_app_name

echo "++ test_exit"
test_exit

echo "++ test_cluster_name"
test_cluster_name

echo "++ test_del_cluster"
test_del_cluster

echo "++ test_app_args"
test_app_args

echo "++ test_pod_info"
test_podinfo

echo "++ test_named_config"
test_named_config

echo "++ test_driver_config"
test_driver_config

echo "++ test_spark_options"
test_spark_options

echo "++ test_driver_host"
test_driver_host

echo "++ test_no_source_or_image"
test_no_source_or_image

os::test::junit::declare_suite_end
rimolive/oshinko-s2i
test/e2e/templates/sparklyr/dc/sparklyrdc.sh
Shell
apache-2.0
1,265
ip netns exec qgw ip link set veth0 down
ip netns exec qgw ip link set lo down
ip link set dev qgw-veth0.t down
brctl delif vbr-pext qgw-veth0.t
ip link delete qgw-veth0.t
ip netns delete qgw

ip netns exec qhost1 ip link set veth0 down
ip netns exec qhost1 ip link set lo down
ip link set dev qhost1-veth0.t down
brctl delif vbr-pext qhost1-veth0.t
ip link delete qhost1-veth0.t
ip netns delete qhost1

ip link set dev vbr-pext down
brctl delbr vbr-pext
penguinwatcher/magnet
magnet/test/delete.1host-1gw.sh
Shell
apache-2.0
453
# -----------------------------------------------------------------------------
#
# Package        : github.com/dgryski/go-sip13
# Version        : v0.0.0-20190329191031-25c5027a8c7b
# Source repo    : https://github.com/dgryski/go-sip13
# Tested on      : RHEL 8.3
# Script License : Apache License, Version 2 or later
# Maintainer     : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ==========  platform using the mentioned version of the package.
#             It may not work as expected with newer versions of the
#             package and/or distribution. In such case, please
#             contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------

PACKAGE_NAME=github.com/dgryski/go-sip13
PACKAGE_VERSION=v0.0.0-20190329191031-25c5027a8c7b
PACKAGE_URL=https://github.com/dgryski/go-sip13

yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq wget gcc-c++
wget https://golang.org/dl/go1.16.1.linux-ppc64le.tar.gz && tar -C /bin -xf go1.16.1.linux-ppc64le.tar.gz && mkdir -p /home/tester/go/src /home/tester/go/bin /home/tester/go/pkg

export PATH=$PATH:/bin/go/bin
export GOPATH=/home/tester/go
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
export PATH=$GOPATH/bin:$PATH
export GO111MODULE=on

function test_with_master_without_flag_u(){
	echo "Building $PACKAGE_PATH with master branch"
	export GO111MODULE=auto
	if ! go get -d -t $PACKAGE_NAME; then
		echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
		echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/install_fails
		echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_Fails" > /home/tester/output/version_tracker
		exit 0
	else
		cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
		echo "Testing $PACKAGE_PATH with master branch without flag -u"
		# Ensure go.mod file exists
		go mod init
		if ! go test ./...; then
			echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
			echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_fails
			echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails" > /home/tester/output/version_tracker
			exit 0
		else
			echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
			echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
			echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
			exit 0
		fi
	fi
}

function test_with_master(){
	echo "Building $PACKAGE_PATH with master"
	export GO111MODULE=auto
	if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
		test_with_master_without_flag_u
		exit 0
	fi

	cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
	echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
	# Ensure go.mod file exists
	go mod init
	if ! go test ./...; then
		test_with_master_without_flag_u
		exit 0
	else
		echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
		echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
		echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
		exit 0
	fi
}

function test_without_flag_u(){
	echo "Building $PACKAGE_PATH with $PACKAGE_VERSION and without -u flag"
	if ! go get -d -t $PACKAGE_NAME@$PACKAGE_VERSION; then
		test_with_master
		exit 0
	fi

	cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
	echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
	# Ensure go.mod file exists
	go mod init
	if ! go test ./...; then
		test_with_master
		exit 0
	else
		echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
		echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
		echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
		exit 0
	fi
}

echo "Building $PACKAGE_PATH with $PACKAGE_VERSION"
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
	test_without_flag_u
	exit 0
fi

cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
	test_with_master
	exit 0
else
	echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
	echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
	echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
	exit 0
fi
ppc64le/build-scripts
g/github.com__dgryski__go-sip13/github.com__dgryski__go-sip13_rhel_8.3.sh
Shell
apache-2.0
5,121
#!/bin/bash
# This script will monitor the KB received/sec & KB transmitted/sec of a
# network interface, as reported by sar.
# Creator: CCC IT loren ext:2288 2005/8/3

network=`sar -n DEV | egrep -v 'IFACE|lo|^$|Linux' | awk '{print $2,$5,$6}' | uniq`
Interface=`echo $network |awk '{print $1}'`
KBinput_sec=`echo $network |awk '{print $2}'`
KBoutput_sec=`echo $network |awk '{print $3}'`
echo "$KBinput_sec"
echo "$KBoutput_sec"
echo "$Interface"
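# Sample output (values illustrative; one line each for KB in/sec, KB out/sec
# and the interface name, which is the order MRTG expects here):
#   12.34
#   5.67
#   eth0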
lichengshuang/createvhost
others/mrtg/shell/net.sh
Shell
apache-2.0
439
cd compose;go test;cd ..
cd actors/eureka;go test;cd ../..
cd actors/priamCassandra;go test;cd ../..
cd tooling/archaius;go test;cd ../..
cd tooling/architecture;go test;cd ../..
cd tooling/dhcp;go test;cd ../..
cd tooling/flow;go test;cd ../..
cd tooling/gotocol;go test;cd ../..
cd tooling/graphjson;go test;cd ../..
cd tooling/names;go test;cd ../..
cd tooling/ribbon;go test;cd ../..
cd tooling/usl;go test;cd ../..
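# Equivalent one-liner, assuming every tested package sits under the repo root
# (an assumption about the repo layout, not verified here):
# go test ./...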
jwatson0/spigo
misc/test.sh
Shell
apache-2.0
420
#!/bin/bash
HADOOP_DIR=../hadoop-2.4.1-src
rm -rf $HADOOP_DIR/hadoop-common-project/hadoop-annotations;cp -r hadoop-annotations $HADOOP_DIR/hadoop-common-project/;
rm -rf $HADOOP_DIR/hadoop-common-project/hadoop-auth;cp -r hadoop-auth $HADOOP_DIR/hadoop-common-project/;
rm -rf $HADOOP_DIR/hadoop-common-project/hadoop-common;cp -r hadoop-common $HADOOP_DIR/hadoop-common-project/;
rm -rf $HADOOP_DIR/hadoop-common-project/hadoop-minikdc;cp -r hadoop-minikdc $HADOOP_DIR/hadoop-common-project/;
rm -rf $HADOOP_DIR/hadoop-hdfs-project/hadoop-hdfs; cp -r hadoop-hdfs $HADOOP_DIR/hadoop-hdfs-project/;
rm -rf $HADOOP_DIR/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient;cp -r hadoop-mapreduce-client-jobclient $HADOOP_DIR/hadoop-mapreduce-project/hadoop-mapreduce-client/;
songweijia/hdfsrs
copy.sh
Shell
apache-2.0
804
#!/bin/bash
set -e

# Warning: This is NOT a robust testing script.
# DO NOT OVERLY TRUST IT.

# Unified Flask RESTful API applications need x-ersa-auth-token.
# Match this with ERSA_AUTH_TOKEN in config-***.py, formatted as:
# ERSA_AUTH_TOKEN = "UUID STRING"

# Make sure all test configs are named config-$package.py and have been created
for package in hcp hnas hpc swift xfs
do
    echo Testing models of $package
    config=config-$package.py
    echo "Config file is $config"
    export APP_SETTINGS=$config
    python -m unittest unified.models.tests.test_$package
done
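# A minimal config-hcp.py the loop above would pick up (the token value is a
# made-up placeholder, not from the source):
# ERSA_AUTH_TOKEN = "123e4567-e89b-12d3-a456-426614174000"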
eResearchSA/reporting-unified
test_unified_models.sh
Shell
apache-2.0
567
#!/bin/bash
script_dir="$(dirname $0)"
. "${script_dir}/functions"

backup_dir="/etc/puppetlabs/code_backup"
backup_date=$(date +'%Y%m%d-%H%M%S')
environment=${1:-production}

PATH=$PATH:/usr/local/bin

echo_title "Installing prerequisite packages and gems"
puppet resource package git ensure=present
if [ $(facter osfamily) == 'Debian' ]; then
  puppet resource package ruby ensure=present
else
  puppet resource package rubygems ensure=present
fi
puppet resource package deep_merge ensure=present provider=gem
puppet resource package hiera-eyaml ensure=present provider=gem
puppet resource package r10k ensure=present provider=gem

mkdir -p /etc/puppetlabs/code/environments/

if [ -d /etc/puppetlabs/code/environments/$environment ]; then
  ask_interactive "Directory /etc/puppetlabs/code/environments/$environment exists. We have to move it."
  if [ "$?" = 0 ]; then
    mkdir -p $backup_dir
    mv /etc/puppetlabs/code/environments/$environment "${backup_dir}/${environment}-${backup_date}"
    echo_subtitle "/etc/puppetlabs/code/environments/$environment moved to ${backup_dir}/${environment}-${backup_date}"
  else
    echo "Can't proceed. Remove /etc/puppetlabs/code/environments/$environment or pass as argument a different environment"
    exit 1
  fi
fi

echo_title "Cloning git://github.com/example42/psick.git to /etc/puppetlabs/code/environments/$environment"
git clone git://github.com/example42/psick.git /etc/puppetlabs/code/environments/$environment

cd /etc/puppetlabs/code/environments/$environment

echo_title "Running r10k puppetfile install -v"
r10k puppetfile install -v

echo_title "Linking /etc/puppetlabs/puppet/hiera.yaml to /etc/puppetlabs/code/environments/$environment/hiera3.yaml"
ln -sf /etc/puppetlabs/code/environments/$environment/hiera3.yaml /etc/puppetlabs/puppet/hiera.yaml
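# Example: deploy into a non-default environment (the environment name is
# illustrative):
# ./puppet_deploy_controlrepo.sh development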
snesbittsea/psick
bin/puppet_deploy_controlrepo.sh
Shell
apache-2.0
1,811
#!/bin/bash
if [ -z "$VCAP_APP_PORT" ];
  then SERVER_PORT=80;
  else SERVER_PORT="$VCAP_APP_PORT";
fi
echo port is $SERVER_PORT

python manage.py makemigrations --settings=cognitive.settings.bluemix
python manage.py migrate --settings=cognitive.settings.bluemix
echo "from django.contrib.auth.models import User; User.objects.create_superuser(username='xxxxxxxx',password='yyyyyyyy',email='[email protected]')" | python manage.py shell --settings=cognitive.settings.bluemix

echo [$0] Starting Django Server...
python manage.py runserver 0.0.0.0:$SERVER_PORT --noreload --settings=cognitive.settings.bluemix
chughts/watson-betaworks-python-sample
runinitial.sh
Shell
apache-2.0
603
#!/bin/bash -e
# Copyright 2016 C.S.I.R. Meraka Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

. /etc/profile.d/modules.sh
module load ci
module add zlib
export LD_LIBRARY_PATH="/lib64:$LD_LIBRARY_PATH"

echo "About to make the modules"
cd $WORKSPACE/${NAME}-${VERSION}/build-${BUILD_NUMBER}
ls
echo $?

echo "Run Make Check - This is the Test"
make check

echo "Run make Install"
make install

mkdir -p modules
(
cat <<MODULE_FILE
#%Module1.0
## $NAME modulefile
##
proc ModulesHelp { } {
    puts stderr "\tAdds $NAME $VERSION to your environment"
}

module-whatis "$NAME $VERSION."
setenv LIBPNG_VERSION $VERSION
setenv LIBPNG_DIR /data/ci-build/$::env(SITE)/$::env(OS)/$::env(ARCH)/$NAME/$VERSION
prepend-path PATH $::env(LIBPNG_DIR)/bin
prepend-path MANPATH $::env(LIBPNG_DIR)/man
prepend-path LD_LIBRARY_PATH $::env(LIBPNG_DIR)/lib
MODULE_FILE
) > modules/${VERSION}

mkdir -p ${LIBRARIES}/${NAME}
cp modules/${VERSION} ${LIBRARIES}/${NAME}/${VERSION}

# Testing module
module avail
module list
module add ${NAME}/${VERSION}
which libpng16-config
libpng16-config --version
SouthAfricaDigitalScience/libpng-deploy
check-build.sh
Shell
apache-2.0
1,642
scripts_dir="../scripts/"

echo "-------------------------------------------------------------------" >>log.txt
echo "---------New Script Run--------------------------------------------" >>log.txt
echo "-------------------------------------------------------------------" >>log.txt

if [ -f dirty_transaction ]; then
    echo "Potentially incomplete transaction, read log.txt and fix the issue!"
    exit 1
fi

psql demodb -f engine-db-scripts/create-shema-version-table.sql &>>log.txt

current_version=`psql demodb --quiet --no-align -f engine-db-scripts/get-schema-version.sql | sed -n 2p`
# The query above returns its result on the second line; when the result set is
# empty, the second line instead holds the row count, i.e. "0 rows".
echo $current_version | grep row && current_version=-1 && echo "No current version number... using -1" >> log.txt
echo "current_version=$current_version" >> log.txt

ls $scripts_dir | \
while read file_name; do
    file_number=`echo $file_name | sed 's/\([0-9]*\).*/\1/'`
    if [[ -z "${file_number// }" ]]; then
        echo "no more numbered scripts..." && exit 0
    fi
    if [ "$file_number" -gt "$current_version" ] ; then
        if [ -f dirty_transaction ]; then
            echo "Potentially incomplete transaction, read log.txt and fix the issue!"
            exit 1
        fi
        echo "$file_number > $current_version executing $file_name"
        touch dirty_transaction
        INSERT_SQL="insert into schema_version values($file_number);"
        echo "executing $file_name" >> log.txt
        psql demodb -v "ON_ERROR_STOP=1" -f $scripts_dir/$file_name &>> log.txt && \
        echo "execution succeeded - updating db schema number" >> log.txt && \
        current_version=$file_number && \
        echo $INSERT_SQL &>>log.txt && \
        psql demodb -v "ON_ERROR_STOP=1" -c "$INSERT_SQL" &>>log.txt && \
        psql demodb -f engine-db-scripts/delete-from-schema-version.sql &>>log.txt && \
        echo "schema number successfully updated in db" >> log.txt && \
        rm dirty_transaction
    else
        echo "$file_number =< $current_version skipping $file_name"
    fi
done
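# The loop expects migration files whose names start with a number, e.g.
# (illustrative names, not from the source repo):
#   ../scripts/001-create-users.sql
#   ../scripts/002-add-index.sql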
PauliJokinen/db-schema-updater
engine/schema-engine.sh
Shell
apache-2.0
2,107
#!/bin/bash

sudo apt-get install zsh

cat << 'EOF' > ~/.zshrc
# FUNCTIONS #
#############

function glob-exists () {
    # glob-exists "<GLOB>"
    # Test to see if a glob matches anything.
    # Example: glob-exists "~/.ssh/*.pub"
    [[ -n `zsh -G -c 'echo ${~1}'` ]];
}

function tty-of-pid () {
    echo /dev/tty`ps -p "$1" -o tt | tail -n 1`
}

# PATHS #
##########

export PLATFORM=`uname -s`

if [ $PLATFORM = Darwin ]; then
    dev=~/Development
    desk=~/Desktop
    USER_PATH=~/Development/bin
    PYTHON_PATH=/Library/Frameworks/Python.framework/Versions/Current/bin
    export PATH=$USER_PATH:$PYTHON_PATH:$PATH
elif [ $PLATFORM = Linux ]; then
    if glob-exists "~/.ssh/*.pub" && which keychain >/dev/null 2>&1; then
        eval `ls ~/.ssh/*.pub | sed 's/\.pub//' | xargs keychain --eval`
    fi
fi

export EDITOR='subl -w'
export GIT_EDITOR='subl -w'

mkdir -p ~/.zsh/cache

# LANGUAGES/TIMEZONES #
########################

export LC_ALL="en_US.UTF-8"
export LANG="en_US.UTF-8"
export LC_CTYPE=C
export TZ='GMT'

# OPTIONS #
############

# Changing Directories #
#-----------------------#
setopt AUTO_CD              # If a command isn't found, and names a directory, cd to it.
setopt AUTO_PUSHD           # Make `cd` behave like `pushd`.
setopt CDABLE_VARS          # `cd param` => `cd ~param` => `cd /path/to/dir`
setopt PUSHD_IGNORE_DUPS    # Don't push multiple copies of the same dir onto the stack.
setopt PUSHD_TO_HOME        # `pushd` with no args == `pushd $HOME`.

# Completion #
#-------------#
setopt AUTO_LIST            # Automatically list choices on an ambiguous completion.
setopt AUTO_MENU            # Use a menu after the second <tab> for an ambiguous completion.
setopt AUTO_NAME_DIRS       # param=/path/to/dir => ~param.
setopt AUTO_PARAM_KEYS      # Automatically complete characters that have to come after a parameter name.
setopt AUTO_PARAM_SLASH     # Automatically add trailing slashes to parameters containing directory names.
unsetopt AUTO_REMOVE_SLASH  # Don't remove slashes at the end of completed dirnames.
setopt GLOB_COMPLETE        # Allow cycling through expansions using globs.
setopt HASH_LIST_ALL        # The first time completion is attempted, hash the whole PATH.
setopt LIST_TYPES           # Show the type of file in completions with a trailing character.
setopt REC_EXACT            # During completion, recognize exact matches even if they are ambiguous.

# Expansion and Globbing #
#-------------------------#
setopt BAD_PATTERN          # Raise errors for badly-formed filename generation patterns.
setopt EXTENDED_GLOB        # More powerful globbing with `#`, `~` and `^`.
setopt GLOB                 # Enable globbing.
setopt CSH_NULL_GLOB        # Only raise an error if all arguments to a command are null globs.
setopt NUMERIC_GLOB_SORT    # Sort numeric filenames numerically when using globs.
setopt RC_EXPAND_PARAM      # $xx = (a b c); foo${xx}bar => (fooabar foobbar foocbar)

# History #
#----------#
setopt BANG_HIST            # Enable textual history substitution, using !-syntax.
setopt EXTENDED_HISTORY     # Save beginning and ending timestamps to the history file.
setopt HIST_ALLOW_CLOBBER   # Allow clobbering (with pipes) in the command history.
setopt HIST_IGNORE_SPACE    # Don't remember space-prefixed commands.
setopt HIST_REDUCE_BLANKS   # Remove superfluous blanks from commands being added to the history.
setopt APPEND_HISTORY       # Parallel zsh sessions will append their history to the history file.

# Input/Output #
#---------------#
unsetopt CORRECT            # Attempt to correct the spelling of commands.
unsetopt CORRECT_ALL        # Attempt to correct all arguments in a line.
setopt INTERACTIVE_COMMENTS # Allow comments in the interactive shell.
unsetopt HASH_CMDS          # Use command hashing the first time a command is called.
setopt MAIL_WARNING         # Inform me if I have system mail.
setopt RC_QUOTES            # 'Zack''s Shell' => "Zack's Shell"

# Job Control #
#--------------#
setopt AUTO_CONTINUE        # Send stopped jobs a CONTINUE signal after they're disowned.
setopt AUTO_RESUME          # Single-word simple commands will resume a currently-running job.
unsetopt BG_NICE            # Don't set background tasks to a lower priority.
setopt LONG_LIST_JOBS       # List jobs in the long format by default.
setopt NOTIFY               # Report the status of background jobs immediately, rather than waiting until the next prompt.

# Prompting #
#------------#
setopt PROMPT_SUBST         # Perform substitution/expansion in prompts.

# Scripts and Functions #
#------------------------#
setopt FUNCTION_ARGZERO     # Set $0 to the name of a function/script when running.
setopt MULTIOS              # Perform implicit `tee`s or `cat`s for multiple redirections.

# ZSH MODULES #
################

zmodload zsh/stat
zmodload -a mapfile
zmodload zsh/terminfo
autoload colors; colors        # ANSI color codes
zmodload zsh/complist
autoload -U compinit; compinit # Completion

# HISTORY #
############

# Log 10K commands
export HISTSIZE=10000
export SAVEHIST=10000
export HISTFILE=$HOME/.zsh/history

# VIEWING/EDITING #
####################

export PAGER='less'
alias more='less'
export EDITOR='vim'
export MUTT_EDITOR='vim'
export GIT_EDITOR='vim'

# PROMPT #
###########

export VIRTUAL_ENV_DISABLE_PROMPT=definitely

_virtualenv_prompt () {
    if [[ -n $VIRTUAL_ENV ]]; then
        echo " workon%F{cyan}" `basename "$VIRTUAL_ENV"`"%f"
    fi
}

_git_prompt () {
    test -z "$(pwd | egrep '/\.git(/|$)')" || return
    local _git_branch="`git branch 2>/dev/null | egrep '^\*' | sed 's/^\* //'`"
    test -z "$_git_branch" && return
    local _git_status=`git status --porcelain | sort | awk '
    BEGIN { modified = 0; staged = 0; new = 0; }
    /^ / { modified += 1 }
    /^[^\? ]/ { staged += 1 }
    /^\?/ { new += 1 }
    END {
        if (staged) { print "."; exit }
        if (modified) { print "."; exit }
        if (new) { print "."; exit }
    }'`
    if [[ -n $_git_status ]]; then
        _git_status=":%F{yellow}$_git_status%f]"
    else
        _git_status="] "
    fi
    echo -n "[%F{gray}±%f:%F{blue}$_git_branch%f$_git_status"
}

PROMPT='
%(?..[%F{red}%?%f] )%F{magenta}%n%f at %F{yellow}%m%f in %F{green}%~%f$(_virtualenv_prompt)
%F{magenta}>>%f%F{yellow}>%f '

if which git >/dev/null 2>&1; then
    RPROMPT='$(_git_prompt)'
fi

# KEY BINDINGS #
#################

# Ctrl-A, Ctrl-E
bindkey '^a' beginning-of-line
bindkey '^e' end-of-line

# Arrow Keys
bindkey "$terminfo[kcuu1]" up-line-or-history
bindkey "$terminfo[kcud1]" down-line-or-history
bindkey '^[f' forward-word
bindkey '^[b' backward-word

# Misc
## bindkey ' ' magic-space # Do history expansion on space
bindkey '^r' history-incremental-search-backward
bindkey "^[[3~" delete-char
bindkey "^?" backward-delete-char

# PYTHON #
###########

if [ -f ~/.pythonrc ]; then
    export PYTHONSTARTUP=~/.pythonrc
fi

# virtualenv
if which virtualenvwrapper.sh >/dev/null 2>&1; then
    export WORKON_HOME=$HOME/.virtualenvs
    . "`which virtualenvwrapper.sh`"
    workon default
fi

function cd.py () {
    cd "$(python -c "import os.path as _, ${1}; \
        print _.dirname(_.realpath(${1}.__file__[:-1]))")"
}

# pip
if which pip >/dev/null 2>&1; then
    if [ ! -f ~/.zsh/cache/pip_completion ]; then
        pip completion --zsh | egrep -v '^\s*(#|$)' > ~/.zsh/cache/pip_completion 2>/dev/null
    fi
    . ~/.zsh/cache/pip_completion
    export PIP_RESPECT_VIRTUALENV=true
fi

# Functions #
#------------#

python_lib () {
    # python_lib
    # Print the full path to the current Python site-packages directory.
    echo `python -c 'import distutils.sysconfig; print distutils.sysconfig.get_python_lib()'`
}

pylink () {
    # pylink <package/module>
    # Symlink a Python package/module into the site-packages directory.
    ln -s $(abspath `pwd`/"$1") `python_lib`/`basename "$1"`
}

pyunlink () {
    # pyunlink <package/module>
    # Remove the link to a Python package/module from the site-packages directory.
    unlink `python_lib`/`basename "$1"`
}

# RUBY #
#########

[[ -s $HOME/.rvm/scripts/rvm ]] && source $HOME/.rvm/scripts/rvm
export RUBYOPT="-rubygems"

# FUNCTIONS #
##############

abspath () {
    # abspath <directory>
    # Print the absolute path to a given file (using Python's `os.path.abspath()`).
    python -c 'import os, sys; print os.path.abspath(sys.argv[1])' "$@"
}

# ALIASES #
############

if [ $PLATFORM = Linux ]; then
    alias ls='ls -Flh --color=auto'
else
    alias ls='ls -FGlh'
fi

alias service='sudo service'
alias tail='sudo tail'
alias apt-get='sudo apt-get'
alias dir='ll'
alias l='ll'
alias ll='ls -lh'
alias la='ls -A'
alias curl='curl -s'
alias tree='tree -C --dirsfirst'
alias rmpyc='find . -name "*.pyc" -delete'
alias mailenc='gpg2 --armor --sign --encrypt'
alias moar='curl -s "http://meme.boxofjunk.ws/moar.txt?lines=1"'
alias wtcommit='curl -s "http://whatthecommit.com/index.txt"'
alias top='top -ocpu'
alias dj='django-admin.py'

if [ -f ~/.wcookies ]; then
    alias download="wget -c --load-cookies ~/.wcookies"
fi

# COMPLETION STYLES #
######################

zstyle ':completion::complete:*' use-cache on
zstyle ':completion::complete:*' cache-path ~/.zsh/cache/$HOST
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-prompt '%SAt %p: Hit TAB for more, or the character to insert%s'
zstyle ':completion:*' menu select=1 _complete _ignored _approximate
zstyle -e ':completion:*:approximate:*' max-errors \
    'reply=( $(( ($#PREFIX+$#SUFFIX)/2 )) numeric )'
zstyle ':completion:*' select-prompt '%SScrolling active: current selection at %p%s'
zstyle ':completion:*:processes' command 'ps -axw'
zstyle ':completion:*:processes-names' command 'ps -awxho command'

# Completion Styles
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'

# list of completers to use
zstyle ':completion:*::::' completer _expand _complete _ignored _approximate

# allow one error for every three characters typed in approximate completer
zstyle -e ':completion:*:approximate:*' max-errors \
    'reply=( $(( ($#PREFIX+$#SUFFIX)/2 )) numeric )'

# insert all expansions for expand completer
zstyle ':completion:*:expand:*' tag-order all-expansions

# NEW completion:
# 1. All /etc/hosts hostnames are in autocomplete
# 2. If you have a comment in /etc/hosts like #%foobar.domain,
#    then foobar.domain will show up in autocomplete!
zstyle ':completion:*' hosts $(awk '/^[^#]/ {print $2 $3" "$4" "$5}' /etc/hosts | grep -v ip6- && grep "^#%" /etc/hosts | awk -F% '{print $2}')

# formatting and messages
zstyle ':completion:*' verbose yes
zstyle ':completion:*:descriptions' format '%B%d%b'
zstyle ':completion:*:messages' format '%d'
zstyle ':completion:*:warnings' format 'No matches for: %d'
zstyle ':completion:*:corrections' format '%B%d (errors: %e)%b'
zstyle ':completion:*' group-name ''

# match uppercase from lowercase
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'

# offer indexes before parameters in subscripts
zstyle ':completion:*:*:-subscript-:*' tag-order indexes parameters

# command for process lists, the local web server details and host completion
zstyle ':completion:*:processes' command 'ps -o pid,s,nice,stime,args'
# zstyle ':completion:*:urls' local 'www' '/var/www/htdocs' 'public_html'
zstyle '*' hosts $hosts

# Filename suffixes to ignore during completion (except after rm command)
zstyle ':completion:*:*:(^rm):*:*files' ignored-patterns '*?.o' '*?.c~' \
    '*?.old' '*?.pro'
# the same for old style completion
#fignore=(.o .c~ .old .pro)

# ignore completion functions (until the _ignored completer)
zstyle ':completion:*:functions' ignored-patterns '_*'
zstyle ':completion:*:scp:*' tag-order \
    files users 'hosts:-host hosts:-domain:domain hosts:-ipaddr"IP\ Address *'
zstyle ':completion:*:scp:*' group-order \
    files all-files users hosts-domain hosts-host hosts-ipaddr
zstyle ':completion:*:ssh:*' tag-order \
    users 'hosts:-host hosts:-domain:domain hosts:-ipaddr"IP\ Address *'
zstyle ':completion:*:ssh:*' group-order \
    hosts-domain hosts-host users hosts-ipaddr
zstyle '*' single-ignored show

# TIMEZONE #
############

export TZ='UTC'

# FINALLY #
############

if [ -f ~/.zsh_profile ]; then
    . ~/.zsh_profile
fi

cd ~
EOF

USERNAME="$(whoami)"
sudo ln "/home/$USERNAME/.zshrc" /root/.zshrc
tylermenezes/Debian-Settings
install_zsh.sh
Shell
artistic-2.0
12,685
#!/usr/bin/env bash

shopt -s extglob # allow for complex regex-like globs

files () {
  [ -f "$1" ] && echo "$@"
}

NODE_VERSION="$(node --version)"

if [[ "${NODE_VERSION:0:6}" != "v0.10." && "${NODE_VERSION:0:6}" != "v0.12." ]]; then
  echo "running: test/start/env/test.js"
  cd test/start/env
  OPBEAT_APP_ID=from-env node -r ../../../start test.js || exit $?;
  cd ../../..

  echo "running: test/start/file/test.js"
  cd test/start/file
  node -r ../../../start test.js || exit $?;
  cd ../../..
fi

for file in $(files test/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/sourcemaps/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/modules/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/modules/http/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/modules/pg/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/modules/mysql/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/modules/bluebird/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done

for file in $(files test/instrumentation/modules/koa-router/!(_*).js); do
  echo "running: node $file"
  node "$file" || exit $?;
done
opbeat/opbeat-node
test/test.sh
Shell
bsd-2-clause
1,620
#!/bin/sh echo "installing dependencies..." sudo apt install -y pkg-config xorg-dev libgl1-mesa-dev echo "downloading and installing http://github.com/skaslev/gl3w" git clone http://github.com/skaslev/gl3w.git mkdir temp && gl3w/gl3w_gen.py temp mkdir core/impl/render/gl/GL cp temp/include/GL/* core/impl/render/gl/GL/ cp temp/src/* core/impl/render/gl/GL/ rm -r -f temp gl3w echo "downloading and installing http://github.com/glfw/glfw" git clone http://github.com/glfw/glfw.git cd glfw && cmake CMakeLists.txt && make && sudo make install && cd ../ && rm -r -f glfw echo "finished."
clearlycloudy/enhance
install_dependencies.sh
Shell
bsd-2-clause
591
npm run build
rm static/css/*.css
rm static/js/*.js
rm static/js/*.map
mkdir -p static/css
mkdir -p static/js
cp templates/hs_discover/css/app*.css static/css/app.css
cp templates/hs_discover/css/chunk-vendors*.css static/css/chunk-vendors.css
cp templates/hs_discover/css/*.css static/css
cp templates/hs_discover/js/app*.js static/js/app.js
cp templates/hs_discover/js/*.js static/js
cp templates/hs_discover/map.js static/js/map.js
cp templates/hs_discover/js/*.map static/js/
cp templates/hs_discover/js/chunk-vendors*.js static/js/chunk-vendors.js
docker exec -it hydroshare python manage.py collectstatic --no-input
hydroshare/hydroshare
hs_discover/deploy.sh
Shell
bsd-3-clause
622
#!/bin/bash
#
# Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
#
# Copyright (c) 2015-2017, Gregory M. Kurtzer. All rights reserved.
#
# Copyright (c) 2016-2017, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# This software is licensed under a customized 3-clause BSD license. Please
# consult LICENSE file distributed with the sources of this project regarding
# your rights to use or distribute this software.
#
# NOTICE. This Software was developed under funding from the U.S. Department of
# Energy and the U.S. Government consequently retains certain rights. As such,
# the U.S. Government has been granted for itself and others acting on its
# behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
# to reproduce, distribute copies to the public, prepare derivative works, and
# perform publicly and display publicly, and to permit other to do so.
#
#

## Basic sanity
if [ -z "$SINGULARITY_libexecdir" ]; then
    echo "Could not identify the Singularity libexecdir."
    exit 1
fi

## Load functions
if [ -f "$SINGULARITY_libexecdir/singularity/functions" ]; then
    . "$SINGULARITY_libexecdir/singularity/functions"
else
    echo "Error loading functions: $SINGULARITY_libexecdir/singularity/functions"
    exit 1
fi

if [ -z "${SINGULARITY_ROOTFS:-}" ]; then
    message ERROR "Singularity root file system not defined\n"
    exit 1
fi

install -d -m 0755 "$SINGULARITY_ROOTFS"
install -d -m 0755 "$SINGULARITY_ROOTFS/.singularity.d"
install -d -m 0755 "$SINGULARITY_ROOTFS/.singularity.d/env"

if [ -f "$SINGULARITY_BUILDDEF" ]; then
    ARGS=`singularity_section_args "pre" "$SINGULARITY_BUILDDEF"`
    singularity_section_get "pre" "$SINGULARITY_BUILDDEF" | /bin/sh -e -x $ARGS || ABORT 255
fi

exit 0
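# For context (an illustrative sketch, not from this repo): the %pre section
# extracted above comes from a Singularity build definition such as:
#
#   Bootstrap: docker
#   From: centos:7
#
#   %pre
#       echo "runs on the host before the root filesystem is assembled"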
Trophime/singularity
libexec/bootstrap-scripts/pre.sh
Shell
bsd-3-clause
1,920
#!/usr/bin/env bash
##
### Based on the original script by the friendly guys at Boundary
###
### Copyright 2011-2013, Boundary
### Copyright 2013, Server Density
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
###     http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###

PLATFORMS=("Ubuntu" "Debian" "CentOS" "Amazon" "RHEL" "CloudLinux" "Fedora")

# Put additional version numbers here.
# These variables take the form ${platform}_VERSIONS, where $platform matches
# the tags in $PLATFORMS
Ubuntu_VERSIONS=("10.04" "10.10" "11.04" "11.10" "12.04" "12.10" "13.04" "13.10" "14.04" "14.10")
Debian_VERSIONS=("5" "6")
CentOS_VERSIONS=("5" "6")
Amazon_VERSIONS=("2012.09" "2013.03")
RHEL_VERSIONS=("5" "6")
CloudLinux_VERSIONS=("5" "6")
Fedora_VERSIONS=("21")

# For version number updates you hopefully don't need to modify below this line
# -----------------------------------------------------------------------------

SUPPORTED_ARCH=0
SUPPORTED_PLATFORM=0
APT_CMD="apt-get -q -y --force-yes"
YUM_CMD="yum -d0 -e0 -y"

trap "exit" INT TERM EXIT

function print_supported_platforms() {
    echo "Supported platforms are:"
    for d in ${PLATFORMS[*]}
    do
        echo -n " * $d:"
        foo="\${${d}_VERSIONS[*]}"
        versions=`eval echo $foo`
        for v in $versions
        do
            echo -n " $v"
        done
        echo ""
    done
}

function check_distro_version() {
    PLATFORM=$1
    DISTRO=$2
    VERSION=$3

    TEMP="\${${DISTRO}_VERSIONS[*]}"
    VERSIONS=`eval echo $TEMP`

    if [ $DISTRO = "Ubuntu" ]; then
        MAJOR_VERSION=`echo $VERSION | awk -F. '{print $1}'`
        MINOR_VERSION=`echo $VERSION | awk -F. '{print $2}'`
        PATCH_VERSION=`echo $VERSION | awk -F. '{print $3}'`

        TEMP="\${${DISTRO}_VERSIONS[*]}"
        VERSIONS=`eval echo $TEMP`
        for v in $VERSIONS ; do
            if [ "$MAJOR_VERSION.$MINOR_VERSION" = "$v" ]; then
                return 0
            fi
        done
    elif [ $DISTRO = "CentOS" ] || [ $DISTRO = "RHEL" ] || [ $DISTRO = "CloudLinux" ]; then
        MAJOR_VERSION=`echo $VERSION | awk -F. '{print $1}'`
        MINOR_VERSION=`echo $VERSION | awk -F. '{print $2}'`

        TEMP="\${${DISTRO}_VERSIONS[*]}"
        VERSIONS=`eval echo $TEMP`
        for v in $VERSIONS ; do
            if [ "$MAJOR_VERSION" = "$v" ]; then
                return 0
            fi
        done
    elif [ $DISTRO = "Amazon" ]; then
        VERSION=`echo $PLATFORM | awk '{print $5}'`
        # Some of these include minor numbers. Trim.
        VERSION=${VERSION:0:7}

        TEMP="\${${DISTRO}_VERSIONS[*]}"
        VERSIONS=`eval echo $TEMP`
        for v in $VERSIONS ; do
            if [ "$VERSION" = "$v" ]; then
                return 0
            fi
        done
    elif [ $DISTRO = "Debian" ]; then
        MAJOR_VERSION=`echo $VERSION | awk -F. '{print $1}'`
        MINOR_VERSION=`echo $VERSION | awk -F. '{print $2}'`

        TEMP="\${${DISTRO}_VERSIONS[*]}"
        VERSIONS=`eval echo $TEMP`
        for v in $VERSIONS ; do
            if [ "$MAJOR_VERSION" = "$v" ]; then
                return 0
            fi
        done
    elif [ $DISTRO = "Fedora" ]; then
        VERSION=`echo $PLATFORM | awk '{print $3}'`
        # Some of these include minor numbers. Trim.

        TEMP="\${${DISTRO}_VERSIONS[*]}"
        VERSIONS=`eval echo $TEMP`
        for v in $VERSIONS ; do
            if [ "$VERSION" = "$v" ]; then
                return 0
            fi
        done
    fi

    echo "Detected $DISTRO but with an unsupported version ($VERSION)"
    return 1
}

function print_help() {
    echo "   $0 -a https://example.serverdensity.io -k agentKey"
    echo "   -a: Required. Account URL in form https://example.serverdensity.io"
    echo "   -k: Agent key. Not required if API token provided. "
    echo "   -t: API token. Not required if agent key provided. "
    echo "   -g: Group. Optional. Group to add the new device into."
    echo "   -T: Tag. Optional. Tag this device - multiple tags not supported."
    echo "   -p: Provider. Optional. Hosting provider."
    echo "   -i: Provider Id. Optional. Requires Provider. Cloud device Id."
    exit 0
}

function do_install() {
    if [ "$DISTRO" = "Ubuntu" ] || [ "$DISTRO" = "Debian" ]; then
        sudo $APT_CMD update > /dev/null

        APT_STRING="deb http://www.serverdensity.com/downloads/linux/deb all main"

        echo "Adding repository"
        sudo sh -c "echo \"deb http://www.serverdensity.com/downloads/linux/deb all main\" > /etc/apt/sources.list.d/sd-agent.list"

        $CURL -Ls https://www.serverdensity.com/downloads/boxedice-public.key | sudo apt-key add -
        if [ $? -gt 0 ]; then
            echo "Error downloading key"
            exit 1
        fi

        echo "Installing agent"
        sudo $APT_CMD update > /dev/null
        sudo $APT_CMD install sd-agent

        return $?
    elif [ "$DISTRO" = "CentOS" ] || [ $DISTRO = "Amazon" ] || [ $DISTRO = "RHEL" ] || [ $DISTRO = "CloudLinux" ] || [ $DISTRO = "Fedora" ]; then
        echo "Adding repository"
        sudo sh -c "cat - > /etc/yum.repos.d/serverdensity.repo <<EOF
[serverdensity]
name=Server Density
baseurl=http://www.serverdensity.com/downloads/linux/redhat/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-serverdensity
EOF"

        $CURL -Ls https://www.serverdensity.com/downloads/boxedice-public.key | sudo tee /etc/pki/rpm-gpg/RPM-GPG-KEY-serverdensity > /dev/null
        if [ $? -gt 0 ]; then
            echo "Error downloading key"
            exit 1
        fi

        echo "Installing agent"
        sudo $YUM_CMD install sd-agent

        return $?
    fi
}

function configure_agent() {
    echo "Configuring agent"

    sudo sh -c "cat - > /etc/sd-agent/config.cfg <<EOF
#
# Server Density Agent Config
# Docs: http://www.serverdensity.com/docs/agent/configvariables/
#

[Main]
sd_url: $ACCOUNT
agent_key: $AGENTKEY

#
# Plugins
#
# Leave blank to ignore. See http://www.serverdensity.com/docs/agent/writingplugins/
#
plugin_directory:

#
# Optional status monitoring
#
# See http://www.serverdensity.com/docs/agent/config/
# Ignore these if you do not wish to monitor them
#

# Apache
# See http://www.serverdensity.com/docs/agent/apache/
apache_status_url: http://www.example.com/server-status/?auto
apache_status_user:
apache_status_pass:

# MongoDB
# See http://www.serverdensity.com/docs/agent/mongodb/
mongodb_server:
mongodb_dbstats: no
mongodb_replset: no

# MySQL
# See http://www.serverdensity.com/docs/agent/mysql/
mysql_server:
mysql_user:
mysql_pass:

# nginx
# See http://www.serverdensity.com/docs/agent/nginx/
nginx_status_url: http://www.example.com/nginx_status

# RabbitMQ
# See http://www.serverdensity.com/docs/agent/rabbitmq/
# for rabbit > 2.x use this url:
# rabbitmq_status_url: http://www.example.com:55672/api/overview
# for earlier, use this:
rabbitmq_status_url: http://www.example.com:55672/json
rabbitmq_user: guest
rabbitmq_pass: guest

# Temporary file location
# See http://www.serverdensity.com/docs/agent/config/
# tmp_directory: /var/log/custom_location

# Pid file location
# See http://www.serverdensity.com/docs/agent/config/
# pidfile_directory: /var/custom_location

# Set log level
# See http://www.serverdensity.com/docs/agent/config/
# logging_level: debug
EOF"

    sudo /etc/init.d/sd-agent restart
}

function pre_install_sanity() {
    SUDO=`which sudo`
    if [ $? -ne 0 ]; then
        echo "This script requires that sudo be installed and configured for your user."
        echo "Please install sudo. For assistance, [email protected]"
        exit 1
    fi

    which curl > /dev/null
    if [ $? -gt 0 ]; then
        echo "The 'curl' command is either not installed or not on the PATH ..."
        echo "Installing curl ..."

        if [ $DISTRO = "Ubuntu" ] || [ $DISTRO = "Debian" ]; then
            sudo $APT_CMD update > /dev/null
            sudo $APT_CMD install curl
        elif [ $DISTRO = "CentOS" ] || [ $DISTRO = "Amazon" ] || [ $DISTRO = "RHEL" ] || [ $DISTRO = "CloudLinux" ] || [ $DISTRO = "Fedora" ]; then
            sudo $YUM_CMD install curl
        fi
    fi

    CURL="`which curl`"
}

function get_existing_device() {
    RESULT=`curl -v "https://api.serverdensity.io/inventory/resources/?token=$1&filter=$2"`
    exit_status=$?

    # an exit status of 1 indicates an unsupported protocol. (e.g.,
    # https hasn't been baked in.)
    if [ "$exit_status" -eq "1" ]; then
        echo "Your local version of curl has not been built with HTTPS support: `which curl`"
        exit 1

    # if the exit code is 7, that means curl couldnt connect so we can bail
    elif [ "$exit_status" -eq "7" ]; then
        echo "Could not connect to create server"
        exit 1

    # it appears that an exit code of 28 is also a can't connect error
    elif [ "$exit_status" -eq "28" ]; then
        echo "Could not connect to create server"
        exit 1

    elif [ "$exit_status" -ne "0" ]; then
        echo "Error connecting to api.serverdensity.io; status $exit_status."
        exit 1
    fi

    AGENTKEY=`echo $RESULT | sed 's/\\\\\//\//g' | sed 's/[{}]//g' | awk -v k="text" '{n=split($0,a,","); for (i=1; i<=n; i++) print a[i]}' | sed 's/\"\:\"/\|/g' | sed 's/[\,]/ /g' | sed 's/\"//g' | grep -w agentKey | cut -d"|" -f2| sed -e 's/^ *//g' -e 's/ *$//g'`

    if [ "$AGENTKEY" = "401" ]; then
        echo "Authentication error: $OUTPUT"
        echo "Verify that you have passed in the correct account URL and API token"
        exit 1
    elif [ "$AGENTKEY" = "403" ]; then
        echo "Forbidden error: $OUTPUT"
        echo "Verify that you have passed in the correct account URL and API token"
        exit 1
    fi
}

# Grab some system information
if [ -f /etc/redhat-release ] ; then
    PLATFORM=`cat /etc/redhat-release`
    DISTRO=`echo $PLATFORM | awk '{print $1}'`
    if [ "$DISTRO" = "Red" ]; then
        DISTRO="RHEL"
        VERSION=`echo $PLATFORM | awk '{print $7}'`
    elif [ "$DISTRO" = "CentOS" ]; then
        VERSION=`echo $PLATFORM | awk '{print $3}'`
    elif [ "$DISTRO" = "CloudLinux" ]; then
        VERSION=`echo $PLATFORM | awk '{print $4}'`
    elif [ "$DISTRO" = "Fedora" ]; then
        VERSION=`echo $PLATFORM | awk '{print $3}'`
    else
        DISTRO="unknown"
        PLATFORM="unknown"
        VERSION="unknown"
    fi
    MACHINE=`uname -m`
elif [ -f /etc/system-release ]; then
    PLATFORM=`cat /etc/system-release | head -n 1`
    DISTRO=`echo $PLATFORM | awk '{print $1}'`
    VERSION=`echo $PLATFORM | awk '{print $5}'`
    MACHINE=`uname -m`
elif [ -f /etc/lsb-release ] ; then
    #Ubuntu version lsb-release - https://help.ubuntu.com/community/CheckingYourUbuntuVersion
    . /etc/lsb-release
    PLATFORM=$DISTRIB_DESCRIPTION
    DISTRO=$DISTRIB_ID
    VERSION=$DISTRIB_RELEASE
    MACHINE=`uname -m`
elif [ -f /etc/debian_version ] ; then
    #Debian Version /etc/debian_version - Source: http://www.debian.org/doc/manuals/debian-faq/ch-software.en.html#s-isitdebian
    DISTRO="Debian"
    VERSION=`cat /etc/debian_version`
    INFO="$DISTRO $VERSION"
    PLATFORM=$INFO
    MACHINE=`uname -m`
else
    PLATFORM=`uname -sv | grep 'SunOS joyent'` > /dev/null
    if [ "$?" = "0" ]; then
        PLATFORM="SmartOS"
        DISTRO="SmartOS"
        VERSION=`cat /etc/product | grep 'Image' | awk '{ print $3}' | awk -F. '{print $1}'`
        MACHINE="i686"
        JOYENT=`cat /etc/product | grep 'Name' | awk '{ print $2}'`
    elif [ "$?" != "0" ]; then
        PLATFORM="unknown"
        DISTRO="unknown"
        MACHINE=`uname -m`
    fi
fi

IGNORE_RELEASE=0

while getopts ":a:k:g:t:T:p:i:" opt; do
    case $opt in
        a)
            ACCOUNT="$OPTARG" >&2
            ;;
        k)
            AGENTKEY="$OPTARG" >&2
            ;;
        g)
            GROUPNAME="$OPTARG" >&2
            ;;
        t)
            API_KEY="$OPTARG" >&2
            ;;
        T)
            TAGNAME="$OPTARG" >&2
            ;;
        p)
            PROVIDER="$OPTARG" >&2
            ;;
        i)
            PROVIDERID="$OPTARG" >&2
            ;;
        \?)
            exit
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            #exit 1
            ;;
    esac
done

if [ -z $ACCOUNT ]; then
    print_help
fi

if [ -z $AGENTKEY ]; then
    if [ "${HOSTNAME}" = "" ]; then
        echo "Host does not appear to have a hostname set!"
        exit 1
    fi

    if [ "${PROVIDERID}" != "" ]; then
        if [ -z "${PROVIDER}" ]; then
            echo "We need a provider argument if you pass a provider id!"
            exit 1
        fi
    fi

    echo ""
    echo "Using API key $API_KEY to automatically create device with hostname ${HOSTNAME}"
    echo ""

    TAG_ARG=""
    GROUP_ARG=""
    CLOUD_ARG=""

    if [ "${TAGNAME}" != "" ]; then
        TAGS=`curl --silent -X GET https://api.serverdensity.io/inventory/tags?token=${API_KEY}`
        # very messy way to get the tag ID without using any json tools
        TAGID=`echo $TAGS | sed -e $'s/},{/\\\n/g'| grep -i "\"$TAGNAME"\" | sed 's/.*"_id":"\([a-z0-9]*\)".*/\1/g'`

        if [ ! -z $TAGID ]; then
            echo "Found $TAGNAME, using tag ID $TAGID"
        else
            MD5=`which md5`
            if [ -z $MD5 ]; then
                MD5=`which md5sum`
            fi
            HEX="#`echo -n $TAGNAME | $MD5 | cut -c1-6`"
            echo "Creating tag $TAGNAME with random hex code $HEX"
            TAGS=`curl --silent -X POST https://api.serverdensity.io/inventory/tags?token=${API_KEY} --data "name=$TAGNAME&color=$HEX"`
            TAGID=`echo $TAGS | grep -i $TAGNAME | sed 's/.*"_id":"\([a-z0-9]*\)".*/\1/g'`
            echo "Tag created, using tag ID $TAGID"
        fi

        TAG_ARG="&tags=[\"${TAGID}\"]"
    fi

    if [ "${GROUPNAME}" != "" ]; then
        GROUP_ARG="&group=${GROUPNAME}"
    fi

    if [ "${PROVIDERID}" != "" ]; then
        CLOUD_ARG="&provider=${PROVIDER}&providerId=${PROVIDERID}"
        FILTER="\{\"provider\":\"${PROVIDER}\",\"providerId\":\"${PROVIDERID}\",\"type\":\"device\"\}"
        get_existing_device ${API_KEY} ${FILTER}
    fi

    if [ "${AGENTKEY}" = "" ]; then
        RESULT=`curl -v https://api.serverdensity.io/inventory/devices/?token=${API_KEY} --data "name=${HOSTNAME}${GROUP_ARG}${TAG_ARG}${CLOUD_ARG}"`
        exit_status=$?

        # an exit status of 1 indicates an unsupported protocol. (e.g.,
        # https hasn't been baked in.)
        if [ "$exit_status" -eq "1" ]; then
            echo "Your local version of curl has not been built with HTTPS support: `which curl`"
            exit 1

        # if the exit code is 7, that means curl couldnt connect so we can bail
        elif [ "$exit_status" -eq "7" ]; then
            echo "Could not connect to create server"
            exit 1

        # it appears that an exit code of 28 is also a can't connect error
        elif [ "$exit_status" -eq "28" ]; then
            echo "Could not connect to create server"
            exit 1

        elif [ "$exit_status" -ne "0" ]; then
            echo "Error connecting to api.serverdensity.io; status $exit_status."
            exit 1
        fi

        AGENTKEY=`echo $RESULT | sed 's/\\\\\//\//g' | sed 's/[{}]//g' | awk -v k="text" '{n=split($0,a,","); for (i=1; i<=n; i++) print a[i]}' | sed 's/\"\:\"/\|/g' | sed 's/[\,]/ /g' | sed 's/\"//g' | grep -w agentKey | cut -d"|" -f2| sed -e 's/^ *//g' -e 's/ *$//g'`

        if [ "$AGENTKEY" = "" ]; then
            echo "Unknown error communicating with api.serverdensity.io: $OUTPUT"
            exit 1
        elif [ "$AGENTKEY" = "401" ]; then
            echo "Authentication error: $OUTPUT"
            echo "Verify that you have passed in the correct account URL and API token"
            exit 1
        elif [ "$AGENTKEY" = "403" ]; then
            echo "Forbidden error: $OUTPUT"
            echo "Verify that you have passed in the correct account URL and API token"
            exit 1
        fi
    fi
fi

echo ""
echo "Server Density Agent Installer"
echo ""
echo "Account: $ACCOUNT"
echo "Agent Key: $AGENTKEY"
echo "OS: $DISTRO $VERSION..."
echo ""

if [ $MACHINE = "i686" ]; then
    ARCH="32"
    SUPPORTED_ARCH=1
fi

if [ $MACHINE = "x86_64" ] || [ $MACHINE = "amd64" ]; then
    ARCH="64"
    SUPPORTED_ARCH=1
fi

if [ $SUPPORTED_ARCH -eq 0 ]; then
    echo "Unsupported architecture ($MACHINE) ..."
    echo "This is an unsupported platform for the sd-agent."
    echo "Please contact [email protected] to request support for this architecture."
    exit 1
fi

# Check the distribution
for d in ${PLATFORMS[*]} ; do
    if [ $DISTRO = $d ]; then
        SUPPORTED_PLATFORM=1
        break
    fi
done
if [ $SUPPORTED_PLATFORM -eq 0 ]; then
    echo "Your platform is not supported by this script, but you may be able to do a manual install. Select Manual Install from the dropdown in the web UI."
    echo ""
    print_supported_platforms
    exit 0
fi

if [ $IGNORE_RELEASE -ne 1 ]; then
    # Check the version number
    check_distro_version "$PLATFORM" $DISTRO $VERSION
    if [ $? -ne 0 ]; then
        IGNORE_RELEASE=1
        echo "Detected $PLATFORM $DISTRO $VERSION"
    fi
fi

# The version number hasn't been found; let's just try and masquerade
# (and tell users what we're doing)
if [ $IGNORE_RELEASE -eq 1 ] ; then
    TEMP="\${${DISTRO}_VERSIONS[*]}"
    VERSIONS=`eval echo $TEMP`
    # Assume ordered list; grab latest version
    VERSION=`echo $VERSIONS | awk '{print $NF}'`
    MAJOR_VERSION=`echo $VERSION | awk -F. '{print $1}'`
    MINOR_VERSION=`echo $VERSION | awk -F. '{print $2}'`

    echo ""
    echo "Continuing; for reference, script is masquerading as: $DISTRO $VERSION"
    echo ""
fi

# At this point, we think we have a supported OS.
pre_install_sanity $d $v

do_install
configure_agent

if [ $? -ne 0 ]; then
    echo "I added the correct repositories, but the agent installation failed."
    echo "Please contact [email protected] about this problem."
    exit 1
fi

echo ""
echo "The agent has been installed successfully!"
echo "Head back to $ACCOUNT to see your stats and set up some alerts."
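# Example invocation with automatic device creation via the API; the account
# URL, token and group name below are illustrative placeholders, not real
# credentials:
# ./install.sh -a https://example.serverdensity.io -t 0123456789abcdef -g webservers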
canerdogan/sd-agent
install.sh
Shell
bsd-3-clause
18,505
#! /bin/sh

do_setup() {
    true
}

do_test() {
    ubongo-solve-3d board03 A G I M
}

do_teardown() {
    true
}

. ./do-test.sh
m-kasahr/ubongo-utils
test/3d/board03-solve-01.sh
Shell
bsd-3-clause
131
#!/bin/bash

set -ex

zip=$1
if [ ! -f "$zip" ]; then
    echo "usage: $0 toupcamsdk.zip" >&2
    exit 1
fi
zip=$(cd $(dirname "$zip"); /bin/pwd)/$(basename "$zip")

SRC=$(cd $(dirname "$0")/..; /bin/pwd)

TMP=/tmp/toupcamsdk.$$
trap "rm -rf $TMP" INT TERM QUIT EXIT

mkdir -p $TMP
cd $TMP
unzip "$zip"

# includes
cp ./inc/toupcam.h "$SRC"/cameras/

# Windows libs
cp ./win/x86/toupcam.dll "$SRC"/WinLibs/
cp ./win/x86/toupcam.lib "$SRC"/cameras/

# Linux libs
for arch in x86 x64 armhf armel arm64; do
    cp ./linux/$arch/libtoupcam.so "$SRC"/cameras/toupcam/linux/$arch/
done

# Mac
cp ./mac/libtoupcam.dylib "$SRC"/cameras/toupcam/mac/
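# Example run (the SDK zip path is a placeholder):
# ./build/unpack_toupcam_sdk.sh ~/Downloads/toupcamsdk.zip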
OpenPHDGuiding/phd2
build/unpack_toupcam_sdk.sh
Shell
bsd-3-clause
641
#!/bin/sh
export ALINOUS_HOME=${OPENSHIFT_ALINOUS_DIR}ALINOUS_HOME/
export CATALINA_PID=${OPENSHIFT_ALINOUS_DIR}run/alinous.pid
alinous-core/scalable-alinous-cartridge
versions/apache-tomcat-7.0.55/bin/setenv.sh
Shell
mit
130
#!/bin/bash

GHCID_FILE=./ghcid.txt
echo '...' > "$GHCID_FILE"

emacsclient "$GHCID_FILE" &

COMMAND='nix-shell --run "cabal repl enumerate"'
ghcid -o "$GHCID_FILE" --command "$COMMAND"
sboosali/enumerate
watch.sh
Shell
mit
187