Dataset columns:

  code        string  (length 2 to 1.05M)
  repo_name   string  (length 5 to 110)
  path        string  (length 3 to 922)
  language    string  (1 value)
  license     string  (15 values)
  size        int64   (2 to 1.05M)

Each record below follows this column order: the code sample, then its repo_name, path, language, license, and size.
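As a rough illustration of how a dump with this schema might be consumed, here is a minimal sketch using the Hugging Face datasets library; the dataset identifier "user/shell-scripts" is a placeholder rather than this dump's real name, and streaming mode is assumed so the larger samples are not all loaded at once.

from datasets import load_dataset

# Placeholder identifier -- substitute the actual name of this dump.
ds = load_dataset("user/shell-scripts", split="train", streaming=True)

# Each record exposes the six columns listed above.
for record in ds:
    print(record["repo_name"], record["path"], record["license"], record["size"])
    if record["language"] == "Shell" and record["size"] < 500:
        print(record["code"])  # the raw script text
        break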
#!/bin/bash
apt-get update
apt-get --yes install libmysqlclient-dev mysql-server-5.5
curl -L https://get.rvm.io | bash -s stable --autolibs=3 --ruby
source /usr/local/rvm/scripts/rvm
rvm in /vagrant do bundle install
cd /vagrant
rake
pressable/mysqlnoio
vagrant/provision.sh
Shell
gpl-3.0
239
#!/bin/bash
head -c $RANDOM /dev/urandom > rnd.txt
ItsLastDay/academic_university_2016-2018
subjects/Bash&Python/bash_hw1/3.sh
Shell
gpl-3.0
51
#!/bin/bash
res=`find . -name *.pyc`
if ! [ -z "$res" ]; then
    rm $res
    echo "Clean done"
else
    echo "Nothing to clean"
fi
OlivierB/Network-Display-On-Pi
other/python_test/clean.sh
Shell
gpl-3.0
123
#!/bin/bash
cd /opt/rocrail/
rm -f nohup.out
nohup ./rocrail -l /opt/rocrail -lcd &
echo "$!" > rocrail.pid
KlausMerkert/FreeRail
doc/opt/rocrail/rocrail.sh
Shell
gpl-3.0
113
#!/bin/bash
#
# Unattended/SemiAutomatted OpenStack Installer
# Reynaldo R. Martinez P.
# E-Mail: [email protected]
# OpenStack KILO for Centos 7
#
#

PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin

if [ -f ./configs/main-config.rc ]
then
    source ./configs/main-config.rc
    mkdir -p /etc/openstack-control-script-config
else
    echo "Can't access my config file. Aborting !"
    echo ""
    exit 0
fi

if [ -f /etc/openstack-control-script-config/db-installed ]
then
    echo ""
    echo "DB Proccess OK. Let's Continue"
    echo ""
else
    echo ""
    echo "DB Proccess not completed. Aborting !"
    echo ""
    exit 0
fi

if [ -f /etc/openstack-control-script-config/keystone-installed ]
then
    echo ""
    echo "Keystone Proccess OK. Let's continue"
    echo ""
else
    echo ""
    echo "DB Proccess not completed. Aborting !"
    echo ""
    exit 0
fi

if [ -f /etc/openstack-control-script-config/keystone-extra-idents ]
then
    echo ""
    echo "This module was already completed. Exiting"
    echo ""
    exit 0
fi

source $keystone_fulladmin_rc_file

echo ""
echo "Creating GLANCE Identities"
echo ""

echo "Glance User"
openstack user create --password $glancepass --email $glanceemail $glanceuser

echo "Glance Role"
openstack role add --project $keystoneservicestenant --user $glanceuser $keystoneadminuser

echo "Glance Service"
openstack service create \
    --name $glancesvce \
    --description "OpenStack Image service" \
    image

echo "Glance Endpoint"
openstack endpoint create \
    --publicurl "http://$glancehost:9292" \
    --internalurl "http://$glancehost:9292" \
    --adminurl "http://$glancehost:9292" \
    --region $endpointsregion \
    image

echo ""
echo "GLANCE Identities DONE"
echo ""
tigerlinux/openstack-kilo-installer-centos7
modules/keystone-glance.sh
Shell
gpl-3.0
1,673
#!/bin/sh
## Copyright (C) 2014-2020 Assaf Gordon <[email protected]>
##
## This file is part of GNU Datamash.
##
## GNU Datamash is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## GNU Datamash is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with GNU Datamash. If not, see <https://www.gnu.org/licenses/>.
##
## A small helper script to build (possibly static) binary executable
##

die()
{
    BASE=$(basename "$0")
    echo "$BASE error: $@" >&2
    exit 1
}

cd $(dirname "$0")/.. || die "failed to set directory"

DATAMASHVER=$(./build-aux/git-version-gen .tarball-version) || die "can't get datamash version"

KERNEL=cygwin
MACHINE=win64
SRC=./datamash.exe
[ -e "$SRC" ] || die "Expected program file '$SRC' not found"

DATE=$(date -u +"%F-%H%M%S")
NAME="datamash-${DATAMASHVER}-bin__${KERNEL}__${MACHINE}"

mkdir -p "bin/$NAME" || die "failed to create 'bin/$NAME' directory"
cp "$SRC" "bin/$NAME/datamash.exe" || die "failed to create destination binary (bin/$NAME/datamash)"

# Copy additional Cygwin DLLs
DLLS=$(ldd "$SRC" | awk '{print $1}' | grep "^cyg.*\.dll") || die "Failed to detect DLLs"
for d in $DLLS ; do
    FULLPATH=$(which "$d") || die "Failed to find full path of DLL '$d'"
    cp "$FULLPATH" "bin/$NAME" || die "failed to copy DLL '$FULLPATH' to 'bin/$NAME'"
done

cd "bin" || die
zip -r "$NAME.zip" "$NAME" || die "failed to create TarBall for binary executable"
cd ".."

echo "Done. File ="
echo "  ./bin/$NAME.zip"
echo "Upload to AWS S3:"
echo "  ./build-aux/aws-upload.sh ./bin/$NAME.zip bin"
echo
agordon/datamash
build-aux/make-cygwin-dist.sh
Shell
gpl-3.0
1,999
#!/bin/sh #------------------------------------------------------------------- # config.sh: This file is read at the beginning of the execution of the ASGS to # set up the runs that follow. It is reread at the beginning of every cycle, # every time it polls the datasource for a new advisory. This gives the user # the opportunity to edit this file mid-storm to change config parameters # (e.g., the name of the queue to submit to, the addresses on the mailing list, # etc) #------------------------------------------------------------------- # # Copyright(C) 2012--2014 Jason Fleming # # This file is part of the ADCIRC Surge Guidance System (ASGS). # # The ASGS is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # ASGS is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # the ASGS. If not, see <http://www.gnu.org/licenses/>. #------------------------------------------------------------------- # Fundamental INSTANCENAME=namcpra2017 # "name" of this ASGS process COLDSTARTDATE=2015071800 # calendar year month day hour YYYYMMDDHH24 HOTORCOLD=coldstart # "hotstart" or "coldstart" LASTSUBDIR=null # path to previous execution (if HOTORCOLD=hotstart) HINDCASTLENGTH=30.0 # length of initial hindcast, from cold (days) REINITIALIZESWAN=no # used to bounce the wave solution # Source file paths ADCIRCDIR=~/adcirc/v51release/work # ADCIRC executables SCRIPTDIR=~/asgs/2014stable # ASGS executables INPUTDIR=${SCRIPTDIR}/input/meshes/cpra2017 # grid and other input files OUTPUTDIR=${SCRIPTDIR}/output # post processing scripts PERL5LIB=${SCRIPTDIR}/PERL # DateCale.pm perl module # Physical forcing BACKGROUNDMET=on # NAM download/forcing TIDEFAC=on # tide factor recalc TROPICALCYCLONE=off # tropical cyclone forcing WAVES=off # wave forcing VARFLUX=off # variable river flux forcing # Computational Resources TIMESTEPSIZE=1.0 # adcirc time step size (seconds) SWANDT=600 # swan time step size (seconds) HINDCASTWALLTIME="23:00:00" # hindcast wall clock time ADCPREPWALLTIME="02:00:00" # adcprep wall clock time, including partmesh NOWCASTWALLTIME="08:00:00" # longest nowcast wall clock time FORECASTWALLTIME="08:00:00" # forecast wall clock time NCPU=960 # number of compute CPUs for all simulations NCPUCAPACITY=2880 CYCLETIMELIMIT=99:00:00 # queue ACCOUNT=ERDCV00898N10 #ACCOUNT=ERDCV00898HSP #QUEUENAME=high #SERQUEUE=high #QUEUENAME=R130625 #SERQUEUE=R130625 QUEUENAME=standard SERQUEUE=standard # External data sources : Tropical cyclones STORM=04 # storm number, e.g. 
05=ernesto in 2006 YEAR=2015 # year of the storm TRIGGER=rssembedded # either "ftp" or "rss" #RSSSITE=filesystem #FTPSITE=filesystem #FDIR=~/asgs/input #HDIR=~/asgs/input RSSSITE=www.nhc.noaa.gov # site information for retrieving advisories FTPSITE=ftp.nhc.noaa.gov # hindcast/nowcast ATCF formatted files FDIR=/atcf/afst # forecast dir on nhc ftp site HDIR=/atcf/btk # hindcast dir on nhc ftp site # External data sources : Background Meteorology FORECASTCYCLE="06" BACKSITE=ftp.ncep.noaa.gov # NAM forecast data from NCEP BACKDIR=/pub/data/nccf/com/nam/prod # contains the nam.yyyymmdd files FORECASTLENGTH=84 # hours of NAM forecast to run (max 84) PTFILE=ptFile_oneEighth.txt # the lat/lons for the OWI background met ALTNAMDIR="/work/jgflemin/asgs30658" # External data sources : River Flux RIVERSITE=ftp.nssl.noaa.gov RIVERDIR=/projects/ciflow/adcirc_info RIVERUSER=ldm RIVERDATAPROTOCOL=scp # Input files and templates GRIDFILE=cpra_2017_v07a_chk.grd # mesh (fort.14) file GRIDNAME=cpra_2017_v07a_chk CONTROLTEMPLATE=cpra_2017_v07a_626900cfs.15.template # fort.15 template #ELEVSTATIONS=hsdrrs_2014_stations.txt # or substitute your own stations file #VELSTATIONS=hsdrrs_2014_stations.txt #METSTATIONS=hsdrrs_2014_stations.txt ELEVSTATIONS=cpra2017v07_stations.txt # or substitute your own stations file VELSTATIONS=cpra2017v07_stations.txt METSTATIONS=cpra2017v07_stations.txt NAFILE=cpra_2017_v07a.13 #NAFILE=cpra_2017_v07a_datum15cm.13 SWANTEMPLATE=cpra_2017_v07a.26.template # only used if WAVES=on RIVERINIT=null # this mesh no variable flux rivers RIVERFLUX=null HINDCASTRIVERFLUX=null PREPPEDARCHIVE=prepped_${GRIDNAME}_${INSTANCENAME}_${NCPU}.tar.gz HINDCASTARCHIVE=prepped_${GRIDNAME}_hc_${INSTANCENAME}_${NCPU}.tar.gz # Output files # water surface elevation station output FORT61="--fort61freq 900.0 --fort61netcdf" # water current velocity station output FORT62="--fort62freq 0" # full domain water surface elevation output FORT63="--fort63freq 3600.0 --fort63netcdf" # full domain water current velocity output FORT64="--fort64freq 0" # met station output FORT7172="--fort7172freq 3600.0 --fort7172netcdf" # full domain meteorological output FORT7374="--fort7374freq 3600.0 --fort7374netcdf" #SPARSE="--sparse-output" SPARSE="" NETCDF4="--netcdf4" OUTPUTOPTIONS="${SPARSE} ${NETCDF4} ${FORT61} ${FORT62} ${FORT63} ${FORT64} ${FORT7172} ${FORT7374}" # fulldomain or subdomain hotstart files HOTSTARTCOMP=fulldomain # binary or netcdf hotstart files HOTSTARTFORMAT=netcdf # "continuous" or "reset" for maxele.63 etc files MINMAX=reset # Notification EMAILNOTIFY=yes # yes to have host HPC platform email notifications NOTIFY_SCRIPT=corps_nam_notify.sh ACTIVATE_LIST="[email protected]" NEW_ADVISORY_LIST="[email protected]" POST_INIT_LIST="[email protected]" POST_LIST="[email protected]" JOB_FAILED_LIST="[email protected]" [email protected] [email protected] # Post processing and publication INITPOST=null_init_post.sh POSTPROCESS=corps_post.sh POSTPROCESS2=null_post.sh TARGET=garnet WEBHOST=alpha.he.net WEBUSER=seahrse WEBPATH=/home/seahrse/public_html/ASGS OPENDAPHOST=br0.renci.org OPENDAPUSER=ncfs OPENDAPBASEDIR=/projects/ncfs/opendap/data NUMCERASERVERS=3 # Archiving ARCHIVE=null_archive.sh ARCHIVEBASE=/projects/ncfs/data ARCHIVEDIR=archive # Common control properties SLAM0=265.5 SFEA0=29.0 INTENDEDAUDIENCE=developers-only # Forecast ensemble members RMAX=default PERCENT=default ENSEMBLESIZE=1 # number of storms in the ensemble case $si in -1) # do nothing ... 
this is not a forecast ;; 0) ENSTORM=namforecast ;; 1) ENSTORM=veerRight50 PERCENT=50 ;; 2) ENSTORM=veerLeft50 PERCENT=-50 ;; *) echo "CONFIGRATION ERROR: Unknown ensemble member number: '$si'." ;; esac
jasonfleming/asgs
config/2015/asgs_config_nam_garnet_cpra2017.sh
Shell
gpl-3.0
7,179
#!/bin/bash
qmake TestManyDigitNewickDesktop.pro
make
richelbilderbeek/TestManyDigitNewick
build_desktop.sh
Shell
gpl-3.0
54
#!/bin/bash
echo "Introduce la IP de la antena"
read HOST
USER="ubnt"
PASS="ubnt"
echo "$USER@$HOST $PASS"
echo y | plink -ssh $HOST -l $USER -pw $PASS "sleep 2 && cfgmtd -w -p /etc/"
procamora/Scripts-Bash
ubnt_save.sh
Shell
gpl-3.0
185
# Goto: http://www-unix.mcs.anl.gov/mpi/mpich/
cd /home/jon
tar cvzpf sshhome.tgz .ssh
for fil in `/bin/cat /etc/hostsalone`
do
    echo $fil doing
    scp /home/jon/sshhome.tgz $fil:/home/jon/
    ssh $fil "cd /home/jon ; tar xvzpf sshhome.tgz"
    echo $fil done
done
# (done!)
pseudotensor/harm_harmgit
scripts/mysh/sshhome.sh
Shell
gpl-3.0
270
#!/bin/sh
valgrind --tool=helgrind --log-file=helgrind.txt ../ToolTestServerPusher-build-desktop/./ToolTestServerPusher
richelbilderbeek/TestServerPusher
helgrind.sh
Shell
gpl-3.0
120
#!/bin/bash
#
# Copyright (C) 2013 by Massimo Lauria
#
# Created   : "2013-04-17, Wednesday 17:55 (CEST) Massimo Lauria"
# Time-stamp: "2013-04-17, 18:22 (CEST) Massimo Lauria"
#
# Description::
#
# Desktop search tool built around recoll and dmenu
# http://blog.desdelinux.net/como-buscar-archivos-a-texto-completo-en-distros-linux-livianas/
#

# Configuration
QUERY_TOOL="recoll -b -t"
PROMPT_THEME='-nf #dcdcdc -nb #2f2f2f -sb #a6c292 -sf black'

# Code::

# Use argument or query interactively.
if [ -z "$@" ]; then
    QUERY=`dmenu $PROMPT_THEME -p "Chercher:" </dev/null`
else
    QUERY="$@"
fi

DOC=$($QUERY_TOOL "$QUERY" | grep 'file://' \
    | sed -e 's|^ *file://||' | sed -e "s|$HOME/||" \
    | perl -e 'use URI::Escape; print uri_unescape(<STDIN>);' \
    | dmenu -p 'Choisir:' \
        -i $PROMPT_THEME -l 20)

if [ "x$DOC" != x ]; then
    mimeopen "$HOME/$DOC"
fi
csantosb/myscripts
search-recoll-dmenu.sh
Shell
gpl-3.0
867
#!/bin/sh

pkgname=python-scipy
pkgver=1.0.1
vcs=git
vcs_pkgname=scipy
gittag=v${pkgver}

# beta versions
#relmon_id=4768
#relmon_sed='s/v//g'

kiin_make() {
    python setup.py config_fc --fcompiler=gnu95 build
}

kiin_install() {
    python setup.py config_fc --fcompiler=gnu95 install \
        --prefix=/usr --root=${pkgdir} --optimize=1
}
alekseyrybalkin/kiin-repo
python-modules/python-scipy/package.sh
Shell
gpl-3.0
343
#!/bin/sh

if [ `whoami` = "root" ] && [ ! -w /usr/bin/sudo ]; then
    apt-get -y --ignore-missing install $*
elif [ `whoami` != "root" ] && [ ! -z "$DISPLAY" ]; then
    if [ -x /usr/bin/gksudo ]; then
        ROOT="/usr/bin/gksudo"
    elif [ -x /usr/bin/kdesudo ]; then
        ROOT="/usr/bin/kdesudo"
    elif [ -x /usr/bin/sudo ]; then
        ROOT="/usr/bin/sudo"
    fi
elif [ -z "$DISPLAY" ]; then
    sudo -- apt-get -y --ignore-missing install $*
else
    su -c "apt-get -y --ignore-missing install $*"
    exit
fi

# if [ -x /usr/bin/aptitude ]; then
    # aptitude is nice since it doesn't fail if a non-existant package is hit
    # See: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=503215
#    $ROOT -- "aptitude -y install $*"
#else
    $ROOT -- su -c "apt-get -y --ignore-missing install $*"
#fi
BryanQuigley/phoronix-test-suite
pts-core/external-test-dependencies/scripts/install-ubuntu-packages.sh
Shell
gpl-3.0
762
#!/usr/bin/expect

set HOSTNAME  [lindex $argv 0];
set USERNAME  [lindex $argv 1];
set PASSWORD  [lindex $argv 2];
set TFTP_HOST [lindex $argv 3];
set TFTP_DIR  [lindex $argv 4];

if { $PASSWORD == "none" } {
    set PASSWORD ""
}

spawn telnet $HOSTNAME
expect "User:"
send "$USERNAME\r"
expect "Password:"
send "$PASSWORD\r"
expect " >"
send "ena\r"
expect "Password:"
send "\r"
expect " #"
send "terminal length 0\r"
log_file $TFTP_DIR/$HOSTNAME.cfg;
send "show running-config\r"
send "logout\r"
expect "Would you like to save them now? (y/n)"
send "n\r"
log_file;
expect "Connection closed by foreign host."
exit 0

#copy binary file
#send "copy nvram:startup-config tftp://$TFTP_HOST/$HOSTNAME.cfg\r"
#expect "Are you sure you want to start? (y/n)"
#send "y\r"
#expect "File transfer operation completed successfully."
#send "logout\r"
#expect "Would you like to save them now? (y/n)"
#send "n\r"
#expect "Connection closed by foreign host."
andrasbabos/scripts
routerconfigs/bin/netgear.sh
Shell
gpl-3.0
941
#!/bin/bash -f
# Vivado (TM) v2016.4 (64-bit)
#
# Filename    : blk_mem_gen_0.sh
# Simulator   : Mentor Graphics Questa Advanced Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
#               The script will automatically create the design libraries sub-directories in the run
#               directory, add the library logical mappings in the simulator setup file, create default
#               'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Thu Dec 22 18:04:56 +0800 2016
# IP Build 1731160 on Wed Dec 14 23:47:21 MST 2016
#
# usage: blk_mem_gen_0.sh [-help]
# usage: blk_mem_gen_0.sh [-lib_map_path]
# usage: blk_mem_gen_0.sh [-noclean_files]
# usage: blk_mem_gen_0.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'blk_mem_gen_0.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
# ********************************************************************************************************

# Script info
echo -e "blk_mem_gen_0.sh - Script generated by export_simulation (Vivado v2016.4 (64-bit)-id)\n"

# Main steps
run()
{
  check_args $# $1
  setup $1 $2
  compile
  elaborate
  simulate
}

# RUN_STEP: <compile>
compile()
{
  # Compile design files
  source compile.do 2>&1 | tee -a compile.log
}

# RUN_STEP: <elaborate>
elaborate()
{
  source elaborate.do 2>&1 | tee -a elaborate.log
}

# RUN_STEP: <simulate>
simulate()
{
  vsim -64 -c -do "do {simulate.do}" -l simulate.log
}

# STEP: setup
setup()
{
  case $1 in
    "-lib_map_path" )
      if [[ ($2 == "") ]]; then
        echo -e "ERROR: Simulation library directory path not specified (type \"./blk_mem_gen_0.sh -help\" for more information)\n"
        exit 1
      fi
      copy_setup_file $2
    ;;
    "-reset_run" )
      reset_run
      echo -e "INFO: Simulation run files deleted.\n"
      exit 0
    ;;
    "-noclean_files" )
      # do not remove previous data
    ;;
    * )
      copy_setup_file $2
  esac

  # Add any setup/initialization commands here:-
  # <user specific commands>
}

# Copy modelsim.ini file
copy_setup_file()
{
  file="modelsim.ini"
  if [[ ($1 != "") ]]; then
    lib_map_path="$1"
  else
    lib_map_path="D:/Document/Verilog/VGA/VGA.cache/compile_simlib/questa"
  fi
  if [[ ($lib_map_path != "") ]]; then
    src_file="$lib_map_path/$file"
    cp $src_file .
  fi
}

# Delete generated data from the previous run
reset_run()
{
  files_to_remove=(compile.log elaborate.log simulate.log vsim.wlf work msim)
  for (( i=0; i<${#files_to_remove[*]}; i++ )); do
    file="${files_to_remove[i]}"
    if [[ -e $file ]]; then
      rm -rf $file
    fi
  done
}

# Check command line arguments
check_args()
{
  if [[ ($1 == 1 ) && ($2 != "-lib_map_path" && $2 != "-noclean_files" && $2 != "-reset_run" && $2 != "-help" && $2 != "-h") ]]; then
    echo -e "ERROR: Unknown option specified '$2' (type \"./blk_mem_gen_0.sh -help\" for more information)\n"
    exit 1
  fi
  if [[ ($2 == "-help" || $2 == "-h") ]]; then
    usage
  fi
}

# Script usage
usage()
{
  msg="Usage: blk_mem_gen_0.sh [-help]\n\
Usage: blk_mem_gen_0.sh [-lib_map_path]\n\
Usage: blk_mem_gen_0.sh [-reset_run]\n\
Usage: blk_mem_gen_0.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
  echo -e $msg
  exit 1
}

# Launch script
run $1 $2
hsnuonly/PikachuVolleyFPGA
VGA.ip_user_files/sim_scripts/blk_mem_gen_0_1/questa/blk_mem_gen_0.sh
Shell
gpl-3.0
4,690
#!/bin/bash
# A script to reset your nginx configs to the latest versions "upgrading" nginx
# Beware, this script *will* overwrite any personal modifications you have made.
# Author: liara

hostname=$(grep -m1 "server_name" /etc/nginx/sites-enabled/default | awk '{print $2}' | sed 's/;//g')
locks=($(find /usr/local/bin/swizzin/nginx -type f -printf "%f\n" | cut -d "." -f 1 | sort -d -r))

if [[ ! -f /install/.nginx.lock ]]; then
    echo_error "nginx doesn't appear to be installed. What do you hope to accomplish by running this script?"
    exit 1
fi

for i in "${locks[@]}"; do
    app=${i}
    if [[ -f /install/.$app.lock ]]; then
        rm -f /etc/nginx/apps/$app.conf
    fi
done

rm -f /etc/nginx/apps/dindex.conf
rm -f /etc/nginx/apps/rindex.conf
rm -f /etc/nginx/apps/*.scgi.conf
rm -f /etc/nginx/sites-enabled/default
rm -f /etc/nginx/conf.d/*
rm -f /etc/nginx/snippets/{ssl-params,proxy,fancyindex}.conf

. /etc/swizzin/sources/functions/php
phpversion=$(php_service_version)
sock="php${phpversion}-fpm"

if [[ ! -f /etc/nginx/modules-enabled/50-mod-http-fancyindex.conf ]]; then
    ln -s /usr/share/nginx/modules-available/mod-http-fancyindex.conf /etc/nginx/modules-enabled/50-mod-http-fancyindex.conf
fi

for i in NGC SSC PROX FIC; do
    cmd=$(sed -n -e '/'$i'/,/'$i'/ p' /etc/swizzin/scripts/install/nginx.sh)
    eval "$cmd"
done

if [[ ! $hostname == "_" ]]; then
    sed -i "s/server_name _;/server_name $hostname;/g" /etc/nginx/sites-enabled/default
    sed -i "s/ssl_certificate .*/ssl_certificate \/etc\/nginx\/ssl\/${hostname}\/fullchain.pem;/g" /etc/nginx/sites-enabled/default
    sed -i "s/ssl_certificate_key .*/ssl_certificate_key \/etc\/nginx\/ssl\/${hostname}\/key.pem;/g" /etc/nginx/sites-enabled/default
fi

for i in "${locks[@]}"; do
    app=${i}
    if [[ -f /install/.$app.lock ]]; then
        echo_progress_start "Reinstalling nginx config for $app"
        /usr/local/bin/swizzin/nginx/$app.sh
        echo_progress_done
    fi
done

systemctl reload nginx
liaralabs/swizzin
scripts/upgrade/nginx.sh
Shell
gpl-3.0
1,959
export DATABASE_PASSWORD='your_password'
masasam/dotfiles
backup_sample/zsh/env.sh
Shell
gpl-3.0
41
#!/bin/sh
libtoolize
aclocal
automake --add-missing -cf
autoconf
bboozzoo/sbus
autogen.sh
Shell
gpl-3.0
65
### check if file exists
file="onelm_multi_lvlearn"
if [ -f "../$file" ]
then
    echo "Remove $file."
    rm ../$file
else
    echo "$file not found."
fi

cd ..

### compile c++ code
if [ "$1" == "all" ] || [ "$1" == "compile" ] || [ "$1" == "run" ] ; then
    echo "Compile."
    g++ test/lvlearn_multi_onelm.cpp src/agent.cpp src/environment.cpp src/simulation.cpp src/controller.cpp src/goal.cpp src/landmark.cpp src/pipe.cpp src/object.cpp src/pin.cpp src/goallearning.cpp src/routelearning.cpp -std=c++11 -o $file -O1 -larmadillo
fi

### run program
if [ "$1" == "all" ] || [ "$1" == "run" ] ; then
    echo "Run program."
    ./$file
fi

cd data/scripts

### plot data using gnuplot gui
if [ "$1" = "all" ] || [ "$1" = "plot" ] ; then
    echo "Plot data."
    #gnuplot track_vectors.plot
    #gnuplot histogram.gnu
    #gnuplot stat_distance.plot
    #gnuplot track.plot
    gnuplot track_local.plot
    gnuplot track_lmr_vectors.plot
    gnuplot distri.plot
    #gnuplot activations.plot
    #gnuplot gv.plot
    gnuplot lv.plot
    gnuplot lv_signals.plot
    gnuplot signals.plot
    gnuplot ref.plot
    #gnuplot reward.plot
    #python circle.py
    #python circle_gv.py
    #gnuplot gv_performance.plot
    python density.py
fi

cd ..

### backup data with timestamp
if [ "$1" = "all" ] || [ "$1" = "run" ] ; then
    echo "Backup data."
    timestamp=$( date +"%y%m%d-%T")
    mkdir ../data_container/lvlearn_multi_onelm/$timestamp/
    cp *.dat ../data_container/lvlearn_multi_onelm/$timestamp/
fi

if [ "$1" = "" ] ; then
    echo "Nothing"
fi

echo "Done."
degoldcode/NaviSim
test/lvlearn_multi_onelm.sh
Shell
gpl-3.0
1,450
#!/bin/bash
# file: scripts/mqtt.d/entrance_door.sh
# host: palermo (vkhome-fi)

. ~/scripts/create_pid_or_exit_on_existing_pid.sh
. /usr/local/lib/mqtt_lib.sh

# by default:
# _payload_parser='_parse_as_stamped_plaintext'
#   the payload contains two words: the value and the timestamp (ex.: 28.6 @1429627454)
# MQTT_root_topic='vkhome-fi'
#   topics begin from vkhome-fi (ex.: vkhome-fi/sensors/count-entrance_doors)

# ENTRANCE DOORS - if doors opened
while true; do
    mqtt_monitor_topic 'sensors/count-entrance_doors' \
        'opened(%s)' '' '' '' 'mqtt_pub_stamped_plaintext security/doors -r'
    sleep 60
done
vkonst/tele-dacha
palermo/home/_username_/scripts/mqtt.d/entrance_door.sh
Shell
gpl-3.0
608
#! /bin/bash
git pull
scons -c clear
scons -j 4
endeav0r/Rainbows-And-Pwnies-Tools
pullbuild.sh
Shell
gpl-3.0
48
#!/bin/bash
# @see https://blog.golang.org/cover

# Generate the coverage report
instrument(){
    go test -covermode=count -coverprofile=validation/"$1".out "./$1"
}

# Print coverage report as a nice html
cover(){
    go tool cover -html=validation/"$1".out
}

instrument event
instrument person
instrument habit
instrument timetogo

# generate report
cover event
cover person
cover habit
cover timetogo
eric-burel/pog
coverage.sh
Shell
gpl-3.0
396
#!/bin/bash
width=$1

read -r -d '' HEAD <<- EOM
<?xml version="1.0"?>
<!DOCTYPE rdf:RDF [
<!ENTITY quest "http://obda.org/quest#" >
<!ENTITY owl "http://www.w3.org/2002/07/owl#" >
<!ENTITY xsd "http://www.w3.org/2001/XMLSchema#" >
<!ENTITY rdfs "http://www.w3.org/2000/01/rdf-schema#" >
<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#" >
]>
<rdf:RDF xmlns="http://www.w3.org/2002/07/owl#"
xml:base="http://www.ifis.uni-luebeck.de/LinearRoadBenchmark#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:owl="http://www.w3.org/2002/07/owl#"
xmlns:xsd="http://www.w3.org/2001/XMLSchema#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<owl:Ontology rdf:about="http://www.ifis.uni-luebeck.de/LinearRoadBenchmark#"/>
<owl:DatatypeProperty rdf:about="#hasToll">
<rdfs:domain rdf:resource="#Car"/>
<rdfs:range rdf:resource="&xsd;integer"/>
</owl:DatatypeProperty>
<owl:Class rdf:about="#VID">
</owl:Class>
EOM

echo "$HEAD"
printf "\n"

for feature in `seq 0 $(($width-1))`; do
    leftSubFeature=$((2*feature))
    rightSubFeature=$((leftSubFeature+1))
    F=Feature$feature
    subHasL=hasSubFeature$leftSubFeature
    subHasR=hasSubFeature$rightSubFeature
    printf "<owl:Class rdf:about=\"#$F\">\n</owl:Class>\n\n"
    printf "<owl:ObjectProperty rdf:about=\"#has$F\">\n<rdfs:domain rdf:resource=\"#VID\"/>\n<rdfs:range rdf:resource=\"#$F\"/>\n</owl:ObjectProperty>\n\n"
    printf "<owl:ObjectProperty rdf:about=\"#$subHasL\">\n<rdfs:subPropertyOf rdf:resource=\"#has$F\"/>\n</owl:ObjectProperty>\n\n"
    printf "<owl:ObjectProperty rdf:about=\"#$subHasR\">\n<rdfs:subPropertyOf rdf:resource=\"#has$F\"/>\n</owl:ObjectProperty>\n\n"
done

echo "</rdf:RDF>"
OntLRB/Benchmark
Generators/makeFeatureOntology.sh
Shell
gpl-3.0
1,670
#!/bin/sh
#tests comment commands

cd ../src
g++ main.cpp -o rshell

echo ""
echo "Testing comment commands now:"

echo "Testing 'ls; #comment at end':"
echo "ls; exit; #comment at end" | ./a.out

echo "Testing '':"
echo "echo hello #comment" & echo "exit" | ./rshell

echo "End testing comment commands."
echo ""
AminWatad/rshell
tests/commented_command.sh
Shell
gpl-3.0
318
#!/bin/sh
# Make sure that ls -i works properly on symlinks.

# Copyright (C) 2003-2019 Free Software Foundation, Inc.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ls

touch f || framework_failure_
ln -s f slink || framework_failure_

# When listed explicitly:

set x $(ls -Ci f slink); shift
test $# = 4 || fail=1
# The inode numbers should differ.
test "$1" != "$3" || fail=1

set x $(ls -CLi f slink); shift
test $# = 4 || fail=1
# With -L, they must be the same.
test "$1" = "$3" || fail=1

set x $(ls -CHi f slink); shift
test $# = 4 || fail=1
# With -H, they must be the same, too, from the command line.
# Note that POSIX says -H must make ls dereference only
# symlinks (specified on the command line) to directories,
# but the historical BSD meaning of -H is to dereference
# any symlink given on the command line. For compatibility GNU ls
# implements the BSD semantics.
test "$1" = "$3" || fail=1

# When listed from a directory:

set x $(ls -Ci); shift
test $# = 4 || fail=1
# The inode numbers should differ.
test "$1" != "$3" || fail=1

set x $(ls -CLi); shift
test $# = 4 || fail=1
# With -L, they must be the same.
test "$1" = "$3" || fail=1

set x $(ls -CHi); shift
test $# = 4 || fail=1
# With -H, they must be different from inside a directory.
test "$1" != "$3" || fail=1

Exit $fail
komh/coreutils-os2
tests/ls/inode.sh
Shell
gpl-3.0
1,958
#!/bin/bash
python AnalyzeSimulation.py --paralog1 YER102W --paralog2 YBL072C --simnum 56 > YER102W_YBL072C_MG94_nonclock_Sim56_PrintScreen.txt
xjw1001001/IGCexpansion
Simulation/ShFiles/MG94_YER102W_YBL072C_sim56.sh
Shell
gpl-3.0
145
#!/bin/sh
# taken from glusterfs.

## Check all dependencies are present
MISSING=""

# Check for aclocal
env aclocal --version > /dev/null 2>&1
if [ $? -eq 0 ]; then
    ACLOCAL=aclocal
else
    MISSING="$MISSING aclocal"
fi

# Check for autoconf
env autoconf --version > /dev/null 2>&1
if [ $? -eq 0 ]; then
    AUTOCONF=autoconf
else
    MISSING="$MISSING autoconf"
fi

# Check for autoheader
env autoheader --version > /dev/null 2>&1
if [ $? -eq 0 ]; then
    AUTOHEADER=autoheader
else
    MISSING="$MISSING autoheader"
fi

# Check for automake
env automake --version > /dev/null 2>&1
if [ $? -eq 0 ]; then
    AUTOMAKE=automake
else
    MISSING="$MISSING automake"
fi

# Check for libtoolize or glibtoolize
env libtoolize --version > /dev/null 2>&1
if [ $? -eq 0 ]; then
    # libtoolize was found, so use it
    TOOL=libtoolize
else
    # libtoolize wasn't found, so check for glibtoolize
    env glibtoolize --version > /dev/null 2>&1
    if [ $? -eq 0 ]; then
        TOOL=glibtoolize
    else
        MISSING="$MISSING libtoolize/glibtoolize"
    fi
fi

# Check for tar
env tar -cf /dev/null /dev/null > /dev/null 2>&1
if [ $? -ne 0 ]; then
    MISSING="$MISSING tar"
fi

## If dependencies are missing, warn the user and abort
if [ "x$MISSING" != "x" ]; then
    echo "Aborting."
    echo
    echo "The following build tools are missing:"
    echo
    for pkg in $MISSING; do
        echo "  * $pkg"
    done
    echo
    echo "Please install them and try again."
    echo
    exit 1
fi

## Do the autogeneration
echo Running ${ACLOCAL}...
$ACLOCAL
echo Running ${AUTOHEADER}...
$AUTOHEADER
echo Running ${TOOL}...
$TOOL --automake --copy --force
echo Running ${AUTOCONF}...
$AUTOCONF
echo Running ${AUTOMAKE}...
$AUTOMAKE --add-missing --copy --foreign

# Instruct user on next steps
echo
echo "Please proceed with configuring, compiling, and installing."
sandrain/fuse-xattrfs
autogen.sh
Shell
gpl-3.0
1,803
#!/bin/bash -e

. ../../blfs.comm

build_src() {
    srcfil=tcl8.6.2-src.tar.gz
    srcdir=tcl8.6.2

    tar -xf $BLFSSRC/$PKGLETTER/$CURDIR/$srcfil
    cd $srcdir
    tar -xf $BLFSSRC/$PKGLETTER/$CURDIR/tcl8.6.2-html.tar.gz --strip-components=1

    export SRCDIR=`pwd`
    cd unix
    ./configure --prefix=/usr \
        --without-tzdata \
        --mandir=/usr/share/man \
        $([ $(uname -m) = x86_64 ] && echo --enable-64bit)
    make

    sed -e "s#$SRCDIR/unix#/usr/lib#" \
        -e "s#$SRCDIR#/usr/include#" \
        -i tclConfig.sh &&
    sed -e "s#$SRCDIR/unix/pkgs/tdbc1.0.1#/usr/lib/tdbc1.0.0#" \
        -e "s#$SRCDIR/pkgs/tdbc1.0.1/generic#/usr/include#" \
        -e "s#$SRCDIR/pkgs/tdbc1.0.1/library#/usr/lib/tcl8.6#" \
        -e "s#$SRCDIR/pkgs/tdbc1.0.1#/usr/include#" \
        -i pkgs/tdbc1.0.1/tdbcConfig.sh &&
    sed -e "s#$SRCDIR/unix/pkgs/itcl4.0.1#/usr/lib/itcl4.0.0#" \
        -e "s#$SRCDIR/pkgs/itcl4.0.1/generic#/usr/include#" \
        -e "s#$SRCDIR/pkgs/itcl4.0.1#/usr/include#" \
        -i pkgs/itcl4.0.1/itclConfig.sh &&
    unset SRCDIR

    make DESTDIR=$BUILDDIR install
    make DESTDIR=$BUILDDIR install-private-headers
    ln -v -sf tclsh8.6 $BUILDDIR/usr/bin/tclsh
    chmod -v 755 $BUILDDIR/usr/lib/libtcl8.6.so
    mkdir -v -p $BUILDDIR/usr/share/doc/tcl-8.6.2
    cp -v -r ../html/* $BUILDDIR/usr/share/doc/tcl-8.6.2

    cleanup_src ../.. $srcdir
}

gen_control() {
    cat > $DEBIANDIR/control << EOF
$PKGHDR
Homepage: http://www.tcl.tk/
Description: The Tcl Programming Language
 The Tcl package contains the Tool Command Language, a robust general-purpose scripting language.
 .
 [tclsh] is a symlink to the tclsh8.6 program.
 .
 [tclsh8.6] is a simple shell containing the Tcl interpreter.
 .
 [libtcl8.6.so] contains the API functions required by Tcl.
EOF
}

build
fangxinmiao/projects
Architeture/OS/Linux/Distributions/LFS/build-scripts/blfs-7.6-systemv/t/Tcl-8.6.2/build.sh
Shell
gpl-3.0
1,781
docker stop influxdb
docker rm influxdb
docker pull arm32v7/influxdb
docker run -d --name influxdb -p 8086:8086 --restart unless-stopped --memory="1G" -v $HOME/docker_influxdb/influxdb_data:/var/lib/influxdb influxdb
nio101/BASECAMP
docker_stuff/docker_influxdb/run_me.sh
Shell
gpl-3.0
217
#!/bin/bash
set -e

mkdir -p build output
rm -rf build/* output/*

cd build
cmake ../.. \
    -DCMAKE_CXX_COMPILER=afl-g++ \
    -DCMAKE_CC_COMPILER=afl-gcc \
    -DENABLE_FUZZING=On \
    -DENABLE_SANITIZERS=On \
    -DBUILD_SHARED_LIBS=Off \
    -DBUILD_OSTREE=ON
make -j8 fuzz
advancedtelematic/sota_client_cpp
fuzz/afl.sh
Shell
mpl-2.0
293
#!/usr/bin/env bash

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
#
# Contributors:
# Brandon Myers [email protected]

source /opt/mozdef/envs/mozdef/bin/activate
/opt/mozdef/envs/mozdef/cron/import_threat_exchange.py -c /opt/mozdef/envs/mozdef/cron/import_threat_exchange.conf
ameihm0912/MozDef
cron/import_threat_exchange.sh
Shell
mpl-2.0
474
#!/usr/bin/env bash
set -e

if [ "$BUILD_TOOLS" = false ]; then
    curl https://sh.rustup.rs -sSf | sh -s -- -y --default-host i686-unknown-linux-gnu
    source ~/.profile
    git clone --branch $RUST_G_VERSION https://github.com/tgstation/rust-g
    cd rust-g
    cargo build --release
    mkdir -p ~/.byond/bin
    ln -s $PWD/target/release/librust_g.so ~/.byond/bin/rust_g
fi
neersighted/tgstation
tools/travis/build_dependencies.sh
Shell
agpl-3.0
384
#!/bin/bash
apt-get -y -q install arduino
for user in `ls /home`
do
    echo "Adding $user to group dialout."
    usermod -a -G dialout "$user"
done
CodersOS/create
commands/arduino.sh
Shell
agpl-3.0
148
#!/bin/bash
BACKUPFILE=backup-$(date +%Y-%m-%d-%H:%M:%S)
archive=${BACKUPFILE}
tar cvf $archive `find . -name "*.sh"` &> /dev/null
cxsjabc/basic
bash/_basic/backup.sh
Shell
agpl-3.0
134
#!/bin/bash
###
# restart_script.sh
#
# Script per il riavvio del software di gestione campanelle
# Restart script for the "campanella" bell management system
# For more information on the software please visit:
# https://lizzit.it/campanella
#
# Written by: Michele Lizzit <[email protected]>, 20 Mar 2014
# Last update: 22 Sept 2017
# Version: 1.2
#
# Copyright (c) 2016 Michele Lizzit
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###

green_color () {
    echo -e "\e[1;31m";
}

default_color () {
    echo -e "\e[1;0m";
}

green_color;
echo "Checking for root permissions..."
default_color;
#sleep 1;

if [[ $EUID -ne 0 ]]; then
    green_color;
    echo "This script must be run as root";
    default_color;
    exit 1;
else
    green_color;
    echo "Root OK";
    default_color;
fi

killall demone.py
killall serial_daemon.py
killall lcd_daemon.py

/opt/campanella/demone.py &
/opt/campanella/serial_daemon.py &
/opt/campanella/lcd_daemon.py &

exit
michelelizzit/campanella
campanella/restart_script.sh
Shell
agpl-3.0
1,626
#!/bin/bash

# Check if nvcc is available
if command -v nvcc &> /dev/null
then
    nvcc_version=($(python scripts/get_cuda_version.py))
    # Check cuda version, if less than 11 then download CUB, otherwise skip
    if [[ "$nvcc_version" < "11" ]]
    then
        # Check if ./lib/cub exists
        if [ ! -d "./lib/cub" ]; then
            cd lib
            mkdir -p temp
            cd temp
            echo "==== GOMC needs CUB library to run..."
            echo "==== Finding latest CUB library..."

            # download the download html page
            wget https://nvlabs.github.io/cub/download_cub.html > /dev/null 2>&1

            # find the lines that have the link
            grep "https://github.com/NVlabs/" download_cub.html > link_lines

            # the last line is the easiest to find the link
            awk '/./{line=$0} END{print line}' link_lines > last_line

            # the substring between two quotes is the link!!!!
            LINK="$(awk -F'"' '{ print $2 }' last_line)"
            echo "==== Link found at ${LINK}"

            # remove any temporary files
            rm link_lines
            rm download_cub.html
            rm last_line

            # download the zip file
            echo "==== Downloading the CUB library... (Shouldn't take too long)"
            wget "${LINK}" > /dev/null 2>&1

            #unzip
            echo "==== Extracting the CUB library..."
            for z in *.zip; do
                unzip "$z" > /dev/null 2>&1
                rm "$z" > /dev/null 2>&1
            done

            # move the cub directory to and remove the rest
            for d in */ ; do
                mv "$d"/cub ../cub > /dev/null 2>&1
                rm -r "$d" > /dev/null 2>&1
            done
            cd ..
            rmdir temp
            cd ..
        else
            echo "==== cub library already exists. Skipping..."
        fi
    else
        echo "CUDA version is 11.0 or higher, no need to download CUB library! Skipping..."
    fi
fi

mkdir -p bin_MPI
cd bin_MPI
ICC_PATH="$(which icc)"
ICPC_PATH="$(which icpc)"
export CC=${ICC_PATH}
export CXX=${ICPC_PATH}
#cmake .. -DGOMC_MPI=on -DCMAKE_BUILD_TYPE=Debug
cmake .. -DGOMC_MPI=on #-DCMAKE_BUILD_TYPE=Debug
make -j8
GOMC-WSU/GOMC
metamakeMPI.sh
Shell
agpl-3.0
2,758
#!/bin/bash

if [ "$1" = "--build" ]; then
    BUILD_ONLY=1
elif [ -n "$1" ]; then
    echo 'Usage: run.sh [--build]' >&2
    exit 1
fi

set -o pipefail
set -eux

export LANG=C.UTF-8
export MAKEFLAGS="-j $(nproc)"

# Running the tests normally takes under half an hour. Sometimes the build or
# (more usually) the tests will wedge. After 50 minutes, print out some
# information about the running processes in order to give us a better chance
# of tracking down problems.
(
    set +x
    sleep 50m
    echo ===== 50 mins ====================
    ps auxwfe
    echo
    top -b -n1
    echo ===== 50 mins ====================
)&

# copy host's source tree to avoid changing that, and make sure we have a clean tree
if [ ! -e /source/.git ]; then
    echo "This container must be run with --volume <host cockpit source checkout>:/source:ro" >&2
    exit 1
fi
git clone /source /tmp/source
[ ! -d /source/node_modules ] || cp -r /source/node_modules /tmp/source/
cd /tmp/source

./autogen.sh --prefix=/usr --enable-strict --with-systemdunitdir=/tmp
make all

if [ -n "${BUILD_ONLY:-}" ]; then
    exit 0
fi

if dpkg-architecture --is amd64; then
    # run distcheck on main arch
    make XZ_COMPRESS_FLAGS='-0' V=0 distcheck 2>&1 || {
        find -name test-suite.log | xargs cat
        exit 1
    }

    # check translation build
    make po/cockpit.pot
    # do some spot checks
    grep -q 'pkg/base1/cockpit.js' po/cockpit.pot
    grep -q 'pkg/lib/machine-dialogs.js' po/cockpit.pot
    grep -q 'pkg/systemd/services.html' po/cockpit.pot
    grep -q 'src/ws/login.html' po/cockpit.pot
    grep -q 'pkg/systemd/manifest.json.in' po/cockpit.pot
    grep -q 'src/bridge/cockpitpackages.c' po/cockpit.pot
    ! grep -q 'test-.*.js' po/cockpit.pot
else
    # on i386, validate that "distclean" does not remove too much
    make dist-gzip
    mkdir _distcleancheck
    tar -C _distcleancheck -xf cockpit-[0-9]*.tar.gz
    cd _distcleancheck/cockpit-*
    ./configure
    make distclean
    ./configure
    make check 2>&1 || {
        find -name test-suite.log | xargs cat
        exit 1
    }
fi

make check-memory 2>&1 || {
    cat test-suite.log
    exit 1
}
deryni/cockpit
containers/unit-tests/run.sh
Shell
lgpl-2.1
2,144
#!/bin/bash # # Copyright (C) 2020 Alexander Larsson <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. set -euo pipefail . $(dirname $0)/libtest.sh echo "1..2" setup_repo sha256() { sha256sum -b | awk "{ print \$1 }" } gunzip_sha256() { gunzip -c $1 | sha256 } make_app() { APP_ID=$1 APPARCH=$2 REPO=$3 DIR=`mktemp -d` cat > ${DIR}/metadata <<EOF [Application] name=$APP_ID runtime=org.test.Platform/$APPARCH/master EOF mkdir -p ${DIR}/files/bin cat > ${DIR}/files/bin/hello.sh <<EOF #!/bin/sh echo "Hello world, from a sandbox" EOF chmod a+x ${DIR}/files/bin/hello.sh mkdir -p ${DIR}/files/share/app-info/xmls mkdir -p ${DIR}/files/share/app-info/icons/flatpak/64x64 gzip -c > ${DIR}/files/share/app-info/xmls/${APP_ID}.xml.gz <<EOF <?xml version="1.0" encoding="UTF-8"?> <components version="0.8"> <component type="desktop"> <id>$APP_ID.desktop</id> <name>Hello world test app: $APP_ID</name> <summary>Print a greeting</summary> <description><p>This is a test app.</p></description> <releases> <release timestamp="1525132800" version="0.0.1"/> </releases> </component> </components> EOF cp $(dirname $0)/org.test.Hello.png ${DIR}/files/share/app-info/icons/flatpak/64x64/${APP_ID}.png $FLATPAK build-finish --command=hello.sh ${DIR} &> /dev/null $FLATPAK build-export --no-update-summary ${GPGARGS} --arch=$APPARCH --disable-sandbox ${REPO} ${DIR} &> /dev/null rm -rf ${DIR} } active_subsets() { REPO=$1 $FLATPAK repo --subsets $REPO | awk "{ print \$2 }" } active_subset_for_arch() { REPO=$1 THE_ARCH=$2 $FLATPAK repo --subsets $REPO --subset $THE_ARCH | awk "{ print \$2 }" } active_subset() { active_subset_for_arch $1 $ARCH } n_histories() { REPO=$1 $FLATPAK repo --subsets $REPO | awk "{ sum +=\$3 } END {print sum}" } verify_subsummaries() { REPO=$1 ACTIVE_SUBSETS=$(active_subsets $REPO) for SUBSET in $ACTIVE_SUBSETS; do assert_has_file $REPO/summaries/$SUBSET.gz done N_HISTORIES=$(n_histories $REPO) N_DELTAS=0 for DELTA_FILE in $REPO/summaries/*.delta; do DELTA=$(basename $DELTA_FILE .delta) FROM=$(echo $DELTA | cut -d "-" -f 1) TO=$(echo $DELTA | cut -d "-" -f 2) # All TO should be in an active SUBSET if [[ "$ACTIVE_SUBSETS" != *"$TO"* ]]; then assert_not_reached fi N_DELTAS=$(($N_DELTAS+1)) done assert_streq "$N_DELTAS" "$N_HISTORIES" N_OLD=0 for SUMMARY_FILE in $REPO/summaries/*.gz; do DIGEST=$(basename $SUMMARY_FILE .gz) COMPUTED_DIGEST=$(gunzip_sha256 $SUMMARY_FILE) assert_streq "$DIGEST" "$COMPUTED_DIGEST" if [[ "$ACTIVE_SUBSETS" != *"$DIGEST"* ]]; then N_OLD=$(($N_OLD+1)) fi done assert_streq "$N_OLD" "$N_HISTORIES" } set +x # A non-default arch (that isn't compatible) if [ $ARCH == x86_64 -o $ARCH == i386 ]; then OTHER_ARCH=aarch64 else OTHER_ARCH=x86_64 fi # Set up some arches, including the current one declare -A arches for A in x86_64 aarch64 arm $($FLATPAK --supported-arches); do arches[$A]=1 done 
ARCHES=${!arches[@]} for A in $ARCHES; do # Create runtimes for all arches (based on $ARCH version) if [ $A != $ARCH ]; then $FLATPAK build-commit-from ${GPGARGS} --src-ref=runtime/org.test.Platform/$ARCH/master repos/test runtime/org.test.Platform/$A/master fi # Create a bunch of apps (for all arches) for I in $(seq 10); do make_app org.app.App$I $A repos/test done update_repo # Make sure we have no superfluous summary files verify_subsummaries repos/test done set -x ok subsummary update generations ACTIVE_SUBSET=$(active_subset repos/test) ACTIVE_SUBSET_OTHER=$(active_subset_for_arch repos/test $OTHER_ARCH) # Ensure we have no initial cache rm -rf $FL_CACHE_DIR/summaries/* assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${ACTIVE_SUBSET}.sub assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${ACTIVE_SUBSET_OTHER}.sub httpd_clear_log # Prime cache for default arch $FLATPAK $U remote-ls test-repo > /dev/null assert_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${ACTIVE_SUBSET}.sub assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${ACTIVE_SUBSET_OTHER}.sub # We downloaded the full summary (not delta) assert_file_has_content httpd-log summaries/${ACTIVE_SUBSET}.gz httpd_clear_log $FLATPAK $U remote-ls test-repo --arch=$OTHER_ARCH > /dev/null assert_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${ACTIVE_SUBSET}.sub assert_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${ACTIVE_SUBSET_OTHER}.sub # We downloaded the full summary (not delta) assert_file_has_content httpd-log summaries/${ACTIVE_SUBSET_OTHER}.gz # Modify the ARCH subset $FLATPAK build-commit-from ${GPGARGS} --src-ref=app/org.app.App1/$ARCH/master repos/test app/org.app.App1.NEW/$ARCH/master OLD_ACTIVE_SUBSET=$ACTIVE_SUBSET OLD_ACTIVE_SUBSET_OTHER=$ACTIVE_SUBSET_OTHER ACTIVE_SUBSET=$(active_subset repos/test) ACTIVE_SUBSET_OTHER=$(active_subset_for_arch repos/test $OTHER_ARCH) assert_not_streq "$OLD_ACTIVE_SUBSET" "$ACTIVE_SUBSET" assert_streq "$OLD_ACTIVE_SUBSET_OTHER" "$ACTIVE_SUBSET_OTHER" sleep 1 # Ensure mtime differs for cached summary files (so they are removed) httpd_clear_log $FLATPAK $U remote-ls test-repo > /dev/null assert_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${ACTIVE_SUBSET}.sub assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${OLD_ACTIVE_SUBSET}.sub assert_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${ACTIVE_SUBSET_OTHER}.sub # This is the same as before # We should have uses the delta assert_not_file_has_content httpd-log summaries/${ACTIVE_SUBSET}.gz assert_file_has_content httpd-log summaries/${OLD_ACTIVE_SUBSET}-${ACTIVE_SUBSET}.delta # Modify the ARCH *and* OTHER_ARCH subset $FLATPAK build-commit-from ${GPGARGS} --src-ref=app/org.app.App1/$ARCH/master repos/test app/org.app.App1.NEW2/$ARCH/master $FLATPAK build-commit-from ${GPGARGS} --src-ref=app/org.app.App1/$OTHER_ARCH/master repos/test app/org.app.App1.NEW2/$OTHER_ARCH/master OLD_OLD_ACTIVE_SUBSET=$OLD_ACTIVE_SUBSET OLD_OLD_ACTIVE_SUBSET_OTHER=$OLD_ACTIVE_SUBSET_OTHER OLD_ACTIVE_SUBSET=$ACTIVE_SUBSET OLD_ACTIVE_SUBSET_OTHER=$ACTIVE_SUBSET_OTHER ACTIVE_SUBSET=$(active_subset repos/test) ACTIVE_SUBSET_OTHER=$(active_subset_for_arch repos/test $OTHER_ARCH) assert_not_streq "$OLD_ACTIVE_SUBSET" "$ACTIVE_SUBSET" assert_not_streq "$OLD_ACTIVE_SUBSET_OTHER" "$ACTIVE_SUBSET_OTHER" sleep 1 # Ensure mtime differs for cached summary files (so they are removed) httpd_clear_log $FLATPAK $U remote-ls test-repo > /dev/null # Only update for $ARCH assert_has_file 
$FL_CACHE_DIR/summaries/test-repo-${ARCH}-${ACTIVE_SUBSET}.sub assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${OLD_ACTIVE_SUBSET}.sub # We didn't get OTHER_ARCH summary assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${ACTIVE_SUBSET_OTHER}.sub assert_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${OLD_ACTIVE_SUBSET_OTHER}.sub # We should have used the delta assert_not_file_has_content httpd-log summaries/${ACTIVE_SUBSET}.gz assert_file_has_content httpd-log summaries/${OLD_ACTIVE_SUBSET}-${ACTIVE_SUBSET}.delta sleep 1 # Ensure mtime differs for cached summary files (so they are removed) httpd_clear_log $FLATPAK $U remote-ls --arch=* test-repo > /dev/null # update for all arches assert_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${ACTIVE_SUBSET}.sub assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${ARCH}-${OLD_ACTIVE_SUBSET}.sub assert_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${ACTIVE_SUBSET_OTHER}.sub assert_not_has_file $FL_CACHE_DIR/summaries/test-repo-${OTHER_ARCH}-${OLD_ACTIVE_SUBSET_OTHER}.sub # We should have used the delta assert_not_file_has_content httpd-log summaries/${ACTIVE_SUBSET_OTHER}.gz assert_file_has_content httpd-log summaries/${OLD_ACTIVE_SUBSET_OTHER}-${ACTIVE_SUBSET_OTHER}.delta # We should have used the $ARCH one from the cache assert_not_file_has_content httpd-log summaries/${ACTIVE_SUBSET}.gz assert_not_file_has_content httpd-log summaries/${OLD_ACTIVE_SUBSET}-${ACTIVE_SUBSET}.delta ok subsummary fetching and caching
flatpak/flatpak
tests/test-summaries.sh
Shell
lgpl-2.1
9,041
#!/bin/sh

# test_flake8.sh - run Python flake8 tests
#
# Copyright (C) 2019 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA

set -e

# find source directory
srcdir="${srcdir-`dirname "$0"`}"
builddir="${builddir-`dirname "$0"`}"
top_srcdir="${top_srcdir-${srcdir}/..}"
top_builddir="${top_builddir-${builddir}/..}"
python="${PYTHON-python}"

# if Python is missing, ignore
if ! ${python} --version > /dev/null 2> /dev/null
then
    echo "Python (${python}) not found"
    exit 77
fi

# find virtualenv command
if ! virtualenv --version > /dev/null 2>&1
then
    echo "virtualenv: command not found"
    exit 77
fi

# create virtualenv
venv="${builddir}/flake8-venv"
[ -x "$venv"/bin/pip ] || virtualenv "$venv" --python="$python"
"$venv"/bin/pip install \
    flake8 \
    flake8-author \
    flake8-blind-except \
    flake8-class-newline \
    flake8-commas \
    flake8-deprecated \
    flake8-docstrings \
    flake8-exact-pin \
    flake8-print \
    flake8-quotes \
    flake8-tidy-imports \
    flake8-tuple \
    pep8-naming

# run flake8 over pynslcd
"$venv"/bin/flake8 \
    --config="${srcdir}/flake8.ini" \
    "${top_srcdir}/pynslcd"

# run flake8 over utils
"$venv"/bin/flake8 \
    --config="${srcdir}/flake8.ini" \
    "${top_srcdir}/utils"

# run flake8 over tests
"$venv"/bin/flake8 \
    --config="${srcdir}/flake8.ini" \
    "${top_srcdir}/tests"/*.py
arthurdejong/nss-pam-ldapd
tests/test_flake8.sh
Shell
lgpl-2.1
2,024
#!/bin/sh

set -e

if [ ! -f "build/env.sh" ]; then
    echo "$0 must be run from the root of the repository."
    exit 2
fi

# Create fake Go workspace if it doesn't exist yet.
workspace="$PWD/build/_workspace"
root="$PWD"
xeniodir="$workspace/src/github.com/xenioplatform"
if [ ! -L "$xeniodir/go-xenio" ]; then
    mkdir -p "$xeniodir"
    cd "$xeniodir"
    ln -s ../../../../../. go-xenio
    cd "$root"
fi

# Set up the environment to use the workspace.
GOPATH="$workspace"
export GOPATH

# Run the command inside the workspace.
cd "$xeniodir/go-xenio"
PWD="$xeniodir/go-xenio"

# Launch the arguments with the configured environment.
exec "$@"
xenioplatform/go-xenio
build/env.sh
Shell
lgpl-3.0
651
rm -Rf /guyirvine.com/people-blog/*
cp -R $WORKSPACE/gi/_site/* /guyirvine.com/people-blog
guyirvine/people-blog
bin/copy_live.sh
Shell
lgpl-3.0
92
cd ..
mkdir Solution
cd Solution
cmake ../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
make -j $(nproc)
cd ..
cd Tools
AndyD87/CcOS
Tools/build.sh
Shell
lgpl-3.0
130
#!/bin/bash
# declare STRING variable

date
N=10
for i in `seq 1 $1`; do
    x=`shuf -i 100000-1000000 -n 1`
    ./prefix-scan $x
done
STRING="Done!!!"
echo $STRING
date
aocalderon/PhD
Y1Q1/GPU/lab3/3.2-prefix-scan/test2.sh
Shell
lgpl-3.0
163
#!/bin/bash

# This must match the path to your osg-android build
BASE_PATH=/home/erk/GIT-repos/osg-android

mkdir -p build && cd build

cmake .. \
    -DOSG_DIR:PATH="$BASE_PATH/osg-android" \
    -DOSG_INCLUDE_DIR:PATH="$BASE_PATH/include" \
    -DOSG_GEN_INCLUDE_DIR:PATH="$BASE_PATH/build/include" \
    -DCURL_INCLUDE_DIR:PATH="$BASE_PATH/3rdparty/curl/include" \
    -DCURL_LIBRARY:PATH="$BASE_PATH/3rdparty/build/curl/obj/local/armeabi-v7a/libcurl.a" \
    -DGDAL_INCLUDE_DIR:PATH="$BASE_PATH/3rdparty/gdal/include" \
    -DGDAL_LIBRARY:PATH="$BASE_PATH/3rdparty/build/gdal/obj/local/armeabi-v7a/libgdal.a" \
    -DGEOS_INCLUDE_DIR:PATH="$BASE_PATH/3rdparty/jni/geos-3.3.4/include" \
    -DGEOS_LIBRARY:PATH="$BASE_PATH/3rdparty/build/geos/obj/local/armeabi-v7a/libgeos.a" \
    -DOPENTHREADS_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libOpenThreads.a \
    -DOSGDB_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgDB.a \
    -DOSGFX_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgFX.a \
    -DOSGGA_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgGA.a \
    -DOSGMANIPULATOR_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgManipulator.a \
    -DOSGSHADOW_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgShadow.a \
    -DOSGSIM_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgSim.a \
    -DOSGTERRAIN_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgTerrain.a \
    -DOSGTEXT_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgText.a \
    -DOSGUTIL_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgUtil.a \
    -DOSGVIEWER_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgViewer.a \
    -DOSGWIDGET_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosgWidget.a \
    -DOSG_LIBRARY=$BASE_PATH/build/obj/local/armeabi-v7a/libosg.a \
    -DOSGEARTH_USE_QT:BOOL=OFF \
    -DOSG_BUILD_PLATFORM_ANDROID:BOOL=ON \
    -DDYNAMIC_OSGEARTH:BOOL=OFF &&
cp -r ../AutoGenShaders/src .

make
thahemp/osgearth-android
buildOSGEarthAndroid.sh
Shell
lgpl-3.0
1,835
#!/bin/sh

CURL_VERSION=7.48.0
WORKSPACE=$(pwd)
INSTALL_DIR="${WORKSPACE}/Linux/i386"
mkdir -p $INSTALL_DIR

if [ ! -e "curl-${CURL_VERSION}-i386" ]; then
    tar -jxvf curl-${CURL_VERSION}.tar.bz2
    mv curl-${CURL_VERSION} curl-${CURL_VERSION}-i386
fi

cd curl-${CURL_VERSION}-i386

export CFLAGS="-m32 -I${WORKSPACE}/../openssl/Linux/i386/include -I${WORKSPACE}/../zlib/Linux/i386/include"
export LDFLAGS="-m32 -L${WORKSPACE}/../openssl/Linux/i386/lib -L${WORKSPACE}/../zlib/Linux/i386/lib"

./configure \
    --prefix=${INSTALL_DIR} \
    --with-ssl=${WORKSPACE}/../openssl/Linux/i386 \
    --with-zlib=${WORKSPACE}/../zlib/Linux/i386 \
    --disable-shared \
    --enable-static \
    --disable-ldap \
    --disable-ldaps \
    --with-pic \
    | tee ${INSTALL_DIR}/configuration.txt || exit 1

make clean
make -j4 || exit 1
make install || exit 1
WadeHsiao/B
3rd/curl/build-curl-for-linux-i386.sh
Shell
lgpl-3.0
823
# Sets “Do Not Disturb” status for (FOCUS_INTERVAL / 60) minutes
# FOCUS_INTERVAL is amount of seconds user focuses on (env variable from Focus.app)
# note: $FOCUS_INTERVAL is 0 by default, otherwise it's the interval for the previous focus session
if [ "$FOCUS_INTERVAL" -eq "0" ]; then
    FOCUS_INTERVAL=1500
fi
FOCUS_MINUTES=$((FOCUS_INTERVAL / 60))

# before using replace TOKEN with a token value from https://api.slack.com/docs/oauth-test-tokens
# repeat the command below for each slack you need to change DND-status for
curl -L "https://slack.com/api/dnd.setSnooze?token=TOKEN&num_minutes=$FOCUS_MINUTES"

# close distracting apps
osascript -e 'quit app "Mail"'
osascript -e 'quit app "Tweetbot"'
mistadikay/dotfiles
focus/focus.sh
Shell
unlicense
710
#!/bin/bash

case $1 in
noloop)
    [ -d log/ ] || mkdir log/
    [ -f log/console.log ] && mv log/console.log "log/backup/`date +%Y-%m-%d_%H-%M-%S`_console.log"
    java -Xms128m -Xmx1536m -ea -XX:-UseSplitVerifier -javaagent:./libs/al-commons.jar -cp ./libs/*:AL-Game.jar com.aionemu.gameserver.GameServer > log/console.log 2>&1
    echo $! > gameserver.pid
    echo "Server started!"
    ;;
*)
    ./StartGS_loop.sh &
    ;;
esac
ilivoo/game-logic
bin/StartGS.sh
Shell
apache-2.0
416
#!/bin/bash

if [ "$#" -ne 1 ] ; then
    echo "Usage: $0 <release directory>" >&2
    exit 1
fi

RELEASE=$PWD/$1
echo Processing ${RELEASE}

TRANSITION_FILE=$1-transition.txt
NOW_FILE=$1-now.txt

echo > ${TRANSITION_FILE}
echo > ${NOW_FILE}

for zone in `find ${RELEASE} -type f`; do
    zdump -v -c 1901,2036 ${zone} >> ${TRANSITION_FILE}
    zdump ${zone} >> ${NOW_FILE}
done
RMGiroux/tzvalidate
scripts/dumpall.sh
Shell
apache-2.0
370
#!/bin/bash
sudo npm run audit
shamrickus/GloryOfNephilim
audit.sh
Shell
apache-2.0
32
#!/bin/bash -e

fs_type=${FS_TYPE}
filesys=${PARTITION_NAME}

EXISTING_FS_TYPE=$(sudo lsblk -no FSTYPE $PARTITION_NAME)

if [ -z $EXISTING_FS_TYPE ] ; then
    mkfs_executable=''
    case ${fs_type} in
        ext2 | ext3 | ext4 | fat | ntfs )
            mkfs_executable='mkfs.'${fs_type};;
        swap )
            mkfs_executable='mkswap';;
        * )
            echo "File system type is not supported."
            exit 1;;
    esac

    echo "Creating ${fs_type} file system using ${mkfs_executable}"
    sudo ${mkfs_executable} ${filesys}
else
    if [ "$EXISTING_FS_TYPE" != "$fs_type" ] ; then
        echo "Existing filesystem ($EXISTING_FS_TYPE) but not the expected type ($fs_type)"
        exit 1
    fi
    echo "Not making a filesystem since a it already exist"
fi
alien4cloud/alien4cloud-extended-types
alien-extended-storage-types/scripts/mkfs.sh
Shell
apache-2.0
771
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -euo pipefail

MY_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

export AIRFLOW_CI_SILENT=${AIRFLOW_CI_SILENT:="true"}
export PYTHON_VERSION=${PYTHON_VERSION:-3.6}

# shellcheck source=scripts/ci/_utils.sh
. "${MY_DIR}/_utils.sh"

basic_sanity_checks

script_start

if [[ -f ${BUILD_CACHE_DIR}/.skip_tests ]]; then
    echo
    echo "Skip tests"
    echo
    script_end
    exit
fi

rebuild_ci_image_if_needed

IMAGES_TO_CHECK=("CI")
export IMAGES_TO_CHECK

pre-commit run --all-files --show-diff-on-failure

script_end
Fokko/incubator-airflow
scripts/ci/ci_run_all_static_tests.sh
Shell
apache-2.0
1,341
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && cd ../../.. && pwd -P)" # Defines: # + NATIVE_ROOT: The Rust code directory, ie: src/rust/engine. # + MODE: Whether to run in debug or release mode. # + MODE_FLAG: The string to pass to Cargo to determine if we're in debug or release mode. # Exposes: # + calculate_current_hash: Generate a stable hash to determine if we need to rebuild the engine. # shellcheck source=build-support/bin/rust/calculate_engine_hash.sh source "${REPO_ROOT}/build-support/bin/rust/calculate_engine_hash.sh" KERNEL=$(uname -s | tr '[:upper:]' '[:lower:]') case "${KERNEL}" in linux) readonly LIB_EXTENSION=so ;; darwin) readonly LIB_EXTENSION=dylib ;; *) die "Unknown kernel ${KERNEL}, cannot bootstrap Pants native code!" ;; esac readonly NATIVE_ENGINE_BINARY="native_engine.so" readonly NATIVE_ENGINE_RESOURCE="${REPO_ROOT}/src/python/pants/engine/internals/${NATIVE_ENGINE_BINARY}" readonly NATIVE_ENGINE_RESOURCE_METADATA="${NATIVE_ENGINE_RESOURCE}.metadata" function _build_native_code() { # NB: See Cargo.toml with regard to the `extension-module` feature. "${REPO_ROOT}/cargo" build --features=extension-module ${MODE_FLAG} -p engine || die echo "${NATIVE_ROOT}/target/${MODE}/libengine.${LIB_EXTENSION}" } function bootstrap_native_code() { # We expose a safety valve to skip compilation iff the user already has `native_engine.so`. This # can result in using a stale `native_engine.so`, but we trust that the user knows what # they're doing. if [[ "${SKIP_NATIVE_ENGINE_SO_BOOTSTRAP}" == "true" ]]; then if [[ ! -f "${NATIVE_ENGINE_RESOURCE}" ]]; then die "You requested to override bootstrapping native_engine.so via the env var" \ "SKIP_NATIVE_ENGINE_SO_BOOTSTRAP, but the file does not exist at" \ "${NATIVE_ENGINE_RESOURCE}. This is not safe to do." fi return fi # Bootstraps the native code only if needed. local engine_version_calculated engine_version_calculated="$(calculate_current_hash)" local engine_version_in_metadata if [[ -f "${NATIVE_ENGINE_RESOURCE_METADATA}" ]]; then engine_version_in_metadata="$(sed -n 's/^engine_version: //p' "${NATIVE_ENGINE_RESOURCE_METADATA}")" fi if [[ ! -f "${NATIVE_ENGINE_RESOURCE}" || "${engine_version_calculated}" != "${engine_version_in_metadata}" ]]; then echo "Building native engine" local -r native_binary="$(_build_native_code)" # If bootstrapping the native engine fails, don't attempt to run pants # afterwards. if [[ ! -f "${native_binary}" ]]; then die "Failed to build native engine." fi # Pick up Cargo.lock changes if any caused by the `cargo build`. engine_version_calculated="$(calculate_current_hash)" # Create the native engine resource. # NB: On Mac Silicon, for some reason, first removing the old native_engine.so is necessary to avoid the Pants # process from being killed when recompiling. rm -f "${NATIVE_ENGINE_RESOURCE}" cp "${native_binary}" "${NATIVE_ENGINE_RESOURCE}" # Create the accompanying metadata file. local -r metadata_file=$(mktemp -t pants.native_engine.metadata.XXXXXX) echo "engine_version: ${engine_version_calculated}" > "${metadata_file}" echo "repo_version: $(git describe --dirty)" >> "${metadata_file}" mv "${metadata_file}" "${NATIVE_ENGINE_RESOURCE_METADATA}" fi }
jsirois/pants
build-support/bin/rust/bootstrap_code.sh
Shell
apache-2.0
3,527
#!/bin/sh

## you need to set env variables : JAVA_HOME, or modify the 2 values here

export SAMPLE_PROPERTIES=" -Dsamples.trustAll=true "
export SAMPLEDIR=.

if [ "x${JAVA_HOME}" = "x" ]
then
    if [ "x${JAVAHOME}" = "x" ]
    then
        echo JAVA_HOME not defined. Must be defined to run java apps.
        exit
    fi
    export JAVA_HOME="${JAVAHOME}"
fi

export PATH=${JAVA_HOME}/bin:${PATH}

LOCALCLASSPATH=${PWD}/lib/vim25.jar

exec ${JAVA_HOME}/bin/java ${SAMPLE_PROPERTIES} -cp $(echo lib/*.jar | tr ' ' ':') -Xmx1024M com.vmware.common.Main "$@"
jdgwartney/vsphere-ws
java/JAXWS/run.sh
Shell
apache-2.0
573
#!/bin/bash
export PWD=`pwd`
emacs `./lib/sourcefiles.sh` --geometry 120x70 --eval "(add-hook 'emacs-startup-hook 'delete-other-windows)" --title "`basename $PWD` ide" &
xoba/jira
lib/x-ide.sh
Shell
apache-2.0
171
#!/bin/sh # # Copyright (C) 2012 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This shell script is used to rebuild the gcc and toolchain binaries # for the Android NDK. # PROGDIR=$(dirname "$0") . "$PROGDIR/prebuilt-common.sh" PROGRAM_PARAMETERS="<dst-dir>" PROGRAM_DESCRIPTION="\ This script allows you to generate a 'wrapper toolchain', i.e. a set of simple scripts that act as toolchain binaries (e.g. my-cc, my-c++, my-ld, etc...) but call another installed toolchain instead, possibly with additional command-line options. For example, imagine we want a toolchain that generates 32-bit binaries while running on a 64-bit system, we could call this script as: $PROGNAME --cflags="-m32" --cxxflags="-m32" --ldflags="-m32" /tmp/my-toolchain Then, this will create programs like: /tmp/my-toolchain/my-cc /tmp/my-toolchain/my-gcc /tmp/my-toolchain/my-c++ /tmp/my-toolchain/my-g++ /tmp/my-toolchain/my-ld ... Where the compilers and linkers will add the -m32 flag to the command-line before calling the host version of 'cc', 'gcc', etc... Generally speaking: - The 'destination toolchain' is the one that will be called by the generated wrapper script. It is identified by a 'destination prefix' (e.g. 'x86_64-linux-gnu-', note the dash at the end). If is empty by default, but can be changed with --dst-prefix=<prefix> - The 'source prefix' is the prefix added to the generated toolchain scripts, it is 'my-' by default, but can be changed with --src-prefix=<prefix> - You can use --cflags, --cxxflags, --ldflags, etc... to add extra command-line flags for the generated compiler, linker, etc.. scripts " DEFAULT_SRC_PREFIX="my-" DEFAULT_DST_PREFIX="" SRC_PREFIX=$DEFAULT_SRC_PREFIX register_var_option "--src-prefix=<prefix>" SRC_PREFIX "Set source toolchain prefix" DST_PREFIX=$DEFAULT_DST_PREFIX register_var_option "--dst-prefix=<prefix>" DST_PREFIX "Set destination toolchain prefix" EXTRA_CFLAGS= register_var_option "--cflags=<options>" EXTRA_CFLAGS "Add extra C compiler flags" EXTRA_CXXFLAGS= register_var_option "--cxxflags=<options>" EXTRA_CXXFLAGS "Add extra C++ compiler flags" EXTRA_LDFLAGS= register_var_option "--ldflags=<options>" EXTRA_LDFLAGS "Add extra linker flags" EXTRA_ASFLAGS= register_var_option "--asflags=<options>" EXTRA_ASFLAGS "Add extra assembler flags" EXTRA_ARFLAGS= register_var_option "--arflags=<options>" EXTRA_ARFLAGS "Add extra archiver flags" CCACHE= register_var_option "--ccache=<prefix>" CCACHE "Use ccache compiler driver" PROGRAMS="cc gcc c++ g++ cpp as ld ar ranlib strip strings nm objdump dlltool" register_var_option "--programs=<list>" PROGRAMS "List of programs to generate wrapper for" extract_parameters "$@" PROGRAMS=$(commas_to_spaces "$PROGRAMS") if [ -z "$PROGRAMS" ]; then panic "Empty program list, nothing to do!" fi DST_DIR="$PARAMETERS" if [ -z "$DST_DIR" ]; then panic "Please provide a destination directory as a parameter! See --help for details." 
fi mkdir -p "$DST_DIR" fail_panic "Could not create destination directory: $DST_DIR" # Generate a small wrapper program # # $1: program name, without any prefix (e.g. gcc, g++, ar, etc..) # $2: source prefix (e.g. 'i586-mingw32msvc-') # $3: destination prefix (e.g. 'i586-px-mingw32msvc-') # $4: destination directory for the generate program # gen_wrapper_program () { local PROG="$1" local SRC_PREFIX="$2" local DST_PREFIX="$3" local DST_FILE="$4/${SRC_PREFIX}$PROG" local FLAGS="" local LDL_FLAGS="" case $PROG in cc|gcc) FLAGS=$FLAGS" $EXTRA_CFLAGS" LDL_FLAGS="-ldl" ;; c++|g++) FLAGS=$FLAGS" $EXTRA_CXXFLAGS" LDL_FLAGS="-ldl" ;; ar) FLAGS=$FLAGS" $EXTRA_ARFLAGS";; as) FLAGS=$FLAGS" $EXTRA_ASFLAGS";; ld|ld.bfd|ld.gold) FLAGS=$FLAGS" $EXTRA_LDFLAGS";; esac if [ -n "$CCACHE" ]; then DST_PREFIX=$CCACHE" "$DST_PREFIX fi cat > "$DST_FILE" << EOF #!/bin/sh # Auto-generated, do not edit ${DST_PREFIX}$PROG $FLAGS "\$@" $LDL_FLAGS EOF chmod +x "$DST_FILE" log "Generating: ${SRC_PREFIX}$PROG" } log "Generating toolchain wrappers in: $DST_DIR" for PROG in $PROGRAMS; do gen_wrapper_program $PROG "$SRC_PREFIX" "$DST_PREFIX" "$DST_DIR" done log "Done!"
yongjhih/android_tools
ndk/build/tools/gen-toolchain-wrapper.sh
Shell
apache-2.0
4,848
APT="apt-get"

function install_git {
    $APT install -y git || _error "Could not install packages"
}

function install_script_dependencies {
    $APT update
    $APT install -y openssl wget || _error "Could not install packages"
}

function install_rocksdb_dependencies {
    $APT install -y libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev || _error "Could not install packages"
}

function install_compiler {
    $APT update
    $APT install -y bzip2 build-essential gcc || _error "Could not install packages"
}
bauerj/electrumx-installer
distributions/base-debianoid.sh
Shell
apache-2.0
498
#doitlive prompt: {user.dim}: {cwd.red}/ $
#doitlive speed: 2
#doitlive commentecho: true

# This is bootstrap code. Please continue until you get the presentation header.
sudo mkdir -p /shared/project
sudo chown dottie_demo /shared/project
cp ./project/* /shared/project/
clear

#
#
#Keeping Secrets in your code
#
#- https://github.com/krotscheck/presentations/tree/master/src/2018_11_10_keeping_secrets
#- Michael Krotscheck<[email protected]>
#
#

more gpg.config
gpg --generate-key --batch gpg.config
gpg --list-keys
gpg --list-secret-keys

#<<<

openssl rand -base64 32 > passphrase
more passphrase
cat passphrase | gpg --encrypt -r [email protected] -o /shared/project/passphrase.gpg
xxd /shared/project/passphrase.gpg
gpg --decrypt --use-agent --batch /shared/project/passphrase.gpg
gpg --decrypt --use-agent --batch /shared/project/passphrase.gpg

#<<<

more /shared/project/jenkins.gpg.asc
gpg --import /shared/project/jenkins.gpg.asc
gpg --list-keys
gpg --list-secret-keys
cat passphrase | gpg --encrypt -r [email protected] -r [email protected] -o /shared/project/passphrase.gpg

#<<<

cd /shared/project
ls -la
more passphrase.sh
./passphrase.sh
more ansible.cfg
ansible-vault create ./group_vars/all/main.yml
more ./group_vars/all/main.yml
ansible-playbook ./main.yml
krotscheck/presentations
src/2018_11_10_keeping_secrets/presentation.dottie.sh
Shell
apache-2.0
1,290
#!/bin/bash
#
# Licensed to Jasig under one or more contributor license
# agreements. See the NOTICE file distributed with this work
# for additional information regarding copyright ownership.
# Jasig licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a
# copy of the License at the following location:
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

echo -e "Building branch: ${TRAVIS_BRANCH}"
echo -e "Build directory: ${TRAVIS_BUILD_DIR}"
echo -e "Build id: ${TRAVIS_BUILD_ID}"
echo -e "Builder number: ${TRAVIS_BUILD_NUMBER}"
echo -e "Job id: ${TRAVIS_JOB_ID}"
echo -e "Job number: ${TRAVIS_JOB_NUMBER}"
echo -e "Repo slug: ${TRAVIS_REPO_SLUG}"
echo -e "OS name: ${TRAVIS_OS_NAME}"

if [ "$TRAVIS_SECURE_ENV_VARS" == "false" ]
then
    echo -e "Secure environment variables are NOT available...\n"
else
    echo -e "Secure environment variables are available...\n"
    #echo -e "GH_TOKEN -> ${GH_TOKEN}"
fi
0be1/cas
travis/init-travis-build.sh
Shell
apache-2.0
1,345
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
(shopt -p inherit_errexit &>/dev/null) && shopt -s inherit_errexit

readonly SCRIPT_DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
source "$SCRIPT_DIR"/../src/logger.sh "test.log"

function foo() {
    ENTER
    DEBUG "DEBUG message"
    INFO "INFO message"
    echo "echo message"
    WARN "WARN message"
    ERROR "ERROR message"
    EXIT
}

ENTER
foo
EXIT
adoyle-h/bash-logger
test/logger_to_file.sh
Shell
apache-2.0
447
#!/bin/bash set -euo pipefail # Vars without defaults : "${NFSYSTEM:?NFSYSTEM not set}" : "${CONT:?CONT not set}" : "${DATADIR:?DATADIR not set}" : "${LOGDIR:?LOGDIR not set}" : "${PRETRAINED_DIR:?PRETRAINED_DIR not set}" # Vars with defaults : "${NEXP:=5}" : "${DATESTAMP:=$(date +'%y%m%d%H%M%S%N')}" : "${CLEAR_CACHES:=1}" # Other vars readonly _config_file="./config_${NFSYSTEM}.sh" readonly _logfile_base="${LOGDIR}/${DATESTAMP}" readonly _cont_name=single_stage_detector _cont_mounts=("--volume=${DATADIR}:/data" "--volume=${LOGDIR}:/results" "--volume=${PRETRAINED_DIR}:/workspace/single_stage_detector/torch-model-cache") # Setup directories mkdir -p "${LOGDIR}" # Get list of envvars to pass to docker source "${_config_file}" mapfile -t _config_env < <(env -i bash -c ". ${_config_file} && compgen -e" | grep -E -v '^(PWD|SHLVL)') _config_env+=(MLPERF_HOST_OS) mapfile -t _config_env < <(for v in "${_config_env[@]}"; do echo "--env=$v"; done) # Cleanup container cleanup_docker() { docker container rm -f "${_cont_name}" || true } cleanup_docker trap 'set -eux; cleanup_docker' EXIT # Setup container nvidia-docker run --rm --init --detach \ -v /mlperf/training_v0.7_v2/ssd/scripts:/workspace/single_stage_detector \ --net=host --uts=host --ipc=host --security-opt=seccomp=unconfined \ --ulimit=stack=67108864 --ulimit=memlock=-1 \ --name="${_cont_name}" "${_cont_mounts[@]}" \ "${CONT}" sleep infinity docker exec -it "${_cont_name}" true # Run experiments for _experiment_index in $(seq 1 "${NEXP}"); do ( echo "Beginning trial ${_experiment_index} of ${NEXP}" # Print system info docker exec -it "${_cont_name}" python -c " import mlperf_log_utils from mlperf_logging.mllog import constants mlperf_log_utils.mlperf_submission_log(constants.SSD)" # Clear caches if [ "${CLEAR_CACHES}" -eq 1 ]; then sync && sudo /sbin/sysctl vm.drop_caches=3 docker exec -it "${_cont_name}" python -c " from mlperf_logging.mllog import constants from mlperf_logger import log_event log_event(key=constants.CACHE_CLEAR, value=True)" fi # Run experiment docker exec -it "${_config_env[@]}" "${_cont_name}" ./run_and_time.sh ) |& tee "${_logfile_base}_${_experiment_index}.log" done
mlperf/training_results_v0.7
Inspur/benchmarks/ssd/implementations/implementation_closed/run_with_docker.sh
Shell
apache-2.0
2,306
#!/bin/bash

sudo apt update
sudo apt full-upgrade -y
sudo apt install -y git

if [ ! -d ~/git ]; then
    mkdir -p ~/git
fi

cd ~/git

if [ ! -d star.ubuntu-setup ]; then
    git clone https://github.com/starlone/star.ubuntu-setup.git
fi

cd star.ubuntu-setup
./setup.sh
starlone/star.ubuntu-setup
bootstrap.sh
Shell
apache-2.0
272
#!/bin/bash # Copyright 2013 Brno University of Technology (Author: Karel Vesely) # Apache 2.0. # Sequence-discriminative MMI/BMMI training of DNN. # 4 iterations (by default) of Stochastic Gradient Descent with per-utterance updates. # Boosting of paths with more errors (BMMI) gets activated by '--boost <float>' option. # For the numerator we have a fixed alignment rather than a lattice-- # this actually follows from the way lattices are defined in Kaldi, which # is to have a single path for each word (output-symbol) sequence. # Begin configuration section. cmd=run.pl num_iters=4 boost=0.0 #ie. disable boosting acwt=0.1 lmwt=1.0 learn_rate=0.00001 halving_factor=1.0 #ie. disable halving drop_frames=true verbose=1 seed=777 # seed value used for training data shuffling skip_cuda_check=false # End configuration section echo "$0 $@" # Print the command line for logging [ -f ./path.sh ] && . ./path.sh; # source the path. . parse_options.sh || exit 1; if [ $# -ne 6 ]; then echo "Usage: steps/$0 <data> <lang> <srcdir> <ali> <denlats> <exp>" echo " e.g.: steps/$0 data/train_all data/lang exp/tri3b_dnn exp/tri3b_dnn_ali exp/tri3b_dnn_denlats exp/tri3b_dnn_mmi" echo "Main options (for others, see top of script file)" echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs." echo " --config <config-file> # config containing options" echo " --num-iters <N> # number of iterations to run" echo " --acwt <float> # acoustic score scaling" echo " --lmwt <float> # linguistic score scaling" echo " --learn-rate <float> # learning rate for NN training" echo " --drop-frames <bool> # drop frames num/den completely disagree" echo " --boost <boost-weight> # (e.g. 0.1), for boosted MMI. (default 0)" exit 1; fi data=$1 lang=$2 srcdir=$3 alidir=$4 denlatdir=$5 dir=$6 for f in $data/feats.scp $alidir/{tree,final.mdl,ali.1.gz} $denlatdir/lat.scp $srcdir/{final.nnet,final.feature_transform}; do [ ! -f $f ] && echo "$0: no such file $f" && exit 1; done # check if CUDA is compiled in, if ! $skip_cuda_check; then cuda-compiled || { echo 'CUDA was not compiled in, skipping! Check src/kaldi.mk and src/configure' && exit 1; } fi mkdir -p $dir/log cp $alidir/{final.mdl,tree} $dir silphonelist=`cat $lang/phones/silence.csl` || exit 1; #Get the files we will need nnet=$srcdir/$(readlink $srcdir/final.nnet || echo final.nnet); [ -z "$nnet" ] && echo "Error nnet '$nnet' does not exist!" && exit 1; cp $nnet $dir/0.nnet; nnet=$dir/0.nnet class_frame_counts=$srcdir/ali_train_pdf.counts [ -z "$class_frame_counts" ] && echo "Error class_frame_counts '$class_frame_counts' does not exist!" && exit 1; cp $srcdir/ali_train_pdf.counts $dir feature_transform=$srcdir/final.feature_transform if [ ! -f $feature_transform ]; then echo "Missing feature_transform '$feature_transform'" exit 1 fi cp $feature_transform $dir/final.feature_transform model=$dir/final.mdl [ -z "$model" ] && echo "Error transition model '$model' does not exist!" && exit 1; # Shuffle the feature list to make the GD stochastic! # By shuffling features, we have to use lattices with random access (indexed by .scp file). 
cat $data/feats.scp | utils/shuffle_list.pl --srand $seed > $dir/train.scp ### ### PREPARE FEATURE EXTRACTION PIPELINE ### # import config, cmvn_opts= delta_opts= D=$srcdir [ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, [ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) [ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, [ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) # # Create the feature stream, feats="ark,o:copy-feats scp:$dir/train.scp ark:- |" # apply-cmvn (optional), [ ! -z "$cmvn_opts" -a ! -f $data/cmvn.scp ] && echo "$0: Missing $data/cmvn.scp" && exit 1 [ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp ark:- ark:- |" # add-deltas (optional), [ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" # # Record the setup, [ ! -z "$cmvn_opts" ] && echo $cmvn_opts >$dir/cmvn_opts [ ! -z "$delta_opts" ] && echo $delta_opts >$dir/delta_opts ### ### ### ### ### Prepare the alignments ### # Assuming all alignments will fit into memory ali="ark:gunzip -c $alidir/ali.*.gz |" ### ### Prepare the lattices ### # The lattices are indexed by SCP (they are not gziped because of the random access in SGD) lats="scp:$denlatdir/lat.scp" # Optionally apply boosting if [[ "$boost" != "0.0" && "$boost" != 0 ]]; then #make lattice scp with same order as the shuffled feature scp awk '{ if(r==0) { latH[$1]=$2; } if(r==1) { if(latH[$1] != "") { print $1" "latH[$1] } } }' $denlatdir/lat.scp r=1 $dir/train.scp > $dir/lat.scp #get the list of alignments ali-to-phones $alidir/final.mdl "$ali" ark,t:- | awk '{print $1;}' > $dir/ali.lst #remove feature files which have no lattice or no alignment, #(so that the mmi training tool does not blow-up due to lattice caching) mv $dir/train.scp $dir/train.scp_unfilt awk '{ if(r==0) { latH[$1]="1"; } if(r==1) { aliH[$1]="1"; } if(r==2) { if((latH[$1] != "") && (aliH[$1] != "")) { print $0; } } }' $dir/lat.scp r=1 $dir/ali.lst r=2 $dir/train.scp_unfilt > $dir/train.scp #create the lat pipeline lats="ark,o:lattice-boost-ali --b=$boost --silence-phones=$silphonelist $alidir/final.mdl scp:$dir/lat.scp '$ali' ark:- |" fi ### ### ### # Run several iterations of the MMI/BMMI training cur_mdl=$nnet x=1 while [ $x -le $num_iters ]; do echo "Pass $x (learnrate $learn_rate)" if [ -f $dir/$x.nnet ]; then echo "Skipped, file $dir/$x.nnet exists" else $cmd $dir/log/mmi.$x.log \ nnet-train-mmi-sequential \ --feature-transform=$feature_transform \ --class-frame-counts=$class_frame_counts \ --acoustic-scale=$acwt \ --lm-scale=$lmwt \ --learn-rate=$learn_rate \ --drop-frames=$drop_frames \ --verbose=$verbose \ $cur_mdl $alidir/final.mdl "$feats" "$lats" "$ali" $dir/$x.nnet || exit 1 fi cur_mdl=$dir/$x.nnet #report the progress grep -B 2 MMI-objective $dir/log/mmi.$x.log | sed -e 's|^[^)]*)[^)]*)||' x=$((x+1)) learn_rate=$(awk "BEGIN{print($learn_rate*$halving_factor)}") done (cd $dir; [ -e final.nnet ] && unlink final.nnet; ln -s $((x-1)).nnet final.nnet) echo "MMI/BMMI training finished" echo "Re-estimating priors by forwarding the training set." . cmd.sh nj=$(cat $alidir/num_jobs) steps/nnet/make_priors.sh --cmd "$train_cmd" --nj $nj $data $dir || exit 1 exit 0
thorsonlinguistics/german-neutralization
steps/nnet/train_mmi.sh
Shell
apache-2.0
6,857
#!/usr/bin/env bash
# Copyright 2017 Sean Mackrory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

source scripts/functions.sh

set -e
set -v
set -x

IFS=' ' read -r -a ZK_QUORUM <<< "$(split ${1})"

declare -a IDS
for ((i=0; i<${#ZK_QUORUM[*]}; i++)); do
    IDS[${i}]=$(echo ${ZK_QUORUM[${i}]} | sed -e "s/${HOST_PREFIX}//" | sed -e "s/${HOST_SUFFIX}//")
done

for host in ${ZK_QUORUM[@]}; do
    ssh -i ${ID_FILE} root@${host} ". /tmp/env.sh
wget http://www-us.apache.org/dist/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz
tar xzf zookeeper-${ZK_VERSION}.tar.gz
mv zookeeper-${ZK_VERSION} zookeeper
cd zookeeper
cat > conf/zoo.cfg <<EOF
tickTime=2000
dataDir=/var/lib/zookeeper
clientPort=2181
initLimit=5
syncLimit=2
server.${IDS[0]}=${ZK_QUORUM[0]}:2888:3888
server.${IDS[1]}=${ZK_QUORUM[1]}:2888:3888
server.${IDS[2]}=${ZK_QUORUM[2]}:2888:3888
EOF
i=\`hostname | sed -e 's/${HOST_PREFIX}//' | sed -e 's/${HOST_SUFFIX}//'\`
mkdir -p /var/lib/zookeeper
chmod 777 /var/lib/zookeeper
echo \${i} > /var/lib/zookeeper/myid
bin/zkServer.sh start
" < /dev/null
done
mackrorysd/hadoop-compatibility
scripts/zookeeper.sh
Shell
apache-2.0
1,622
#!/bin/sh set -e echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}" install_framework() { if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then local source="${BUILT_PRODUCTS_DIR}/$1" elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")" elif [ -r "$1" ]; then local source="$1" fi local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" if [ -L "${source}" ]; then echo "Symlinked..." source="$(readlink "${source}")" fi # use filter instead of exclude so missing patterns dont' throw errors echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\"" rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}" local basename basename="$(basename -s .framework "$1")" binary="${destination}/${basename}.framework/${basename}" if ! [ -r "$binary" ]; then binary="${destination}/${basename}" fi # Strip invalid architectures so "fat" simulator / device frameworks work on device if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then strip_invalid_archs "$binary" fi # Resign the code if required by the build settings to avoid unstable apps code_sign_if_enabled "${destination}/$(basename "$1")" # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7. if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then local swift_runtime_libs swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]}) for lib in $swift_runtime_libs; do echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\"" rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}" code_sign_if_enabled "${destination}/${lib}" done fi } # Signs a framework with the provided identity code_sign_if_enabled() { if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then # Use the current code_sign_identitiy echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}" local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'" if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then code_sign_cmd="$code_sign_cmd &" fi echo "$code_sign_cmd" eval "$code_sign_cmd" fi } # Strip invalid architectures strip_invalid_archs() { binary="$1" # Get architectures for current file archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)" stripped="" for arch in $archs; do if ! 
[[ "${VALID_ARCHS}" == *"$arch"* ]]; then # Strip non-valid architectures in-place lipo -remove "$arch" -output "$binary" "$binary" || exit 1 stripped="$stripped $arch" fi done if [[ "$stripped" ]]; then echo "Stripped $binary of architectures:$stripped" fi } if [[ "$CONFIGURATION" == "Debug" ]]; then install_framework "$BUILT_PRODUCTS_DIR/Kingfisher/Kingfisher.framework" install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework" install_framework "$BUILT_PRODUCTS_DIR/RxCocoa/RxCocoa.framework" install_framework "$BUILT_PRODUCTS_DIR/RxSwift/RxSwift.framework" install_framework "$BUILT_PRODUCTS_DIR/SQLite.swift/SQLite.framework" fi if [[ "$CONFIGURATION" == "Release" ]]; then install_framework "$BUILT_PRODUCTS_DIR/Kingfisher/Kingfisher.framework" install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework" install_framework "$BUILT_PRODUCTS_DIR/RxCocoa/RxCocoa.framework" install_framework "$BUILT_PRODUCTS_DIR/RxSwift/RxSwift.framework" install_framework "$BUILT_PRODUCTS_DIR/SQLite.swift/SQLite.framework" fi if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then wait fi
ellited/Lieferando-Services
Pods/Target Support Files/Pods-Lieferando Services/Pods-Lieferando Services-frameworks.sh
Shell
apache-2.0
4,291
#!/bin/bash set -euo pipefail export GOPATH=$(cd "$(dirname "$0")"; pwd) export GOBIN=${GOBIN:-${GOPATH}/bin} export KODING_REPO=$(git rev-parse --show-toplevel) export KODING_GIT_VERSION=$(git rev-parse --short HEAD || cat ./VERSION || cat ../VERSION || cat ../../../VERSION || echo "0") export KODING_VERSION=${KODING_VERSION:-$KODING_GIT_VERSION} export KODING_LDFLAGS="-X koding/artifact.VERSION=${KODING_VERSION} -X main.GitCommit=${KODING_VERSION} -s" export KODING_TAGS="" koding-go-install() { go install -v -tags "${KODING_TAGS}" -ldflags "${KODING_LDFLAGS}" $* } export COMMANDS=( koding/kites/kontrol koding/kites/kloud koding/kites/kloud/kloudctl koding/kites/cmd/terraformer koding/kites/cmd/tunnelserver koding/workers/cmd/tunnelproxymanager koding/workers/removenonexistents koding/kites/kloud/scripts/userdebug koding/kites/kloud/scripts/sl koding/klient koding/klientctl koding/scripts/multiec2ssh socialapi/workers/api socialapi/workers/cmd/realtime socialapi/workers/cmd/realtime/gatekeeper socialapi/workers/cmd/realtime/dispatcher socialapi/workers/cmd/migrator socialapi/workers/cmd/algoliaconnector socialapi/workers/cmd/algoliaconnector/deletedaccountremover socialapi/workers/cmd/presence socialapi/workers/cmd/collaboration socialapi/workers/cmd/email/emailsender socialapi/workers/cmd/team socialapi/workers/algoliaconnector/tagmigrator socialapi/workers/algoliaconnector/contentmigrator vendor/github.com/koding/kite/kitectl vendor/github.com/canthefason/go-watcher vendor/github.com/mattes/migrate vendor/github.com/alecthomas/gocyclo vendor/github.com/remyoudompheng/go-misc/deadcode vendor/github.com/jteeuwen/go-bindata/go-bindata vendor/github.com/wadey/gocovmerge vendor/github.com/opennota/check/cmd/varcheck vendor/gopkg.in/alecthomas/gometalinter.v1 ) export TERRAFORM_COMMANDS=( vendor/github.com/hashicorp/terraform $(go list vendor/github.com/hashicorp/terraform/builtin/bins/... | grep -v -E 'provisioner|provider-github') ) export TERRAFORM_CUSTOM_COMMANDS=( koding/kites/cmd/provider-vagrant vendor/github.com/koding/terraform-provider-github/cmd/provider-github vendor/github.com/Banno/terraform-provider-marathon ) # source configuration for kloud providers for provider in $KODING_REPO/go/src/koding/kites/kloud/provider/*; do if [[ -d "${provider}/build.sh.d" ]]; then for src in ${provider}/build.sh.d/*; do if [[ -f "$src" ]]; then source "$src" fi done fi done fileToBeCleaned=$GOPATH/src/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go grep "interpolationFuncFile()," $fileToBeCleaned && sed -i.bak '/interpolationFuncFile(),/d' $fileToBeCleaned go generate koding/kites/config koding/kites/kloud/kloud koding-go-install ${COMMANDS[@]} ${TERRAFORM_COMMANDS[@]} koding-go-install ${TERRAFORM_CUSTOM_COMMANDS[@]} # clean up unused resources in any case rm -rf $GOBIN/terraform-provisioner-* for cmd in $GOBIN/provider-*; do NAME=$(echo $cmd | rev | cut -d/ -f1 | rev) ln -sf $GOBIN/$NAME $GOBIN/terraform-$NAME done
drewsetski/koding
go/build.sh
Shell
apache-2.0
3,056
test_init_auto() { # - lxd init --auto --storage-backend zfs # and # - lxd init --auto # can't be easily tested on jenkins since it hard-codes "default" as pool # naming. This can cause naming conflicts when multiple test-suites are run on # a single runner. if [ "$(storage_backend "$LXD_DIR")" = "zfs" ]; then # lxd init --auto --storage-backend zfs --storage-pool <name> LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) chmod +x "${LXD_INIT_DIR}" spawn_lxd "${LXD_INIT_DIR}" configure_loop_device loop_file_1 loop_device_1 # shellcheck disable=SC2154 zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" kill_lxd "${LXD_INIT_DIR}" zpool destroy "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" sed -i "\|^${loop_device_1}|d" "${TEST_DIR}/loops" # lxd init --auto --storage-backend zfs --storage-pool <name>/<existing-dataset> LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) chmod +x "${LXD_INIT_DIR}" spawn_lxd "${LXD_INIT_DIR}" configure_loop_device loop_file_1 loop_device_1 # shellcheck disable=SC2154 zpool create "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" "${loop_device_1}" -m none -O compression=on zfs create -p -o mountpoint=none "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" LXD_DIR=${LXD_INIT_DIR} lxd init --auto --storage-backend zfs --storage-pool "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool/existing-dataset" kill_lxd "${LXD_INIT_DIR}" zpool destroy "lxdtest-$(basename "${LXD_DIR}")-pool1-existing-pool" sed -i "\|^${loop_device_1}|d" "${TEST_DIR}/loops" # lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool <name> --auto LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) chmod +x "${LXD_INIT_DIR}" spawn_lxd "${LXD_INIT_DIR}" ZFS_POOL="lxdtest-$(basename "${LXD_DIR}")-init" LXD_DIR=${LXD_INIT_DIR} lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool "${ZFS_POOL}" --auto kill_lxd "${LXD_INIT_DIR}" zpool destroy "${ZFS_POOL}" fi }
lxc/lxd-pkg-ubuntu
test/suites/init_auto.sh
Shell
apache-2.0
2,278
export HADOOP_INSTALL=/opt/hadoop-2.7.0
export PATH=$PATH:$HADOOP_INSTALL/bin

input=$1
output=$2
k=$3

hdfs dfs -copyToLocal $input kmeans_input >> /dev/null
python kmeans_scikit.py kmeans_input kmeans_output $k
hdfs dfs -copyFromLocal kmeans_output $output
rm -rf kmeans_input kmeans_output
project-asap/IReS-Platform
asap-platform/asap-server/asapLibrary/operators/kmeans_scikit/kmeans_scikit.sh
Shell
apache-2.0
294
#!/bin/bash # # Copyright 2015 6WIND S.A. # # devstack/plugin.sh # Functions to install/configure the fast path with ML2 ovs, linuxbridge, odl... # Dependencies: # # ``functions`` file # ``TOP_DIR`` must be defined # ``stack.sh`` calls the entry points in this order: # install_fast_path # write_fast_path_conf # start_fast_path # setup_develop networking-6wind # configure_nova_rootwrap # configure_ml2_for_fast_path # stop_fast_path # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace NET_6WIND_DIR=$DEST/networking-6wind NET_6WIND_AGT_BIN_DIR=$(get_python_exec_prefix) NOVA_ROOTWRAP=$(get_rootwrap_location nova) function create_nova_rootwrap { # copy 6wind.filers for vif_ovsfp_plug scripts sudo cp $NET_6WIND_DIR/etc/nova/rootwrap.d/6wind.filters /etc/nova/rootwrap.d/. # fast-path commands are install in /usr/local, so nova-rootwrap needs to # be allowed to find the tools installed in /usr/local/bin. iniset /etc/nova/rootwrap.conf DEFAULT exec_dirs \ "$(iniget /etc/nova/rootwrap.conf DEFAULT exec_dirs),/usr/local/bin" } function configure_ml2_for_fast_path { if [[ "$Q_USE_SECGROUP" == "False" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop else iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid fi } function nova_set_hugepages_flavor { for flavor_id in $(nova flavor-list | awk '{print $2}' | grep [0-9]); do nova flavor-key $flavor_id set hw:mem_page_size=large done } # main loop if is_service_enabled net-6wind; then source $NET_6WIND_DIR/devstack/libs/fast-path if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then if [[ "$OFFLINE" != "True" ]]; then if is_service_enabled n-cpu; then setup_va_repo install_va fi fi elif [[ "$1" == "stack" && "$2" == "install" ]]; then if is_service_enabled n-cpu; then start_va fi setup_install $DEST/networking-6wind elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then if is_service_enabled n-cpu; then create_nova_rootwrap fi if is_service_enabled neutron-agent; then configure_ml2_for_fast_path fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then if is_service_enabled nova; then nova_set_hugepages_flavor fi if is_service_enabled n-cpu; then run_process net-6wind-agt "$NET_6WIND_AGT_BIN_DIR/neutron-fastpath-agent" fi fi if [[ "$1" == "unstack" ]]; then if is_service_enabled n-cpu; then stop_process net-6wind-agt stop_va fi fi if [[ "$1" == "clean" ]]; then if is_service_enabled n-cpu; then uninstall_va remove_va_repo fi fi fi # Restore xtrace $XTRACE
openstack/networking-6wind
devstack/plugin.sh
Shell
apache-2.0
2,915
#!/bin/bash
# Add custom configuration file

set -e

SCRIPT_DIR=$(dirname $0)
ADDED_DIR=${SCRIPT_DIR}/added

cp -p ${ADDED_DIR}/clustered-openshift.xml $JBOSS_HOME/standalone/configuration/
bdecoste/cct_module
os-jdg7-conffiles/configure.sh
Shell
apache-2.0
189
#!/bin/sh
NAME=security.cov
lcov --quiet --base-directory . --directory . -c -o $NAME
lcov --quiet --remove $NAME "/usr*" -o $NAME
lcov --quiet --remove $NAME "/build*" -o $NAME
lcov --quiet --remove $NAME "/opt*" -o $NAME
lcov --quiet --remove $NAME "*/adainclude*" -o $NAME
lcov --quiet --remove $NAME "*/regtests*" -o $NAME
lcov --quiet --remove $NAME "*/b__*" -o $NAME
rm -rf cover
genhtml --quiet --ignore-errors source -o ./cover -t "test coverage" --num-spaces 4 $NAME
stcarrez/ada-security
coverage.sh
Shell
apache-2.0
478
#!/bin/bash
#
# force_coreos_update.command
# CoreOS GUI for OS X
#
# Created by Rimantas on 01/04/2014.
# Copyright (c) 2014 Rimantas Mocevicius. All rights reserved.

function pause(){
    read -p "$*"
}

cd ~/coreos-osx/coreos-vagrant
vagrant up
vagrant ssh -c "sudo update_engine_client -update"
echo " "
echo "Update has finished !!!"
pause 'Press [Enter] key to continue...'
rudymccomb/coreos-osx-gui
src/force_coreos_update.command
Shell
apache-2.0
381
#!/bin/bash # Cause the script to exit if a single command fails. set -e # Show explicitly which commands are currently running. set -x # Much of this is taken from https://github.com/matthew-brett/multibuild. # This script uses "sudo", so you may need to type in a password a couple times. MACPYTHON_URL=https://www.python.org/ftp/python MACPYTHON_PY_PREFIX=/Library/Frameworks/Python.framework/Versions DOWNLOAD_DIR=python_downloads NODE_VERSION="14" PY_VERSIONS=("3.6.1" "3.7.0" "3.8.2") PY_INSTS=("python-3.6.1-macosx10.6.pkg" "python-3.7.0-macosx10.6.pkg" "python-3.8.2-macosx10.9.pkg") PY_MMS=("3.6" "3.7" "3.8") # The minimum supported numpy version is 1.14, see # https://issues.apache.org/jira/browse/ARROW-3141 NUMPY_VERSIONS=("1.14.5" "1.14.5" "1.14.5") ./ci/travis/install-bazel.sh mkdir -p $DOWNLOAD_DIR mkdir -p .whl # Use the latest version of Node.js in order to build the dashboard. source "$HOME"/.nvm/nvm.sh nvm install $NODE_VERSION nvm use node # Build the dashboard so its static assets can be included in the wheel. # TODO(mfitton): switch this back when deleting old dashboard code. pushd python/ray/new_dashboard/client npm ci npm run build popd for ((i=0; i<${#PY_VERSIONS[@]}; ++i)); do PY_VERSION=${PY_VERSIONS[i]} PY_INST=${PY_INSTS[i]} PY_MM=${PY_MMS[i]} NUMPY_VERSION=${NUMPY_VERSIONS[i]} # The -f flag is passed twice to also run git clean in the arrow subdirectory. # The -d flag removes directories. The -x flag ignores the .gitignore file, # and the -e flag ensures that we don't remove the .whl directory. git clean -f -f -x -d -e .whl -e $DOWNLOAD_DIR -e python/ray/new_dashboard/client -e dashboard/client # Install Python. INST_PATH=python_downloads/$PY_INST curl $MACPYTHON_URL/"$PY_VERSION"/"$PY_INST" > "$INST_PATH" sudo installer -pkg "$INST_PATH" -target / PYTHON_EXE=$MACPYTHON_PY_PREFIX/$PY_MM/bin/python$PY_MM PIP_CMD="$(dirname "$PYTHON_EXE")/pip$PY_MM" pushd /tmp # Install latest version of pip to avoid brownouts. curl https://bootstrap.pypa.io/get-pip.py | $PYTHON_EXE popd pushd python # Setuptools on CentOS is too old to install arrow 0.9.0, therefore we upgrade. $PIP_CMD install --upgrade setuptools # Install setuptools_scm because otherwise when building the wheel for # Python 3.6, we see an error. $PIP_CMD install -q setuptools_scm==3.1.0 # Fix the numpy version because this will be the oldest numpy version we can # support. $PIP_CMD install -q numpy=="$NUMPY_VERSION" cython==0.29.15 # Install wheel to avoid the error "invalid command 'bdist_wheel'". $PIP_CMD install -q wheel # Set the commit SHA in __init__.py. if [ -n "$TRAVIS_COMMIT" ]; then sed -i.bak "s/{{RAY_COMMIT_SHA}}/$TRAVIS_COMMIT/g" ray/__init__.py && rm ray/__init__.py.bak else echo "TRAVIS_COMMIT variable not set - required to populated ray.__commit__." exit 1 fi # Add the correct Python to the path and build the wheel. This is only # needed so that the installation finds the cython executable. PATH=$MACPYTHON_PY_PREFIX/$PY_MM/bin:$PATH $PYTHON_EXE setup.py bdist_wheel mv dist/*.whl ../.whl/ popd done
richardliaw/ray
python/build-wheel-macos.sh
Shell
apache-2.0
3,290
#!/bin/sh ############################### # Parameter settings # ############################### elasticSearchServer=$1 if [ -z "$elasticSearchServer" ] then echo "ERROR: You must provide an ElasticSearch server!" echo " A valid call could be: " echo " shell> $0 http://whvmescidev6.fiz-karlsruhe.de:9200/" echo " Schema was not updated." exit 1 #exit script fi echo echo Using ElasticSearch server echo -------------------------- echo $elasticSearchServer ############################### # Configuration settings # ############################### index=ddb echo echo Using ElasticSearch index echo ------------------------- echo $index ############################### # Posting data to server # ############################### postData() { echo Posting data to $elasticSearchServer/$index/$1/ response=`export http_proxy="" && curl --request POST --data $3 --silent $elasticSearchServer/$index/$1/$2` echo $response case "`echo $response | jshon -k`" in *ok*) echo "ok" ;; *) echo "ERROR: " echo $response | jshon ;; esac } #Create some folderLists # Delete old test list: http://whvmescidev6.fiz-karlsruhe.de:9200/ddb/folderList/_query?q=title:* # Search ids for users ddb-daily ddb-domain and add them to field users # whvmescidoc6: # ddb-daily: 3fb89f5f12e1f6781b245a1f8db270ec # ddb-domain: 0edb4f2774e46b72959f9a79ee641746 # # ddbelse-t1: # ddb-daily: ebb86eb13054046f9b14f72189b0205b # ddb-domain: # # ddbelse-p1: # ddb-daily: 56bf18c366c89d1915887decda83c0ab # ddb-domain: c06a4d66bc3acedc00aa4da230eca90e postData "folderList" "dailyList" "{\"title\":\"ddbnext.lists.dailyList\",\"createdAt\":`date +%s`,\"users\":[\"3fb89f5f12e1f6781b245a1f8db270ec\",\"0edb4f2774e46b72959f9a79ee641746\"],\"folders\":[]}"
Deutsche-Digitale-Bibliothek/ddb-next
elasticsearch/createReleaseData_4.4.sh
Shell
apache-2.0
1,856
#!/bin/bash
set -e

cd ~/uploads/etc

# Setup the sudoers config
sudo visudo -c -f sudoers
sudo EDITOR="cp sudoers" visudo
sudo rm /etc/sudoers.d/vagrant

sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config.orig
sudo cp ssh/sshd_config /etc/ssh/sshd_config

sudo cp /etc/pam.d/su /etc/pam.d/su.orig
sudo cp pam.d/su /etc/pam.d/su

sudo cp /etc/default/grub /etc/default/grub.orig
sudo cp default/grub /etc/default/grub
sudo grub2-mkconfig -o /boot/grub2/grub.cfg

cd ~/
rm -rf ~/uploads
dedickinson/engineering-notebook
packer/base-centos/provisioners/uploads.sh
Shell
apache-2.0
486
#!/bin/bash

# Build the Dokka docs.
./assemble_docs.sh

# Copy outside files into the docs folder.
sed -e '/full documentation here/ { N; d; }' < README.md > docs/index.md
cp .github/ISSUE_TEMPLATE/CONTRIBUTING.md docs/contributing.md
cp CHANGELOG.md docs/changelog.md
cp coil-compose-singleton/README.md docs/compose.md
cp coil-gif/README.md docs/gifs.md
cp coil-svg/README.md docs/svgs.md
cp coil-video/README.md docs/videos.md
cp logo.svg docs/logo.svg
cp README-ko.md docs/README-ko.md
cp README-tr.md docs/README-tr.md
cp README-zh.md docs/README-zh.md

# Deploy to Github pages.
mkdocs gh-deploy

# Clean up.
rm docs/index.md \
    docs/contributing.md \
    docs/changelog.md \
    docs/compose.md \
    docs/logo.svg \
    docs/gifs.md \
    docs/svgs.md \
    docs/videos.md \
    docs/README-ko.md \
    docs/README-tr.md \
    docs/README-zh.md
coil-kt/coil
deploy_docs.sh
Shell
apache-2.0
847
#!/bin/bash # # Copyright (c) 2016-2017 Nest Labs, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # @file # run weave perf test suite if [ -z "$happy_dns" ]; then export happy_dns="8.8.8.8 172.16.255.1 172.16.255.153 172.16.255.53" fi if [ -z "$weave_service_address" ]; then export weave_service_address="frontdoor.qa.nestlabs.com" fi if [ -z "$FABRIC_SEED" ]; then export FABRIC_SEED="0x00001" fi if [ -z "$NUM_TUNNELS" ]; then export NUM_TUNNELS=50 fi if [ -z "$NUM_DEVICES" ]; then export NUM_DEVICES=2 fi if [ -z "$ENABLE_RANDOM_FABRIC" ]; then export randomFabric="" else export randomFabric="randomFabric" fi # currently RESOURCE_IDS is for weave pairing only if [ -z "$RESOURCE_IDS" ]; then export RESOURCE_IDS='gsrbr1 gsrbr1' fi export CASE=1 export USE_SERVICE_DIR=1 VAR_TEST=true if [ -z "$TESTCASES" ]; then TESTCASES=(pairing/test_weave_pairing_01.py echo/test_weave_echo_02.py tunnel/test_weave_tunnel_02.py time/test_weave_time_01.py wdmNext/test_weave_wdm_next_service_mutual_subscribe_05.py wdmNext/test_weave_wdm_next_service_mutual_subscribe_09.py wdmNext/test_weave_wdm_next_service_mutual_subscribe_17.py ) else TESTCASES=($(echo $TESTCASES | tr ',' "\n")) fi TEST_DIR=$(dirname $(readlink -f $0)) (source $TEST_DIR/run_parallel_topology.sh create ${NUM_TUNNELS} ${NUM_DEVICES} ${FABRIC_SEED} cellular mobile ${randomFabric}) VAR_TOPOLOGY_CREATE=$? for i in ${TESTCASES[@]}; do (source $TEST_DIR/run_parallel_weave_service_test.sh ${NUM_TUNNELS} ${NUM_DEVICES} ${FABRIC_SEED} 0 ${weave_service_address} $TEST_DIR/${i}) if [ $? -eq 0 ]; then echo -e "\033[32m SUCCESS - ${i} exited with a status of $?" echo -e "\033[0m" else echo -e "\033[31m FAILED - ${i} exited with a status of $?" echo -e "\033[0m" VAR_TEST=false fi done (source $TEST_DIR/run_parallel_topology.sh destroy ${NUM_TUNNELS} ${NUM_DEVICES} ${FABRIC_SEED}) VAR_TOPOLOGY_DESTROY=$? if $VAR_TEST; then echo -e "\033[32m test is ok" echo -e "\033[0m" exit 0 else echo -e "\033[31m test is not ok" echo -e "\033[0m" exit 1 fi
openweave/openweave-core
src/test-apps/happy/tests/service/weave_service_perf_run.sh
Shell
apache-2.0
2,820
#!/bin/bash -xe # # Copyright 2005-2014 The Kuali Foundation # # Licensed under the Educational Community License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.opensource.org/licenses/ecl2.php # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Update authorized keys from SVN for servers # Script must be called from Jenkins as it sets $WORKSPACE KEYFILE="$WORKSPACE/src/main/resources/authorized_keys" PRIVATEKEY=/var/lib/jenkins/.ssh/kr-key.pem # check return code and exit if not zero check_ret_code() { ret_code=$? if [ $ret_code -ne 0 ]; then printf "\n\nReturn code is $ret_code. Exiting.....\n\n"; exit $ret_code fi } hosts=( [email protected] [email protected] [email protected] [email protected] ) # Make sure key file exists and it is not zero byte if [[ ! -s $KEYFILE ]]; then echo "Unable to checkout $KEYFILE or it is zero byte" exit 1 fi # Update servers with the key for host in ${hosts[@]}; do echo Updating $host ssh -i $PRIVATEKEY $host "chmod 600 ~/.ssh/authorized_keys" check_ret_code scp -i $PRIVATEKEY $KEYFILE $host:.ssh/. check_ret_code done echo Updated authorized_keys successfully.
ua-eas/ua-rice-2.1.9
config/access/src/main/resources/update_authorized_keys.sh
Shell
apache-2.0
1,583
#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. set -e set -x function install_vhd_util() { [[ -f /bin/vhd-util ]] && return wget --no-check-certificate https://github.com/rhtyd/cloudstack-nonoss/raw/master/vhd-util -O /bin/vhd-util chmod a+x /bin/vhd-util } function debconf_packages() { echo 'sysstat sysstat/enable boolean true' | debconf-set-selections echo "strongwan strongwan/install_x509_certificate boolean false" | debconf-set-selections echo "strongwan strongwan/install_x509_certificate seen true" | debconf-set-selections echo "iptables-persistent iptables-persistent/autosave_v4 boolean true" | debconf-set-selections echo "iptables-persistent iptables-persistent/autosave_v6 boolean true" | debconf-set-selections echo "libc6 libraries/restart-without-asking boolean false" | debconf-set-selections } function install_packages() { export DEBIAN_FRONTEND=noninteractive export DEBIAN_PRIORITY=critical local arch=`dpkg --print-architecture` debconf_packages install_vhd_util local apt_get="apt-get --no-install-recommends -q -y" ${apt_get} install grub-legacy \ rsyslog logrotate cron net-tools ifupdown tmux vim-tiny htop netbase iptables \ openssh-server e2fsprogs tcpdump iftop socat wget \ python bzip2 sed gawk diffutils grep gzip less tar telnet ftp rsync traceroute psmisc lsof procps \ inetutils-ping iputils-arping httping curl \ dnsutils zip unzip ethtool uuid file iproute acpid sudo \ sysstat python-netaddr \ apache2 ssl-cert \ dnsmasq dnsmasq-utils \ nfs-common \ samba-common cifs-utils \ xl2tpd bcrelay ppp ipsec-tools tdb-tools \ xenstore-utils libxenstore3.0 \ ipvsadm conntrackd libnetfilter-conntrack3 \ keepalived irqbalance \ ipcalc \ openjdk-8-jre-headless \ ipset \ iptables-persistent \ libtcnative-1 libssl-dev libapr1-dev \ python-flask \ haproxy \ radvd \ sharutils genisoimage aria2 \ strongswan libcharon-extra-plugins libstrongswan-extra-plugins \ virt-what open-vm-tools qemu-guest-agent hyperv-daemons apt-get -q -y -t stretch-backports install nftables apt-get -y autoremove --purge apt-get clean apt-get autoclean ${apt_get} install links #32 bit architecture support for vhd-util: not required for 32 bit template if [ "${arch}" != "i386" ]; then dpkg --add-architecture i386 apt-get update ${apt_get} install libuuid1:i386 libc6:i386 fi # Install xenserver guest utilities as debian repos don't have it wget https://mirrors.kernel.org/ubuntu/pool/main/x/xe-guest-utilities/xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb dpkg -i xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb rm -f xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb } return 2>/dev/null || install_packages
wido/cloudstack
tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh
Shell
apache-2.0
3,545
#!/usr/bin/env bash # Cause the script to exit if a single command fails. set -e # Show explicitly which commands are currently running. set -x ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd) platform="" case "${OSTYPE}" in linux*) platform="linux";; darwin*) platform="macosx";; msys*) platform="windows";; *) echo "Unrecognized platform."; exit 1;; esac BUILD_DIR="${TRAVIS_BUILD_DIR-}" if [ -z "${BUILD_DIR}" ]; then BUILD_DIR="${GITHUB_WORKSPACE}" fi TEST_DIR="${BUILD_DIR}/python/ray/tests" TEST_SCRIPTS=("$TEST_DIR/test_microbenchmarks.py" "$TEST_DIR/test_basic.py") DASHBOARD_TEST_SCRIPT="${BUILD_DIR}/python/ray/tests/test_dashboard.py" function retry { local n=1 local max=3 while true; do if "$@"; then break fi if [ $n -lt $max ]; then ((n++)) echo "Command failed. Attempt $n/$max:" else echo "The command has failed after $n attempts." exit 1 fi done } if [[ "$platform" == "linux" ]]; then # Install miniconda. PY_WHEEL_VERSIONS=("36" "37" "38" "39") PY_MMS=("3.6.13" "3.7.10" "3.8.10" "3.9.5") wget --quiet "https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh" -O miniconda3.sh "${ROOT_DIR}"/../suppress_output bash miniconda3.sh -b -p "$HOME/miniconda3" export PATH="$HOME/miniconda3/bin:$PATH" for ((i=0; i<${#PY_MMS[@]}; ++i)); do PY_MM="${PY_MMS[i]}" PY_WHEEL_VERSION="${PY_WHEEL_VERSIONS[i]}" conda install -y python="${PY_MM}" PYTHON_EXE="$HOME/miniconda3/bin/python" PIP_CMD="$HOME/miniconda3/bin/pip" # Find the right wheel by grepping for the Python version. PYTHON_WHEEL="$(printf "%s\n" "$ROOT_DIR"/../../.whl/*"$PY_WHEEL_VERSION"* | head -n 1)" # Install the wheel. "$PIP_CMD" install -q "$PYTHON_WHEEL" # Check that ray.__commit__ was set properly. "$PYTHON_EXE" -u -c "import ray; print(ray.__commit__)" | grep "$TRAVIS_COMMIT" || (echo "ray.__commit__ not set properly!" && exit 1) # Install the dependencies to run the tests. "$PIP_CMD" install -q aiohttp grpcio pytest==5.4.3 requests # Run a simple test script to make sure that the wheel works. for SCRIPT in "${TEST_SCRIPTS[@]}"; do retry "$PYTHON_EXE" "$SCRIPT" done retry "$PYTHON_EXE" "$DASHBOARD_TEST_SCRIPT" done # Check that the other wheels are present. NUMBER_OF_WHEELS="$(find "$ROOT_DIR"/../../.whl/ -mindepth 1 -maxdepth 1 -name "*.whl" | wc -l)" if [[ "$NUMBER_OF_WHEELS" != "4" ]]; then echo "Wrong number of wheels found." ls -l "$ROOT_DIR/../.whl/" exit 2 fi elif [[ "$platform" == "macosx" ]]; then MACPYTHON_PY_PREFIX=/Library/Frameworks/Python.framework/Versions PY_WHEEL_VERSIONS=("36" "37" "38" "39") PY_MMS=("3.6" "3.7" "3.8" "3.9") for ((i=0; i<${#PY_MMS[@]}; ++i)); do PY_MM="${PY_MMS[i]}" PY_WHEEL_VERSION="${PY_WHEEL_VERSIONS[i]}" PYTHON_EXE="$MACPYTHON_PY_PREFIX/$PY_MM/bin/python$PY_MM" PIP_CMD="$(dirname "$PYTHON_EXE")/pip$PY_MM" # Find the appropriate wheel by grepping for the Python version. PYTHON_WHEEL="$(printf "%s\n" "$ROOT_DIR"/../../.whl/*"$PY_WHEEL_VERSION"* | head -n 1)" # Install the wheel. "$PIP_CMD" install -q "$PYTHON_WHEEL" # Install the dependencies to run the tests. "$PIP_CMD" install -q aiohttp grpcio pytest==5.4.3 requests # Run a simple test script to make sure that the wheel works. for SCRIPT in "${TEST_SCRIPTS[@]}"; do retry "$PYTHON_EXE" "$SCRIPT" done done elif [ "${platform}" = windows ]; then echo "WARNING: Wheel testing not yet implemented for Windows." else echo "Unrecognized environment." exit 3 fi
pcmoritz/ray-1
ci/travis/test-wheels.sh
Shell
apache-2.0
3,717
#!/bin/sh
BASEDIR=$(dirname "$0")
docker build --rm -t workcalendar -f "$BASEDIR/Dockerfile" .
OlegAndreych/work_calendar
docker_build.sh
Shell
apache-2.0
95
#!/bin/bash

###############################################################################
##
## Description:
##   This script checks that the guest time can be synced with the host.
##   The guest time should match the host time after timesync is enabled.
##
###############################################################################
##
## Revision:
## v1.0 - ldu - 08/29/2017 - Draft script for case ESX-OVT-015.
##
###############################################################################

dos2unix utils.sh

# Source utils.sh
. utils.sh || {
    echo "Error: unable to source utils.sh!"
    exit 1
}

#. constants.sh || {
#    echo "Error: unable to source constants.sh!"
#    exit 1
#}

# Source constants file and initialize most common variables
UtilsInit

#
# Start the testing
#

if [[ $DISTRO == "redhat_6" ]]; then
    SetTestStateSkipped
    exit
fi

enable=`vmware-toolbox-cmd timesync enable`
disable=`vmware-toolbox-cmd timesync disable`

# set new time for guest
olddate=`date -s "2017-08-29 12:00:00"`
#stanversion='open-vm-tools-10.1.5-2.el7.x86_64'

datehost=`vmware-toolbox-cmd stat hosttime`
timehost=`date +%s -d"$datehost"`
UpdateSummary "timehost after disable: $timehost"
timeguest=`date +%s`
UpdateSummary "timeguest after disable: $timeguest"
offset=$[timehost-timeguest]
UpdateSummary "offset: $offset."

if [ "$offset" -eq 0 ]; then
    LogMsg "Info: Setting the guest time behind the host time failed"
    UpdateSummary "offset: $offset, setting the guest time behind the host time failed."
    SetTestStateAborted
    exit 1
else
    LogMsg $offset
    UpdateSummary "offset: $offset, set the guest time behind the host time successfully."
    sleep 1
    vmware-toolbox-cmd timesync enable
    if [ $? -ne 0 ]; then
        LogMsg "Test Failed. command enable failed."
        UpdateSummary "Test Failed. command enable failed."
        SetTestStateAborted
        exit 1
    fi
    sleep 1
    # enable the guest timesync with host
    datehost=`vmware-toolbox-cmd stat hosttime`
    timehost=`date +%s -d"$datehost"`
    UpdateSummary "timehost after enable: $timehost"
    timeguest=`date +%s`
    UpdateSummary "timeguest after enable: $timeguest"
    # the guest time and host time difference should not be bigger than 1
    offset=$[timehost-timeguest]
    offset=${offset/-/}
    if [ $offset -gt 1 ]; then
        LogMsg "Info: The guest time failed to sync with the host."
        UpdateSummary "offset: $offset, Test Failed, the guest time failed to sync with the host."
        SetTestStateFailed
        exit 1
    else
        LogMsg "$offset"
        UpdateSummary "offset: $offset, Test Successful. The guest time is synced with the host."
        SetTestStateCompleted
        exit 0
    fi
fi
VirtQE-S1/ESX-LISA
remote-scripts/ovt_check_oneshot_timesync.sh
Shell
apache-2.0
2,923
#!/bin/bash

if [ "$#" -ne 1 ]; then
   echo "Usage: ./populate_table.sh bucket-name"
   exit
fi

BUCKET=$1

echo "Populating Cloud SQL instance flights from gs://${BUCKET}/flights/raw/..."

# To run mysqlimport and mysql, authorize CloudShell
bash authorize_cloudshell.sh

# the table name for mysqlimport comes from the filename, so rename our CSV files, changing bucket name as needed
counter=0
#for FILE in $(gsutil ls gs://${BUCKET}/flights/raw/2015*.csv); do
#   gsutil cp $FILE flights.csv-${counter}
for FILE in 201501.csv 201507.csv; do
   gsutil cp gs://${BUCKET}/flights/raw/$FILE flights.csv-${counter}
   counter=$((counter+1))
done

# import csv files
MYSQLIP=$(gcloud sql instances describe flights --format="value(ipAddresses.ipAddress)")
mysqlimport --local --host=$MYSQLIP --user=root --ignore-lines=1 --fields-terminated-by=',' --password bts flights.csv-*
rm flights.csv-*
GoogleCloudPlatform/training-data-analyst
quests/data-science-on-gcp-edition1_tf2/03_sqlstudio/populate_table.sh
Shell
apache-2.0
897
#!/usr/bin/env bash

# The base function to output logging information. This should *not* be used
# directly, but the helper functions can be used safely.
log() {
    local OPTIND
    local color

    while getopts ":c:" opt
    do
        case "$opt" in
            c) color=$OPTARG ;;
            *) alert "Unused argument specified: $opt" ;;
        esac
    done

    shift $(( OPTIND - 1 ))

    printf "${color}[%s] %s\e[0m\n" "$(date +%FT%T)" "$*" >&2
}

if [[ "$(awk '$1 == "'"$TERM"'" {print 1}' "$BASEDIR/etc/color-terminals.txt")" ]]
then
    debug() {
        [[ -z $RSTAR_DEBUG ]] && return
        log -c "\e[1;30m" -- "$*"
    }
    info() {
        log -- "$*"
    }
    notice() {
        log -c "\e[0;34m" -- "$*"
    }
    warn() {
        log -c "\e[0;33m" -- "$*"
    }
    crit() {
        log -c "\e[0;31m" -- "$*"
    }
    alert() {
        log -c "\e[1;31m" -- "$*"
    }
    emerg() {
        log -c "\e[1;4;31m" -- "$*"
    }
else
    debug() {
        [[ -z $RSTAR_DEBUG ]] && return
        log -- "[DEBUG] $*"
    }
    info() {
        log -- "[INFO] $*"
    }
    notice() {
        log -- "[NOTIC] $*"
    }
    warn() {
        log -- "[WARN] $*"
    }
    crit() {
        log -- "[CRIT] $*"
    }
    alert() {
        log -- "[ALERT] $*"
    }
    emerg() {
        log -- "[EMERG] $*"
    }
fi
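# Example usage (illustrative sketch only, not part of the original library):
# the helpers above are meant to be called from a script that sources this
# file and that has already set BASEDIR (used for the color-terminal lookup);
# RSTAR_DEBUG only needs to be non-empty when debug output is wanted.
#
#   source lib/logging.bash
#   info "starting build"
#   warn "configuration file not found, using defaults"
#   RSTAR_DEBUG=1 debug "extra detail, printed only when RSTAR_DEBUG is set"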
rakudo/star
lib/logging.bash
Shell
artistic-2.0
1,117
#!/bin/sh

cp t/basic-backup basic
cp t/multilang-backup multilang
Uladox/Editsrc-Uggedit
reset-tests.sh
Shell
artistic-2.0
66
#!/bin/bash

appname="$1"
windowtitle="$2"

[ "$appname" = "" ] && echo "expecting app-name" && exit 1

mydir="$(dirname "$0")"
script="$mydir/app-scripts/url of $appname"

function call_applescript {
    ret="$(osascript -ss "$1" 2>/dev/null)"
    [ $? != 0 ] && return 1
    echo "$ret" | sed "s/^\"\(.*\)\"$/\1/g"
}

[ -e "$script.scpt" ] && {
    call_applescript "$script.scpt"
    exit $?
}

[ -e "$script.sh" ] && {
    sh "$script.sh" "$windowtitle"
    exit $?
}

[ -e "$script.py" ] && {
    python "$script.py" "$windowtitle"
    exit $?
}

# NOTE: This has the huge drawback that it sometimes opens another instance of the app.
# For example, I had several OpenLieroX instances on my PC and had started one of them.
# This little snippet here just started another instance.
# Also, it seems that the fallback below covers all cases anyway.
#{
#   echo "tell application \"$appname\""
#   echo "set weburl to \"file://\" & (path of front document as string)"
#   echo "end tell"
#} | call_applescript - && exit 0

{
    echo "tell application \"System Events\""
    echo " tell process \"$appname\""
    echo " tell (1st window whose value of attribute \"AXMain\" is true)"
    echo " return value of attribute \"AXDocument\""
    echo " end tell"
    echo " end tell"
    echo "end tell"
} | call_applescript - && exit 0

exit 1
albertz/foreground_app_info
mac/get_app_url.sh
Shell
bsd-2-clause
1,297
cd "${INSTALLDIR}/${NAME}/"

manage="${VENV}/bin/python ${INSTALLDIR}/${NAME}/manage.py"

# run the following if migration `core 0031` has failed
# $manage migrate core 0031 --fake --settings=tuneme.settings.production

$manage migrate --noinput --settings=tuneme.settings.production

# process static files
$manage collectstatic --noinput --settings=tuneme.settings.production
$manage compress --settings=tuneme.settings.production

# compile i18n strings
$manage compilemessages --settings=tuneme.settings.production

# Update the search index
$manage update_index --settings=tuneme.settings.production

# Malawi
# ------
$manage migrate --noinput --settings=tuneme.settings.malawi
$manage collectstatic --noinput --settings=tuneme.settings.malawi
$manage compress --settings=tuneme.settings.malawi
$manage compilemessages --settings=tuneme.settings.malawi
$manage update_index --settings=tuneme.settings.malawi
praekelt/molo-tuneme
sideloader/postinstall.sh
Shell
bsd-2-clause
912
#!/bin/bash

i="0"

while [ $i -lt 1 ]
do

hostn=$(cat /etc/hostname)

ext_ip4=$(dig +short myip.opendns.com @resolver1.opendns.com)
ext_ip6=$(curl icanhazip.com)

# mosquitto_pub -d -t hello/world -m "$(date) : irot LdB, online. IP is $ext_ip" -h "uveais.ca"
# mosquitto_pub -d -t uvea/alive -m "$(date) : $hostn server IP $ext_ip is online." -h "2604:8800:100:19a::2"
# mosquitto_pub -d -t uvea/alive -m "$(date) : $hostn IPv4 $ext_ip4 is online." -h "2001:5c0:1100:dd00:240:63ff:fefd:d3f1"
# mosquitto_pub -d -t aebl/alive -m "$(date) : $hostn IPv6 $ext_ip6 is online." -h "2001:5c0:1100:dd00:240:63ff:fefd:d3f1"

mosquitto_pub -d -t uvea/alive -m "$(date) : $hostn IPv4 $ext_ip4 is online." -h "ihdn.ca"
mosquitto_pub -d -t aebl/alive -m "$(date) : $hostn IPv6 $ext_ip6 is online." -h "ihdn.ca"

# i=$[$i+1]

sleep 300

done
krattai/noo-ebs
ref_code/mqtt/pub.sh
Shell
bsd-2-clause
823
#!/bin/bash

source activate $CONDA_ENV

# Make sure any error below is reported as such
set -v -e

# Ensure the README is correctly formatted
if [ "$BUILD_DOC" == "yes" ]; then rstcheck README.rst; fi

# Ensure that the documentation builds without warnings
pushd docs
if [ "$BUILD_DOC" == "yes" ]; then make SPHINXOPTS=-W clean html; fi
popd

# Run system info tool
pushd bin
numba -s
popd

# switch off color messages
export NUMBA_DISABLE_ERROR_MESSAGE_HIGHLIGHTING=1

# switch on developer mode
export NUMBA_DEVELOPER_MODE=1

# enable the fault handler
export PYTHONFAULTHANDLER=1

# deal with threading layers
if [ -z ${TEST_THREADING+x} ]; then
    echo "INFO: Threading layer not explicitly set."
else
    case "${TEST_THREADING}" in "workqueue"|"omp"|"tbb")
        export NUMBA_THREADING_LAYER="$TEST_THREADING"
        echo "INFO: Threading layer set as: $TEST_THREADING"
        ;;
    *)
        echo "INFO: Threading layer explicitly set to bad value: $TEST_THREADING."
        exit 1
        ;;
    esac
fi

unamestr=`uname`
if [[ "$unamestr" == 'Linux' ]]; then
    if [[ "${BITS32}" == "yes" ]]; then
        SEGVCATCH=""
    else
        SEGVCATCH=catchsegv
    fi
elif [[ "$unamestr" == 'Darwin' ]]; then
    SEGVCATCH=""
else
    echo Error
fi

# limit CPUs in use on PPC64LE, fork() issues
# occur on high core count systems
archstr=`uname -m`
if [[ "$archstr" == 'ppc64le' ]]; then
    TEST_NPROCS=16
fi

# First check that the test discovery works
python -m numba.tests.test_runtests

# Now run tests based on the changes identified via git
NUMBA_ENABLE_CUDASIM=1 $SEGVCATCH python -m numba.runtests -b -v -g -m $TEST_NPROCS -- numba.tests

# List the tests found
echo "INFO: All discovered tests:"
python -m numba.runtests -l

# Now run the Numba test suite with slicing
# Note that coverage is run from the checkout dir to match the "source"
# directive in .coveragerc
echo "INFO: Running slice of discovered tests: ($TEST_START_INDEX,None,$TEST_COUNT)"
if [ "$RUN_COVERAGE" == "yes" ]; then
    export PYTHONPATH=.
    coverage erase
    $SEGVCATCH coverage run runtests.py -b -j "$TEST_START_INDEX,None,$TEST_COUNT" --exclude-tags='long_running' -m $TEST_NPROCS -- numba.tests
else
    NUMBA_ENABLE_CUDASIM=1 $SEGVCATCH python -m numba.runtests -b -j "$TEST_START_INDEX,None,$TEST_COUNT" --exclude-tags='long_running' -m $TEST_NPROCS -- numba.tests
fi
sklam/numba
buildscripts/incremental/test.sh
Shell
bsd-2-clause
2,375
#!/bin/sh
# s2gridToyFeatMerge.sh <dataName> <doSubmit>
# WARNING: This script will only use the "ang" group of machines

EXPECTED_ARGS=1
E_BADARGS=65

if [ $# -lt $EXPECTED_ARGS ] # use -lt instead of <
then
  echo "Usage: `basename $0` <dataName=AR>"
  exit $E_BADARGS
fi

TimeLimit=3500
DATANAME=$1

for infName in 'Prior' 'SM' 'SMmergeseq'
do
  COMMAND="runToyDataFeatMerge.sh $DATANAME $infName $TimeLimit"
  echo $COMMAND
  if [ $# -eq 2 ]
  then
    qsub -t 1-$2 $COMMAND
  else
    continue
  fi
done

exit

########################################################################## AR N=200, T=1000, kappa=50 (nearly same as orig. experiment)
runToyDataFeatMerge.sh AR Prior 3500
Your job-array 1637996.1-2:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh AR SM 3500
Your job-array 1637997.1-2:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh AR SMmergeseq 3500
Your job-array 1637998.1-2:1 ("runToyDataFeatMerge.sh") has been submitted

########################################################################## Gauss N=600, T=200
runToyDataFeatMerge.sh Gaussian Prior 3500
Your job-array 1637993.1-2:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh Gaussian SM 3500
Your job-array 1637994.1-2:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh Gaussian SMmergeseq 3500
Your job-array 1637995.1-2:1 ("runToyDataFeatMerge.sh") has been submitted

########################################################################## Gauss N=400, T=200
runToyDataFeatMerge.sh Gaussian Prior 3500
Your job-array 1633654.1-5:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh Gaussian SM 3500
Your job-array 1633655.1-5:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh Gaussian SMmergeseq 3500
Your job-array 1633656.1-5:1 ("runToyDataFeatMerge.sh") has been submitted

########################################################################## AR N=400, T=200
runToyDataFeatMerge.sh AR Prior 3500
Your job-array 1635154.1-3:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh AR SM 3500
Your job-array 1635155.1-3:1 ("runToyDataFeatMerge.sh") has been submitted
runToyDataFeatMerge.sh AR SMmergeseq 3500
Your job-array 1635156.1-3:1 ("runToyDataFeatMerge.sh") has been submitted

OLD REPEAT JOB
jobIDs = [806212:806214];
michaelchughes/NPBayesHMM
code/experiments/s2gridToyFeatMerge.sh
Shell
bsd-3-clause
2,379
#!/bin/sh

cc -c -o dybuf.o dybuf.c
cc -c -o grabber.o grabber.c
cc -c -o output_csv.o output_csv.c
cc -c -o output_xml.o output_xml.c
cc -c -o output_json.o output_json.c
cc -o grabber grabber.o dybuf.o output_csv.o output_xml.o output_json.o -levent -lm
deoxxa/banner-grabber
build.sh
Shell
bsd-3-clause
256
#!/bin/bash

if [ -z "$VIRTUAL_ENV" ]; then
    echo "Not running in a virtualenv??? You've got 10 seconds to CTRL-C ..."
    sleep 10
    virtualenv .env/
    source .env/bin/activate
fi

pip install nose coverage
pip install -r requirements.txt
python setup.py develop --no-deps

if [ "$1" == "--without-mockapi" ]; then
    echo "Not running drest.mockapi..."
else
    ./utils/run-mockapi.sh DREST_MOCKAPI_PROCESS 2>/dev/null 1>/dev/null &
    sleep 5
fi

rm -rf coverage_report/
coverage erase
python setup.py nosetests
RET=$?

# This is a hack to wait for tests to run
sleep 5

if [ "$1" == "--without-mockapi" ]; then
    echo "Not killing drest.mockapi (I didn't start it) ..."
else
    echo "Killing drest.mockapi..."
    # Then kill the mock api
    ps auxw \
        | grep 'DREST_MOCKAPI_PROCESS' \
        | awk {' print $2 '} \
        | xargs kill 2>/dev/null 1>/dev/null
fi

echo
if [ "$RET" == "0" ]; then
    echo "TESTS PASSED OK"
else
    echo "TESTS FAILED"
fi
echo

exit $RET
datafolklabs/drest
utils/run-tests.sh
Shell
bsd-3-clause
986
#!/bin/bash

exec &> dune-info.dat

pushd . >/dev/null

# Find base of git directory
while [ ! -d .git ] && [ ! `pwd` = "/" ]; do cd ..; done

# Go to Dune root dir
cd ..

# Collect information about all Dune modules using dunecontrol
#echo "== SVN info: "
#dune-common/bin/dunecontrol svn info
#echo

#echo "== SVN diff: "
#dune-common/bin/dunecontrol svn diff
#echo

echo "== Git info: "
dune_dirs=`find . -maxdepth 1 -type d -name "dune-*"`
echo $dune_dirs
for dir in $dune_dirs ; do
  echo "--- calling git log for $dir ---"
  cd $dir
  git log --max-count=1
  cd ..
  echo "--- $dir done ---"
done
echo

echo "== Git diff: "
for dir in $dune_dirs ; do
  echo "--- calling git diff for $dir ---"
  cd $dir
  git diff HEAD
  cd ..
  echo "--- $dir done ---"
done
echo

popd >/dev/null
pederpansen/dune-ax1
src/dune-info.bash
Shell
bsd-3-clause
777
#!/bin/bash

# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Reproduces the content of 'components' and 'components-chromium' using the
# list of dependencies from 'bower.json'. Downloads needed packages and makes
# Chromium specific modifications. To launch the script you need 'bower',
# 'crisper', and 'vulcanize' installed on your system.

set -e

cd "$(dirname "$0")"

rm -rf components components-chromium
rm -rf ../../web-animations-js/sources

bower install

rm components/*/.travis.yml

mv components/web-animations-js ../../web-animations-js/sources
cp ../../web-animations-js/sources/COPYING ../../web-animations-js/LICENSE

# Remove unused gzipped binary which causes git-cl problems.
rm ../../web-animations-js/sources/web-animations.min.js.gz

# Remove source mapping directives since we don't compile the maps.
sed -i 's/^\s*\/\/#\s*sourceMappingURL.*//' \
  ../../web-animations-js/sources/*.min.js

# These components are needed only for demos and docs.
rm -rf components/{hydrolysis,marked,marked-element,prism,prism-element,\
iron-component-page,iron-doc-viewer,webcomponentsjs}

# Test and demo directories aren't needed.
rm -rf components/*/{test,demo}
rm -rf components/polymer/explainer

# Remove promise-polyfill and components which depend on it.
rm -rf components/promise-polyfill
rm -rf components/iron-ajax
rm -rf components/iron-form

# Remove iron-image as it's only a developer dependency of iron-dropdown.
# https://github.com/PolymerElements/iron-dropdown/pull/17
rm -rf components/iron-image

# Make checkperms.py happy.
find components/*/hero.svg -type f -exec chmod -x {} \;
find components/iron-selector -type f -exec chmod -x {} \;

# Remove carriage returns to make CQ happy.
find components -type f \( -name \*.html -o -name \*.css -o -name \*.js\
 -o -name \*.md -o -name \*.sh -o -name \*.json -o -name \*.gitignore\
 -o -name \*.bat \) -print0 | xargs -0 sed -i -e $'s/\r$//g'

# Resolve a unicode encoding issue in dom-innerHTML.html.
NBSP=$(python -c 'print u"\u00A0".encode("utf-8")')
sed -i 's/['"$NBSP"']/\\u00A0/g' components/polymer/polymer-mini.html

./extract_inline_scripts.sh components components-chromium

# Remove import of external resource in font-roboto (fonts.googleapis.com)
# and apply additional chrome specific patches. NOTE: Where possible create
# a Polymer issue and/or pull request to minimize these patches.
patch -p1 < chromium.patch

new=$(git status --porcelain components-chromium | grep '^??' | \
      cut -d' ' -f2 | egrep '\.(html|js|css)$')

if [[ ! -z "${new}" ]]; then
  echo
  echo 'These files appear to have been added:'
  echo "${new}" | sed 's/^/ /'
fi

deleted=$(git status --porcelain components-chromium | grep '^.D' | \
          sed 's/^.//' | cut -d' ' -f2 | egrep '\.(html|js|css)$')

if [[ ! -z "${deleted}" ]]; then
  echo
  echo 'These files appear to have been removed:'
  echo "${deleted}" | sed 's/^/ /'
fi

if [[ ! -z "${new}${deleted}" ]]; then
  echo
fi
Workday/OpenFrame
third_party/polymer/v1_0/reproduce.sh
Shell
bsd-3-clause
3,077
#!/bin/bash
# --remote-host=HOST : ssh host that runs the second benchmark party
#                      (default: localhost).
# --remote-path=PATH : path to the obliv-c checkout on that host
#                      (default: the same path as the local checkout).

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_PATH="$(cd "$DIR/../../../.." && pwd)"
REMOTE_PATH="$PROJECT_PATH"
REMOTE_HOST="localhost"
LOCAL_HOST="localhost"
LOCAL_PORT=1234
OTSRC="src/ext/oblivc/ot.c"
BENCHDIR="test/oblivc/ottest/"
BENCHSRC="$BENCHDIR/ottime.c"
BENCHBIN="ottime"
OBLIVCC="$PROJECT_PATH/bin/oblivcc"
BUILDCMD="make cilly oblivruntime RELEASELIB=1 NATIVECAML=1"

while [ $# -ge 1 ]; do
  if [[ $1 = "--remote-host="* ]]; then
    REMOTE_HOST=${1#--remote-host=}
  elif [[ $1 = "--remote-path="* ]]; then
    REMOTE_PATH=${1#--remote-path=}
  elif [[ $1 = "--local-host="* ]]; then
    LOCAL_HOST=${1#--local-host=}
  elif [[ $1 = "--local-port-init="* ]]; then
    LOCAL_PORT=${1#--local-port-init=}
  elif [ $1 = "--thread-never" ]; then
    sed -i 's/#define OT_THREAD_THRESHOLD .*$/#define OT_THREAD_THRESHOLD 0x7fffffff/' $PROJECT_PATH/$OTSRC
  fi
  shift
done

echo $REMOTE_HOST:$REMOTE_PATH
cd "$PROJECT_PATH/$BENCHDIR"
port=$LOCAL_PORT

for macro_suffix in BASE_OT EXTENSION VALIDATION PAYLOAD; do
  # Insert the macro that enables timing up to this phase
  sed -i "1i #define PHASE_TIME_UPTO_$macro_suffix" $PROJECT_PATH/$OTSRC
  # Build project
  ( cd "$PROJECT_PATH" && $BUILDCMD )
  # Build remote project
  if [ $REMOTE_HOST != "localhost" ]; then
    scp $PROJECT_PATH/$OTSRC $REMOTE_HOST:$REMOTE_PATH/$OTSRC
    ssh $REMOTE_HOST "cd $REMOTE_PATH && $BUILDCMD"
  fi
  # Compile benchmark program
  $OBLIVCC -O3 $PROJECT_PATH/$BENCHSRC -o $BENCHBIN
  ssh $REMOTE_HOST "cd $REMOTE_PATH/$BENCHDIR && $REMOTE_PATH/bin/oblivcc -O3 $REMOTE_PATH/$BENCHSRC -o $BENCHBIN -DREMOTEHOST='\"$LOCAL_HOST\"'"
  for ottype in M Q; do
    for ((run=0; run<5; run++)); do
      ./$BENCHBIN $port 1 $ottype 5000000 &
      sleep 0.3
      echo -n "$port $ottype $macro_suffix" >> $0.log
      ssh $REMOTE_HOST time $REMOTE_PATH/$BENCHDIR/$BENCHBIN $port 2 $ottype 5000000 &>> $0.log
      port=$((port+1))
    done
  done
  sed -i "/#define PHASE_TIME_UPTO_$macro_suffix/d" $PROJECT_PATH/$OTSRC
done

# Restore source file
git checkout HEAD $PROJECT_PATH/$OTSRC
scp $PROJECT_PATH/$OTSRC $REMOTE_HOST:$REMOTE_PATH/$OTSRC
samuelhavron/obliv-c
test/oblivc/ottest/bench/time-for-phases.sh
Shell
bsd-3-clause
2,169
#!/bin/bash

rm -f bus-service

gcc -O2 spamsignals.c -o spamsignals `pkg-config --cflags --libs libsystemd`
tasleson/dbus-signals
build.sh
Shell
mit
108
#!/usr/bin/env bash

. "test/testlib.sh"

begin_test "smudge"
(
  set -e

  reponame="$(basename "$0" ".sh")"
  setup_remote_repo "$reponame"

  clone_repo "$reponame" repo

  git lfs track "*.dat"
  echo "smudge a" > a.dat
  git add .gitattributes a.dat
  git commit -m "add a.dat"

  # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects
  output="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | git lfs smudge)"
  [ "smudge a" = "$output" ]

  git push origin master

  # download it from the git lfs server
  rm -rf .git/lfs/objects
  output="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | git lfs smudge)"
  [ "smudge a" = "$output" ]
)
end_test

begin_test "smudge --info"
(
  set -e
  cd repo

  output="$(pointer aaaaa15df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 123 | git lfs smudge --info)"
  [ "123 --" = "$output" ]
)
end_test

begin_test "smudge with temp file"
(
  set -e
  cd repo

  rm -rf .git/lfs/objects
  mkdir -p .git/lfs/tmp/objects
  touch .git/lfs/tmp/objects/fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254-1
  pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | GIT_TRACE=5 git lfs smudge | tee smudge.log
  [ "smudge a" = "$(cat smudge.log)" ] || {
    rm -rf .git/lfs/tmp
    git lfs logs last
    exit 1
  }
)
end_test

begin_test "smudge with invalid pointer"
(
  set -e
  cd repo
  [ "wat" = "$(echo "wat" | git lfs smudge)" ]
  [ "not a git-lfs file" = "$(echo "not a git-lfs file" | git lfs smudge)" ]
  [ "version " = "$(echo "version " | git lfs smudge)" ]
)
end_test

begin_test "smudge include/exclude"
(
  set -e

  reponame="$(basename "$0" ".sh")-includeexclude"
  setup_remote_repo "$reponame"

  clone_repo "$reponame" includeexclude

  git lfs track "*.dat"
  echo "smudge a" > a.dat
  git add .gitattributes a.dat
  git commit -m "add a.dat"

  pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)"

  # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects
  [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ]

  git push origin master

  # this WOULD download except we're going to prevent it with include/exclude
  rm -rf .git/lfs/objects

  git config "lfs.fetchexclude" "a*"

  [ "$pointer" = "$(echo "$pointer" | git lfs smudge a.dat)" ]
)
end_test

begin_test "smudge with skip"
(
  set -e

  reponame="$(basename "$0" ".sh")-skip"
  setup_remote_repo "$reponame"

  clone_repo "$reponame" "skip"

  git lfs track "*.dat"
  echo "smudge a" > a.dat
  git add .gitattributes a.dat
  git commit -m "add a.dat"

  pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)"
  [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ]

  git push origin master

  # Must clear the cache because smudge will use
  # cached objects even with --skip/GIT_LFS_SKIP_SMUDGE
  # (--skip applies to whether or not it downloads).
  rm -rf .git/lfs/objects

  [ "$pointer" = "$(echo "$pointer" | GIT_LFS_SKIP_SMUDGE=1 git lfs smudge)" ]

  echo "test clone with env"
  export GIT_LFS_SKIP_SMUDGE=1
  env | grep LFS_SKIP
  clone_repo "$reponame" "skip-clone-env"
  [ "$pointer" = "$(cat a.dat)" ]
  [ "0" = "$(grep -c "Downloading a.dat" clone.log)" ]

  git lfs pull
  [ "smudge a" = "$(cat a.dat)" ]

  echo "test clone without env"
  unset GIT_LFS_SKIP_SMUDGE
  [ "$(env | grep LFS_SKIP)" == "" ]
  clone_repo "$reponame" "no-skip"
  [ "smudge a" = "$(cat a.dat)" ]
  [ "1" = "$(grep -c "Downloading a.dat" clone.log)" ]

  echo "test clone with init --skip-smudge"
  git lfs install --skip-smudge
  clone_repo "$reponame" "skip-clone-init"
  [ "$pointer" = "$(cat a.dat)" ]
  [ "0" = "$(grep -c "Downloading a.dat" clone.log)" ]

  git lfs install --force
)
end_test

begin_test "smudge clone with include/exclude"
(
  set -e

  reponame="smudge_include_exclude"
  setup_remote_repo "$reponame"

  clone_repo "$reponame" "repo_$reponame"

  git lfs track "*.dat" 2>&1 | tee track.log
  grep "Tracking \*.dat" track.log

  contents="a"
  contents_oid=$(calc_oid "$contents")

  printf "$contents" > a.dat
  git add a.dat
  git add .gitattributes
  git commit -m "add a.dat" 2>&1 | tee commit.log
  grep "master (root-commit)" commit.log
  grep "2 files changed" commit.log
  grep "create mode 100644 a.dat" commit.log
  grep "create mode 100644 .gitattributes" commit.log

  [ "a" = "$(cat a.dat)" ]

  assert_local_object "$contents_oid" 1

  git push origin master 2>&1 | tee push.log
  grep "(1 of 1 files)" push.log
  grep "master -> master" push.log

  assert_server_object "$reponame" "$contents_oid"

  clone="$TRASHDIR/clone_$reponame"
  git -c lfs.fetchexclude="a*" clone "$GITSERVER/$reponame" "$clone"

  cd "$clone"

  # Should have succeeded but not downloaded
  refute_local_object "$contents_oid"
)
end_test
ryansimmen/git-lfs
test/test-smudge.sh
Shell
mit
4,897
#!/bin/sh
# installheaders.sh - install header files
# usage: installheaders.sh srcdir destdir
#    srcdir/*.h is copied to destdir, if different.
#

if [ $# != 2 ]; then
    echo "$0: Usage: $0 srcdir destdir" 1>&2
    exit 1
fi

for H in "$1"/*.h; do
    BH=`basename "$H"`
    if diff "$H" "$2/$BH" >/dev/null 2>&1; then
        :
    else
        echo cp "$H" "$2/$BH"
        cp "$H" "$2/$BH"
    fi
done
computist/os161
mk/installheaders.sh
Shell
mit
393
#!/bin/bash

if (( $# != 3 ))
then
  echo "Usage: glassfish_start_service <path_to_war> <target_port> <threadpool_size>"
  exit
fi

TARGET_PORT=$2
SOURCE_PATH=$1
THREADPOOL_SIZE=$3

if [ -z "$KB_RUNTIME" ]
then
  export KB_RUNTIME=/kb/runtime
fi

if [ -z "$GLASSFISH_HOME" ]
then
  export GLASSFISH_HOME=$KB_RUNTIME/glassfish3
fi

asadmin=$GLASSFISH_HOME/glassfish/bin/asadmin

ps ax | grep "\-Dcom.sun.aas.installRoot=\/kb/runtime/glassfish3/glassfish " > /dev/null
if [ $? -eq 0 ]; then
  echo "Glassfish is already running."
else
  $asadmin start-domain domain1
fi

$asadmin list-virtual-servers | grep server-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
  echo "Virtual server was already created."
else
  $asadmin create-virtual-server --hosts \$\{com.sun.aas.hostName\} server-${TARGET_PORT}
fi

$asadmin list-threadpools server | grep thread-pool-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
  echo "Thread pool was already created."
else
  $asadmin create-threadpool --maxthreadpoolsize=${THREADPOOL_SIZE} --minthreadpoolsize=${THREADPOOL_SIZE} thread-pool-${TARGET_PORT}
fi

$asadmin list-http-listeners | grep http-listener-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
  echo "Http-listener was already created."
else
  $asadmin create-http-listener --listeneraddress 0.0.0.0 --listenerport ${TARGET_PORT} --default-virtual-server server-${TARGET_PORT} --securityEnabled=false --acceptorthreads=${THREADPOOL_SIZE} http-listener-${TARGET_PORT}
  $asadmin set server.network-config.network-listeners.network-listener.http-listener-${TARGET_PORT}.thread-pool=thread-pool-${TARGET_PORT}
fi

$asadmin list-applications | grep app-${TARGET_PORT} > /dev/null
if [ $? -eq 0 ]; then
  $asadmin undeploy app-${TARGET_PORT}
fi

$asadmin deploy --virtualservers server-${TARGET_PORT} --contextroot / --name app-${TARGET_PORT} ${SOURCE_PATH}
kbase/cmonkey
glassfish_start_service.sh
Shell
mit
1,878
#!/bin/bash

script_directory="$(dirname $(readlink -f ${BASH_SOURCE}))"

set -e -x

cd "${script_directory}/../src"

if [[ -d cov-int ]]; then
    rm -rf cov-int
fi

make clean

# XXX: Get Coverity Build Tool from here:
# https://scan.coverity.com/download/cxx/linux-64
# Unpack into /vagrant (root of sdn_sensor)
# XXX: Add notes how to run cov-configure on the compiler

../cov-analysis-linux64-*/bin/cov-configure --comptype clangcc --compiler /usr/bin/clang

../cov-analysis-linux64-*/bin/cov-build --dir cov-int make

tar czf sdn_sensor_coverity.tgz cov-int
TidyHuang/sdn_sensor
scripts/run-coverity.bash
Shell
mit
563