code stringlengths 2 1.05M | repo_name stringlengths 5 110 | path stringlengths 3 922 | language stringclasses 1 value | license stringclasses 15 values | size int64 2 1.05M |
---|---|---|---|---|---|
#!/bin/zsh
xinput set-prop 'ELAN21EF:00 04F3:21EF' 'Coordinate Transformation Matrix' 1 0 0 0 1 0 0 0 1;
xinput set-prop 'SYNA2B29:00 06CB:77C6' 'Coordinate Transformation Matrix' 1 0 0 0 1 0 0 0 1;
xinput enable "AT Translated Set 2 keyboard";
xinput enable 'SYNA2B29:00 06CB:77C6';
xrandr -o normal;
|
juanknebel/scripts-yoga-900-13ISK
|
rotate-normal.sh
|
Shell
|
gpl-3.0
| 303 |
#!/bin/bash
#
# Filename: vzswap_convert.sh
# Author: Nipun Arora
# Created: Fri Nov 28 11:31:43 2014 (-0500)
# URL: http://www.nipunarora.net
#
# Description:
#
# This script converts a typical OpenVZ container to a VSwap Container
# Usage
#
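# A hypothetical invocation (editor's illustration; the container ID and the
# memory values below are made up, not taken from the original script):
#   ./vzswap_convert.sh 101 1G 512M
# i.e. convert container 101 to 1G of RAM and 512M of vswap.
#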
CTID=$1
RAM=$2
SWAP=$3
CFG=/etc/vz/conf/${CTID}.conf
cp $CFG $CFG.pre-vswap
grep -Ev '^(KMEMSIZE|LOCKEDPAGES|PRIVVMPAGES|SHMPAGES|NUMPROC|PHYSPAGES|VMGUARPAGES|OOMGUARPAGES|NUMTCPSOCK|NUMFLOCK|NUMPTY|NUMSIGINFO|TCPSNDBUF|TCPRCVBUF|OTHERSOCKBUF|DGRAMRCVBUF|NUMOTHERSOCK|DCACHESIZE|NUMFILE|AVNUMPROC|NUMIPTENT|ORIGIN_SAMPLE|SWAPPAGES)=' > $CFG < $CFG.pre-vswap
vzctl set $CTID --ram $RAM --swap $SWAP --save
vzctl set $CTID --reset_ub
|
Programming-Systems-Lab/Parikshan
|
vzswap_convert.sh
|
Shell
|
gpl-3.0
| 687 |
#!/bin/bash
# -*- ENCODING: UTF-8 -*-
#Intelligent Software Installation Assistant for GNU/Linux distributions based on Ubuntu 12.04
#Installation scripts for third-party software (outside the official Ubuntu repositories)
#Copyright (C) <2014> <Sebastian Nolberto Lagos Gutierrez, [email protected], Arica, Chile>
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#Installing Skype dependencies
apt-get install -q -y libqt4-dbus libqt4-network libqt4-webkit libqt4-xml libqtcore4 libqtgui4
cd /usr/share/aiis/packages
wget http://download.skype.com/linux/skype-ubuntu-precise_4.3.0.37-1_i386.deb
dpkg -i /usr/share/aiis/packages/skype-ubuntu-precise_4.3.0.37-1_i386.deb
exit
|
sebalagos90/asistente_instalacion_inteligente
|
aiis-0_5_0/usr/share/aiis/scripts/_skype.sh
|
Shell
|
gpl-3.0
| 1,323 |
#! /bin/bash
#====================================================================
# Script to create the end-of-year backup
# on the school's LTSP server
#
#
# v. 1.0 - (it) 10/06/2011 - [email protected]
#
#
# TODO: redirect stdout/stderr of the copy command to a log file, to be deleted if everything is OK
#====================================================================
set -u
usage ()
{
echo "Uso: $0 <percorso_backup>"
echo "esempio: $0 /backup/2010_2011"
exit 250
}
#------------------------------------------------
# Check whether the caller has root privileges
#------------------------------------------------
if [ `id -u` -ne 0 ]; then
echo "ERRORE: solo l'utente root puo' eseguire questo script"
exit 1
fi
#------------------------------------------------
# Parameter check
#------------------------------------------------
if [ $# -gt 0 ]; then
if [ "$1" = "" ]
then
usage;
else
PATH_BACKUP=$1
fi
else
usage;
fi
#-----------------------------------------------------------------
# Verify that the destination path exists, or try to create it
#-----------------------------------------------------------------
if [ -d $PATH_BACKUP ]
then
# the directory exists, check whether it is writable
if [ ! -w $PATH_BACKUP ]; then
echo "ERROR: unable to obtain write permission on $PATH_BACKUP"
exit 2
fi
else
# the directory does not exist, try to create it
mkdir -p $PATH_BACKUP
if [ $? -ne 0 ]
then
echo "ERRORE: impossibile creare la directory $PATH_BACKUP"
exit 3
fi
fi
#-----------------------------------------------------------------
# Copy the /etc/group file to the given path
#-----------------------------------------------------------------
mkdir -p $PATH_BACKUP/etc && cp --remove-destination /etc/group $PATH_BACKUP/etc/
if [ $? -ne 0 ]; then
echo "ERRORE: durante la copia del file /etc/group in $PATH_BACKUP - Errore $?"
exit 10
fi
#-----------------------------------------------------------------
# Copy the home directories to the given path
#-----------------------------------------------------------------
tar cpf - --exclude-caches-all --exclude='.*' /home | (tar xpf - -C $PATH_BACKUP)
chmod 755 $PATH_BACKUP
if [ $? -eq 0 ]; then
echo "----------------------------------------------------"
echo "[SCR3]Script completato con successo!"
echo "----------------------------------------------------"
else
echo "----------------------------------------------------"
echo "[SCR3]Script completato con errore!"
echo "----------------------------------------------------"
exit 50
fi
|
itarozzi/classerman
|
src/scripts/endyear_backup.sh
|
Shell
|
gpl-3.0
| 2,668 |
#!/bin/bash
PID=`cat boot.pid`
kill $PID
exit 0
|
jimdowling/gvod
|
bootstrap/bootstrap-server/deploy/kill.sh
|
Shell
|
gpl-3.0
| 49 |
#!/bin/bash
curDir="$(pwd)"
scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $scriptDir
cpth="Tern.jar"
cpth=$cpth":bluecove-2.1.1-SNAPSHOT-63.jar"
cpth=$cpth":bluecove-gpl-2.1.1-SNAPSHOT-63-arm.jar"
cpth=$cpth":bluecove-bluez-2.1.1-SNAPSHOT.jar"
cpth=$cpth":ev3j-0.1.1.jar"
echo "Starting Tern"
echo "Folder $scriptDir"
echo "Using classpath $cpth"
java -cp "$cpth" tern.Main
cd $curDir
|
sovcik/TernBerryBot
|
runtern.sh
|
Shell
|
gpl-3.0
| 408 |
#!/usr/bin/env bash
#
# Author: Stefan Buck
# License: MIT
# https://gist.github.com/stefanbuck/ce788fee19ab6eb0b4447a85fc99f447
#
#
# This script accepts the following parameters:
#
# * owner
# * repo
# * tag
# * filename
# * github_api_token
#
# Script to upload a release asset using the GitHub API v3.
#
# Example:
#
# upload-github-release-asset.sh github_api_token=TOKEN owner=stefanbuck repo=playground tag=v0.1.0 filename=./build.zip
#
# Check dependencies.
set -ex
xargs=$(which gxargs || which xargs)
# Validate settings.
[ "${TRACE}" ] && set -x
CONFIG=$@
for line in ${CONFIG}; do
eval "${line}"
done
# Define variables.
GH_API="https://api.github.com"
GH_REPO="${GH_API}/repos/${owner}/${repo}"
GH_RELEASES="${GH_REPO}/releases"
GH_TAGS="${GH_REPO}/releases/tags/${tag}"
AUTH="Authorization: token ${github_api_token}"
WGET_ARGS="--content-disposition --auth-no-challenge --no-cookie"
CURL_ARGS="-LJO#"
if [[ "${tag}" == 'LATEST' ]]; then
GH_TAGS="${GH_REPO}/releases/latest"
fi
# Validate token.
curl -o /dev/null -sH "${AUTH}" ${GH_REPO} || { echo "Error: Invalid repo, token or network issue!"; exit 1; }
curl -H "${AUTH}" "${GH_RELEASES}" -d "{
\"tag_name\": \"${tag}\",
\"target_commitish\": \"master\",
\"name\": \"Canari ${tag}\",
\"body\": \"**Change log:**\",
\"draft\": false,
\"prerelease\": false
}" || { echo "Error: Unable to create tagged release"; exit 1; }
# Read asset tags.
response=$(curl -sH "${AUTH}" ${GH_TAGS})
# Get ID of the asset based on given filename.
eval $(echo "${response}" | grep -m 1 "id.:" | grep -w id | tr : = | tr -cd '[[:alnum:]]=')
[ "${id}" ] || { echo "Error: Failed to get release id for tag: ${tag}"; echo "${response}" | awk 'length($0)<100' >&2; exit 1; }
# Upload asset
echo "Uploading asset... "
# Construct url
GH_ASSET="https://uploads.github.com/repos/${owner}/${repo}/releases/${id}/assets?name=$(basename ${filename})"
curl --data-binary @"${filename}" -H "${AUTH}" -H "Content-Type: application/octet-stream" ${GH_ASSET}
|
redcanari/canari3
|
lambda/upload-github-release-asset.sh
|
Shell
|
gpl-3.0
| 2,033 |
#!/usr/bin/env bash
#@cd var/logs; if [ -f background.log ]; then cp background.log rotate/background.$(shell date +%s).log; truncate -s 0 background.log; fi
#@cd var/logs; if [ -f dev.log ]; then cp dev.log rotate/dev.$(shell date +%s).log; truncate -s 0 dev.log; fi
#@cd var/logs; if [ -f monitoring.log ]; then cp monitoring.log rotate/monitoring.$(shell date +%s).log; truncate -s 0 monitoring.log; fi
#@cd var/logs; if [ -f php.log ]; then cp php.log rotate/php.$(shell date +%s).log; truncate -s 0 php.log; fi
#@cd var/logs; if [ -f prod.log ]; then cp prod.log rotate/prod.$(shell date +%s).log; truncate -s 0 prod.log; fi
#@cd var/logs; if [ -f scheduler.log ]; then cp scheduler.log rotate/scheduler.$(shell date +%s).log; truncate -s 0 scheduler.log; fi
#@cd var/logs; if [ -f vtigercrm.log ]; then cp vtigercrm.log rotate/vtigercrm.$(shell date +%s).log; truncate -s 0 vtigercrm.log; fi
#@cd var/logs; if [ -f last_query.log ]; then cp last_query.log rotate/last_query.$(shell date +%s).log; truncate -s 0 last_query.log; fi
#@cd var/logs; if [ -f vtigercrm.json ]; then cp vtigercrm.json rotate/vtigercrm.$(shell date +%s).json; truncate -s 0 vtigercrm.json; fi
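# Editor's sketch (not in the original script): the commented lines above are
# Makefile-style recipes -- $(shell date +%s) is make syntax, not shell. A
# plain-shell equivalent for rotating a single log file could look like the
# helper below; it only runs when ROTATE_EXAMPLE is set, so the script's
# default behaviour (the chmod alone) is unchanged.
rotate_log() {
  # $1 is a file name inside var/logs, e.g. dev.log or vtigercrm.json
  if [ -f "var/logs/$1" ]; then
    cp "var/logs/$1" "var/logs/rotate/${1%.*}.$(date +%s).${1##*.}"
    truncate -s 0 "var/logs/$1"
  fi
}
[ -n "${ROTATE_EXAMPLE:-}" ] && rotate_log dev.log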
chmod 777 -R var/logs || true
|
Myddleware/myddleware
|
docker/script/logs-rotate.sh
|
Shell
|
gpl-3.0
| 1,206 |
#4uto
vpn_lists=$(curl http://www.vpngate.net/api/iphone/ 2> /dev/null | awk -F',' '{print $1 "_" $7, $15}')
echo "${vpn_lists}" | while read line; do
hostname=${line%% *}
config=${line##* }
echo $config | base64 -di > ${hostname}.ovpn 2> /dev/null
done
find . -type f ! \( -name "vpn*" -o -name "pass*" \) -delete 2> /dev/null
|
Hackplayers/4nonimizer
|
vpn/vpngate/vpn-get-ovpn.sh
|
Shell
|
gpl-3.0
| 335 |
#!/bin/bash
RETRIES=3
DELAY=10
COUNT=1
while [ $COUNT -le $RETRIES ]; do
$*
if [ $? -eq 0 ]; then
RETRIES=0
break
fi
let COUNT=$COUNT+1
echo "retrying..."
sleep $DELAY
done
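# Hypothetical usage (editor's illustration, not from the original repository):
# wrap any flaky command, e.g.
#   ./retry.sh wget https://example.org/some-file
# The command is re-run with $DELAY seconds between attempts until it succeeds
# or $RETRIES attempts have been made.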
|
bwinkel/cygrid
|
retry.sh
|
Shell
|
gpl-3.0
| 194 |
#!/bin/bash
WORKDIR=/srv/emailer
# check environment variables
[ -z "${DB_PORT_5432_TCP_ADDR}" ] && echo "The Postgres container is not correctly linked! Add --link postgres:db to the docker run parameters!" && exit 1
[ -z "${DB_ENV_POSTGRES_PASSWORD}" ] && echo "Undefined postgres password! Add --link postgres:db to the docker run parameters!" && exit 1
[ -z "${MANDRILL_API_KEY}" ] && echo "Undefined Mandrill API KEY! Add -e MANDRILL_API_KEY=\"apikey\" to the docker run parameters!" && exit 1
[ -z "${DOMAIN}" ] && echo "Domain undefined! Add -e DOMAIN=\"ip or domain name\" to the docker run parameters!" && exit 1
cp $WORKDIR/config.ini.example $WORKDIR/config.ini
sed -i "s/provider = smtp/provider = mandrill/" $WORKDIR/config.ini
sed -i "/\[database\]/{n;s/.*/hostname = ${DB_PORT_5432_TCP_ADDR}/}" $WORKDIR/config.ini
sed -i "s/username = mitro/username = postgres/" $WORKDIR/config.ini
sed -i "s/password = mitro/password = ${DB_ENV_POSTGRES_PASSWORD}/" $WORKDIR/config.ini
sed -i "s/[email protected]/[email protected]/" $WORKDIR/config.ini
sed -i "s/[email protected]/[email protected]/" $WORKDIR/config.ini
sed -i "s/mitro.co/${DOMAIN}/" $WORKDIR/config.ini
sed -i "/\[mandrill\]/{n;s/.*/api_key = ${MANDRILL_API_KEY}/}" $WORKDIR/config.ini
sed -i "s/logging.INFO/logging.DEBUG/" $WORKDIR/emailer.py
exec "$@"
|
servomac/mitro
|
emailer/docker-entrypoint.sh
|
Shell
|
gpl-3.0
| 1,345 |
#!/bin/bash
#######################################
# ### Raúl Caro Pastorino ### #
## ## ## ##
### # https://github.com/fryntiz/ # ###
## ## ## ##
# ### www.fryntiz.es ### #
#######################################
# Create a script called intervalo.sh in which the user enters two integers;
# all integers in the interval formed by the two numbers must be printed.
# The program must check that exactly 2 numbers are given and that the second
# is greater than the first; otherwise an error message is shown.
INICIO=0
FIN=0
echo "Introduce el primero número (Inicio del rango)"
read INICIO
echo "Introduce el segundo número (Fin del rango)"
read FIN
if [ $INICIO -lt $FIN ]; then
clear
echo "Mostrando rango"
seq $INICIO $FIN
else
echo "No se cumplen los requisitos"
echo "Se necesita un segundo número mayor que el primero"
fi
|
fryntiz/ciclosuperior
|
Scripts_Bash/Nivel medio/14_intervalo.sh
|
Shell
|
gpl-3.0
| 952 |
#!/bin/sh
# you have to MANUALLY adapt these and match them with the
# Compare* calls in MoSShE
mkdir -p CompareFiles
# CheckFileChanges passwd /etc/passwd
cp /etc/passwd CompareFiles/
# CheckFileChanges shadow /etc/shadow
cp /etc/shadow CompareFiles/
# CheckFileChanges resolv.conf /etc/resolv.conf
cp /etc/resolv.conf CompareFiles/
# CheckFileChanges sshauth /root/.ssh/authorized_keys
cp /root/.ssh/authorized_keys CompareFiles/authorized_keys
# CheckConfigChanges routing.txt "netstat -nr"
netstat -nr > CompareFiles/routing.txt
# CheckConfigChanges listeners.txt "netstat -tulpen"
netstat -tulpen > CompareFiles/listeners.txt
# clean up - make things safe
chmod 600 CompareFiles/*
chmod 0700 CompareFiles
chown root:root CompareFiles
|
wyae/MoSShE
|
generate_compares.sh
|
Shell
|
gpl-3.0
| 756 |
#!/bin/sh
# Exercise cp --link's behavior regarding the dereferencing of symbolic links.
# Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
if grep '^#define HAVE_LINKAT 1' "$CONFIG_HEADER" > /dev/null \
&& grep '^#define LINK_FOLLOWS_SYMLINKS 0' "$CONFIG_HEADER" > /dev/null; then
# With this config (which is the case on GNU/Linux) cp will attempt to
# linkat() to hardlink a symlink. So now see if the current file system
# supports this operation.
ln -s testtarget test_sl || framework_failure_
ln -P test_sl test_hl_sl || framework_failure_
ino_sl="$(stat -c '%i' test_sl)" || framework_failure_
ino_hl="$(stat -c '%i' test_hl_sl)" || framework_failure_
test "$ino_sl" = "$ino_hl" && can_hardlink_to_symlink=1
fi
mkdir dir || framework_failure_
: > file || framework_failure_
ln -s dir dirlink || framework_failure_
ln -s file filelink || framework_failure_
ln -s nowhere danglink || framework_failure_
# printf format of the output line.
outformat='%s|result=%s|inode=%s|type=%s|error=%s\n'
for src in dirlink filelink danglink; do
# Get symlink's target.
tgt=$(readlink $src) || framework_failure_
# Get inodes and file type of the symlink (src) and its target (tgt).
# Note: this will fail for 'danglink'; catch it.
ino_src="$(stat -c '%i' $src)" || framework_failure_
typ_src="$(stat -c '%F' $src)" || framework_failure_
ino_tgt="$(stat -c '%i' $tgt 2>/dev/null)" || ino_tgt=
typ_tgt="$(stat -c '%F' $tgt 2>/dev/null)" || typ_tgt=
for o in '' -L -H -P; do
# Skip the -P case where we don't or can't hardlink symlinks
! test "$can_hardlink_to_symlink" && test "$o" = '-P' && continue
for r in '' -R; do
command="cp --link $o $r $src dst"
$command 2> err
result=$?
# Get inode and file type of the destination (which may fail, too).
ino_dst="$(stat -c '%i' dst 2>/dev/null)" || ini_dst=
typ_dst="$(stat -c '%F' dst 2>/dev/null)" || typ_dst=
# Print the actual result in a certain format.
printf "$outformat" \
"$command" \
"$result" \
"$ino_dst" \
"$typ_dst" \
"$(cat err)" \
> out
# What was expected?
if [ "$o" = "-P" ]; then
# cp --link should not dereference if -P is given.
exp_result=0
exp_inode=$ino_src
exp_ftype=$typ_src
exp_error=
elif [ "$src" = 'danglink' ]; then
# Dereferencing should fail for the 'danglink'.
exp_result=1
exp_inode=
exp_ftype=
exp_error="cp: cannot stat 'danglink': No such file or directory"
elif [ "$src" = 'dirlink' ] && [ "$r" != '-R' ]; then
# Dereferencing should fail for the 'dirlink' without -R.
exp_result=1
exp_inode=
exp_ftype=
exp_error="cp: omitting directory 'dirlink'"
elif [ "$src" = 'dirlink' ]; then
# cp --link -R 'dirlink' should create a new directory.
exp_result=0
exp_inode=$ino_dst
exp_ftype=$typ_dst
exp_error=
else
# cp --link 'filelink' should create a hard link to the target.
exp_result=0
exp_inode=$ino_tgt
exp_ftype=$typ_tgt
exp_error=
fi
# Print the expected result in a certain format.
printf "$outformat" \
"$command" \
"$exp_result" \
"$exp_inode" \
"$exp_ftype" \
"$exp_error" \
> exp
compare exp out || { ls -lid $src $tgt dst; fail=1; }
rm -rf dst err exp out || framework_failure_
done
done
done
Exit $fail
|
houwentaoff/coreutils
|
tests/cp/link-deref.sh
|
Shell
|
gpl-3.0
| 4,320 |
#!/bin/bash
set -e
if [ -z "$@" ]; then
exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf --nodaemon
else
PATH=/usr/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin exec $@
fi
|
pablodanielrey/scripts
|
docker/entrypoint.sh
|
Shell
|
gpl-3.0
| 195 |
#!/bin/bash
## note: run 'source activate uga' before installing R packages
## run this from the command line to get the current package versions, otherwise same versions can be installed manually
## versions installed during development
# kinship2_1.8.4
# geepack_1.2-1
# lme4_1.1-21
# lmerTest_3.1-0
# pbkrtest_0.4-7
# seqMeta_1.6.7
# RColorBrewer_1.1-2
# R.utils_2.9.0
# ggplot2_3.2.0
R -e 'install.packages(c("kinship2", "geepack", "lme4", "lmerTest", "pbkrtest", "seqMeta", "RColorBrewer", "ggplot2","R.utils"), repos="http://cran.us.r-project.org", dependencies=TRUE)'
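# Editor's note (not part of the original script): a hypothetical way to print
# the installed version of each package listed above, for comparison with the
# versions recorded during development.
R -e 'for (p in c("kinship2","geepack","lme4","lmerTest","pbkrtest","seqMeta","RColorBrewer","ggplot2","R.utils")) cat(p, as.character(packageVersion(p)), "\n")'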
|
rmkoesterer/uga
|
env/install_r_packages.sh
|
Shell
|
gpl-3.0
| 578 |
#!/bin/bash
cd `dirname ${BASH_SOURCE[0]}`
buffdir=`dirname $0`
java -cp ${buffdir}/buffer/java/Mobita2ft.jar:${buffdir}/buffer/java/BufferClient.jar Mobita2ft.Mobita2ft $@
|
bopjesvla/BCI
|
dataAcq/startJavaMobita.sh
|
Shell
|
gpl-3.0
| 173 |
#!/bin/bash
mv /var/log/mikescript.log /var/log/mikescript-cached-$RANDOM$RANDOM.log
echo $(date): Any previous logfiles cached elsewhere >> /var/log/mikescript.log
echo $(date): Script was initialized >> /var/log/mikescript.log
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root"
echo $(date): Script was not run as root >> /var/log/mikescript.log
exit 1
fi
echo "Clearing HOSTS file"
echo $(date): Clearing HOSTS file >> /var/log/mikescript.log
echo 127.0.0.1 localhost > /etc/hosts
echo 127.0.1.1 ubuntu >> /etc/hosts
echo ::1 ip6-localhost ip6-loopback >> /etc/hosts
echo fe00::0 ip6-localnet >> /etc/hosts
echo ff00::0 ip6-mcastprefix >> /etc/hosts
echo ff02::1 ip6-allnodes >> /etc/hosts
echo ff02::2 ip6-allrouters >> /etc/hosts
msg=$(echo HOSTS file cleared | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
echo $(date): Verifying an internet connection with aptitude >> /var/log/mikescript.log
apt-get install cowsay -y &> /dev/null
if [ "$?" -eq "1" ]; then
echo "This script cannot access aptitude properly."
echo $(date): Aptitude check failed >> /var/log/mikescript.log
exit 1
fi
apt-get install pastebinit -y
cd /var/log/apt
gunzip history.log.*.gz
cat history* | grep Commandline | grep -v pastebinit | grep -v cowsay | sed 's/Commandline\: apt-get//g' | sed 's/remove/removed/g' | sed 's/install/installed/g' | sed 's/purge/purged/g' > /tmp/pasted
msg=$(pastebinit -u marshallcyber1 -p [[]] -i /tmp/pasted | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
cat $(locate bash_history) > /tmp/usershistory
msg=$(pastebinit -u marshallcyber1 -p [[]] -i /tmp/usershistory | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break
yum repolist all &> /dev/null
if [[ $? -eq 0 ]]; then
pkgmgr="yum"
fi
apt-get -h &> /dev/null
if [[ $? -eq 0 ]]; then
pkgmgr="apt"
echo $(date): $pkgmgr identified as package manager >> /var/log/mikescript.log
fi
add-apt-repository "deb http://archive.canonical.com/ubuntu precise partner"
add-apt-repository "deb http://archive.ubuntu.com/ubuntu precise multiverse main universe restricted"
add-apt-repository "deb http://security.ubuntu.com/ubuntu/ precise-security universe main multiverse restricted"
add-apt-repository "deb http://archive.ubuntu.com/ubuntu precise-updates universe main multiverse restricted"
if [ $? -eq 0 ]; then
msg=apt%20repositories%20successfully%20added
break
fi
echo $(date): Finished adding repos >> /var/log/mikescript.log
apt-get update &> /dev/null
if [ $? -eq 1 ]; then
echo $(date): Finished updating package lists with errors >> /var/log/mikescript.log
else
echo $(date): Finished updating package lists successfully >> /var/log/mikescript.log
fi
echo pkgmgr is $pkgmgr
# Detect readme
# Get ready for a f***ing rideeee
updatedb
locate -i readme > /tmp/readme
cat /tmp/readme | grep Desktop > /tmp/readmes
ls / | grep readme >> /dev/null
readmename=$(ls / | grep -i readme)
readmenameinroot=$(ls /root | grep -i readme)
if [ -s /tmp/readmes ]; then
suslocs=$(cat /tmp/readmes | wc -l)
if [ $suslocs -eq 1 ]; then
echo Only one primary candidate for readme
readmeloc=$(cat /tmp/readmes)
echo $readmeloc
else
echo Multiple readme candidates detected \(multiple readmes in desktops\)
multiprimes=$(cat /tmp/readmes | grep Desktop | grep $(users))
primescount=$(echo $multiprimes | wc -l)
if [ "$primescount" -eq "1" ]; then
echo $multiprimes detected as main candidate
readmeloc=$multiprimes
else
echo Cannot properly distinguish candidates
fi
fi
else
if [ -z "$readmename" ]; then
echo no readme in root file sys
else
echo /$readmename identified
readmeloc=/$readmename
fi
if [ -z "$readmenameinroot" ]; then
echo found readme at /root/$readmenameinroot
else
if [ -z "$readmename" ]; then
readmeloc=/root/$readmenameinroot
fi
fi
fi
if ! [ -z "$readmeloc" ]; then
echo Readme resolved at $readmeloc
else
echo Cannot determine the location of the readme
echo $readmeloc
echo Please enter the full path here:
read readmeloc
fi
echo $readmeloc locked in
if [ $? -eq 0 ]; then
msg=$(echo $readmeloc | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )%20identified%20as%20readme
break>> /dev/null
fi
echo $(date): $readmeloc is located readme >> /var/log/mikescript.log
cut -d: -f1,3 /etc/passwd | egrep ':[0-9]{4}$' | cut -d: -f1 > usersover1000
echo root >> usersover1000
for ScottStork in `cat usersover1000`
do
cat $readmeloc | grep $ScottStork
if [ "$?" -eq "1" ]; then
if [ "$ScottStork" = "root" ]; then
echo Root Exempt
else
echo Rogue user $ScottStork detected
echo Delete? \(Y\/N\)
msg=$(echo $ScottStork rogue user detected. requires immediate user intervention. | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
read yorn
if [ "$yorn" = "Y" ]; then
userdel $ScottStork
fi
fi
fi
done
echo $(date): $readmeloc set as READMELOC >> /var/log/mikescript.log
# SSH Server Configuration
cat /etc/ssh/sshd_config | grep PermitRootLogin | grep yes
if [ $? -eq 0 ]; then
sed -i 's/PermitRootLogin yes/PermitRootLogin no/g' /etc/ssh/sshd_config
echo $(date): PermitRootLogin rule detected in SSH >> /var/log/mikescript.log
msg=$(echo PermitRootLogin rule changed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
cat /etc/ssh/sshd_config | grep Protocol | grep 1
if [ $? -eq 0 ]; then
sed -i 's/Protocol 2,1/Protocol 2/g' /etc/ssh/sshd_config
sed -i 's/Protocol 1,2/Protocol 2/g' /etc/ssh/sshd_config
echo $(date): Protocol rule detected in SSH >> /var/log/mikescript.log
msg=$(echo SSH Protocol changed to exclusively 2 | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
grep X11Forwarding /etc/ssh/sshd_config | grep yes
if [ $? -eq 0 ]; then
sed -i 's/X11Forwarding yes/X11Forwarding no/g' /etc/ssh/sshd_config
echo $(date): X11Forwarding rule detected in SSH >> /var/log/mikescript.log
msg=$(echo X11Forwarding rule changed to no | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
# Sudoers - require password
grep PermitEmptyPasswords /etc/ssh/sshd_config | grep yes
if [ $? -eq 0 ]; then
sed -i 's/PermitEmptyPasswords yes/PermitEmptyPasswords no/g' /etc/ssh/sshd_config
echo $(date): PermitEmptyPasswords rule detected in SSH >> /var/log/mikescript.log
msg=$(echo PermitEmptyPasswords rule changed to no | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
grep NOPASSWD /etc/sudoers
if [ $? -eq 0 ]; then
tits=$(grep NOPASSWD /etc/sudoers)
sed -i "s/$tits/ /g" /etc/sudoers
echo $(date): NOPASSWD rule detected >> /var/log/mikescript.log
msg=$(echo SUDOERS NOPASSWD rule removed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
cd /etc/sudoers.d && ls /etc/sudoers.d | grep -v cyberpatriot | grep -v scor | xargs rm
msg=$(echo Removed any sudoers.d rules other than cyberpatriot | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
cat /etc/apt/apt.conf.d/10periodic | grep APT::Periodic::Update-Package-Lists | grep 0 >> /dev/null
if [ $? -eq 0 ]; then
sed -i 's/APT::Periodic::Update-Package-Lists "0"/APT::Periodic::Update-Package-Lists "1"/g' /etc/apt/apt.conf.d/10periodic
echo $(date): Periodic Updates enabled >> /var/log/mikescript.log
fi
/usr/lib/lightdm/lightdm-set-defaults -l false
if [ $? -eq 0 ]; then
msg=$(echo Set allow guest to false | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo "exit 0" > /etc/rc.local
msg=$(echo rc.local reset to exit 0 only | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
# Get rid of and replace any UID that is equal to 0
# Gives it a big-ass new UID, throws a nonfatal error or two but lol idc
cut -d: -f1,3 /etc/passwd | egrep ':0$' | cut -d: -f1 | grep -v root >> /tmp/blackthought
while read p <&3; do
useruid=$RANDOM$RANDOM
sed -i 's/'$p':x:0'/$p':x:'$useruid'/g' /etc/passwd
echo $(date): $p Rogue UID detected >> /var/log/mikescript.log
msg=$(echo Rogue root UID detected | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
done 3< /tmp/blackthought
#Disables ctrl+alt+del
sed -i '/^exec/ c\exec false' /etc/init/control-alt-delete.conf
msg=$(echo Ctrl alt delete is disabled | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
# Lord forgive me
# Alias Windows Commands for Linux Commands
# Also clears any rogue aliases :)
unalias -a
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias grep='grep --color=auto'
alias l='ls -CF'
alias la='ls -A'
alias ll='ls -alF'
alias ls='ls --color=auto'
alias cls=clear
alias dir=ls
alias type=cat
#only allow root in cron
cd /etc/
/bin/rm -f cron.deny at.deny
echo root >cron.allow
echo root >at.allow
/bin/chown root:root cron.allow at.allow
/bin/chmod 400 cron.allow at.allow
#Critical File Permissions
chown -R root:root /etc/apache2
chown -R root:root /etc/apache
#Secure Apache 2
if [ -e /etc/apache2/apache2.conf ]; then
echo \<Directory \/\> >> /etc/apache2/apache2.conf
echo -e ' \t AllowOverride None' >> /etc/apache2/apache2.conf
echo -e ' \t Order Deny,Allow' >> /etc/apache2/apache2.conf
echo -e ' \t Deny from all' >> /etc/apache2/apache2.conf
echo \<\/Directory\> >> /etc/apache2/apache2.conf
echo UserDir disabled root >> /etc/apache2/apache2.conf
echo $(date): Apache security measures enabled >> /var/log/mikescript.log
fi
#SYN Cookie Protection
sysctl -w net.ipv4.tcp_syncookies=1
if [ "$?" -eq "0" ]; then
echo $(date): SYN cookie protection enabled >> /var/log/mikescript.log
msg=$(echo SYN Cookie protection enabled | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo Edit Passwords
#List users with UID over 1000
echo $(date): Parsing passwd for UID 1000 or more >> /var/log/mikescript.log
cut -d: -f1,3 /etc/passwd | egrep ':[0-9]{4}$' | cut -d: -f1 > usersover1000
echo root >> usersover1000
for ScottStorch in `cat usersover1000`
do
echo $ScottStorch password being changed
echo $ScottStorch':Y0L0SWAg1!' | chpasswd
if [ "$?" -eq "0" ]; then
echo "Password change successful"
echo $(date): $ScottStorch password changed >> /var/log/mikescript.log
else
echo "Password change failed"
echo $(date): $ScottStorch password failed to change >> /var/log/mikescript.log
fi
done
#Set password policy
apt-get install libpam-cracklib -y &> /dev/null
grep "auth optional pam_tally.so deny=5 unlock_time=900 onerr=fail audit even_deny_root_account silent " /etc/pam.d/common-auth
if [ "$?" -eq "1" ]; then
echo "auth optional pam_tally.so deny=5 unlock_time=900 onerr=fail audit even_deny_root_account silent " >> /etc/pam.d/common-auth
echo "password requisite pam_cracklib.so retry=3 minlen=8 difok=3 reject_username minclass=3 maxrepeat=2 dcredit=1 ucredit=1 lcredit=1 ocredit=1" >> /etc/pam.d/common-password
echo "password requisite pam_pwhistory.so use_authtok remember=24 enforce_for_root" >> /etc/pam.d/common-password
echo $(date): Super anal password policy applied >> /var/log/mikescript.log
fi
OLDFILE=/etc/login.defs
NEWFILE=/etc/login.defs.new
PASS_MAX_DAYS=15
PASS_MIN_DAYS=6
PASS_MIN_LEN=8
PASS_WARN_AGE=7
SEDSCRIPT=$(mktemp)
# change existing arguments at the same position
cat - > $SEDSCRIPT <<EOF
s/\(PASS_MAX_DAYS\)\s*[0-9]*/\1 $PASS_MAX_DAYS/
s/\(PASS_MIN_DAYS\)\s*[0-9]*/\1 $PASS_MIN_DAYS/
s/\(PASS_WARN_AGE\)\s*[0-9]*/\1 $PASS_WARN_AGE/
EOF
sed -f $SEDSCRIPT $OLDFILE > $NEWFILE
# add non-existing arguments
grep -q "^PASS_MAX_DAYS\s" $NEWFILE || echo "PASS_MAX_DAYS $PASS_MAX_DAYS" >> $NEWFILE
grep -q "^PASS_MIN_DAYS\s" $NEWFILE || echo "PASS_MIN_DAYS $PASS_MIN_DAYS" >> $NEWFILE
grep -q "^PASS_WARN_AGE\s" $NEWFILE || echo "PASS_WARN_AGE $PASS_WARN_AGE" >> $NEWFILE
rm $SEDSCRIPT
# Check result
grep ^PASS $NEWFILE
# Copy result back. Don't use "mv" or "cp" to keep owner, group and access-mode
cat $NEWFILE > $OLDFILE
if [ $? -eq 0 ]; then
msg=$(echo Password min. max. and warning age is set | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): Password age established >> /var/log/mikescript.log
# TCP SYN Cookies
sysctl -w net.ipv4.tcp_syncookies=1
echo $(date): TCP SYN Cookie Flood Protection Enabled >> /var/log/mikescript.log
# Don't act as router
sysctl -w net.ipv4.ip_forward=0
sysctl -w net.ipv4.conf.all.send_redirects=0
sysctl -w net.ipv4.conf.default.send_redirects=0
if [ $? -eq 0 ]; then
msg=$(echo IP Forwarding and redirects disallowed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): IP forwarding and redirects disallowed >> /var/log/mikescript.log
# Make sure no one can alter the routing tables
sysctl -w net.ipv4.conf.all.accept_redirects=0
sysctl -w net.ipv4.conf.default.accept_redirects=0
sysctl -w net.ipv4.conf.all.secure_redirects=0
sysctl -w net.ipv4.conf.default.secure_redirects=0
if [ $? -eq 0 ]; then
msg=$(echo Accepting redirects and secure redirects disallowed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): Accepting redirects and secure redirects disallowed as well >> /var/log/mikescript.log
sysctl -p
echo $(date): Locating world writeable files... >> /var/log/mikescript.log
cd / && ls -laR | grep rwxrwxrwx | grep -v "lrwx" &> /tmp/777s
cat /tmp/777s >> /var/log/mikescript.log
echo $(date): Looking for rogue SUID/SGID binaries... >> /var/log/mikescript.log
echo Sysctl complete
echo $(date): Sysctl completed >> /var/log/mikescript.log
#Prohibited Media Files
if [ $pkgmgr = "apt" ]; then
echo $(date): Running and installing debsums >> /var/log/mikescript.log
apt-get install debsums -y &> /dev/null
debsums -e | grep FAIL
debsums -c | grep FAIL
debsums -c | grep FAIL >> /var/log/mikescript.log
echo $(date): Debsums run >> /var/log/mikescript.log
if [ $? -eq 0 ]; then
msg=$(echo Debsums run | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
fi
echo Working on deleting prohibited media...
echo $(date): Logging media >> /var/log/mikescript.log
find / -name "*.mp3" -type f >> /var/log/mikescript.log
find / -name "*.wav" -type f >> /var/log/mikescript.log
find / -name "*.wmv" -type f >> /var/log/mikescript.log
find / -name "*.mp4" -type f >> /var/log/mikescript.log
find / -name "*.mov" -type f >> /var/log/mikescript.log
find / -name "*.avi" -type f >> /var/log/mikescript.log
find / -name "*.mpeg" -type f >> /var/log/mikescript.log
find /home -name "*.jpeg" -type f >> /var/log/mikescript.log
find /home -name "*.jpg" -type f >> /var/log/mikescript.log
find /home -name "*.png" -type f >> /var/log/mikescript.log
find /home -name "*.gif" -type f >> /var/log/mikescript.log
find /home -name "*.tif" -type f >> /var/log/mikescript.log
find /home -name "*.tiff" -type f >> /var/log/mikescript.log
find / -name "*.mp3" -type f -delete
find / -name "*.wav" -type f -delete
find / -name "*.wmv" -type f -delete
find / -name "*.mp4" -type f -delete
find / -name "*.mov" -type f -delete
find / -name "*.avi" -type f -delete
find / -name "*.mpeg" -type f -delete
find /home -name "*.jpeg" -type f -delete
find /home -name "*.jpg" -type f -delete
find /home -name "*.png" -type f -delete
find /home -name "*.gif" -type f -delete
find /home -name "*.tif" -type f -delete
find /home -name "*.tiff" -type f -delete
if [ $? -eq 0 ]; then
msg=$(echo Prohibited media logged and deleted | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
if [ $pkgmgr=="apt" ]; then
dpkg -l | grep apache
dpkg -l | grep avahi
dpkg -l | grep openssh-server
dpkg -l | grep cupsd
dpkg -l | grep master
dpkg -l | grep nginx
apt-get install ufw -y >> /dev/null
if [ $? -eq 0 ]; then
msg=$(echo UFW installed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
fi
if [ $pkgmgr=="yum" ]; then
yum -y install ufw >> /dev/null
if [ $? -eq 0 ]; then
msg=$(echo UFW installed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
fi
ufw enable
echo $(date): UFW enabled >> /var/log/mikescript.log
if [ $? -eq 0 ]; then
msg=$(echo UFW enabled | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
ufw allow http
if [ $? -eq 0 ]; then
msg=$(echo UFW HTTP exception added | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): UFW exception added for regular HTTP >> /var/log/mikescript.log
ufw allow https
if [ $? -eq 0 ]; then
msg=$(echo UFW HTTPS exception added | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): UFW exception added for HTTPS >> /var/log/mikescript.log
if [ $pkgmgr = "apt" ]; then
apt-get install apparmor apparmor-profiles -y &> /dev/null
fi
# Rootkit checker
clear
echo $(date): Checking for clearly bad packages >> /var/log/mikescript.log
echo $(date): Repopulating package lists.... >> /var/log/mikescript.log
apt-get update &> /dev/null
dpkg -l | grep netcat
if [ "$?" -eq "0" ]; then
apt-get purge netcat netcat-openbsd netcat-traditional -y
killnetcat=$(find / -name netcat -o -name nc)
rm -rf $killnetcat
if [ $? -eq 0 ]; then
msg=$(echo Netcats removed | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
fi
dpkg -l | cut -d\ -f3 | grep -v +++ | grep -v Status,Err | grep -v Desired > /tmp/dpkglisting
grep apache /tmp/dpkglisting
if [ "$?" -eq "0"]; then
apachefun=0
grep -i apache $readmeloc
let apachefun=$?+$apachefun
grep -i web $readmeloc
let apachefun=$?+$apachefun
fi
echo $(date): Installing chkrootkit >> /var/log/mikescript.log
apt-get install chkrootkit -y &> /dev/null
if [ $? -eq 0 ]; then
msg=$(echo Chkrootkit was installed. | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): Installing RKHunter manually >> /var/log/mikescript.log
apt-get install rkhunter -y
if [ $? -eq 0 ]; then
msg=$(echo Rkhunter was installed. running. | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
clear
rkhunter -c --rwo
if [ $? -eq 0 ]; then
msg=$(echo Rkhunter was run | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): Rootkit Hunter was run in warning mode >> /var/log/mikescript.log
chkrootkit -q
if [ $? -eq 0 ]; then
msg=$(echo Chkrootkit was run | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): Chkrootkit was run in quiet mode >> /var/log/mikescript.log
apt-get install tiger -y &> /dev/null
echo $(date): Tiger IDS was installed >> /var/log/mikescript.log
tiger
if [ $? -eq 0 ]; then
msg=$(echo Tiger IDS was run | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break>> /dev/null
fi
echo $(date): Displaying crontabs >> /var/log/mikescript.log
for user in `cat /etc/passwd | cut -d ":" -f 1`; do
cron=$(sudo -u $user crontab -l 2> /dev/null| grep -v "#")
if [ "$cron" ]; then
echo "$user" >> /var/log/mikescript.log
echo "$cron" >> /var/log/mikescript.log
fi
done
msg=$(pastebinit -u marshallcyber1 -p [[]]] -i /var/log/mikescript.log | sed 's/\//%2F/g' | sed 's/\./%2E/g' | sed 's/\ /%20/g' )
break
echo $(date): Tiger IDS was run >> /var/log/mikescript.log
apt-get install zenity -y &> /dev/null
msg=The%20Linux%20Security%20Script%20has%20finished%2E%20Return%20to%20computer%20ASAP%2E
break>> /dev/null
zenity --info --text="The script finished successfully. Michael has been texted."
|
hexidecimals/cyberpatriot
|
linux.sh
|
Shell
|
gpl-3.0
| 20,260 |
#!/bin/sh
#
_exit_usage() {
echo
echo "usage: $0 filename.mp3"
echo
exit 100
}
test -z "$1" && _exit_usage
if ! [ -r "$1" ] ; then
echo "ERROR: can't read file '$1'"
_exit_usage
fi
# exit immediately on any error
set -e
MD5=$(mp3cat - - < "$1" | md5sum | cut -d ' ' -f 1)
eyeD3 --set-user-text-frame=audiomd5:$MD5 "$1" > /dev/null 2>&1
echo "$MD5:$1"
|
ehrenfeu/simplify
|
misc/audiomd5.sh
|
Shell
|
gpl-3.0
| 380 |
#!/bin/bash -x
source /host/settings.sh
DOMAIN=${DOMAIN:-$IMAGE.example.org}
### create a configuration file
cat <<EOF > /etc/apache2/sites-available/bcl.conf
<VirtualHost *:80>
ServerName $DOMAIN
RedirectPermanent / https://$DOMAIN/
</VirtualHost>
<VirtualHost _default_:443>
ServerName $DOMAIN
DocumentRoot /var/www/bcl
<Directory /var/www/bcl/>
AllowOverride All
</Directory>
SSLEngine on
SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
#SSLCertificateChainFile /etc/ssl/certs/ssl-cert-snakeoil.pem
<FilesMatch "\.(cgi|shtml|phtml|php)$">
SSLOptions +StdEnvVars
</FilesMatch>
</VirtualHost>
EOF
### we need to refer to this apache2 config by the name "$DOMAIN.conf" as well
ln /etc/apache2/sites-available/{bcl,$DOMAIN}.conf
cat <<EOF > /etc/apache2/conf-available/downloads.conf
Alias /downloads /var/www/downloads
<Directory /var/www/downloads>
Options Indexes FollowSymLinks
</Directory>
EOF
a2enconf downloads
### enable ssl etc.
a2enmod ssl
a2dissite 000-default
a2ensite bcl
a2enmod headers rewrite
### create a script to check for apache2, and start it if not running
cat <<'EOF' > /usr/local/sbin/apachemonitor.sh
#!/bin/bash
# restart apache if it is down
if ! /usr/bin/pgrep apache2
then
date >> /usr/local/apachemonitor.log
rm /var/run/apache2/apache2.pid
/etc/init.d/apache2 restart
fi
EOF
chmod +x /usr/local/sbin/apachemonitor.sh
### setup a cron job to monitor apache2
mkdir -p /etc/cron.d/
cat <<'EOF' > /etc/cron.d/apachemonitor
* * * * * root /usr/local/sbin/apachemonitor.sh >/dev/null 2>&1
EOF
chmod +x /etc/cron.d/apachemonitor
### limit the memory size of apache2 when developing
if [[ -n $DEV ]]; then
sed -i /etc/php/7.1/apache2/php.ini \
-e '/^\[PHP\]/ a apc.rfc1867 = 1' \
-e '/^display_errors/ c display_errors = On'
sed -i /etc/apache2/mods-available/mpm_prefork.conf \
-e '/^<IfModule/,+5 s/StartServers.*/StartServers 2/' \
-e '/^<IfModule/,+5 s/MinSpareServers.*/MinSpareServers 2/' \
-e '/^<IfModule/,+5 s/MaxSpareServers.*/MaxSpareServers 4/' \
-e '/^<IfModule/,+5 s/MaxRequestWorkers.*/MaxRequestWorkers 50/'
fi
### modify the configuration of php
cat <<EOF > /etc/php/7.1/mods-available/apcu.ini
extension=apcu.so
apcu.mmap_file_mask=/tmp/apcu.XXXXXX
apcu.shm_size=96M
EOF
sed -i /etc/php/7.1/apache2/php.ini \
-e '/^;\?memory_limit/ c memory_limit = 200M' \
-e '/^;\?max_execution_time/ c max_execution_time = 90' \
-e '/^;\?display_errors/ c display_errors = On' \
-e '/^;\?post_max_size/ c post_max_size = 16M' \
-e '/^;\?cgi\.fix_pathinfo/ c cgi.fix_pathinfo = 1' \
-e '/^;\?upload_max_filesize/ c upload_max_filesize = 16M' \
-e '/^;\?default_socket_timeout/ c default_socket_timeout = 90'
service apache2 restart
|
B-Translator/btr_client
|
ds/scripts/install/apache2.sh
|
Shell
|
gpl-3.0
| 2,984 |
#!/bin/sh
cd ..
make
./a.out << EOF
cd -
ls
cd src
ls
cd
ls
pwd
exit
EOF
|
nseth001/rshell
|
tests/cd_command.sh
|
Shell
|
gpl-3.0
| 74 |
#!/bin/sh
if [ -z "$1" ] ; then
echo pass a tag as argv, i.e. v1.1.1
exit 1
fi
tag=$1
git tag $tag
git push origin $tag
|
sabotage-linux/butch
|
maketag.sh
|
Shell
|
gpl-3.0
| 124 |
#!/bin/bash
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# #@ does nothing at all ?
#
# Keywords: yay keywords
#
# there seems to be no description whatsoever :F
# still none
#
# no author exists
# just keys . and not here.........
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2009 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Include the BeakerLib environment
. /usr/share/beakerlib/beakerlib.sh
# Set the full test name
#@@key sthsth
rlJournalStart
rlPhaseStartSetup "Setup"
# does nothing
rlPhaseEnd
#@ @key puppy
#@ Cleanup after test
rlPhaseStartCleanup "Cleanup"
#@ we better clean up after doing nothing
#@ @key dummykey
rlPhaseEnd
rlJournalEnd
|
rh-lab-q/bkrdoc
|
examples/markup/headers/keys-and-keywords.sh
|
Shell
|
gpl-3.0
| 1,507 |
#!/bin/bash
#rm -rf ~/.android/avd/
path=$1
cd "$1"
rm -rf workingDir
echo "Dynodroid run started at "$(date "+%Y.%m.%d-%H:%M:%S") > dynoDroidRun$(date "+%Y.%m.%d-%H:%M:44").log
xterm -hold -e tail -f dynoDroidRun$(date "+%Y.%m.%d-%H:%M:44").log &
ant run 1>> dynoDroidRun$(date "+%Y.%m.%d-%H:%M:44").log 2>> dynoDroidRun$(date "+%Y.%m.%d-%H:%M:44").log
|
luminaxster/mudroid
|
old_scripts/run_dynodroid_on_path.sh
|
Shell
|
gpl-3.0
| 357 |
#! /bin/sh
# Case I:
#
#
# 1) Let's assume a set of 100 unique abs - X.
#
# 2) Let's say for CDR-L1 (using CDR-L1 as an example), the following definitions apply:
#
# a) c is CDR-L1 sequence.
# b) k is the set of key residues in the CDR for the specific class of CDR-L1.
# c) f is the set of key residues in the framework for the class of CDR-L1.
#
# 3) For each x in X, do the following steps:
#
# 4) Define X' as (X - x).
#
# 5) For each x' in X':
#
# 6) If the length of the CDR of x' is not the same as x, go to the next sequence.
#
# 7) Performing the following steps:
#
# NOTE: x' is another PDB with the same loop length as $pdbCode1
#
# a) Calculate the sequence identity of x,x' over c (an illustrative helper is sketched after this comment block)
#
# b) Record the best PDB b.
#
# c) Calculate the RMS over the loop between b and x.
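# Editor's sketch (not part of the original pipeline): the sequence identity of
# step 7a) is computed by the external script get_sequence_identity.pl; an
# equivalent calculation for two equal-length sequences is shown below purely
# for illustration, e.g. seq_identity_example QSVLYSS QSVLNSS
seq_identity_example ()
{
    awk -v a="$1" -v b="$2" 'BEGIN {
        n = length(a)
        if (n == 0 || n != length(b)) { print 0; exit }
        m = 0
        for (i = 1; i <= n; i++) if (substr(a, i, 1) == substr(b, i, 1)) m++
        printf "%.2f\n", 100 * m / n
    }'
}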
# Check if the required command line parameters have been input.
if [ $# -lt 4 ]
then
echo
echo "Usage: $0 <Arguments>"
echo
echo "Arguments are:"
echo
echo "1. Loop"
echo "2. Loop definition (e.g. L24-L34)"
echo "3. File with list of bad PDB codes for the loop concerned"
echo "4. File with list of canonical class mapping to PDB codes"
echo
exit 0
fi
loop=$1
loopDefinition=$2
prohibitedPDBsListFilename=$3
mappingsFilename=$4
# Check if the file with list of bad PDB codes can be read.
if [ ! -r $prohibitedPDBsListFilename ]
then
echo
echo "Unable to read file \"$prohibitedPDBsListFilename\""
echo
exit 0
fi
# Check if the file with mappings between PDB codes and canonical classes
# is present.
if [ ! -r $mappingsFilename ]
then
echo
echo "Unable to read file \"$mappingsFilename\""
echo
exit 0
fi
# Parse the loop definition for the loop start and loop end.
loopStart=`echo $loopDefinition | awk -F'-' '{print $1}'`
loopEnd=`echo $loopDefinition | awk -F'-' '{print $2}'`
if [ ! `echo $loopStart | grep "^[LH][1-9]"` ]
then
echo
echo "Invalid loop definition string \"$loopDefinition\""
echo
exit 0
fi
if [ ! `echo $loopEnd | grep "^[LH][1-9]"` ]
then
echo
echo "Invalid loop definition string \"$loopDefinition\""
echo
exit 0
fi
# For every unique PDB....
for pdbCode1 in `grep "^>" Fv_unique.pir | sed 's/>P1;//'`
do
# Step 1: Identify another structure whose loop sequence matches
# best with the loop sequence of $pdbCode1.
if [ `grep $pdbCode1 $prohibitedPDBsListFilename | head -1` ]
then
# Skip to the next PDB.
continue
fi
# Set the path for the PDB of Fv region.
pdb1Filename=$HOME/CANONICALS/NEW_DATASET/NUMBERED_Fv_PDB/$pdbCode1.pdb
# Get the loop sequence.
loop1Sequence=`getpdb $loopStart $loopEnd $pdb1Filename | pdb2pir -C -s | grep -v "^>" | grep -v "Seque" | sed 's/\*//'`
loop1Length=`echo "${#loop1Sequence}"`
# Set the best match variables to default values.
bestSequenceIdentity=-100000
bestLoopSequence=""
bestPDBCode=""
bestPDBFilename=""
bestCanonicalClassMatch=""
# For every other pdb in the unique list, do the required comparisons.
for pdbCode2 in `grep "^>" Fv_unique.pir | sed 's/>P1;//'`
do
# If the two PDB codes are the same, skip to the next one.
if [ "$pdbCode1" == "$pdbCode2" ]
then
continue
fi
# Check if the PDB is in the list of prohibited PDBs for the loop.
if [ `grep $pdbCode2 $prohibitedPDBsListFilename | head -1` ]
then
# Skip to the next PDB.
continue
fi
# Get the loop sequence.
pdb2Filename=$HOME/CANONICALS/NEW_DATASET/NUMBERED_Fv_PDB/$pdbCode2.pdb
loop2Sequence=`getpdb $loopStart $loopEnd $pdb2Filename | pdb2pir -C -s | grep -v "^>" | grep -v "Seque" | sed 's/\*//'`
loop2Length=`echo "${#loop2Sequence}"`
# Skip to the next PDB in the inner loop if the two loops
# are not of equal length.
if [ $loop1Length != $loop2Length ]
then
continue
fi
# Get the sequence identity between the two loops.
sequenceIdentity=`perl get_sequence_identity.pl $loop1Sequence $loop2Sequence`
# command="perl get_sequence_identity.pl $loop1Sequence $loop2Sequence"
# echo $command
# read
# Check if the sequence identity is better than the best recorded
# so far. If so, update the best sequence identity.
out=`echo "$sequenceIdentity > $bestSequenceIdentity" | bc`
if [ $out == 1 ]
then
bestLoopSequence=$loop2Sequence
bestSequenceIdentity=$sequenceIdentity
bestPDBCode=$pdbCode2
fi
done # End of inner for loop.
# Step 2: Carry out a structural fit of the loop of $pdbCode1 with $pdbCode2.
profitScriptFilename=/tmp/$$.prf
bestPDBFilename=$HOME/CANONICALS/NEW_DATASET/NUMBERED_Fv_PDB/$bestPDBCode.pdb
echo "REFERENCE $pdb1Filename" > $profitScriptFilename
echo "MOBILE $bestPDBFilename" >> $profitScriptFilename
echo "ZONE $loopDefinition:$loopDefinition" >> $profitScriptFilename
echo "IGNOREMISSING" >> $profitScriptFilename
echo "FIT" >> $profitScriptFilename
# Get the canonical class information.
canonicalClass1=`grep $pdbCode1 $mappingsFilename | awk -F',' '{print $3}'`
bestCanonicalClassMatch=`grep $bestPDBCode $mappingsFilename | awk -F',' '{print $3}'`
# Run ProFit on the script.
rms=`profit -f $profitScriptFilename | grep RMS | awk '{print $2}'`
# Print the results.
echo "$loop,$loopDefinition,$pdbCode1,$canonicalClass1,$loop1Sequence,$bestPDBCode,$bestCanonicalClassMatch,$bestLoopSequence,$bestSequenceIdentity,$rms"
done # End of outer for loop.
# Remove the ProFit script.
rm -f $profitScriptFilename
# End of script.
|
ACRMGroup/canonicals
|
ANALYSIS/KEY_RESIDUES_EXPERIMENT/case1_part1.sh
|
Shell
|
gpl-3.0
| 5,671 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-calculateratio_2-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::calculateratio_2:1.0 -N ID0000017 -R condorpool -L example_workflow -T 2016-11-08T20:46:03+00:00 ./example_workflow-calculateratio_2-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/instances/10_1_workflow_full_10files_secondary_w1_3sh_3rs_with_annot_with_proj_3s_range/dags/ubuntu/pegasus/example_workflow/20161108T204604+0000/00/00/calculateratio_2_ID0000017.sh
|
Shell
|
gpl-3.0
| 1,252 |
#!/bin/sh
if [ -d utils ]; then
cd utils
fi
cd ..
chmod a+rx bin examples python
chmod a+r bin/* examples/* python/*
chmod a+x examples/*.sh
cd bin
zip -u -9 ../chomp_bin chomp
cd ..
zip -u -9 -r chompfull_bin licen* bin/* examples/* python/* -x examples/*.bat
|
felixboes/hosd
|
chomp/utils/zipbin.sh
|
Shell
|
gpl-3.0
| 267 |
#!/bin/bash
python RunBootstrap.py --paralog1 YMR143W --paralog2 YDL083C --bootnum 47 > YMR143W_YDL083C_Boot47_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Bootstrap/ShFiles/MG94_YMR143W_YDL083C_Boot47.sh
|
Shell
|
gpl-3.0
| 128 |
#!/bin/bash
# exit script if return code != 0
set -e
# set arch for base image
OS_ARCH="x86-64"
# construct snapshot date (cannot use todays as archive wont exist) and set url for archive.
# note: for arch linux arm archive repo that the snapshot date has to be at least 2 days
# previous as the mirror from live to the archive for arm packages is slow
snapshot_date=$(date -d "2 days ago" +%Y/%m/%d)
# now set pacman to use snapshot for packages for snapshot date
if [[ "${OS_ARCH}" == "aarch64" ]]; then
echo 'Server = http://tardis.tiny-vps.com/aarm/repos/'"${snapshot_date}"'/$arch/$repo' > '/etc/pacman.d/mirrorlist'
echo 'Server = http://eu.mirror.archlinuxarm.org/$arch/$repo' >> '/etc/pacman.d/mirrorlist'
else
echo 'Server = https://archive.archlinux.org/repos/'"${snapshot_date}"'/$repo/os/$arch' > '/etc/pacman.d/mirrorlist'
echo 'Server = http://archive.virtapi.org/repos/'"${snapshot_date}"'/$repo/os/$arch' >> '/etc/pacman.d/mirrorlist'
fi
echo "[info] content of arch mirrorlist file"
cat '/etc/pacman.d/mirrorlist'
# reset gpg (not required when source is bootstrap tarball, but keeping for historic reasons)
rm -rf '/etc/pacman.d/gnupg/' '/root/.gnupg/' || true
# dns resolution reconfigure is required due to the tarball extraction
# overwriting the /etc/resolv.conf, thus we then need to fix this up
# before we can continue to build the image.
#echo "[info] Setting DNS resolvers to Cloudflare..."
#echo "nameserver 1.1.1.1" > '/etc/resolv.conf' || true
#echo "nameserver 1.0.0.1" >> '/etc/resolv.conf' || true
# refresh gpg keys
gpg --refresh-keys
# initialise key for pacman and populate keys
if [[ "${OS_ARCH}" == "aarch64" ]]; then
pacman-key --init && pacman-key --populate archlinuxarm
else
pacman-key --init && pacman-key --populate archlinux
fi
# force use of protocol http and ipv4 only for keyserver (defaults to hkp)
echo "no-greeting" > '/etc/pacman.d/gnupg/gpg.conf'
echo "no-permission-warning" >> '/etc/pacman.d/gnupg/gpg.conf'
echo "lock-never" >> '/etc/pacman.d/gnupg/gpg.conf'
echo "keyserver https://keyserver.ubuntu.com" >> '/etc/pacman.d/gnupg/gpg.conf'
echo "keyserver-options timeout=10" >> '/etc/pacman.d/gnupg/gpg.conf'
# perform pacman refresh with retries (required as keyservers are unreliable)
count=0
echo "[info] refreshing keys for pacman..."
until pacman-key --refresh-keys || (( count++ >= 3 ))
do
echo "[warn] failed to refresh keys for pacman, retrying in 30 seconds..."
sleep 30s
done
# force pacman db refresh and install sed package (used to do package folder exclusions)
pacman -Sy sed --noconfirm
# configure pacman to not extract certain folders from packages being installed
# this is done as we strip out locale, man, docs etc when we build the arch-scratch image
sed -i '\~\[options\]~a # Do not extract the following folders from any packages being installed\n'\
'NoExtract = usr/share/locale* !usr/share/locale/en* !usr/share/locale/locale.alias\n'\
'NoExtract = usr/share/doc*\n'\
'NoExtract = usr/share/man*\n'\
'NoExtract = usr/lib/firmware*\n'\
'NoExtract = usr/lib/modules*\n'\
'NoExtract = usr/share/gtk-doc*\n' \
'/etc/pacman.conf'
# list all packages that we want to exclude/remove
unneeded_packages="\
filesystem \
cryptsetup \
device-mapper \
dhcpcd \
iproute2 \
jfsutils \
libsystemd \
linux \
lvm2 \
man-db \
man-pages \
mdadm \
netctl \
openresolv \
pciutils \
pcmciautils \
reiserfsprogs \
s-nail \
systemd \
systemd-sysvcompat \
usbutils \
xfsprogs"
# split space separated string into list for install paths
IFS=' ' read -ra unneeded_packages_list <<< "${unneeded_packages}"
# construct string to ensure removal of any packages that might be part of tarball
pacman_remove_unneeded_packages='pacman --noconfirm -Rsc'
for i in "${unneeded_packages_list[@]}"; do
pacman_remove_unneeded_packages="${pacman_remove_unneeded_packages} ${i}"
done
echo "[info] Removing unneeded packages that might be part of the tarball..."
echo "${pacman_remove_unneeded_packages} || true"
eval "${pacman_remove_unneeded_packages} || true"
echo "[info] Adding required packages to pacman ignore package list to prevent upgrades..."
# add coreutils to pacman ignore list to prevent permission denied issue on Docker Hub -
# https://gitlab.archlinux.org/archlinux/archlinux-docker/-/issues/32
#
# add filesystem to pacman ignore list to prevent buildx issues with
# /etc/hosts and /etc/resolv.conf being read only, see issue -
# https://github.com/moby/buildkit/issues/1267#issuecomment-768903038
#
sed -i -e 's~#IgnorePkg.*~IgnorePkg = filesystem~g' '/etc/pacman.conf'
echo "[info] Displaying contents of pacman config file, showing ignored packages..."
cat '/etc/pacman.conf'
echo "[info] Updating packages currently installed..."
pacman -Syu --noconfirm
echo "[info] Install base group and additional packages..."
pacman -S base awk sed grep gzip supervisor nano vi ldns moreutils net-tools dos2unix unzip unrar htop jq openssl-1.0 rsync --noconfirm
echo "[info] set locale..."
echo en_GB.UTF-8 UTF-8 > '/etc/locale.gen'
locale-gen
echo LANG="en_GB.UTF-8" > '/etc/locale.conf'
# add user "nobody" to primary group "users" (will remove any other group membership)
usermod -g users nobody
# add user "nobody" to secondary group "nobody" (will retain primary membership)
usermod -a -G nobody nobody
# setup env for user nobody
mkdir -p '/home/nobody'
chown -R nobody:users '/home/nobody'
chmod -R 775 '/home/nobody'
# set user "nobody" home directory (needs defining for pycharm, and possibly other apps)
usermod -d /home/nobody nobody
# set shell for user nobody
chsh -s /bin/bash nobody
# find latest tini release tag from github
curl --connect-timeout 5 --max-time 600 --retry 5 --retry-delay 0 --retry-max-time 60 -o "/tmp/tini_release_tag" -L "https://github.com/krallin/tini/releases"
tini_release_tag=$(cat /tmp/tini_release_tag | grep -P -o -m 1 '(?<=/krallin/tini/releases/tag/)[^"]+')
# download tini, used to do graceful exit when docker stop issued and correct reaping of zombie processes.
if [[ "${OS_ARCH}" == "aarch64" ]]; then
curl --connect-timeout 5 --max-time 600 --retry 5 --retry-delay 0 --retry-max-time 60 -o "/usr/bin/tini" -L "https://github.com/krallin/tini/releases/download/${tini_release_tag}/tini-arm64" && chmod +x "/usr/bin/tini"
else
curl --connect-timeout 5 --max-time 600 --retry 5 --retry-delay 0 --retry-max-time 60 -o "/usr/bin/tini" -L "https://github.com/krallin/tini/releases/download/${tini_release_tag}/tini-amd64" && chmod +x "/usr/bin/tini"
fi
# identify if base-devel package installed
if pacman -Qg "base-devel" > /dev/null ; then
# remove base devel excluding useful core packages
pacman -Ru $(pacman -Qgq base-devel | grep -v awk | grep -v pacman | grep -v sed | grep -v grep | grep -v gzip | grep -v which) --noconfirm
fi
# remove any build tools that maybe present from the build
pacman -Ru dotnet-sdk yarn git yay-bin reflector gcc binutils --noconfirm 2> /dev/null || true
# general cleanup
yes|pacman -Scc
pacman --noconfirm -Rns $(pacman -Qtdq) 2> /dev/null || true
rm -rf /var/cache/* \
/var/empty/.cache/* \
/usr/share/locale/* \
/usr/share/man/* \
/usr/share/gtk-doc/* \
/tmp/*
# additional cleanup for base only
rm -rf /root/* \
/var/cache/pacman/pkg/* \
/usr/lib/firmware \
/usr/lib/modules \
/.dockerenv \
/.dockerinit \
/usr/share/info/* \
/README \
/bootstrap
|
binhex/arch-base
|
build/root/install.sh
|
Shell
|
gpl-3.0
| 7,343 |
#################################################
# ====== ===== ====== =====
# || \\ | // |
# || \\ | // |
# || // | || ===== | --------
# || // | \\ // |
# ====== ===== ====== =====
##################################################
# [Summary]: File provides automated deployment for users with little code experience. Manual
# installation instructions are still offered in Installation.md.
# Rotate the display 180 degrees to proper orientation
sed -i '1 a lcd_rotate=2' /boot/config.txt
# Remove the undervoltage warning symbol
sed -i '1 a avoid_warnings=1' /boot/config.txt
# Install core dependencies
sudo apt-get install git python-bluez python-dev cython
#Install dependencies for Kivy
#If they are already installed, it should skip over them or update if old version installed
sudo apt-get install libsdl2-dev libsdl2-image-dev libsdl2-mixer-dev libsdl2-ttf-dev pkg-config libgl1-mesa-dev libgles2-mesa-dev python-setuptools libgstreamer1.0-dev git-core gstreamer1.0-plugins-{bad,base,good,ugly} gstreamer1.0-{omx,alsa}
# Upgrade Cython and install Kivy
sudo pip install --upgrade cython
sudo pip install git+https://github.com/kivy/kivy.git@master
# Insert call to python script to change kivy ini file
#HERE
# Update System
sudo apt-get update
sudo apt-get upgrade
sudo apt-get dist-upgrade
sudo apt-get autoremove
sudo rpi-update
# Clone DigiDash Repository
cd ~
mkdir repositories
cd repositories
git clone https://github.com/WebsterXC/digidash.git
# Create a custom command by adding digidash to /bin
cd /bin
if [ ! -e digidash.sh ]; then
	echo '#!/bin/bash' > digidash.sh
	echo "python $HOME/repositories/digidash/src/main.py" >> digidash.sh
fi
chmod +x digidash.sh
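# DigiDash can now be launched from any shell on the Pi simply by running: digidash.sh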
|
WebsterXC/digidash
|
install_digidash.sh
|
Shell
|
gpl-3.0
| 1,735 |
#!/usr/bin/env fish
if [ ! -f configure ]
./autogen.sh
end
set -xg CC cc
set -xg CXX c++
set -xg CPPFLAGS '-I../include -I../../include -I../src -I../../src'
set -xg EXPAT_CFLAGS -I/usr/local/include
set -xg EXPAT_LDFLAGS -L/usr/local/lib
set -xg ZITA_CPPFLAGS -I/usr/local/include
set -xg ZITA_LDFLAGS -L/usr/local/lib
./configure --with-test --enable-lv2 --enable-output-oss
|
mekanix/bin
|
configure-drumgizmo.sh
|
Shell
|
gpl-3.0
| 381 |
#!/bin/sh
case "$1" in
"nodemon")
yarn dev
;;
"start")
NODE_ENV=production node ./bin/www
;;
*)
exec "$@"
;;
esac
|
johnyb/yapdnsui
|
startup.sh
|
Shell
|
gpl-3.0
| 175 |
#!/bin/sh
#
# Edit the n-th byte of a file with a given value, falling back to hexdump/sed when xxd is not available
xxdrp () {
if which xxd > /dev/null
then
echo -n "$1" | xxd -r -p
else
echo -ne `echo -n "$1" | sed 's/\([0-9a-fA-F]\{2\}\)/\\\x\1/g'`
fi
}
xxdp () {
if which xxd > /dev/null
then
xxd -p "$1"
else
hexdump -ve '/1 "%02X"' "$1"
fi
}
usage() {
echo "Usage: $0 [OPTIONS] FILE"
echo -e "\t-i\t--index\t\tModify the byte at INDEX [Default: \"0\"]"
echo -e "\t-v\t--value\t\tSpecify the modified byte value as a hexadecimal string [Default: increase by 1]"
echo -e "\t-o\t--out\t\tName of the output file [Default: overwrite input file in place]"
echo -e "\n\t-h\t--help\t\tPrint this help text and exit"
}
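# Example (hypothetical file names): set the byte at index 4 of input.bin to 0xFF
# and write the result to output.bin:
#   ./edit_nth_byte.sh -i 4 -v FF -o output.bin input.bin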
###############################################################################
# Parameters
FILE=""
OUTPUT=""
INDEX="0"
VALUE=""
###############################################################################
# Parse arguments
ARGS=`getopt -o 'i:v:o:h' --long 'index:,value:,out:,help' -n "$0" -- "$@"`
#Bad arguments
if [ $? -ne 0 ]
then
	usage
	exit 1
fi
eval set -- "$ARGS"
while true
do
case $1 in
-i | --index )
INDEX="$2"
shift 2
;;
-v | --value )
VALUE="`echo $2 | cut -c1-2`"
shift 2
;;
-o | --out )
OUTPUT="$2"
shift 2
;;
-h | --help )
usage
exit 0
;;
-- )
shift
break
;;
-* ) # Should never get here; getopt should handle errors on its own
echo "$0: unrecognized option \"$1\"" 1>&2
usage
			exit 1
;;
* )
break
;;
esac
done
###############################################################################
if [ $# -eq 0 ]
then
echo "Missing FILE!"
usage
exit 1
else
FILE="$1"
fi
FILE_LEN=`wc -c $FILE | cut -d' ' -f1`
if [ "$INDEX" -gt "$FILE_LEN" ]
then
echo "Index out of bounds (INDEX=$INDEX, $FILE is ${FILE_LEN}B"
usage
exit 1
fi
# Overwrite input file if not otherwise specified
if [ -z "$OUTPUT" ]
then
OUTPUT="$FILE"
fi
# Increment the existing value if not otherwise specified
if [ -z "$VALUE" ]
then
EXISTING_VALUE="0x`xxdp "$FILE" | cut -c$((2*$INDEX+1)),$((2*$INDEX+2))`"
VALUE=`printf "%02X" $(($EXISTING_VALUE+1))`
fi
TMPOUT="`mktemp`"
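# Hex-dump the file, replace the (INDEX+1)-th two-character byte pair with VALUE, then convert the hex back to binary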
xxdrp `xxdp "$FILE" | sed "s/../$VALUE/$(($INDEX+1))"` > "$TMPOUT"
cat "$TMPOUT" > "$OUTPUT"
rm "$TMPOUT"
|
Fbonazzi/Scripts
|
parsing/edit_nth_byte.sh
|
Shell
|
gpl-3.0
| 2,358 |
cp resetdb.sh /tmp
su root -c "cd /tmp ; su postgres -c '/tmp/resetdb.sh'"
|
KevinSeghetti/survey
|
survey/resetdb-fedora.sh
|
Shell
|
gpl-3.0
| 76 |
#!/bin/bash
# si-adb-list.txt
tmpList1=$(mktemp --suffix=.txt)
python2 tools/addChecksum.py < si-adb-list.txt > $tmpList1
python2 tools/validateChecksum.py < $tmpList1
mv -f $tmpList1 si-adb-list.txt
# si-ad.txt
tmpList2=$(mktemp --suffix=.txt)
python2 tools/addChecksum.py < si-ad.txt > $tmpList2
python2 tools/validateChecksum.py < $tmpList2
mv -f $tmpList2 si-ad.txt
# si-ad-wehr.txt
tmpList3=$(mktemp --suffix=.txt)
python2 tools/addChecksum.py < si-ad-wehr.txt > $tmpList3
python2 tools/validateChecksum.py < $tmpList3
mv -f $tmpList3 si-ad-wehr.txt
# si-track.txt
tmpList4=$(mktemp --suffix=.txt)
python2 tools/addChecksum.py < si-track.txt > $tmpList4
python2 tools/validateChecksum.py < $tmpList4
mv -f $tmpList4 si-track.txt
|
vinctux/si-adblock
|
sign.sh
|
Shell
|
gpl-3.0
| 737 |
#!/bin/bash -ex
@import(userdata/common.sh)@
# Add GitHub as a known host
ssh-keyscan github.com >> /root/.ssh/known_hosts
# Setup deploy keys for Peach
@import(userdata/keys/github.peach.sh)@
cd /home/ubuntu
# Target description for Firefox
@import(userdata/targets/mozilla-inbound-linux64-asan.sh)@
# Checkout Peach
retry git clone -v --depth 1 git@peach:MozillaSecurity/peach.git
cd peach
pip -q install -r requirements.txt
# Checkout Peach Pits
rm -rf Pits
retry git clone -v --depth 1 git@pits:MozillaSecurity/pits.git
# Checkout script for fetching S3
wget https://gist.githubusercontent.com/posidron/41cb0f276c317ed77264/raw/b3dea77ca22d4040540ce7776f55796e1a2f0dd9/peachbot.py
python userdata.py -sync
# Checkout and setup FuzzManager
retry git clone -v --depth 1 https://github.com/MozillaSecurity/FuzzManager.git Peach/Utilities/FuzzManager
pip install -r Peach/Utilities/FuzzManager/requirements.txt
@import(userdata/loggers/fuzzmanager.sh)@
@import(userdata/loggers/fuzzmanager.binary.sh)@
# Ensure proper permissions
chown -R ubuntu:ubuntu /home/ubuntu
# Example:
# ./laniakea.py -create-on-demand -image-args min_count=1 max_count=1 -tags Name=peach -userdata userdata/peach.sh -userdata-macros TARGET_PIT=pits/targets/laniakea/firefox.xml FUZZING_PIT=pits/files/mp4/fmp4.xml FILE_SAMPLE_PATH=./fuzzdata/samples/mp4
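# Launch Peach in a detached screen session under Xvfb as the ubuntu user; the @...@ placeholders
# are substituted from the laniakea userdata macros shown in the example above.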
su -c "screen -t peach -dmS peach xvfb-run python ./peach.py -target @TARGET_PIT@ -pit @FUZZING_PIT@ -macro FileSampleMaxFileSize=-1 Strategy=rand.RandomMutationStrategy StrategyParams=SwitchCount=1000 MaxFieldsToMutate=$(($RANDOM % 50)) FileSamplePath=@FILE_SAMPLE_PATH@ WebSocketTemplate=@WEBSOCKET_TEMPLATE@ DataModel=@DATA_MODEL@" ubuntu
|
choller/laniakea
|
userdata/peach.pit.sh
|
Shell
|
mpl-2.0
| 1,691 |
run_schema() {
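	# Compare the checksum of the committed schema.json against a freshly generated schema;
	# any mismatch means "make rebuild-schema" was not run before committing.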
CUR_SHA=$(git show HEAD:apiserver/facades/schema.json | shasum -a 1 | awk '{ print $1 }')
TMP=$(mktemp /tmp/schema-XXXXX)
OUT=$(make --no-print-directory SCHEMA_PATH="${TMP}" rebuild-schema 2>&1)
OUT_CODE=$?
if [ $OUT_CODE -ne 0 ]; then
echo ""
echo "$(red 'Found some issues:')"
echo "${OUT}"
exit 1
fi
# shellcheck disable=SC2002
NEW_SHA=$(cat "${TMP}" | shasum -a 1 | awk '{ print $1 }')
if [ "${CUR_SHA}" != "${NEW_SHA}" ]; then
(>&2 echo "\\nError: facades schema is not in sync. Run 'make rebuild-schema' and commit source.")
exit 1
fi
}
test_schema() {
if [ "$(skip 'test_schema')" ]; then
echo "==> TEST SKIPPED: static schema analysis"
return
fi
(
set_verbosity
cd .. || exit
# Check for schema changes and ensure they've been committed
run_linter "run_schema"
)
}
|
wallyworld/juju
|
tests/suites/static_analysis/schema.sh
|
Shell
|
agpl-3.0
| 946 |
#!/bin/sh
set -e
set -x
mocha test/actions.js
mocha test/application-helpers.js
mocha test/config-helpers.js
mocha test/config-watcher.js
mocha test/cozy-light.js
mocha test/main-app-helper.js
mocha test/node-helpers.js
mocha test/npm-helpers.js
mocha test/plugin-helpers.js
mocha test/functional.js
NODE_ENV=need-all-logs mocha test/cli.js
|
cozy-labs/cozy-light
|
scripts/run-tests.sh
|
Shell
|
agpl-3.0
| 342 |
#!/bin/sh
# ==============================================================================
# POV-Ray v3.8
# portfolio.sh - render the POV-Ray portfolio
# ==============================================================================
# written November 2003 by Christoph Hormann
# updated 2017-09-10 for POV-Ray v3.8 by Christoph Lipka
# This file is part of POV-Ray and subject to the POV-Ray licence
# see POVLEGAL.DOC for details
# ------------------------------------------------------------------------------
# calling conventions:
#
# portfolio.sh [log] [-d scene_directory] [-o output_directory]
#
# output_directory: if specified all images are written to this directory
# if not specified the images are written into the scene
# file directories, if these are not writable they are
# written in the current directory.
# log: log all text output of POV-Ray to a file (log.txt)
# scene_directory:    if specified the portfolio scenes in this directory are
#                     rendered, otherwise the scene directory is determined from
# the main povray ini file
# (usually /usr/local/share/povray-X.Y/scenes/portfolio,
# where X.Y represents the first two fields of the version
# number, e.g. for v3.8.1 this would be 3.8).
# ==============================================================================
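# Example (hypothetical paths): log all output and render the portfolio scenes
# from a custom directory into ./out:
#   portfolio.sh log -d /usr/local/share/povray-3.8/scenes/portfolio -o ./out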
# test mode
#SCENE_DIR=.
VERSION=`povray --generation`
VER_DIR=povray-$VERSION
DEFAULT_DIR=/usr/local
SYSCONFDIR=$DEFAULT_DIR/etc
install_dir()
{
if [ -z "$POVINI" ] ; then
test -f "$SYSCONFDIR/povray.ini" && POVINI="$SYSCONFDIR/povray.ini"
test -f "$HOME/.povrayrc" && POVINI="$HOME/.povrayrc"
test -f "$SYSCONFDIR/povray/$VERSION/povray.ini" && POVINI="$SYSCONFDIR/povray/$VERSION/povray.ini"
test -f "$HOME/.povray/$VERSION/povray.ini" && POVINI="$HOME/.povray/$VERSION/povray.ini"
fi
if [ ! -z "$POVINI" ] ; then
# this is not a completely failsafe method but it should work in most cases
INSTALL_DIR=`grep -E -i "^library_path=.*share/$VER_DIR" "$POVINI" | head -n 1 | sed "s?[^=]*=\"*??;s?/share/$VER_DIR.*??"`
echo "$INSTALL_DIR"
fi
}
OPTIONS="$1 $2 $3 $4 $5"
case "$OPTIONS" in
*log* | *LOG* | *Log* )
DATE=`date`
LOG_FILE="log.txt"
echo "log file for POV-Ray v$VERSION sample scene render $DATE" > "$LOG_FILE"
;;
esac
test "$1" = "-d" && SCENE_DIR="$2"
test "$2" = "-d" && SCENE_DIR="$3"
test "$3" = "-d" && SCENE_DIR="$4"
test "$4" = "-d" && SCENE_DIR="$5"
if [ -z "$SCENE_DIR" ] ; then
INSTALL_DIR="`install_dir`"
if [ -z "$INSTALL_DIR" ] ; then
echo "------------------------------------------------------"
echo " the sample scene render script could not determine"
echo " the location where POV-Ray is installed. Make sure"
echo " POV-Ray v$VERSION has been correctly installed on this"
echo " computer. If you continue the script will try to"
echo " the scenes from the current directory."
echo ""
read -p "Press CTRL-C to abort or any other key to continue " -n 1
echo "------------------------------------------------------"
SCENE_DIR=.
else
SCENE_DIR="$INSTALL_DIR/share/$VER_DIR/scenes/portfolio"
fi
fi
if [ ! -d "$SCENE_DIR" ] ; then
echo "------------------------------------------------------"
echo " Your POV-Ray installation seems to be defective"
echo " so this script does not work."
echo " Try reinstalling POV-Ray."
echo "------------------------------------------------------"
read
exit
fi
if [ -d "$SCENE_DIR/portfolio" ] ; then
SCENE_DIR="$SCENE_DIR/portfolio"
fi
test "$1" = "-o" && OUTPUT_DIR="$2"
test "$2" = "-o" && OUTPUT_DIR="$3"
test "$3" = "-o" && OUTPUT_DIR="$4"
test "$4" = "-o" && OUTPUT_DIR="$5"
if [ -z "$OUTPUT_DIR" ] ; then
if [ -w "$SCENE_DIR" ] ; then
OUTPUT_DIR="$SCENE_DIR"
else
OUTPUT_DIR=.
fi
fi
if [ ! -d "$OUTPUT_DIR" ] ; then
mkdir -p "$OUTPUT_DIR"
fi
if [ "$SCENE_DIR" != "$OUTPUT_DIR" ] ; then
test -f "$SCENE_DIR/index.html" && cp -f "$SCENE_DIR/index.html" "$OUTPUT_DIR/"
test -f "$SCENE_DIR/readme.txt" && cp -f "$SCENE_DIR/readme.txt" "$OUTPUT_DIR/"
fi
CURR_DIR=`pwd`
SCENE_DIR=`echo "$SCENE_DIR" | sed "s?^\.?$CURR_DIR?"`
SCENE_DIR=`echo "$SCENE_DIR" | sed "s?^\([^/]\)?$CURR_DIR/\1?"`
FILE_LIST=`find "$SCENE_DIR/" -not -path "*__empty*" -name "*.ini" | sort`
cd "$OUTPUT_DIR"
#echo "$FILE_LIST"
#echo "-------"
#echo "$OUTPUT_DIR"
#echo "$SCENE_DIR"
if [ -z "$LOG_FILE" ] ; then
echo "$FILE_LIST" | xargs -n 1 povray +L$SCENE_DIR
else
echo "$FILE_LIST" | xargs -n 1 povray +L$SCENE_DIR 2>&1 | tee -a "$LOG_FILE"
fi
cd "$CURR_DIR"
|
LeForgeron/povray
|
unix/scripts/portfolio.sh
|
Shell
|
agpl-3.0
| 4,717 |
#!/bin/sh
DIR=$( cd "$(dirname "$0")" ; pwd -P )
. $DIR/env.sh
HOST=$1
TIMEOUT=60
FPRINT=`torify $SCRIPTDIR/ssh_fingerprint.py $HOST 2>/dev/null | grep ssh-rsa | cut -f 2 -d ' '`
if [ -n "$FPRINT" ]; then
echo "Got $FPRINT for $HOST"
$SCRIPTDIR/add_ssh_fingerprint.py "$HOST" "$FPRINT"
else
echo "No fingerprint for $HOST"
fi
|
dirtyfilthy/freshonions-torscraper
|
scripts/check_fingerprint.sh
|
Shell
|
agpl-3.0
| 328 |
#!/bin/bash
serverAddress=albertine
sed -i -E "s/(http:\/\/)[^:]+(:)/\1$serverAddress\2/" Quiz/React/quiz/public/index.html
sed -i -E "s/(io.connect\(')[^:]+(:)/\1$serverAddress\2/" Quiz/React/quiz/src/index.js
#sed -i -E "s/(http:\/\/)[^:]+(:)/\1$serverAddress\2/" Experiments/NodeJS/CookieSession3/views/app2.ejs
#sed -i -E "s/(io.connect\(')[^:]+(:)/\1$serverAddress\2/" Experiments/NodeJS/CookieSession3/views/app2.ejs
sed -i -E "s/(http:\/\/)[^:]+(:)/\1$serverAddress\2/" Map/NodeJS/Examples/Bug.js
sed -i -E "s/(http:\/\/)[^:]+(:)/\1$serverAddress\2/" Map/NodeJS/Examples/MapExample1.js
sed -i -E "s/(http:\/\/)[^:]+(:)/\1$serverAddress\2/" Map/NodeJS/Examples/MapExample2.js
sed -i -E "s/(http:\/\/)[^:]+(:)/\1$serverAddress\2/" Map/NodeJS/Examples/MinimalExample.js
|
ChrL73/Diaphnea
|
serverAddress.sh
|
Shell
|
agpl-3.0
| 773 |
#!/bin/sh
# schema/validate.sh
# Portions Copyright (C) 2013 Regents of the University of California.
#
# Based on the CCNx C Library by PARC.
# Copyright (C) 2011 Palo Alto Research Center, Inc.
#
# This work is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the
# Free Software Foundation.
# This work is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details. You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
SCHEMA=example.xsd
XML_EXAMPLES="2-integers-test01 complicated-test01 complicated-test02"
set -e
NDNxDIR=`dirname $0`/../../../../
echo == Make sure NDNx directories have been prepared
test -x $NDNxDIR/bin/ndn_xmltondnb || exit 1
export PATH=$NDNxDIR/bin:$PATH
test -f $NDNxDIR/schema/validation/XMLSchema.xsd || (cd $NDNxDIR/schema/validation && make test)
echo == Creating symlinks to access external schemata
EXTSCHEMA=`(cd $NDNxDIR/schema/validation && echo *.xsd)`
for x in $EXTSCHEMA; do
test -f $NDNxDIR/schema/validation/$x && \
rm -f $x && \
ln -s $NDNxDIR/schema/validation/$x
done
echo == Validating $SCHEMA
xmllint --schema XMLSchema.xsd --noout $SCHEMA
ValidateXML () {
local X
X="$1"
echo == Normalizing ${X}.xml to use base64Binary
# Note for this purpose it does not matter that ndn_ndnbtoxml is ignorant of
# the project-specific DTAG values, since we're not trying to do anything
# with the intermediate ndnb except to turn it right back into text.
cat ${X}.xml | ndn_xmltondnb -w - | ndn_ndnbtoxml -b - | xmllint --format - > ${X}-base64.xml
echo == Validating ${X}
xmllint --schema $SCHEMA --noout ${X}-base64.xml
}
for i in $XML_EXAMPLES; do
ValidateXML $i
done
echo == Yippee\!
|
gujianxiao/gatewayForMulticom
|
apps/examples/ndnb-Java/schema/validate.sh
|
Shell
|
lgpl-2.1
| 2,093 |
#!/bin/bash
chromium -app="http://pocket.dict.cc"
|
artemisclyde/dotfiles
|
scripts/dict.sh
|
Shell
|
lgpl-3.0
| 50 |
$ docker-compose -f docker-compose-nginx.yml up
|
picodotdev/blog-ejemplos
|
ELK/docker-compose-nginx.sh
|
Shell
|
unlicense
| 47 |
php ./vendor/bin/protobuf --include-descriptors -i . -o ./src/ ./protobuf.proto
|
hainuo/SwooleDistributed
|
protobuf.sh
|
Shell
|
apache-2.0
| 79 |
#!/bin/bash
sleep 5
PASS="$MYSQL_ROOT_PASSWORD"
mysql -u root -p"$PASS" -e "CREATE DATABASE IF NOT EXISTS shopsaloon"
mysql -u root -p"$PASS" shopsaloon < /sql/tom.sql
|
tomrijntjes/robopreneur
|
init/init_db.sh
|
Shell
|
apache-2.0
| 168 |
#!/bin/bash
rm -rf ../../workspace/Game/src/*
cp -rf Game/src/* ../../workspace/Game/src
cp -rf Game/lib/* ../../workspace/Game/lib
|
Piasy/QQTang
|
src/server/game_install.sh
|
Shell
|
apache-2.0
| 132 |
#!/bin/bash
COMMAND=${1:-start}
if [[ ! -e /data/config.js ]]; then
echo "Initializing config.js..."
cp /ghost/config.docker.js /data/config.js
fi
if [[ ! -e /data/content ]]; then
echo "Initializing content..."
rsync -avz /ghost/content/ /data/content
fi
ln -sf /data/config.js /ghost/config.js
chown -R ghost:ghost /ghost /data
case $COMMAND in
init)
echo "init requested; exiting"
;;
start)
# Run ghost
npm start --production
;;
*)
echo "unknown command; try 'init' or 'start'"
;;
esac
|
kylelemons/dockerfiles
|
ghost/run.sh
|
Shell
|
apache-2.0
| 535 |
#!/bin/bash
# install new gems and make new migrations
cd /vagrant; bundle install;
cd /vagrant; bundle exec rake db:migrate
# Commands required to ensure correct docker containers are started when the vm is rebooted.
sudo docker start postgres
mailcatcher --ip 0.0.0.0
|
ylosix/ylosix
|
vagrant/start.sh
|
Shell
|
apache-2.0
| 273 |
#----------------------------------------------------------------------------------------
#
# Package : openshift-knative/serverless-operator
# Version : v1.7.1
# Source repo : https://github.com/openshift-knative/serverless-operator
# Tested on : RHEL 7.6
# Script License : Apache License Version 2.0
# Maintainer : Pratham Murkute <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# Prerequisites:
# https://github.com/openshift-knative/serverless-operator/tree/release-1.7#requirements
# Docker version 17.05 or higher must be installed
# Go version 1.12.1 or higher must be installed
#
#----------------------------------------------------------------------------------------
#!/bin/bash
# environment variables & setup
mkdir -p $HOME/go
mkdir -p $HOME/go/src
mkdir -p $HOME/go/bin
mkdir -p $HOME/go/pkg
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
#export GOROOT=/usr/local/go
#export PATH=$PATH:$GOROOT/bin
#export GOFLAGS="-mod=vendor"
#export GO111MODULE=auto
echo $GOPATH && echo $PATH
# build docker image for openshift/origin-release
mkdir -p $GOPATH/src/github.com/openshift && cd $_ && pwd
git clone https://github.com/openshift/release.git
cd release && pwd
git checkout fa8e7dc
cd projects/origin-release/golang-1.13/ && pwd
docker build -t openshift/origin-release:golang-1.13 .
# build docker image for openshift/origin-base
mkdir -p $GOPATH/src/github.com/openshift && cd $_ && pwd
git clone https://github.com/openshift/images.git
cd images && pwd
git checkout 98fd27e
cd base && pwd
docker build -t openshift/origin-base:latest -f ./Dockerfile.rhel .
# create a local registry to push images to
docker run -it -d --name registry -p 5000:5000 ppc64le/registry:2
export DOCKER_REPO_OVERRIDE=localhost:5000/openshift
# push the images
docker tag openshift/origin-release:golang-1.13 $DOCKER_REPO_OVERRIDE/origin-release:golang-1.13
docker push $DOCKER_REPO_OVERRIDE/origin-release:golang-1.13
docker tag openshift/origin-base:latest $DOCKER_REPO_OVERRIDE/origin-base:latest
docker push $DOCKER_REPO_OVERRIDE/origin-base:latest
# build serverless-operator images
mkdir -p $GOPATH/src/github.com/openshift-knative && cd $_ && pwd
git clone https://github.com/openshift-knative/serverless-operator.git
cd serverless-operator && pwd
git checkout -b v1.7.1 fd37d17 # no separate tag for v1.7.1
git branch -vv
make images
|
ppc64le/build-scripts
|
o/openshift-knative/serverless-operator-v1.7.1_rhel_7.6.sh
|
Shell
|
apache-2.0
| 2,649 |
#!/bin/bash
# Issues with deleting the NS (Terminating error)
# https://github.com/kubernetes/kubernetes/issues/19317
#
###############################################################################
# Copyright (c) 2018 Red Hat Inc
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# http://www.eclipse.org/legal/epl-2.0
#
# SPDX-License-Identifier: EPL-2.0
###############################################################################
set -eo pipefail
die() { echo "$*" 1>&2 ; exit 1; }
need() {
which "$1" &>/dev/null || die "Binary '$1' is missing but required"
}
# checking pre-reqs
need "jq"
need "curl"
need "kubectl"
PROJECT="$1"
shift
test -n "$PROJECT" || die "Missing arguments: kill-ns <namespace>"
kubectl proxy &>/dev/null &
PROXY_PID=$!
killproxy () {
kill $PROXY_PID
}
trap killproxy EXIT
sleep 1 # give the proxy a second
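# Strip the finalizers from the namespace spec and PUT the result to the namespace's
# /finalize endpoint through the local kubectl proxy, forcing a namespace stuck in
# Terminating to be removed.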
kubectl get namespace "$PROJECT" -o json | jq 'del(.spec.finalizers[] | select("kubernetes"))' | curl -s -k -H "Content-Type: application/json" -X PUT -o /dev/null --data-binary @- http://localhost:8001/api/v1/namespaces/$PROJECT/finalize && echo "Killed namespace: $PROJECT"
|
CollegeBoreal/Tutoriel
|
2.MicroServices/3.Orchestration/1.Kubernetes/C.Cluster/0.Local/6.K8s/kill-kube-ns.sh
|
Shell
|
apache-2.0
| 1,337 |
#!/bin/bash
ROOT=$(readlink -f $0)
BUILD_ROOT=`dirname $ROOT`
KUBE_ROOT="${BUILD_ROOT}/kubernetes"
source "${BUILD_ROOT}/default-config.sh"
function create {
kubectl create -f $@
}
function corrupt-database {
echo "Corrupting database"
}
function damage_data {
(cd "${HOME}/halcyon-vagrant-kubernetes"
rand_node=$(( ( RANDOM % 3 ) + 1 ))
rand_num=$(( ( RANDOM % 7 ) + 1 ))
ssh -F /tmp/kube-2 vagrant@kube2 "sudo sed -i 's/^seqno:.*/seqno: ${rand_num}/' /var/lib/nave/mariadb-${rand_node}/grastate.dat"
)
}
function destroy-galera-cluster {
echo "Destroying Galera cluster"
if [[ "${!DAMAGE_DATA[@]}" ]]; then
damage_data
fi
pods=$(kubectl get pods -o name -n vessels | grep -v bootstrap | cut -d '/' -f 2)
for pod in ${pods[@]}; do
kubectl --kubeconfig="${HOME}/.kube/config" delete pods $pod -n vessels
done
}
function recover-galera-cluster {
# User running a vessel recovery
echo "Recovering Galera cluster"
# Vessel will find newest DB
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${BUILD_ROOT}/vessels/mariadb/mariadb-vessel.yaml"
(cd "${HOME}/halcyon-vagrant-kubernetes"
vagrant ssh-config > /tmp/vagrant-ssh
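    # Split the combined ssh-config into one file per host: /tmp/kube-1, /tmp/kube-2, ...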
awk -v RS= '{print > ("/tmp/kube-" NR)}' /tmp/vagrant-ssh
i=$(ssh -F /tmp/kube-2 vagrant@kube2 "cat /var/lib/nave/vessel-data/newest-db")
echo $i
) &>/tmp/test
newest=`cat /tmp/test`
kubectl --kubeconfig="${HOME}/.kube/config" delete rc mariadb-vessel -n vessels
service_num=$(echo $newest | cut -f 2 -d '-')
echo "Setting safe_to_bootstrap in grastate.dat for mariadb-${service_num}"
(cd "${HOME}/halcyon-vagrant-kubernetes"
ssh -F /tmp/kube-2 vagrant@kube2 "sudo sed -i 's/safe_to_bootstrap: 0/safe_to_bootstrap: 1/' /var/lib/nave/mariadb-${service_num}/grastate.dat"
)
echo "Bootstrapping cluster with pod ${newest}"
kubectl --kubeconfig="${HOME}/.kube/config" delete pods "${newest}" -n vessels
sleep 20
pods=$(kubectl get pods -o name -n vessels | grep -v bootstrap | grep -v vessel | grep -v mariadb-"${service_num}" | cut -d '/' -f 2)
echo "Rejoining the rest of the pods to the cluster"
for pod in "${pods[@]}"; do
kubectl --kubeconfig="${HOME}/.kube/config" delete pods $pod -n vessels
done
echo "----------------------"
echo " Cluster recovered! "
echo "----------------------"
}
function bootstrap-mariadb {
echo "Bootstrapping up Galera cluster"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-service-1.yaml"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-pv-1.yaml"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-pvc-1.yaml"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-bootstrap.yaml"
}
function setup-mariadb {
echo "Setting up Galera cluster"
for cluster_count in $(seq 1 $CLUSTER_SIZE); do
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-service-${cluster_count}.yaml"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-pv-${cluster_count}.yaml"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-pvc-${cluster_count}.yaml"
kubectl --kubeconfig="${HOME}/.kube/config" create -f "${KUBE_ROOT}/mariadb/mariadb-pod-${cluster_count}.yaml"
done
}
function clean {
kubectl --kubeconfig="${HOME}/.kube/config" delete job mariadb-bootstrap -n vessels
for cluster_count in $(seq 1 $CLUSTER_SIZE); do
kubectl --kubeconfig="${HOME}/.kube/config" delete service "mariadb-${cluster_count}" -n vessels
kubectl --kubeconfig="${HOME}/.kube/config" delete pv "mariadb-${cluster_count}" -n vessels
kubectl --kubeconfig="${HOME}/.kube/config" delete pvc "mariadb-${cluster_count}" -n vessels
kubectl --kubeconfig="${HOME}/.kube/config" delete rc "mariadb-${cluster_count}" -n vessels
done
(cd "${HOME}/halcyon-vagrant-kubernetes"
vagrant ssh-config > /tmp/vagrant-ssh
awk -v RS= '{print > ("/tmp/kube-" NR)}' /tmp/vagrant-ssh
ssh -F /tmp/kube-2 vagrant@kube2 "sudo rm -rf /var/lib/nave/*"
)
}
case "$2" in
'--damage-data' )
DAMAGE_DATA=''
;;
esac
case "$1" in
'setup' )
setup-mariadb
;;
'destroy' )
destroy-galera-cluster
;;
'recover' )
recover-galera-cluster
;;
'bootstrap' )
bootstrap-mariadb
;;
'clean' )
clean
;;
'-h' )
echo "test-pipeline.sh"
echo "This script is designed to test vessels by simulating cluster events."
echo ""
echo " bootstrap - Bootstrap MariaDB cluster"
echo " setup - Setup a MariaDB Galera cluster"
echo " destroy - Destroy the cluster sending into a damaged state."
echo " recover - Recover from a damaged cluster"
echo " clean - Delete everything from a running cluster"
echo " -h - Help menu"
;;
esac
|
rthallisey/nave
|
test-pipline.sh
|
Shell
|
apache-2.0
| 5,273 |
#!/usr/bin/env bash
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Runs all the tests. Currently includes FE tests, BE unit tests, and the end-to-end
# test suites.
# Exit on reference to uninitialized variables and non-zero exit codes
set -u
set -e
. $IMPALA_HOME/bin/set-pythonpath.sh
# Allow picking up strategy from environment
: ${EXPLORATION_STRATEGY:=core}
NUM_ITERATIONS=1
# Parametrized Test Options
# Run FE Tests
: ${FE_TEST:=true}
# Run Backend Tests
: ${BE_TEST:=true}
# Run End-to-end Tests
: ${EE_TEST:=true}
: ${EE_TEST_FILES:=}
# Run JDBC Test
: ${JDBC_TEST:=true}
# Run Cluster Tests
: ${CLUSTER_TEST:=true}
# parse command line options
while getopts "e:n:" OPTION
do
case "$OPTION" in
e)
EXPLORATION_STRATEGY=$OPTARG
;;
n)
NUM_ITERATIONS=$OPTARG
;;
?)
echo "run-all-tests.sh [-e <exploration_strategy>] [-n <num_iters>]"
echo "[-e] The exploration strategy to use. Default exploration is 'core'."
echo "[-n] The number of times to run the tests. Default is 1."
exit 1;
;;
esac
done
LOG_DIR=${IMPALA_TEST_CLUSTER_LOG_DIR}/query_tests
mkdir -p ${LOG_DIR}
# Enable core dumps
ulimit -c unlimited
echo "Split and assign HBase regions"
# To properly test HBase integeration, HBase regions are split and assigned by this
# script. Restarting HBase will change the region server assignment. Run split-hbase.sh
# before running any test.
${IMPALA_HOME}/testdata/bin/split-hbase.sh
for i in $(seq 1 $NUM_ITERATIONS)
do
# Preemptively force kill impalads and the statestore to clean up any running instances.
# The BE unit tests cannot run when impalads are started.
${IMPALA_HOME}/bin/start-impala-cluster.py --kill_only --force
if [[ "$BE_TEST" = true ]]; then
# Run backend tests.
${IMPALA_HOME}/bin/run-backend-tests.sh
fi
# Increase the admission controller max_requests to prevent builds failing due to
# queries not being closed.
${IMPALA_HOME}/bin/start-impala-cluster.py --log_dir=${LOG_DIR} --cluster_size=3\
--impalad_args=--default_pool_max_requests=500
# Run some queries using run-workload to verify run-workload has not been broken.
${IMPALA_HOME}/bin/run-workload.py -w tpch --num_clients=2 --query_names=TPCH-Q1\
--table_format=text/none --exec_options="disable_codegen:False"
if [[ "$EE_TEST" = true ]]; then
# Run end-to-end tests. The EXPLORATION_STRATEGY parameter should only apply to the
# functional-query workload because the larger datasets (ex. tpch) are not generated
# in all table formats.
${IMPALA_HOME}/tests/run-tests.py -x --exploration_strategy=core \
--workload_exploration_strategy=functional-query:$EXPLORATION_STRATEGY \
${EE_TEST_FILES}
fi
if [[ "$FE_TEST" = true ]]; then
# Run JUnit frontend tests
# Requires a running impalad cluster because some tests (such as DataErrorTest and
    # JdbcTest) run queries against an impala cluster.
cd $IMPALA_FE_DIR
mvn test
fi
if [[ "$JDBC_TEST" = true ]]; then
# Run the JDBC tests with background loading disabled. This is interesting because
# it requires loading missing table metadata.
${IMPALA_HOME}/bin/start-impala-cluster.py --log_dir=${LOG_DIR} --cluster_size=3 \
--catalogd_args=--load_catalog_in_background=false
mvn test -Dtest=JdbcTest
fi
if [[ "$CLUSTER_TEST" = true ]]; then
# Run the custom-cluster tests after all other tests, since they will restart the
# cluster repeatedly and lose state.
# TODO: Consider moving in to run-tests.py.
${IMPALA_HOME}/tests/run-custom-cluster-tests.sh
fi
# Finally, run the process failure tests.
# Disabled temporarily until we figure out the proper timeouts required to make the test
# succeed.
# ${IMPALA_HOME}/tests/run-process-failure-tests.sh
done
|
andybab/Impala
|
bin/run-all-tests.sh
|
Shell
|
apache-2.0
| 4,354 |
# -----------------------------------------------------------------------------
#
# Package : ghodss/yaml
# Version : 73d445a93680fa1a78ae23a5839bad48f32ba1ee
# Source repo : https://github.com/ghodss/yaml
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=ghodss/yaml
PACKAGE_VERSION=73d445a93680fa1a78ae23a5839bad48f32ba1ee
PACKAGE_URL=https://github.com/ghodss/yaml
yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq wget gcc-c++
wget https://golang.org/dl/go1.16.1.linux-ppc64le.tar.gz && tar -C /bin -xf go1.16.1.linux-ppc64le.tar.gz && mkdir -p /home/tester/go/src /home/tester/go/bin /home/tester/go/pkg
export PATH=$PATH:/bin/go/bin
export GOPATH=/home/tester/go
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
export PATH=$GOPATH/bin:$PATH
export GO111MODULE=on
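# Fallback chain: try "go get -u" at the pinned commit, then without -u, and finally the
# master branch; each attempt records a pass/fail marker and a version_tracker line under
# /home/tester/output.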
function test_with_master_without_flag_u(){
echo "Building $PACKAGE_PATH with master branch"
export GO111MODULE=auto
if ! go get -d -t $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/install_fails
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_Fails" > /home/tester/output/version_tracker
exit 0
else
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with master branch without flag -u"
# Ensure go.mod file exists
go mod init
        if ! go test ./...; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_fails
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails" > /home/tester/output/version_tracker
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
fi
}
function test_with_master(){
echo "Building $PACKAGE_PATH with master"
export GO111MODULE=auto
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_with_master_without_flag_u
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master_without_flag_u
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
}
function test_without_flag_u(){
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION and without -u flag"
if ! go get -d -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_with_master
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
}
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION"
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_without_flag_u
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
|
ppc64le/build-scripts
|
g/ghodss__yaml/ghodss__yaml_rhel_8.3.sh
|
Shell
|
apache-2.0
| 5,091 |
#!/bin/bash -eu
#
# Copyright 2017 Open GEE Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Checks status of tutorial assets built by run_tutorial.sh script
set -x
set -e
ASSET_ROOT="/usr/local/google/gevol_test/assets"
echo "Using asset root: $ASSET_ROOT"
/opt/google/bin/gequery Tutorial/Databases/SFDb_3d --status
/opt/google/bin/gequery Tutorial/Databases/SFDb_3d_TM --status
/opt/google/bin/gequery Tutorial/Databases/SF_2d_Merc --status
/opt/google/bin/gequery Tutorial/Databases/SF_2d_Merc_With_Flat_Imagery --status
|
tst-ppenev/earthenterprise
|
earth_enterprise/src/fusion/tools/gee_test/status.sh
|
Shell
|
apache-2.0
| 1,038 |
#!/bin/bash
#********************************************************************************
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
#********************************************************************************
# Configure extension PATH
SCRIPTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Source git_util sh file
source ${SCRIPTDIR}/git_util.sh
# Get common initialization project
pushd . >/dev/null
cd $SCRIPTDIR
git_retry clone https://github.com/jparra5/dra_utilities.git utilities
popd >/dev/null
# Call common initialization
source $SCRIPTDIR/utilities/init.sh
|
jparra5/dra_upload_results
|
_init.sh
|
Shell
|
apache-2.0
| 1,095 |
#!/bin/sh -xe
AUTOCONF_FILES="Makefile.in aclocal.m4 ar-lib autom4te.cache compile \
config.guess config.h.in config.sub configure depcomp install-sh \
ltmain.sh missing *libtool test-driver"
case $1 in
clean)
test -f Makefile && make maintainer-clean
for file in ${AUTOCONF_FILES}; do
find . -name "$file" -print0 | xargs -0 -r rm -rf
done
exit 0
;;
esac
autoreconf -i
# shellcheck disable=SC2016
echo 'Run "./configure ${CONFIGURE_FLAGS} && make"'
|
openbmc/phosphor-settingsd
|
bootstrap.sh
|
Shell
|
apache-2.0
| 525 |
enable_interslicetransfer() {
moni call -v .gorfx.SetupOfferType "\
offerType:'http://gndms.zib.de/ORQTypes/InterSliceTransfer'; \
orqType:'{http://gndms.zib.de/c3grid/types}InterSliceTransferORQT'; \
resType:'{http://gndms.zib.de/c3grid/types}InterSliceTransferResultT'; \
calcFactory:'de.zib.gndms.GORFX.action.InterSliceTransferORQFactory'; \
taskActionFactory:'de.zib.gndms.GORFX.action.InterSliceTransferActionFactory'; \
mode:'$MODE'"
}
|
zibhub/GNDMS
|
scripts/features/enable-interslicetransfer.sh
|
Shell
|
apache-2.0
| 449 |
#!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
DB="$TEST_NAME"
run_sql "CREATE DATABASE $DB;"
run_sql "CREATE TABLE $DB.usertable1 ( \
YCSB_KEY varchar(64) NOT NULL, \
FIELD0 varchar(10) DEFAULT NULL, \
PRIMARY KEY (YCSB_KEY) \
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
for i in `seq 1 100`
do
run_sql "INSERT INTO $DB.usertable1 VALUES (\"a$i\", \"bbbbbbbbbb\");"
done
# backup full
echo "backup start..."
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB"
# Test debug decode
run_br -s "local://$TEST_DIR/$DB" debug decode --field "Schemas"
run_br -s "local://$TEST_DIR/$DB" debug decode --field "EndVersion"
# Ensure compatibility
run_br -s "local://$TEST_DIR/$DB" validate decode --field "end-version"
# Test redact-log and redact-info-log compatibility
run_br -s "local://$TEST_DIR/$DB" debug decode --field "Schemas" --redact-log=true
run_br -s "local://$TEST_DIR/$DB" debug decode --field "Schemas" --redact-info-log=true
# Test validate backupmeta
run_br debug backupmeta validate -s "local://$TEST_DIR/$DB"
run_br debug backupmeta validate -s "local://$TEST_DIR/$DB" --offset 100
# Test validate checksum
run_br validate checksum -s "local://$TEST_DIR/$DB"
# Test that validate checksum detects a corrupted SST file
for sst in $TEST_DIR/$DB/*.sst; do
echo "corrupted!" >> $sst
echo "$sst corrupted!"
break
done
corrupted=0
run_br validate checksum -s "local://$TEST_DIR/$DB" || corrupted=1
if [ "$corrupted" -ne "1" ];then
echo "TEST: [$TEST_NAME] failed!"
exit 1
fi
# backup full with ratelimit = 1 to make sure this backup task won't finish quickly
echo "backup start to test lock file"
PPROF_PORT=6080
GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/utils/determined-pprof-port=return($PPROF_PORT)" \
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/lock" \
--remove-schedulers \
--ratelimit 1 \
--ratelimit-unit 1 \
--concurrency 4 &> $TEST_DIR/br-other-stdout.log & # It will be killed after test finish.
# record last backup pid
_pid=$!
# give the former backup some time to write down lock file (and initialize signal listener).
sleep 1
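# signal 10 (SIGUSR1) asks the running BR process to expose its pprof server on the port
# pinned by the failpoint above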
pkill -10 -P $_pid
echo "starting pprof..."
# give the former backup some time to write down lock file (and start pprof server).
sleep 1
run_curl "https://localhost:$PPROF_PORT/debug/pprof/trace?seconds=1" &>/dev/null
echo "pprof started..."
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | grep '"disable": false'
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."enable-location-replacement"' | grep "false"
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."max-pending-peer-count"' | grep "2147483647"
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."max-merge-region-size"' | grep -E "^0$"
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."max-merge-region-keys"' | grep -E "^0$"
backup_fail=0
echo "another backup start expect to fail due to last backup add a lockfile"
run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/lock" --concurrency 4 || backup_fail=1
if [ "$backup_fail" -ne "1" ];then
echo "TEST: [$TEST_NAME] test backup lock file failed!"
exit 1
fi
# check is there still exists scheduler not in pause.
pause_schedulers=$(run_curl https://$PD_ADDR/pd/api/v1/schedulers?status="paused" | grep "scheduler" | wc -l)
if [ "$pause_schedulers" -lt "3" ];then
echo "TEST: [$TEST_NAME] failed because paused scheduler are not enough"
exit 1
fi
if ps -p $_pid > /dev/null
then
echo "$_pid is running"
# kill last backup progress (Don't send SIGKILL, or we might stuck PD in no scheduler state.)
pkill -P $_pid
echo "$_pid is killed @ $(date)"
else
echo "TEST: [$TEST_NAME] test backup lock file failed! the last backup finished"
exit 1
fi
# make sure we won't stuck in non-scheduler state, even we send a SIGTERM to it.
# give enough time to BR so it can gracefully stop.
sleep 30
if run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '[."schedulers-v2"][0][0]' | grep -q '"disable": true'
then
echo "TEST: [$TEST_NAME] failed because scheduler has been removed"
exit 1
fi
default_pd_values='{
"max-merge-region-keys": 200000,
"max-merge-region-size": 20,
"leader-schedule-limit": 4,
"region-schedule-limit": 2048
}'
for key in $(echo $default_pd_values | jq 'keys[]'); do
if ! run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq ".[$key]" | grep -q $(echo $default_pd_values | jq ".[$key]"); then
run_curl https://$PD_ADDR/pd/api/v1/config/schedule
echo "[$TEST_NAME] failed due to PD config isn't reset after restore"
exit 1
fi
done
# check is there still exists scheduler in pause.
pause_schedulers=$(curl https://$PD_ADDR/pd/api/v1/schedulers?status="paused" | grep "scheduler" | wc -l)
# There shouldn't be any paused schedulers since BR gracefully shut down.
if [ "$pause_schedulers" -ne "0" ];then
echo "TEST: [$TEST_NAME] failed because paused scheduler has changed"
exit 1
fi
pd_settings=6
# balance-region scheduler enabled
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."schedulers-v2"[] | {disable: .disable, type: ."type" | select (.=="balance-region")}' | grep '"disable": false' || ((pd_settings--))
# balance-leader scheduler enabled
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."schedulers-v2"[] | {disable: .disable, type: ."type" | select (.=="balance-leader")}' | grep '"disable": false' || ((pd_settings--))
# hot region scheduler enabled
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."schedulers-v2"[] | {disable: .disable, type: ."type" | select (.=="hot-region")}' | grep '"disable": false' || ((pd_settings--))
# location replacement enabled
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."enable-location-replacement"' | grep "true" || ((pd_settings--))
# we need reset pd config to default
# until pd has the solution to temporary set these scheduler/configs.
run_br validate reset-pd-config-as-default --pd $PD_ADDR
# max-merge-region-size set to default 20
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."max-merge-region-size"' | grep "20" || ((pd_settings--))
# max-merge-region-keys set to default 200000
run_curl https://$PD_ADDR/pd/api/v1/config/schedule | jq '."max-merge-region-keys"' | grep "200000" || ((pd_settings--))
if [ "$pd_settings" -ne "6" ];then
echo "TEST: [$TEST_NAME] test validate reset pd config failed!"
exit 1
fi
# Test version
run_br --version
run_br -V
run_sql "DROP DATABASE $DB;"
|
c4pt0r/tidb
|
br/tests/br_other/run.sh
|
Shell
|
apache-2.0
| 7,027 |
#!/bin/bash
# Fail on any error
set -eo pipefail
# Display commands being run
set -x
# Only run the linter on go1.11, because:
# - It needs type aliases (so we can't use anything less than 1.9).
# - It only has to run once per CI (so we just have to pick 1 version).
# - It runs out of memory in go 1.12 https://github.com/dominikh/go-tools/issues/419.
if [[ `go version` != *"go1.11"* ]]; then
exit 0
fi
go install \
github.com/golang/protobuf/protoc-gen-go \
golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck
# Fail if a dependency was added without the necessary go.mod/go.sum change
# being part of the commit.
go mod tidy
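# "(! read)" succeeds only when the diff is empty: read fails on empty input and "!" inverts it,
# so any unexpected go.mod/go.sum diff fails the build.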
git diff go.mod | tee /dev/stderr | (! read)
git diff go.sum | tee /dev/stderr | (! read)
# Easier to debug CI.
pwd
# Look at all .go files (ignoring .pb.go files) and make sure they have a Copyright. Fail if any don't.
git ls-files "*[^.pb].go" | xargs grep -L "\(Copyright [0-9]\{4,\}\)" 2>&1 | tee /dev/stderr | (! read)
gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
goimports -l . 2>&1 | tee /dev/stderr | (! read)
# Runs the linter. Regrettably the linter is very simple and does not provide the ability to exclude rules or files,
# so we rely on inverse grepping to do this for us.
#
# Piping a bunch of greps may be slower than `grep -vE (thing|otherthing|anotherthing|etc)`, but since we have a good
# amount of things we're excluding, it seems better to optimize for readability.
#
# Note: since we added the linter after-the-fact, some of the ignored errors here are because we can't change an
# existing interface. (as opposed to us not caring about the error)
golint ./... 2>&1 | ( \
grep -vE "gen\.go" | \
grep -vE "receiver name [a-zA-Z]+[0-9]* should be consistent with previous receiver name" | \
grep -vE "exported const AllUsers|AllAuthenticatedUsers|RoleOwner|SSD|HDD|PRODUCTION|DEVELOPMENT should have comment" | \
grep -v "exported func Value returns unexported type pretty.val, which can be annoying to use" | \
grep -v "ExecuteStreamingSql" | \
grep -vE "pubsub\/pstest\/fake\.go.+should have comment or be unexported" | \
grep -v "ClusterId" | \
grep -v "InstanceId" | \
grep -v "firestore.arrayUnion" | \
grep -v "firestore.arrayRemove" | \
grep -v "maxAttempts" | \
grep -v "UptimeCheckIpIterator" | \
grep -vE "apiv[0-9]+" | \
grep -v "ALL_CAPS" | \
grep -v "go-cloud-debug-agent" | \
grep -v "mock_test" | \
grep -v "internal/testutil/funcmock.go" | \
grep -v "internal/backoff" | \
grep -v "internal/trace" | \
grep -v "a blank import should be only in a main or test package" | \
grep -v "method ExecuteSql should be ExecuteSQL" | \
grep -vE "\.pb\.go:" || true) | tee /dev/stderr | (! read)
# TODO(deklerk): It doesn't seem like it, but is it possible to glob both before
# and after the colon? Then we could do *go-cloud-debug-agent*:*
staticcheck -go 1.9 -ignore '
*:S1007
*:SA1019
cloud.google.com/go/firestore/internal/doc-snippets.go:*
cloud.google.com/go/functions/metadata/metadata_test.go:SA1012
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/dwarf/frame.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/dwarf/typeunit.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/dwarf/const.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/dwarf/line.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/server/server.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/server/dwarf.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/server/eval.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/server/value.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/elf/file.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclntab_test.go:*
cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go:*
cloud.google.com/go/translate/internal/translate/v2/translate-nov2016-gen.go:*
cloud.google.com/go/storage/bucket.go:S1002
cloud.google.com/go/spanner/value.go:S1025
cloud.google.com/go/pubsub/integration_test.go:S1012
cloud.google.com/go/internal/fields/fold.go:S1008
cloud.google.com/go/httpreplay/internal/proxy/debug.go:*
cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go:ST1005
cloud.google.com/go/bigtable/cmd/cbt/cbt.go:ST1005
cloud.google.com/go/asset/v1beta1/doc.go:*
cloud.google.com/go/asset/v1beta1/mock_test.go:*
cloud.google.com/go/spanner/value_test.go:S1019
cloud.google.com/go/bigtable/reader.go:S1002
cloud.google.com/go/internal/btree/btree.go:U1000
cloud.google.com/go/container/apiv1/mock_test.go:*
' ./...
|
kubernetes-sigs/multi-tenancy
|
tenant/vendor/cloud.google.com/go/internal/kokoro/vet.sh
|
Shell
|
apache-2.0
| 4,871 |
#!/bin/sh
DEPS_DIR="${TRAVIS_BUILD_DIR}/go1.13"
mkdir ${DEPS_DIR} && pushd ${DEPS_DIR}
GO="go1.13.9.linux-amd64"
GO_TAR="${GO}.tar.gz"
travis_retry wget --no-check-certificate https://dl.google.com/go/${GO_TAR}
echo "72a391f8b82836adfd4be8d9d554ffb1 ${GO_TAR}" > go_md5.txt
md5sum -c go_md5.txt
tar -xf ${GO_TAR}
export GOROOT=${DEPS_DIR}/go
export PATH=$GOROOT/bin:$PATH
popd
|
google/libsxg
|
.travis/install_go.sh
|
Shell
|
apache-2.0
| 378 |
set -e
# Backs up one directory at a time, looking for one called "flutter".
function getFlutterPath() {
local path=".."
local counter=0
while [[ "${counter}" -lt 10 ]]; do
[ -d "${path}/flutter" ] && echo "${path}/flutter" && return 0
let counter++
path="${path}/.."
done
}
declare -a PROJECT_NAMES=(
"gallery" \
"crane_proto" \
"google_fonts" \
)
for PROJECT_NAME in "${PROJECT_NAMES[@]}"
do
echo "== Testing '${PROJECT_NAME}' on Flutter's $FLUTTER_VERSION channel =="
pushd "${PROJECT_NAME}"
localSdkPath=$(getFlutterPath)
if [ -z "$localSdkPath" ]
then
echo "Failed to find Flutter SDK for '${PROJECT_NAME}'."
exit 1
fi
# Run the analyzer to find any static analysis issues.
"${localSdkPath}/bin/flutter" analyze
# Run the formatter on all the dart files to make sure everything's linted.
"${localSdkPath}/bin/flutter" format -n --set-exit-if-changed .
# Run the actual tests.
"${localSdkPath}/bin/flutter" test
popd
done
echo "-- Success --"
|
material-components/material-components-flutter-experimental
|
travis_script.sh
|
Shell
|
apache-2.0
| 1,084 |
#!/bin/bash
# Copyright 2016 - 2020 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source ${CCPROOT}/examples/common.sh
echo_info "Cleaning up.."
cleanup "${CCP_NAMESPACE?}-pgbadger"
$CCPROOT/examples/waitforterm.sh pgbadger ${CCP_CLI?}
|
the1forte/crunchy-containers
|
examples/kube/pgbadger/cleanup.sh
|
Shell
|
apache-2.0
| 762 |
#!/bin/bash
# This script requires LHCI_CANARY_SERVER_URL and LHCI_CANARY_SERVER_TOKEN variables to be set.
set -ox pipefail
# Start up our LHCI server.
yarn start:server --port=9009 &
# Wait for the server to start before hitting it with data.
sleep 5
# Seed the database with some data for us to audit.
yarn start:seed-database
# Collect our LHCI results.
rm -rf .lighthouseci/
for url in $(LHCI_ROOT_URL=http://localhost:9009 node ./scripts/ci-dogfood-get-urls.js); do
yarn start collect "--url=$url" --additive || exit 1
done
# Assert our results, but don't fail the build yet.
yarn start assert
EXIT_CODE=$?
if [[ -n "$LHCI_CANARY_SERVER_URL" ]]; then
# Upload the results to our canary server.
yarn start upload \
--serverBaseUrl="$LHCI_CANARY_SERVER_URL" \
--token="$LHCI_CANARY_SERVER_TOKEN"
fi
# Upload the results to temporary public storage too
export LHCI_GITHUB_STATUS_CONTEXT_SUFFIX="-2"
export LHCI_GITHUB_APP_TOKEN=""
export LHCI_GITHUB_TOKEN="$GITHUB_TOKEN"
yarn start upload --target=temporary-public-storage
# Kill the LHCI server from earlier.
kill $!
exit $EXIT_CODE
|
GoogleChrome/lighthouse-ci
|
scripts/ci-dogfood.sh
|
Shell
|
apache-2.0
| 1,112 |
# ----------------------------------------------------------------------------
#
# Package : script.js
# Version : v2.5.8
# Source repo : https://github.com/ded/script.js
# Tested on : ubuntu_16.04
# Script License: Apache License, Version 2 or later
# Maintainer : Priya Seth <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#!/bin/bash
#Install Dependencies
if [ ! -d ~/.nvm ]; then
sudo apt-get update
sudo apt-get install -y build-essential libssl-dev curl git
sudo curl https://raw.githubusercontent.com/creationix/nvm/v0.33.0/install.sh| bash
fi
source ~/.nvm/nvm.sh
if [ `nvm list | grep -c "v7.4.0"` -eq 0 ]
then
nvm install v7.4.0
fi
nvm use v7.4.0
#Build and test script.js package
git clone https://github.com/ded/script.js
cd script.js
npm install
#On an ubuntu VM, with VNC and firefox installed, verified that the test cases can be
#executed by running the test.html file in the browser. This file is present in
#the test folder along with a tests.js file. All tests succeeded. Note that these tests
#cannot be run through the command line and hence are not included in the Dockerfile.
#Test installation
if ! [ $? -eq 0 ];
then
echo "script.js package not Installed successfully"
else
echo "script.js package Installed successfully"
temp=$(npm list | grep script.js)
echo "Installed version : $temp"
fi
|
ppc64le/build-scripts
|
s/script_js/script-js_ubuntu_16.04.sh
|
Shell
|
apache-2.0
| 1,755 |
#!/bin/bash
# Copyright (c) 2014, IPython: interactive computing in Python
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Strict mode
set -euo pipefail
# Clone the repo
git clone https://github.com/lab41/pelops /pelops
pip install /pelops
# Launch the notebook
jupyter notebook --no-browser --port 8888 --ip='*' --NotebookApp.token= --allow-root
|
d-grossman/pelops
|
docker/pelops_start.sh
|
Shell
|
apache-2.0
| 1,605 |
#!/usr/bin/env bash
if [ $# -lt 1 ]; then
echo "Usage: $0 emu dir"
exit 1
fi
CHROOT_DIR=$1
MIRROR=http://archive.raspbian.org/raspbian
VERSION=jessie
CHROOT_ARCH=armhf
# Debian package dependencies for the host
HOST_DEPENDENCIES="debootstrap qemu-user-static binfmt-support sbuild"
# Debian package dependencies for the chrooted environment
GUEST_DEPENDENCIES="build-essential sudo cmake"
# Host dependencies
sudo apt-get install -qq -y ${HOST_DEPENDENCIES}
# Create chrooted environment
sudo mkdir ${CHROOT_DIR}
debootstrap --version
sudo apt-get update
sudo apt-get -y upgrade
debootstrap --version
sudo debootstrap --foreign --no-check-gpg --include=fakeroot,build-essential \
--arch=${CHROOT_ARCH} ${VERSION} ${CHROOT_DIR} ${MIRROR}
sudo cp /usr/bin/qemu-arm-static ${CHROOT_DIR}/usr/bin/
sudo chroot ${CHROOT_DIR} ./debootstrap/debootstrap --second-stage
sudo sbuild-createchroot --arch=${CHROOT_ARCH} --foreign --setup-only \
${VERSION} ${CHROOT_DIR} ${MIRROR}
# Create file with environment variables which will be used inside chrooted
# environment
#echo "export ARCH=${ARCH}" > envvars.sh
#echo "export TRAVIS_BUILD_DIR=${TRAVIS_BUILD_DIR}" >> envvars.sh
#chmod a+x envvars.sh
# Install dependencies inside chroot
sudo chroot ${CHROOT_DIR} apt-get update
sudo chroot ${CHROOT_DIR} apt-get --allow-unauthenticated install \
-qq -y ${GUEST_DEPENDENCIES}
# Create build dir and copy travis build files to our chroot environment
sudo mkdir -p ${CHROOT_DIR}/${TRAVIS_BUILD_DIR}
sudo rsync -av ${TRAVIS_BUILD_DIR}/ ${CHROOT_DIR}/${TRAVIS_BUILD_DIR}/
|
geoffviola/data_layout_study
|
travis_setup_arm.bash
|
Shell
|
apache-2.0
| 1,591 |
#!/bin/bash
set -ev
export PROJECT_DIR=`pwd`
${PROJECT_DIR}/../travis/start_scheduler.sh
cd ${PROJECT_DIR}
lein run -c config/settings.edn setup-database -c travis/simulator_config.edn
set +e
lein run -c config/settings.edn travis -c travis/simulator_config.edn
SIM_EXIT_CODE=$?
if [ ${SIM_EXIT_CODE} -ne 0 ]; then
echo "Displaying executor logs"
${PROJECT_DIR}/../travis/show_executor_logs.sh
fi
exit ${SIM_EXIT_CODE}
|
m4ce/Cook
|
simulator/travis/run_simulation.sh
|
Shell
|
apache-2.0
| 428 |
#!/usr/bin/env bash
. ./vocabulary.sh
invoke_on_clients_parallel ./current-ops-per-sec.sh | awk '{ sum+=$1 } END { print sum }'
|
tgrabiec/scylla-benchmark-tools
|
current-ops-per-sec.sh
|
Shell
|
apache-2.0
| 130 |
#!/bin/bash
# This script verifies MD5 sum for all installed packages
#
# Return:
# 0 - ok
# x - error
# Output:
# List of customized and unidentified packages
# Returns the list of customized packages, based on the md5sum utility and the
# per-package MD5 sums file /var/lib/dpkg/info/${PKG}.md5sums
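# Each ${PKG}.md5sums file contains md5sum-formatted lines whose paths are
# relative to / (no leading slash), e.g. a hypothetical entry:
#   d41d8cd98f00b204e9800998ecf8427e  usr/share/doc/sample-pkg/copyright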
#
# Return:
# 0 - ok
# x - error
# Global Vars:
# FILES - list of customized files
function md5_verify_md5sum()
{
FILES=''
local MD5SUM_FILE="/var/lib/dpkg/info/${PKG}"
local EXT="md5sums"
[ -f "${MD5SUM_FILE}.${EXT}" ] || {
EXT="amd64.md5sums";
[ -f "${MD5SUM_FILE}.${EXT}" ] ||
return 0;}
OUT="$(nice -n 19 ionice -c 3 md5sum --quiet -c "${MD5SUM_FILE}.${EXT}" 2>&1)"
(( $? == 0 )) &&
return 0
#exclude packages for which we cannot verify md5sum, no md5sums file
echo "${OUT}" | grep 'md5sums*No such file' &&
return -2
#exclude /etc, .pyc files and md5sum summary lines
FILES="$(echo "${OUT}" | grep -v '/etc/\|\.pyc\|md5sum:')"
[ -n "${FILES}" ] && {
FILES=$(echo "${FILES}" | awk '{gsub(/\:/,"");print "/"$1}');}
return 0
}
# Returns list of customized packages based on dpkg --verify (if it is available)
# or by calling md5_verify_md5sum()
#
# Return:
# 0 - ok
# x - error
# Global Vars:
# FILES - list of customized files
function md5_verify ()
{
OUT=$(nice -n 19 ionice -c 3 dpkg -V "${PKG}" 2>/dev/null)
if (( $? == 0 )); then
FILES=$(echo "${OUT}" | awk '{if ($2 != "c") print $2}')
else
md5_verify_md5sum "${PKG}" ||
return $?
fi
return 0
}
# Get list of all installed packages and check md5 sum for them
CUSTOMIZED_PKGS=""
ALL_PKGS=$(dpkg-query -W -f='${Package}\n') || exit 1
cd /
# Verify all installed packages one by one
for PKG in ${ALL_PKGS}; do
md5_verify "${PKG}" || exit -1
if [ -n "${FILES}" ]; then
# Add to customized packages
[ -n "${CUSTOMIZED_PKGS}" ] &&
CUSTOMIZED_PKGS+="\n"
CUSTOMIZED_PKGS+="${PKG}"
fi
done
[ -n "${CUSTOMIZED_PKGS}" ] &&
echo -e "${CUSTOMIZED_PKGS}"
exit 0
|
aepifanov/mos_mu
|
playbooks/files/verify_md5_packages_ubuntu.sh
|
Shell
|
apache-2.0
| 2,095 |
#!/usr/bin/env bash
# Note: This script uses "mdadm --detail" to get some of the metrics, so it must be run as root.
# It is designed to be run periodically in a cronjob, and output to /var/lib/node_exporter/textfile_collector/md_info_detail.prom
# $ cat /etc/cron.d/prometheus_md_info_detail
# * * * * * bash /var/lib/node_exporter/md_info_detail.sh > /var/lib/node_exporter/md_info_detail.prom.$$ && mv /var/lib/node_exporter/md_info_detail.prom.$$ /var/lib/node_exporter/md_info_detail.prom
set -eu
for MD_DEVICE in /dev/md/*; do
# Subshell to avoid eval'd variables from leaking between iterations
(
# Resolve symlink to discover device, e.g. /dev/md127
MD_DEVICE_NUM=$(readlink -f "${MD_DEVICE}")
# Remove /dev/ prefix
MD_DEVICE_NUM=${MD_DEVICE_NUM#/dev/}
MD_DEVICE=${MD_DEVICE#/dev/md/}
# Query sysfs for info about md device
SYSFS_BASE="/sys/devices/virtual/block/${MD_DEVICE_NUM}/md"
MD_LAYOUT=$(cat "${SYSFS_BASE}/layout")
MD_LEVEL=$(cat "${SYSFS_BASE}/level")
MD_METADATA_VERSION=$(cat "${SYSFS_BASE}/metadata_version")
MD_NUM_RAID_DISKS=$(cat "${SYSFS_BASE}/raid_disks")
# Remove 'raid' prefix from RAID level
MD_LEVEL=${MD_LEVEL#raid}
# Output disk metrics
for RAID_DISK in ${SYSFS_BASE}/rd[0-9]*; do
DISK=$(readlink -f "${RAID_DISK}/block")
DISK_DEVICE=$(basename "${DISK}")
RAID_DISK_DEVICE=$(basename "${RAID_DISK}")
RAID_DISK_INDEX=${RAID_DISK_DEVICE#rd}
RAID_DISK_STATE=$(cat "${RAID_DISK}/state")
DISK_SET=""
# Determine disk set using logic from mdadm: https://github.com/neilbrown/mdadm/commit/2c096ebe4b
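      # The RAID10 layout value packs copy counts into MD_LAYOUT: bits 0-7 hold
      # the "near" copies and bits 8-15 the "far" copies. For example, a layout
      # of 0x102 (the common "near=2" arrangement) would give NEAR_COPIES=2,
      # FAR_COPIES=1 and COPIES=2 (illustrative values only).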
if [[ ${RAID_DISK_STATE} == "in_sync" && ${MD_LEVEL} == 10 && $((MD_LAYOUT & ~0x1ffff)) ]]; then
NEAR_COPIES=$((MD_LAYOUT & 0xff))
FAR_COPIES=$(((MD_LAYOUT >> 8) & 0xff))
COPIES=$((NEAR_COPIES * FAR_COPIES))
      if (( MD_NUM_RAID_DISKS % COPIES == 0 )) && (( COPIES <= 26 )); then
DISK_SET=$((RAID_DISK_INDEX % COPIES))
fi
fi
echo -n "node_md_disk_info{disk_device=\"${DISK_DEVICE}\", md_device=\"${MD_DEVICE_NUM}\""
if [[ -n ${DISK_SET} ]]; then
SET_LETTERS=({A..Z})
echo -n ", md_set=\"${SET_LETTERS[${DISK_SET}]}\""
fi
echo "} 1"
done
# Get output from mdadm --detail (Note: root/sudo required)
MDADM_DETAIL_OUTPUT=$(mdadm --detail /dev/"${MD_DEVICE_NUM}")
# Output RAID "Devices", "Size" and "Event" metrics, from the output of "mdadm --detail"
while IFS= read -r line ; do
# Filter out these keys that have numeric values that increment up
if echo "$line" | grep -E -q "Devices :|Array Size :| Used Dev Size :|Events :"; then
MDADM_DETAIL_KEY=$(echo "$line" | cut -d ":" -f 1 | tr -cd '[a-zA-Z0-9]._-')
MDADM_DETAIL_VALUE=$(echo "$line" | cut -d ":" -f 2 | cut -d " " -f 2 | sed 's:^ ::')
echo "node_md_info_${MDADM_DETAIL_KEY}{md_device=\"${MD_DEVICE_NUM}\", md_name=\"${MD_DEVICE}\", raid_level=\"${MD_LEVEL}\", md_num_raid_disks=\"${MD_NUM_RAID_DISKS}\", md_metadata_version=\"${MD_METADATA_VERSION}\"} ${MDADM_DETAIL_VALUE}"
fi
done <<< "$MDADM_DETAIL_OUTPUT"
# Output RAID detail metrics info from the output of "mdadm --detail"
# NOTE: Sending this info as labels rather than separate metrics, because some of them can be strings.
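    # The resulting series looks roughly like (illustrative labels only):
    #   node_md_info{md_device="md127", md_name="data", raid_level="10", ..., State="clean"} 1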
echo -n "node_md_info{md_device=\"${MD_DEVICE_NUM}\", md_name=\"${MD_DEVICE}\", raid_level=\"${MD_LEVEL}\", md_num_raid_disks=\"${MD_NUM_RAID_DISKS}\", md_metadata_version=\"${MD_METADATA_VERSION}\""
while IFS= read -r line ; do
# Filter for lines with a ":", to use for Key/Value pairs in labels
if echo "$line" | grep -E -q ":" ; then
# Exclude lines with these keys, as they're values are numbers that increment up and captured in individual metrics above
if echo "$line" | grep -E -qv "Array Size|Used Dev Size|Events|Update Time" ; then
echo -n ", "
MDADM_DETAIL_KEY=$(echo "$line" | cut -d ":" -f 1 | tr -cd '[a-zA-Z0-9]._-')
MDADM_DETAIL_VALUE=$(echo "$line" | cut -d ":" -f 2- | sed 's:^ ::')
echo -n "${MDADM_DETAIL_KEY}=\"${MDADM_DETAIL_VALUE}\""
fi
fi
done <<< "$MDADM_DETAIL_OUTPUT"
echo "} 1"
)
done
|
derekmarcotte/node_exporter
|
text_collector_examples/md_info_detail.sh
|
Shell
|
apache-2.0
| 4,294 |
#!/bin/bash
installPath=$1
srcPath=$2
echo $installPath
echo $srcPath
if [ ! -d "$installPath" ]; then
echo "No install path supplied. It should be a directory that can be written to and whose current content is of no value (will be overwritten) "
echo "$0 <install path> <src tree trunk directory>"
exit 1
fi
if [ ! -d "$srcPath" ]; then
echo "No src path supplied. It should be the trunk directory containing the jars, files, what not that need to be supplied."
echo "$0 <install path> <src tree trunk directory>"
exit 1
fi
export KAMANJALIBPATH=$installPath
# *******************************
# Clean out prior installation
# *******************************
rm -Rf $KAMANJALIBPATH
# *******************************
# Make the directories as needed
# *******************************
mkdir -p $KAMANJALIBPATH/msgdata
mkdir -p $KAMANJALIBPATH/kvstores
mkdir -p $KAMANJALIBPATH/logs
# *******************************
# Build fat-jars
# *******************************
echo "clean, package and assemble $srcPath ..."
cd $srcPath
sbt clean package KamanjaManager/assembly MetadataAPI/assembly KVInit/assembly MethodExtractor/assembly SimpleKafkaProducer/assembly NodeInfoExtract/assembly
#sbt package
#sbt KamanjaManager/assembly
#sbt MetadataAPI/assembly
#sbt KVInit/assembly
# recreate eclipse projects
#echo "refresh the eclipse projects ..."
#cd $srcPath
#sbt eclipse
# Move them into place
echo "copy the fat jars to $KAMANJALIBPATH ..."
cd $srcPath
cp Utils/KVInit/target/scala-2.10/KVInit* $KAMANJALIBPATH
cp MetadataAPI/target/scala-2.10/MetadataAPI* $KAMANJALIBPATH
cp KamanjaManager/target/scala-2.10/KamanjaManager* $KAMANJALIBPATH
cp Pmml/MethodExtractor/target/scala-2.10/MethodExtractor* $KAMANJALIBPATH
cp Utils/SimpleKafkaProducer/target/scala-2.10/SimpleKafkaProducer* $KAMANJALIBPATH
# *******************************
# Copy jars required (more than required if the fat jars are used)
# *******************************
# Base Types and Functions, InputOutput adapters, and original versions of things
echo "copy Base Types and Functions, InputOutput adapters..."
cp $srcPath/BaseFunctions/target/scala-2.10/basefunctions_2.10-0.1.0.jar $KAMANJALIBPATH
cp $srcPath/BaseTypes/target/scala-2.10/basetypes_2.10-0.1.0.jar $KAMANJALIBPATH
cp $srcPath/InputOutputAdapters/FileSimpleInputOutputAdapters/target/scala-2.10/filesimpleinputoutputadapters_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/InputOutputAdapters/KafkaSimpleInputOutputAdapters/target/scala-2.10/kafkasimpleinputoutputadapters_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/EnvContexts/SimpleEnvContextImpl/target/scala-2.10/simpleenvcontextimpl_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/MetadataBootstrap/Bootstrap/target/scala-2.10/bootstrap_2.10-1.0.jar $KAMANJALIBPATH
# Storage jars
echo "copy Storage jars..."
cp $srcPath/Storage/target/scala-2.10/storage_2.10-0.0.0.2.jar $KAMANJALIBPATH
# Metadata jars
echo "copy Metadata jars..."
cp $srcPath/Metadata/target/scala-2.10/metadata_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/MessageDef/target/scala-2.10/messagedef_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/MetadataAPI/target/scala-2.10/metadataapi_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/Pmml/MethodExtractor/target/scala-2.10/methodextractor_2.10-1.0.jar $KAMANJALIBPATH
# Kamanja jars
echo "copy Kamanja jars..."
cp $srcPath/KamanjaBase/target/scala-2.10/kamanjabase_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/KamanjaManager/target/scala-2.10/kamanjamanager_2.10-1.0.jar $KAMANJALIBPATH
# Pmml compile and runtime jars
echo "copy Pmml compile and runtime jars..."
cp $srcPath/Pmml/PmmlRuntime/target/scala-2.10/pmmlruntime_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/Pmml/PmmlUdfs/target/scala-2.10/pmmludfs_2.10-1.0.jar $KAMANJALIBPATH
cp $srcPath/Pmml/PmmlCompiler/target/scala-2.10/pmmlcompiler_2.10-1.0.jar $KAMANJALIBPATH
# sample configs
#echo "copy sample configs..."
cp $srcPath/Utils/KVInit/src/main/resources/*cfg $KAMANJALIBPATH
# other jars
echo "copy other jars..."
cp $srcPath/../externals/log4j/log4j-1.2.17.jar $KAMANJALIBPATH
cp $srcPath/Utils/Serialize/target/scala-2.10/serialize_2.10-1.0.jar $KAMANJALIBPATH
# env context jars
echo "env context jars..."
cp $HOME/.ivy2/cache/asm/asm/jars/asm-3.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.codahale.metrics/metrics-core/bundles/metrics-core-3.0.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.yammer.metrics/metrics-core/jars/metrics-core-2.2.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.datastax.cassandra/cassandra-driver-core/bundles/cassandra-driver-core-2.0.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.esotericsoftware.kryo/kryo/bundles/kryo-2.21.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.esotericsoftware.minlog/minlog/jars/minlog-1.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.esotericsoftware.reflectasm/reflectasm/jars/reflectasm-1.07-shaded.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.fasterxml.jackson.core/jackson-annotations/bundles/jackson-annotations-2.3.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.fasterxml.jackson.core/jackson-core/bundles/jackson-core-2.3.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.fasterxml.jackson.core/jackson-databind/bundles/jackson-databind-2.3.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.github.stephenc.findbugs/findbugs-annotations/jars/findbugs-annotations-1.3.9-1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.google.code.findbugs/jsr305/jars/jsr305-1.3.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.google.collections/google-collections/jars/google-collections-1.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.google.guava/guava/bundles/guava-16.0.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.google.protobuf/protobuf-java/bundles/protobuf-java-2.5.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.google.protobuf/protobuf-java/bundles/protobuf-java-2.6.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.jamesmurty.utils/java-xmlbuilder/jars/java-xmlbuilder-0.4.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.jcraft/jsch/jars/jsch-0.1.42.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.ning/compress-lzf/bundles/compress-lzf-0.9.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.novocode/junit-interface/jars/junit-interface-0.11-RC1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.sleepycat/je/jars/je-4.0.92.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.sun.jersey/jersey-core/bundles/jersey-core-1.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.sun.jersey/jersey-json/bundles/jersey-json-1.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.sun.jersey/jersey-server/bundles/jersey-server-1.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.sun.xml.bind/jaxb-impl/jars/jaxb-impl-2.2.3-1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.thoughtworks.paranamer/paranamer/jars/paranamer-2.3.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.thoughtworks.paranamer/paranamer/jars/paranamer-2.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.twitter/chill-java/jars/chill-java-0.3.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.twitter/chill_2.10/jars/chill_2.10-0.3.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-beanutils/commons-beanutils-core/jars/commons-beanutils-core-1.8.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-beanutils/commons-beanutils/jars/commons-beanutils-1.7.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-cli/commons-cli/jars/commons-cli-1.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-collections/commons-collections/jars/commons-collections-3.2.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-configuration/commons-configuration/jars/commons-configuration-1.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-dbcp/commons-dbcp/jars/commons-dbcp-1.2.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-digester/commons-digester/jars/commons-digester-1.8.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-io/commons-io/jars/commons-io-2.4.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-lang/commons-lang/jars/commons-lang-2.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.1.3.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-net/commons-net/jars/commons-net-3.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-pool/commons-pool/jars/commons-pool-1.5.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/io.netty/netty/bundles/netty-3.9.0.Final.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/javax.activation/activation/jars/activation-1.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/javax.servlet.jsp/jsp-api/jars/jsp-api-2.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/javax.servlet/servlet-api/jars/servlet-api-2.5.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/javax.xml.bind/jaxb-api/jars/jaxb-api-2.2.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/javax.xml.stream/stax-api/jars/stax-api-1.0-2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/jline/jline/jars/jline-0.9.94.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/joda-time/joda-time/jars/joda-time-2.3.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/junit/junit/jars/junit-4.11.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/log4j/log4j/bundles/log4j-1.2.17.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.9.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/net.java.dev.jna/jna/jars/jna-3.2.7.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.avro/avro/jars/avro-1.7.4.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.commons/commons-compress/jars/commons-compress-1.4.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.commons/commons-math3/jars/commons-math3-3.1.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.hadoop/hadoop-annotations/jars/hadoop-annotations-2.4.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.hadoop/hadoop-auth/jars/hadoop-auth-2.4.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.hadoop/hadoop-common/jars/hadoop-common-2.4.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.hbase/hbase-client/jars/hbase-client-0.98.4-hadoop2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.hbase/hbase-common/jars/hbase-common-0.98.4-hadoop2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.hbase/hbase-protocol/jars/hbase-protocol-0.98.4-hadoop2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.httpcomponents/httpclient/jars/httpclient-4.2.5.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.httpcomponents/httpcore/jars/httpcore-4.2.4.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.zookeeper/zookeeper/jars/zookeeper-3.4.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.cloudera.htrace/htrace-core/jars/htrace-core-2.04.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.codehaus.jackson/jackson-core-asl/jars/jackson-core-asl-1.8.8.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.codehaus.jackson/jackson-jaxrs/jars/jackson-jaxrs-1.8.3.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.codehaus.jackson/jackson-mapper-asl/jars/jackson-mapper-asl-1.8.8.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.codehaus.jackson/jackson-xc/jars/jackson-xc-1.8.3.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.codehaus.jettison/jettison/bundles/jettison-1.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.hamcrest/hamcrest-core/jars/hamcrest-core-1.3.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.jdom/jdom/jars/jdom-1.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.joda/joda-convert/jars/joda-convert-1.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.json4s/json4s-ast_2.10/jars/json4s-ast_2.10-3.2.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.json4s/json4s-core_2.10/jars/json4s-core_2.10-3.2.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.json4s/json4s-jackson_2.10/jars/json4s-jackson_2.10-3.2.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.json4s/json4s-native_2.10/jars/json4s-native_2.10-3.2.9.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.mapdb/mapdb/bundles/mapdb-1.0.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.26.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.26.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.objenesis/objenesis/jars/objenesis-1.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.ow2.asm/asm-commons/jars/asm-commons-4.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.ow2.asm/asm-tree/jars/asm-tree-4.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.ow2.asm/asm/jars/asm-4.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.scala-lang/scalap/jars/scalap-2.10.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.scala-sbt/test-interface/jars/test-interface-1.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.scalamacros/quasiquotes_2.10.4/jars/quasiquotes_2.10.4-2.0.0-M6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.scalatest/scalatest_2.10/bundles/scalatest_2.10-2.2.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.7.7.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.7.5.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.tukaani/xz/jars/xz-1.0.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.xerial.snappy/snappy-java/bundles/snappy-java-1.0.4.1.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.23.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.23.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/voldemort/voldemort/jars/voldemort-0.96.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/commons-pool/commons-pool/jars/commons-pool-1.5.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.twitter/chill_2.10/jars/chill_2.10-0.3.6.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/org.apache.kafka/kafka_2.10/jars/*.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/net.sf.jopt-simple/jopt-simple/jars/jopt-simple-3.2.jar $KAMANJALIBPATH
cp $HOME/.ivy2/cache/com.101tec/zkclient/jars/zkclient-0.3.jar $KAMANJALIBPATH
# *******************************
# COPD messages data prep
# *******************************
# Prepare test messages and copy them into place
echo "Prepare test messages and copy them into place..."
cd $srcPath/Utils/KVInit/src/main/resources
gzip -c beneficiaries.csv > beneficiaries.gz
gzip -c messages_new_format.csv > messages_new_format.gz
gzip -c messages_old_format.csv > messages_old_format.gz
gzip -c messages_new_format_all.csv > messages_new_format_all.csv.gz
gzip -c messages50_2014_BIOH.csv > messages50_2014_BIOH.csv.gz
cp *gz $KAMANJALIBPATH/msgdata/
# *******************************
# All that is left is to run the KamanjaManager
# *******************************
# no debug
# java -jar $KAMANJALIBPATH/KamanjaManager-1.0 --config /tmp/KamanjaInstall/COPD.cfg
# debug version intended for eclipse attached debugging
# java -Xdebug -Xrunjdwp:transport=dt_socket,address=8998,server=y -jar $KAMANJALIBPATH/KamanjaManager-1.0 --config /tmp/KamanjaInstall/COPD.cfg
echo "installKamanja complete..."
|
traytonwhite/Kamanja
|
trunk/SampleApplication/Medical/bin/installOnLEP_Medical.sh
|
Shell
|
apache-2.0
| 14,977 |
#!/bin/bash
targetdir=$1
echo "Running magento static-content:deploy, indexer:reindex and setting deploy:mode to $M2MODE"
$targetdir/bin/magento setup:static-content:deploy
$targetdir/bin/magento indexer:reindex
$targetdir/bin/magento deploy:mode:set $M2MODE
|
dirceu-cit/lemp
|
php/magento-deploy.sh
|
Shell
|
apache-2.0
| 261 |
#!/bin/bash -eux
# Run Integration Tests via Inspec Infra testing framework
# https://www.inspec.io
echo -e '\033[33mRunning Inspec Integration Tests ...\033[0m'
cd /st2-docker/test/integration
for dir in */; do
dir=$(basename $dir)
if [ -f "${dir}/inspec.yml" ]; then
echo -e "\nRunning tests for \033[1;36m${dir}\033[0m ..."
sudo inspec exec --show-progress ${dir}
fi
done
|
shusugmt/st2-docker
|
images/stackstorm/bin/test.sh
|
Shell
|
apache-2.0
| 391 |
# Copyright(c) 2015 Nippon Telegraph and Telephone Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOGTAG=`basename $0`
HOST_NAME=`hostname`
LOGDIR="/var/log/masakari"
LOGFILE="${LOGDIR}/masakari-processmonitor.log"
# Debug log output function
# Argument
# $1 : Message
log_debug () {
if [ ! -e ${LOGDIR} ]; then
mkdir -p ${LOGDIR}
fi
if [ "${LOG_LEVEL}" == "debug" ]; then
log_output "$1"
fi
}
# Info log output function
# Argument
# $1 : Message
log_info () {
if [ ! -e ${LOGDIR} ]; then
mkdir -p ${LOGDIR}
fi
log_output "$1"
}
# This function outputs the log
# Argument
# $1 : Message
log_output () {
echo "`date +'%Y-%m-%d %H:%M:%S'` ${HOST_NAME} ${LOGTAG}: $1" >> $LOGFILE
}
# Some sanity checks on the list of processes to monitor (proc.list).
# Format of the proc.list (columns must be separated by commas):
# The first column   : Process ID (zero-padded to two digits) : cannot be omitted.
# The second column  : The keyword used to check whether the process is running (must not be empty) : cannot be omitted.
# The third column   : The initial startup command (it must include the word "start").
# The fourth column  : The restart command (it must include the word "start").
# The fifth column   : Shell file name for special processing at the initial startup (run before the startup).
# The sixth column   : Shell file name for special processing at the initial startup (run after the startup).
# The seventh column : Shell file name for special processing at restart (run before the restart).
# The eighth column  : Shell file name for special processing at restart (run after the restart).
#
# When an abnormal condition is detected in proc.list, the script exits with "exit 2".
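#
# Example entry (hypothetical daemon name, commands, and empty optional columns):
#   01,sample-daemon,/etc/init.d/sample-daemon start,/etc/init.d/sample-daemon restart,,,,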
column_num=8
check_proc_file_common (){
# Check the existence and validity of the proc.list.
if [ ! -e $PROC_LIST ]; then
log_info "$PROC_LIST(proc_list) is not exists."
exit 2
fi
if [ ! -s $PROC_LIST ]; then
log_info "$PROC_LIST(proc_list) is empty file."
exit 2
fi
if [ ! -r "$PROC_LIST" ]; then
log_info "$PROC_LIST(proc_list) is not readable."
exit 2
fi
OLD_IFS=$IFS
IFS=$'\n'
proc_list=(`cat $PROC_LIST`)
IFS=$OLD_IFS
LINE_NO=1
for line in "${proc_list[@]}"
do
num=`echo "$line" | tr -dc ',' | wc -c`
# The number of required column are incomplete.
check_num=`expr $column_num - 1`
if [ $num -ne $check_num ]; then
log_info "$PROC_LIST format error (column_num) line $LINE_NO"
exit 2
fi
PROC_ID=`echo $line | cut -d"," -f 1`
if [ ! -z "$PROC_ID" ]; then
expr "$PROC_ID" + 1 >/dev/null 2>&1
            # If PROC_ID is not numeric,
if [ 1 -lt $? ]; then
log_info "$PROC_LIST format error (PROC_ID) not number. line $LINE_NO"
exit 2
fi
else
log_info "$PROC_LIST format error (PROC_ID) empty. line $LINE_NO"
exit 2
fi
KEY_WORD=`echo $line | cut -d"," -f 2`
if [ -z "$KEY_WORD" ]; then
log_info "$PROC_LIST format error (KEY_WORD) empty. line $LINE_NO"
exit 2
fi
START_CMD=`echo $line | cut -d"," -f 3`
if [ ! -z "$START_CMD" ]; then
check=`echo $START_CMD | grep -c start`
# If words of "start" are not included in initial startup processing.,
if [ $check -ne 1 ]; then
log_info "$PROC_LIST format error (START_CMD) line $LINE_NO"
exit 2
fi
fi
RESTART_CMD=`echo $line | cut -d"," -f 4`
if [ ! -z "$RESTART_CMD" ]; then
check=`echo $RESTART_CMD | grep -c start`
# If words of "start" are not included in restart processing,
if [ $check -ne 1 ]; then
log_info "$PROC_LIST format error (RESTART_CMD) line $LINE_NO"
exit 2
fi
fi
# Check the existence and validity of special processing shell file to be executed before and after start processing.
START_SP_CMDFILE_BEFORE=`echo $line | cut -d"," -f 5`
if [ ! -z "$START_SP_CMDFILE_BEFORE" ]; then
# The starting (before executing) special processing shell file does not exist.
if [ ! -e $START_SP_CMDFILE_BEFORE ]; then
log_info "$PROC_LIST format error (START_SP_CMDFILE_BEFORE) not exists. line $LINE_NO"
exit 2
fi
if [ ! -x $START_SP_CMDFILE_BEFORE ]; then
log_info "$PROC_LIST format error (START_SP_CMDFILE_BEFORE) not exeutable. line $LINE_NO"
exit 2
fi
fi
START_SP_CMDFILE_AFTER=`echo $line | cut -d"," -f 6`
if [ ! -z "$START_SP_CMDFILE_AFTER" ]; then
        # The starting (after executing) special processing shell file does not exist.
if [ ! -e $START_SP_CMDFILE_AFTER ]; then
log_info "$PROC_LIST format error (START_SP_CMDFILE_AFTER) not exists. line $LINE_NO"
exit 2
fi
if [ ! -x $START_SP_CMDFILE_AFTER ]; then
log_info "$PROC_LIST format error (START_SP_CMDFILE_AFTER) not exeutable. line $LINE_NO"
exit 2
fi
fi
# Check the existence and validity of special processing shell file to be executed before and after restart processing.
RESTART_SP_CMDFILE_BEFORE=`echo $line | cut -d"," -f 7`
if [ ! -z "$RESTART_SP_CMDFILE_BEFORE" ]; then
# The restarting (before executing) special processing shell file does not exist.
if [ ! -e $RESTART_SP_CMDFILE_BEFORE ]; then
log_info "$PROC_LIST format error (RESTART_SP_CMDFILE_BEFORE) not exists. line $LINE_NO"
exit 2
fi
if [ ! -x $RESTART_SP_CMDFILE_BEFORE ]; then
log_info "$PROC_LIST format error (RESTART_SP_CMDFILE_BEFORE) not exeutable. line $LINE_NO"
exit 2
fi
fi
RESTART_SP_CMDFILE_AFTER=`echo $line | cut -d"," -f 8`
if [ ! -z "$RESTART_SP_CMDFILE_AFTER" ]; then
        # The restarting (after executing) special processing shell file does not exist.
if [ ! -e $RESTART_SP_CMDFILE_AFTER ]; then
log_info "$PROC_LIST format error (RESTART_SP_CMDFILE_AFTER) not exists. line $LINE_NO"
exit 2
fi
if [ ! -x $RESTART_SP_CMDFILE_AFTER ]; then
log_info "$PROC_LIST format error (RESTART_SP_CMDFILE_AFTER) not exeutable. line $LINE_NO"
exit 2
fi
fi
LINE_NO=`expr $LINE_NO + 1`
done
}
|
ntt-sic/masakari
|
masakari-processmonitor/processmonitor/common.sh
|
Shell
|
apache-2.0
| 7,313 |
../../../spark-1.6.0/bin/spark-submit \
--master local[*] \
--executor-memory 2g --executor-cores 1 --num-executors 4 \
--jars ../packages/spark-examples_2.10-2.0.0-SNAPSHOT.jar,../packages/random-0.0.1-SNAPSHOT-shaded.jar \
--py-files ../packages/python-lib.zip \
fix_corrupted_links.py \
$@
|
svebk/DeepSentiBank_memex
|
workflows/fix_corrupted_links/run_local.sh
|
Shell
|
bsd-2-clause
| 297 |
#! /usr/bin/env bash
set -eu
# Always fails with an error (for testing)
# stdin: message
echo 'Failure'
echo 'Failed' >&2
exit 1
|
kolypto/py-overc
|
tests/data/overc-server/alert.d/error.sh
|
Shell
|
bsd-2-clause
| 132 |
#!/bin/bash
# filename: extended-gui_system_calls.sh
#
# Copyright (c) 2013 - 2018 Andreas Schmidhuber
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# author: Andreas Schmidhuber
# purpose: executes several scripts every n seconds
# usage: extended-gui_system_calls.sh (... w/o parameters)
# version: date: description:
# 4.3 2018.09.23 C: improved checks for firmware upgrade
# 4.2 2017.06.27 N: run services check
# 4.1 2017.03.13 N: check for firmware upgrade
# 4.0 2015.11.22 N: CPU temperature monitoring and reporting (cpu_check)
# 3.1 2015.04.16 C: get extension variables from CONFIG2 instead of reading from config.xml
# 3.0 2015.04.09 C: for Extended GUI version 0.5
# 2.0 2014.04.07 C: initial version for Extended GUI
#------------- initialize variables ------------
cd `dirname $0`
. CONFIG
#-----------------------------------------------
LOCK_SCRIPT "ATTEMPT to run the script \"$SCRIPT_NAME\" twice!"
NOTIFY "INFO System call service started with pid ${PID}"
change_config=`/usr/local/bin/xml sel -t -v "//lastchange" ${XML_CONFIG_FILE}`
while true
do
if [ "$LOOP_DELAY" == "" ]; then LOOP_DELAY=60; fi
if [ -f "/var/run/firmware.lock" ] || [ -d "/tmp/sysbackup" ] || [ -d "/tmp/configbak" ]; then
if [ -d "/tmp/sysbackup" ]; then logger "extended-gui: /tmp/sysbackup directory found ... "; fi
if [ -d "/tmp/configbak" ]; then logger "extended-gui: /tmp/configbak directory found ... "; fi
if [ -f "/var/run/firmware.lock" ]; then logger "extended-gui: /var/run/firmware.lock file found ... "; fi
logger "extended-gui: firmware upgrade in progress, no further checks will be performed - extended-gui HALTED!";
NOTIFY "INFO extended-gui: firmware upgrade in progress, no further checks will be performed - extended-gui HALTED!";
exit 99;
else
lastchange_config=`/usr/local/bin/xml sel -t -v "//lastchange" ${XML_CONFIG_FILE}`
if [ "$change_config" != "$lastchange_config" ]; then
su root -c "/usr/local/www/ext/extended-gui/extended-gui_create_config2.php"
fi
$SYSTEM_SCRIPT_DIR/cpu_check.sh
$SYSTEM_SCRIPT_DIR/disk_check.sh
if [ $RUN_SERVICES -gt 0 ]; then
if [ -e ${PREFIX}services_firstrun.lock ]; then php $SYSTEM_SCRIPT_DIR/extended-gui_create_services_list.inc;
else touch ${PREFIX}services_firstrun.lock; fi
fi
if [ $RUN_USER -gt 0 ]; then $SYSTEM_SCRIPT_DIR/user_check.sh; fi
if [ $RUN_HOSTS -gt 0 ]; then
$SYSTEM_SCRIPT_DIR/hosts_check.sh &
fi
if [ $RUN_AUTOMOUNT -gt 0 ]; then $SYSTEM_SCRIPT_DIR/automount_usb.sh; fi
fi
sleep $LOOP_DELAY
done
UNLOCK_SCRIPT
|
crestAT/nas4free-extended-gui
|
extended-gui/scripts/extended-gui_system_calls.sh
|
Shell
|
bsd-2-clause
| 4,019 |
#!/bin/bash
# Author: Wonder
# Description:
# Usage: deploy to SAE
# A repository created specifically for deploying to SAE
REPO_FOR_SAE=../ezlog.sae
DEPLOY_TO_SAE_VERSION=1
if [ ! -d "$REPO_FOR_SAE" ]; then
mkdir -p "$REPO_FOR_SAE"
git svn clone https://svn.sinaapp.com/ezlog/ "$REPO_FOR_SAE"
fi;
pwd=$PWD
rsync -rlcv --exclude-from="./deploy-exclude.txt" ./ ${REPO_FOR_SAE}/${DEPLOY_TO_SAE_VERSION}/
cd $REPO_FOR_SAE
git svn rebase
git add .
git commit -m "auto commit `date`"
git svn dcommit
cd $pwd
|
wonderbeyond/ezlog
|
deploy-to-sae.sh
|
Shell
|
bsd-2-clause
| 509 |
#!/bin/bash
# Copyright 2014 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
set -e
# Download kernel, sets, etc. from ftp.usa.openbsd.org
if ! [ -e install56.iso ]; then
curl -O ftp://ftp.usa.openbsd.org/pub/OpenBSD/5.6/i386/install56.iso
fi
# XXX: Download and save bash, curl, and their dependencies too?
# Currently we download them from the network during the install process.
# Create custom site56.tgz set.
mkdir -p etc
cat >install.site <<EOF
#!/bin/sh
env PKG_PATH=ftp://ftp.usa.openbsd.org/pub/OpenBSD/5.6/packages/i386 pkg_add -iv bash curl git
# See https://code.google.com/p/google-compute-engine/issues/detail?id=77
echo "ignore classless-static-routes;" >> /etc/dhclient.conf
EOF
cat >etc/rc.local <<EOF
(
set -x
echo "starting buildlet script"
netstat -rn
cat /etc/resolv.conf
dig metadata.google.internal
(
set -e
export PATH="\$PATH:/usr/local/bin"
/usr/local/bin/curl -o /buildlet \$(/usr/local/bin/curl -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/buildlet-binary-url)
chmod +x /buildlet
exec /buildlet
)
echo "giving up"
sleep 10
halt -p
)
EOF
chmod +x install.site
tar -zcvf site56.tgz install.site etc/rc.local
# Hack install CD a bit.
echo 'set tty com0' > boot.conf
dd if=/dev/urandom of=random.seed bs=4096 count=1
cp install56.iso install56-patched.iso
growisofs -M install56-patched.iso -l -R -graft-points \
/5.6/i386/site56.tgz=site56.tgz \
/etc/boot.conf=boot.conf \
/etc/random.seed=random.seed
# Initialize disk image.
rm -f disk.raw
qemu-img create -f raw disk.raw 10G
# Run the installer to create the disk image.
expect <<EOF
spawn qemu-system-x86_64 -nographic -smp 2 -drive if=virtio,file=disk.raw -cdrom install56-patched.iso -net nic,model=virtio -net user -boot once=d
expect "boot>"
send "\n"
# Need to wait for the kernel to boot.
expect -timeout 600 "\(I\)nstall, \(U\)pgrade, \(A\)utoinstall or \(S\)hell\?"
send "i\n"
expect "Terminal type\?"
send "vt220\n"
expect "System hostname\?"
send "buildlet\n"
expect "Which network interface do you wish to configure\?"
send "vio0\n"
expect "IPv4 address for vio0\?"
send "dhcp\n"
expect "IPv6 address for vio0\?"
send "none\n"
expect "Which network interface do you wish to configure\?"
send "done\n"
expect "Password for root account\?"
send "root\n"
expect "Password for root account\?"
send "root\n"
expect "Start sshd\(8\) by default\?"
send "yes\n"
expect "Start ntpd\(8\) by default\?"
send "no\n"
expect "Do you expect to run the X Window System\?"
send "no\n"
expect "Do you want the X Window System to be started by xdm\(1\)\?"
send "no\n"
expect "Do you want to suspend on lid close\?"
send "no\n"
expect "Change the default console to com0\?"
send "yes\n"
expect "Which speed should com0 use\?"
send "115200\n"
expect "Setup a user\?"
send "gopher\n"
expect "Full name for user gopher\?"
send "Gopher Gopherson\n"
expect "Password for user gopher\?"
send "gopher\n"
expect "Password for user gopher\?"
send "gopher\n"
expect "Since you set up a user, disable sshd\(8\) logins to root\?"
send "yes\n"
expect "What timezone are you in\?"
send "US/Pacific\n"
expect "Which disk is the root disk\?"
send "sd0\n"
expect "Use DUIDs rather than device names in fstab\?"
send "yes\n"
expect "Use \(W\)hole disk or \(E\)dit the MBR\?"
send "whole\n"
expect "Use \(A\)uto layout, \(E\)dit auto layout, or create \(C\)ustom layout\?"
send "custom\n"
expect "> "
send "z\n"
expect "> "
send "a b\n"
expect "offset: "
send "\n"
expect "size: "
send "1G\n"
expect "FS type: "
send "swap\n"
expect "> "
send "a a\n"
expect "offset: "
send "\n"
expect "size: "
send "\n"
expect "FS type: "
send "4.2BSD\n"
expect "mount point: "
send "/\n"
expect "> "
send "w\n"
expect "> "
send "q\n"
expect "Location of sets\?"
send "cd\n"
expect "Which CD-ROM contains the install media\?"
send "cd0\n"
expect "Pathname to the sets\?"
send "5.6/i386\n"
expect "Set name\(s\)\?"
send "+*\n"
expect "Set name\(s\)\?"
send " -x*\n"
expect "Set name\(s\)\?"
send " -game*\n"
expect "Set name\(s\)\?"
send " -man*\n"
expect "Set name\(s\)\?"
send "done\n"
expect "Directory does not contain SHA256\.sig\. Continue without verification\?"
send "yes\n"
# Need to wait for previous sets to unpack.
expect -timeout 600 "Location of sets\?"
send "done\n"
expect "Ambiguous: choose dependency for git"
send "0\n"
# Need to wait for install.site to install curl, git, etc.
expect -timeout 600 "CONGRATULATIONS!"
expect "# "
send "halt\n"
expect "Please press any key to reboot.\n"
send "\n"
expect "boot>"
send "\n"
expect -timeout 600 eof
EOF
# Create Compute Engine disk image.
echo "Archiving disk.raw... (this may take a while)"
tar -Szcf openbsd-386-gce.tar.gz disk.raw
echo "Done. GCE image is openbsd-386-gce.tar.gz."
|
evandbrown/build
|
env/openbsd-386/make.bash
|
Shell
|
bsd-3-clause
| 4,934 |
#!/bin/bash
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
# Install OpenImageIO
# Exit the whole script if any command fails.
set -ex
OPENIMAGEIO_REPO=${OPENIMAGEIO_REPO:=OpenImageIO/oiio}
OPENIMAGEIO_VERSION=${OPENIMAGEIO_VERSION:=master}
LOCAL_DEPS_DIR=${LOCAL_DEPS_DIR:=${PWD}/ext}
OPENIMAGEIO_SRCDIR=${OPENIMAGEIO_SRCDIR:=${LOCAL_DEPS_DIR}/OpenImageIO}
OPENIMAGEIO_BUILD_DIR=${OPENIMAGEIO_BUILD_DIR:=${OPENIMAGEIO_SRCDIR}/build/$PLATFORM}
OPENIMAGEIO_INSTALLDIR=${OPENIMAGEIO_INSTALLDIR:=${LOCAL_DEPS_DIR}/dist}
OPENIMAGEIO_CMAKE_FLAGS=${OPENIMAGEIO_CMAKE_FLAGS:=""}
OPENIMAGEIO_BUILD_TYPE=${OPENIMAGEIO_BUILD_TYPE:=Release}
OPENIMAGEIO_CXXFLAGS=${OPENIMAGEIO_CXXFLAGS:=""}
BASEDIR=$PWD
CMAKE_GENERATOR=${CMAKE_GENERATOR:="Unix Makefiles"}
if [ ! -e $OPENIMAGEIO_SRCDIR ] ; then
git clone https://github.com/${OPENIMAGEIO_REPO} $OPENIMAGEIO_SRCDIR
fi
mkdir -p ${OPENIMAGEIO_INSTALLDIR} && true
mkdir -p ${OPENIMAGEIO_BUILD_DIR} && true
pushd $OPENIMAGEIO_SRCDIR
git fetch --all -p
git checkout $OPENIMAGEIO_VERSION --force
if [[ "$USE_SIMD" != "" ]] ; then
OPENIMAGEIO_CMAKE_FLAGS="$OPENIMAGEIO_CMAKE_FLAGS -DUSE_SIMD=$USE_SIMD"
fi
if [[ "$DEBUG" == "1" ]] ; then
OPENIMAGEIO_CMAKE_FLAGS="$OPENIMAGEIO_CMAKE_FLAGS -DCMAKE_BUILD_TYPE=Debug"
fi
# if [[ "$ARCH" == "windows64" ]] ; then
pushd ${OPENIMAGEIO_BUILD_DIR}
cmake ../.. -G "$CMAKE_GENERATOR" \
-DCMAKE_BUILD_TYPE="$OPENIMAGEIO_BUILD_TYPE" \
-DCMAKE_INSTALL_PREFIX="$OPENIMAGEIO_INSTALLDIR" \
-DPYTHON_VERSION="$PYTHON_VERSION" \
-DCMAKE_INSTALL_LIBDIR="$OPENIMAGEIO_INSTALLDIR/lib" \
-DCMAKE_CXX_STANDARD="$CMAKE_CXX_STANDARD" \
$OPENIMAGEIO_CMAKE_FLAGS -DVERBOSE=1
echo "Parallel build $CMAKE_BUILD_PARALLEL_LEVEL"
time cmake --build . --target install --config ${OPENIMAGEIO_BUILD_TYPE}
popd
popd
export OpenImageIO_ROOT=$OPENIMAGEIO_INSTALLDIR
export PATH=$OpenImageIO_ROOT/bin:$PATH
export DYLD_LIBRARY_PATH=$OpenImageIO_ROOT/lib:$DYLD_LIBRARY_PATH
export LD_LIBRARY_PATH=$OpenImageIO_ROOT/lib:$LD_LIBRARY_PATH
export PYTHONPATH=$OpenImageIO_ROOT/lib/python${PYTHON_VERSION}/site-packages:$PYTHONPATH
echo "DYLD_LIBRARY_PATH = $DYLD_LIBRARY_PATH"
echo "LD_LIBRARY_PATH = $LD_LIBRARY_PATH"
echo "OpenImageIO_ROOT $OpenImageIO_ROOT"
ls -R $OpenImageIO_ROOT
|
imageworks/OpenShadingLanguage
|
src/build-scripts/build_openimageio.bash
|
Shell
|
bsd-3-clause
| 2,393 |
#!/bin/sh -e
#
# install CUBRID DBMS
if (php --version | grep -i HipHop > /dev/null); then
echo "Skipping CUBRID on HHVM"
exit 0
fi
CWD=$(pwd)
# cubrid dbms
mkdir -p cubrid/$CUBRID_VERSION
cd cubrid
if (test -f $CUBRID_VERSION-linux.x86_64.tar.gz); then
echo "CUBRID is already installed"
else
wget http://ftp.cubrid.org/CUBRID_Engine/$CUBRID_VERSION-linux.x86_64.tar.gz -O $CUBRID_VERSION-linux.x86_64.tar.gz
fi
cd $CUBRID_VERSION
tar xzf ../../$CUBRID_VERSION-linux.x86_64.tar.gz
cd ../..
# setting cubrid env
CUBRID=$CWD/cubrid/$CUBRID_VERSION/CUBRID
CUBRID_DATABASES=$CUBRID/databases
CUBRID_LANG=en_US
ld_lib_path=`printenv LD_LIBRARY_PATH`
if [ "$ld_lib_path" = "" ]
then
LD_LIBRARY_PATH=$CUBRID/lib
else
LD_LIBRARY_PATH=$CUBRID/lib:$LD_LIBRARY_PATH
fi
SHLIB_PATH=$LD_LIBRARY_PATH
LIBPATH=$LD_LIBRARY_PATH
PATH=$CUBRID/bin:$CUBRID/cubridmanager:$PATH
export CUBRID
export CUBRID_DATABASES
export CUBRID_LANG
export LD_LIBRARY_PATH
export SHLIB_PATH
export LIBPATH
export PATH
# start cubrid
cubrid service start
# create and start the demo db
$CUBRID/demo/make_cubrid_demo.sh
cubrid server start demodb
echo ""
echo "Installed CUBRID $CUBRID_VERSION"
echo ""
# cubrid pdo
install_pdo_cubrid() {
if (test "! (-f PDO_CUBRID-$CUBRID_PDO_VERSION.tgz)"); then
wget "http://pecl.php.net/get/PDO_CUBRID-$CUBRID_PDO_VERSION.tgz" -O PDO_CUBRID-$CUBRID_PDO_VERSION.tgz
fi
tar -zxf "PDO_CUBRID-$CUBRID_PDO_VERSION.tgz"
sh -c "cd PDO_CUBRID-$CUBRID_PDO_VERSION && phpize && ./configure --prefix=$CWD/cubrid/PDO_CUBRID-$CUBRID_PDO_VERSION && make"
echo "extension=$CWD/cubrid/PDO_CUBRID-$CUBRID_PDO_VERSION/modules/pdo_cubrid.so" >> ~/.phpenv/versions/$(phpenv version-name)/etc/php.ini
return $?
}
install_pdo_cubrid > ~/pdo_cubrid.log || ( echo "=== PDO CUBRID BUILD FAILED ==="; cat ~/pdo_cubrid.log; exit 1 )
echo ""
echo "Installed CUBRID PDO $CUBRID_PDO_VERSION"
echo ""
cd ..
|
peterkokot/yii2
|
tests/unit/data/travis/cubrid-setup.sh
|
Shell
|
bsd-3-clause
| 1,961 |
#!/bin/bash
VERSION="0.0.0"
# trap keyboard interrupt (control-c)
trap control_c SIGINT
function setPath {
cat <<SETPATH
--------------------------------------------------------------------------------------
Error locating ANTS
--------------------------------------------------------------------------------------
It seems that the ANTSPATH environment variable is not set. Please add the ANTSPATH
variable. This can be achieved by editing the .bash_profile in the home directory.
Add:
ANTSPATH=/home/yourname/bin/ants/
Or the correct location of the ANTS binaries.
Alternatively, edit this script ( `basename $0` ) to set up this parameter correctly.
SETPATH
exit 1
}
# Uncomment the line below in case you have not set the ANTSPATH variable in your environment.
# export ANTSPATH=${ANTSPATH:="$HOME/bin/ants/"} # EDIT THIS
#ANTSPATH=YOURANTSPATH
if [[ ${#ANTSPATH} -le 3 ]];
then
setPath >&2
fi
# Test availability of helper scripts.
# No need to test this more than once. Can reside outside of the main loop.
ANTS=${ANTSPATH}/antsRegistration
WARP=${ANTSPATH}/antsApplyTransforms
JLF=${ANTSPATH}/antsJointFusion
PEXEC=${ANTSPATH}ANTSpexec.sh
SGE=${ANTSPATH}waitForSGEQJobs.pl
PBS=${ANTSPATH}waitForPBSQJobs.pl
XGRID=${ANTSPATH}waitForXGridJobs.pl
SLURM=${ANTSPATH}/waitForSlurmJobs.pl
fle_error=0
for FLE in $JLF $ANTS $WARP $PEXEC $SGE $XGRID $PBS $SLURM
do
if [[ ! -x $FLE ]];
then
echo
echo "--------------------------------------------------------------------------------------"
echo " FILE $FLE DOES NOT EXIST -- OR -- IS NOT EXECUTABLE !!! $0 will terminate."
echo "--------------------------------------------------------------------------------------"
echo " if the file is not executable, please change its permissions. "
fle_error=1
fi
done
if [[ $fle_error = 1 ]];
then
echo "missing helper script"
exit 1
fi
#assuming .nii.gz as default file type. This is the case for ANTS 1.7 and up
function Usage {
cat <<USAGE
Usage:
`basename $0` -d ImageDimension -o OutputPrefix <other options> <images>
Compulsory arguments (minimal command line requires SGE cluster, otherwise use -c & -j options):
-d: ImageDimension: 2 or 3.
-o: OutputPrefix: A prefix that is prepended to all output files.
-t: TargetImage: Target image to be labeled.
-g: Atlas: Atlas to be warped to target image.
-l: Labels: Labels corresponding to atlas (cf -g).
Optional arguments:
-m: Majority vote: Use majority vote instead of joint label fusion (default = 0).
-k: Keep files: Keep warped atlas and label files (default = 0).
-c: Control for parallel computation (default 0) -- 0 == run serially, 1 == SGE qsub,
2 == use PEXEC (localhost), 3 == Apple XGrid, 4 == PBS qsub, 5 == SLURM.
-j: Number of cpu cores to use (default 2; -- requires "-c 2").
-r: qsub options
-q: Use quick registration parameters: Either 0 or 1 (default = 1).
-p: Save posteriors: Save posteriors in specified c-style format e.g. posterior%04d.nii.gz
Need to specify output directory.
-f: Float precision: Use float precision (default = 1) -- 0 == double, 1 == float.
-u: Registration walltime (default = 20:00:00): Option for PBS/SLURM qsub specifying requested time
per pairwise registration.
-v: Registration memory limit (default = 8gb): Option for PBS/SLURM qsub specifying requested memory
per pairwise registration.
-w: JLF walltime (default = 20:00:00): Option for PBS/SLURM qsub specifying requested time
for the joint label fusion call.
-z: JLF Memory limit (default = 8gb): Option for PBS/SLURM qsub specifying requested memory
for the joint label fusion call.
-y: transform type (default = 's')
t: translation
r: rigid
a: rigid + affine
s: rigid + affine + deformable syn
sr: rigid + deformable syn
so: deformable syn only
b: rigid + affine + deformable b-spline syn
br: rigid + deformable b-spline syn
bo: deformable b-spline syn only
-x: Target mask image (default = 'otsu')
otsu: use otsu thresholding to define foreground/background
or: 'or' all the warped atlas images to defined foreground/background
<filename>: a user-specified mask
none: don't use a mask
Example:
`basename $0` -d 3 -t target.nii.gz -o malf \
-p malfPosteriors%04d.nii.gz \
-g atlas1.nii.gz -l labels1.nii.gz \
-g atlas2.nii.gz -l labels2.nii.gz \
-g atlas3.nii.gz -l labels3.nii.gz
--------------------------------------------------------------------------------------
JLF was created by:
--------------------------------------------------------------------------------------
Hongzhi Wang and Paul Yushkevich
Penn Image Computing And Science Laboratory
University of Pennsylvania
Please reference http://www.ncbi.nlm.nih.gov/pubmed/22732662 when employing this script
in your studies.
Wang H, Suh JW, Das SR, Pluta J, Craige C, Yushkevich PA.
Multi-Atlas Segmentation with Joint Label Fusion.
IEEE Trans Pattern Anal Mach Intell.
--------------------------------------------------------------------------------------
script by Nick Tustison
--------------------------------------------------------------------------------------
USAGE
exit 1
}
function Help {
cat <<HELP
`basename $0` will propagate labels from a set of pre-labeled atlases using the JLF
algorithm.
Usage:
`basename $0` -d ImageDimension -o OutputPrefix <other options> <images>
Example Case:
`basename $0` -d 3 -t target.nii.gz -o malf \
-p malfPosteriors%04d.nii.gz \
-g atlas1.nii.gz -l labels1.nii.gz \
-g atlas2.nii.gz -l labels2.nii.gz \
-g atlas3.nii.gz -l labels3.nii.gz
Compulsory arguments (minimal command line requires SGE cluster, otherwise use -c & -j options):
-d: ImageDimension: 2 or 3.
-o: OutputPrefix: A prefix that is prepended to all output files.
-t: TargetImage: Target image to be labeled.
-g: Atlas: Atlas to be warped to target image.
-l: Labels: Labels corresponding to atlas (cf -g).
Optional arguments:
-m: Majority vote: Use majority vote instead of joint label fusion (default = 0).
-k: Keep files: Keep warped atlas and label files (default = 0).
-c: Control for parallel computation (default 0) -- 0 == run serially, 1 == SGE qsub,
2 == use PEXEC (localhost), 3 == Apple XGrid, 4 == PBS qsub, 5 == SLURM.
-j: Number of cpu cores to use (default 2; -- requires "-c 2").
-q: Use quick registration parameters: Either 0 or 1 (default = 1).
-p: Save posteriors: Save posteriors in specified c-style format e.g. posterior%04d.nii.gz
Need to specify output directory.
-f: Float precision: Use float precision (default = 1) -- 0 == double, 1 == float.
-u: Registration walltime (default = 20:00:00): Option for PBS/SLURM qsub specifying requested time
per pairwise registration.
-v: Registration memory limit (default = 8gb): Option for PBS/SLURM qsub specifying requested memory
per pairwise registration.
-w: JLF walltime (default = 20:00:00): Option for PBS/SLURM qsub specifying requested time
for the joint label fusion call.
-z: JLF Memory limit (default = 8gb): Option for PBS/SLURM qsub specifying requested memory
for the joint label fusion call.
-y: Transform type (default = 's')
t: translation
r: rigid
a: rigid + affine
s: rigid + affine + deformable syn
sr: rigid + deformable syn
so: deformable syn only
b: rigid + affine + deformable b-spline syn
br: rigid + deformable b-spline syn
bo: deformable b-spline syn only
-x: Target mask image (default = 'otsu')
otsu: use otsu thresholding to define foreground/background
or: 'or' all the warped atlas images to defined foreground/background
<filename>: a user-specified mask
none: don't use a mask
Requirements:
This scripts relies on the following scripts in your $ANTSPATH directory. The script
will terminate prematurely if these files are not present or are not executable.
- pexec.sh
- waitForSGEQJobs.pl (only for use with Sun Grid Engine)
- waitForPBSQJobs.pl (only for use with Portable Batch System)
- ANTSpexec.sh (only for use with localhost parallel execution)
- waitForXGridJobs.pl (only for use with Apple XGrid)
- waitForSlurmJobs.pl (only for use with SLURM)
- antsRegistrationSyN.sh
- antsRegistrationSyNQuick.sh ( quick parameters )
--------------------------------------------------------------------------------------
Get the latest ANTS version at:
--------------------------------------------------------------------------------------
https://github.com/stnava/ANTs/
--------------------------------------------------------------------------------------
Read the ANTS documentation at:
--------------------------------------------------------------------------------------
http://stnava.github.io/ANTs/
--------------------------------------------------------------------------------------
JLF was created by:
--------------------------------------------------------------------------------------
Hongzhi Wang and Paul Yushkevich
Penn Image Computing And Science Laboratory
University of Pennsylvania
Please reference http://www.ncbi.nlm.nih.gov/pubmed/22732662 when employing this script
in your studies.
Wang H, Suh JW, Das SR, Pluta J, Craige C, Yushkevich PA.
Multi-Atlas Segmentation with Joint Label Fusion.
IEEE Trans Pattern Anal Mach Intell.
--------------------------------------------------------------------------------------
script by Nick Tustison
--------------------------------------------------------------------------------------
HELP
exit 1
}
function reportParameters {
cat <<REPORTPARAMETERS
--------------------------------------------------------------------------------------
Parameters
--------------------------------------------------------------------------------------
ANTSPATH is $ANTSPATH
Dimensionality: $DIM
Output prefix: $OUTPUT_PREFIX
Posteriors format: $OUTPUT_POSTERIORS_FORMAT
Target image: $TARGET_IMAGE
Atlas images: ${ATLAS_IMAGES[@]}
Atlas labels: ${ATLAS_LABELS[@]}
Transformation: ${TRANSFORM_TYPE}
Keep all images: $KEEP_ALL_IMAGES
Processing type: $DOQSUB
Number of cpu cores: $CORES
--------------------------------------------------------------------------------------
REPORTPARAMETERS
}
cleanup()
{
echo "\n*** Performing cleanup, please wait ***\n"
runningANTSpids=$( ps --ppid $$ -o pid= )
for thePID in $runningANTSpids
do
echo "killing: ${thePID}"
kill ${thePID}
done
return $?
}
control_c()
# run if user hits control-c
{
echo -en "\n*** User pressed CTRL + C ***\n"
cleanup
exit $?
echo -en "\n*** Script cancelled by user ***\n"
}
#initializing variables with global scope
time_start=`date +%s`
CURRENT_DIR=`pwd`/
DIM=3
OUTPUT_DIR=${CURRENT_DIR}/tmp$RANDOM/
OUTPUT_PREFIX=${OUTPUT_DIR}/tmp
OUTPUT_SUFFIX="nii.gz"
OUTPUT_POSTERIORS_FORMAT=''
TARGET_IMAGE=''
ATLAS_IMAGES=()
ATLAS_LABELS=()
TRANSFORM='s'
KEEP_ALL_IMAGES=0
DOQSUB=0
CORES=1
PRECISION=0
XGRID_OPTS=""
SCRIPT_PREPEND=""
QSUB_OPTS=""
TARGET_MASK_IMAGE="otsu"
REGISTRATION_WALLTIME="20:00:00"
REGISTRATION_MEMORY="8gb"
JLF_WALLTIME="20:00:00"
JLF_MEMORY="8gb"
##Getting system info from linux can be done with these variables.
# RAM=`cat /proc/meminfo | sed -n -e '/MemTotal/p' | awk '{ printf "%s %s\n", $2, $3 ; }' | cut -d " " -f 1`
# RAMfree=`cat /proc/meminfo | sed -n -e '/MemFree/p' | awk '{ printf "%s %s\n", $2, $3 ; }' | cut -d " " -f 1`
# cpu_free_ram=$((${RAMfree}/${cpu_count}))
if [[ ${OSTYPE:0:6} == 'darwin' ]];
then
cpu_count=`sysctl -n hw.physicalcpu`
else
cpu_count=`cat /proc/cpuinfo | grep processor | wc -l`
fi
# Provide output for Help
if [[ "$1" == "-h" ]];
then
Help >&2
fi
MAJORITYVOTE=0
RUNQUICK=1
TRANSFORM_TYPE="s"
# reading command line arguments
while getopts "c:d:f:g:h:j:k:l:m:o:p:q:r:t:u:v:w:x:y:z:" OPT
do
case $OPT in
h) #help
echo "$USAGE"
exit 0
;;
c) #use SGE cluster
DOQSUB=$OPTARG
if [[ $DOQSUB -gt 5 ]];
then
echo " DOQSUB must be an integer value (0=serial, 1=SGE qsub, 2=try pexec, 3=XGrid, 4=PBS qsub, 5=SLURM ) you passed -c $DOQSUB "
exit 1
fi
;;
d) #dimensions
DIM=$OPTARG
if [[ ${DIM} -ne 2 && $DIM -ne 3 ]];
then
echo " Dimensionality is only valid for 2 or 3. You passed -d $DIM."
exit 1
fi
;;
f)
PRECISION=$OPTARG
;;
g)
ATLAS_IMAGES[${#ATLAS_IMAGES[@]}]=$OPTARG
;;
j) #number of cpu cores to use
CORES=$OPTARG
;;
k)
KEEP_ALL_IMAGES=$OPTARG
;;
m) #majority voting option
MAJORITYVOTE=$OPTARG
;;
p)
OUTPUT_POSTERIORS_FORMAT=$OPTARG
;;
q)
RUNQUICK=$OPTARG
;;
r)
QSUB_OPTS=$OPTARG
;;
l)
ATLAS_LABELS[${#ATLAS_LABELS[@]}]=$OPTARG
;;
o)
OUTPUT_PREFIX=$OPTARG
OUTPUT_DIR=`dirname ${OUTPUT_PREFIX}`
;;
t)
TARGET_IMAGE=$OPTARG
;;
u)
REGISTRATION_WALLTIME=$OPTARG
;;
v)
REGISTRATION_MEMORY=$OPTARG
;;
w)
JLF_WALLTIME=$OPTARG
;;
z)
JLF_MEMORY=$OPTARG
;;
x)
TARGET_MASK_IMAGE=$OPTARG
;;
y)
TRANSFORM_TYPE=$OPTARG
;;
\?) # getopts issues an error message
echo "$USAGE" >&2
exit 1
;;
esac
done
if [[ $DOQSUB -eq 1 || $DOQSUB -eq 4 ]];
then
qq=`which qsub`
if [[ ${#qq} -lt 1 ]];
then
echo "do you have qsub? if not, then choose another c option ... if so, then check where the qsub alias points ..."
exit 1
fi
fi
if [[ $DOQSUB -eq 5 ]];
then
qq=`which sbatch`
if [[ ${#qq} -lt 1 ]];
then
echo "do you have sbatch? if not, then choose another c option ... if so, then check where the sbatch alias points ..."
exit 1
fi
fi
if [[ ! -f "$TARGET_IMAGE" ]];
then
echo "Target image '$TARGET_IMAGE' does not exist. See usage: '$0 -h 1'"
exit 1
fi
if [[ ${#ATLAS_IMAGES[@]} -ne ${#ATLAS_LABELS[@]} ]];
then
echo "The number of atlas images does not equal the number of labels. Ensure that a corresponding set of labels exist for each image."
exit 1
fi
PRECISIONFLAG='f'
if [[ ${PRECISION} -eq 0 ]];
then
PRECISIONFLAG='d'
fi
mkdir -p ${OUTPUT_DIR}
##########################################################################
#
# Perform JLF labeling by
# 1) registering all atlases to target image
# 2) call 'jointfusion'
#
##########################################################################
echo
echo "--------------------------------------------------------------------------------------"
echo " Start JLFization"
echo "--------------------------------------------------------------------------------------"
reportParameters
jobIDs=""
WARPED_ATLAS_IMAGES=()
WARPED_ATLAS_LABELS=()
AFFINE_FILES=()
WARP_FIELDS=()
INVERSE_WARP_FIELDS=()
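# For each atlas: write a per-atlas job script that registers the atlas to the target image and warps the atlas labels into target space.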
for (( i = 0; i < ${#ATLAS_IMAGES[@]}; i++ ))
do
IMG_BASE=`basename ${ATLAS_IMAGES[$i]}`
BASENAME=` echo ${IMG_BASE} | cut -d '.' -f 1 `
qscript="${OUTPUT_DIR}/job_${BASENAME}_${i}.sh"
WARPED_ATLAS_IMAGES[${#WARPED_ATLAS_IMAGES[@]}]="${OUTPUT_PREFIX}${BASENAME}_${i}_Warped.nii.gz"
INVERSE_WARPED_ATLAS_IMAGES[${#INVERSE_WARPED_ATLAS_IMAGES[@]}]="${OUTPUT_PREFIX}${BASENAME}_${i}_InverseWarped.nii.gz"
WARPED_ATLAS_LABELS[${#WARPED_ATLAS_LABELS[@]}]="${OUTPUT_PREFIX}${BASENAME}_${i}_WarpedLabels.nii.gz"
WARP_FIELDS[${#WARP_FIELDS[@]}]="${OUTPUT_PREFIX}${BASENAME}_${i}_1Warp.nii.gz"
INVERSE_WARP_FIELDS[${#INVERSE_WARP_FIELDS[@]}]="${OUTPUT_PREFIX}${BASENAME}_${i}_1InverseWarp.nii.gz"
AFFINE_FILES[${#AFFINE_FILES[@]}]="${OUTPUT_PREFIX}${BASENAME}_${i}_0GenericAffine.mat"
if [[ -f "${OUTPUT_PREFIX}${BASENAME}_${i}_WarpedLabels.nii.gz" ]];
then
echo ${OUTPUT_PREFIX}${BASENAME}_${i}_WarpedLabels.nii.gz already exists.
rm -f $qscript
continue
fi
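# Choose the registration driver: antsRegistrationSyNQuick.sh when -q 1 (the default), otherwise the full antsRegistrationSyN.sh.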
regcall=${ANTSPATH}/antsRegistrationSyN.sh
if [[ $RUNQUICK -eq 1 ]];
then
regcall=${ANTSPATH}/antsRegistrationSyNQuick.sh
fi
registrationCall="$regcall \
-d ${DIM} \
-p ${PRECISIONFLAG} \
-j 1 \
-t ${TRANSFORM_TYPE} \
-f ${TARGET_IMAGE} \
-m ${ATLAS_IMAGES[$i]} \
-o ${OUTPUT_PREFIX}${BASENAME}_${i}_ > ${OUTPUT_PREFIX}${BASENAME}_${i}_log.txt"
labelXfrmCall="${ANTSPATH}/antsApplyTransforms \
-d ${DIM} \
--float 1 \
-i ${ATLAS_LABELS[$i]} \
-r ${TARGET_IMAGE} \
-o ${OUTPUT_PREFIX}${BASENAME}_${i}_WarpedLabels.nii.gz \
-n NearestNeighbor \
-t ${OUTPUT_PREFIX}${BASENAME}_${i}_1Warp.nii.gz \
-t ${OUTPUT_PREFIX}${BASENAME}_${i}_0GenericAffine.mat >> ${OUTPUT_PREFIX}${BASENAME}_${i}_log.txt"
copyImageHeaderCall="${ANTSPATH}/CopyImageHeaderInformation \
${TARGET_IMAGE} \
${OUTPUT_PREFIX}${BASENAME}_${i}_Warped.nii.gz \
${OUTPUT_PREFIX}${BASENAME}_${i}_Warped.nii.gz 1 1 1"
copyLabelsHeaderCall="${ANTSPATH}/CopyImageHeaderInformation \
${TARGET_IMAGE} \
${OUTPUT_PREFIX}${BASENAME}_${i}_WarpedLabels.nii.gz \
${OUTPUT_PREFIX}${BASENAME}_${i}_WarpedLabels.nii.gz 1 1 1"
rm -f $qscript
if [[ $DOQSUB -eq 5 ]];
then
# SLURM job scripts must start with a shebang
echo '#!/bin/sh' > $qscript
fi
echo "$registrationCall" >> $qscript
echo "$labelXfrmCall" >> $qscript
echo "$copyImageHeaderCall" >> $qscript
echo "$copyLabelsHeaderCall" >> $qscript
if [[ $DOQSUB -eq 1 ]];
then
id=`qsub -cwd -S /bin/bash -N antsJlfReg -v ANTSPATH=$ANTSPATH $QSUB_OPTS $qscript | awk '{print $3}'`
jobIDs="$jobIDs $id"
sleep 0.5
elif [[ $DOQSUB -eq 4 ]];
then
id=`qsub -N antsJlfReg -v ANTSPATH=$ANTSPATH $QSUB_OPTS -q nopreempt -l nodes=1:ppn=1 -l mem=${REGISTRATION_MEMORY} -l walltime=${REGISTRATION_WALLTIME} $qscript | awk '{print $1}'`
jobIDs="$jobIDs $id"
sleep 0.5
elif [[ $DOQSUB -eq 3 ]];
then
id=`xgrid $XGRID_OPTS -job submit /bin/bash $qscript | awk '{sub(/;/,"");print $3}' | tr '\n' ' ' | sed 's: *: :g'`
jobIDs="$jobIDs $id"
elif [[ $DOQSUB -eq 5 ]];
then
id=`sbatch --job-name=antsJlfReg${i} --export=ANTSPATH=$ANTSPATH $QSUB_OPTS --nodes=1 --cpus-per-task=1 --time=${REGISTRATION_WALLTIME} --mem=${REGISTRATION_MEMORY} $qscript | rev | cut -f1 -d\ | rev`
jobIDs="$jobIDs $id"
sleep 0.5
elif [[ $DOQSUB -eq 0 ]];
then
echo $qscript
bash $qscript
fi
done
if [[ $DOQSUB -eq 2 ]];
then
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting JLF on max ${CORES} cpucores. "
echo "--------------------------------------------------------------------------------------"
chmod +x ${OUTPUT_DIR}/job_*.sh
$PEXEC -j ${CORES} "sh" ${OUTPUT_DIR}/job_*.sh
fi
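# Base antsJointFusion call; the warped atlas/label pairs, output arguments, and target mask are appended below for each execution mode.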
jlfCall="${ANTSPATH}/antsJointFusion -d ${DIM} -t $TARGET_IMAGE --verbose 1 "
if [[ $DOQSUB -eq 0 ]];
then
# Run job locally
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting JLF"
echo "--------------------------------------------------------------------------------------"
EXISTING_WARPED_ATLAS_IMAGES=()
EXISTING_WARPED_ATLAS_LABELS=()
for (( i = 0; i < ${#WARPED_ATLAS_IMAGES[@]}; i++ ))
do
echo ${WARPED_ATLAS_IMAGES[$i]}
if [[ -f ${WARPED_ATLAS_IMAGES[$i]} ]] && [[ -f ${WARPED_ATLAS_LABELS[$i]} ]];
then
EXISTING_WARPED_ATLAS_IMAGES[${#EXISTING_WARPED_ATLAS_IMAGES[@]}]=${WARPED_ATLAS_IMAGES[$i]}
EXISTING_WARPED_ATLAS_LABELS[${#EXISTING_WARPED_ATLAS_LABELS[@]}]=${WARPED_ATLAS_LABELS[$i]}
fi
done
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -lt 2 ]];
then
echo "Error: At least 2 warped image/label pairs needs to exist for jointFusion."
exit 1
fi
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -ne ${#WARPED_ATLAS_LABELS[@]} ]];
then
echo "Warning: One or more registrations failed."
fi
maskCall=''
if [[ $MAJORITYVOTE -eq 1 ]];
then
jlfCall="${ANTSPATH}/ImageMath ${DIM} ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz MajorityVoting ${EXISTING_WARPED_ATLAS_LABELS[@]} "
else
for (( i = 0; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
jlfCall="${jlfCall} -g ${EXISTING_WARPED_ATLAS_IMAGES[$i]} -l ${EXISTING_WARPED_ATLAS_LABELS[$i]}"
done
if [[ -z "${OUTPUT_POSTERIORS_FORMAT}" ]];
then
jlfCall="${jlfCall} -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz]"
else
jlfCall="${jlfCall} -r 1 -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz,${OUTPUT_POSTERIORS_FORMAT}]"
fi
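# Build the fusion mask: Otsu-threshold the target ('otsu'), take the voxelwise union of the warped atlases ('or'), or use a user-supplied mask file.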
if [[ ${TARGET_MASK_IMAGE} == 'otsu' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOtsu.nii.gz"
maskCall="${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_IMAGE} ${TARGET_MASK_IMAGE} Otsu 1;"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ ${TARGET_MASK_IMAGE} == 'or' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOr.nii.gz"
maskCall="${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${EXISTING_WARPED_ATLAS_IMAGES[0]} ${EXISTING_WARPED_ATLAS_IMAGES[1]};"
for (( i = 2; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
maskCall="${maskCall} ${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${TARGET_MASK_IMAGE} ${EXISTING_WARPED_ATLAS_IMAGES[$i]};"
done
maskCall="${maskCall} ${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_MASK_IMAGE} ${TARGET_MASK_IMAGE} 0 0 0 1"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ -f ${TARGET_MASK_IMAGE} ]];
then
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
fi
fi
qscript2="${OUTPUT_PREFIX}JLF.sh"
echo "$maskCall" > $qscript2
echo "$jlfCall" >> $qscript2
echo $qscript2
bash $qscript2
fi
if [[ $DOQSUB -eq 1 ]];
then
# Run jobs on SGE and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting JLF on SGE cluster. "
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/waitForSGEQJobs.pl 1 600 $jobIDs
# Returns 1 if there are errors
if [[ ! $? -eq 0 ]];
then
echo "qsub submission failed - jobs went into error state"
exit 1;
fi
EXISTING_WARPED_ATLAS_IMAGES=()
EXISTING_WARPED_ATLAS_LABELS=()
for (( i = 0; i < ${#WARPED_ATLAS_IMAGES[@]}; i++ ))
do
echo ${WARPED_ATLAS_IMAGES[$i]}
if [[ -f ${WARPED_ATLAS_IMAGES[$i]} ]] && [[ -f ${WARPED_ATLAS_LABELS[$i]} ]];
then
EXISTING_WARPED_ATLAS_IMAGES[${#EXISTING_WARPED_ATLAS_IMAGES[@]}]=${WARPED_ATLAS_IMAGES[$i]}
EXISTING_WARPED_ATLAS_LABELS[${#EXISTING_WARPED_ATLAS_LABELS[@]}]=${WARPED_ATLAS_LABELS[$i]}
fi
done
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -lt 2 ]];
then
echo "Error: At least 2 warped image/label pairs needs to exist for jointFusion."
exit 1
fi
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -ne ${#WARPED_ATLAS_LABELS[@]} ]];
then
echo "Warning: One or more registrations failed."
fi
maskCall=''
if [[ $MAJORITYVOTE -eq 1 ]];
then
jlfCall="${ANTSPATH}/ImageMath ${DIM} ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz MajorityVoting ${EXISTING_WARPED_ATLAS_LABELS[@]} "
else
for (( i = 0; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
jlfCall="${jlfCall} -g ${EXISTING_WARPED_ATLAS_IMAGES[$i]} -l ${EXISTING_WARPED_ATLAS_LABELS[$i]}"
done
if [[ -z "${OUTPUT_POSTERIORS_FORMAT}" ]];
then
jlfCall="${jlfCall} -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz]"
else
jlfCall="${jlfCall} -r 1 -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz,${OUTPUT_POSTERIORS_FORMAT}]"
fi
if [[ ${TARGET_MASK_IMAGE} == 'otsu' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOtsu.nii.gz"
maskCall="${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_IMAGE} ${TARGET_MASK_IMAGE} Otsu 1;"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ ${TARGET_MASK_IMAGE} == 'or' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOr.nii.gz"
maskCall="${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${EXISTING_WARPED_ATLAS_IMAGES[0]} ${EXISTING_WARPED_ATLAS_IMAGES[1]};"
for (( i = 2; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
maskCall="${maskCall} ${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${TARGET_MASK_IMAGE} ${EXISTING_WARPED_ATLAS_IMAGES[$i]};"
done
maskCall="${maskCall} ${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_MASK_IMAGE} ${TARGET_MASK_IMAGE} 0 0 0 1"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ -f ${TARGET_MASK_IMAGE} ]];
then
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
fi
fi
qscript2="${OUTPUT_PREFIX}JLF.sh"
echo "$maskCall" > $qscript2
echo "$jlfCall" >> $qscript2
jobIDs=`qsub -cwd -S /bin/bash -N antsJlf -v ANTSPATH=$ANTSPATH $QSUB_OPTS $qscript2 | awk '{print $3}'`
${ANTSPATH}/waitForSGEQJobs.pl 1 600 $jobIDs
fi
if [[ $DOQSUB -eq 4 ]];
then
# Run jobs on PBS and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting JLF on PBS cluster. "
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/waitForPBSQJobs.pl 1 600 $jobIDs
# Returns 1 if there are errors
if [[ ! $? -eq 0 ]];
then
echo "qsub submission failed - jobs went into error state"
exit 1;
fi
EXISTING_WARPED_ATLAS_IMAGES=()
EXISTING_WARPED_ATLAS_LABELS=()
for (( i = 0; i < ${#WARPED_ATLAS_IMAGES[@]}; i++ ))
do
echo ${WARPED_ATLAS_IMAGES[$i]}
if [[ -f ${WARPED_ATLAS_IMAGES[$i]} ]] && [[ -f ${WARPED_ATLAS_LABELS[$i]} ]];
then
EXISTING_WARPED_ATLAS_IMAGES[${#EXISTING_WARPED_ATLAS_IMAGES[@]}]=${WARPED_ATLAS_IMAGES[$i]}
EXISTING_WARPED_ATLAS_LABELS[${#EXISTING_WARPED_ATLAS_LABELS[@]}]=${WARPED_ATLAS_LABELS[$i]}
fi
done
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -lt 2 ]];
then
echo "Error: At least 2 warped image/label pairs needs to exist for jointFusion."
exit 1
fi
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -ne ${#WARPED_ATLAS_LABELS[@]} ]];
then
echo "Warning: One or more registrations failed."
fi
maskCall=''
if [[ $MAJORITYVOTE -eq 1 ]];
then
jlfCall="${ANTSPATH}/ImageMath ${DIM} ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz MajorityVoting ${EXISTING_WARPED_ATLAS_LABELS[@]} "
else
for (( i = 0; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
jlfCall="${jlfCall} -g ${EXISTING_WARPED_ATLAS_IMAGES[$i]} -l ${EXISTING_WARPED_ATLAS_LABELS[$i]}"
done
if [[ -z "${OUTPUT_POSTERIORS_FORMAT}" ]];
then
jlfCall="${jlfCall} -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz]"
else
jlfCall="${jlfCall} -r 1 -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz,${OUTPUT_POSTERIORS_FORMAT}]"
fi
if [[ ${TARGET_MASK_IMAGE} == 'otsu' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOtsu.nii.gz"
maskCall="${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_IMAGE} ${TARGET_MASK_IMAGE} Otsu 1;"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ ${TARGET_MASK_IMAGE} == 'or' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOr.nii.gz"
maskCall="${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${EXISTING_WARPED_ATLAS_IMAGES[0]} ${EXISTING_WARPED_ATLAS_IMAGES[1]};"
for (( i = 2; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
maskCall="${maskCall} ${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${TARGET_MASK_IMAGE} ${EXISTING_WARPED_ATLAS_IMAGES[$i]};"
done
maskCall="${maskCall} ${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_MASK_IMAGE} ${TARGET_MASK_IMAGE} 0 0 0 1"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ -f ${TARGET_MASK_IMAGE} ]];
then
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
fi
fi
qscript2="${OUTPUT_PREFIX}JLF.sh"
echo "$maskCall" > $qscript2
echo "$jlfCall" >> $qscript2
jobIDs=`qsub -N antsJlf -v ANTSPATH=$ANTSPATH $QSUB_OPTS -q nopreempt -l nodes=1:ppn=1 -l mem=${JLF_MEMORY} -l walltime=${JLF_WALLTIME} $qscript2 | awk '{print $1}'`
${ANTSPATH}/waitForPBSQJobs.pl 1 600 $jobIDs
fi
if [[ $DOQSUB -eq 2 ]];
then
EXISTING_WARPED_ATLAS_IMAGES=()
EXISTING_WARPED_ATLAS_LABELS=()
for (( i = 0; i < ${#WARPED_ATLAS_IMAGES[@]}; i++ ))
do
echo ${WARPED_ATLAS_IMAGES[$i]}
if [[ -f ${WARPED_ATLAS_IMAGES[$i]} ]] && [[ -f ${WARPED_ATLAS_LABELS[$i]} ]];
then
EXISTING_WARPED_ATLAS_IMAGES[${#EXISTING_WARPED_ATLAS_IMAGES[@]}]=${WARPED_ATLAS_IMAGES[$i]}
EXISTING_WARPED_ATLAS_LABELS[${#EXISTING_WARPED_ATLAS_LABELS[@]}]=${WARPED_ATLAS_LABELS[$i]}
fi
done
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -lt 2 ]];
then
echo "Error: At least 2 warped image/label pairs needs to exist for jointFusion."
exit 1
fi
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -ne ${#WARPED_ATLAS_LABELS[@]} ]];
then
echo "Warning: One or more registrations failed."
fi
maskCall=''
if [[ $MAJORITYVOTE -eq 1 ]];
then
jlfCall="${ANTSPATH}/ImageMath ${DIM} ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz MajorityVoting ${EXISTING_WARPED_ATLAS_LABELS[@]} "
else
for (( i = 0; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
jlfCall="${jlfCall} -g ${EXISTING_WARPED_ATLAS_IMAGES[$i]} -l ${EXISTING_WARPED_ATLAS_LABELS[$i]}"
done
if [[ -z "${OUTPUT_POSTERIORS_FORMAT}" ]];
then
jlfCall="${jlfCall} -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz]"
else
jlfCall="${jlfCall} -r 1 -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz,${OUTPUT_POSTERIORS_FORMAT}]"
fi
if [[ ${TARGET_MASK_IMAGE} == 'otsu' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOtsu.nii.gz"
maskCall="${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_IMAGE} ${TARGET_MASK_IMAGE} Otsu 1;"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ ${TARGET_MASK_IMAGE} == 'or' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOr.nii.gz"
maskCall="${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${EXISTING_WARPED_ATLAS_IMAGES[0]} ${EXISTING_WARPED_ATLAS_IMAGES[1]};"
for (( i = 2; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
maskCall="${maskCall} ${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${TARGET_MASK_IMAGE} ${EXISTING_WARPED_ATLAS_IMAGES[$i]};"
done
maskCall="${maskCall} ${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_MASK_IMAGE} ${TARGET_MASK_IMAGE} 0 0 0 1"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ -f ${TARGET_MASK_IMAGE} ]];
then
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
fi
fi
qscript2="${OUTPUT_PREFIX}JLF.sh"
echo "$maskCall" > $qscript2
echo "$jlfCall" >> $qscript2
sh $qscript2
fi
if [[ $DOQSUB -eq 3 ]];
then
# Run jobs on XGrid and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting JLF on XGrid cluster. Submitted $count jobs "
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/waitForXGridJobs.pl -xgridflags "$XGRID_OPTS" -verbose -delay 30 $jobIDs
# Returns 1 if there are errors
if [[ ! $? -eq 0 ]];
then
echo "XGrid submission failed - jobs went into error state"
exit 1;
fi
EXISTING_WARPED_ATLAS_IMAGES=()
EXISTING_WARPED_ATLAS_LABELS=()
for (( i = 0; i < ${#WARPED_ATLAS_IMAGES[@]}; i++ ))
do
echo ${WARPED_ATLAS_IMAGES[$i]}
if [[ -f ${WARPED_ATLAS_IMAGES[$i]} ]] && [[ -f ${WARPED_ATLAS_LABELS[$i]} ]];
then
EXISTING_WARPED_ATLAS_IMAGES[${#EXISTING_WARPED_ATLAS_IMAGES[@]}]=${WARPED_ATLAS_IMAGES[$i]}
EXISTING_WARPED_ATLAS_LABELS[${#EXISTING_WARPED_ATLAS_LABELS[@]}]=${WARPED_ATLAS_LABELS[$i]}
fi
done
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -lt 2 ]];
then
echo "Error: At least 2 warped image/label pairs needs to exist for jointFusion."
exit 1
fi
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -ne ${#WARPED_ATLAS_LABELS[@]} ]];
then
echo "Warning: One or more registrations failed."
fi
maskCall=''
if [[ $MAJORITYVOTE -eq 1 ]];
then
jlfCall="${ANTSPATH}/ImageMath ${DIM} ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz MajorityVoting ${EXISTING_WARPED_ATLAS_LABELS[@]} "
else
for (( i = 0; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
jlfCall="${jlfCall} -g ${EXISTING_WARPED_ATLAS_IMAGES[$i]} -l ${EXISTING_WARPED_ATLAS_LABELS[$i]}"
done
if [[ -z "${OUTPUT_POSTERIORS_FORMAT}" ]];
then
jlfCall="${jlfCall} -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz]"
else
jlfCall="${jlfCall} -r 1 -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz,${OUTPUT_POSTERIORS_FORMAT}]"
fi
if [[ ${TARGET_MASK_IMAGE} == 'otsu' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOtsu.nii.gz"
maskCall="${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_IMAGE} ${TARGET_MASK_IMAGE} Otsu 1;"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ ${TARGET_MASK_IMAGE} == 'or' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOr.nii.gz"
maskCall="${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${EXISTING_WARPED_ATLAS_IMAGES[0]} ${EXISTING_WARPED_ATLAS_IMAGES[1]};"
for (( i = 2; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
maskCall="${maskCall} ${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${TARGET_MASK_IMAGE} ${EXISTING_WARPED_ATLAS_IMAGES[$i]};"
done
maskCall="${maskCall} ${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_MASK_IMAGE} ${TARGET_MASK_IMAGE} 0 0 0 1"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ -f ${TARGET_MASK_IMAGE} ]];
then
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
fi
fi
qscript2="${OUTPUT_PREFIX}JLF.sh"
echo "$maskCall" > $qscript2
echo "$jlfCall" >> $qscript2
sh $qscript2
fi
if [[ $DOQSUB -eq 5 ]];
then
# Run jobs on SLURM and wait to finish
echo
echo "--------------------------------------------------------------------------------------"
echo " Starting JLF on SLURM cluster. "
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}/waitForSlurmJobs.pl 1 600 $jobIDs
# Returns 1 if there are errors
if [[ ! $? -eq 0 ]];
then
echo "SLURM submission failed - jobs went into error state"
exit 1;
fi
# Remove the SLURM output files (which are likely to be empty)
rm -f ${OUTPUT_DIR}/slurm-*.out
EXISTING_WARPED_ATLAS_IMAGES=()
EXISTING_WARPED_ATLAS_LABELS=()
for (( i = 0; i < ${#WARPED_ATLAS_IMAGES[@]}; i++ ))
do
echo ${WARPED_ATLAS_IMAGES[$i]}
if [[ -f ${WARPED_ATLAS_IMAGES[$i]} ]] && [[ -f ${WARPED_ATLAS_LABELS[$i]} ]];
then
EXISTING_WARPED_ATLAS_IMAGES[${#EXISTING_WARPED_ATLAS_IMAGES[@]}]=${WARPED_ATLAS_IMAGES[$i]}
EXISTING_WARPED_ATLAS_LABELS[${#EXISTING_WARPED_ATLAS_LABELS[@]}]=${WARPED_ATLAS_LABELS[$i]}
fi
done
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -lt 2 ]];
then
echo "Error: At least 2 warped image/label pairs needs to exist for jointFusion."
exit 1
fi
if [[ ${#EXISTING_WARPED_ATLAS_LABELS[@]} -ne ${#WARPED_ATLAS_LABELS[@]} ]];
then
echo "Warning: One or more registrations failed."
fi
maskCall=''
if [[ $MAJORITYVOTE -eq 1 ]];
then
jlfCall="${ANTSPATH}/ImageMath ${DIM} ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz MajorityVoting ${EXISTING_WARPED_ATLAS_LABELS[@]} "
else
for (( i = 0; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
jlfCall="${jlfCall} -g ${EXISTING_WARPED_ATLAS_IMAGES[$i]} -l ${EXISTING_WARPED_ATLAS_LABELS[$i]}"
done
if [[ -z "${OUTPUT_POSTERIORS_FORMAT}" ]];
then
jlfCall="${jlfCall} -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz]"
else
jlfCall="${jlfCall} -r 1 -o [${OUTPUT_PREFIX}Labels.nii.gz,${OUTPUT_PREFIX}Intensity.nii.gz,${OUTPUT_POSTERIORS_FORMAT}]"
fi
if [[ ${TARGET_MASK_IMAGE} == 'otsu' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOtsu.nii.gz"
maskCall="${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_IMAGE} ${TARGET_MASK_IMAGE} Otsu 1;"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ ${TARGET_MASK_IMAGE} == 'or' ]];
then
TARGET_MASK_IMAGE="${OUTPUT_PREFIX}TargetMaskImageOr.nii.gz"
maskCall="${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${EXISTING_WARPED_ATLAS_IMAGES[0]} ${EXISTING_WARPED_ATLAS_IMAGES[1]};"
for (( i = 2; i < ${#EXISTING_WARPED_ATLAS_IMAGES[@]}; i++ ))
do
maskCall="${maskCall} ${ANTSPATH}/ImageMath ${DIM} ${TARGET_MASK_IMAGE} max ${TARGET_MASK_IMAGE} ${EXISTING_WARPED_ATLAS_IMAGES[$i]};"
done
maskCall="${maskCall} ${ANTSPATH}/ThresholdImage ${DIM} ${TARGET_MASK_IMAGE} ${TARGET_MASK_IMAGE} 0 0 0 1"
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
elif [[ -f ${TARGET_MASK_IMAGE} ]];
then
jlfCall="${jlfCall} -x ${TARGET_MASK_IMAGE}"
fi
fi
qscript2="${OUTPUT_PREFIX}JLF.sh"
echo "#!/bin/sh" > $qscript2
echo "$maskCall" >> $qscript2
echo "$jlfCall" >> $qscript2
jobIDs=`sbatch --job-name=antsJlf --export=ANTSPATH=$ANTSPATH $QSUB_OPTS --nodes=1 --cpus-per-task=1 --time=${JLF_WALLTIME} --mem=${JLF_MEMORY} $qscript2 | rev | cut -f1 -d\ | rev`
${ANTSPATH}/waitForSlurmJobs.pl 1 600 $jobIDs
fi
# clean up
rm -f ${OUTPUT_DIR}/job_*.sh
if [[ $KEEP_ALL_IMAGES -eq 0 ]];
then
rm -f ${WARPED_ATLAS_IMAGES[@]}
rm -f ${INVERSE_WARPED_ATLAS_IMAGES[@]}
rm -f ${WARPED_ATLAS_LABELS[@]}
rm -f ${AFFINE_FILES[@]}
rm -f ${WARP_FIELDS[@]}
rm -f ${INVERSE_WARP_FIELDS[@]}
rm -f $qscript
rm -f $qscript2
rm -f ${OUTPUT_DIR}/slurm-*.out
fi
time_end=`date +%s`
time_elapsed=$((time_end - time_start))
echo
echo "--------------------------------------------------------------------------------------"
if [[ $MAJORITYVOTE -eq 1 ]];
then
echo " Done creating: ${OUTPUT_PREFIX}MajorityVotingLabels.nii.gz"
else
echo " Done creating: ${OUTPUT_PREFIX}Labels.nii.gz"
fi
echo " Script executed in $time_elapsed seconds"
echo " $(( time_elapsed / 3600 ))h $(( time_elapsed %3600 / 60 ))m $(( time_elapsed % 60 ))s"
echo "--------------------------------------------------------------------------------------"
exit 0
|
dawnsong/ANTs
|
Scripts/antsJointLabelFusion.sh
|
Shell
|
bsd-3-clause
| 41,677 |
#
# Below we run a few versions of the model
# - openmp, without local timestepping
# - coarray, without local timestepping
# - openmp, with local timestepping
#
export SWALS_SRC='../../../src'
source ${SWALS_SRC}/test_run_commands
echo 'Will run openmp version with: ' $OMP_RUN_COMMAND
echo 'Will run coarray version with: ' $CAF_RUN_COMMAND
#
# Run regular openmp model without local timestepping
#
# Clean existing binary
rm ./BP09
rm -r ./OUTPUTS
# Build the code
make -B -f make_BP09 > build_outfile.log
# Run the code
#eval "$OMP_RUN_COMMAND ./BP09 load_balance_6_trivial.txt > outfile_omp.log"
eval "$OMP_RUN_COMMAND ./BP09 '' > outfile_omp.log"
# Plot and report tests
echo '# Testing openmp version '
Rscript plot_results.R lowresolution_omp
# Move the openmp results to a new folder
mv $( dirname OUTPUTS/RUN*/multidomain_log.log ) OUTPUTS/openmp_results
#
# Same model with coarray -- results are not identical to the run above due to
# domain partitioning (but close)
#
# Clean existing binary
rm ./BP09
#rm -r ./OUTPUTS
# Build the code
make -B -f make_BP09_coarray > build_outfile.log
# Run the code
#eval "$CAF_RUN_COMMAND ./BP09 load_balance_6_trivial.txt > outfile_ca.log"
eval "$CAF_RUN_COMMAND ./BP09 '' > outfile_ca.log"
# Plot and report tests
echo '# Testing coarray version '
Rscript plot_results.R lowresolution_coarray
# Move the coarray results to a new folder
mv $( dirname OUTPUTS/RUN*/mu*001.log ) OUTPUTS/coarray_results
# Run the comparison script
echo '# Comparing coarray/openmp versions '
Rscript compare_logs_coarray_openmp.R
#
# Openmp model with local timestepping and partitioning -- results not
# identical to previous due to local timestepping (but close)
#
rm ./BP09
#rm -r ./OUTPUTS
# Build the code
make -B -f make_BP09_localtimestep > build_outfile.log
# Run the code
eval "$OMP_RUN_COMMAND ./BP09 'load_balance_6_trivial.txt' > outfile_omp.log"
# Plot and report tests
echo '# Testing openmp version '
Rscript plot_results.R lowresolution_omp_localtimestep
# Move the results to a new folder
mv $( dirname OUTPUTS/RUN*/mu*001.log ) OUTPUTS/openmp_results_localtimestep
# Run the comparison script
echo '# Comparing openmp/openmp-localtimestep versions '
Rscript compare_logs_openmp_localtimestep.R
|
GeoscienceAustralia/ptha
|
propagation/SWALS/examples/nthmp/BP09/run_model.sh
|
Shell
|
bsd-3-clause
| 2,242 |
#!/bin/bash
export CC=$PREFIX/bin/clang
export CXX=$PREFIX/bin/clang++
export CONDA_BUILD_SYSROOT=$PREFIX/$HOST/sysroot
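# Patch the sources before configuring: point hipconfig at the environment's perl and disable the HIT-dependent CMake branch.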
sed -i '1c#!/usr/bin/env perl' bin/hipconfig
sed -i 's/if(${RUN_HIT} EQUAL 0)/if(FALSE)/g' CMakeLists.txt
mkdir build
cd build
cmake \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_LIBDIR=lib \
-DHIP_COMPILER=clang \
-DHSA_PATH=$PREFIX \
-DHIP_PATH=$PREFIX \
-DHIP_CLANG_PATH=$PREFIX/bin \
-DDEVICE_LIB_PATH=$PREFIX/lib \
-DBUILD_HIPIFY_CLANG=yes \
..
make VERBOSE=1 -j${CPU_COUNT}
make install
# Copy the [de]activate scripts to $PREFIX/etc/conda/[de]activate.d.
# This will allow them to be run on environment activation.
for CHANGE in "activate" "deactivate"
do
mkdir -p "${PREFIX}/etc/conda/${CHANGE}.d"
cp "${RECIPE_DIR}/activate/${CHANGE}.sh" "${PREFIX}/etc/conda/${CHANGE}.d/${PKG_NAME}_${CHANGE}.sh"
done
|
isuruf/staged-recipes
|
recipes/hip/build.sh
|
Shell
|
bsd-3-clause
| 899 |
#!/bin/bash
# 'objcopy -O verilog' produces the x8 output.
# As we need x32 we are getting it from disasm results
echo @00000000 > program.hex
mips-mti-elf-objdump -D program.elf | sed -rn 's/\s+[a-f0-9]+:\s+([a-f0-9]*)\s+.*/\1/p' >> program.hex
|
Iuliiapl/schoolMIPS
|
scripts/program/common/03_generate_verilog_readmemh_file.sh
|
Shell
|
mit
| 248 |
# Function for creating an admin user in Magento.
#
# @author Oktay Acikalin <[email protected]>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)
function magento.create_admin_user () {
local username=$1
local password=$2
local email=$3
local firstname=$4
local lastname=$5
mysql.shell_cmd $(magento.get_db_connection_params default) <<< "
INSERT INTO admin_user SET
username='${username}',
password=md5('${password}'),
email='${email}',
firstname='${firstname}',
lastname='${lastname}',
is_active=1;
SET @user_id = LAST_INSERT_ID();
INSERT INTO admin_role SET
parent_id=1,
tree_level=2,
sort_order=0,
role_type='U',
user_id=@user_id,
role_name='${email}';
"
}
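# Example call (hypothetical values):
#   magento.create_admin_user admin 's3cr3t' [email protected] Jane Doe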
|
oktayacikalin/project-service
|
lib/magento.create_admin_user.sh
|
Shell
|
mit
| 879 |
#!/bin/sh
echo "Placing root files into dummy_app for testing"
mkdir -p tests/dummy_app/packages/meteor-mocha
rsync -av ./package/ tests/dummy_app/packages/meteor-mocha
cd tests/dummy_app/
meteor npm install
# We expect all unit tests to pass
npm run test:unit:nightmare 2> /dev/null
unit_status=$?
echo "npm run test:unit:nightmare exited with code $unit_status"
if [ $unit_status -ne 0 ]; then
exit 1 # Our suite fails because tests that should have passed failed
fi
# We expect all app tests to fail
npm run test:app:nightmare 2> /dev/null
app_status=$?
echo "npm run test:app:nightmare exited with code $app_status"
if [ $app_status -ne 1 ]; then
exit 1 # Our suite fails because tests that should have failed passed
fi
exit 0
|
meteortesting/meteor-mocha
|
test.sh
|
Shell
|
mit
| 673 |
#!/usr/bin/ksh
#
# edit parameters for a load.
#
###############################################################################
#
CMD=$(basename ${0})
#
oper=none
loadstatus=oper
loadstatusset=no
newlabloads=/tmp/newlabloads$$
newlabloads2=/tmp/newlabloads2$$
alllabids=/tmp/alllabids$$
#
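# Remove this run's temp files on exit, hangup, interrupt, or terminate.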
trap 'rm -f /tmp/*$$ 1>/dev/null 2>&1' 0 1 2 15
#
usage() {
echo
echo "usage: $CMD [-x?] [-t status] -a branch cpuload [labid [labid ...] ]"
echo " or"
echo "usage: $CMD [-x?] -d branch cpuload [labid [labid ...] ]"
echo " or"
echo "usage: $CMD [-x?] [-t status] -b branch [branch ...]"
echo " or"
echo "usage: $CMD [-x?] [-t status] -c cpuload [cpuload ...]"
echo " or"
echo "usage: $CMD [-x?] [-t status] -l labid [labid ...]"
echo
echo "where:"
echo " -x - enable debug mode"
echo " -? - print usage message"
echo " -a - add branch/cpuload for reload of labid"
echo " -d - delete branch/cpuload for reload of labid"
echo " -b - list reload data filtered by branch"
echo " -c - list reload data filtered by cpuload"
echo " -l - list reload data filtered by labid"
echo " -t status - load status when adding a load to a labid, oper or test"
echo
echo "the last option [-a|-d|-b|-c|-l] given takes precedence."
echo "the default load status for -a is 'oper' if -t is not used."
echo "-t option can also be used to query with the -b, -c and -l options."
echo "if a list of labids is NOT given when using the -a or -d options,"
echo "then the requested operation is applied to all labids assigned"
echo "to the given release/load."
echo
}
#
function add_to_reload {
echo
echo "Starting add_to_reload: ${*}"
#
if [[ $# -lt 2 ]]
then
echo
echo "Missing parameters: branch, cpuload, [labid [labid ...]]"
return 2
fi
#
case "${loadstatus}" in
oper|test)
;;
*)
echo
echo "Invalid load status value ${loadstatus}."
echo "Allowed values are: 'oper' and 'test'."
return 2
;;
esac
#
branch=${1}
cpuload=${2}
shift 2
#
# back up files before updating
#
# backupdir="BACKUP/$(date '+%y%m%d%H%M%S')"
# [ ! -d ${backupdir} ] && mkdir -p ${backupdir};
# cp loads images labloads ${backupdir}
#
# remove any old tuples for this release, load, and labid.
#
cp labloads $newlabloads
#
# check if a list of labids was given. if not, try to
# generate one.
#
if [[ $# == 0 ]]
then
echo
echo "Generating the list of labids for ${branch}/${cpuload}"
#
uprintf -q -f"%s\n" labid in labloads where cpuload req "^${cpuload}\$" and branch req "^${branch}\$" >${alllabids}
if [[ ! -s "${alllabids}" ]]
then
echo
echo "No labids found for ${branch}/${cpuload}"
return 2
else
cat ${alllabids}
set -- $(cat ${alllabids})
fi
fi
#
for labid in ${*}
do
echo
echo "Deleting $branch/$cpuload from reload of $labid."
grep -v "^${labid} ${branch} ${cpuload} " $newlabloads >$newlabloads2
mv $newlabloads2 $newlabloads
done
#
echo
echo "DIFF of OLD and NEW labloads:"
diff labloads $newlabloads
#
for labid in ${*}
do
echo
echo "Adding $branch/$cpuload for reload of $labid."
#
echo "${labid} ${branch} ${cpuload} ${loadstatus}" >>$newlabloads
done
#
# backup of old labloads
[ ! -d BACKUP ] && mkdir -p BACKUP ;
echo $(date '+%y%m%d%H%M%S') >>BACKUP/configure.logfile
echo 'diff labloads newlabloads' >>BACKUP/configure.logfile
diff labloads $newlabloads >>BACKUP/configure.logfile
#
cat $newlabloads | sort -u >labloads
#
return 0
}
#
function delete_from_reload {
#
echo
echo "Starting delete_from_reload: ${*}"
#
if [[ $# -lt 2 ]]
then
echo
echo "Missing parameters: branch, cpuload, [labid [labid ...]]"
return 2
fi
#
branch=${1}
cpuload=${2}
shift 2
#
# back up files before updating
#
# backupdir="BACKUP/$(date '+%y%m%d%H%M%S')"
# [ ! -d ${backupdir} ] && mkdir -p ${backupdir};
# cp loads images labloads ${backupdir}
#
cp labloads $newlabloads
#
# check if a list of labids was given. if not, try to
# generate one.
#
if [[ $# == 0 ]]
then
echo
echo "Generating the list of labids for ${branch}/${cpuload}"
#
uprintf -q -f"%s\n" labid in labloads where cpuload req "^${cpuload}\$" and branch req "^${branch}\$" >${alllabids}
if [[ ! -s "${alllabids}" ]]
then
echo
echo "No labids found for ${branch}/${cpuload}"
return 2
else
cat ${alllabids}
set -- $(cat ${alllabids})
fi
fi
#
#
for labid in ${*}
do
echo
echo "Deleting $branch/$cpuload from reload of $labid."
grep -v "^${labid} ${branch} ${cpuload} " $newlabloads >$newlabloads2
mv $newlabloads2 $newlabloads
done
#
# backup of old labloads
[ ! -d BACKUP ] && mkdir -p BACKUP ;
echo $(date '+%y%m%d%H%M%S') >>BACKUP/configure.logfile
echo 'diff labloads newlabloads' >>BACKUP/configure.logfile
diff labloads $newlabloads >>BACKUP/configure.logfile
#
cat $newlabloads | sort -u >labloads
#
return 0
}
#
function list_by_branch {
echo
echo "Starting list_by_branch: ${*}"
#
if [[ $# -lt 1 ]]
then
echo
echo "Load data for all branches: "
uprint loads
#
echo
echo "Lab Load data for all branches:"
uprint labloads
return 0
fi
#
if [[ "${loadstatusset}" == yes ]]
then
statusclause=" and status req ^${loadstatus}$"
else
statusclause=
fi
#
for branch in ${*}
do
echo
echo "Load data for branch $branch:"
uprint loads where branch req "^$branch$"
#
echo
echo "Lab Load data for branch $branch:"
uprint labloads where branch req "^$branch$" $statusclause
done
#
return 0
}
#
function list_by_cpuload {
echo
echo "Starting list_by_cpuload: ${*}"
#
if [[ $# -lt 1 ]]
then
echo
echo "Load data for all cpuloads: "
uprint loads
#
echo
echo "Lab Load data for all cpuloads:"
uprint labloads
return 0
fi
#
if [[ "${loadstatusset}" == yes ]]
then
statusclause=" and status req ^${loadstatus}$"
else
statusclause=
fi
#
for cpuload in ${*}
do
echo
echo "Load data for cpuload $cpuload:"
uprint loads where cpuload req "^$cpuload$"
#
echo
echo "Lab Load data for cpuload $cpuload:"
uprint labloads where cpuload req "^$cpuload$" $statusclause
done
#
return 0
}
#
function list_by_labid {
echo
echo "Starting list_by_labid: ${*}"
#
if [[ $# -lt 1 ]]
then
echo
echo "Lab Load data for all labids:"
uprint labloads
return 0
fi
#
if [[ "${loadstatusset}" == yes ]]
then
statusclause=" and status req ^${loadstatus}$"
else
statusclause=
fi
#
for labid in ${*}
do
echo
echo "Lab Load data for labid $labid:"
uprint labloads where labid req "^$labid$" $statusclause
done
#
return 0
}
#
function none {
echo
echo "Nothing to do."
#
usage
#
return 0
}
#
set -- $(getopt ?xadbclt: ${*})
if [[ ${?} -ne 0 ]]
then
usage
exit 0
fi
#
for opt in ${*}
do
case "${opt}" in
-x)
set -x
shift
;;
-a)
oper=add_to_reload
shift
;;
-d)
oper=delete_from_reload
shift
;;
-b)
oper=list_by_branch
shift
;;
-c)
oper=list_by_cpuload
shift
;;
-l)
oper=list_by_labid
shift
;;
-t)
loadstatus=${2}
loadstatusset=yes
shift 2
;;
--)
shift
break
;;
esac
done
#
echo
echo "Starting LCS Configure Load"
#
if [[ -z "${LCSTOOLSDATA}" ]]
then
echo
echo "LCSTOOLSDATA not defined." >&2
exit 2
fi
#
cd ${LCSTOOLSDATA}
#
${oper} ${*}
#
exit 0
|
ombt/ombt
|
lts08.lcstools/tools/src/saveload/configure.sh
|
Shell
|
mit
| 7,242 |
#!/bin/bash
Install_Nginx_1_4_7()
{
Echo_Blue "[+] Installing ${Nginx_Ver} ... "
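# NOTE: only prints a message; actual build/install steps are implemented only for nginx 1.8.0 below.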
}
Install_Nginx_1_6_3()
{
Echo_Blue "[+] Installing ${Nginx_Ver} ... "
}
Install_Nginx_1_8_0()
{
Echo_Blue "[+] Installing ${Nginx_Ver} ... "
groupadd www
useradd -s /sbin/nologin -g www www
Tar_Cd ${Nginx_Ver}.tar.gz ${Nginx_Ver}
./configure --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_spdy_module --with-http_gzip_static_module --with-ipv6 --with-http_sub_module ${NginxMAOpt}
make && make install
cd ../
ln -sf /usr/local/nginx/sbin/nginx /usr/bin/nginx
rm -f /usr/local/nginx/conf/nginx.conf
cd ${cur_dir}
if [ "${Stack}" = "lnamp" ]; then
\cp conf/nginx_a.conf /usr/local/nginx/conf/nginx.conf
\cp conf/proxy.conf /usr/local/nginx/conf/proxy.conf
else
\cp conf/nginx.conf /usr/local/nginx/conf/nginx.conf
fi
\cp conf/rewrite/dabr.conf /usr/local/nginx/conf/dabr.conf
\cp conf/rewrite/discuz.conf /usr/local/nginx/conf/discuz.conf
\cp conf/rewrite/sablog.conf /usr/local/nginx/conf/sablog.conf
\cp conf/rewrite/typecho.conf /usr/local/nginx/conf/typecho.conf
\cp conf/rewrite/typecho2.conf /usr/local/nginx/conf/typecho2.conf
\cp conf/rewrite/wordpress.conf /usr/local/nginx/conf/wordpress.conf
\cp conf/rewrite/discuzx.conf /usr/local/nginx/conf/discuzx.conf
\cp conf/rewrite/none.conf /usr/local/nginx/conf/none.conf
\cp conf/rewrite/wp2.conf /usr/local/nginx/conf/wp2.conf
\cp conf/rewrite/phpwind.conf /usr/local/nginx/conf/phpwind.conf
\cp conf/rewrite/shopex.conf /usr/local/nginx/conf/shopex.conf
\cp conf/rewrite/dedecms.conf /usr/local/nginx/conf/dedecms.conf
\cp conf/rewrite/drupal.conf /usr/local/nginx/conf/drupal.conf
\cp conf/rewrite/ecshop.conf /usr/local/nginx/conf/ecshop.conf
\cp conf/pathinfo.conf /usr/local/nginx/conf/pathinfo.conf
\cp conf/enable-php.conf /usr/local/nginx/conf/enable-php.conf
\cp conf/enable-php-pathinfo.conf /usr/local/nginx/conf/enable-php-pathinfo.conf
\cp conf/proxy-pass-php.conf /usr/local/nginx/conf/proxy-pass-php.conf
\cp conf/enable-ssl-example.conf /usr/local/nginx/conf/enable-ssl-example.conf
mkdir -p /home/wwwroot/default
chmod +w /home/wwwroot/default
mkdir -p /home/wwwlogs
chmod 777 /home/wwwlogs
chown -R www:www /home/wwwroot/default
mkdir /usr/local/nginx/conf/vhost
if [ "${Stack}" = "lnmp" ]; then
cat >/home/wwwroot/default/.user.ini<<EOF
open_basedir=/home/wwwroot/default:/tmp/:/proc/
EOF
chmod 644 /home/wwwroot/default/.user.ini
chattr +i /home/wwwroot/default/.user.ini
fi
\cp init.d/init.d.nginx /etc/init.d/nginx
chmod +x /etc/init.d/nginx
if [ "${SelectMalloc}" = "3" ]; then
mkdir /tmp/tcmalloc
chown -R www:www /tmp/tcmalloc
sed -i '/nginx.pid/a\
google_perftools_profiles /tmp/tcmalloc;' /usr/local/nginx/conf/nginx.conf
fi
}
# Default choice is Nginx 1.8.0
Install_Nginx()
{
if [ "${NginxSelect}" = "1" ]; then
Install_Nginx_1_4_7
elif [ "${NginxSelect}" = "2" ]; then
Install_Nginx_1_6_3
elif [ "${NginxSelect}" = "3" ]; then
Install_Nginx_1_8_0
else
Install_Nginx_1_8_0
fi
}
|
shines77/lnamp_shell
|
include/nginx.sh
|
Shell
|
mit
| 3,333 |
TIMEFORMAT=%R
TIMEOUT=20
echo "#Benchmarking scabolic"
echo "Timeout is set to ${TIMEOUT}s and results are in seconds"
for benchmarks in $@; do
echo "##Benchmarking $benchmarks"
./scabolic satlib --timeout=${TIMEOUT} --time $benchmarks
done
|
regb/scabolic
|
scripts/run-benchmark.sh
|
Shell
|
mit
| 245 |
#!/bin/sh
ffmpeg -y -nostdin \
-f decklink \
-i 'DeckLink Mini Recorder (1)@10' \
-c:v rawvideo -c:a pcm_s16le \
-pix_fmt yuv420p \
-f matroska \
tcp://localhost:10000
|
voc/voctomix
|
example-scripts/ffmpeg/source-decklink-cam1.sh
|
Shell
|
mit
| 174 |
#!/bin/bash
find ../corpora/ -name "*$1*.elf" -exec sh -c 'python ../dev/feature_extraction/cpc/cpc_extract.py -c {} > {}.cpc.chain' \;
|
syreal17/Cardinal
|
tests/create_chains.sh
|
Shell
|
mit
| 135 |
#!/bin/bash
filename="*txt"
for file in $filename
do
echo "Contents of $file"
echo "---"
cat "$file"
echo
done
|
yimng/LEARN
|
abs/11/catfile.sh
|
Shell
|
mit
| 117 |
source $setup
examples=$qtbase/src/examples
mkdir build
cd build
mkdir bin moc obj
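# Generate a small translation unit that statically imports the Qt platform integration plugins (needed when Qt is built statically).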
cat > obj/plugins.cpp <<EOF
#include <QtPlugin>
#ifdef _WIN32
Q_IMPORT_PLUGIN (QWindowsIntegrationPlugin);
#endif
#ifdef __linux__
Q_IMPORT_PLUGIN (QLinuxFbIntegrationPlugin);
Q_IMPORT_PLUGIN (QXcbIntegrationPlugin);
#endif
EOF
CFLAGS="-std=gnu++11"
echo "compiling reference to plugins"
$host-g++ $CFLAGS \
$(pkg-config-cross --cflags Qt5Core) \
-c obj/plugins.cpp \
-o obj/plugins.o
CFLAGS="$CFLAGS -g -I. $(pkg-config-cross --cflags Qt5Widgets)"
LIBS="$(pkg-config-cross --libs Qt5Widgets)"
LDFLAGS=""
if [ $os = "windows" ]; then
CFLAGS="-mwindows $CFLAGS"
fi
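# moc generates the Qt meta-object source for each example's Q_OBJECT header; it is compiled in alongside the example sources.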
echo "compiling dynamiclayouts"
$qtbase/bin/moc $examples/widgets/layouts/dynamiclayouts/dialog.h > moc/dynamiclayouts.cpp
$host-g++ $CFLAGS $LDFLAGS \
$examples/widgets/layouts/dynamiclayouts/dialog.cpp \
$examples/widgets/layouts/dynamiclayouts/main.cpp \
moc/dynamiclayouts.cpp \
obj/plugins.o \
$LIBS -o bin/dynamiclayouts$exe_suffix
echo "compiling rasterwindow"
$qtbase/bin/moc $examples/gui/rasterwindow/rasterwindow.h > moc/rasterwindow.cpp
$host-g++ $CFLAGS $LDFLAGS \
$examples/gui/rasterwindow/rasterwindow.cpp \
$examples/gui/rasterwindow/main.cpp \
moc/rasterwindow.cpp \
obj/plugins.o \
$LIBS -o bin/rasterwindow$exe_suffix
echo "compiling analogclock"
$host-g++ $CFLAGS $LDFLAGS \
-I$examples/gui/rasterwindow/ \
$examples/gui/analogclock/main.cpp \
$examples/gui/rasterwindow/rasterwindow.cpp \
moc/rasterwindow.cpp \
obj/plugins.o \
$LIBS -o bin/analogclock$exe_suffix
# We haven't gotten OpenGL support to work on Linux yet (TODO)
if [ $os != "linux" ]; then
echo "compiling openglwindow"
$qtbase/bin/moc $examples/gui/openglwindow/openglwindow.h > moc/openglwindow.cpp
$host-g++ $CFLAGS $LDFLAGS \
$examples/gui/openglwindow/main.cpp \
$examples/gui/openglwindow/openglwindow.cpp \
moc/openglwindow.cpp \
obj/plugins.o \
$LIBS -o bin/openglwindow$exe_suffix
fi
# TODO: try to compile some stuff with $qtbase/bin/qmake too, make sure that works
mkdir -p $out/bin
for prog in analogclock dynamiclayouts openglwindow rasterwindow; do
if [ -f bin/$prog ]; then
$host-strip bin/$prog
cp bin/$prog $out/bin/
fi
done
if [ $os = "linux" ]; then
cp $dejavu/ttf/DejaVuSans.ttf $out/bin/
fi
|
jfranklin9000/urbit
|
nix/nixcrpkgs/pkgs/qt/examples_builder.sh
|
Shell
|
mit
| 2,339 |
#!/bin/bash
#
# For EL, see the slurm directory. Since there are no upstream slurm packages, we
# build slurm, munge, and slurm-drmaa all in one.
#
# On Ubuntu we only build the source package, for uploading to a PPA.
set -e
#set -xv
pkg=slurm-drmaa
# increment $pkg_build when all dists/versions need a rebuild of the same upstream
pkg_build=1
# set/increment $series_build when just a particular dist/version needs a rebuild
#series_build=1
# upstream slurm-drmaa version
version='1.1.3'
url="https://github.com/natefoo/slurm-drmaa/releases/download/${version}/slurm-drmaa-${version}.tar.gz"
# NOTE: if this does not match debian/control, Launchpad builds will likely fail
builddeps='bison gperf ragel libslurm-dev bats'
DEBFULLNAME="Nathan Coraor"
DEBEMAIL="[email protected]"
export DEBFULLNAME DEBEMAIL
build=/host/build.$(hostname)
. /etc/os-release
function unsupported() {
echo "Don't know how to build for $NAME $VERSION [$ID] ($VERSION_ID)"
exit 1
}
case $ID in
ubuntu)
dch_dist_arg="--distribution $UBUNTU_CODENAME"
debuild_args='-S'
;;
debian)
dch_dist_arg='--distribution unstable'
;;
*)
unsupported
;;
esac
case "$PRETTY_NAME" in
*bullseye*)
VERSION_ID=11
;;
*bookworm*)
VERSION_ID=12
;;
esac
# can be used to set any version-specific vars
case $VERSION_ID in
20.04)
builddeps="dh-systemd ${builddeps}"
;;
11|12)
;;
*)
unsupported
;;
esac
. /util/utility_functions.sh
export DEBIAN_FRONTEND=noninteractive
gid=$(stat -c %g /host)
uid=$(stat -c %u /host)
if [ -z "$__STARFORGE_RUN_AS" -a $uid -ne 0 ]; then
# set timezone for debian/changelog
echo 'America/New_York' > /etc/timezone
apt-get -qq update
apt-get install --no-install-recommends -y wget tzdata sudo build-essential devscripts debhelper quilt fakeroot ca-certificates
dpkg-reconfigure tzdata
apt-get install --no-install-recommends -y $builddeps
[ $gid -ne 0 ] && groupadd -g $gid build
useradd -u $uid -g $gid -d $build -m -s /bin/bash build
exec sudo -iu build __STARFORGE_RUN_AS=1 -- ${SHELL:-/bin/bash} "$0" "$@"
elif [ -z "$__STARFORGE_RUN_AS" ]; then
mkdir $build
fi
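# Derive the Debian package version string from the upstream version and the package build number.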
case $version in
*-dev.*)
dch_version=${version%.*}${pkg_build}.${version/*.}
;;
*)
dch_version=${version}-${pkg_build}
;;
esac
# the logic for setting this isn't flawless but dput fails if the .orig.tar.gz has already been uploaded, so we can only
# use -sa once per upstream version? if this build adds the change to changelog.natefoo and the package build id is 1,
# this indicates a new upstream version and -sa will be set, otherwise -sd.
source_arg='-sd'
cd $build
download_tarball "$url"
ln -s "slurm-drmaa-${version}.tar.gz" "slurm-drmaa_${version%-*}.orig.tar.gz"
extract_tarball "slurm-drmaa-${version}.tar.gz"
cp -r $(dirname $0)/debian slurm-drmaa-${version}
cd slurm-drmaa-${version}
# use specific overrides if provided
for base in rules control; do
override="debian/${base}.${ID}-${VERSION_ID}"
[ -f "$override" ] && cp "${override}" "debian/${base}"
# remove this and others so they're not included in the debian tarball
rm -f debian/${base}.*
done
# the distribution needs to be correct in the .changes file for launchpad to build the PPA packages (but otherwise
# doesn't matter), the distribution is derived from the changelog, and we don't want to maintain a bunch of changelogs
#dch -v ${dch_version} ${dch_dist_arg} "New upstream release"
if ! grep -q "^slurm-drmaa (${dch_version})" debian/changelog.natefoo; then
if [ $pkg_build -eq 1 ]; then
source_arg='-sa'
: ${DCH_MESSAGE:=New upstream release}
else
: ${DCH_MESSAGE:=New package build}
fi
cd debian
[ ! -f changelog.natefoo ] && dch_create_args="--create --package=${pkg}"
dch ${dch_create_args} -v ${dch_version} --distribution unstable --force-distribution --changelog changelog.natefoo "$DCH_MESSAGE"
cd ..
cp debian/changelog.natefoo $(dirname $0)/debian/changelog.natefoo
fi
# now create this package's changelog
case "$ID" in
ubuntu)
dch_version+="ubuntu${series_build:-1}~${VERSION_ID}"
;;
debian)
dch_version+="+deb${VERSION_ID}u${series_build:-1}"
;;
esac
cat debian/changelog.natefoo debian/changelog.${ID} > debian/changelog
dch -v "${dch_version}" $dch_dist_arg "Series package"
rm debian/changelog.*
# -S to build source package
# -sa to include source, -sd to exclude source and only include the diff
# -us to not sign source, -uc to not sign changes
debuild ${debuild_args} ${source_arg} -us -uc
echo "packages in ${pkg}/$(basename $build)"
echo "To sign: debsign -S ${pkg}_${dch_version}_source.changes" &&
echo "To push: dput ${PPA:=ppa:natefoo/slurm-drmaa-test} ${pkg}_${dch_version}_source.changes"
echo " or on Debian: dput -c ../dput.cf ${PPA:=ppa:natefoo/slurm-drmaa-test} ${pkg}_${dch_version}_source.changes"
|
natefoo/starforge
|
slurm-drmaa/build.sh
|
Shell
|
mit
| 5,047 |
#!/bin/bash
# This script encapsulates calling setfattr with root privileges
# (Check that this script is added to the sudoers list)
COMMAND="setfattr"
# May be incompatible if run on Ubuntu based systems; it looks like Ubuntu
# requires you to prefix any attributes with 'user.'. This is unfortunately
# not acceptable for this attribute
ATTRIBUTE_NAME="security.ima"
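# Positional parameters: $1 = IMA attribute value to set, $2 = path to the target file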
VALUE=$1
FILE_PATH=$2
# sanity checking the path. Check that the file path seems valid
if [[ "${FILE_PATH}" =~ ^\./mount_directory/home/root/\.ssh/authorized_keys$ ]]; then
${COMMAND} -n ${ATTRIBUTE_NAME} -v ${VALUE} ${FILE_PATH}
else
exit 1
fi
|
01org/AFT
|
testing_harness/tools/setfattr_script.sh
|
Shell
|
gpl-2.0
| 627 |
#!/bin/sh
# $Id: translatecount.sh,v 2.0 2003/11/18 15:20:42 nijel Exp $
##
# Shell script to produce counts of just how out of date the translations are!
##
# Accepts optional parameters of a list of files to produce counts from
# Defaults to using all files
##
# Written by Robin Johnson <robbat2 at users.sourceforge.net>
##
srcfilelist=${@}
destfilelist=""
translationmatch='//to translate'
suffixtoremove='.inc.php'
added=0
if [ -z "${srcfilelist}" ]; then
srcfilelist="*.inc.php"
added=1
fi;
for i in ${srcfilelist}; do
if [ ! -e ${i} ]; then
i=`eval echo ${i}*php3`
fi
if [ -e ${i} ]; then
destfilelist="${destfilelist} ${i}"
fi;
done;
destfilelist=`echo ${destfilelist} | xargs -n1 | egrep '.inc.php$'`
if [ ! -z "${destfilelist}" ]; then
grep -c -- "${translationmatch}" ${destfilelist} | sort -t':' -n +1 | sed -e "s/${suffixtoremove}//g" | xargs -n1 | egrep -v ':0$'
fi;
|
nizaranand/New-Life-Office
|
phpMyAdmin/lang/translatecount.sh
|
Shell
|
gpl-2.0
| 904 |
git subsplit init [email protected]:joomla/joomla-framework.git
git subsplit publish "
src/Joomla/Application:[email protected]:joomla/joomla-framework-application.git
src/Joomla/Archive:[email protected]:joomla/joomla-framework-archive.git
src/Joomla/Cache:[email protected]:joomla/joomla-framework-cache.git
src/Joomla/Compat:[email protected]:joomla/joomla-framework-compat.git
src/Joomla/Controller:[email protected]:joomla/joomla-framework-controller.git
src/Joomla/Crypt:[email protected]:joomla/joomla-framework-crypt.git
src/Joomla/Data:[email protected]:joomla/joomla-framework-data.git
src/Joomla/Database:[email protected]:joomla/joomla-framework-database.git
src/Joomla/Date:[email protected]:joomla/joomla-framework-date.git
src/Joomla/DI:[email protected]:joomla/joomla-framework-di.git
src/Joomla/Event:[email protected]:joomla/joomla-framework-event.git
src/Joomla/Facebook:[email protected]:joomla/joomla-framework-facebook.git
src/Joomla/Filesystem:[email protected]:joomla/joomla-framework-filesystem.git
src/Joomla/Filter:[email protected]:joomla/joomla-framework-filter.git
src/Joomla/Form:[email protected]:joomla/joomla-framework-form.git
src/Joomla/Github:[email protected]:joomla/joomla-framework-github.git
src/Joomla/Google:[email protected]:joomla/joomla-framework-google.git
src/Joomla/Http:[email protected]:joomla/joomla-framework-http.git
src/Joomla/Image:[email protected]:joomla/joomla-framework-image.git
src/Joomla/Input:[email protected]:joomla/joomla-framework-input.git
src/Joomla/Keychain:[email protected]:joomla/joomla-framework-keychain.git
src/Joomla/Language:[email protected]:joomla/joomla-framework-language.git
src/Joomla/Linkedin:[email protected]:joomla/joomla-framework-linkedin.git
src/Joomla/Log:[email protected]:joomla/joomla-framework-log.git
src/Joomla/Model:[email protected]:joomla/joomla-framework-model.git
src/Joomla/Oauth1:[email protected]:joomla/joomla-framework-oauth1.git
src/Joomla/Oauth2:[email protected]:joomla/joomla-framework-oauth2.git
src/Joomla/Profiler:[email protected]:joomla/joomla-framework-profiler.git
src/Joomla/Registry:[email protected]:joomla/joomla-framework-registry.git
src/Joomla/Router:[email protected]:joomla/joomla-framework-router.git
src/Joomla/Session:[email protected]:joomla/joomla-framework-session.git
src/Joomla/String:[email protected]:joomla/joomla-framework-string.git
src/Joomla/Test:[email protected]:joomla/joomla-framework-test.git
src/Joomla/Twitter:[email protected]:joomla/joomla-framework-twitter.git
src/Joomla/Uri:[email protected]:joomla/joomla-framework-uri.git
src/Joomla/Utilities:[email protected]:joomla/joomla-framework-utilities.git
src/Joomla/View:[email protected]:joomla/joomla-framework-view.git
" --heads="master"
rm -rf .subsplit/
|
joomla/joomla-framework
|
build/joomla-split.sh
|
Shell
|
gpl-2.0
| 2,684 |
#!/bin/sh
#
# Copyright (c) 2012-2015 OpenLD
# Javier Sayago <[email protected]>
# Contact: [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#DESCRIPTION=This script will show you the uptime
uptime
echo ""
exit 0
|
OpenLD/enigma2-wetek
|
lib/python/Plugins/Extensions/LDteam/scripts/Uptime.sh
|
Shell
|
gpl-2.0
| 760 |