| code (stringlengths, 2–1.05M) | repo_name (stringlengths, 5–110) | path (stringlengths, 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2–1.05M) |
---|---|---|---|---|---|
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cp $DIR/98-ftdi.rules /etc/udev/rules.d/98-ftdi.rules
cp $DIR/47-hokuyo.rules /etc/udev/rules.d/47-hokuyo.rules
cp $DIR/99-cameras.rules /etc/udev/rules.d/99-cameras.rules
udevadm control --reload-rules
udevadm trigger
| ufieeehw/IEEE2016 | udev/setup.sh | Shell | mit | 288 |
#!/bin/bash
SESSION="dotfiles"
FOLDER=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) # directory of this file
#create detached session named $SESSION
tmux new-session -d -s $SESSION
tmux splitw -h -p 50 -t 0
tmux splitw -v -p 50 -t 0
tmux splitw -v -p 50 -t 1
tmux selectp -t 0
tmux send-keys "$FOLDER/shell_colors.sh" 'C-m'
tmux selectp -t 1
tmux send-keys "cd $FOLDER && git lg" 'C-m'
#tmux send-keys "q"
tmux selectp -t 2
tmux send-keys "cd $FOLDER/../../ && la" 'C-m'
tmux selectp -t 3
tmux send-keys "cd $FOLDER/../../ && vim README.md" 'C-m'
tmux -2 attach-session -t $SESSION
| egel/dotfiles | bin/extras/tmux_screenshot.sh | Shell | mit | 593 |
#!/bin/bash
echo "(1/2) Copying bin/sprinter to /usr/local/bin/sprinter"
cp -f bin/sprinter /usr/local/bin/sprinter
echo "(2/2) Making sure sprinter has the correct permissions set to run"
echo "TODO: not checking permissions yet..."
echo ""
echo "Fantastic - you are almost set to sprint like a horse!"
echo ""
echo "Unless you have added sprinter to your bash startup script already you just"
echo "have to add the following snippet to the end of the file:"
echo ""
echo "alias rake='sprinter'"
echo ""
echo "Bash: add this by running the following in a terminal:"
echo "echo \"\n# Sprinter alias for rake\nalias rake='sprinter'\" >> ~/.bash_profile"
echo ""
echo "ZSH: add this by running the following in a terminal:"
echo "echo \"\n# Sprinter alias for rake\nalias rake='sprinter'\" >> ~/.zshrc"
echo ""
echo "Don't forget you also have to reload your terminal window before you will"
echo "see any changes."
echo ""
echo "Good luck young sprinter!"
| MrTin/sprinter | install.sh | Shell | mit | 962 |
#!/bin/bash
ET_SRC_URL=${ET_SRC_URL:-"http://dexya6d9gs5s.cloudfront.net/latest/dynatrace-easytravel-src.zip"}
ET_SRC_HOME=/tmp
ET_SRC_CF_HOME="${ET_SRC_HOME}/CustomerFrontend"
ET_SRC_BB_HOME="${ET_SRC_HOME}/BusinessBackend"
ET_SRC_LG_HOME="${ET_SRC_HOME}/com.dynatrace.uemload"
ET_DEPLOY_HOME="${ET_DEPLOY_HOME:-$(pwd)/deploy}"
ET_CF_DEPLOY_HOME="${ET_DEPLOY_HOME}/${ET_CF_DEPLOY_HOME:-frontend}"
ET_BB_DEPLOY_HOME="${ET_DEPLOY_HOME}/${ET_BB_DEPLOY_HOME:-backend}"
ET_LG_DEPLOY_HOME="${ET_DEPLOY_HOME}/${ET_LG_DEPLOY_HOME:-loadgen}"
ET_CF_DEPLOY_LIB_HOME="${ET_CF_DEPLOY_HOME}/lib"
cd "${ET_SRC_HOME}"
# Download easyTravel sources
curl -L -o easyTravel-src.zip "${ET_SRC_URL}"
# Unarchive and build easyTravel sources while setting up some env vars
unzip ./easyTravel-src.zip
export ANT_OPTS="-Dfile.encoding=UTF8"
export JAVA_TOOL_OPTIONS=-Dfile.encoding=UTF8
ant -f ./Distribution war
cd "${ET_SRC_HOME}/Distribution/dist"
# Deploy easyTravel build artifacts into the workspace
mkdir -p "${ET_BB_DEPLOY_HOME}"
cp -v ./business/backend.war ${ET_BB_DEPLOY_HOME}
mkdir -p "${ET_CF_DEPLOY_HOME}"
cp -v ./customer/frontend.war ${ET_CF_DEPLOY_HOME}
mkdir -p "${ET_LG_DEPLOY_HOME}"
tar -cvzf "${ET_LG_DEPLOY_HOME}/loadgen.tar.gz" \
./lib/commons-cli-*.jar \
./lib/commons-io-*.jar \
./lib/commons-lang*.jar \
./lib/commons-logging-*.jar \
./lib/guava-*.jar \
./lib/httpclient-*.jar \
./lib/httpcore-*.jar \
./lib/metrics-core-*.jar \
./lib/metrics-json-*.jar \
./lib/metrics-jvm-*.jar \
./lib/metrics-servlets-*.jar \
./lib/mvel*.jar \
./lib/nekohtml-*.jar \
./lib/xercesImpl-*.jar \
./lib/xml-apis-*.jar \
./resources/easyTravel.properties \
./resources/easyTravelConfig.properties \
./resources/easyTravelThirdPartyResourcesizes.properties \
./resources/Users.txt \
./com.dynatrace.easytravel.commons.jar \
./uemload.jar
| dynatrace-innovationlab/easyTravel-Builder | build.sh | Shell | mit | 2,038 |
while sng_next; do
eval "$@"
done
| tyn/arcs | pkg/pipe/smap.pipe.sh | Shell | mit | 36 |
#!/usr/bin/env bash
clear;
echo "$0" | sed 's/\.\///g' | awk '{print toupper($0)}'
function contains() {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
return 1
}
function menu() {
commandArr=("1" "2" "3" "4" "5" "6" "7" "8" "9")
echo -e ""
echo -e "--------------------------------------------------------"
echo -e "ACTION MENU"
echo -e "--------------------------------------------------------"
echo -e ""
echo -e "1\t\"create\"\tCreate a new fiddle"
echo -e "2\t\"fork\"\t\tFork an existing fiddle"
echo -e "3\t\"index\"\t\tRe-index a specific fiddle type"
echo -e "4\t\"start\"\t\tStart the fiddle web service process"
echo -e "5\t\"stop\"\t\tStop the web service process"
echo -e "6\t\"delete\"\tDelete an existing fiddle"
echo -e "7\t\"refactor\"\tRename (\"refactor\") an existing fiddle"
echo -e "8\t\"test\"\t\tInvoke JsTestDriver for a given fiddle"
echo -e "9\t\"exit\"\t\tQuit"
echo -e ""
echo -e "--------------------------------------------------------"
read -p "(1-9)? " COMMAND
if [[ $(contains "${COMMAND}" "${commandArr[@]}"; echo "$?";) == "0" ]]
then
echo "you selected ${COMMAND}"
else
clear;
menu;
fi
}
menu
| bradyhouse/house | fiddles/bash/fiddle-0014-ReadCommand/script.sh | Shell | mit | 1,275 |
# Shell script for easily setting up HAZID site on non-shared ubuntu hosts.
#
# Takes a few arguments:
# 1. The path to the HAZID site root folder.
# 2. The MySQL database host.
# 3. The MySQL database admin username.
# 4. The MySQL database admin password.
# 5. The MySQL database name.
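# Example invocation (every value below is an illustrative placeholder, not a real path or credential):
#   ./db_setup.sh /var/www/hazid localhost hazid_admin 's3cr3t' hazid_db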
# Download and run RBAC database setup file.
wget https://raw.githubusercontent.com/yiisoft/yii2/2.0.2/framework/rbac/migrations/schema-mysql.sql
mysql -h $2 -u $3 -p$4 $5 < schema-mysql.sql
# Set up HAZID tables.
mysql -h $2 -u $3 -p$4 $5 < create_tables.sql
mysql -h $2 -u $3 -p$4 $5 < set_permissions.sql
# Set up RBAC authorisation rules & roles.
chmod u+x $1/yii
php $1/yii rbac/init
| rhamlet1/Hazid | scripts/db_setup.sh | Shell | mit | 680 |
#!/usr/bin/env bash
KYMSU_PATH=`pwd`
# Make Kymsu accessible in PATH
ln -fs "${KYMSU_PATH}"/kymsu.sh /usr/local/bin/kymsu
# Store Kymsu stuff in home directory
mkdir -p ~/.kymsu && echo "${KYMSU_PATH}" > ~/.kymsu/path
cp -R "${KYMSU_PATH}/plugins.d" ~/.kymsu
echo "KYMSU has been installed. Run kymsu command!"
| welcoMattic/kymsu | install.sh | Shell | mit | 315 |
#!/bin/bash
# $1=operation $2=address $3=common_name
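# Illustrative call (the IP and common name are placeholder values):
#   ./client-manage.sh add 172.16.1.10 laptop01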
#Name the domain we're using in OpenVPN
DOMAIN=vpn.ansible.com
# Name of existing Tower Inventory dedicated to this
INVNAME=OpenVPN
# Name of ansible inventory file to use from command line:
HOSTCONFIG=/etc/ansible/hosts.vpn
#Name the directory under /var/lib/awx/projects to execute the playbook on first config
PROJ=vpnclient
#Name of the playbook to run under the project directory
PLAYBOOK=config.yml
#Other Variables
LOCKFILE=/var/lock/.client-manage.exclusivelock
WAITSEC=30
OPER=$1
IP=$2
CN=$3
# Use function template for use in modules:
usage ()
{
echo "Use: "${0}" <help/add/delete> <arg 2> <arg 3>"
exit 1;
}
# Some sanity checking on input:
if [[ "${#}" -lt 3 ]]; then
usage
elif [[ "$1" == "help" ]]; then
usage
fi
# functions to add/remove/update
addnew ()
{
echo $CN".vpn.ansible.com ansible_ssh_host="$IP >> $HOSTCONFIG
}
deleteoldcn ()
{
sed -i /$CN/d $HOSTCONFIG
}
deleteoldip ()
{
sed -i /$IP/d $HOSTCONFIG
}
updateip ()
{
sed -i s/$OLDIP/$IP/ $HOSTCONFIG
}
updatecn ()
{
sed -i s/$OLDCN/$CN/ $HOSTCONFIG
}
#Sanity Check for Multiple Duplicates
dupipcount=($(cat $HOSTCONFIG | grep -v "#" | grep -v '\[' | grep $CN | awk '{print $1}' | sed s/.$DOMAIN// | wc -l ))
dupcncount=($(cat $HOSTCONFIG | grep -v "#" | grep -v '\[' | grep $IP | awk '{print $2}' | sed s/ansible_ssh_host\=// | wc -l ))
if [[ "$dupipcount" -gt 1 ]]; then
deleteoldip
elif [[ "$dupcncount" -gt 1 ]]; then
deleteoldcn
fi
#Evaluate action on our new host
dupip=($(cat $HOSTCONFIG | grep -v "#" | grep -v '\[' | grep $CN | awk '{print $1}' | sed s/.$DOMAIN// | sort -u ))
dupcn=($(cat $HOSTCONFIG | grep -v "#" | grep -v '\[' | grep $IP | awk '{print $2}' | sed s/ansible_ssh_host\=// | sort -u ))
if [[ "$dupip" == "$IP" ]]; then
if [[ "$dupcn" == "$CN" ]]; then
error=1
#Do nothing.
elif [[ "$dupcn" != "$CN" ]]; then
action=1
# update CN
fi
elif [[ "$dupip" != "$IP" ]]; then
if [[ "$dupcn" == "$CN" ]]; then
action=2
#update IP
elif [[ "$dupcn" != "$CN" ]]; then
action=3
# Add New
fi
fi
(
# Wait for lock on /var/lock/.myscript.exclusivelock (fd 200) for $WAITSEC seconds
flock -x -w $WAITSEC 200 || exit 1
#Do some work
if [[ "$OPER" == "help" ]]; then
usage
elif [[ "$OPER" == "delete" ]]; then
deleteoldcn
deleteoldip
elif [[ "$OPER" == "add" ]]; then
case $action in
1)
updatecn
;;
2)
updateip
;;
3)
addnew
;;
esac
else
echo "Please see: "${0}" help"
error=1
fi
if [[ "$error" != "1" ]]; then
#Import into Tower:
awx-manage inventory_import --source=$HOSTCONFIG --inventory-name=$INVNAME --overwrite
else
exit 1
fi
) 200>$LOCKFILE
# #Kick off ad-hoc config job against new host (commented out, uncomment and use as needed):
# cd /var/lib/awx/projects/$PROJ
# ansible-playbook -i $IP, $PLAYBOOK
| dfederlein/Tower-OpenVPN | client-manage.sh | Shell | mit | 2,852 |
# Speed testing of Event Processing Agents (EPA)
# Using explicit input cleanup-rule, where applicable (EPA1-7)
# By [email protected] 4.7.2016
./EPArun_speed_cleanup.sh EPA1 1event
./EPArun_speed_cleanup.sh EPA1 100events
./EPArun_speed_cleanup.sh EPA1 1000events
./EPArun_speed_cleanup.sh EPA1 10000events
./EPArun_speed_cleanup.sh EPA2 1event
./EPArun_speed_cleanup.sh EPA2 100events
./EPArun_speed_cleanup.sh EPA2 1000events
./EPArun_speed_cleanup.sh EPA2 10000events
./EPArun_speed_cleanup.sh EPA3_local 1event
./EPArun_speed_cleanup.sh EPA3_local 100events
./EPArun_speed_cleanup.sh EPA3_local 1000events
./EPArun_speed_cleanup.sh EPA3_local 10000events
./EPArun_speed_cleanup.sh EPA4 EPA3_1event
./EPArun_speed_cleanup.sh EPA4 EPA3_100events
./EPArun_speed_cleanup.sh EPA4 EPA3_1000events
./EPArun_speed_cleanup.sh EPA4 EPA3_10000events
./EPArun_speed_cleanup.sh EPA5 1event
./EPArun_speed_cleanup.sh EPA5 100events
./EPArun_speed_cleanup.sh EPA5 1000events
./EPArun_speed_cleanup.sh EPA5 10000events
./EPArun_speed_cleanup.sh EPA6 1event
./EPArun_speed_cleanup.sh EPA6 100events
./EPArun_speed_cleanup.sh EPA6 1000events
./EPArun_speed_cleanup.sh EPA6 10000events
./EPA7run_speed.sh EPA5_1event
./EPA7run_speed.sh EPA5_100events
./EPA7run_speed.sh EPA5_1000events
./EPA7run_speed.sh EPA5_10000events
| aaltodsg/instans-cep2sparql | scripts/batch_speed_cleanup.sh | Shell | mit | 1,319 |
#! /bin/bash
# author savior
/etc/init.d/mysqld restart
tail -f /var/log/lastlog
| mut0u/centos6.6-mysql-ssh-docker | boot/init.sh | Shell | mit | 91 |
# http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html
# http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html#Zle-Builtins
# http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html#Standard-Widgets
# Make sure that the terminal is in application mode when zle is active, since
# only then values from $terminfo are valid
if (( ${+terminfo[smkx]} )) && (( ${+terminfo[rmkx]} )); then
function zle-line-init() {
echoti smkx
}
function zle-line-finish() {
echoti rmkx
}
zle -N zle-line-init
zle -N zle-line-finish
fi
bindkey -e # Use emacs key bindings
bindkey '\ew' kill-region # [Esc-w] - Kill from the cursor to the mark
bindkey -s '\el' 'ls\n' # [Esc-l] - run command: ls
bindkey '^r' history-incremental-search-backward # [Ctrl-r] - Search backward incrementally for a specified string. The string may begin with ^ to anchor the search to the beginning of the line.
if [[ "${terminfo[kpp]}" != "" ]]; then
bindkey "${terminfo[kpp]}" up-line-or-history # [PageUp] - Up a line of history
fi
if [[ "${terminfo[knp]}" != "" ]]; then
bindkey "${terminfo[knp]}" down-line-or-history # [PageDown] - Down a line of history
fi
# start typing + [Up-Arrow] - fuzzy find history forward
if [[ "${terminfo[kcuu1]}" != "" ]]; then
autoload -U up-line-or-beginning-search
zle -N up-line-or-beginning-search
bindkey "${terminfo[kcuu1]}" up-line-or-beginning-search
fi
# start typing + [Down-Arrow] - fuzzy find history backward
if [[ "${terminfo[kcud1]}" != "" ]]; then
autoload -U down-line-or-beginning-search
zle -N down-line-or-beginning-search
bindkey "${terminfo[kcud1]}" down-line-or-beginning-search
fi
if [[ "${terminfo[khome]}" != "" ]]; then
bindkey "${terminfo[khome]}" beginning-of-line # [Home] - Go to beginning of line
fi
if [[ "${terminfo[kend]}" != "" ]]; then
bindkey "${terminfo[kend]}" end-of-line # [End] - Go to end of line
fi
bindkey ' ' magic-space # [Space] - do history expansion
bindkey '^[[1;5C' forward-word # [Ctrl-RightArrow] - move forward one word
bindkey '^[[1;5D' backward-word # [Ctrl-LeftArrow] - move backward one word
if [[ "${terminfo[kcbt]}" != "" ]]; then
bindkey "${terminfo[kcbt]}" reverse-menu-complete # [Shift-Tab] - move through the completion menu backwards
fi
bindkey '^?' backward-delete-char # [Backspace] - delete backward
if [[ "${terminfo[kdch1]}" != "" ]]; then
bindkey "${terminfo[kdch1]}" delete-char # [Delete] - delete forward
else
bindkey "^[[3~" delete-char
bindkey "^[3;5~" delete-char
bindkey "\e[3~" delete-char
fi
# Edit the current command line in $EDITOR
autoload -U edit-command-line
zle -N edit-command-line
bindkey '\C-x\C-e' edit-command-line
# file rename magick
bindkey "^[m" copy-prev-shell-word
# consider emacs keybindings:
#bindkey -e ## emacs key bindings
#
#bindkey '^[[A' up-line-or-search
#bindkey '^[[B' down-line-or-search
bindkey '^[^[[C' emacs-forward-word
bindkey '^[^[[D' emacs-backward-word
#
#bindkey -s '^X^Z' '%-^M'
#bindkey '^[e' expand-cmd-path
#bindkey '^[^I' reverse-menu-complete
#bindkey '^X^N' accept-and-infer-next-history
#bindkey '^W' kill-region
bindkey '^I' complete-word
## Fix weird sequence that rxvt produces
bindkey -s '^[[Z' '\t'
#
| Edholm/oh-my-zsh | lib/key-bindings.zsh | Shell | mit | 3,452 |
#! /bin/bash
redirectedTemp=tmp
mkdir -p $redirectedTemp
url="https://tcga.xenahubs.net/download/TCGA.BRCA.sampleMap/miRNA_HiSeq_gene.gz"
fileName=$redirectedTemp/miRNA_HiSeq_gene.gz
wget -O $fileName $url
gunzip $fileName
#downloading CancerType Samples
url="https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE62944&format=file&file=GSE62944%5F06%5F01%5F15%5FTCGA%5F24%5FCancerType%5FSamples%2Etxt%2Egz"
fileName=$redirectedTemp/GSE62944_06_01_15_TCGA_24_CancerType_Samples.txt.gz
wget -O $fileName $url
gunzip $fileName
| srp33/WishBuilder | TCGA_BreastCancer_miRNA/download.sh | Shell | mit | 527 |
#!/usr/bin/env bash
BASH_FILENAME=".bashrc"
install_homebrew() {
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
}
# OS detection
case "$OSTYPE" in
darwin*)
BASH_FILENAME=".bash_profile"
# install_homebrew
;;
linux*)
BASH_FILENAME=".bashrc"
;;
*)
;;
esac
if [[ "$BASH_FILENAME" = ".bash_profile" ]]; then
if [[ -z "$(which brew)" ]]; then
install_homebrew
fi
fi
BASH_FILE="~/${BASH_FILENAME}"
# Copy dotfiles: bash vim git
copy_dotfiles() {
cp "${BASH_FILENAME}" ~
cp ".aliases" ~
cp ".bash_prompt" ~
cp ".curlrc" ~
cp ".exports" ~
cp ".functions" ~
cp ".vimrc" ~
cp ".gitconfig" ~
}
# Install or update https://github.com/rupa/z
install_z() {
curl -O https://raw.githubusercontent.com/rupa/z/v1.9/z.sh
mkdir ~/.z && mv z.sh ~/.z
echo -e "\n# https://github.com/rupa/z\n" >> "$BASH_FILE"
echo ". ~/.z/z.sh" >> "$BASH_FILE"
}
# Install or update https://github.com/creationix/nvm
install_nvm() {
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.32.0/install.sh | bash
}
copy_dotfiles
install_z
install_nvm
| Alex1990/settings-sync | bootstrap.sh | Shell | mit | 1,143 |
#!/bin/bash
# $1 = input file
# $2 = output file
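# Illustrative usage (file names are placeholders): convert a cp936/GBK file to UTF-8,
# falling back to a plain copy if the conversion fails:
#   ./try_correct_encoding.sh corpus/article_gbk.txt corpus/article_utf8.txt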
rm -rf "$2"
/usr/bin/iconv -f cp936 -t utf-8 "$1" > "$2"
if [[ $? != "0" ]]; then
cp -f "$1" "$2"
fi
| breakds/corpus-accu | bin/try_correct_encoding.sh | Shell | mit | 161 |
#!/bin/bash
## Event handler script for executing an NRPE command on a given host when a service is in CRITICAL and HARD state.
state=$1 # eg. "OK","CRITICAL,"WARNING","UNKNOWN"
statetype=$2 # eg. "SOFT","HARD"
host=$3 # hostaddress of where to execute nrpe command
command=$4 # nrpe command to execute
logfile=/tmp/eventhandler.log # logfile to store executions by this eventhandler
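# Sketch of how this handler might be wired up as a Nagios command (object name and
# script path below are assumptions, not part of this repository):
#   define command {
#       command_name    handle-service-via-nrpe
#       command_line    /usr/lib/nagios/plugins/eventhandlers/event_handler_nsclient_command.sh $SERVICESTATE$ $SERVICESTATETYPE$ $HOSTADDRESS$ restart_service
#   }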
# Date format: "2016-03-29 13:10 CEST"
date=`date +"%Y-%m-%d %H:%M %Z"`
case "$1" in (CRITICAL)
if [ "$statetype" = "HARD" ] ; then
/bin/echo -en "$date | ${0##*/} Got state: <$state> and statetype: <$statetype> with command <$command> and argument <$cmdarg> for execution on host <$host>\n" >> $logfile
/usr/lib/nagios/plugins/check_nrpe -H $host -p 5666 -t 30 -c $command >> $logfile
fi
esac
exit 0
| CoffeeITWorks/ansible_nagios4_server_plugins | files/plugins/event_handler_nsclient_command.sh | Shell | mit | 793 |
#!/bin/bash
mkdir -p data
cwltool --outdir ./data tools/picard-SortSam.cwl samples/sortsam.json
| Duke-GCB/bespin-cwl | examples/tools/run_sortsam.sh | Shell | mit | 97 |
#!/usr/bin/env bash
# Remove unnecessary packages (and their dependencies)
# which can’t be removed until after the installation process
yum --assumeyes autoremove authconfig firewalld linux-firmware
# Clean up old yum repo data & logs
yum clean all
yum history new
rm --recursive --force /var/lib/yum/yumdb/*
rm --recursive --force /var/lib/yum/history/*
truncate --no-create --size=0 /var/log/yum.log
# Remove random-seed, so it’s not the same in every image
rm --force /var/lib/random-seed
# Change any incorrect SELinux context labels
fixfiles -R -a restore
# Force the filesystem to reclaim space from deleted files
dd if=/dev/zero of=/var/tmp/zeros bs=1M
rm --force /var/tmp/zeros
| TFDuesing/packer-templates | Fedora-20/scripts/50cleanup.sh | Shell | mit | 696 |
#!/bin/sh
DoExitAsm ()
{ echo "An error occurred while assembling $1"; exit 1; }
DoExitLink ()
{ echo "An error occurred while linking $1"; exit 1; }
OFS=$IFS
IFS="
"
/usr/bin/ld /usr/lib/crt1.10.5.o -x -multiply_defined suppress -L. -o triangle `cat link.res`
if [ $? != 0 ]; then DoExitLink ; fi
IFS=$OFS
| rusucosmin/courses | ubb/teaching/dp/ppaslink.sh | Shell | mit | 313 |
#! /bin/bash
key_names=(me gs cr gfm k8s)
for key in "${key_names[@]}"
do
if [[ -f ~/.ssh/$key.prv ]]; then
ssh-add ~/.ssh/$key.prv > /dev/null 2>&1
echo "SSH Key $key loaded."
fi
done
| shampine/dotfiles | inc/keys.sh | Shell | mit | 199 |
#!/bin/bash
set -uxv
##########################
# Takes a directory coadded by all, and by exposure
# and measures and compiles photometry on a per exposure basis
###########################
# $Id: run_unstacked_photometry.sh,v 1.26 2010-10-05 22:45:26 dapple Exp $
#############################
# $1 : Cluster dir
# $2 : Photdir
# $3 : Cluster
# $4 : Filter
# $5 : Detection Image
# $6 : Target Seeing
clusterdir=$1
photdir=$2
cluster=$3
filter=$4
detect=$5
convolve=$6
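# Illustrative invocation (paths, cluster, filter, and seeing values are placeholders):
#   ./run_unstacked_photometry.sh /data/clusters/A2261 /data/phot A2261 W-J-V /data/detect/detection.fits 0.8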
############################
. progs.ini > /tmp/out.log 2>&1
##############################
special_filters="I K"
###############################
unstacked_dir=$photdir/${filter}/unstacked
if [ ! -d ${unstacked_dir} ]; then
mkdir -p ${unstacked_dir}
fi
#### Pre-req Test
inSpecialFilters=`echo $special_filters | grep $filter`
if [ -z "$inSpecialFilters" ] && [ ! -e $clusterdir/$filter/SCIENCE/cat/chips.cat8 ]; then
echo "chips.cat8 files does not exist"
exit 15
fi
### Generate catalogs for individual exposures
detect_dir=`dirname $detect`
detect_base=`basename $detect .fits`
detect_weight=${detect_dir}/${detect_base}.weight.fits
detect_cat=${detect_dir}/detection.cat
if [ ! -e ${detect_cat} ]; then
echo "Cannot Find Detection Catalog: $detect_cat"
exit 14
fi
exposures=`ldactoasc -i $clusterdir/$filter/SCIENCE/${cluster}_all.cat -t STATS -b -s -k IMAGENAME ${cluster}_all | awk '($2 == 1){print $1}'`
exposures="${exposures} all"
catfiles=""
for exposure in $exposures; do
file=$clusterdir/$filter/SCIENCE/coadd_${cluster}_$exposure/coadd.fits
if [ -e $file ]; then
measure_dir=`dirname $file`
measure_base=`basename $file .fits`
measure_weight=${measure_dir}/${measure_base}.weight.fits
measure_flag=${measure_dir}/${measure_base}.flag.fits
seeing=`dfits $file | fitsort -d SEEING | awk '{print $2}'`
tag=`echo $measure_dir | awk -F '_' '{print $NF}'`
./extract_object_cats.py --di $detect --dw $detect_weight \
--pi $file --pw $measure_weight --pf $measure_flag \
-o ${unstacked_dir}/${tag}.filtered.cat --fwhm ${seeing} --new-fwhm ${convolve} \
--areacat ${detect_cat}
exit_code=$?
if [ "$exit_code" != "0" ]; then
echo "Failure in do_multiple_exposures.py: $exit_code"
exit $exit_code
fi
catfiles="$catfiles ${unstacked_dir}/${tag}.filtered.cat"
else
echo "Missing Exposure: ${exposure}"
exit 1
fi
done
# Sort catalogs by instrument and config
if [ -e unstacked.exp.list_$$ ]; then
rm -f unstacked.exp.list_$$
fi
############################
#special filters
inSpecialFilters=`echo ${special_filters} | grep ${filter}`
if [ -n "$inSpecialFilters" ]; then
./convertSpecialFilters.py ${catfiles} ${unstacked_dir}/${cluster}.${filter}.unstacked.cat
exit 0
fi
#############################
mastercat=""
for cat in $catfiles; do
base=`basename $cat`
expid=`echo $base | awk -F'.' '{print $1}'`
if [ "$expid" == "all" ];then
mastercat=$cat
continue
fi
exp_instrum_config=`ldactoasc -i $clusterdir/$filter/SCIENCE/cat/chips.cat8 -t CHIPS_STATS -s -b -k INSTRUM CONFIG FITSFILE | grep "$expid" | awk '{print $1, $2}' | sort | uniq`
instrum=`echo $exp_instrum_config | awk '{print $1}'`
config=`echo $exp_instrum_config | awk '{print $2}'`
if [ "${instrum}" == "0" ]; then
instrum='SPECIAL'
config="0"
fi
echo $cat ${instrum}-${config} >> unstacked.exp.list_$$
done
if [ "${filter}" == "B-WHT" ]; then
filter=B
elif [ "${filter}" == "U-WHT" ]; then
filter=U
fi
configs=`awk '{print $2}' unstacked.exp.list_$$ | sort | uniq`
# Combine catalogs into final catalog
echo "adam-look: configs=$configs for $cluster - $filter"
merge_line=""
for config in $configs; do
cats=`grep "$config" unstacked.exp.list_$$ | awk -v ORS=' ' '{print $1}'`
./measure_unstacked_photometry.py -o $unstacked_dir/$cluster.$filter.$config.unstacked.cat -i $config -m $mastercat $cats
exit_code=$?
if [ "$exit_code" != "0" ]; then
echo "$exit_code Failure in measure_unstacked_photometry.py"
exit $exit_code
fi
if [ ! -s $unstacked_dir/$cluster.$filter.$config.unstacked.cat ]; then
echo "Final catalog not produced: $unstacked_dir/$cluster.$filter.$config.unstacked.cat"
exit 1
fi
merge_line="$merge_line $unstacked_dir/$cluster.$filter.$config.unstacked.cat"
done
instrum=`awk '{if (NR==1) print $2}' unstacked.exp.list_$$ | awk -F'-' '{print $1}'`
./combine_unstacked_config_cats.py $unstacked_dir/$cluster.$filter.unstacked.cat $instrum $mastercat $merge_line
rm -f unstacked.exp.list_$$
| deapplegate/wtgpipeline | run_unstacked_photometry.sh | Shell | mit | 4,587 |
#! /bin/sh
# @(#)$Id: JMLRelation.sh,v 1.4 2004/06/01 19:51:43 leavens Exp $
# Copyright (C) 1998, 1999, 2002, 2004 Iowa State University
# This file is part of JML
# JML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# JML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with JML; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Domain_ValSed='s/_Domain_/Value/g
s/_DomainType_/JMLType/g
s/_val_not_equal_to_dv_/!val.equals(dv)/g
'
Domain_EqlSed='s/_Domain_/Equals/g
s/_DomainType_/Object/g
s/_val_not_equal_to_dv_/!val.equals(dv)/g
'
Domain_ObjSed='s/_Domain_/Object/g
s/_DomainType_/Object/g
s/_val_not_equal_to_dv_/val != dv/g
'
Range_ValSed='s/_Range_/Value/g
s/_RangeType_/JMLType/g
s/_r_not_equal_to_rv_/!r.equals(rv)/g
'
Range_EqlSed='s/_Range_/Equals/g
s/_RangeType_/Object/g
s/_r_not_equal_to_rv_/!r.equals(rv)/g
'
Range_ObjSed='s/_Range_/Object/g
s/_RangeType_/Object/g
s/_r_not_equal_to_rv_/r != rv/g
'
create_rel()
{
DOMAINTYPE=$2
RANGETYPE=$3
case "$DOMAINTYPE" in
Value)
DS="$Domain_ValSed"
;;
Equals)
DS="$Domain_EqlSed"
;;
Object)
DS="$Domain_ObjSed"
;;
*)
echo "got DOMAINTYPE of $DOMAINTYPE" >&2
exit 1
;;
esac
case "$RANGETYPE" in
Value)
RS="$Range_ValSed"
;;
Equals)
RS="$Range_EqlSed"
;;
Object)
RS="$Range_ObjSed"
;;
*)
echo "got RANGETYPE of $RANGETYPE" >&2
exit 1
;;
esac
for k in $1
do
echo "Creating JML${DOMAINTYPE}To${RANGETYPE}${k}.${SFX1}"
sed -e "$DS" -e "$RS" "JML${k}.${SFX1}-generic" > "JML${DOMAINTYPE}To${RANGETYPE}${k}.${SFX1}"
done
}
print_usage()
{
echo "Usage: JMLRelation kind option1 option2" >&2
echo "kind: one of Relation RelationEnumerator RelationImageEnumerator or all" >&2
echo "option1: one of Value, Equals, Object or all" >&2
echo "option2: one of Value, Equals, Object or all" >&2
exit 1
}
if [ "$#" != 3 ]
then
print_usage
fi
kind="Relation RelationEnumerator RelationImageEnumerator"
case "$1" in
Relation)
kind=Relation
;;
RelationEnumerator)
kind=RelationEnumerator
;;
RelationImageEnumerator)
kind=RelationImageEnumerator
;;
all)
;;
*)
print_usage
;;
esac
kt="Value Equals Object"
case "$2" in
Value|value|val)
kt=Value
;;
Equals|equals|eqls)
kt=Equals
;;
Object|object|obj)
kt=Object
;;
all)
;;
*)
print_usage
;;
esac
vt="Value Equals Object"
case "$3" in
Value|value|val)
vt=Value
;;
Equals|equals|eqls)
vt=Equals
;;
Object|object|obj)
vt=Object
;;
all)
;;
*)
print_usage
;;
esac
SFX1="java"
for KEYTYPE in $kt
do
for VALUETYPE in $vt
do
create_rel "$kind" $KEYTYPE $VALUETYPE
done
done
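# Example (drawn from the usage text above; the generic input file is assumed to exist):
#   ./JMLRelation.sh Relation Value Object
# generates JMLValueToObjectRelation.java from JMLRelation.java-generic.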
| GaloisInc/Votail | external_tools/JML/org/jmlspecs/models/JMLRelation.sh | Shell | mit | 3,257 |
#!/bin/sh
export model_2d=""
export model_3d="\
neohooke\
rivlin\
arruda\
mises\
mohr\
drucker\
swift\
ramberg\
viscoe\
creep\
relax"
export model_shell=""
../test_static_sub2.sh $*
| FrontISTR/FrontISTR | examples/static/1elem/test_1elem.sh | Shell | mit | 198 |
#!/bin/bash
source ./venv/bin/activate
python3 -u ./train_model.py --weight_path=$1 \
--history_path=$2 \
--lock_file=$3 \
--config_file=$4 \
--first_epoch=$5
| johnmartinsson/bird-species-classification | run_job.sh | Shell | mit | 184 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-59-1
#
# Security announcement date: 2014-09-24 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:48 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - bash:4.1-3+deb6u1
#
# Last versions recommended by security team:
# - bash:4.1-3+deb6u2
#
# CVE List:
# - CVE-2014-6271
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade bash=4.1-3+deb6u2 -y
| Cyberwatch/cbw-security-fixes | Debian_6_(Squeeze)/x86_64/2014/DLA-59-1.sh | Shell | mit | 598 |
export GOPATH="$HOME/Dropbox/Programming/go"
| DracoLi/dotfiles | go/exports.zsh | Shell | mit | 45 |
#!/bin/bash
# ensure the correct kernel headers are installed
# and install some basic tools useful on nearly every dev vm
apt-get -y install linux-kernel-headers git patch vim-nox htop bash-completion wget curl gcc software-properties-common
reboot
| mayflower/baseboxes | scripts/ubuntu/tools.sh | Shell | mit | 252 |
#!/bin/bash
set -e
# redis
redis-server /etc/redis/redis.conf
# memcached
memcached -d -u daemon
# sphinx
exec "$@"
| wangaguo/docker_atgames_dev | atgames_dev_runner/0.1/docker-entrypoint.sh | Shell | mit | 120 |
# function to parse the ini style configuration file
config_parser () {
local iniFile="$1";
local tmpFile=$( mktemp /tmp/`basename $iniFile`.XXXXXX );
local intLines;
local binSED=$( which sed );
# copy the ini file to the temporary location
cp $iniFile $tmpFile;
# remove tabs or spaces around the =
$binSED -i -e 's/[ \t]*=[ \t]*/=/g' $tmpFile;
# transform section labels into function declaration
$binSED -i -e 's/\[\([A-Za-z0-9_]*\)\]/config.section.\1() \{/g' $tmpFile;
$binSED -i -e 's/config\.section\./\}\'$'\nconfig\.section\./g' $tmpFile;
# remove first line
$binSED -i -e '1d' $tmpFile;
# add the last brace
echo -e "\n}" >> $tmpFile;
# now load the file
source $tmpFile;
# clean up
rm -f $tmpFile;
}
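# Illustrative usage (file, section, and key names are hypothetical):
#   Given settings.ini containing:
#     [database]
#     host = localhost
#     port = 3306
#   config_parser settings.ini   # defines a config.section.database function
#   config.section.database      # calling it sets host=localhost and port=3306 in the current shell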
| chilladx/config-parser | config-parser.sh | Shell | mit | 738 |
#!/usr/bin/env bash
mkdir /etc/nginx/ssl 2>/dev/null
PATH_SSL="/etc/nginx/ssl"
PATH_KEY="${PATH_SSL}/${1}.key"
PATH_CSR="${PATH_SSL}/${1}.csr"
PATH_CRT="${PATH_SSL}/${1}.crt"
if [ ! -f $PATH_KEY ] || [ ! -f $PATH_CSR ] || [ ! -f $PATH_CRT ]
then
openssl genrsa -out "$PATH_KEY" 2048 2>/dev/null
openssl req -new -key "$PATH_KEY" -out "$PATH_CSR" -subj "/CN=$1/O=Vagrant/C=UK" 2>/dev/null
openssl x509 -req -days 365 -in "$PATH_CSR" -signkey "$PATH_KEY" -out "$PATH_CRT" 2>/dev/null
fi
block="
server {
listen 80;
listen [::]:80;
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name $1;
root \"${2%/}\";
access_log off;
error_log /var/log/nginx/error.log error;
client_max_body_size 100m;
index index.html;
include gzip;
location / {
try_files \$uri \$uri/ /index.html;
}
location ~ (.+)\.(html|json|txt|js|css|jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$ {
try_files \$uri =404;
}
location ~ ^/(api) {
proxy_pass http://127.0.0.1;
proxy_set_header Host apollo.dev;
include /etc/nginx/proxy_params;
}
ssl_certificate /etc/nginx/ssl/$1.crt;
ssl_certificate_key /etc/nginx/ssl/$1.key;
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 10m;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers 'ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5';
}
"
echo "$block" > "/etc/nginx/sites-available/$1"
ln -fs "/etc/nginx/sites-available/$1" "/etc/nginx/sites-enabled/$1"
nginx -s reload
| adiachenko/xenial | .scripts/serve-spa.sh | Shell | mit | 1,559 |
#!/usr/bin/env bash
set -e
find_repo_path() {
cd "$(dirname "${BASH_SOURCE[0]}")"
cd "$(dirname "$(readlink "${BASH_SOURCE[0]}")")"
echo "$PWD/.."
}
create_formula() {
rm -fr poetenv
python3 -m venv poetenv
source poetenv/bin/activate
pip install blackbelt
pip install homebrew-pypi-poet
poet -f blackbelt | sed 's/Shiny new formula/Internal toolbelt on steroids/' > "$(find_repo_path)/Formula/blackbelt.rb"
rm -fr poetenv
}
create_formula
| apiaryio/homebrew-formulae | bin/update-blackbelt.sh | Shell | mit | 464 |
#!/bin/bash
# This program contains parts of narwhal's "sea" program,
# as well as bits borrowed from Tim Caswell's "nvm"
# nave install <version>
# Fetch the version of node and install it in nave's folder.
# nave use <version>
# Install the <version> if it isn't already, and then start
# a subshell with that version's folder at the start of the
# $PATH
# nave use <version> program.js
# Like "nave use", but have the subshell start the program.js
# immediately.
# When told to use a version:
# Ensure that the version exists, install it, and
# then add its prefix to the PATH, and start a subshell.
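# Example session (the version number and script name are illustrative):
#   nave install 0.10.26     # fetch and install that node version into nave's folder
#   nave use 0.10.26         # subshell with that version's folder at the start of $PATH
#   nave use 0.10.26 app.js  # same, but run app.js immediately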
if [ "$NAVE_DEBUG" != "" ]; then
set -x
fi
if [ -z "$BASH" ]; then
cat >&2 <<MSG
Nave is a bash program, and must be run with bash.
MSG
exit 1
fi
shell=`basename "$SHELL"`
case "$shell" in
bash) ;;
zsh) ;;
*)
echo "Nave only supports zsh and bash shells." >&2
exit 1
;;
esac
# Use fancy pants globs
shopt -s extglob
# Try to figure out the os and arch for binary fetching
uname="$(uname
| wenjoy/homePage | node_modules/node-captcha/node_modules/canvas/node_modules/mocha/node_modules/diff/node_modules/should/node_modules/gulp-rename/node_modules/gulp-sourcemaps/node_modules/convert-source-map/node_modules/inline-source-map/node_modules/nave/nave.sh | Shell | mit | 1,024 |
#!/bin/bash
docker build -t registry.aliyuncs.com/freshncp/debian .
| Berkaroad/docker-images | debian/build.sh | Shell | mit | 68 |
f2py -c xcorr.pyf xcorr.c
| trichter/sito | src/compile_xcorr.sh | Shell | mit | 26 |
#!/bin/bash
cd /var/www/news-api/vendor/influx-analytics/
../../phpunit/phpunit/phpunit tests/AnalyticsTest.php
| vorbind/influx-analytics | run.sh | Shell | mit | 112 |
#!/bin/bash
cd ../.git/hooks
ln -sfv ../../git/pre-commit.sh pre-commit
| andyjack/neo-tracker | git/setup.sh | Shell | mit | 73 |
#!/bin/sh
set -eo pipefail -o nounset
wget --quiet -O blekhman_ad.tsv https://raw.githubusercontent.com/macarthur-lab/gene_lists/master/lists/blekhman_ad.tsv
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh38/GRCh38.genome
## Get the .genome file
genome2=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg38/hg38.genome
## Get the chromosome mapping file
chr_mapping=$(ggd get-files hg38-chrom-mapping-ensembl2ucsc-ncbi-v1 --pattern "*.txt")
grch38_gtf="$(ggd get-files grch38-gene-features-ensembl-v1 -p 'grch38-gene-features-ensembl-v1.gtf.gz')"
cat << EOF > parse_gtf_by_gene.py
"""
Get a list of genome coordinates for a list of ad genes
"""
import sys
import io
import gzip
gtf_file = sys.argv[1] ## A gtf file with CDS features
ad_gene_file = sys.argv[2] ## A single column tsv file for ad genes
outfile = sys.argv[3] ## File to write to
## Get a set of gene symbols
ad_gene_set = {}
with io.open(ad_gene_file, "rt", encoding = "utf-8") as ad:
ad_gene_set = set(x.strip() for x in ad)
## Parse the gtf file
fh = gzip.open(gtf_file, "rt", encoding = "utf-8") if gtf_file.endswith(".gz") else io.open(gtf_file, "rt", encoding = "utf-8")  ## open the GTF itself, gzipped or plain
ad_gene_dict = dict()
header = []
for line in fh:
if line[0] == "#":
header = line.strip().split("\t")
continue
line_dict = dict(zip(header,line.strip().split("\t")))
line_dict.update({x.strip().replace("\"","").split(" ")[0]:x.strip().replace("\"","").split(" ")[1] for x in line_dict["attribute"].strip().split(";")[:-1]})
## If the current gene is in the ad gene set
if line_dict["gene_name"] in ad_gene_set:
if line_dict["gene_name"] not in ad_gene_dict:
ad_gene_dict[line_dict["gene_name"]] = []
## If CDS or stop_codon feature, add feature info to ad_gene_dict
if line_dict["feature"] == "CDS" or line_dict["feature"] == "stop_codon":
## Change 1 based start to zero based start
ad_gene_dict[line_dict["gene_name"]].append([str(line_dict["#chrom"]),
str(int(line_dict["start"]) - 1),
str(line_dict["end"]),
str(line_dict["strand"]),
str(line_dict["gene_id"]),
str(line_dict["gene_name"]),
str(line_dict["transcript_id"]),
str(line_dict["gene_biotype"])
])
fh.close()
## Write dict out
with open(outfile, "w") as o:
for gene, coor in ad_gene_dict.items():
for line in coor:
o.write("\t".join(line) + "\n")
EOF
python parse_gtf_by_gene.py $grch38_gtf blekhman_ad.tsv unflattened_ad_genes.bed
cat << EOF > sort_columns.py
"""
sort the transcript id column
sort and get a unique list of the gene column
"""
import sys
for line in sys.stdin.readlines():
line_list = line.strip().split("\t")
## Sort columns 4 - 8 and get a unique list
line_list[3] = ",".join(sorted(list(set(line_list[3].strip().split(",")))))
line_list[4] = ",".join(sorted(list(set(line_list[4].strip().split(",")))))
line_list[5] = ",".join(sorted(list(set(line_list[5].strip().split(",")))))
line_list[6] = ",".join(sorted(list(set(line_list[6].strip().split(",")))))
line_list[7] = ",".join(sorted(list(set(line_list[7].strip().split(",")))))
## Print to stdout
print("\t".join(line_list))
EOF
## Merge and sort ad genes with coordinates
gsort unflattened_ad_genes.bed $genome \
| bedtools merge -i - -c 4,5,6,7,8 -o collapse \
| awk -v OFS="\t" 'BEGIN { print "#chrom\tstart\tend\tstrand\tgene_ids\tgene_symbols\ttranscript_ids\tgene_biotypes" } {print $0}' \
| python sort_columns.py \
| gsort --chromosomemappings $chr_mapping /dev/stdin $genome2 \
| bgzip -c > hg38-autosomal-dominant-genes-blekhman-v1.bed.gz
tabix hg38-autosomal-dominant-genes-blekhman-v1.bed.gz
wget --quiet $genome2
## Get ad gene complement coordinates
sed "1d" hg38.genome \
| bedtools complement -i <(zgrep -v "#" hg38-autosomal-dominant-genes-blekhman-v1.bed.gz) -g /dev/stdin \
| gsort /dev/stdin $genome2 \
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend"} {print $1,$2,$3}' \
| bgzip -c > hg38-autosomal-dominant-genes-blekhman-v1.compliment.bed.gz
tabix hg38-autosomal-dominant-genes-blekhman-v1.compliment.bed.gz
rm hg38.genome
rm blekhman_ad.tsv
rm unflattened_ad_genes.bed
rm parse_gtf_by_gene.py
rm sort_columns.py
| gogetdata/ggd-recipes | recipes/genomics/Homo_sapiens/hg38/hg38-autosomal-dominant-genes-blekhman-v1/recipe.sh | Shell | mit | 4,816 |
java AES e testKey.txt testEncrypt.txt
| IJJJfoundation/AES | src/deployment/encrypt_shell_script.sh | Shell | mit | 39 |
#!/usr/bin/env bash
./gradlew installDist
./cli/build/install/cli/bin/cli
| cdcalc/cdcalc | run.sh | Shell | mit | 74 |
#!/bin/bash
USER_VARS_FILE_NAME="${HOME}/.userVars.sh";
source ./scripts/shellVars.sh;
function loadShellVars() {
if [ -f ${USER_VARS_FILE_NAME} ]; then
source ${USER_VARS_FILE_NAME}
else
for varkey in "${!SHELLVARNAMES[@]}"; do
X=${SHELLVARNAMES[$varkey]};
SHELLVARS["${X},VAL"]=${!X};
eval "export ${SHELLVARNAMES[$varkey]}='${SHELLVARS[${SHELLVARNAMES[$varkey]},VAL]}'";
done
fi
# echo "From source ... ";
# echo "PROJECT_NAME = ${PROJECT_NAME}";
# echo "MODULE_NAME = ${MODULE_NAME}";
# echo -e "GITHUB_ORGANIZATION_NAME = ${GITHUB_ORGANIZATION_NAME}\n";
}
function saveShellVars()
{
echo -e "Saving shell variables to $1";
echo -e "#/bin/bash\n# You can edit this, but it may be altered progrmmatically." > $1;
for varkey in "${!SHELLVARNAMES[@]}"; do
X=${SHELLVARNAMES[$varkey]};
eval "echo \"export ${X}='${!X}';\" >> $1;";
done
chown ${SUDOUSER}:${SUDOUSER} ${USER_VARS_FILE_NAME};
}
function askUserForParameters()
{
declare -a VARS_TO_UPDATE=("${!1}");
CHOICE="n";
while [[ ! "X${CHOICE}X" == "XyX" ]]
do
ii=1;
for varkey in "${VARS_TO_UPDATE[@]}"; do
eval "printf \"\n%+5s %s\" $ii \"${SHELLVARS[${varkey},SHORT]}\"";
# eval "echo $ii/. -- ${SHELLVARS[${varkey},SHORT]}";
((ii++));
done;
echo -e "\n\n";
read -ep "Is this correct? (y/n/q) :: " -n 1 -r USER_ANSWER
CHOICE=$(echo ${USER_ANSWER:0:1} | tr '[:upper:]' '[:lower:]')
if [[ "X${CHOICE}X" == "XqX" ]]; then
echo "Skipping this operation."; exit 1;
elif [[ ! "X${CHOICE}X" == "XyX" ]]; then
for varkey in "${VARS_TO_UPDATE[@]}"; do
read -p "${SHELLVARS[${varkey},LONG]}" -e -i "${!varkey}" INPUT
if [ ! "X${INPUT}X" == "XX" ]; then eval "${varkey}=\"${INPUT}\""; fi;
done;
fi;
echo " "
done;
saveShellVars ${USER_VARS_FILE_NAME};
return;
};
# loadShellVars;
# PARM_NAMES=("MODULE_NAME" "GITHUB_ORGANIZATION_NAME");
# askUserForParameters PARM_NAMES[@];
# exit;
# echo -e "
# From SHELLVARS ...";
# echo "PARENT_DIR = ${PARENT_DIR}";
# echo "METEOR_PWD = ${METEOR_PWD}";
# echo "YOUR_FULLNAME = ${YOUR_FULLNAME}";
# echo "FLOOZIE = ${FLOOZIE}";
# # for varkey in "${!SHELLVARS[@]}"; do
# # echo ${SHELLVARS[$varkey]};
# # done
# # eval "export VVV='hhh';";
# # echo $VVV;
# echo -e "------
# ";
# exit;
| martinhbramwell/Meteor-CI-Tutorial | scripts/manageShellVars.sh | Shell | mit | 2,376 |
for i in `ls *fasta_*.fasta`; do echo $i; time python ~/Development/pyPaSWAS/pypaswas.py --loglevel=info -M BLOSUM80 canisLupusAnkyrin.fasta $i -L timings/$i.CPU.log.txt --device_type=CPU --platform_name=Intel --framework=OpenCL -o speed.txt; done
| swarris/pyPaSWAS | data/desktop/r1.sh | Shell | mit | 249 |
#!/bin/bash
ref_date='Thu Apr 19 17:07:39 CDT 2012'
#ref_sec=$(date -j -f '%a %b %d %T %Z %Y' "${ref_date}" +%s)
ref_sec=$(date +%s)
update_inc=1
#tput clear
#cat <<'EOF'
#
#
# [|] [|]
# _-'''''''''''''-_
# / \
# | |
# | |
# | |
# \ /
# '-_____________-'
#EOF
while :
do
((sec=$(date +%s) - ${ref_sec}))
((day=sec/86400))
((sec-=day*86400))
((hour=sec/3600))
((sec-=hour*3600))
((min=sec/60))
((sec-=min*60))
#tput cup 6 14
#tput cup 0 0
printf "\r%.2id:%.2ih:%.2im:%.2is" ${day} ${hour} ${min} ${sec}
sleep ${update_inc}
done
exit 0
| joserc87/config-files | scripts/trash/stopwatch.sh | Shell | mit | 710 |
#!/bin/bash
# pipe usage:
# user@domain: path_to_pipe/pipe.sh -g <mm10/hg38> -r <PE/SE> -o read_file1 -p read_file2 (if PE file)
# input file: sra file, fastq file, and fastq.gz file
# pipe start
###################################################################################################
# read all necessary parameters and prepare data structure
date
pipe_version="v3.1"
host="zhanglab/atac-seq base"
# get the absolute path
pipe_path="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
md5=`md5sum $0 | awk '{print $1}'`
# read parameters
while getopts m:t:g:o:p:r:i:h opts
do case "$opts" in
m) marker="$OPTARG";; # default 'no_annotation'
t) threads="$OPTARG";; # default 24
g) species="$OPTARG";; # hg19, hg38, mm9, mm10, danRer10
o) R1="$OPTARG";; # PE read 1, or the SE file, or the sra file
p) R2="$OPTARG";; # PE read 2.
r) types="$OPTARG";; # PE or SE;
i) ifr_parameter="$OPTARG";;
h) echo "
Analyzing ATAC-seq data, generating QC plot and json reports.
usage: path-to-pipe/pipe.sh -g <hg38/hg19/mm10/mm9/danRer10> -r <PE/SE> -o <read_file1> -p <read_file2>
Options: -g input species. Please notice that each docker/singularity image is designed for one species only.
-r reads type, PE for paired-end reads, SE for single-end reads.
-o input read file1.
-p input read file2.
-t threads used for the pipe, mainly involved in cutadapt and bwa mem steps [24].
-i insertion free region finding parameters used by Wellington Algorithm (Jason Piper etc. 2013), see documentation for more details.
If you do NOT want to run the IFR finding step, just ignore the -i option; however, IFR finding will use default parameters only if -i is specified as 0:
min_lfp=5
max_lfp=15
step_lfp=2
min_lsh=50
max_lsh=200
step_lsh=20
method=BH
p_cutoff=0.05
If you want to specify your own parameters, please make sure they are in the same order and separated by commas
Example: -i 5,15,2,50,200,20,BH,0.05
You can check the pipe log file for the parameters used by IFR code
"
exit;;
[?]) echo "
Analyzing ATAC-seq data, generating QC plot and json reports.
usage: path-to-pipe/pipe.sh -g <hg38/hg19/mm10/mm9/danRer10> -r <PE/SE> -o <read_file1> -p <read_file2>
Options: -g input species. Please notice that each docker/singularity image is designed for one species only.
-r reads type, PE for paired-end reads, SE for single-end reads.
-o input read file1.
-p input read file2.
-t threads used for the pipe, mainly involved in cutadapt and bwa mem steps [24].
-i insertion free region finding parameters used by Wellington Algorithm (Jason Piper etc. 2013), see documentation for more details.
If you do NOT want to run the IFR finding step, just ignore the -i option; however, IFR finding will use default parameters only if -i is specified as 0:
min_lfp=5
max_lfp=15
step_lfp=2
min_lsh=50
max_lsh=200
step_lsh=20
method=BH
p_cutoff=0.05
If you want to specify your own parameters, please make sure they are in the same order and separated by commas
Example: -i 5,15,2,50,200,20,BH,0.05
You can check the pipe log file for the parameters used by IFR code
"
exit;;
esac
done
if [ -z "$threads" ]
then
threads=24
fi
if [ -z "$marker" ]
then
marker='no_annotation'
fi
if [[ $R1 == *.sra ]]
then name=`echo ${R1%.sra}`
echo "this is sra file, $fastq_dump_tool would be used......"
$fastq_dump_tool $R1 --split-3
raw1=$name'_1.fastq'
raw2=$name'_2.fastq'
elif [[ $R1 == *.fastq* ]] && [[ $types == PE ]]
then
name=`echo ${R1%.fastq*}`
raw1=$R1
raw2=$R2
elif [[ $R1 == *.fastq* ]] && [[ $types == SE ]]
then
name=`echo ${R1%.fastq*}`
raw1=$R1
else
echo "please use fastq(or fastq.gz) file or sra file......"
exit
fi
# analysis code
# each '0' step means those are prerequest for the following one
# each step would assume previous steps have been processed
###################################################################################################
# step0, preparation
s0_atac_pre () {
mkdir 'Processed_'$name
ln -rs $R1 ./'Processed_'$name/$R1
ln -rs $raw1 ./'Processed_'$name/$raw1 2> /dev/null
ln -rs $raw2 ./'Processed_'$name/$raw2 2> /dev/null
cd ./'Processed_'$name/
source $pipe_path'/qc_source.sh' $species
mkdir 'QC_ATAC_data_collection_'$name
touch QC_pipe_processing.log
# start record
date >> QC_pipe_processing.log
echo "Target file is $R1 $R2" >> QC_pipe_processing.log
echo "Specified species is $species" >> QC_pipe_processing.log
echo "types of reads is $types" >> QC_pipe_processing.log
echo " " >> QC_pipe_processing.log
}
# Step 1.1, Trim ATAC-seq adapters and QC on seq file
s1.1_cutadapt () {
if [[ $types == PE ]];
then
echo 'trimming ATAC PE reads by cutadapt'
$cutadapt -a $adapter_1 -A $adapter_2 --quality-cutoff=15,10 --minimum-length=36 -o 'step1.1_trimed_'$name'_1.fastq' -p 'step1.1_trimed_'$name'_2.fastq' $raw1 $raw2 > 'step1.1_'$name'_cutadapt_PE.trimlog'
temp=`grep "Total read pairs processed:" step1.1_*trimlog | awk '{print $5}'`
raw_reads=`echo ${temp//,}`
temp2=`grep "Pairs written" step1.1_*trimlog | awk '{print $5}'`
written_reads=`echo ${temp2//,}`
elif [[ $types == SE ]];
then
echo 'trimming ATAC SE reads by cutadapt'
$cutadapt -a $adapter_1 --quality-cutoff=15,10 --minimum-length=36 -o 'step1.1_trimed_'$name'.fastq' $raw1 > 'step1.1_'$name'_cutadapt_SE.trimlog'
temp=`grep "Total reads processed:" step1.1_*trimlog | awk '{print $4}'`
raw_reads=`echo ${temp//,}`
temp2=`grep "Reads written" step1.1_*trimlog | awk '{print $5}'`
written_reads=`echo ${temp2//,}`
fi
if [ $? == 0 ]
then
echo "step1.1, cutadapt trimming done" >> QC_pipe_processing.log
else
echo "step1.1, cutadapt trimming fail......" >> QC_pipe_processing.log
# exit 1
fi
}
# step1.2, fastqc
s1.2_fastqc () {
echo 'fastqc is processing fastq file......'
[ -f ` ls 'step1.1_trimed_'$name*'.fastq' | head -1` ] && $fastqc -t $threads 'step1.1_trimed_'$name*'.fastq' -o .
if [ $? == 0 ]
then
echo "step1.2, fastqc process done" >> QC_pipe_processing.log
else
echo "step1.2, fastqc process fail......" >> QC_pipe_processing.log
exit 1
fi
for zip in `ls | grep fastqc.zip`
do
unzip -o $zip
mv $zip 'step1.2_'$zip
done
# 1.3 fastqc data collection
echo -e "filename\tdeduplication_percentage\tmarker" > 'step1.3_dedup_percentage_'$name'.result'
for file in `ls -d *fastqc/`
do
cd $file
temp=`echo ${file##step1.1_trimed_}`
out_name=`echo ${temp%*_fastqc/}`
out_value=`grep 'Total Deduplicated Percentage' fastqc_data.txt | awk '{print $4}'`
echo -e "$out_name\t$out_value\t$marker" >> ../'step1.3_dedup_percentage_'$name'.result'
echo -e "item\t$out_name\t$out_name" > 'step1.3_duplication_summary_'$out_name'.result'
grep 'Sequence Duplication Levels' -A 15 fastqc_data.txt >> 'step1.3_duplication_summary_'$out_name'.result'
mv 'step1.3_duplication_summary_'$out_name'.result' ../'QC_ATAC_data_collection_'$name
echo -e "$out_name\tfastqc_test" > 'step1.3_fastqc_summary_'$out_name'.result'
awk -F "\t" '{print $1,$2}' OFS='\t' summary.txt >> 'step1.3_fastqc_summary_'$out_name'.result'
mv 'step1.3_fastqc_summary_'$out_name'.result' ../'QC_ATAC_data_collection_'$name
cd ..
done
if [ $? == 0 ]
then
echo "step1.3, fastqc data_collection process done" >> QC_pipe_processing.log
else
echo "step1.3, fastqc data_collection process fail......" >> QC_pipe_processing.log
fi
sed 1d step1.3_dedup_percentage_$name'.result' | cut -f 2 > temp_dedup.txt \
&& before_dedup=$(python -c "print(`awk '{s+=$1}END{print s}' temp_dedup.txt` * 0.01 /`cat temp_dedup.txt | wc -l`)") \
&& before_dup=$(python -c "print(1-$before_dedup*1.0)") \
&& rm temp_dedup.txt
mv 'step1.3_dedup_percentage_'$name'.result' ./'QC_ATAC_data_collection_'$name
mv *fastqc* ./'QC_ATAC_data_collection_'$name
# 1.4, get PE data R1 R2 deduplication difference percentage
if [[ $types == PE ]];
then
per1=`tail -n 2 ./'QC_ATAC_data_collection_'$name/'step1.3_dedup_percentage_'$name'.result' | awk '{print $2}' | sed -n '1p'`
per2=`tail -n 2 ./'QC_ATAC_data_collection_'$name/'step1.3_dedup_percentage_'$name'.result' | awk '{print $2}' | sed -n '2p'`
dif=`echo "scale=2; ($per1-$per2)*200/($per1+$per2)" | bc -l`
else
dif=0
fi
if [ $? == 0 ]
then
echo "step1.4, calculate replicate difference process done" >> QC_pipe_processing.log
else
echo "step1.4, calculate replicate difference process fail......" >> QC_pipe_processing.log
fi
}
# step2.0, files check
s2.0_ref () {
# refine chrom_size file (remove random and Unknown record)
awk '{ if ((length($1) < 6) && (length($1) > 1)) print $0}' OFS='\t' $chrom_size > refined_chrom_size.txt
chrom_size=`pwd`"/refined_chrom_size.txt"
}
# step2.1, BWA MEM alignment
s2.1_bwa () {
echo 'alignment by bwa......'
$bwa mem -t $threads $bwa_ref 'step1.1_trimed_'$name*'.fastq' | $samtools view -bS - | $samtools sort - -O 'bam' -o 'step2.1_trimed_'$name'.bam' -T temp_aln
if [ $? == 0 ]
then
echo "step2.1, bwa alignment process done" >> QC_pipe_processing.log
rm 'step1.1_trimed_'$name*'.fastq'
else
echo "step2.1, bwa alignment process fail......" >> QC_pipe_processing.log
exit 1
fi
}
# step2.2, removing low mapQ reads and count reads distribution (mapQ=0 makes no sense here, because they are not reliable)
s2.2_distri () {
$samtools view -h 'step2.1_trimed_'$name'.bam' > input.sam \
&& awk '$5>0' input.sam | sed '/^@/d' - | cat <(grep '^@' input.sam) - > output.sam \
&& cat output.sam | awk '{print $3}' | sort -k1,1V | uniq -c > count_no_mapq0.txt \
&& awk '! /random/ && ! /Un/ && /chr/ ' count_no_mapq0.txt | awk '{print $2, $1}' OFS="\t" | sort -k1,1 -V -s > temp2.2.txt
if [[ $types == SE ]]; then
mv temp2.2.txt 'step2.2_chrom_count_'$name'.txt'
elif [[ $types == PE ]]; then
awk '{$2=int($2*0.5); print}' OFS="\t" temp2.2.txt > 'step2.2_chrom_count_'$name'.txt' && rm temp2.2.txt
fi
# only effect reads
$methylQA density -S $chrom_size output.sam
cut -f 1 output.extended.bed | uniq -c > count_unique.txt
awk '{print $2, $1}' OFS="\t" count_unique.txt | sort -k1,1 -V -s > 'step2.2_chrom_count_unique_'$name'.txt'
effect_chrM=`grep chrM output.extended.bed | wc -l`
# get chrM count in uniquely mapped reads
# to keep the count consistent, the results are all from methylQA
# when count directly from output.sam file, please pay attention to the unpaired reads
cat <(samtools view -H input.sam) <(awk '$3=="chrM"' input.sam) | $methylQA density -S -r -o temp $chrom_size -
unique_chrM=`grep chrM temp.extended.bed | wc -l`
rm temp*
rm output*
rm count*.txt
rm input.sam
awk -F "\t" '{print $2}' 'step2.2_chrom_count_unique_'$name'.txt' | paste 'step2.2_chrom_count_'$name'.txt' - | awk -F "\t" -v marker=$marker '{print $1,$2+0,$3+0,marker}' OFS="\t" > ./'QC_ATAC_data_collection_'$name/'step2.2_chrom_count_'$name'.result'
if [ $? == 0 ]
then
echo "step2.2, count reads distribution process done" >> QC_pipe_processing.log
else
echo "step2.2, count reads distribution process fail......" >> QC_pipe_processing.log
fi
rm step2.2_chrom_count*txt
}
# step2.3, preseq
s2.3_preseq () {
$preseq lc_extrap -o 'step2.3_yield_'$name'.result' -B 'step2.1_trimed_'$name'.bam'
if [ $? == 0 ]
then
echo "step2.3, preseq lc_extrap estimate process done" >> QC_pipe_processing.log
else
echo "step2.3, preseq lc_extrap estimate process fail......" >> QC_pipe_processing.log
fi
mv 'step2.3_yield_'$name'.result' ./'QC_ATAC_data_collection_'$name
}
# 3.1, methylQA
s3.1_methylQA () {
echo 'methylQA processing......'
$methylQA atac -o step3.1_methylQA_$name $chrom_size 'step2.1_trimed_'$name'.bam'
if [ $? == 0 ]
then
echo "step3.1, mathylQA atac process done" >> QC_pipe_processing.log
else
echo "step3.1, mathylQA atac process fail......" >> QC_pipe_processing.log
fi
# mapping status
map_mapped=`grep 'mappable reads' step3.1_methylQA_$name'.report' | awk '{print $4}'`
map_uniq=`grep '(mapQ >= 10)' step3.1_methylQA_$name'.report' | awk '{print $8}'`
map_effect=`grep 'non-redundant' step3.1_methylQA_$name'.report' | awk '{print $6}'`
mapped_ratio=`echo "scale=2; $map_mapped/$raw_reads" | bc -l`
effect_ratio=`echo "scale=2; $map_effect/$raw_reads" | bc -l`
# unique chrM ratio from step2.2
unique_chrM_ratio=`echo "scale=4; $unique_chrM / $map_uniq" | bc -l`
echo -e "unique_mapped\tchrM\tunique_chrM_ratio" > 'step2.2_unique_chrM_ratio_'$name'.result'
echo -e "$map_uniq\t$unique_chrM\t$unique_chrM_ratio" >> 'step2.2_unique_chrM_ratio_'$name'.result'
mv 'step2.2_unique_chrM_ratio_'$name'.result' ./'QC_ATAC_data_collection_'$name
unique_no_chrM=`python -c "print($map_uniq-$unique_chrM)"`
effect_no_chrM=`python -c "print($map_effect-$effect_chrM)"`
nodup_ratio=`echo "scale=3; $effect_no_chrM/$unique_no_chrM" | bc -l`
after_dup=$(python -c "print(1-$nodup_ratio*1.0)")
useful=`grep 'non-redundant' step3.1_methylQA_*.report | awk '{print $6}'`
single_end=`wc -l *open.bed | awk '{print $1}'`
uf_ratio=`echo "scale=3; $useful / $raw_reads" | bc -l`
echo -e "file\ttotal\tuseful\tuseful_ratio\tsingle_end" > 'step3.1_useful_reads_'$name.result
echo -e "$name\t$raw_reads\t$useful\t$uf_ratio\t$single_end" >> 'step3.1_useful_reads_'$name.result
mv 'step3.1_useful_reads_'$name.result ./'QC_ATAC_data_collection_'$name
sort -n 'step3.1_methylQA_'*$name'.insertdistro' | uniq -c | awk '{print $2,$1}' > 'step3.1_insertion_distri_'$name'.result' && rm 'step3.1_methylQA_'*$name'.insertdistro'
mv 'step3.1_insertion_distri_'$name'.result' ./'QC_ATAC_data_collection_'$name
rm step3.1_methylQA_*bigWig
}
# 3.2, normalization bedGraph -> 10M
s3.2_nomral_bg () {
echo 'normalization bedGraph......'
# add a new bigwig file without black list
intersectBed -iobuf 200M -a 'step3.1_methylQA_'*$name'.open.bedGraph' -b $black_list -v > rmbl.bedGraph
bedGraphToBigWig rmbl.bedGraph $chrom_size 'step3.2_rmbl_'$name'.bigWig' && rm 'step3.1_methylQA_'*$name'.open.bedGraph'
# normalization
norm=`grep 'non-redundant' step3.1_methylQA_*report | awk '{print $6}'`
factor=`echo "scale=3; $norm/10000000" | bc -l`
awk -v factor=$factor '{print $1,$2,$3,$4/factor}' OFS='\t' rmbl.bedGraph > 'step3.2_normalized_per_10M_'$name'.open.bedGraph'
bedGraphToBigWig 'step3.2_normalized_per_10M_'$name'.open.bedGraph' $chrom_size 'step3.2_normalized_per_10M_'$name'.bigWig' && rm 'step3.2_normalized_per_10M_'$name'.open.bedGraph'
rm rmbl.bedGraph
if [ $? == 0 ]
then
echo "step3.2, normalization process done" >> QC_pipe_processing.log
else
echo "step3.2, normalization process fail......" >> QC_pipe_processing.log
fi
}
# 3.3, peak calling
s3.3_peakcall () {
echo 'peak calling......'
awk '{if ($2 > $3)sub($2, 0); print}' OFS="\t" 'step3.1_methylQA_'$name'.open.bed' > temp.open.bed \
&& intersectBed -iobuf 200M -a temp.open.bed -b $black_list -v > 'step3.3_rmbl_'$name'.open.bed' \
&& rm temp.open.bed 'step3.1_methylQA_'$name'.open.bed'
$macs2 callpeak -t 'step3.3_rmbl_'$name'.open.bed' -g $macs2_genome -q 0.01 -n 'step3.4_peakcall_'$name --keep-dup 1000 --nomodel --shift 0 --extsize 150
if [ $? == 0 ]
then
echo "step3.4, macs2 peak calling process done" >> QC_pipe_processing.log
else
echo "step3.4, macs2 peak calling process fail......" >> QC_pipe_processing.log
fi
mv step3.1_methylQA_$name* 'QC_ATAC_data_collection_'$name
# peak length distribution:
awk '{print $3-$2+1}' 'step3.4_peakcall_'$name'_peaks.narrowPeak' | sort -n | uniq -c | awk '{print $2,$1}' > 'step3.4_peak_length_distri_'$name'.result'
mv 'step3.4_peak_length_distri_'$name'.result' ./'QC_ATAC_data_collection_'$name
}
# step4.0, set variable
s4.0_set () {
peak='step3.4_peakcall_'$name'_peaks.narrowPeak'
bed='step3.3_rmbl_'$name'.open.bed'
}
# 4.1, RUP and insertion site
s4.1_rup () {
total=`wc -l $bed |awk '{print $1}'`
sum=`intersectBed -iobuf 200M -a $bed -b $peak -f 0.5 -u | wc -l`
ratio=`echo "scale=2; $sum*100/$total" | bc -l`
if [ $? == 0 ]
then
echo "step4.1, reads unpder peak ratio calculation process done" >> QC_pipe_processing.log
else
echo "step4.1, reads unpder peak ratio calculation process fail......" >> QC_pipe_processing.log
fi
# 4.1.2, add insertion site bigwig
awk '{mid=int(($3+$2)/2); if($6=="+") {print $1"\t"mid"\t"mid+1"\t"1} else {print $1"\t"mid-1"\t"mid"\t"1}}' \
$bed | sort -k1,1 -k2,2n | uniq -c | awk -F " " '{print $2"\t"$3"\t"$4"\t"$1}' > step4.2_insertion_site_$name.bedGraph
bedGraphToBigWig step4.2_insertion_site_$name.bedGraph $chrom_size step4.2_insertion_site_$name'.bigWig'
if [ $? == 0 ]
then
echo "step4.1.2, insertion site process done" >> QC_pipe_processing.log
else
echo "step4.1.2, insertion site process fail......" >> QC_pipe_processing.log
fi
}
# 4.2, enrichment
s4.2_enrich () {
# 4.2.1, new enrichment from RUP based on 10M sub-sampling with adjustment
# numerator = ($rupn+10000000*$peak_length / $genome_size) / $peak_length
# denominator = (10000000+$useful_ends) / ($genome_size-$peak_length))
## $1 for original open.bed, $2 for useful_single ends/sub-sample 10M
cal_enrich () {
shuf $1 | head -10000000 > temp.open.bed
$macs2 callpeak -t temp.open.bed -g $macs2_genome -q 0.01 -n temp_peak --keep-dup 1000 --nomodel --shift 0 --extsize 150
peak_length=`awk '{s+=$3-$2+1}END{print s}' 'temp_peak_peaks.narrowPeak'`
rupn=`intersectBed -iobuf 200M -a temp.open.bed -b 'temp_peak_peaks.narrowPeak' -f 0.5 | wc -l`
upper=`python -c "print(1.0*($rupn+10000000*$peak_length/$genome_size)/$peak_length)"`
lower=`python -c "print(1.0*($2+10000000)/($genome_size-$peak_length))"`
enrichment=`python -c "print(1.0*$upper/$lower)"`
rup=`python -c "print(1.0*$rupn/10000000)"`
echo -e "name\trupn\trup\tcoverage\tenrichment" > 'step4.2_sub10M_enrichment_'$name'.result'
echo -e "$name\t$rupn\t$rup\t$peak_length\t$enrichment" >> 'step4.2_sub10M_enrichment_'$name'.result'
mv 'step4.2_sub10M_enrichment_'$name'.result' ./'QC_ATAC_data_collection_'$name
rm temp.open.bed temp_peak_*
}
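# Hedged numeric illustration of cal_enrich (all values invented): with genome_size = 3e9 bp,
# a sub-sample peak length of 6e7 bp, rupn = 2e6 reads in peaks and 1e7 useful ends passed
# as $2, the numerator is (2e6 + 1e7*6e7/3e9)/6e7 ~ 0.0367, the denominator is
# (1e7+1e7)/(3e9-6e7) ~ 0.0068, giving an enrichment of roughly 5.4 and rup = 2e6/1e7 = 0.2.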
total=`wc -l $bed |awk '{print $1}'`
if (( $total > 10000000 ))
then
cal_enrich $bed 10000000
else
cal_enrich $bed $total
echo "Warning: the open.bed file contains less than 10M reads" >> QC_pipe_processing.log
echo "Warning: the enrichment is calculated by the original bed file, and may not be reliable" >> QC_pipe_processing.log
fi
if [ $? == 0 ]
then
echo "step4.2.1, sub10M enrichment ratio process done" >> QC_pipe_processing.log
else
echo "step4.2.1, sub10M enrichment ratio process fail......" >> QC_pipe_processing.log
fi
# 4.2.2, coding promoter enrichment
# coding enrichment = ( reads in promoter / promoter length) / (total reads / genome size)
denominator=`echo "scale=10; $total / $genome_size" | bc -l`
intersectBed -iobuf 200M -a $peak -b $coding_promoter -u > promoter_peak.bed
reads_in_promoter=`intersectBed -iobuf 200M -a $bed -b promoter_peak.bed -f 0.5 -u | wc -l | awk '{print $1}'`
promoter_number=`intersectBed -iobuf 200M -a $coding_promoter -b promoter_peak.bed -F 0.5 -u | wc -l | awk '{print $1}'`
promoter_length=`echo "$promoter_number * 2000+0.001" | bc -l`
enrichment_ratio=`echo "scale=3; $reads_in_promoter / $promoter_length / $denominator" | bc -l`
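# Worked example for the promoter enrichment above (illustrative numbers only): with
# total = 20,000,000 reads, genome_size = 3e9, 10,000 promoters overlapping peaks
# (promoter_length ~ 2e7 bp) and 2,000,000 reads in promoter peaks,
# denominator = 2e7/3e9 ~ 0.00667 and enrichment_ratio = (2e6/2e7)/0.00667 ~ 15,
# i.e. promoter peaks are about 15x denser in reads than the genome-wide average.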
if [ $? == 0 ]
then
echo "step4.2.2, coding promoter enrichment ratio process done" >> QC_pipe_processing.log
else
echo "step4.2.2, coding promoter enrichment ratio process fail......" >> QC_pipe_processing.log
fi
echo -e "name\ttotal_reads\tpromoter_number\treads_in_promoter\tenrichment_ratio" > 'step4.2_enrichment_ratio_in_promoter_'$name'.result'
echo -e "$name\t$total\t$promoter_number\t$reads_in_promoter\t$enrichment_ratio" >> 'step4.2_enrichment_ratio_in_promoter_'$name'.result'
mv 'step4.2_enrichment_ratio_in_promoter_'$name'.result' 'QC_ATAC_data_collection_'$name
rm promoter_peak.bed
}
# 4.4, saturation analysis
s4.4_saturation () {
# subsampling:
total=`wc -l $bed |awk '{print $1}'`
for number in 5 10 20 30 40 50 60 70 80 90
do
sample_ratio=$(( total * $number / 100 ))
shuf $bed | head -$sample_ratio > 'Trimed_rmbl_'$name'_sample'$number'.open.bed'
done
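# The loop above draws an independent random subsample at each depth (5% and 10%-90% of
# the usable reads); e.g. with total = 30,000,000 reads the "sample20" file holds roughly
# 30000000*20/100 = 6,000,000 shuffled lines.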
if [ $? == 0 ]
then
echo "step4.4, saturation subsampling process done" >> QC_pipe_processing.log
else
echo "step4.4, saturation subsampling process fail......" >> QC_pipe_processing.log
fi
# call peak
mkdir saturation_$name
mv *sample*.open.bed ./saturation_$name/
ln -rs $bed ./saturation_$name/
cp step3.4_peakcall_*Peak ./saturation_$name/'peakcall_Trimed_rmbl_'$name'.open.bed_peaks.narrowPeak'
cd ./saturation_$name
for file in `ls 'Trimed_rmbl_'$name'_sample'*'.open.bed'`;
do
$macs2 callpeak -t $file -g $macs2_genome -q 0.01 -n 'peakcall_'$file --keep-dup 1000 --nomodel --shift 0 --extsize 150
done
if [ $? == 0 ]
then
echo "step4.5, saturation call peak process done" >> ../QC_pipe_processing.log
else
echo "step4.5, saturation call peak process fail......" >> ../QC_pipe_processing.log
fi
echo "peak calling done......"
# summarise results
echo -e "5\n`seq 10 10 100`" > saturation_points.txt
for file in `ls *open.bed`
do
read_num=`wc -l $file | awk '{print $1}'`
echo `echo "scale=2; $read_num / 1000000" | bc -l`>> temp44.txt
rm $file
done
sort -k1,1n temp44.txt > saturation_reads.txt
rm temp44.txt
total_region=`awk '{s+=$3-$2+1}END{print s}' 'peakcall_Trimed_rmbl_'$name'.open.bed_peaks.narrowPeak'`
for number in 5 10 20 30 40 50 60 70 80 90
do
file='peakcall_Trimed_rmbl_'$name'_sample'$number'.open.bed_peaks.narrowPeak'
peak_number=`wc -l $file | awk '{print $1}'`
peak_region=`intersectBed -iobuf 200M -a $file -b 'peakcall_Trimed_rmbl_'$name'.open.bed_peaks.narrowPeak' | awk '{s+=$3-$2+1}END{print s}'`
if [ -z "$peak_region" ]; then
peak_region=0
fi
echo `echo "scale=2; $peak_region / $total_region" | bc -l` >> temp443.txt
echo $peak_number >> temp442.txt
done
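# For each subsample, peak_region sums the bases of its peaks that overlap the full-depth
# peak set, so peak_region/total_region is the fraction of the final peak space already
# recovered at that depth; a ratio curve that flattens near 1 before 100% suggests the
# library is close to saturation.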
if [ $? == 0 ]
then
echo "step4.5, saturation results collection process done" >> ../QC_pipe_processing.log
else
echo "step4.5, saturation results collection process fail......" >> ../QC_pipe_processing.log
fi
echo `wc -l 'peakcall_Trimed_rmbl_'$name'.open.bed_peaks.narrowPeak' | awk '{print $1}'` >> temp442.txt
mv temp442.txt saturation_peak.txt
echo 1 >> temp443.txt
mv temp443.txt saturation_ratio.txt
paste saturation_points.txt saturation_reads.txt saturation_peak.txt saturation_ratio.txt > temp444.txt
echo -e "file\t$name'_read'\t$name'_peak'\t$name'_ratio'\tmarker" > 'step4.4_saturation_'$name'.result'
awk -v marker=$marker '{print $0,marker}' OFS='\t' temp444.txt >> 'step4.4_saturation_'$name'.result'
rm temp444.txt
rm saturation*.txt
mv 'step4.4_saturation_'$name'.result' ../'QC_ATAC_data_collection_'$name
cd ..
mv saturation_$name 'QC_ATAC_data_collection_'$name
}
# 4.5, background
s4.5_background () {
# an exit signal of 1 happens when the peak number is < 100, so the two "mv bin.txt" commands may report a minor error; this does not influence the results
# signal part
intersectBed -iobuf 200M -a $peak -b $promoter_file -u | awk '{print $1"\t"$2"\t"$3"\t""1""\t"$9}' > promoter.narrowPeak
intersectBed -iobuf 200M -a $peak -b $promoter_file -v | awk '{print $1"\t"$2"\t"$3"\t""0""\t"$9}' > non-promoter.narrowPeak
echo -e "num_peaks_in_promoter\tnum_peaks_in_non-promoter\tnum_reads_in_promoter_peaks\tnum_reads_in_non-promoter_peaks" > 'promoter_percentage_'$name'.result'
peak1=`wc -l promoter.narrowPeak | awk '{print $1}'`
peak2=`wc -l non-promoter.narrowPeak | awk '{print $1}'`
read1=`intersectBed -iobuf 200M -a $bed -b promoter.narrowPeak -u -f 0.50 | wc -l`
read2=`intersectBed -iobuf 200M -a $bed -b non-promoter.narrowPeak -u -f 0.50 | wc -l`
echo -e "$peak1\t$peak2\t$read1\t$read2" >> 'promoter_percentage_'$name'.result'
sed -i 's/^-e //' 'promoter_percentage_'$name'.result'
cat promoter.narrowPeak non-promoter.narrowPeak | sort -k5 -n -r > top10k.narrowPeak
if (( `cat top10k.narrowPeak | wc -l` > 100 ))
then
python $pipe_path'/promoter_bin.py' top10k.narrowPeak
else
echo "Warning: total peak is fewer than 100, promoter bin step would be skipped. At least 100 peaks are required." >> ./QC_pipe_processing.log
fi
rm promoter.narrowPeak
rm non-promoter.narrowPeak
rm top10k.narrowPeak
# background noise part
awk '{ if ((length($1) < 6) && (length($1) > 1)) print $0}' OFS='\t' $chrom_size > temp45.txt
python2.7 $pipe_path'/random_chr.py' temp45.txt
size=`wc -l $bed | awk '{print $1}'`
awk '{print $1"\t"int(($3+$2)/2)-100000"\t"int(($3+$2)/2)+100000"\t"$4}' $peak > temp45
awk '{if ($2<0) $2=0; print $0}' OFS="\t" temp45 > temp452
mv temp452 temp45
intersectBed -iobuf 200M -a chr.peak -b temp45 -v | shuf - | head -50000 | sort -k1,1V -k2,2n > background
intersectBed -iobuf 200M -a $bed -b background -u -f 0.5 | sort -k1,1V -k2,2n > temp45
python $pipe_path'/rpkm_bin.py' background temp45 $size
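# Background estimate, as we read it: temp45 masks a +/-100 kb window around every peak,
# chr.peak (random regions emitted by random_chr.py) is filtered against that mask, and
# 50,000 of the remaining peak-free regions are kept as "background"; rpkm_bin.py then
# scores read density in those regions so open chromatin can be compared to genomic noise.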
if [ $? == 0 ]
then
echo "step4.6, background evaluation process done" >> ./QC_pipe_processing.log
else
echo "step4.6, background evaluation process fail......" >> ./QC_pipe_processing.log
fi
mv reads.txt 'background_'$name'.result'
rm temp45
rm background
mv bin.txt 'bin_'$name'.result' 2> /dev/null
bg_total=`wc -l background*.result | awk '{print $1}'`
bg_half_thres=`awk '$6<=0.188 {print $0}' background*.result | wc -l`
bg_less=`awk '$6<=0.377 {print $0}' background*.result | wc -l`
bg_more=`awk '$6>0.377 {print $0}' background*.result | wc -l`
ra_half_thres=`echo "scale=2; $bg_half_thres*100 / $bg_total" | bc -l`
ra_less=`echo "scale=2; $bg_less*100 / $bg_total" | bc -l`
ra_more=`echo "scale=2; $bg_more*100 / $bg_total" | bc -l`
echo -e "$ra_half_thres\t$ra_less\t$ra_more" > 'dichoto_bg_'$name'.result'
mv 'dichoto_bg_'$name'.result' ./'QC_ATAC_data_collection_'$name/'step4.5_dichoto_bg_'$name'.result'
mv background_$name'.result' ./'QC_ATAC_data_collection_'$name/step4.5_background_$name'.result'
mv promoter_percentage_$name'.result' ./'QC_ATAC_data_collection_'$name/step4.5_promoter_percentage_$name'.result'
mv bin_$name'.result' ./'QC_ATAC_data_collection_'$name/step4.5_bin_$name'.result' 2> /dev/null
rm temp45.txt chr.peak
}
# step 4.6, visualization
s4.6_visualization () {
#summarize results
echo -e "file\ttotal\twritten_reads\tmapped\tmapped_ratio\tuniq_mapped\tnon_redundant_uniq_mapped\teffect_ratio\tfastqc_dup\tafter_align_dup\tnumber_of_reads_under_peak\trup_ratio\tsub10M_enrichment\tcoding_enrichment\tbg_gt37_percentage" > 'QC_data_collection_'$name'.result'
echo -e "$name\t$raw_reads\t$written_reads\t$map_mapped\t$mapped_ratio\t$map_uniq\t$map_effect\t$effect_ratio\t$before_dup\t$after_dup\t$sum\t$ratio\t$enrichment\t$enrichment_ratio\t$ra_more" >> 'QC_data_collection_'$name'.result'
mv 'QC_data_collection_'$name'.result' ./'QC_ATAC_data_collection_'$name
if [ -z $name ] || [ -z $raw_reads ] || [ -z $map_mapped ] || [ -z $mapped_ratio ] || [ -z $map_uniq ] || [ -z $written_reads ] || [ -z $map_effect ] || [ -z $effect_ratio ] || [ -z $nodup_ratio ] || [ -z $sum ]|| [ -z $ratio ]|| [ -z $dif ] || [ -z $before_dedup ]
then
echo "step4.6, sumarizing result process fail......" >> QC_pipe_processing.log
else
echo "step4.6, sumarizing result process done" >> QC_pipe_processing.log
fi
# plot and json
time=`head -1 QC_pipe_processing.log | sed 's/ /_/g'`
image_id=`bash $pipe_path'/find_image_ID_digest.sh' $host 2> /dev/null | awk '{print $2}'`
if [ -z "$image_id" ]
then
image_id="failed_to_get_id"
fi
# clean result
find . -name "*.result" | xargs sed -i 's/^-e //'
cd ./'QC_ATAC_data_collection_'$name
Rscript $pipe_path'/visualization.R' $name $pipe_path'/../atac_ref/mm10_encode_pe' $species $written_reads $unique_chrM_ratio $pipe_version $time $image_id
if [ $? == 0 ]
then
echo "step4.6, plot process done" >> ../QC_pipe_processing.log
else
echo "step4.6, plot process fail......" >> ../QC_pipe_processing.log
fi
sed 's/\[/{/g' $name'_report.json' | sed '/ {/d' | sed '/\]/d' |\
sed 's/ }/ },/g' | sed 's/"!/{/g' | sed 's/!"/}/g' | sed 's/"?/[/g' | sed 's/?"/]/g' |\
sed 's/@/"/g' | tac | sed '3s/},/}/g' | sed '1,2d' | tac | cat - <(echo " },") <(sed '1d' $pipe_path'/../atac_ref/mm10_encode_pe/encode_pe.json') | sed 's/\\r//g' | sed "s/MD5ToBeChange/$md5/g" > QC_$name'.json'
rm $name'_report.json'
mv QC_$name'.json' ../
paste <(cut -f 1-8 QC_data_collection_${name}.result) <(cut -f 5 step3.1_useful_reads_${name}.result) <(cut -f 9- QC_data_collection_${name}.result) > QC_table_${name}.result
mkdir 'plots_collection_'$name
mv *png 'plots_collection_'$name
rm $name'_report.txt'
cd ..
rm pesudo_bl.txt 2> /dev/null
rm refined_chrom_size.txt
find -type l -delete
multiqc .
rename 's/multiqc/step4.6_multiqc/' multiqc*
}
s4.7_ifr_finding () {
# set default parameters:
if (( $ifr_parameter == 0 )); then
min_lfp=5
max_lfp=15
step_lfp=2
min_lsh=50
max_lsh=200
step_lsh=20
method=BH
p_cutoff=0.05
else
min_lfp=`cut -d"," -f 1 <<< $ifr_parameter`
max_lfp=`cut -d"," -f 2 <<< $ifr_parameter`
step_lfp=`cut -d"," -f 3 <<< $ifr_parameter`
min_lsh=`cut -d"," -f 4 <<< $ifr_parameter`
max_lsh=`cut -d"," -f 5 <<< $ifr_parameter`
step_lsh=`cut -d"," -f 6 <<< $ifr_parameter`
method=`cut -d"," -f 7 <<< $ifr_parameter`
p_cutoff=`cut -d"," -f 8 <<< $ifr_parameter`
fi
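# If a custom setting is needed, the parsing above expects ifr_parameter to be one
# comma-separated string in this order (example values only):
# ifr_parameter="5,15,2,50,200,20,BH,0.05"
# i.e. min_lfp,max_lfp,step_lfp,min_lsh,max_lsh,step_lsh,method,p_cutoff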
echo "step4.7, IFR finding parameters used:
min_lfp is $min_lfp
max_lfp is $max_lfp
step_lfp is $step_lfp
min_lsh is $min_lsh
max_lsh is $max_lsh
step_lsh is $step_lsh
method is $method
p_cutoff is $p_cutoff
" >> QC_pipe_processing.log
awk '{print $1"\t"$2-50"\t"$3+50"\t"$4}' "step3.4_peakcall_"$name"_peaks.narrowPeak" |\
awk '{if(($3-$2)>1000) {printf $1"\t"$2"\t"; printf "%.0f",($2+$3)/2; print "\t"$4".1"; printf $1"\t"; printf "%.0f",($2+$3)/2; print "\t"$3"\t"$4".2"} else print $0}' |\
awk '{if(($3-$2)>1000) {printf $1"\t"$2"\t"; printf "%.0f",($2+$3)/2; print "\t"$4".1"; printf $1"\t"; printf "%.0f",($2+$3)/2; print "\t"$3"\t"$4".2"} else print $0}' > temp.peak
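# The awk passes above pad each peak by 50 bp on both sides and then split any interval
# longer than 1 kb at its midpoint (applied twice, so a very long peak ends up in up to
# four pieces), which keeps the per-region footprint search in ATAC-seq_wellington.R
# from running on unwieldy regions.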
split -n l/$threads temp.peak
rm temp.peak
[ -f list ] && rm list
for file in `ls xa*`
do
intersectBed -a "step4.2_insertion_site_"$name".bedGraph" -b $file -wa -wb | uniq > $file".bed"
echo "Rscript $pipe_path'/ATAC-seq_wellington.R' "$file".bed IFR_"$file".txt" $min_lfp $max_lfp $step_lfp $min_lsh $max_lsh $step_lsh $method $p_cutoff >> list
rm $file
done
cat list | parallel
if [ $? == 0 ]
then
echo "step4.7, IFR finding process done" >> QC_pipe_processing.log
else
echo "step4.7, cIFR finding process fail......" >> QC_pipe_processing.log
fi
rm xa*bed list
cat IFR*txt | sed "s/\"//g" | sort -k1,1V -k2,2n | awk '{print $1"\t"$2"\t"$3"\t""found_IFR_"NR"\t"$4"\t"".""\t"$5}' > "step4.7_IFR_"$name".bed"
rm IFR*txt
}
# run pipeline
###################################################################################################
# step-by-step
s0_atac_pre
s1.1_cutadapt
s1.2_fastqc
s2.0_ref
s2.1_bwa
s2.2_distri
s2.3_preseq
s3.1_methylQA
s3.2_normal_bg
s3.3_peakcall
s4.0_set
s4.1_rup
s4.2_enrich
s4.4_saturation
s4.5_background
s4.6_visualization
if [ -z "$ifr_parameter" ]; then
echo "step4.7 ifr finding is ommited" >> QC_pipe_processing.log
else
s4.7_ifr_finding
fi
echo "Processing $name done"
echo "Processing $name done"
echo "Processing $name done"
cd ..
date
|
ShaopengLiu1/Atac-seq_Quality_Control_pipe
|
archive/AIAP_v1.00/v3.1/atac_v3.1.sh
|
Shell
|
mit
| 34,260 |
#!/bin/bash
TESTS1="t/t1.scm t/t2.scm t/t3.scm t/t4.scm t/t5.scm t/t6.scm t/t7.scm t/t9.scm"
TESTS2="t/records/t1.scm t/records/t2.scm t/records/t3.scm t/records/t4.scm"
function test1 {
echo running tests in $1
for TEST in $TESTS1
do
eval $2
done
echo
echo
echo
}
function test2 {
echo running tests in $1
for TEST in $TESTS2
do
eval $2
done
echo
echo
echo
}
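# Both helpers take a label and a command template; the template is passed with \$TEST
# escaped so that eval expands it once per test file. A hypothetical extra runner would
# follow the same pattern, e.g.:
# test1 chibi "chibi-scheme -I . \"\$TEST\""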
test1 larceny "larceny -r7rs -path .:substitution/assoc:unification/basic -program \"\$TEST\""
test1 sagittarius "sagittarius -c -L. -Lsubstitution/assoc -Lunification/basic -S.sld \"\$TEST\""
#test1 chicken "csi -require-extension r7rs chicken-basic.scm \"\$TEST\" -e '(exit)'"
test1 larceny "larceny -r7rs -path .:substitution/binary-trie:unification/basic -program \"\$TEST\""
test1 sagittarius "sagittarius -c -L. -Lsubstitution/binary-trie -Lunification/basic -S.sld \"\$TEST\""
test2 larceny "larceny -r7rs -path .:substitution/assoc:unification/records -program \"\$TEST\""
test2 sagittarius "sagittarius -c -L. -Lsubstitution/assoc -Lunification/records -S.sld \"\$TEST\""
#test2 chicken "csi -require-extension r7rs chicken-records.scm \"\$TEST\" -e '(exit)'"
test2 larceny "larceny -r7rs -path .:substitution/binary-trie:unification/records -program \"\$TEST\""
test2 sagittarius "sagittarius -c -L. -Lsubstitution/binary-trie -Lunification/records -S.sld \"\$TEST\""
|
orchid-hybrid/microKanren-sagittarius
|
run_tests.sh
|
Shell
|
mit
| 1,412 |
#!/bin/bash -l
#SBATCH
#SBATCH --job-name=glm_1_nodes_40_calls_shared.sh
#SBATCH --time=03:00:00
#SBATCH --mail-type=begin,end
#SBATCH [email protected]
#SBATCH --nodes=1
#SBATCH --partition=shared
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm
glm
|
karoraw1/GLM_Wrapper
|
MARCCTEST/glm_1_nodes_40_calls_shared.sh
|
Shell
|
mit
| 5,571 |
#!/usr/bin/env bash
source bin/log.sh
set -e
if [ -z ${1} ]; then
echo "Usage: ${0} device-set [DeviceAgent-SHA]
Examples:
$ bin/appcenter.sh App-Center-Test-Cloud/coordinator-experiment
$ SKIP_IPA_BUILD=1 SERIES='Args and env' bin/appcenter.sh \
App-Center-Test-Cloud/coordinator-experiment
$ SERIES='DeviceAgent 2.0' bin/appcenter.sh \
App-Center-Test-Cloud/coordinator-experiment \
48d137d6228ccda303b2a71b0d09e1d0629bf980
The DeviceAgent-SHA optional argument allows tests to be run against any
DeviceAgent that has been uploaded to S3 rather than the current active
DeviceAgent for Test Cloud.
If you need to test local changes to run-loop or Calabash on Test Cloud,
use the BUILD_RUN_LOOP and BUILD_CALABASH env variables.
Responds to these env variables:
SERIES: the Test Cloud series
SKIP_IPA_BUILD: iff 1, then skip re-building the ipa.
'make test-ipa' will still be called, so changes in the
features/ directory will be staged and sent to Test Cloud.
BUILD_RUN_LOOP: iff 1, then rebuild run-loop gem before uploading.
  BUILD_CALABASH: iff 1, then rebuild Calabash iOS gem before uploading.
"
exit 64
fi
APPCENTER_TOKEN=$("${HOME}/.calabash/find-keychain-credential.sh" api-token)
if [ -z "${APPCENTER_TOKEN}" ]; then
error "Failed to load appcenter token"
exit 1
fi
# The uninstall/install dance is required to test changes in
# run-loop and calabash-cucumber in Test Cloud
if [ "${BUILD_RUN_LOOP}" = "1" ]; then
gem uninstall -Vax --force --no-abort-on-dependent run_loop
(cd ../run_loop; rake install)
fi
if [ "${BUILD_CALABASH}" = "1" ]; then
gem uninstall -Vax --force --no-abort-on-dependent calabash-cucumber
(cd ../calabash-ios/calabash-cucumber; rake install)
fi
PREPARE_XTC_ONLY="${SKIP_IPA_BUILD}" make ipa-cal
(cd testcloud-submit
rm -rf .xtc
mkdir -p .xtc
if [ "${2}" != "" ]; then
echo "${2}" > .xtc/device-agent-sha
fi)
appcenter test run calabash \
--app-path testcloud-submit/CalSmoke-cal.ipa \
--app App-Center-Test-Cloud/iOSCalSmoke \
--project-dir testcloud-submit \
--token $APPCENTER_TOKEN \
--devices "${1}" \
--config-path cucumber.yml \
--profile default \
--include .xtc \
--test-series ${SERIES} \
--disable-telemetry
|
calabash/ios-smoke-test-app
|
CalSmokeApp/bin/appcenter.sh
|
Shell
|
mit
| 2,257 |
#!/bin/bash
export CONFIG=$1
if [ -f "$CONFIG" ];then
. "$CONFIG"
DIRNAME=$(dirname "$CONFIG")
cd "$DIRNAME" || exit 1
else
echo "ERROR CONFIG."
exit 1
fi
echo "$CERT_DOMAINS" > domains.txt
if [ ! -f "cloudxns.sh" ];then
wget https://github.com/xdtianyu/scripts/raw/master/le-dns/cloudxns.sh -O cloudxns.sh -o /dev/null
chmod +x cloudxns.sh
fi
if [ ! -f "cloudxns-hook.sh" ];then
wget https://github.com/xdtianyu/scripts/raw/master/le-dns/cloudxns-hook.sh -O cloudxns-hook.sh -o /dev/null
chmod +x cloudxns-hook.sh
fi
if [ ! -f "letsencrypt.sh" ];then
wget https://raw.githubusercontent.com/lukas2511/dehydrated/master/dehydrated -O letsencrypt.sh -o /dev/null
chmod +x letsencrypt.sh
fi
./letsencrypt.sh --register --accept-terms
if [ "$ECC" = "TRUE" ];then
./letsencrypt.sh -c -k ./cloudxns-hook.sh -t dns-01 -a secp384r1
else
./letsencrypt.sh -c -k ./cloudxns-hook.sh -t dns-01
fi
|
boxcore/shell
|
tools/le-dns/le-cloudxns.sh
|
Shell
|
mit
| 944 |
#!/bin/bash
set -o nounset
set -o errexit
set -o xtrace
package() {
BIN_NAME_TAG=$CRATE_NAME-$TRAVIS_TAG-$TARGET
if [[ $TARGET =~ .*windows.* ]]; then
BIN_NAME_TAG=$BIN_NAME_TAG.exe
fi
cp -f target/$TARGET/release/$BIN_NAME bin/$BIN_NAME_TAG
sha256sum "bin/$BIN_NAME_TAG" | tee "bin/$BIN_NAME_TAG.sha256"
}
release_tag() {
cp -f target/$TARGET/release/$BIN_NAME bin/
git config --global user.email "[email protected]"
git config --global user.name "Travis CI"
git add --force bin/$BIN_NAME
SHA=$(git rev-parse --short HEAD)
git commit --message "Add binary. $SHA. $TRAVIS_TAG-$TARGET."
tagname="binary-$TRAVIS_TAG-$TARGET"
git tag --force "$tagname"
git push --force https://${GITHUB_TOKEN}@github.com/autozimu/LanguageClient-neovim.git "$tagname"
git reset --hard HEAD^
}
if [[ $TRAVIS_OS_NAME == 'osx' ]]; then
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
fi
TARGETS=(${TARGETS//:/ })
for TARGET in "${TARGETS[@]}"; do
BIN_NAME=$CRATE_NAME
if [[ $TARGET =~ .*windows.* ]]; then
BIN_NAME=$BIN_NAME.exe
fi
cross build --release --target $TARGET
release_tag
package
done
if [[ $TRAVIS_OS_NAME == 'linux' ]]; then
sudo apt-get update
sudo apt-get install --yes python3-pip
sudo pip3 install semver
ci/cleanup-binary-tags.py
fi
|
autozimu/LanguageClient-neovim
|
ci/before_deploy.sh
|
Shell
|
mit
| 1,375 |
#!/bin/bash
#
luna-send -a com.palm.app.messaging -n 1 palm://com.palm.db/putPermissions '{"permissions": [{"type": "db.kind", "object": "com.palm.message:1", "caller": "info.mobo.exportmessages.service", "operations": {"read":"allow"}}, {"type": "db.kind", "object": "com.palm.message:1", "caller": "info.mobo.exportmessages.service", "operations": {"read": "allow"}}]}'
|
Garfonso/ExportMessages
|
application/repairDBPermission.sh
|
Shell
|
mit
| 371 |
#!/usr/bin/env bash
# thread number
thread_num=1
# use current machine as master, if an ConnectException occurs in slave.log,
# try to set master_host=127.0.0.1
master_host=$(hostname)
# if you run more than one training tasks on the same host at the same time,
# different tasks must have different ports!
master_port=61235
echo "master host:${master_host}, master port:${master_port}"
# model name(linear, fm, ffm, gbdt, gbmlr, gbsdt, gbhmlr, gbhsdt)
model_name=gbsdt
echo "model name:${model_name}"
# model config
properties_path="demo/gbsdt/regression/${model_name}.conf"
echo "config:${properties_path}"
# train/test line python transform switch & script
transform="false"
transform_script_path="bin/transform.py"
echo "kill old task..."
kill $(cat master_${master_port}.pid)
kill $(cat slave_${master_port}.pid)
# start ytk-mp4j master, application pid will be saved in master_${master_port}.pid file,
# default max memory is 1000m, master log saved in log/master.log
echo "start master..."
nohup java -server -Xmx1000m -classpath .:lib/*:config -Dlog4j.configuration=file:config/log4j_master.properties com.fenbi.mp4j.comm.CommMaster 1 "${master_port}" >> log/master.log 2>&1 & echo $! > master_${master_port}.pid
# start local train worker, application pid will be saved in slave_${master_port}.pid,
# default max memory is 60000m, slave log saved in log/slave.log
echo "start slave..."
nohup java -server -Xmx60000m -XX:-OmitStackTraceInFastThrow -classpath .:lib/*:config -Dlog4j.configuration=file:config/log4j_slave.properties com.fenbi.ytklearn.worker.LocalTrainWorker \
"${model_name}" "${properties_path}" "${transform_script_path}" "${transform}" user "${master_host}" "${master_port}" "${thread_num}" >> log/slave.log 2>&1 & echo $! > slave_${master_port}.pid
|
yuantiku/ytk-learn
|
demo/gbsdt/regression/local_optimizer.sh
|
Shell
|
mit
| 1,797 |
# Sets reasonable OS X defaults.
#
# Or, in other words, set shit how I like in OS X.
#
# The original idea (and a couple settings) were grabbed from:
# https://github.com/mathiasbynens/dotfiles/blob/master/.osx
#
# Run ./set-defaults.sh and you'll be good to go.
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.osx` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "
# Disable the “Are you sure you want to open this application?” dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false
# Reveal IP address, hostname, OS version, etc. when clicking the clock
# in the login window
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
# Trackpad: enable tap to click for this user and for the login screen
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
defaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Metrics
defaults write NSGlobalDomain AppleMeasurementUnits -string "Centimeters"
defaults write NSGlobalDomain AppleMetricUnits -bool true
# Show icons for hard drives, servers, and removable media on the desktop
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool false
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool false
defaults write com.apple.finder ShowMountedServersOnDesktop -bool false
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool false
# Finder: show hidden files by default
defaults write com.apple.finder AppleShowAllFiles -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Disable the warning before emptying the Trash
defaults write com.apple.finder WarnOnEmptyTrash -bool false
# Remove Dropbox’s green checkmark icons in Finder
file=/Applications/Dropbox.app/Contents/Resources/emblem-dropbox-uptodate.icns
[ -e "$file" ] && mv -f "$file" "$file.bak"
# Show the ~/Library folder.
chflags nohidden ~/Library
# Don't show Dashboard as a Space
defaults write com.apple.dock dashboard-in-overlay -bool true
# Don't automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Remove the auto-hiding Dock delay
defaults write com.apple.dock autohide-delay -float 0
# Remove the animation when hiding/showing the Dock
defaults write com.apple.dock autohide-time-modifier -float 0
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Hot corners
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# Top left screen corner - Mission Control
defaults write com.apple.dock wvous-tl-corner -int 2
defaults write com.apple.dock wvous-tl-modifier -int 0
# Top right screen corner - Desktop
defaults write com.apple.dock wvous-tr-corner -int 4
defaults write com.apple.dock wvous-tr-modifier -int 0
# Bottom left screen corner - Start screen saver
defaults write com.apple.dock wvous-bl-corner -int 7
defaults write com.apple.dock wvous-bl-modifier -int 0
# Hide Safari's bookmark bar.
defaults write com.apple.Safari ShowFavoritesBar -bool false
# Set up Safari for development.
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
# Enable the debug menu in Address Book
defaults write com.apple.addressbook ABShowDebugMenu -bool true
# Enable Dashboard dev mode (allows keeping widgets on the desktop)
defaults write com.apple.dashboard devmode -bool true
# Enable the debug menu in iCal (pre-10.8)
defaults write com.apple.iCal IncludeDebugMenu -bool true
# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
# Enable the debug menu in Disk Utility
defaults write com.apple.DiskUtility DUDebugMenuEnabled -bool true
defaults write com.apple.DiskUtility advanced-image-options -bool true
# Enable the WebKit Developer Tools in the Mac App Store
defaults write com.apple.appstore WebKitDeveloperExtras -bool true
# Enable Debug Menu in the Mac App Store
defaults write com.apple.appstore ShowDebugMenu -bool true
for app in "Address Book" "Calendar" "Contacts" "Dashboard" "Dock" "Finder" \
"Mail" "Safari" "SystemUIServer" "Terminal" "iCal" "iTunes"; do
killall "$app" > /dev/null 2>&1
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
|
werme/dotfiles
|
osx/set-defaults.sh
|
Shell
|
mit
| 5,789 |
#!/usr/bin/env bash
sudo add-apt-repository "deb https://cli-assets.heroku.com/branches/stable/apt ./"
curl -L https://cli-assets.heroku.com/apt/release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get install heroku
|
aviggiano/setup
|
heroku.sh
|
Shell
|
mit
| 228 |
#!/usr/bin/env bash
set -e
# Get the base repository path
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
# Get the name of the package we are building
PACKAGE="$(basename "$DIR")"
# Get the name of the organization
ORGANIZATION="$(basename "$(dirname "$DIR")")"
# Get the name of the repository
REPOSITORY="$(basename "$(dirname "$(dirname "$DIR")")")"
: ${LOCAL_TARGET="$(go env GOOS)_$(go env GOARCH)"}
# Move into our base repository path
cd "$DIR"
# Get the version of the app
VERSION="$(cat VERSION)"
# Clean up old binaries and packages
echo "==> Cleaning up build environment..."
rm -rf pkg/*
rm -rf bin/*
mkdir -p bin
mkdir -p pkg
shasum256() {
if hash sha256sum 2>/dev/null; then
sha256sum "$@"
else
shasum -a 256 "$@"
fi
}
#
# Compile Configuration
#
GIT_COMMIT="$(git rev-parse --short HEAD)"
GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)"
EXTLDFLAGS="-X $REPOSITORY/$ORGANIZATION/$PACKAGE/cmd.GITCOMMIT=${GIT_COMMIT}${GIT_DIRTY} -X $REPOSITORY/$ORGANIZATION/$PACKAGE/cmd.VERSION=$VERSION"
STATIC="-extldflags '-static'"
#
# Determine build targets
#
# Default to local os/arch
targets="$LOCAL_TARGET"
# If we are building for release change targets based off of environment
if [[ "$TARGETS" == "release" ]]; then
if [[ $(uname) == "Linux" ]]; then
targets="darwin_amd64 linux_amd64 linux_amd64-lxc windows_amd64"
elif [[ $(uname) == "Darwin" ]]; then
targets="darwin_amd64 linux_amd64 linux_amd64-lxc"
else
echo "Unable to build on $(uname). Use Linux or Darwin."
exit 1
fi
elif [[ "$TARGETS" != "" ]]; then
targets="$TARGETS"
fi
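# Example overrides (assumed, matching the parsing above): TARGETS="release" builds the
# full release matrix for the current OS, TARGETS="linux_amd64 windows_amd64" builds just
# those two, and with TARGETS unset only the local $LOCAL_TARGET is built.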
set +e
for target in $targets; do
case $target in
"darwin_amd64")
echo "==> Building darwin amd64..."
CGO_ENABLED=0 GOARCH="amd64" GOOS="darwin" \
go build -ldflags "$EXTLDFLAGS" -o "pkg/darwin_amd64/$PACKAGE"
;;
"linux_amd64")
echo "==> Building linux amd64..."
CGO_ENABLED=1 GOOS="linux" GOARCH="amd64" \
go build -ldflags "$STATIC $EXTLDFLAGS" -o "pkg/linux_amd64/$PACKAGE"
;;
"linux_amd64-lxc")
echo "==> Building linux amd64 with lxc..."
CGO_ENABLED=1 GOOS="linux" GOARCH="amd64" \
go build -ldflags "$STATIC $EXTLDFLAGS" -o "pkg/linux_amd64-lxc/$PACKAGE" -tags "lxc"
;;
"windows_amd64")
echo "==> Building windows amd64..."
CGO_ENABLED=1 GOOS="windows" GOARCH="amd64" CXX="x86_64-w64-mingw32-g++" CC="x86_64-w64-mingw32-gcc" \
go build -ldflags "$STATIC $EXTLDFLAGS" -o "pkg/windows_amd64/$PACKAGE.exe"
;;
*)
echo "--> Invalid target: $target"
;;
esac
done
set -e
# Copy our local OS/Arch to the bin/ directory
for F in $(find ./pkg/${LOCAL_TARGET} -mindepth 1 -maxdepth 1 -type f); do
echo "==> Copying ${LOCAL_TARGET} to ./bin"
cp ${F} bin/
chmod 755 bin/*
done
# Package up the artifacts
if [[ "$GENERATE_PACKAGES" != "" ]]; then
for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
OSARCH=$(basename ${PLATFORM})
echo "==> Packaging ${OSARCH}"
pushd $PLATFORM >/dev/null 2>&1
tar czvf ../${PACKAGE}-${OSARCH}.tar.gz ./* >/dev/null
popd >/dev/null 2>&1
rm -rf $PLATFORM >/dev/null
done
echo "==> Generating SHA256..."
for F in $(find ./pkg -mindepth 1 -maxdepth 1 -type f); do
FILENAME=$(basename ${F})
shasum256 "./pkg/${FILENAME}" >> ./pkg/SHA256SUM.txt
done
echo
cat ./pkg/SHA256SUM.txt
fi
|
detachedheads/annotate-influxdb
|
scripts/build.sh
|
Shell
|
mit
| 3,450 |
# Strip the remote
function strip_remote {
cat | sed -e 's/^ *.*\///'
}
# Limit remote
function limit_remote {
grep "^ *${1}/" | strip_remote
}
# Get the current branch
function get_current_branch {
git branch | grep "^*" | awk '{print $2}'
}
# Get the branch's timestamp
function get_branch_timestamp {
git log -n 1 --pretty=format:%at $1
}
# Get all branches
function get_branches {
remote=$1
if [ -z "$remote" ]; then
git branch | sed -e 's/^\*\? *//'
else
git branch -r | limit_remote $remote
fi
}
# Get remote branches contained in the specified branch
function get_merged {
remote=$1
branch=$2
if [ -z "$branch" ]; then
branch=$remote
git branch --merged $branch | sed -e 's/^\*\? *//' | grep -v "^${branch}$"
else
b=$(echo $branch | strip_remote)
git branch -r --merged $branch | limit_remote $remote | grep -v "^${b}$"
fi
}
# Get remote branches containing the specified branch
function get_descendents {
remote=$1
branch=$2
if [ -z "$branch" ]; then
branch=$remote
git branch --contains $branch | sed -e 's/^\*\? *//' | grep -v "^${branch}$"
else
b=$(echo $branch | strip_remote)
git branch -r --contains $branch | limit_remote $remote | grep -v "^${b}$"
fi
}
function confirm {
message=$1
read -p "$message [Y/n] " yn
[ "$yn" == "Y" -o "$yn" == "y" -o -z "$yn" ]
}
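# Rough usage sketch (our own example, not part of the original tooling): after sourcing
# this file you could run, e.g.
# get_merged origin master       # remote-tracking branches on origin merged into master
# get_branch_timestamp master    # unix timestamp of master's last commit
# confirm "Delete stale branches?" && echo confirmed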
|
stilvoid/git-tools
|
lib/common.sh
|
Shell
|
mit
| 1,451 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=Base-Windows
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dll
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/helloworld
OUTPUT_BASENAME=helloworld
PACKAGE_TOP_DIR=helloworld/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/helloworld/bin"
copyFileToTmpDir "${OUTPUT_PATH}.exe" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}.exe" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/helloworld.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/helloworld.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
RoseLeBlood/aSTL
|
examples/HelloWorld/nbproject/Package-Debug.bash
|
Shell
|
mit
| 1,469 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2185-1
#
# Security announcement date: 2014-04-29 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:47 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fix on version:
# - firefox:29.0+build1-0ubuntu0.14.04.2
#
# Last versions recommended by security team:
# - firefox:50.0+build2-0ubuntu0.14.04.2
#
# CVE List:
# - CVE-2014-1518
# - CVE-2014-1519
# - CVE-2014-1522
# - CVE-2014-1523
# - CVE-2014-1524
# - CVE-2014-1525
# - CVE-2014-1528
# - CVE-2014-1529
# - CVE-2014-1530
# - CVE-2014-1531
# - CVE-2014-1492
# - CVE-2014-1532
# - CVE-2014-1526
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade firefox=50.0+build2-0ubuntu0.14.04.2 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/i686/2014/USN-2185-1.sh
|
Shell
|
mit
| 893 |
docker build -t $(cat REPO_AND_VERSION) .
|
sys42/docker-dokuwiki
|
build.sh
|
Shell
|
mit
| 42 |
#!/bin/sh
#
# lshw
source ./helper.sh
set_stage
wgetl http://www.ezix.org/software/files/lshw-B.02.17.tar.gz
tar -xzf lshw-B.02.17.tar.gz
cd lshw-B.02.17
mkdir -p $PREFIX/lshw
make PREFIX=$PREFIX/lshw
make install PREFIX=$PREFIX/lshw
leave_stage
|
amirajdhawan/totient-pkg
|
configs/lshw.sh
|
Shell
|
mit
| 251 |
#!/bin/sh
ROOT_DIR="/opt/software/"
DATE=`date +%Y%m%d`
alias cp='cp'
LOG="${ROOT_DIR}${DATE}_log"
[ ! -d /data/ ] && echo -e "\E[1;31m THERE IS NO '/data' \E[0m"| tee -a $LOG && exit 1
[ ! -d /opt/software ] && echo -e "\E[1;31m THERE IS NO '/opt/software' \E[0m" | tee -a $LOG && exit 1
echo "[$DATE --- START INSTALL LOCAL ]" | tee -a $LOG
function install_apache(){
cd ${ROOT_DIR};
tar -zxf httpd-2.2.11.tar.gz ;
cd httpd-2.2.11 ; ./configure --prefix=/usr/local/apache --enable-so --enable-rewrite -with-mpm=prefork --enable-ssl > $ROOT_DIR/apaconfig.log 2>&1 && echo -e "\E[1;32mConfigure HTTP OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mERROR: Configure HTTP ERROR ..... \E[0m" | tee -a $LOG ;
echo -e "\E[1;31m ready to make httpd ................... \E[0m"
sleep 5 ;
make clean > /dev/null 2>&1 && make > $ROOT_DIR/apamake.log 2>&1 && make install >> $ROOT_DIR/apamake.log 2>&1 && echo -e "\E[1;32mInstall HTTP OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mERROR:Install HTTP Error ..... \E[0m" |tee -a $LOG ;
mkdir -p /data/logs
mkdir -p /data/www
cp ${ROOT_DIR}config/index.php /data/www/
cp ${ROOT_DIR}config/httpd.conf /usr/local/apache/conf/
cp ${ROOT_DIR}config/httpd-vhosts.conf /usr/local/apache/conf/extra/
echo -e "\E[1;31m[Function] install_apache OK \n\E[0m" | tee -a $LOG
}
function install_rsync(){
cp -a ${ROOT_DIR}config/rsyncd.conf /etc/rsyncd.conf ;
cp -a ${ROOT_DIR}config/rsync.passwd /etc/rsync.passwd ;
chmod 600 /etc/rsync.passwd ;
rsync --daemon;
count=`ps -efw | grep rsync | grep -v grep | wc -l`
if [ "$count" == "1" ]; then
echo -e "\E[1;32mRsync Start OK ..... \E[0m" | tee -a $LOG
else
echo -e "\E[1;31mERROR: Rsync Start Error ...EXIT.. \E[0m" | tee -a $LOG && exit 1;
fi
echo -e "\E[1;31m[Function] install_rsync OK \n\E[0m" | tee -a $LOG
}
function install_cronolog(){
cd ${ROOT_DIR};
tar xzf cronolog-1.6.2.tar.gz && cd cronolog-1.6.2 && ./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mCronolog Install Done ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mERROR:Cronolog Install Error ..... \E[0m" | tee -a $LOG;
echo -e "\E[1;31m[Function] install_cronolog OK \n\E[0m" | tee -a $LOG
}
function install_php_depend(){
cd ${ROOT_DIR};
tar xzf gettext-0.17.tar.gz && cd gettext-0.17 && ./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mInstall GETTEXT OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mInstall GETTEXT ERROR ..... \E[0m" | tee -a $LOG
cd ${ROOT_DIR};
tar xzf gd-2.0.35.tar.gz && cd gd-2.0.35 && ./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 ; ./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mInstall GD OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mInstall GD ERROR ..... \E[0m" | tee -a $LOG
cd ${ROOT_DIR};
tar xzf libmcrypt-2.5.7.tar.gz && cd libmcrypt-2.5.7 && ./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mInstall libmcrypt OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mInstall libmcrypt ERROR ..... \E[0m" |tee -a $LOG
cd ${ROOT_DIR};
mkdir -p /usr/local/man/man1/
tar xzf jpegsrc.v6b.tar.gz && cd jpeg-6b && ./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mInstall Jpeg OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mInstall Jpeg ERROR ..... \E[0m"| tee -a $LOG
cd ${ROOT_DIR};
tar xjf libpng-1.2.10.tar.bz2 && cd libpng-1.2.10 ;./configure > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mInstall Libpng OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mInstall Libpng ERROR ..... \E[0m" | tee -a $LOG ;
ln -s /usr/lib64/libjpeg.so.62.0.0 /usr/lib/libjpeg.so
echo -e "\E[1;31m[Function] install_php_depend OK \n\E[0m" | tee -a $LOG
}
function install_php(){
cd ${ROOT_DIR};
tar -zxf php-5.2.6.tar.gz ; cp -r ${ROOT_DIR}jpeg-6b/* /opt/software/php-5.2.6/ext/gd/libgd/ ; cd php-5.2.6 ; ./configure --prefix=/usr/local/php --with-mysql --with-apxs2=/usr/local/apache/bin/apxs --with-openssl --with-curl --enable-xml --with-mcrypt --with-ttf --enable-magic-quotes --enable-fastcgi --enable-mbstring --with-iconv --enable-mbstring --with-gd --with-jpeg-dir --with-png-dir --with-zlib-dir --enable-sysvsem > ${ROOT_DIR}phpconfig.log 2>&1 && echo -e "\E[1;32mConfigure PHP OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mERROR: Configure PHP ERROR ..... \E[0m" |tee -a $LOG;
sleep 5;
make > $ROOT_DIR/phpmake.log 2>&1 && make install > $ROOT_DIR/phpmake.log 2>&1 && echo -e "\E[1;32mPHP Install OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mERROR:PHP Install ERROR ..... \E[0m" | tee -a $LOG;
mkdir /usr/local/php/ext
cp -a ${ROOT_DIR}config/php.ini /usr/local/php/lib/ ;
echo -e "\E[1;31m[Function] install_php OK \n \E[0m" | tee -a $LOG
}
function install_php_memcached(){
cd ${ROOT_DIR};
[ -d "/usr/local/php" ] && tar xzf memcache-2.2.3.tgz && cd memcache-2.2.3 && /usr/local/php/bin/phpize > /dev/null 2>&1 && ./configure -enable-memcache -with-php-config=/usr/local/php/bin/php-config -with-zlib-dir > /dev/null 2>&1 && make > /dev/null 2>&1 && make install > /dev/null 2>&1 && echo -e "\E[1;32mPHP_memcached Install OK ..... \E[0m" | tee -a $LOG || echo -e "\E[1;31mPHP_memcached Install ERROR ..... \E[0m" | tee -a $LOG;
cp /usr/local/php/lib/php/extensions/no-debug-non-zts/* /usr/local/php/ext/
echo -e "\E[1;31m[Function] install_php_memcached OK \n\E[0m" | tee -a $LOG
}
install_apache
install_rsync
install_cronolog
install_php_depend
install_php
install_php_memcached
|
lgh8820/ansible-test
|
OperationsShell/install/install-websoftware.sh
|
Shell
|
mit
| 5,645 |
#!/bin/sh
#######ensembl ftp
#ensembl_archive_xx,
#ontology_mart_xx,
#ensembl_ontology_xx,
#ensembl_website_xx,
######ensemblgenome ftp
#ensemblgenomes_info_xx,
#ensemblgenomes_stable_ids_xx_xx,
#ensembl_compara_pan_homology_xx_xx,
#ftp://ftp.ensembl.org/pub/release-92/mysql
ENSEMBL_FTP="ftp://ftp.ensembl.org/pub/"
#ftp://ftp.ensemblgenomes.org/pub/release-39/pan_ensembl/mysql/
EG_FTP="ftp://ftp.ensemblgenomes.org/pub/"
echo "Set ensembl version e (such as 91)"
read e
echo "set e to be $e"
echo "Set ensemblgenome version eg (such as 38)"
read eg
echo "set eg to be $eg"
echo "Set gramene version g (such as 57)"
read g
echo "set g to be $g"
#echo "Set ftp root site to download data (for example:ftp://ftp.ensemblgenomes.org/pub/)"
#read ftproot
#echo "set ftp root to be $ftproot"
if [[ -z $e ]]
then
echo "Need ensembl version such as 92."
exit
fi
if [[ -z $eg ]]
then
echo "Need ensembl genome version such as 39."
exit
fi
if [[ -z $g ]]
then
echo "Need gramene version such as 57."
exit
fi
#if [[ -z $ftproot ]]
#then
# echo "Need ftp root to download database, such as ftp://ftp.ensemblgenomes.org/pub/."
# exit
#fi
echo "the ensembl version is $e, the ensemlb genomes version is $eg, the gramene version is $g"
echo "The ftp url for ensembl is ${ENSEMBL_FTP}/release-${e}/mysql, continue? [Y/N]"
read reply
echo "reply is $reply"
if [[ $reply != [Yy] ]]
then
echo "skip"
else
mkdir ensembl_dbs
cd ensembl_dbs
for db in ensembl_archive_$e ontology_mart_$e ensembl_ontology_$e ensembl_website_$e ensembl_production_$e ensembl_accounts ncbi_taxonomy
do echo "download $db"
wget -r -nH --cut-dir=3 ${ENSEMBL_FTP}/release-${e}/mysql/$db
if [ "$db" == "ensembl_account" ]
then
mv mysql/ensembl_account mysql/ensembl_account_$e
fi
done
cd ../
fi
#ftp://ftp.ensemblgenomes.org/pub/release-39/pan_ensembl/mysql/
echo "The ftp url for ensembl plants is ${EG_FTP}/release-${eg}/pan_ensembl/mysql, continue? [Y/N]"
read reply
echo "reply is $reply"
if [[ $reply != [Yy] ]]
then
echo "skip"
else
mkdir eg_dbs
cd eg_dbs
for db in ensembl_compara_pan_homology_${eg}_$e ensemblgenomes_info_$eg ensemblgenomes_stable_ids_${eg}_$e ensembl_metadata
do echo "donwload $db"
wget -r -nH --cut-dir=4 ${EG_FTP}/release-${eg}/pan_ensembl/mysql/$db
done
~/scripts/phh-file-rename s/${eg}/${g}/ mysql/*
cd ../
fi
|
warelab/gramene-ensembl
|
scripts/load-scripts/fetch_pan_meta_dbs.sh
|
Shell
|
mit
| 2,460 |
# ------------------------------------------------------------------------------
# FILE: extract.plugin.zsh
# DESCRIPTION: oh-my-zsh plugin file.
# AUTHOR: Sorin Ionescu ([email protected])
# VERSION: 1.0.1
# ------------------------------------------------------------------------------
function extract() {
local remove_archive
local success
local file_name
local extract_dir
if (( $# == 0 )); then
echo "Usage: extract [-option] [file ...]"
echo
echo Options:
echo " -r, --remove Remove archive."
echo
echo "Report bugs to <[email protected]>."
fi
remove_archive=1
if [[ "$1" == "-r" ]] || [[ "$1" == "--remove" ]]; then
remove_archive=0
shift
fi
while (( $# > 0 )); do
if [[ ! -f "$1" ]]; then
echo "extract: '$1' is not a valid file" 1>&2
shift
continue
fi
success=0
file_name="$( basename "$1" )"
extract_dir="$( echo "$file_name" | sed "s/\.${1##*.}//g" )"
case "$1" in
(*.tar.gz|*.tgz) [ -z $commands[pigz] ] && tar zxvf "$1" || pigz -dc "$1" | tar xv ;;
(*.tar.bz2|*.tbz|*.tbz2) tar xvjf "$1" ;;
(*.tar.xz|*.txz) tar --xz --help &> /dev/null \
&& tar --xz -xvf "$1" \
|| xzcat "$1" | tar xvf - ;;
(*.tar.zma|*.tlz) tar --lzma --help &> /dev/null \
&& tar --lzma -xvf "$1" \
|| lzcat "$1" | tar xvf - ;;
(*.tar) tar xvf "$1" ;;
(*.gz) [ -z $commands[pigz] ] && gunzip "$1" || pigz -d "$1" ;;
(*.bz2) bunzip2 "$1" ;;
(*.xz) unxz "$1" ;;
(*.lzma) unlzma "$1" ;;
(*.Z) uncompress "$1" ;;
(*.zip|*.war|*.jar|*.sublime-package|*.ipsw|*.xpi|*.apk|*.pbw|*.pbz) unzip "$1" -d $extract_dir ;;
(*.rar) unrar x -ad "$1" ;;
(*.7z) 7za x "$1" ;;
(*.deb)
mkdir -p "$extract_dir/control"
mkdir -p "$extract_dir/data"
cd "$extract_dir"; ar vx "../${1}" > /dev/null
cd control; tar xzvf ../control.tar.gz
cd ../data; tar xzvf ../data.tar.gz
cd ..; rm *.tar.gz debian-binary
cd ..
;;
(*)
echo "extract: '$1' cannot be extracted" 1>&2
success=1
;;
esac
(( success = $success > 0 ? $success : $? ))
(( $success == 0 )) && (( $remove_archive == 0 )) && rm "$1"
shift
done
}
alias x=extract
|
Neal/oh-my-zsh
|
plugins/extract/extract.plugin.zsh
|
Shell
|
mit
| 2,350 |
#!/bin/bash
#
# Copyright (C) 2013
# Author(s): David Alexa <[email protected]>
#
# LICENSE TERMS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name of the Company nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This software is provided ``as is'', and any express or implied
# warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are disclaimed.
# In no event shall the company or contributors be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
#
# ==========================
#
# This file is executed during composer install.
# Set correct file mod for necessary files and folders.
chown -R apache:apache ./*
chmod 700 -R app/{cache,logs}
chmod 700 src/FIT/NetopeerBundle/Data/models{,/tmp}
chmod 600 app/netopeerWebGui.db
|
CESNET/Netopeer-GUI
|
src/FIT/NetopeerBundle/bin/netconfwebgui-postinstall.sh
|
Shell
|
mit
| 1,817 |
lmd build main
lmd watch main
|
petr/spd
|
assetscore/old/start.sh
|
Shell
|
mit
| 29 |
function test_linux_deployment() {
###### Testing an nginx deployment
log "Testing deployments"
kubectl create namespace ${namespace}
NGINX="docker.io/library/nginx:latest"
IMAGE="${NGINX}" # default to the library image unless we're in TEST_ACR mode
if [[ "${TEST_ACR}" == "y" ]]; then
# force it to pull from ACR
IMAGE="${ACR_REGISTRY}/test/nginx:latest"
# wait for acr
wait
# TODO: how to do this without polluting user home dir?
docker login --username="${SERVICE_PRINCIPAL_CLIENT_ID}" --password="${SERVICE_PRINCIPAL_CLIENT_SECRET}" "${ACR_REGISTRY}"
docker pull "${NGINX}"
docker tag "${NGINX}" "${IMAGE}"
docker push "${IMAGE}"
fi
kubectl run --image="${IMAGE}" nginx --namespace=${namespace} --overrides='{ "apiVersion": "extensions/v1beta1", "spec":{"template":{"spec": {"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}'
count=12
while (( $count > 0 )); do
log " ... counting down $count"
running=$(kubectl get pods --namespace=${namespace} | grep nginx | grep Running | wc | awk '{print $1}')
if (( ${running} == 1 )); then break; fi
sleep 5; count=$((count-1))
done
if (( ${running} != 1 )); then
log "K8S: gave up waiting for deployment"
kubectl get all --namespace=${namespace}
exit 1
fi
kubectl expose deployments/nginx --type=LoadBalancer --namespace=${namespace} --port=80
log "Checking Service External IP"
count=60
external_ip=""
while (( $count > 0 )); do
log " ... counting down $count"
external_ip=$(kubectl get svc --namespace ${namespace} nginx --template="{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}" || echo "")
[[ ! -z "${external_ip}" ]] && break
sleep 10; count=$((count-1))
done
if [[ -z "${external_ip}" ]]; then
log "K8S: gave up waiting for loadbalancer to get an ingress ip"
exit 1
fi
log "Checking Service"
count=5
success="n"
while (( $count > 0 )); do
log " ... counting down $count"
ret=$(curl -f --max-time 60 "http://${external_ip}" | grep 'Welcome to nginx!' || echo "curl_error")
if [[ $ret =~ .*'Welcome to nginx!'.* ]]; then
success="y"
break
fi
sleep 5; count=$((count-1))
done
if [[ "${success}" != "y" ]]; then
log "K8S: failed to get expected response from nginx through the loadbalancer"
exit 1
fi
}
function test_windows_deployment() {
echo "coming soon"
}
|
ChinaCloudGroup/acs-engine
|
test/cluster-tests/kubernetes/k8s-utils.sh
|
Shell
|
mit
| 2,416 |
#!/bin/bash
set -e
set -u
usage() { echo "Usage: $0 -t <title> [-y <year>]" 1>&2; exit 1; }
MOVIE_ID=''
MOVIE_TITLE=''
DATA_DIR=''
MOVIE_YEAR=''
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
nextQuoteId=''
while getopts ":t:y:" o; do
case "${o}" in
t)
MOVIE_TITLE=${OPTARG}
TITLE_URL_ENCODED=`python -c "import sys, urllib as ul; \
print ul.quote_plus(\"$MOVIE_TITLE\")"`
DATA_DIR='movie-quotes'/${TITLE_URL_ENCODED}
if [ ! -d "$DATA_DIR" ]; then
mkdir -p "$DATA_DIR"
fi
;;
y)
MOVIE_YEAR=${OPTARG}
;;
*)
usage
;;
esac
done
if [ -z "$DATA_DIR" ] || [ -z "$MOVIE_TITLE" ]; then
usage
fi
source "$SCRIPT_DIR"/quodb-functions.sh
moviesWithTitle=`getMoviesWithTitle "$TITLE_URL_ENCODED"`
moviesWithTitleCount=`echo "$moviesWithTitle" | jq -s length`
if [ "$moviesWithTitleCount" -gt "1" ]; then
if [ -z "$MOVIE_YEAR" ]; then
echo "Found multiple movies with title '$MOVIE_TITLE'"
echo "Add -y <year> to distinguish."
echo "$moviesWithTitle"
usage
fi
MOVIE_ID=`echo "$moviesWithTitle" | jq -r "select(.year==$MOVIE_YEAR)|.title_id"`
fi
if [ "$moviesWithTitleCount" -eq "1" ]; then
MOVIE_ID=`echo "$moviesWithTitle" | jq -r ".[0]|.title_id"`
fi
if [ -z "$MOVIE_ID" ]; then
echo "Unable to find id for title '$MOVIE_TITLE' at quodb.com"
exit 1
fi
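# Crawl backward from the starting quote until quodb returns an error (beginning reached),
# then switch direction and crawl forward until the end of the movie.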
crawlDirection='backward'
while [ "$crawlDirection" != 'stop' ];
do
quotesJson=`getQuotesJson $nextQuoteId`
error=`echo "$quotesJson" | jq '.error'`
if [ ! -z "$error" ] && [ "$error" != "null" ]; then
if [ "$crawlDirection" == 'backward' ]; then
echo "Reached the beginning. Now crawling forward to the end."
crawlDirection='forward'
nextQuoteId=`getNextQuoteId "$crawlDirection"`
else
echo "Reached the end."
crawlDirection='stop'
fi
else
saveToFiles "$quotesJson"
nextQuoteId=`getNextQuoteId "$crawlDirection"`
fi
done
echo "Finished."
|
johnmuth/movie-quote-tweet-bot
|
crawler/crawl-movie-quotes.sh
|
Shell
|
cc0-1.0
| 2,120 |
#!/bin/sh
######################################################################
#
# @file openstudio.sh
#
# @brief Version and build information for openstudio.
#
# @version $Rev$ $Date$
#
# Copyright © 2013-2017, Tech-X Corporation, Boulder, CO.
# See LICENSE file (EclipseLicense.txt) for conditions of use.
#
######################################################################
######################################################################
#
# Builds and deps
# Built from svn repo only
#
######################################################################
OPENSTUDIO_DEPS=swig,ruby,rake,dakota,doxygen,vtk,qt
if test -z "$OPENSTUDIO_BUILDS"; then
OPENSTUDIO_BUILDS=ser
fi
######################################################################
#
# Launch openstudio builds.
#
######################################################################
buildOpenstudio() {
# SWS: These could be replaced by editing the openstudio cmake script and corresponding Find____.cmake scripts
# OPENSTUDIO_PAR_ARGS=" "
OPENSTUDIO_SER_ARGS="-DBOOST_ROOT:STRING=$CONTRIB_DIR/boost -DSWIG_EXECUTABLE:FILEPATH=$CONTRIB_DIR/swig/bin/swig -DRUBY_EXECUTABLE:STRING=$CONTRIB_DIR/ruby-ser/bin -DQT_QMAKE_EXECUTABLE:FILE=$CONTRIB_DIR/qt-4.8.4-sersh/bin/qmake -DVTK_DIR:PATH=$CONTRIB_DIR/VTK-sersh"
OPENSTUDIO_SER_ARGS="$OPENSTUDIO_SER_ARGS $CMAKE_COMPILERS_SER"
# OPENSTUDIO_PAR_ARGS="$OPENSTUDIO_PAR_ARGS $CMAKE_COMPILERS_PAR"
# Get openstudio checkout
getVersion openstudio
# Configure and build serial and parallel
if bilderPreconfig openstudio; then
# Serial build
if bilderConfig $USE_CMAKE_ARG openstudio ser "$OPENSTUDIO_SER_ARGS $CMAKE_SUPRA_SP_ARG" openstudio; then
bilderBuild openstudio ser "$OPENSTUDIO_MAKEJ_ARGS"
fi
fi
}
######################################################################
#
# Install openstudio
#
######################################################################
installOpenstudio() {
bilderInstall openstudio ser openstudio
}
|
Tech-XCorp/bilder
|
packages/openstudio.sh
|
Shell
|
epl-1.0
| 2,024 |
sh clean.sh
make g2mini_cm_defconfig && make -j2 && ./dtbToolCM -2 -o ./arch/arm/boot/dt.img -s 2048 -p ./scripts/dtc/ ./arch/arm/boot/
#&& find -name "*.ko" -exec cp -f '{}' ./RAMDISKs/D618-V20D/system/lib/modules/ \; && mv -f ./arch/arm/boot/zImage ./RAMDISKs/D618-V20D/split_img/zImage && mv -f ./arch/arm/boot/dt.img ./RAMDISKs/D618-V20D/split_img/dt.img
exec bash
|
Vangreen/android_kernel_lge_msm8926
|
make-g2mini_cm.sh
|
Shell
|
gpl-2.0
| 370 |
VERSION=1.0.6
NAME=bzip2
DESC=
DEPENDS=
CONFLICTS=
SOURCES=(http://www.bzip.org/1.0.6/bzip2-1.0.6.tar.gz)
SOURCES_HASHES=(00b516f4704d4a7cb50a1d97e6e8e15b)
PATCHES=("patches/$NAME-$VERSION-seaos-all.patch")
function prepare() {
if ! [ -d $NAME-$VERSION ]; then
tar xf $NAME-$VERSION.tar.gz
fi
}
function build() {
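# Cross-compile with the packaging toolchain; INSTALL_ROOT and HOST_TRIPLET are assumed to be provided by the surrounding pack environment.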
cp -rf ../src/bzip2-1.0.6/* .
if ! make PREFIX=$INSTALL_ROOT/usr CC=$HOST_TRIPLET-gcc RANLIB=$HOST_TRIPLET-ranlib AR=$HOST_TRIPLET-ar all install; then
return 1
fi
}
|
dbittman/sea
|
apps/porting/pack/packs/bzip2/pkg.sh
|
Shell
|
gpl-2.0
| 493 |
#! /bin/bash
MYNAME=$(basename "$0")
MYBASENAME=$(basename "$0" .sh)
MYDIR=$(dirname "$0")
VERBOSE=""
VERSION="0.0.3"
LOG_OPTION="--log"
DEBUG_OPTION="--debug"
CONTAINER_SERVER=""
CONTAINER_IP=""
CLUSTER_NAME=""
LAST_CONTAINER_NAME=""
OPTION_VENDOR="mariadb"
PROVIDER_VERSION="10.3"
OPTION_NUMBER_OF_NODES="1"
N_CONTAINERS=0
N_CLUSTERS=0
MY_CONTAINERS=""
node_ip11=""
node_ip12=""
node_ip13=""
node_ip21=""
cd $MYDIR
source ./include.sh
source ./shared_test_cases.sh
source ./include_lxc.sh
MYSQL_ROOT_PASSWORD=$(generate_strong_password)
#
# Prints usage information and exits.
#
function printHelpAndExit()
{
cat << EOF
Usage: $MYNAME [OPTION]... [TESTNAME]
$MYNAME - Test script for s9s to check Galera on LXC.
-h, --help Print this help and exit.
--verbose Print more messages.
--print-json Print the JSON messages sent and received.
--log Print the logs while waiting for the job to be ended.
--print-commands Do not print unit test info, print the executed commands.
--install Just install the server and exit.
--reset-config Remove and re-generate the ~/.s9s directory.
--server=SERVER Use the given server to create containers.
--vendor=STRING Use the given Galera vendor.
--provider-version=VERSION The SQL server provider version.
SUPPORTED TESTS:
o registerServer Creates a new cmon-cloud container server.
o createMasterCluster Creates a cluster to be used as a master cluster.
o createSlaveCluster Creates a cluster that replicates from the master cluster.
o testBinaryLog Enables binary logging on all the servers.
o testFailover Testing failover between clusters.
o testBack Testing switching back after failover.
o testFinalize Deleting containers.
EOF
exit 1
}
ARGS=$(\
getopt -o h \
-l "help,verbose,log,server:,print-commands,install,reset-config,\
provider-version:,number-of-nodes:,vendor:,leave-nodes,\
os-vendor:,os-release:" \
-- "$@")
if [ $? -ne 0 ]; then
exit 6
fi
eval set -- "$ARGS"
while true; do
case "$1" in
-h|--help)
shift
printHelpAndExit
;;
--verbose)
shift
VERBOSE="true"
OPTION_VERBOSE="--verbose"
;;
--log)
shift
LOG_OPTION="--log"
DEBUG_OPTION="--debug"
;;
--print-json)
shift
OPTION_PRINT_JSON="--print-json"
;;
--print-commands)
shift
DONT_PRINT_TEST_MESSAGES="true"
PRINT_COMMANDS="true"
;;
--install)
shift
OPTION_INSTALL="--install"
;;
--reset-config)
shift
OPTION_RESET_CONFIG="true"
;;
--server)
shift
CONTAINER_SERVER="$1"
shift
;;
--vendor)
shift
OPTION_VENDOR="$1"
shift
;;
--provider-version)
shift
PROVIDER_VERSION="$1"
shift
;;
--os-vendor)
OPTION_OS_VENDOR="$2"
shift 2
;;
--os-release)
OPTION_OS_RELEASE="$2"
shift 2
;;
--)
shift
break
;;
*)
printError "Unknown option '$1'."
exit 6
;;
esac
done
if [ -z "$OPTION_RESET_CONFIG" ]; then
printError "This script must remove the s9s config files."
printError "Make a copy of ~/.s9s and pass the --reset-config option."
exit 6
fi
if [ -z "$CONTAINER_SERVER" ]; then
printError "No container server specified."
printError "Use the --server command line option to set the server."
exit 6
fi
#
#
#
function createMasterCluster()
{
local node_name1
local node_name2
local node_name3
local cluster_name="master_cluster"
local nodes
local n
local master_cluster_id_option
print_title "Creating the Master Cluster"
begin_verbatim
#
# Composing a list of container names.
#
node_name1=$(printf "%s_11_%06d" "${MYBASENAME}" "$$")
node_ip11=$(create_node \
--autodestroy \
--os-vendor "$OPTION_OS_VENDOR" \
--os-release "$OPTION_OS_RELEASE" \
$node_name1)
node_name2=$(printf "%s_12_%06d" "${MYBASENAME}" "$$")
node_ip12=$(create_node \
--autodestroy \
--os-vendor "$OPTION_OS_VENDOR" \
--os-release "$OPTION_OS_RELEASE" \
$node_name2)
node_name3=$(printf "%s_13_%06d" "${MYBASENAME}" "$$")
node_ip13=$(create_node \
--autodestroy \
--os-vendor "$OPTION_OS_VENDOR" \
--os-release "$OPTION_OS_RELEASE" \
$node_name3)
nodes="$node_ip11;$node_ip12;$node_ip13"
#
# Creating a Cluster.
#
mys9s cluster \
--create \
--template="ubuntu" \
--cluster-name="$cluster_name" \
--cluster-type=galera \
--provider-version="$PROVIDER_VERSION" \
--vendor=$OPTION_VENDOR \
--nodes="$nodes" \
--db-admin-passwd="$MYSQL_ROOT_PASSWORD" \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
counter=0
while true; do
CLUSTER_ID=$(find_cluster_id $cluster_name)
if [ "$CLUSTER_ID" != 'NOT-FOUND' ]; then
break;
fi
if [ "$counter" -gt 10 ]; then
break
fi
let counter+=1
echo "Cluster '$cluster_name' not found."
s9s cluster --list --long
sleep 10
done
if [ "$CLUSTER_ID" -gt 0 2>/dev/null ]; then
printVerbose "Cluster ID is $CLUSTER_ID"
else
failure "Cluster ID '$CLUSTER_ID' is invalid"
fi
wait_for_cluster_started "$cluster_name"
mys9s cluster --list --long
if [ -n "$master_cluster_id_option" ]; then
mys9s replication --list
fi
end_verbatim
}
#
#
#
function createSlaveCluster()
{
local node_name1
local cluster_name="slave_cluster"
local nodes
local master_cluster_id_option
local retcode
print_title "Creating the Slave Cluster"
begin_verbatim
#
# Composing a list of container names.
#
node_name1=$(printf "%s_21_%06d" "${MYBASENAME}" "$$")
node_ip21=$(create_node \
--autodestroy \
--os-vendor "$OPTION_OS_VENDOR" \
--os-release "$OPTION_OS_RELEASE" \
$node_name1)
nodes="$node_ip21"
#
# Creating a Cluster.
#
mys9s cluster \
--create \
--template="ubuntu" \
--cluster-name="$cluster_name" \
--cluster-type=galera \
--provider-version="$PROVIDER_VERSION" \
--vendor=$OPTION_VENDOR \
--nodes="$nodes" \
--db-admin-passwd="$MYSQL_ROOT_PASSWORD" \
--remote-cluster-id=1 \
$LOG_OPTION \
$DEBUG_OPTION
retcode=$?
if [ "$retcode" -ne 0 ]; then
failure "The return code is $retcode"
end_verbatim
return 1
else
success " o The return code is 0, OK."
fi
#
# FIXME: I am not sure why I wrote this like this.
#
counter=0
while true; do
CLUSTER_ID=$(find_cluster_id $cluster_name)
if [ "$CLUSTER_ID" != 'NOT-FOUND' ]; then
break;
fi
if [ "$counter" -gt 10 ]; then
break
fi
let counter+=1
echo "Cluster '$cluster_name' not found."
s9s cluster --list --long
sleep 10
done
if [ "$CLUSTER_ID" -gt 0 2>/dev/null ]; then
printVerbose "Cluster ID is $CLUSTER_ID"
else
failure "Cluster ID '$CLUSTER_ID' is invalid"
fi
wait_for_cluster_started "$cluster_name"
mys9s cluster --list --long
if [ -n "$master_cluster_id_option" ]; then
mys9s replication --list
fi
end_verbatim
}
function testBinaryLog()
{
local node_name
local node_list
print_title "Enabling Binary Logging"
begin_verbatim
node_list=$(s9s node \
--list --cluster-id=1 --long --batch | \
grep ^g | \
awk '{ print $5}')
for node_name in $node_list; do
mys9s nodes \
--enable-binary-logging \
--nodes=$node_name \
--cluster-id=1 \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
done
mys9s replication --list
end_verbatim
}
function testFailover()
{
print_title "Testing Failover"
begin_verbatim
mys9s node --list --long
mys9s replication \
--failover \
--master=$node_ip12:3306 \
--slave=$node_ip21:3306 \
--cluster-id=2 \
--remote-cluster-id=1 \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
mys9s replication --list
mysleep 60
mys9s replication --list
end_verbatim
}
function testBack()
{
print_title "Testing Switch Back After Failover"
begin_verbatim
mys9s node --list --long
mys9s replication \
--failover \
--master=$node_ip11:3306 \
--slave=$node_ip21:3306 \
--cluster-id=2 \
--remote-cluster-id=1 \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
mys9s replication --list
mysleep 60
mys9s replication --list
mys9s node --list --long
end_verbatim
}
#
# Running the requested tests.
#
startTests
reset_config
grant_user
if [ "$OPTION_INSTALL" ]; then
if [ -n "$1" ]; then
for testName in $*; do
runFunctionalTest "$testName"
done
else
runFunctionalTest registerServer
runFunctionalTest createMasterCluster
runFunctionalTest testBinaryLog
runFunctionalTest createSlaveCluster
runFunctionalTest testFailover
runFunctionalTest testBack
fi
elif [ "$1" ]; then
for testName in $*; do
runFunctionalTest "$testName"
done
else
runFunctionalTest registerServer
runFunctionalTest createMasterCluster
runFunctionalTest testBinaryLog
runFunctionalTest createSlaveCluster
runFunctionalTest testFailover
runFunctionalTest testBack
fi
endTests
|
severalnines/s9s-tools
|
tests/ft_galera_c2c.sh
|
Shell
|
gpl-2.0
| 10,309 |
#!/bin/sh
#
# Startup script for the VuFind Jetty Server under *nix systems
# (it works under NT/cygwin too).
#
# Configuration files
#
# $JETTY_HOME/etc/jetty.xml
# If found, used as this script's configuration file, but only if
# /etc/jetty.conf was not present. See above.
#
# Configuration variables
#
# VUFIND_HOME
# Home of the VuFind installation.
#
# SOLR_HOME
# Home of the Solr installation.
#
# JAVA_HOME
# Home of Java installation.
#
# JAVA
# Command to invoke Java. If not set, $JAVA_HOME/bin/java will be
# used.
#
# JAVA_OPTIONS
# Extra options to pass to the JVM
#
# JETTY_HOME
# Where Jetty is installed. If not set, the script will try to
# guess it by first looking at the invocation path for the script,
# and then by looking in standard locations such as $HOME/opt/jetty
# and /opt/jetty. The java system property "jetty.home" will be
# set to this value for use by configure.xml files, e.g.:
#
# <Arg><SystemProperty name="jetty.home" default="."/>/webapps/jetty.war</Arg>
#
# JETTY_LOG
# The path where Jetty will store the log files
#
# JETTY_CONSOLE
# Where Jetty console output should go. Defaults to first writeable of
# /dev/console
# /dev/tty
#
# JETTY_PORT
# Override the default port for Jetty servers. If not set then the
# default value in the xml configuration file will be used. The java
# system property "jetty.port" will be set to this value for use in
# configure.xml files. For example, the following idiom is widely
# used in the demo config files to respect this property in Listener
# configuration elements:
#
# <Set name="Port"><SystemProperty name="jetty.port" default="8080"/></Set>
#
# Note: that the config file could ignore this property simply by saying:
#
# <Set name="Port">8080</Set>
#
# JETTY_RUN
# Where the jetty.pid file should be stored. It defaults to the
# first available of /var/run, /usr/var/run, and /tmp if not set.
#
# JETTY_PID
# The Jetty PID file, defaults to $JETTY_RUN/jetty.pid
#
# JETTY_ARGS
# The default arguments to pass to jetty.
#
. /home/$USER/.bash_profile
usage()
{
echo "Usage: $0 {start|stop|run|restart|check|supervise} [ CONFIGS ... ] "
exit 1
}
[ $# -gt 0 ] || usage
TMPJ=/tmp/j$$
##################################################
# Get the action & configs
##################################################
ACTION=$1
shift
ARGS="$*"
CONFIGS=""
##################################################
# Find directory function
##################################################
findDirectory()
{
OP=$1
shift
for L in $* ; do
[ $OP $L ] || continue
echo $L
break
done
}
##################################################
# Set Performance options for JETTY
##################################################
# -Xms Sets the initial heap size for when the JVM starts.
# -Xmx Sets the maximum heap size.
# If you often get a "catalog error" or need to restart vufind you
# may need to increase the Xmx value if your system has the memory
# to support it. For example, on a system with 4GB of memory that is
# dedicated to running VuFind you may want to set the Xmx value to
# 2048m or 3072m.
#
# IMPORTANT NOTE: You may want to skip setting the Xms value
# so that Java / VuFind will only use what it needs - potentially
# leaving more memory for the rest of the system. To see the JVM memory
# usage visit your Solr URL, possibly the same URL as your VuFind
# instance but appended with :8080/solr/
#
# The most important factors for determining the amount of memory
# that you will need to allocate are the number of records you have
# indexed, and how much traffic your site receives. It may also be
# beneficial to limit or block crawlers and robots as they can,
# at times, generate so many requests that your site has poor
# performance for your human patrons
#
# For more information on tuning vufind and java, see the
# vufind wiki article at https://vufind.org/wiki/performance
# and http://www.oracle.com/technetwork/java/gc-tuning-5-138395.html
#
# Some example settings:
# JAVA_OPTIONS="-server -Xms1048576k -Xmx1048576k -XX:+UseParallelGC -XX:NewRatio=5"
# JAVA_OPTIONS="-server -Xmx1048576k -XX:+UseParallelGC -XX:NewRatio=5"
# JAVA_OPTIONS="-server -Xms1024m -Xmx1024m -XX:+UseParallelGC -XX:NewRatio=5"
# JAVA_OPTIONS="-server -Xmx1024m -XX:+UseParallelGC -XX:NewRatio=5"
if [ -z "$JAVA_OPTIONS" ]
then
JAVA_OPTIONS="-server -Xms1024m -Xmx1024m -XX:+UseParallelGC -XX:NewRatio=5"
fi
##################################################
# Set VUFIND_HOME
##################################################
if [ -z "$VUFIND_HOME" ]
then
VUFIND_HOME="/usr/local/vufind2"
fi
##################################################
# Set SOLR_HOME
##################################################
if [ -z "$SOLR_HOME" ]
then
SOLR_HOME="$VUFIND_HOME/solr"
fi
##################################################
# Set JETTY_HOME
##################################################
if [ -z "$JETTY_HOME" ]
then
JETTY_HOME="$SOLR_HOME/jetty"
fi
##################################################
# Set Jetty's Logging Directory
##################################################
if [ -z "$JETTY_LOG" ]
then
JETTY_LOG="$JETTY_HOME/logs"
fi
##################################################
# Jetty's hallmark
##################################################
JETTY_INSTALL_TRACE_FILE="start.jar"
#####################################################
# Check that jetty is where we think it is
#####################################################
if [ ! -r $JETTY_HOME/$JETTY_INSTALL_TRACE_FILE ]
then
echo "** ERROR: Oops! Jetty doesn't appear to be installed in $JETTY_HOME"
echo "** ERROR: $JETTY_HOME/$JETTY_INSTALL_TRACE_FILE is not readable!"
exit 1
fi
###########################################################
# Get the list of config.xml files from the command line.
###########################################################
if [ ! -z "$ARGS" ]
then
for A in $ARGS
do
if [ -f $A ]
then
CONF="$A"
elif [ -f $JETTY_HOME/etc/$A ]
then
CONF="$JETTY_HOME/etc/$A"
elif [ -f ${A}.xml ]
then
CONF="${A}.xml"
elif [ -f $JETTY_HOME/etc/${A}.xml ]
then
CONF="$JETTY_HOME/etc/${A}.xml"
else
echo "** ERROR: Cannot find configuration '$A' specified in the command line."
exit 1
fi
if [ ! -r $CONF ]
then
echo "** ERROR: Cannot read configuration '$A' specified in the command line."
exit 1
fi
CONFIGS="$CONFIGS $CONF"
done
fi
##################################################
# Try to find this script's configuration file,
# but only if no configurations were given on the
# command line.
##################################################
if [ -z "$JETTY_CONF" ]
then
if [ -f /etc/jetty.conf ]
then
JETTY_CONF=/etc/jetty.conf
elif [ -f "${JETTY_HOME}/etc/jetty.conf" ]
then
JETTY_CONF="${JETTY_HOME}/etc/jetty.conf"
fi
fi
##################################################
# Read the configuration file if one exists
##################################################
CONFIG_LINES=
if [ -z "$CONFIGS" ] && [ -f "$JETTY_CONF" ] && [ -r "$JETTY_CONF" ]
then
CONFIG_LINES=`cat $JETTY_CONF | grep -v "^[[:space:]]*#" | tr "\n" " "`
fi
##################################################
# Get the list of config.xml files from jetty.conf
##################################################
if [ ! -z "${CONFIG_LINES}" ]
then
for CONF in ${CONFIG_LINES}
do
if [ ! -r "$CONF" ]
then
echo "** WARNING: Cannot read '$CONF' specified in '$JETTY_CONF'"
elif [ -f "$CONF" ]
then
# assume it's a configure.xml file
CONFIGS="$CONFIGS $CONF"
elif [ -d "$CONF" ]
then
# assume it's a directory with configure.xml files
# for example: /etc/jetty.d/
# sort the files before adding them to the list of CONFIGS
XML_FILES=`ls ${CONF}/*.xml | sort | tr "\n" " "`
for FILE in ${XML_FILES}
do
if [ -r "$FILE" ] && [ -f "$FILE" ]
then
CONFIGS="$CONFIGS $FILE"
else
echo "** WARNING: Cannot read '$FILE' specified in '$JETTY_CONF'"
fi
done
else
echo "** WARNING: Don't know what to do with '$CONF' specified in '$JETTY_CONF'"
fi
done
fi
#####################################################
# Run the standard server if there's nothing else to run
#####################################################
if [ -z "$CONFIGS" ]
then
CONFIGS="${JETTY_HOME}/etc/jetty.xml"
fi
#####################################################
# Find a location for the pid file
#####################################################
if [ -z "$JETTY_RUN" ]
then
JETTY_RUN=`findDirectory -w /var/run /usr/var/run /tmp`
fi
#####################################################
# Find a PID for the pid file
#####################################################
if [ -z "$JETTY_PID" ]
then
JETTY_PID="$JETTY_RUN/vufind.pid"
fi
#####################################################
# Find a location for the jetty console
#####################################################
if [ -z "$JETTY_CONSOLE" ]
then
if [ -w /dev/console ]
then
JETTY_CONSOLE=/dev/console
else
JETTY_CONSOLE=/dev/tty
fi
fi
##################################################
# Check for JAVA_HOME
##################################################
if [ -z "$JAVA_HOME" ]
then
# If a java runtime is not defined, search the following
# directories for a JVM and sort by version. Use the highest
# version number.
# Java search path
JAVA_LOCATIONS="\
/usr/java \
/usr/bin \
/usr/local/bin \
/usr/local/java \
/usr/local/jdk \
/usr/local/jre \
/usr/lib/jvm \
/opt/java \
/opt/jdk \
/opt/jre \
"
JAVA_NAMES="java jdk jre"
for N in $JAVA_NAMES ; do
for L in $JAVA_LOCATIONS ; do
[ -d $L ] || continue
find $L -name "$N" ! -type d | grep -v threads | while read J ; do
[ -x $J ] || continue
VERSION=`eval $J -version 2>&1`
[ $? = 0 ] || continue
VERSION=`expr "$VERSION" : '.*"\(1.[0-9\.]*\)["_]'`
[ "$VERSION" = "" ] && continue
expr $VERSION \< 1.5 >/dev/null && continue
echo $VERSION:$J
done
done
done | sort | tail -1 > $TMPJ
JAVA=`cat $TMPJ | cut -d: -f2`
JVERSION=`cat $TMPJ | cut -d: -f1`
JAVA_HOME=`dirname $JAVA`
while [ ! -z "$JAVA_HOME" -a "$JAVA_HOME" != "/" -a ! -f "$JAVA_HOME/lib/tools.jar" ] ; do
JAVA_HOME=`dirname $JAVA_HOME`
done
[ "$JAVA_HOME" = "" ] && JAVA_HOME=
echo "Found JAVA=$JAVA in JAVA_HOME=$JAVA_HOME"
fi
##################################################
# Determine which JVM of version >1.5
# Try to use JAVA_HOME
##################################################
if [ "$JAVA" = "" -a "$JAVA_HOME" != "" ]
then
if [ ! -z "$JAVACMD" ]
then
JAVA="$JAVACMD"
else
[ -x $JAVA_HOME/bin/jre -a ! -d $JAVA_HOME/bin/jre ] && JAVA=$JAVA_HOME/bin/jre
[ -x $JAVA_HOME/bin/java -a ! -d $JAVA_HOME/bin/java ] && JAVA=$JAVA_HOME/bin/java
fi
fi
if [ "$JAVA" = "" ]
then
echo "Cannot find a JRE or JDK. Please set JAVA_HOME to a >=1.5 JRE" >&2
exit 1
fi
JAVA_VERSION=`expr "$($JAVA -version 2>&1 | head -1)" : '.*1\.\([0-9]\)'`
#####################################################
# See if JETTY_PORT is defined
#####################################################
if [ "$JETTY_PORT" != "" ]
then
JAVA_OPTIONS="$JAVA_OPTIONS -Djetty.port=$JETTY_PORT"
fi
#####################################################
# Add Solr values to command line
#####################################################
if [ "$SOLR_HOME" != "" ]
then
JAVA_OPTIONS="$JAVA_OPTIONS -Dsolr.solr.home=$SOLR_HOME"
fi
#####################################################
# Set Jetty Logging Directory
#####################################################
if [ "$JETTY_LOG" ]
then
JAVA_OPTIONS="$JAVA_OPTIONS -Djetty.logs=$JETTY_LOG"
fi
#####################################################
# Are we running on Windows? Could be, with Cygwin/NT.
#####################################################
case "`uname`" in
CYGWIN*) PATH_SEPARATOR=";";;
*) PATH_SEPARATOR=":";;
esac
#####################################################
# Add jetty properties to Java VM options.
#####################################################
JAVA_OPTIONS="$JAVA_OPTIONS -Djetty.home=$JETTY_HOME "
#####################################################
# This is how the Jetty server will be started
#####################################################
RUN_CMD="$JAVA $JAVA_OPTIONS -jar $JETTY_HOME/start.jar $JETTY_ARGS $CONFIGS"
##################################################
# Do the action
##################################################
case "$ACTION" in
start)
echo "Starting VuFind ... "
if [ -f $JETTY_PID ]
then
echo "Already Running!!"
exit 1
fi
# Export variables for Import Tool
export VUFIND_HOME
echo "STARTED VuFind `date`" >> $JETTY_CONSOLE
echo "$RUN_CMD"
nohup sh -c "exec $RUN_CMD >>$JETTY_CONSOLE 2>&1" > /dev/null &
echo $! > $JETTY_PID
echo "VuFind running pid="`cat $JETTY_PID`
;;
stop)
PID=`cat $JETTY_PID 2>/dev/null`
echo "Shutting down VuFind ... "
kill $PID 2>/dev/null
sleep 2
kill -9 $PID 2>/dev/null
rm -f $JETTY_PID
echo "STOPPED `date`" >>$JETTY_CONSOLE
;;
restart)
if [ -x "$0" ]; then
"$0" stop $*
sleep 5
"$0" start $*
else
sh "$0" stop $*
sleep 5
sh "$0" start $*
fi
;;
supervise)
#
# Under control of daemontools supervise monitor which
# handles restarts and shutdowns via the svc program.
#
exec $RUN_CMD
;;
run|demo)
echo "Running VuFind ... "
if [ -f $JETTY_PID ]
then
echo "Already Running!!"
exit 1
fi
exec $RUN_CMD
;;
check)
echo "Checking arguments to VuFind: "
echo "VUFIND_HOME = $VUFIND_HOME"
echo "SOLR_HOME = $SOLR_HOME"
echo "JETTY_HOME = $JETTY_HOME"
echo "JETTY_LOG = $JETTY_LOG"
echo "JETTY_CONF = $JETTY_CONF"
echo "JETTY_RUN = $JETTY_RUN"
echo "JETTY_PID = $JETTY_PID"
echo "JETTY_CONSOLE = $JETTY_CONSOLE"
echo "JETTY_PORT = $JETTY_PORT"
echo "CONFIGS = $CONFIGS"
echo "JAVA_OPTIONS = $JAVA_OPTIONS"
echo "JAVA = $JAVA"
echo "CLASSPATH = $CLASSPATH"
echo "RUN_CMD = $RUN_CMD"
echo
if [ -f $JETTY_PID ]
then
echo "VuFind running pid="`cat $JETTY_PID`
exit 0
fi
exit 1
;;
*)
usage
;;
esac
exit 0
|
yorkulibraries/vufind
|
vufind.sh
|
Shell
|
gpl-2.0
| 15,222 |
#!/bin/sh
#
# $Id$
#
# This file is part of the OpenLink Software Virtuoso Open-Source (VOS)
# project.
#
# Copyright (C) 1998-2014 OpenLink Software
#
# This project is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License, dated June 1991.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# ----------------------------------------------------------------------
# Fix issues with LOCALE
# ----------------------------------------------------------------------
LANG=C
LC_ALL=POSIX
export LANG LC_ALL
# WebDAV test parameters
USR=user_
PWD=pass_
LOGFILE=dav.log
STATFILE=dav.stat
CURDIR=`pwd`
sqlfile=init_dav_metr.sql
FEXT=.BIN
. $HOME/binsrc/tests/suite/test_fn.sh
rm -f $sqlfile
# SQL script for initializing a schema
cat > $sqlfile <<ENDF
create procedure
make_file ()
{
declare _string any;
declare _add any;
declare len, _size integer;
_size := \$U{_SIZE};
_string := 'This is test file!\n';
len := length (_string);
_add := repeat (_string, (_size*1024/len) + 1);
string_to_file (concat ('\$U{_HOME}','/test_dav'), _add, 0);
}
;
delete from WS.WS.SYS_DAV_USER where U_NAME like 'user_%';
delete from WS.WS.SYS_DAV_COL where COL_NAME like 'user_%';
delete from WS.WS.SYS_DAV_RES where RES_FULL_PATH like '/DAV/user_%';
create procedure
make_users ()
{
declare idx, len integer;
declare _user, _pass varchar;
idx := 1;
len := \$U{_USERS} + 1;
while (idx < len)
{
_user := concat ('user_', cast (idx as varchar));
_pass := concat ('pass_', cast (idx as varchar));
insert soft WS.WS.SYS_DAV_USER (U_ID, U_NAME, U_FULL_NAME, U_E_MAIL, U_PWD,
U_GROUP, U_DEF_PERMS, U_ACCOUNT_DISABLED)
values (idx + 2, _user, 'DAV test user', '[email protected]', _pass, 1, '110100000', 0);
insert into WS.WS.SYS_DAV_COL (COL_ID, COL_NAME, COL_PARENT, COL_OWNER,
COL_GROUP, COL_PERMS, COL_CR_TIME, COL_MOD_TIME)
values (WS.WS.GETID ('C'), _user, 1, idx + 2, 1, '110100000R', now (), now ());
idx := idx + 1;
}
}
;
create procedure
make_uri ()
{
declare _text, _name, _user_dir varchar;
declare idx, len, loops, dlen, rn integer;
declare dl any;
idx := 1;
loops := \$U{_LOOPS};
if ('\$U{_SIZE}' = 'random')
rn := 1;
else
rn := 0;
if (rn)
{
dl := sys_dirlist ('\$U{_HOME}/files', 1);
dlen := length (dl);
}
len := \$U{_USERS} + 1;
while (idx < len)
{
_user_dir := concat ('user_', cast (idx as varchar), '/');
if (not rn)
{
_text := concat ('1 PUT /DAV/', _user_dir, 'test_dav', cast (idx as varchar),'$FEXT HTTP/1.1\n');
_text := concat (_text, '1 GET /DAV/user_', cast (idx as varchar), '/test_dav', cast (idx as varchar),'$FEXT HTTP/1.1\n');
}
else
{
declare fn varchar;
declare ix integer;
ix := 0;
_text := '';
while (ix < loops)
{
fn := aref (dl, rnd (dlen));
_text := concat (_text, '1 PUT /DAV/', _user_dir, fn, ' HTTP/1.1\n');
_text := concat (_text, '1 GET /DAV/', _user_dir, fn, ' HTTP/1.1\n');
ix := ix + 1;
}
}
if (not rn)
_text := repeat (_text, loops);
_text := concat (sprintf ('localhost %s\n', server_http_port ()), _text);
string_to_file (concat ('\$U{_HOME}', '/uri_', cast (idx as varchar), '.url'), _text, 0);
idx := idx + 1;
}
}
;
make_file ();
make_users ();
make_uri ();
ENDF
chmod 644 $sqlfile
# Waits until last client finished
waitAll ()
{
clients=3
while [ "$clients" -gt "2" ]
do
sleep 5
clients=`ps -e | grep urlsimu | grep -v grep | wc -l`
echo -e "Clients remaining: $clients \r"
done
}
# Cleanup logs and help files
rm -f test_dav* uri_*.url cli_*.log $LOGFILE *$FEXT *.txt
cp files/*.txt .
if [ "$#" -eq 3 ]
then
ECHO "Started WebDAV metrics test (clients: $1, size $2 Kb, R/W $3)"
else
ECHO
ECHO " Usage $0 users file_size(Kb.) time_repeats"
ECHO
ECHO " Example: $0 10 1000 5"
ECHO
exit
fi
#
# CREATE FIRST FILE AND USERS (user_n, pass_n)
#
#echo "$ISQL $DSN PROMPT=OFF VERBOSE=OFF ERRORS=STDOUT -u \"_SIZE=$2 _USERS=$1 _LOOPS=$3 _HOME=$CURDIR\" < $sqlfile"
RUN $ISQL $DSN PROMPT=OFF VERBOSE=OFF ERRORS=STDOUT -u "_SIZE=$2 _USERS=$1 _LOOPS=$3 _HOME=$CURDIR" < $sqlfile
#
# CREATE TEST FILES
#
count=1
while [ "$count" -le "$1" ]
do
cp test_dav test_dav$count$FEXT > /dev/null
count=`expr $count + 1`
done
rm -f test_dav
# Start N-1 times urlsimu background and one in foreground
count=1
while [ "$count" -lt "$1" ]
do
URLPUT="$HOME/binsrc/tests/urlsimu -u user_$count -p pass_$count -t cli_$count.bin"
$URLPUT uri_$count.url > cli_$count.log &
count=`expr $count + 1`
done
URLPUT="$HOME/binsrc/tests/urlsimu -u user_$count -p pass_$count -t cli_$count.bin"
$URLPUT -u "$USR$count" -p "$PWD$count" uri_$count.url -t cli_$count.bin | tee cli_$count.log
# Wait until last finishes
waitAll
sleep 1
echo "" >> $STATFILE
echo "" >> $STATFILE
echo "" >> $STATFILE
# Printout the result of test
echo "=================== RESULTS =======================" | tee -a $STATFILE
echo "Initial clients: $1, file size $2 Kb, total R/W $3" | tee -a $STATFILE
echo "Clients: `grep Total cli_*.log | wc -l`" | tee -a $STATFILE
echo "------------------- Counts ------------------------" | tee -a $STATFILE
#grep -h Total cli_*.log > total.log
#cat total.log | tee -a $STATFILE
grep -h Total cli_*.log | cut -d ' ' -f 27- | tee -a $STATFILE
echo "------------------- Average -----------------------" | tee -a $STATFILE
#gawk -f total.awk total.log | tee -a $STATFILE
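# Compute the average of the per-client totals (the value after '/' on each client's Total line).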
avr=0
tmp=0
cnt=0
for f in cli_*.log
do
tmp=`grep 'Total' $f | cut -d '/' -f 2`
avr=`expr $avr + $tmp`
cnt=`expr $cnt + 1`
done
avr=`expr $avr \/ $cnt`
echo "Average: $avr" | tee -a $STATFILE
RUN $ISQL $DSN '"EXEC=checkpoint;"' ERRORS=STDOUT
RUN $ISQL $DSN '"EXEC=status();"' ERRORS=STDOUT
grep 'Lock Status:' $LOGFILE | tee -a $STATFILE
ok_n=`grep 'HTTP/1.1 2' cli_*.log | wc -l`
tot_ok=`expr $ok_n / 2 / $count`
echo "Total successful R/W: $tot_ok of ($3)" | tee -a $STATFILE
echo "=================== END ===========================" | tee -a $STATFILE
echo "" >> $STATFILE
echo "" >> $STATFILE
echo "" >> $STATFILE
# END OF WebDAV metrics test
|
v7fasttrack/virtuoso-opensource
|
binsrc/dav/dav_metr.sh
|
Shell
|
gpl-2.0
| 6,897 |
#!/bin/bash -e
#build deb packages
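# Derive a Debian-friendly version from git describe, e.g. 1.0-5-gabc123 (illustrative tag) becomes 1.0+r5+gabc123;
# fall back to the changelog version if git describe produces nothing.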
VERSION=`git describe | sed 's/-/+r/;s/-/+/'`
[ -z $VERSION ] && VERSION=`dpkg-parsechangelog -S version`
rm -rf build/
CP=`ls`
mkdir -p build
cp -R $CP build
pushd build
rm debian/changelog -f
EDITOR=/bin/true dch --create --package thunderbolt-dkms -v $VERSION "CI Build"
dpkg-buildpackage
#test the packages install
dpkg -i ../*.deb
|
dell/thunderbolt-icm-dkms
|
ci/build_and_install_debs.sh
|
Shell
|
gpl-2.0
| 374 |
convert images/OCS-78-A.png -crop 1571x4538+121+319 +repage images/OCS-78-A.png
#
#
#/OCS-78.png
convert images/OCS-78-B.png -crop 1556x4532+57+317 +repage images/OCS-78-B.png
#
#
#/OCS-78.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/cropedges.OCS-78.sh
|
Shell
|
gpl-2.0
| 193 |
#!/bin/bash
###
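# Rewrite the old 3.14.15-307 MPTCP kernel and modules-extra package URLs and local rpm paths
# to the new 3.14.15-308 build in every file under ./conf/.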
OLD_URL_F_MPTCP="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/fedora-20-x86_64/kernel-3.14.15-307.mptcp.fc20/kernel-3.14.15-307.mptcp.fc20.x86_64.rpm"
NEW_URL_F_MPTCP="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/fedora-21-x86_64/kernel-3.14.15-308.mptcp.fc21/kernel-3.14.15-308.mptcp.fc21.x86_64.rpm"
OLD_URL_F_MPTCP_P="/root/rpm/kernel-3.14.15-307.mptcp.fc20.x86_64.rpm"
NEW_URL_F_MPTCP_P="/root/rpm/kernel-3.14.15-308.mptcp.fc21.x86_64.rpm"
OLD_URL_F_MODULE="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/fedora-20-x86_64/kernel-3.14.15-307.mptcp.fc20/kernel-modules-extra-3.14.15-307.mptcp.fc20.x86_64.rpm"
NEW_URL_F_MODULE="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/fedora-21-x86_64/kernel-3.14.15-308.mptcp.fc21/kernel-modules-extra-3.14.15-308.mptcp.fc21.x86_64.rpm"
OLD_URL_F_MODULE_P="/root/rpm/kernel-modules-extra-3.14.15-307.mptcp.fc20.x86_64.rpm"
NEW_URL_F_MODULE_P="/root/rpm/kernel-modules-extra-3.14.15-308.mptcp.fc21.x86_64.rpm"
OLD_URL_C_MPTCP="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/epel-7-x86_64/kernel-3.14.15-307.mptcp.fc20/kernel-3.14.15-307.mptcp.el7.centos.x86_64.rpm"
NEW_URL_C_MPTCP="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/epel-7-x86_64/kernel-3.14.15-308.mptcp.fc21/kernel-3.14.15-308.mptcp.el7.centos.x86_64.rpm"
OLD_URL_C_MODULE="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/epel-7-x86_64/kernel-3.14.15-307.mptcp.fc20/kernel-modules-extra-3.14.15-307.mptcp.el7.centos.x86_64.rpm"
NEW_URL_C_MODULE="http://copr-be.cloud.fedoraproject.org/results/kenjiro/mptcp-kernel/epel-7-x86_64/kernel-3.14.15-308.mptcp.fc21/kernel-modules-extra-3.14.15-308.mptcp.el7.centos.x86_64.rpm"
OLD_URL_C_MPTCP_P="/root/rpm/kernel-3.14.15-307.mptcp.el7.centos.x86_64.rpm"
NEW_URL_C_MPTCP_P="/root/rpm/kernel-3.14.15-308.mptcp.el7.centos.x86_64.rpm"
OLD_URL_C_MODULE_P="/root/rpm/kernel-modules-extra-3.14.15-307.mptcp.el7.centos.x86_64.rpm"
NEW_URL_C_MODULE_P="/root/rpm/kernel-modules-extra-3.14.15-308.mptcp.el7.centos.x86_64.rpm"
sed -i s!$OLD_URL_F_MPTCP!$NEW_URL_F_MPTCP!g ./conf/*
sed -i s!$OLD_URL_F_MODULE!$NEW_URL_F_MODULE!g ./conf/*
sed -i s!$OLD_URL_F_MPTCP_P!$NEW_URL_F_MPTCP_P!g ./conf/*
sed -i s!$OLD_URL_F_MODULE_P!$NEW_URL_F_MODULE_P!g ./conf/*
sed -i s!$OLD_URL_C_MPTCP!$NEW_URL_C_MPTCP!g ./conf/*
sed -i s!$OLD_URL_C_MODULE!$NEW_URL_C_MODULE!g ./conf/*
sed -i s!$OLD_URL_C_MPTCP_P!$NEW_URL_C_MPTCP_P!g ./conf/*
sed -i s!$OLD_URL_C_MODULE_P!$NEW_URL_C_MODULE_P!g ./conf/*
|
nak3/mptcp-testsuite
|
kvm-setup/update_conf.sh
|
Shell
|
gpl-2.0
| 2,600 |
#!/bin/bash
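# Stamp the new version and release date into the scripts and docs, then package a release tarball under releases/<version>.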
echo -n "Version (exemple: 1.00) : "; \
read version; \
echo -n "Release date (exemple: 2006-01-24) : "; \
read date; \
sed "s/^ # Version : ..../ # Version : $version/" index.pl > TEMP
mv -f TEMP index.pl
sed "s/^ # Version : ..../ # Version : $version/" update.pl > TEMP
mv -f TEMP update.pl
sed "s/^ # Version : ..../ # Version : $version/" download.pl > TEMP
mv -f TEMP download.pl
if [ -f config.pm ]; then
sed "s/^ # Version : ..../ # Version : $version/" config.pm > TEMP
mv -f TEMP config.pm
fi
sed "s/^ # Version : ..../ # Version : $version/" config.example.pm > TEMP
mv -f TEMP config.example.pm
sed "s/^Version : ..../Version : $version/" README > TEMP
mv -f TEMP README
sed "s/Version : ..../Version : $version/" README.md > TEMP
mv -f TEMP README.md
sed "s/^ # Released : ........../ # Released : $date/" index.pl > TEMP
mv -f TEMP index.pl
sed "s/^ # Released : ........../ # Released : $date/" update.pl > TEMP
mv -f TEMP update.pl
sed "s/^ # Released : ........../ # Released : $date/" download.pl > TEMP
mv -f TEMP download.pl
if [ -f config.pm ]; then
sed "s/^ # Released : ........../ # Released : $date/" config.pm > TEMP
mv -f TEMP config.pm
fi
sed "s/^ # Released : ........../ # Released : $date/" config.example.pm > TEMP
mv -f TEMP config.example.pm
sed "s/^Released : ........../Released : $date/" README > TEMP
mv -f TEMP README
sed "s/Released : ........../Released : $date/" README.md > TEMP
mv -f TEMP README.md
mkdir temp
chmod 755 index.pl update.pl download.pl config*.pm
cp -Rp index.pl temp
cp -Rp update.pl temp
cp -Rp download.pl temp
cp -Rp config.example.pm temp/config.pm
cp -d favicon.ico temp
cp -Rp LICENSE temp
cp -Rp README* temp
cp -Rp CHANGELOG temp
cp -Rp UPGRADE temp
cp -Rp template temp
mkdir temp/rrd
cp -R rrd/01_* temp/rrd/
cp -R rrd/02_* temp/rrd/
cp -R rrd/03_* temp/rrd/
cp -R rrd/04_* temp/rrd/
cp -R rrd/05_* temp/rrd/
cp -R rrd/06_* temp/rrd/
cp -R rrd/07_* temp/rrd/
cp -R graphs temp
chown -Rf root:root temp
chmod -Rf 755 temp/rrd/*
chmod -Rf 777 temp/graphs
cd temp
rm -f graphs/.gitignore
rm -Rf graphs/*.png
rm -Rf template/src
for i in rrd/* ; do rm -f $i/*.rrd ; done ;
tar -czf ../eluna_graph_system.tar.gz ./
mkdir -p ../releases
rm -Rf ../releases/$version
mkdir ../releases/$version
mv ../eluna_graph_system.tar.gz ../releases/$version
mv README* ../releases/$version
mv LICENSE ../releases/$version
mv CHANGELOG ../releases/$version
mv UPGRADE ../releases/$version
cd ..
rm -R temp
cd releases
rm -f latest
ln -s $version latest
cd ..
|
stephanedupont/eLunaGraphSystem
|
MAKE.sh
|
Shell
|
gpl-2.0
| 2,566 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Include a lua script from a regular Tupfile. Variables should be preserved.
. ./tup.sh
cat > Tupfile << HERE
CFLAGS += -DFOO
CFLAGS += -DBAR
CFLAGS += -DBAZ
CC = gcc
include build.lua
HERE
cat > build.lua << HERE
files += 'foo.c'
tup.foreach_rule(files, '\$(CC) \$(CFLAGS) -c %f -o %o', '%B.o')
HERE
touch foo.c
update
tup_object_exist . 'gcc -DFOO -DBAR -DBAZ -c foo.c -o foo.o'
eotup
|
gittup/tup
|
test/t2161-lua-include.sh
|
Shell
|
gpl-2.0
| 1,140 |
#!/bin/bash
set -e
get_option () {
local section=$1
local option=$2
local default=$3
# my_print_defaults can output duplicates, if an option exists both globally and in
# a custom config file. We pick the last occurence, which is from the custom config.
ret=$(my_print_defaults $section | grep '^--'${option}'=' | cut -d= -f2- | tail -n1)
[ -z $ret ] && ret=$default
echo $ret
}
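# Usage: value=$(get_option <section> <option> <default>), e.g. SOCKET=$(get_option mysqld socket "/tmp/mysql.sock") (illustrative values).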
# if command starts with an option, prepend mysqld
if [ "${1:0:1}" = '-' ]; then
set -- mysqld "$@"
fi
if [ "$1" = 'mysqld' ]; then
# Get config
DATADIR="$("$@" --verbose --help --innodb-read-only 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"
SOCKET=$(get_option mysqld socket "$DATADIR/mysql.sock")
PIDFILE=$(get_option mysqld pid-file "/var/run/mysqld/mysqld.pid")
if [ ! -d "$DATADIR/mysql" ]; then
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
echo >&2 ' Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?'
exit 1
fi
mkdir -p "$DATADIR"
chown -R mysql:mysql "$DATADIR"
echo 'Initializing database'
mysqld --initialize-insecure=on --datadir="$DATADIR"
echo 'Database initialized'
mysqld --user=mysql --datadir="$DATADIR" --skip-networking &
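# Wait up to 30 seconds for the server socket to appear before running the init SQL.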
for i in $(seq 30 -1 0); do
[ -S $SOCKET ] && break
echo 'MySQL init process in progress...'
sleep 1
done
if [ $i = 0 ]; then
echo >&2 'MySQL init process failed.'
exit 1
fi
mysql_tzinfo_to_sql /usr/share/zoneinfo | mysql --protocol=socket -uroot mysql
# These statements _must_ be on individual lines, and _must_ end with
# semicolons (no line breaks or comments are permitted).
# TODO proper SQL escaping on ALL the things D:
tempSqlFile=$(mktemp /tmp/mysql-first-time.XXXXXX.sql)
cat > "$tempSqlFile" <<-EOSQL
-- What's done in this file shouldn't be replicated
-- or products like mysql-fabric won't work
SET @@SESSION.SQL_LOG_BIN=0;
DELETE FROM mysql.user ;
CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
DROP DATABASE IF EXISTS test ;
EOSQL
if [ "$MYSQL_DATABASE" ]; then
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" >> "$tempSqlFile"
fi
if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
echo "CREATE USER '"$MYSQL_USER"'@'%' IDENTIFIED BY '"$MYSQL_PASSWORD"' ;" >> "$tempSqlFile"
if [ "$MYSQL_DATABASE" ]; then
echo "GRANT ALL ON \`"$MYSQL_DATABASE"\`.* TO '"$MYSQL_USER"'@'%' ;" >> "$tempSqlFile"
fi
fi
echo 'FLUSH PRIVILEGES ;' >> "$tempSqlFile"
mysql --protocol=socket -uroot < "$tempSqlFile"
mysql -uroot -p${MYSQL_ROOT_PASSWORD} < /bla.sql
rm -f "$tempSqlFile"
kill $(cat $PIDFILE)
for i in $(seq 30 -1 0); do
[ -f "$PIDFILE" ] || break
echo 'MySQL init process in progress...'
sleep 1
done
if [ $i = 0 ]; then
echo >&2 'MySQL hangs during init process.'
exit 1
fi
echo 'MySQL init process done. Ready for start up.'
fi
chown -R mysql:mysql "$DATADIR"
fi
exec "$@"
|
craftsmenlabs/gareth-poc
|
database/setup-database.sh
|
Shell
|
gpl-2.0
| 3,076 |
#!/bin/bash
# Copyright (C) 2015 Daniel Mustieles, <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
RELEASE="gnome-3-32"
DL_URL="https://l10n.gnome.org/languages"
WGET_OPTS="wget --no-check-certificate -q"
if [ $# -eq 0 ]
then
LINGUAS="ar bg bn_IN ca cs da de el en_GB es eu fa fi fr gl gu hi hu id it ja ko lv mk nds nl oc pa pl pt pt_BR ro ru sl sr sr@latin sv ta te th tr uk vi zh_CN zh_HK zh_TW"
else
LINGUAS=$@
fi
# Get all the documentation PO file from Damned-Lies, using languages with at least one string in DL
for i in `echo $LINGUAS`
do
echo -e "Downloading PO files for:\e[1;32m $i \e[0m"
mkdir $i
$WGET_OPTS $DL_URL/$i/$RELEASE/doc.tar.gz -O $i/$i.tar.gz ; tar -zxf $i/$i.tar.gz -C $i ; rm $i/$i.tar.gz
$WGET_OPTS $DL_URL/$i/gnome-infrastructure/doc.tar.gz -O $i/infraestructure.tar.gz && tar -zxf $i/infraestructure.tar.gz -C $i && rm $i/infraestructure.tar.gz
$WGET_OPTS $DL_URL/$i/gnome-gimp/doc.tar.gz -O $i/gimp.tar.gz && tar -zxf $i/gimp.tar.gz -C $i && rm $i/gimp.tar.gz
$WGET_OPTS $DL_URL/$i/gnome-extras/doc.tar.gz -O $i/extras.tar.gz && tar -zxf $i/extras.tar.gz -C $i && rm $i/extras.tar.gz
done
# Check PO files with gtxml and create a report file for each language
echo -e "\nGenerating report..."
for j in `echo $LINGUAS`
do
gtxml $j/*.po >>$j-report.txt
rm -rf $j
# Remove files with zero size (languages with no errors) and pack all reports in a .tar.gz file, ready to send to the i18n mailing list
find . -size 0 -exec rm {} \;
# If there is any .txt file it means we've found errors, so report must be generated
if [ -f $j-report.txt ]
then
tar -rf gtxml-doc-reports.tar *.txt
rm *.txt
fi
done
gzip gtxml-doc-reports.tar
REP_LIST=`tar -tf gtxml-doc-reports.tar.gz |awk -F '-' {'print $1'}`
echo -e "\nThis is the list of the affected languages:"
echo -e "\e[1;31m$REP_LIST \e[0m\n"
|
leo666/gnome_scripts
|
report.sh
|
Shell
|
gpl-2.0
| 2,528 |
#! /bin/sh
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Regression test for substitution references to conditional variables.
# Report from Richard Boulton.
. test-init.sh
cat >> configure.ac << 'END'
AM_CONDITIONAL([COND1], [true])
AM_CONDITIONAL([COND2], [true])
AC_OUTPUT
END
cat > Makefile.am << 'END'
AUTOMAKE_OPTIONS = no-dependencies
CC = false
OBJEXT = obj
var1 = dlmain
if COND1
var2 = $(var1:=.c) foo.cc
else
var2 = $(var1:=.c)
endif
if COND2
var3 = $(var2:.cc=.c)
else
var3 = $(var2:.cc=.c)
endif
helldl_SOURCES = $(var3)
.PHONY: test
test:
is $(helldl_SOURCES) $(helldl_OBJECTS) == \
dlmain.c foo.c dlmain.obj foo.obj
bin_PROGRAMS = helldl
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a
./configure
$MAKE test
:
|
Starlink/automake
|
t/cond18.sh
|
Shell
|
gpl-2.0
| 1,372 |
#!/bin/bash
#####################################
#####################################
# Ruben Izquierdo Bevia
# VU University of Amsterdam
# [email protected]
# [email protected]
# http://rubenizquierdobevia.com/
# Version 1.0
#####################################
#####################################
# Remove in case already exist (wndomains not removed)
cd resources
rm -rf WordNet-*
rm -rf basic_level_concepts
#########
#BLC
#########
git clone https://github.com/rubenIzquierdo/basic_level_concepts
########
#############
# Wordnet2.0
#############
wget http://wordnetcode.princeton.edu/2.0/WordNet-2.0.tar.gz
tar xzf WordNet-2.0.tar.gz
rm WordNet-2.0.tar.gz
#############
#############
#############
# Wordnet3.0
#############
wget http://wordnetcode.princeton.edu/3.0/WordNet-3.0.tar.gz
tar xzf WordNet-3.0.tar.gz
rm WordNet-3.0.tar.gz
#############
#############
cd .. #Back to the root
rm -rf libs
mkdir libs
cd libs
git clone https://github.com/MartenPostma/WordNetMapper
touch __init__.py
cd .. # Back to the root
|
rubenIzquierdo/semantic_class_manager
|
install.sh
|
Shell
|
gpl-2.0
| 1,052 |
#!/bin/sh
# SERIOUSLY, THOUGH, THIS NEEDS TO NOT BE A LIST OF BUILD COMMANDS SOME TIME IN THE FUTURE
echo off
#this increases the build number in the kernel header
cd core
buildno=$(cat kernel.h | grep "#define P5_BUILD_NUMBER " | cut -d' ' -f3)
buildno=$(($buildno + 1))
echo Executing P5 build \#$buildno
sed "s|^#define P5_BUILD_NUMBER .*$|#define P5_BUILD_NUMBER $buildno|" kernel.h > tmp_h
rm kernel.h
mv tmp_h kernel.h
cd ..
C_OPTS="-nostdlib -nostartfiles -nodefaultlibs -nostdinc -ffreestanding -m32 -g" # -fno-zero-initialized-in-bss"
cd core
echo Starting core build...
as -o ../build/syscalls.o syscall.s -g --32
gcc -c -o ../build/syscall.o syscall.c $C_OPTS
as -o ../build/init.o init.s -g --32
gcc -c -o ../build/kernel.o kernel.c $C_OPTS
gcc -c -o ../build/util.o util.c -nostdlib -m32
gcc -c -o ../build/int.o int.c $C_OPTS
as -o ../build/expt.o expt.s -g --32
as -o ../build/irqs.o irq.s -g --32
gcc -c -o ../build/irq.o irq.c $C_OPTS
cd ../memory
echo Starting memory build...
gcc -c -o ../build/memory.o memory.c $C_OPTS
gcc -c -o ../build/gdt.o gdt.c $C_OPTS
gcc -c -o ../build/paging.o paging.c $C_OPTS
as -o ../build/pagings.o paging.s -g --32
cd ../obj
echo Starting obj build...
gcc -c -o ../build/lists.o lists.c $C_OPTS
gcc -c -o ../build/variant.o variant.c $C_OPTS
cd ../ascii_io
echo Starting ascii_io build...
gcc -c -o ../build/ascii_i.o ascii_i.c $C_OPTS
gcc -c -o ../build/ascii_o.o ascii_o.c $C_OPTS
gcc -c -o ../build/keyboard.o keyboard.c $C_OPTS
gcc -c -o ../build/serial.o serial.c $C_OPTS
cd ../process
echo Starting process build...
as -o ../build/processs.o process.s -g --32
gcc -c -o ../build/process.o process.c $C_OPTS
gcc -c -o ../build/message.o message.c $C_OPTS
cd ../kserver
echo Starting kserver build...
gcc -c -o ../build/kserver.o kserver.c $C_OPTS
cd ../fat12
echo Starting fat12 build...
gcc -c -o ../build/hiinter.o hiinter.c $C_OPTS
cd ../block
echo Starting block build...
gcc -c -o ../build/block.o block.c $C_OPTS
gcc -c -o ../build/ramdisk.o ramdisk.c $C_OPTS
cd ../fs
echo Starting fs build...
gcc -c -o ../build/fs.o fs.c $C_OPTS
gcc -c -o ../build/ramfs.o ramfs.c $C_OPTS
cd ../timer
echo Starting timer build...
gcc -c -o ../build/timer.o timer.c $C_OPTS
as -o ../build/timers.o timer.s -g --32
cd ../build
echo Linking binary
ld -o ../bin/p5kern.o -T ../lscpt.lds -melf_i386 ../files
objcopy -O binary -j .init -j .text -j .data -j .bss --set-section-flags .bss=alloc,load,contents ../bin/p5kern.o ../bin/p5kern.bin
cd ..
echo p5kern.bin finished
echo --------------------------------------------------------------------------------
echo P5OSR0ppb compilation finished.
echo Please consult README.TXT for information on using the two binaries which you
echo just created.
echo --------------------------------------------------------------------------------
echo on
|
JMarlin/P5-Redux
|
P5OSPPB/build.sh
|
Shell
|
gpl-2.0
| 2,857 |
. envsetupIsengard.sh
filepath=$(cd "$(dirname "$0")"; pwd)
DEPENDS=$filepath/../Isengard-depends
git clean -dxf
#git submodule update --init addons/skin.re-touched
cd tools/depends
./bootstrap
./configure --with-toolchain=$TOOLCHAIN --prefix=$DEPENDS --host=arm-linux-androideabi --with-sdk-path=$SDK --with-ndk=$NDK --with-sdk=android-17 --with-tarballs=$TARBALLS
make -C target/android-sources-ics/ clean
make -C target/xbmc-audioencoder-addons/ clean
make -C target/librtmp/ clean
make -j8 -C target/binary-addons
make -j8 -C native/
make -C native/
make -j8
cd ../../
make -C tools/depends/target/android-sources-ics/
make -C tools/depends/target/xbmc
make -j8
make apk
|
vidonme/xbmc
|
rebuild_Isengard.sh
|
Shell
|
gpl-2.0
| 684 |
#!/bin/bash
echo -e "\ninstalling required software packages...\n"
echo 'solver.allowVendorChange = true' >> /etc/zypp/zypp.conf
zypper -q ar -f http://download.opensuse.org/repositories/OBS:/Server:/Unstable/openSUSE_13.2/OBS:Server:Unstable.repo
zypper -q --gpg-auto-import-keys refresh
zypper -q -n install update-alternatives ruby-devel make gcc patch cyrus-sasl-devel openldap2-devel libmysqld-devel libxml2-devel zlib-devel libxslt-devel nodejs mariadb memcached sphinx screen sphinx obs-server phantomjs
echo -e "\nsetup ruby binaries...\n"
for bin in rake rdoc ri; do
/usr/sbin/update-alternatives --set $bin /usr/bin/$bin.ruby.ruby2.2
done
echo -e "\ndisabling versioned gem binary names...\n"
echo 'install: --no-format-executable' >> /etc/gemrc
echo -e "\ninstalling bundler...\n"
gem install bundler
echo -e "\ninstalling your bundle...\n"
su - vagrant -c "cd /vagrant/src/api/; bundle install --quiet"
echo -e "\nsetting up mariadb...\n"
systemctl start mysql
systemctl enable mysql
mysqladmin -u root password 'opensuse'
echo -e "\nsetting up memcached...\n"
systemctl start memcached
systemctl enable memcached
# Configure the database if it isn't
if [ ! -f /vagrant/src/api/config/database.yml ] && [ -f /vagrant/src/api/config/database.yml.example ]; then
echo -e "\nSetting up your database from config/database.yml...\n"
export DATABASE_URL="mysql2://root:opensuse@localhost/api_development"
cd /vagrant/src/api
rake -f /vagrant/src/api/Rakefile db:create
rake -f /vagrant/src/api/Rakefile db:setup
rake -f /vagrant/src/api/Rakefile test:unit/watched_project_test
cd -
else
echo -e "\n\nWARNING: You have already configured your database in config/database.yml."
echo -e "WARNING: Please make sure this configuration works in this vagrant box!\n\n"
fi
# Configure the app if it isn't
if [ ! -f /vagrant/src/api/config/options.yml ] && [ -f /vagrant/src/api/config/options.yml.example ]; then
echo "Configuring your app in config/options.yml..."
sed 's/source_port: 5352/source_port: 3200/' /vagrant/src/api/config/options.yml.example > /vagrant/src/api/config/options.yml
else
echo -e "\n\nWARNING: You have already configured your app in config/options.yml."
echo -e "WARNING: Please make sure this configuration works in this vagrant box!\n\n"
fi
echo "Setting up your OBS test backend..."
# Put the backend data dir outside the shared folder so it can use hardlinks
# which isn't possible with VirtualBox shared folders...
mkdir /tmp/vagrant_tmp
chown vagrant:users /tmp/vagrant_tmp
echo -e "/tmp/vagrant_tmp /vagrant/src/api/tmp none bind 0 0" >> /etc/fstab
echo -e "\nProvisioning of your OBS API rails app done!"
echo -e "To start your development OBS backend run: vagrant exec ./script/start_test_backend\n"
echo -e "To start your development OBS frontend run: vagrant exec rails s\n"
|
martin-a-brown/open-build-service
|
bootstrap.sh
|
Shell
|
gpl-2.0
| 2,857 |
#!/bin/bash
. `dirname $0`/functions.sh
# Kernels before 2.4.10 are known not to work
case "`uname -r`" in
[01].*|2.[0-3].*|2.4.[0-9]|2.4.[0-9][^0-9]*) exit 77;;
esac
rm -f shuffle2 shuffle2lib*.so shuffle2.log shuffle2.lds
$CC -shared -O2 -fpic -o shuffle2lib1.so $srcdir/reloc1lib1.c
$CC -shared -O2 -fpic -o shuffle2lib2.so $srcdir/reloc1lib2.c shuffle2lib1.so
BINS="shuffle2"
LIBS="shuffle2lib1.so shuffle2lib2.so"
$CCLINK -o shuffle2 $srcdir/shuffle2.c -Wl,--rpath-link,. shuffle2lib2.so \
-Wl,--verbose 2>&1 | sed -e '/^=========/,/^=========/!d;/^=========/d' \
-e 's/0x08048000/0x08000000/;s/SIZEOF_HEADERS.*$/& . += 56;/' > shuffle2.lds
$CCLINK -o shuffle2 $srcdir/shuffle2.c -Wl,--rpath-link,. shuffle2lib2.so \
-Wl,-T,shuffle2.lds
savelibs
echo $PRELINK ${PRELINK_OPTS--vm} ./shuffle2 > shuffle2.log
$PRELINK ${PRELINK_OPTS--vm} ./shuffle2 >> shuffle2.log 2>&1 || exit 1
grep -q ^`echo $PRELINK | sed 's/ .*$/: /'` shuffle2.log && exit 2
LD_LIBRARY_PATH=. ./shuffle2 || exit 3
readelf -a ./shuffle2 >> shuffle2.log 2>&1 || exit 4
# So that it is not prelinked again
chmod -x ./shuffle2
comparelibs >> shuffle2.log 2>&1 || exit 5
|
ystk/debian-prelink
|
testsuite/shuffle2.sh
|
Shell
|
gpl-2.0
| 1,148 |
#!/bin/sh
# Source Qubes library.
# shellcheck source=init/functions
. /usr/lib/qubes/init/functions
# Setup IP address at specific time of system boot, instead of asynchronously
# by udev
QUBES_MANAGED_IFACE="$(get_qubes_managed_iface)"
if [ "x$QUBES_MANAGED_IFACE" != "x" ]; then
# systemd does not support conditional After= dependencies, nor a tool to
# just wait for the unit to be activated
# if the network interface is expected, use `systemctl start` to wait for
# it to be started - it would be started by udev (SYSTEMD_WANTS) anyway
systemctl start "qubes-network-uplink@$QUBES_MANAGED_IFACE.service"
fi
|
QubesOS/qubes-core-agent-linux
|
vm-systemd/network-uplink-wait.sh
|
Shell
|
gpl-2.0
| 636 |
#!/bin/bash
set -e
# bug number: LU-2012 10124 LU-7372
ALWAYS_EXCEPT="14b 15c 26 $REPLAY_DUAL_EXCEPT"
SAVE_PWD=$PWD
PTLDEBUG=${PTLDEBUG:--1}
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
SETUP=${SETUP:-""}
CLEANUP=${CLEANUP:-""}
MOUNT_2=${MOUNT_2:-"yes"}
export MULTIOP=${MULTIOP:-multiop}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
# 7 (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="21b"
[[ $(facet_fstype $SINGLEMDS) == zfs ]] &&
# bug number for skipped test: LU-2230
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 21b"
build_test_filter
check_and_setup_lustre
MOUNTED=$(mounted_lustre_filesystems)
if ! $(echo $MOUNTED' ' | grep -w -q $MOUNT2' '); then
zconf_mount $HOSTNAME $MOUNT2
MOUNTED2=yes
fi
assert_DIR
rm -rf $DIR/[df][0-9]*
[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
# if there is no CLIENT1 defined, some tests can be ran on localhost
CLIENT1=${CLIENT1:-$HOSTNAME}
# if CLIENT2 doesn't exist then use CLIENT1 instead
# All tests should use CLIENT2 with MOUNT2 only therefore it will work if
# $CLIENT2 == CLIENT1
# Exception is the test which need two separate nodes
CLIENT2=${CLIENT2:-$CLIENT1}
# LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
sync
do_facet $SINGLEMDS "sync; sleep 10; sync; sleep 10; sync"
fi
LU482_FAILED=$(mktemp -u $TMP/$TESTSUITE.lu482.XXXXXX)
test_0a() {
echo "Check file is LU482_FAILED=$LU482_FAILED"
touch $MOUNT2/$tfile-A # force sync FLD/SEQ update before barrier
replay_barrier $SINGLEMDS
#define OBD_FAIL_PTLRPC_FINISH_REPLAY | OBD_FAIL_ONCE
touch $MOUNT2/$tfile
createmany -o $MOUNT1/$tfile- 50
$LCTL set_param fail_loc=0x80000514
facet_failover $SINGLEMDS
[ -f "$LU482_FAILED" ] && skip "LU-482 failure" && return 0
client_up || return 1
umount -f $MOUNT2
client_up || return 1
zconf_mount `hostname` $MOUNT2 || error "mount2 fais"
unlinkmany $MOUNT1/$tfile- 50 || return 2
rm $MOUNT2/$tfile || return 3
rm $MOUNT2/$tfile-A || return 4
}
run_test 0a "expired recovery with lost client"
if [ -f "$LU482_FAILED" ]; then
log "Found check file $LU482_FAILED, aborting test script"
rm -vf "$LU482_FAILED"
complete $SECONDS
do_nodes $CLIENTS umount -f $MOUNT2 || true
do_nodes $CLIENTS umount -f $MOUNT || true
# copied from stopall, but avoid the MDS recovery
for num in `seq $OSTCOUNT`; do
stop ost$num -f
rm -f $TMP/ost${num}active
done
if ! combined_mgs_mds ; then
stop mgs
fi
exit_status
fi
test_0b() {
replay_barrier $SINGLEMDS
touch $MOUNT2/$tfile
touch $MOUNT1/$tfile-2
umount $MOUNT2
facet_failover $SINGLEMDS
umount -f $MOUNT1
zconf_mount `hostname` $MOUNT1 || error "mount1 fais"
zconf_mount `hostname` $MOUNT2 || error "mount2 fais"
# it is uncertain if file-2 exists or not, remove it if it does
checkstat $MOUNT1/$tfile-2 && rm $MOUNT1/$tfile-2
checkstat $MOUNT2/$tfile && return 2
return 0
}
run_test 0b "lost client during waiting for next transno"
test_1() {
touch $MOUNT1/a
replay_barrier $SINGLEMDS
touch $MOUNT2/b
fail $SINGLEMDS
checkstat $MOUNT2/a || return 1
checkstat $MOUNT1/b || return 2
rm $MOUNT2/a $MOUNT1/b
checkstat $MOUNT1/a && return 3
checkstat $MOUNT2/b && return 4
return 0
}
run_test 1 "|X| simple create"
test_2() {
replay_barrier $SINGLEMDS
mkdir $MOUNT1/adir
fail $SINGLEMDS
checkstat $MOUNT2/adir || return 1
rmdir $MOUNT2/adir
checkstat $MOUNT2/adir && return 2
return 0
}
run_test 2 "|X| mkdir adir"
test_3() {
replay_barrier $SINGLEMDS
mkdir $MOUNT1/adir
mkdir $MOUNT2/adir/bdir
fail $SINGLEMDS
checkstat $MOUNT2/adir || return 1
checkstat $MOUNT1/adir/bdir || return 2
rmdir $MOUNT2/adir/bdir $MOUNT1/adir
checkstat $MOUNT1/adir && return 3
checkstat $MOUNT2/adir/bdir && return 4
return 0
}
run_test 3 "|X| mkdir adir, mkdir adir/bdir "
test_4() {
mkdir $MOUNT1/adir
replay_barrier $SINGLEMDS
mkdir $MOUNT1/adir && return 1
mkdir $MOUNT2/adir/bdir
fail $SINGLEMDS
checkstat $MOUNT2/adir || return 2
checkstat $MOUNT1/adir/bdir || return 3
rmdir $MOUNT2/adir/bdir $MOUNT1/adir
checkstat $MOUNT1/adir && return 4
checkstat $MOUNT2/adir/bdir && return 5
return 0
}
run_test 4 "|X| mkdir adir (-EEXIST), mkdir adir/bdir "
test_5() {
# multiclient version of replay_single.sh/test_8
mcreate $MOUNT1/a
multiop_bg_pause $MOUNT2/a o_tSc || return 1
pid=$!
rm -f $MOUNT1/a
replay_barrier $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
fail $SINGLEMDS
[ -e $MOUNT2/a ] && return 2
return 0
}
run_test 5 "open, unlink |X| close"
test_6() {
mcreate $MOUNT1/a
multiop_bg_pause $MOUNT2/a o_c || return 1
pid1=$!
multiop_bg_pause $MOUNT1/a o_c || return 1
pid2=$!
rm -f $MOUNT1/a
replay_barrier $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 1
fail $SINGLEMDS
kill -USR1 $pid2
wait $pid2 || return 1
[ -e $MOUNT2/a ] && return 2
return 0
}
run_test 6 "open1, open2, unlink |X| close1 [fail $SINGLEMDS] close2"
test_8() {
replay_barrier $SINGLEMDS
drop_reint_reply "mcreate $MOUNT1/$tfile" || return 1
fail $SINGLEMDS
checkstat $MOUNT2/$tfile || return 2
rm $MOUNT1/$tfile || return 3
return 0
}
run_test 8 "replay of resent request"
test_9() {
replay_barrier $SINGLEMDS
mcreate $MOUNT1/$tfile-1
mcreate $MOUNT2/$tfile-2
# drop first reint reply
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
fail $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
rm $MOUNT1/$tfile-[1,2] || return 1
return 0
}
run_test 9 "resending a replayed create"
test_10() {
mcreate $MOUNT1/$tfile-1
replay_barrier $SINGLEMDS
munlink $MOUNT1/$tfile-1
mcreate $MOUNT2/$tfile-2
# drop first reint reply
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
fail $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
checkstat $MOUNT1/$tfile-1 && return 1
checkstat $MOUNT1/$tfile-2 || return 2
rm $MOUNT1/$tfile-2
return 0
}
run_test 10 "resending a replayed unlink"
test_11() {
replay_barrier $SINGLEMDS
mcreate $DIR1/$tfile-1
mcreate $DIR2/$tfile-2
mcreate $DIR1/$tfile-3
mcreate $DIR2/$tfile-4
mcreate $DIR1/$tfile-5
# drop all reint replies for a while
do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0119
# note that with this fail_loc set, facet_failover df will fail
facet_failover $SINGLEMDS
local clients=${CLIENTS:-$HOSTNAME}
wait_clients_import_state "$clients" $SINGLEMDS FULL
do_facet $SINGLEMDS $LCTL set_param fail_loc=0
rm $DIR1/$tfile-[1-5] || return 1
return 0
}
run_test 11 "both clients timeout during replay"
test_12() {
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile mo_c || return 1
MULTIPID=$!
#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000302
facet_failover $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
clients_up || return 1
ls $DIR/$tfile
kill -USR1 $MULTIPID || return 3
wait $MULTIPID || return 4
$CHECKSTAT -t file $DIR/$tfile || return 2
rm $DIR/$tfile
return 0
}
run_test 12 "open resend timeout"
test_13() {
multiop_bg_pause $DIR/$tfile mo_c || return 1
MULTIPID=$!
replay_barrier $SINGLEMDS
kill -USR1 $MULTIPID || return 3
wait $MULTIPID || return 4
# drop close
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000115
facet_failover $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
clients_up || return 1
ls $DIR/$tfile
$CHECKSTAT -t file $DIR/$tfile || return 2
rm $DIR/$tfile
return 0
}
run_test 13 "close resend timeout"
# test 14a removed after 18143 because it shouldn't fail anymore and do the same
# as test_15a
test_14b() {
wait_mds_ost_sync
wait_delete_completed
local BEFOREUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
mkdir -p $MOUNT1/$tdir
$SETSTRIPE -i 0 $MOUNT1/$tdir
replay_barrier $SINGLEMDS
createmany -o $MOUNT1/$tdir/$tfile- 5
$SETSTRIPE -i 0 $MOUNT2/$tfile-2
dd if=/dev/zero of=$MOUNT2/$tfile-2 bs=1M count=5
createmany -o $MOUNT1/$tdir/$tfile-3- 5
umount $MOUNT2
fail $SINGLEMDS
wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
# first set of files should have been replayed
unlinkmany $MOUNT1/$tdir/$tfile- 5 || error "first unlinks failed"
unlinkmany $MOUNT1/$tdir/$tfile-3- 5 || error "second unlinks failed"
zconf_mount $HOSTNAME $MOUNT2 || error "mount $MOUNT2 failed"
[ -f $MOUNT2/$tfile-2 ] && error "$MOUNT2/$tfile-2 exists!"
wait_mds_ost_sync || error "wait_mds_ost_sync failed"
wait_delete_completed || error "wait_delete_complete failed"
local AFTERUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
log "before $BEFOREUSED, after $AFTERUSED"
# leave some margin for some files/dirs to be modified (OI, llog, etc)
[ $AFTERUSED -gt $((BEFOREUSED + 128)) ] &&
error "after $AFTERUSED > before $BEFOREUSED" || true
}
run_test 14b "delete ost orphans if gap occured in objids due to VBR"
test_15a() { # was test_15
replay_barrier $SINGLEMDS
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
umount $MOUNT2
fail $SINGLEMDS
unlinkmany $MOUNT1/$tfile- 25 || return 2
[ -e $MOUNT1/$tfile-2-0 ] && error "$tfile-2-0 exists"
zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
run_test 15a "timeout waiting for lost client during replay, 1 client completes"
test_15c() {
replay_barrier $SINGLEMDS
for ((i = 0; i < 2000; i++)); do
echo "data" > "$MOUNT2/${tfile}-$i" || error "create ${tfile}-$i failed"
done
umount $MOUNT2
fail $SINGLEMDS
zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
run_test 15c "remove multiple OST orphans"
test_16() {
replay_barrier $SINGLEMDS
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
umount $MOUNT2
facet_failover $SINGLEMDS
sleep $TIMEOUT
fail $SINGLEMDS
unlinkmany $MOUNT1/$tfile- 25 || return 2
zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
run_test 16 "fail MDS during recovery (3571)"
test_17() {
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
# Make sure the disconnect is lost
replay_barrier ost1
umount $MOUNT2
facet_failover ost1
sleep $TIMEOUT
fail ost1
unlinkmany $MOUNT1/$tfile- 25 || return 2
zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
run_test 17 "fail OST during recovery (3571)"
# cleanup with blocked enqueue fails until timer elapses (MDS busy), wait for it
export NOW=0
test_18() { # bug 3822 - evicting client with enqueued lock
#set -vx
local DLMTRACE=$(do_facet $SINGLEMDS lctl get_param debug)
do_facet $SINGLEMDS lctl set_param debug=+dlmtrace
mkdir -p $MOUNT1/$tdir || error "mkdir $MOUNT1/$tdir failed"
touch $MOUNT1/$tdir/${tfile}0 || error "touch file failed"
statmany -s $MOUNT1/$tdir/$tfile 1 500 &
OPENPID=$!
NOW=$SECONDS
#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
do_facet $SINGLEMDS lctl set_param fail_loc=0x8000030b # hold enqueue
sleep 1
#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=0
do_facet client lctl set_param fail_loc=0x80000305 # drop cb, evict
cancel_lru_locks mdc
usleep 500 # wait to ensure first client is one that will be evicted
openfile -f O_RDONLY $MOUNT2/$tdir/$tfile
wait $OPENPID
do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=1
do_facet $SINGLEMDS lctl debug_kernel |
grep "not entering recovery" && error "client not evicted"
do_facet client "lctl set_param fail_loc=0"
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
}
run_test 18 "ldlm_handle_enqueue succeeds on evicted export (3822)"
test_19() { # Bug 10991 - resend of open request does not fail assertion.
replay_barrier $SINGLEMDS
drop_ldlm_reply "createmany -o $DIR/$tfile 1" || return 1
fail $SINGLEMDS
checkstat $DIR2/${tfile}0 || return 2
rm $DIR/${tfile}0 || return 3
return 0
}
run_test 19 "resend of open request"
test_20() { #16389
local before=$SECONDS
replay_barrier $SINGLEMDS
touch $DIR1/$tfile.a
touch $DIR2/$tfile.b
umount $DIR2
fail $SINGLEMDS
rm $DIR1/$tfile.a
zconf_mount $HOSTNAME $DIR2 || error "mount $DIR2 fail"
local tier1=$((SECONDS - before))
before=$SECONDS
replay_barrier $SINGLEMDS
touch $DIR1/$tfile.a
touch $DIR2/$tfile.b
umount $DIR2
fail $SINGLEMDS
rm $DIR1/$tfile.a
zconf_mount $HOSTNAME $DIR2 || error "mount $DIR2 fail"
local tier2=$((SECONDS - before))
# error out if the second recovery takes 1.5x (or more) as long as the first
((tier2 < tier1 * 6 / 4)) ||
error "recovery time $tier2 >= 1.5x original time $tier1"
}
run_test 20 "recovery time is not increasing"
# commit on sharing tests
test_21a() {
local param_file=$TMP/$tfile-params
save_lustre_params $SINGLEMDS "mdt.*.commit_on_sharing" > $param_file
do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=1
touch $MOUNT1/$tfile-1
mv $MOUNT2/$tfile-1 $MOUNT2/$tfile-2
mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
replay_barrier_nosync $SINGLEMDS
umount $MOUNT2
facet_failover $SINGLEMDS
# all renames are replayed
unlink $MOUNT1/$tfile-3 || return 2
zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=0
rm -rf $MOUNT1/$tfile-*
restore_lustre_params < $param_file
rm -f $param_file
return 0
}
run_test 21a "commit on sharing"
test_21b_sub () {
local mds=$1
do_node $CLIENT1 rm -f $MOUNT1/$tfile-*
do_facet $mds sync
do_node $CLIENT1 touch $MOUNT1/$tfile-1
do_node $CLIENT2 mv $MOUNT1/$tfile-1 $MOUNT1/$tfile-2
do_node $CLIENT1 mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
replay_barrier_nosync $mds
shutdown_client $CLIENT2 $MOUNT1
facet_failover $mds
# were renames replayed?
local rc=0
echo UNLINK $MOUNT1/$tfile-3
do_node $CLIENT1 unlink $MOUNT1/$tfile-3 ||
{ echo "unlink $tfile-3 fail!" && rc=1; }
boot_node $CLIENT2
zconf_mount_clients $CLIENT2 $MOUNT1 ||
error "mount $CLIENT2 $MOUNT1 fail"
return $rc
}
test_21b() {
[ -z "$CLIENTS" ] && skip "Need two or more clients" && return
[ $CLIENTCOUNT -lt 2 ] &&
{ skip "Need 2+ clients, have $CLIENTCOUNT" && return; }
if [ "$FAILURE_MODE" = "HARD" ] && mixed_mdt_devs; then
skip "Several MDTs on one MDS with FAILURE_MODE=$FAILURE_MODE"
return 0
fi
zconf_umount_clients $CLIENTS $MOUNT2
zconf_mount_clients $CLIENTS $MOUNT1
local param_file=$TMP/$tfile-params
local mdtidx=$($LFS getstripe -M $MOUNT1)
local facet=mds$((mdtidx + 1))
save_lustre_params $facet "mdt.*.commit_on_sharing" > $param_file
# COS enabled
local COS=1
do_facet $facet lctl set_param mdt.*.commit_on_sharing=$COS
test_21b_sub $facet || error "Not all renames are replayed. COS=$COS"
# there is still a window when transactions may be written to disk
# before the mds device is set R/O. To avoid such a rare test failure,
# the check is repeated several times.
COS=0
local n_attempts=1
while true; do
# COS disabled (should fail)
do_facet $facet lctl set_param mdt.*.commit_on_sharing=$COS
test_21b_sub $facet || break
n_attempts=$((n_attempts + 1))
[ $n_attempts -gt 3 ] &&
error "can't check if COS works: rename replied w/o COS"
done
zconf_mount_clients $CLIENTS $MOUNT2
restore_lustre_params < $param_file
rm -f $param_file
return 0
}
run_test 21b "commit on sharing, two clients"
checkstat_22() {
checkstat $MOUNT1/$remote_dir || return 1
checkstat $MOUNT1/$remote_dir/dir || return 2
checkstat $MOUNT1/$remote_dir/$tfile-1 || return 3
checkstat $MOUNT1/$remote_dir/dir/$tfile-1 || return 4
return 0
}
create_remote_dir_files_22() {
do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir/dir || return 1
do_node $CLIENT1 createmany -o $MOUNT1/$remote_dir/dir/$tfile- 2 ||
return 2
do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 ||
return 3
return 0
}
test_22a () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
return 0
local MDTIDX=1
local remote_dir=${tdir}/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
# OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
CLIENT_PID=$!
fail mds$((MDTIDX + 1))
wait $CLIENT_PID || error "lfs mkdir failed"
replay_barrier mds$MDTIDX
create_remote_dir_files_22 || error "Remote creation failed $?"
fail mds$MDTIDX
checkstat_22 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
return 0
}
run_test 22a "c1 lfs mkdir -i 1 dir1, M1 drop reply & fail, c2 mkdir dir1/dir"
test_22b () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local MDTIDX=1
local remote_dir=$tdir/remote_dir
# OBD_FAIL_MDS_REINT_NET_REP 0x119
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
CLIENT_PID=$!
fail mds${MDTIDX},mds$((MDTIDX + 1))
wait $CLIENT_PID || error "lfs mkdir failed"
replay_barrier mds$MDTIDX
create_remote_dir_files_22 || error "Remote creation failed $?"
fail mds${MDTIDX}
checkstat_22 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
return 0
}
run_test 22b "c1 lfs mkdir -i 1 d1, M1 drop reply & fail M0/M1, c2 mkdir d1/dir"
test_22c () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
return 0
local MDTIDX=1
local remote_dir=${tdir}/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
# OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
do_facet mds$MDTIDX lctl set_param fail_loc=0x1701
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
CLIENT_PID=$!
do_facet mds$MDTIDX lctl set_param fail_loc=0
fail mds$MDTIDX
wait $CLIENT_PID || error "lfs mkdir failed"
replay_barrier mds$MDTIDX
create_remote_dir_files_22 || error "Remote creation failed $?"
fail mds$MDTIDX
checkstat_22 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
return 0
}
run_test 22c "c1 lfs mkdir -i 1 d1, M1 drop update & fail M1, c2 mkdir d1/dir"
test_22d () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local MDTIDX=1
local remote_dir=${tdir}/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
# OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
do_facet mds$MDTIDX lctl set_param fail_loc=0x1701
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
CLIENT_PID=$!
do_facet mds$MDTIDX lctl set_param fail_loc=0
fail mds${MDTIDX},mds$((MDTIDX + 1))
wait $CLIENT_PID || error "lfs mkdir failed"
replay_barrier mds$MDTIDX
create_remote_dir_files_22 || error "Remote creation failed $?"
fail mds$MDTIDX
checkstat_22 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
return 0
}
run_test 22d "c1 lfs mkdir -i 1 d1, M1 drop update & fail M0/M1,c2 mkdir d1/dir"
checkstat_23() {
checkstat $MOUNT1/$remote_dir || return 1
checkstat $MOUNT1/$remote_dir/$tfile-1 || return 2
return 0
}
create_remote_dir_files_23() {
do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir || return 1
do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 || return 2
return 0
}
test_23a () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
return 0
local MDTIDX=1
local remote_dir=$tdir/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
error "lfs mkdir failed"
# OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
local CLIENT_PID=$!
do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
fail mds$((MDTIDX + 1))
wait $CLIENT_PID || error "rmdir remote dir failed"
replay_barrier mds${MDTIDX}
create_remote_dir_files_23 || error "Remote creation failed $?"
fail mds${MDTIDX}
checkstat_23 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
return 0
}
run_test 23a "c1 rmdir d1, M1 drop reply and fail, client2 mkdir d1"
test_23b () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local MDTIDX=1
local remote_dir=$tdir/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
error "lfs mkdir failed"
# OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
local CLIENT_PID=$!
do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
fail mds${MDTIDX},mds$((MDTIDX + 1))
wait $CLIENT_PID || error "rmdir remote dir failed"
replay_barrier mds${MDTIDX}
create_remote_dir_files_23 || error "Remote creation failed $?"
fail mds${MDTIDX}
checkstat_23 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
return 0
}
run_test 23b "c1 rmdir d1, M1 drop reply and fail M0/M1, c2 mkdir d1"
test_23c () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
return 0
local MDTIDX=1
local remote_dir=$tdir/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
error "lfs mkdir failed"
# OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
CLIENT_PID=$!
do_facet mds${MDTIDX} lctl set_param fail_loc=0
fail mds${MDTIDX}
wait $CLIENT_PID || error "rmdir remote dir failed"
replay_barrier mds${MDTIDX}
create_remote_dir_files_23 || error "Remote creation failed $?"
fail mds${MDTIDX}
checkstat_23 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || return 6
return 0
}
run_test 23c "c1 rmdir d1, M0 drop update reply and fail M0, c2 mkdir d1"
test_23d () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local MDTIDX=1
local remote_dir=$tdir/remote_dir
do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
error "lfs mkdir failed"
# OBD_FAIL_UPDATE_OBJ_NET 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
CLIENT_PID=$!
do_facet mds${MDTIDX} lctl set_param fail_loc=0
fail mds${MDTIDX},mds$((MDTIDX + 1))
wait $CLIENT_PID || error "rmdir remote dir failed"
replay_barrier mds${MDTIDX}
create_remote_dir_files_23 || error "Remote creation failed $?"
fail mds${MDTIDX}
checkstat_23 || error "check stat failed $?"
rm -rf $MOUNT1/$tdir || return 6
return 0
}
run_test 23d "c1 rmdir d1, M0 drop update reply and fail M0/M1, c2 mkdir d1"
test_24 () {
[[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.5.2) ]] ||
{ skip "Need MDS version newer than 2.5.2"; return 0; }
touch $MOUNT/$tfile
stat $MOUNT/$tfile >&/dev/null
# OBD_FAIL_MDS_REINT_NET_REP
do_facet $SINGLEMDS $LCTL set_param fail_loc=0x119
$TRUNCATE $MOUNT/$tfile 100 &
PID=$!
sleep 1
do_facet $SINGLEMDS lctl set_param fail_loc=0
# sync to release rep-ack lock quickly
do_nodes $(comma_list $(mdts_nodes)) \
"lctl set_param -n osd*.*MDT*.force_sync 1"
rm $MOUNT2/$tfile
wait
}
run_test 24 "reconstruct on non-existing object"
# end commit on sharing tests
test_25() {
cancel_lru_locks osc
$SETSTRIPE -i 0 -c 1 $DIR/$tfile
# get lock for the 1st client
dd if=/dev/zero of=$DIR/$tfile count=1 >/dev/null ||
error "failed to write data"
# get waiting locks for the 2nd client
drop_ldlm_cancel "multiop $DIR2/$tfile Ow512" &
sleep 1
# failover, replay and resend replayed waiting locks
if [ $(lustre_version_code ost1) -ge $(version_code 2.6.90) ]; then
#define OBD_FAIL_LDLM_SRV_CP_AST 0x325
do_facet ost1 lctl set_param fail_loc=0x80000325
else
#define OBD_FAIL_OST_LDLM_REPLY_NET 0x213
do_facet ost1 lctl set_param fail_loc=0x80000213
fi
fail ost1
# multiop does not finish because CP AST is skipped;
# it is ok to kill it in the test, because CP AST is already re-sent
# and it does not hang forever in real life
killall multiop
wait
}
run_test 25 "replay|resend"
cleanup_26() {
trap 0
kill -9 $tar_26_pid
kill -9 $dbench_26_pid
killall -9 dbench
}
test_26() {
local clients=${CLIENTS:-$HOSTNAME}
zconf_mount_clients $clients $MOUNT
local duration=600
[ "$SLOW" = "no" ] && duration=200
# set duration to 900 because it takes some time to boot node
[ "$FAILURE_MODE" = HARD ] && duration=900
local start_ts=$SECONDS
local rc=0
trap cleanup_26 EXIT
(
local tar_dir=$DIR/$tdir/run_tar
while true; do
test_mkdir -p -c$MDSCOUNT $tar_dir || break
if [ $MDSCOUNT -ge 2 ]; then
$LFS setdirstripe -D -c$MDSCOUNT $tar_dir ||
error "set default dirstripe failed"
fi
cd $tar_dir || break
tar cf - /etc | tar xf - || error "tar failed"
cd $DIR/$tdir || break
rm -rf $tar_dir || break
done
)&
tar_26_pid=$!
echo "Started tar $tar_26_pid"
(
local dbench_dir=$DIR2/$tdir/run_dbench
while true; do
test_mkdir -p -c$MDSCOUNT $dbench_dir || break
if [ $MDSCOUNT -ge 2 ]; then
$LFS setdirstripe -D -c$MDSCOUNT $dbench_dir ||
error "set default dirstripe failed"
fi
cd $dbench_dir || break
rundbench 1 -D $dbench_dir -t 100 &>/dev/null || break
cd $DIR/$tdir || break
rm -rf $dbench_dir || break
done
)&
dbench_26_pid=$!
echo "Started dbench $dbench_26_pid"
local num_failovers=0
local fail_index=1
while [ $((SECONDS - start_ts)) -lt $duration ]; do
kill -0 $tar_26_pid || error "tar $tar_26_pid missing"
kill -0 $dbench_26_pid || error "dbench $dbench_26_pid missing"
sleep 2
replay_barrier mds$fail_index
sleep 2 # give clients a time to do operations
# Increment the number of failovers
num_failovers=$((num_failovers + 1))
log "$TESTNAME fail mds$fail_index $num_failovers times"
fail mds$fail_index
if [ $fail_index -ge $MDSCOUNT ]; then
fail_index=1
else
fail_index=$((fail_index + 1))
fi
done
# stop the client loads
kill -0 $tar_26_pid || error "tar $tar_26_pid stopped"
kill -0 $dbench_26_pid || error "dbench $dbench_26_pid stopped"
cleanup_26 || true
}
run_test 26 "dbench and tar with mds failover"
test_28() {
$SETSTRIPE -i 0 -c 1 $DIR2/$tfile
dd if=/dev/zero of=$DIR2/$tfile bs=4096 count=1
#define OBD_FAIL_LDLM_SRV_BL_AST 0x324
do_facet ost1 $LCTL set_param fail_loc=0x80000324
dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 &
local pid=$!
sleep 2
#define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
do_facet ost1 $LCTL set_param fail_loc=0x32a
fail ost1
sleep 2
cancel_lru_locks OST0000-osc
wait $pid || error "dd failed"
}
run_test 28 "lock replay should be ordered: waiting after granted"
complete $SECONDS
SLEEP=$((SECONDS - $NOW))
[ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
[ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
check_and_cleanup_lustre
exit_status
|
sdsc/lustre-release
|
lustre/tests/replay-dual.sh
|
Shell
|
gpl-2.0
| 28,089 |
#!/bin/sh
export PROJECT_HOME=`pwd`
export ANT_HOME=${PROJECT_HOME}/tools/ant
export MAVEN_HOME=${PROJECT_HOME}/tools/maven
export PATH=.:${ANT_HOME}/bin:${MAVEN_HOME}/bin:${PATH}
chmod 755 ${ANT_HOME}/bin/*
chmod 755 ${MAVEN_HOME}/bin/*
|
hedingwei/JavaWebProjectTemplate
|
configure.sh
|
Shell
|
gpl-2.0
| 238 |
#!/bin/sh
user=`cat .oraccuser`
group=`cat .oraccgroup`
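# oraccdirs.conf is sourced in a subshell here; whatever it writes to stdout
# is used as the list of directories to chown below.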
for a in `. ./oraccdirs.conf` ; do sudo chown -R $user:$group $a ; done
|
oracc/oracc
|
prmdirs.sh
|
Shell
|
gpl-2.0
| 128 |
#!/bin/sh
test_description='add -i basic tests'
. ./test-lib.sh
if ! test_have_prereq PERL; then
say 'skipping git add -i tests, perl not available'
test_done
fi
test_expect_success 'setup (initial)' '
echo content >file &&
git add file &&
echo more >>file &&
echo lines >>file
'
test_expect_success 'status works (initial)' '
git add -i </dev/null >output &&
grep "+1/-0 *+2/-0 file" output
'
cat >expected <<EOF
new file mode 100644
index 0000000..d95f3ad
--- /dev/null
+++ b/file
@@ -0,0 +1 @@
+content
EOF
test_expect_success 'diff works (initial)' '
(echo d; echo 1) | git add -i >output &&
sed -ne "/new file/,/content/p" <output >diff &&
test_cmp expected diff
'
test_expect_success 'revert works (initial)' '
git add file &&
(echo r; echo 1) | git add -i &&
git ls-files >output &&
! grep . output
'
test_expect_success 'setup (commit)' '
echo baseline >file &&
git add file &&
git commit -m commit &&
echo content >>file &&
git add file &&
echo more >>file &&
echo lines >>file
'
test_expect_success 'status works (commit)' '
git add -i </dev/null >output &&
grep "+1/-0 *+2/-0 file" output
'
cat >expected <<EOF
index 180b47c..b6f2c08 100644
--- a/file
+++ b/file
@@ -1 +1,2 @@
baseline
+content
EOF
test_expect_success 'diff works (commit)' '
(echo d; echo 1) | git add -i >output &&
sed -ne "/^index/,/content/p" <output >diff &&
test_cmp expected diff
'
test_expect_success 'revert works (commit)' '
git add file &&
(echo r; echo 1) | git add -i &&
git add -i </dev/null >output &&
grep "unchanged *+3/-0 file" output
'
cat >expected <<EOF
EOF
cat >fake_editor.sh <<EOF
EOF
chmod a+x fake_editor.sh
test_set_editor "$(pwd)/fake_editor.sh"
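# fake_editor.sh is intentionally empty: "editing" a hunk leaves it unchanged,
# so after staging it the working tree diff should match the empty "expected".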
test_expect_success 'dummy edit works' '
(echo e; echo a) | git add -p &&
git diff > diff &&
test_cmp expected diff
'
cat >patch <<EOF
@@ -1,1 +1,4 @@
this
+patch
-doesn't
apply
EOF
echo "#!$SHELL_PATH" >fake_editor.sh
cat >>fake_editor.sh <<\EOF
mv -f "$1" oldpatch &&
mv -f patch "$1"
EOF
chmod a+x fake_editor.sh
test_set_editor "$(pwd)/fake_editor.sh"
test_expect_success 'bad edit rejected' '
git reset &&
(echo e; echo n; echo d) | git add -p >output &&
grep "hunk does not apply" output
'
cat >patch <<EOF
this patch
is garbage
EOF
test_expect_success 'garbage edit rejected' '
git reset &&
(echo e; echo n; echo d) | git add -p >output &&
grep "hunk does not apply" output
'
cat >patch <<EOF
@@ -1,0 +1,0 @@
baseline
+content
+newcontent
+lines
EOF
cat >expected <<EOF
diff --git a/file b/file
index b5dd6c9..f910ae9 100644
--- a/file
+++ b/file
@@ -1,4 +1,4 @@
baseline
content
-newcontent
+more
lines
EOF
test_expect_success 'real edit works' '
(echo e; echo n; echo d) | git add -p &&
git diff >output &&
test_cmp expected output
'
if test "$(git config --bool core.filemode)" = false
then
say 'skipping filemode tests (filesystem does not properly support modes)'
else
test_set_prereq FILEMODE
fi
test_expect_success FILEMODE 'patch does not affect mode' '
git reset --hard &&
echo content >>file &&
chmod +x file &&
printf "n\\ny\\n" | git add -p &&
git show :file | grep content &&
git diff file | grep "new mode"
'
test_expect_success FILEMODE 'stage mode but not hunk' '
git reset --hard &&
echo content >>file &&
chmod +x file &&
printf "y\\nn\\n" | git add -p &&
git diff --cached file | grep "new mode" &&
git diff file | grep "+content"
'
test_expect_success FILEMODE 'stage mode and hunk' '
git reset --hard &&
echo content >>file &&
chmod +x file &&
printf "y\\ny\\n" | git add -p &&
git diff --cached file | grep "new mode" &&
git diff --cached file | grep "+content" &&
test -z "$(git diff file)"
'
# end of tests disabled when filemode is not usable
test_expect_success 'setup again' '
git reset --hard &&
test_chmod +x file &&
echo content >>file
'
# Write the patch file with a new line at the top and bottom
cat >patch <<EOF
index 180b47c..b6f2c08 100644
--- a/file
+++ b/file
@@ -1,2 +1,4 @@
+firstline
baseline
content
+lastline
EOF
# Expected output, similar to the patch but w/ diff at the top
cat >expected <<EOF
diff --git a/file b/file
index b6f2c08..61b9053 100755
--- a/file
+++ b/file
@@ -1,2 +1,4 @@
+firstline
baseline
content
+lastline
EOF
# Test splitting the first patch, then adding both
test_expect_success 'add first line works' '
git commit -am "clear local changes" &&
git apply patch &&
(echo s; echo y; echo y) | git add -p file &&
git diff --cached > diff &&
test_cmp expected diff
'
test_done
|
asoltys/git
|
t/t3701-add-interactive.sh
|
Shell
|
gpl-2.0
| 4,506 |
#!/bin/bash
mydir=$(dirname "$BASH_SOURCE")
experimentSucceeded="$1"
stage="$2"
lastReturnVal="$3"
source "$mydir/functions_misc.sh"
source "$mydir/functions_remoteControl.sh"
remoteDir=$( cat "$mydir/remoteDir" )
logins="$driverLogin ${executorLoginList[*]}"
# Download the remote files.
download "$remoteDir/results/*" "$driverLogin" "$mydir/results"
# Create a summary of the stage parameters
bash "$mydir/stages.sh" header > "$mydir/stageInputs.txt"
bash "$mydir/stages.sh" >> "$mydir/stageInputs.txt"
# Process the remote files
echo "Cleaning up the empty files in $mydir..." >&2
find "$mydir" -type f -empty -delete
echo "Merging the stage result summary files..." >&2
stageFiles=$(ls -1 "$mydir"/results/stageResult*.txt)
stageFiles=( $stageFiles )
stageResult="$mydir/stageResultSummary.txt"
cut -d \ -f 1 "${stageFiles[0]}" > "$stageResult"
for stageFile in "${stageFiles[@]}"; do
cut -d \ -f 2 "$stageFile" | paste "$stageResult" - > "$stageResult.tmp"
mv "$stageResult.tmp" "$stageResult"
done
echo "Merging the power summary files..." >&2
powerFiles=$(ls -1 "$mydir"/powerData/avgPower*-out.log)
powerFiles=( $powerFiles )
powerResult="$mydir/powerResultSummary.txt"
cut -d \ -f 1 "${powerFiles[0]}" > "$powerResult"
for powerFile in "${powerFiles[@]}"; do
cut -d \ -f 2 "$powerFile" | paste "$powerResult" - > "$powerResult.tmp"
mv "$powerResult.tmp" "$powerResult"
done
source "$mydir/functions_powerMonitor.sh"
shutdownPowerMonitor
echo "Collecting the detailed stage summaries..." >&2
stageFiles=$( ls -1 "$mydir"/results/stage*-err.log )
stageFiles=( $stageFiles )
stageResult="$mydir/results/stageSummary"
ids=()
updateIDs=()
queryIDs=()
arr1=()
arr2=()
arr3=()
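# For every per-stage error log, rebuild one column per request id:
# update/query flag, local id, start/submit/finish timestamps, duration,
# affected percentage and resulting version (extracted by the grep/sed
# pipelines below).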
for stageFile in "${stageFiles[@]}"; do
s=$( echo "$stageFile" | sed 's/.*stage\([0-9]\+\).*/\1/g' )
echo " Processing stage $s ($stageFile)..." >&2
outFile="$stageResult$s.txt"
#echo -e "id localID isUpdate startTime submittedTime finishTime duration percentAffected version" > "$outFile"
while read line; do
l=($line)
id=${l[2]}
if [ "${l[0]}" == "u" ]; then
updateIDs[${l[1]}]=$id
arr1[$id]=1
else
queryIDs[${l[1]}]=$id
arr1[$id]=0
fi
arr2[$id]=${l[1]}
ids[$id]=$id
done < <(grep 'Thread submitted [uq][0-9]\+ (#[0-9]\+' "$stageFile" | sed 's/.*Thread submitted \([uq]\)\([0-9]\+\) (#\([0-9]\+\).*/\1 \2 \3/g')
echo "id ${ids[*]}" > "$outFile"
echo "isUpdate ${arr1[*]}" >> "$outFile"
echo "qid ${arr2[*]}" >> "$outFile"
for i in "${ids[@]}"; do
arr1[$i]=""
done
while read line; do
l=($line)
if [ "${l[0]}" == "u" ]; then
id=${updateIDs[${l[1]}]}
else
id=${queryIDs[${l[1]}]}
fi
arr1[$id]=${l[2]}
done < <(grep '[uq][0-9]\+:.*started at [0-9]\+' "$stageFile" | sed -e 's/.*\([uq]\)\([0-9]\+\):.*started at \([0-9]\+\).*/\1 \2 \3/g')
echo "startTime ${arr1[*]}" >> "$outFile"
for i in "${ids[@]}"; do
arr1[$i]=""
arr2[$i]=""
done
while read line; do
l=($line)
if [ "${l[0]}" == "u" ]; then
id=${updateIDs[${l[1]}]}
else
id=${queryIDs[${l[1]}]}
fi
arr1[$id]=${l[2]}
arr2[$id]=${l[3]}
done < <(grep '[uq][0-9]\+:.*will affect.* [0-9\.eE+\-]\+ %.*submitting at [0-9]' "$stageFile" | sed -e 's/.*\([uq]\)\([0-9]\+\):.*will affect.* \([0-9\.eE+\-]\+\) %.*submitting at \([0-9]\+\).*/\1 \2 \3 \4/g')
echo "percentAffected ${arr1[*]}" >> "$outFile"
echo "submittedTime ${arr2[*]}" >> "$outFile"
for i in "${ids[@]}"; do
arr1[$i]=""
arr2[$i]=""
arr3[$i]=""
done
while read line; do
l=($line)
if [ "${l[0]}" == "u" ]; then
id=${updateIDs[${l[1]}]}
else
id=${queryIDs[${l[1]}]}
fi
arr1[$id]=${l[2]}
arr2[$id]=${l[3]}
arr3[$id]=${l[4]}
done < <(grep '[uq][0-9]\+:.*[fF]inished in [0-9]\+ ms at [0-9]\+.*version is [0-9]\+' "$stageFile" | sed -e 's/.*\([uq]\)\([0-9]\+\):.*[fF]inished in \([0-9]\+\) ms at \([0-9]\+\).*version is \([0-9]\+\).*/\1 \2 \3 \4 \5/g')
echo "duration ${arr1[*]}" >> "$outFile"
echo "finishTime ${arr2[*]}" >> "$outFile"
echo "version ${arr3[*]}" >> "$outFile"
done
#lastStage=$(bash "$mydir/stages.sh" | wc -l - | cut -d \ -f 1)
#lastStage=$[ $lastStage - 1 ]
#oIFS="$IFS"
#IFS=$'\n'
#read -d '' -r -a stages < <(bash "$resultDir/$localStageGenerator")
#IFS="$oIFS"
#downloadMultiple "$remoteDir/powerCapOutput/*" "${executorLoginList[*]}" "$mydir/powerCapOutput"
if [ "$4" == "test" ]; then
echo "This was a test, so we are leaving $remoteDir on $driverLogin ${executorLoginList[*]}"
else
# Delete temporary remote files.
if [ "$experimentSucceeded" == 1 ]; then
echo "Temporary files deleted from $remoteDir on $driverLogin ${executorLoginList[*]}." >&2
runRemoteMultiple "rm -r '$remoteDir'" "$driverLogin ${executorLoginList[*]}"
else
echo "The experiment did not succeed. Temporary files were left in $remoteDir on $driverLogin ${executorLoginList[*]}." >&2
runRemote "bash '$remoteDir/go_cleanup.sh'" "$driverLogin"
fi
baseDir=$(readlink -f "$mydir")
baseDir=$(basename "$baseDir")
dropboxDir="/cygdrive/c/Users/Josiah/Dropbox/Research/2017/DynamicBigData/paper/$baseDir"
mkdir -p "$dropboxDir"
cp $mydir/stageInputs.txt $mydir/stageResultSummary.txt $mydir/powerResultSummary.txt "$dropboxDir"
fi
|
jcmcclurg/serverpower
|
experiments/scripts/ua_power_exp/experiment_finished.sh
|
Shell
|
gpl-2.0
| 5,178 |
#!/bin/bash -x
# Openstack icehouse installation script
# on ubuntu 14.04 by kasidit chanchio
# vasabilab, dept of computer science, Thammasat University, Thailand
# copyright 2014
#
#
cd $HOME/OPSInstaller/controller
pwd
echo "Run this script as a user."
printf "\n1. drop and create neutron database...press"
#read varkey
mysql -u root -pmysqlpassword -e "DROP DATABASE neutron;"
mysql -u root -pmysqlpassword -e "CREATE DATABASE neutron;"
mysql -u root -pmysqlpassword -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '5b66f330dd9ab789b5ba';"
mysql -u root -pmysqlpassword -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '5b66f330dd9ab789b5ba';"
|
kasidit/openstack-mitaka-installer
|
documents/OPSInstaller.example/controller/exe-stage52-USER-odl-neutron-database.sh
|
Shell
|
gpl-2.0
| 701 |
#!/bin/sh
###########################################################################
#
# Author: Augustin Manecy
#
# Copyright (C) 2011-2014 Augustin Manecy
#
# [email protected]
#
###########################################################################
#
# This file is part of RT-MaG Toolbox.
#
# RT-MaG Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RT-MaG Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RT-MaG Toolbox. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
#
# This version of GPL is at https://www.gnu.org/licenses/gpl-3.0.txt
#
###########################################################################
################# PutFTPa_DOS.sh #################
#
# This shell script allows to put a file (given in argument)
# in ascii mode to the remote FTP server
#
# USAGE (for the first downloading):
#
# 1) On your Gumstix, type:
# wget ftp://overo:[email protected]/PutFTPa_DOS.sh
# ^ ^ ^
# | | |
# login password server's IP
#
# 2) Then convert your DOS file to a UNIX file:
# sed 's/.$//' PutFTPa_DOS.sh > PutFTPa
# rm PutFTPa_DOS.sh
#
# 3) Make your script exectable:
# chmod +x PutFTPa
#
###############################################
HOST='192.168.137.1'
USER='overo'
PASSWD='gumstix'
if test $# -lt 1
then
echo "SYNTAX ERROR: $0 <file name>"
exit 1
fi
ftp -n -v $HOST << EOT
user $USER $PASSWD
prompt
ascii
put $1
bye
EOT
|
augustinmanecy/meta-rt-mag-overo
|
recipes-rt-mag/files/temp/PutFTPa_DOS.sh
|
Shell
|
gpl-2.0
| 2,095 |
laniface="eth1"
echo "Performing MITM on $laniface"
iptables -t nat -A PREROUTING -j REDIRECT -i $laniface -p tcp -m tcp --to-ports 20755
iptables -t nat -A PREROUTING -j REDIRECT -i $laniface -p udp -m udp --to-ports 20755
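# To undo the redirection once the MITM session is over (same parameters as above):
# iptables -t nat -D PREROUTING -j REDIRECT -i $laniface -p tcp -m tcp --to-ports 20755
# iptables -t nat -D PREROUTING -j REDIRECT -i $laniface -p udp -m udp --to-ports 20755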
|
ausarbluhd/EternalLLC
|
scripts/mallory/scripts/config_mallory.sh
|
Shell
|
gpl-2.0
| 225 |
#!/bin/bash
# ------------------------------------------------------------------------------------
# Run script for lsm at FLUXNET sites
# This script calls LSMSRunArray.sh for all sites given in the file "site_list"
# This script assumes that the SITE_LIST and LSMSRunArray.sh file exist
# in the same directory
# ------------------------------------------------------------------------------------
# Settings:
NPROC=32 # number of CPUs to be used
SITE_LIST=all_sites_list.txt # site list file to be used
FORCING_TSTEP=TIMESTEP # time step of forcing (TIMESTEP or DAILY)
CODEVERSION=rev272 # name extension of executable
EXP_ID=${CODEVERSION} # name of current experiment
RESTART=0 # whether to use existing restart file or not
RESTART_ID=${CODEVERSION} # name of experiment to restart from
LOCALTIME=TRUE # whether forcing timeaxis is local (TRUE) or GMT (FALSE)
COMPILER=x86_64-pgi
#PATHs
OUT_PATH=/public1/home/luolh/CLM4.0/Output
FORCING_PATH=/public1/home/luolh/FLUXNET/Forcing_data
OBS_PATH=/public1/home/luolh/FLUXNET/Obs_data
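# NOTE: CODE_BASE_PATH is not defined in this script; it is assumed to be
# exported by the calling environment before paths.txt is written below.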
# set experiment id and code version to general_settings.txt
echo ${EXP_ID} ${CODEVERSION} ${RESTART} ${RESTART_ID} ${FORCING_TSTEP} ${LOCALTIME} > ${PWD}/general_settings.txt
echo ${CODE_BASE_PATH} ${COMPILER} ${FORCING_PATH} ${OBS_PATH} ${OUT_PATH} > ${PWD}/paths.txt
# cp SITE_LIST to site_list.txt file
if [[ ${SITE_LIST} != 'site_list.txt' ]] ; then
cp ${SITE_LIST} site_list.txt
fi
NSITE=$(wc -l site_list.txt | cut -c1-8 | awk '{print $1}')
# array starts at 2 because first line in site_list.txt is a header
bsub -J lsm_s[2-${NSITE}]%${NPROC} -e run_lsm_site.%I.e -o run_lsm_site.%I.o < ./LSMSRunArray.sh
bsub -w "ended(lsm_s[2-${NSITE}])" -J cleanup -e run_lsm_cleanup.e -o run_lsm_cleanup.o < ./LSMSRunCLeanup.sh
|
iffylaw/PPLSMS
|
Integration/LSMSSiteRun.sh
|
Shell
|
gpl-2.0
| 2,027 |
#!/bin/bash
# Shell script that brings up the can0 interface for the CANable transceiver
# See the CANable Getting Started page for details
# Written by Evan Yand for Portland State Aerospace Society
# 06/2017 licensed GPLv3
case $1 in
"-10") sudo slcand -o -c -s0 /dev/serial/by-id/*CANtact*-if00 can0;;
"-20") sudo slcand -o -c -s1 /dev/serial/by-id/*CANtact*-if00 can0;;
"-50") sudo slcand -o -c -s2 /dev/serial/by-id/*CANtact*-if00 can0;;
"-100") sudo slcand -o -c -s3 /dev/serial/by-id/*CANtact*-if00 can0;;
"-125") sudo slcand -o -c -s4 /dev/serial/by-id/*CANtact*-if00 can0;;
"-250") sudo slcand -o -c -s5 /dev/serial/by-id/*CANtact*-if00 can0;;
"-500") sudo slcand -o -c -s6 /dev/serial/by-id/*CANtact*-if00 can0;;
"-750") sudo slcand -o -c -s7 /dev/serial/by-id/*CANtact*-if00 can0;;
"-1000") sudo slcand -o -c -s8 /dev/serial/by-id/*CANtact*-if00 can0;;
"-h")
echo "Use the following options to set the bus speed in kbps:
-10
-20
-50
-100
-125
-250
-500
-750
-1000
Use the -h option to show this message.
Example:
$ ./CANup.sh -10
Troubleshooting:
- Double check that the CANable transceiver is connected and powered on.
- Restart the transceiver if necessary."
exit 1;;
*)
echo "Bus speed must be selected. Enter ./CANup.sh -h to see options"
exit 1;;
esac
sudo ifconfig can0 up
if [ $? = "0" ]
then
echo "CAN interface is active on can0. Use the can-utils package to access."
else
echo "Error configuring interface. Check connection and retry."
fi
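# Quick sanity check once the interface is up (assumes the can-utils package
# is installed):
# candump can0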
|
oresat/devsat
|
scripts/CANup.sh
|
Shell
|
gpl-2.0
| 1,651 |
#!/bin/bash
if [ $# -lt 3 ]
then
echo "Need 3 arguments but provided $#"
echo "./lostExp.sh numOfBlocks numOfChallenges serverlist"
exit 1
fi
echo "Computation times" > "time"
port=9191
cat $3 | while read node
do
echo ${node}
echo -e "${node} \t \c" >> "time"
unode="ucalgary_nashad2@${node}"
ssh -i id_rsa $unode >> "time" << ENDCMD
cd /home/ucalgary_nashad2/LoST/server/ > /dev/null
killall pserver &> /dev/null
./pserver $port | grep Average | tr -d "Average Computation Time:" &
cd ../
./pclient $1 $2 "localhost" $port &> /dev/null
cd server
killall pserver &> /dev/null
ENDCMD
done
|
malimome/LoSt
|
compTime.sh
|
Shell
|
gpl-2.0
| 627 |
#!/bin/bash
# Global variables:
# ${GV_LOG}: Prefix this variable in echo to log echoed string.
SCRIPT_NAME="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
echo "${GV_LOG}>>>>>>>>> Running ${SCRIPT_NAME} ..."
# Install qalculate-gtk
apt-get -y --force-yes install qalculate-gtk
# Log
echo "${GV_LOG} * Install calculator: qalculate-gtk."
# REJECTION:
# Note: Gnome has 52528K overhead.
#
# * Running inst-std-accessories-calc-calcoo.sh ...
# * After this operation, 123 kB of additional disk space will be used.
# * Install calculator: calcoo.
# * Disk size = 2715708K. Space Used = 220K.
# * Running inst-std-accessories-calc-gnome-calculator.sh ...
# * After this operation, 66.4 MB of additional disk space will be used.
# * Install calculator: gnome-calculator.
# * Disk size = 2777428K. Space Used = 61720K.
# * Running inst-std-accessories-calc-qalculate-gtk.sh ...
# * After this operation, 8,248 kB of additional disk space will be used.
# * Install calculator: qalculate-gtk.
# * Disk size = 2788156K. Space Used = 10728K.
# * Running inst-std-accessories-calc-speedcrunch.sh ...
# * After this operation, 1,830 kB of additional disk space will be used.
# * Install calculator: speedcrunch.
# * Disk size = 2791292K. Space Used = 3136K.
# * Running inst-std-accessories-calc-wcalc.sh ...
# * After this operation, 1,170 kB of additional disk space will be used.
# * Install calculator: wcalc.
# * Disk size = 2793116K. Space Used = 1824K.
# * Running inst-std-accessories-calc-x11-apps.sh ...
# * After this operation, 20.8 MB of additional disk space will be used.
# * Install calculator: xcalc.
# * Disk size = 2819800K. Space Used = 26684K.
# * Running inst-std-accessories-galculator.sh ...
# * After this operation, 1,424 kB of additional disk space will be used.
# * Install Calculator: galculator.
# * Insert galculator in Accessories menu.
# * Disk size = 2825140K. Space Used = 1520K.
|
limelime/cust-live-deb
|
scripts-rejected/inst-std-accessories-calc-qalculate-gtk/inst-std-accessories-calc-qalculate-gtk.sh
|
Shell
|
gpl-2.0
| 2,023 |
#!/bin/bash
function update(){
filename=$1
suffix=$2
for e in `cat $filename`; do
./MiGupdate_resource_configuration.sh "${e}${suffix}.config"
done
echo "done"
}
function usage(){
echo "Usage: UpdateClusterConfiguration.sh filename 'suffix_that_makes_hostname_unique'"
echo "Example: UpdateClusterConfiguration.sh imadamaskiner '.0'"
}
if [ $# -eq 2 ]; then
update $1 $2
else
usage
fi
|
heromod/migrid
|
mig/resource/UpdateClusterConfiguration.sh
|
Shell
|
gpl-2.0
| 413 |
#!/bin/sh
# Script to create a release tar.gz from subversion export and (not yet implemented)
# upload to the download section of googlecode.
# Created by Paul Gevers <[email protected]> (30 Aug 2009)
# Copyright 2009 by Matthew Weatherford <[email protected]>
VERSION="1.1.1"
SVN="/usr/bin/svn"
TAR="/bin/tar"
REPOSITORY="http://winff.googlecode.com/svn/trunk/%20winff%20--username%20bggmtt"
TMPDIR=`/bin/mktemp -d -t winff.XXXXXX` || exit 1
cd $TMPDIR
pwd
$SVN export --username bggmtt $REPOSITORY winff
cd winff
rm -rf debian hardy intrepid jaunty win32setup
cd $TMPDIR
$TAR -czf winff-${VERSION}-source.tar.gz winff
echo "Source file can be found at ${TMPDIR}/winff-${VERSION}-source.tar.gz"
|
lion-simba/WinFF
|
scripts/create_tar_from_svn.sh
|
Shell
|
gpl-3.0
| 676 |
docker run -d --link my-rabbit:rabbit -e PATAVI_BROKER_HOST=rabbit \
--name patavi-mcda-worker addis/patavi-smaa-worker
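# Assumes a RabbitMQ container named "my-rabbit" is already running, e.g.
# (a sketch; the image tag may differ in your setup):
# docker run -d --hostname my-rabbit --name my-rabbit rabbitmq:3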
|
DanielReid/mcda-elicitation-web
|
run-worker.sh
|
Shell
|
gpl-3.0
| 122 |