code (string, 2–1.05M chars) | repo_name (string, 5–110 chars) | path (string, 3–922 chars) | language (1 class) | license (15 classes) | size (int64, 2–1.05M)
---|---|---|---|---|---
#!/usr/bin/env bash
cd "$(dirname "${BASH_SOURCE[0]}")" \
&& . "utils.sh"
declare -a FILES_TO_SYMLINK=(
"shell/bash_aliases"
"shell/bash_autocomplete"
"shell/bash_bash-it"
"shell/bash_direnv"
"shell/bash_exports"
"shell/bash_functions"
"shell/bash_logout"
"shell/bash_options"
"shell/bash_profile"
"shell/bash_prompt"
"shell/bash_z"
"shell/bashrc"
"shell/curlrc"
"shell/inputrc"
"shell/screenrc"
"git/gitattributes"
"git/gitconfig"
"git/gitignore"
"git/git_commit_message_template"
"npm/npmrc"
"nvm/nvm/default-packages"
"vim/vim"
"vim/vimrc"
"tmux/tmux.conf"
"sshrc/sshrc"
"other/czrc"
"other/cz-config.js"
"other/ideavimrc"
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
main() {
local i=""
local sourceFile=""
local targetFile=""
local skipQuestions=false
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
skip_questions "$@" \
&& skipQuestions=true
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for i in "${FILES_TO_SYMLINK[@]}"; do
sourceFile="$(cd .. && pwd)/$i"
# sed 'substitute' command (See http://www.grymoire.com/Unix/Sed.html#uh-1)
# using regex '.*\/\(.*\)' and replacing topic folder with its contents
# For example, 'shell/bash_aliases' to 'bash_aliases'
targetFile="$HOME/.$(printf "%s" "$i" | sed "s/[^\/]*\/\(.*\)/\1/g")"
if [ "$(readlink "$targetFile")" == "$sourceFile" ]; then
print_success "$targetFile β $sourceFile"
elif [ ! -e "$targetFile" ] || $skipQuestions; then # -e : True if file exists (regardless of type).
execute_without_spinner \
"ln -fs $sourceFile $targetFile" \
"$targetFile β $sourceFile"
# ln : make links between files
# -f : force = remove existing destination files
# -s : symbolic =make symbolic links instead of hard links (see https://www.youtube.com/watch?v=aO0OkNxDJ3c)
else
if ! $skipQuestions; then
ask_for_confirmation "'$targetFile' already exists, do you want to overwrite it?"
if answer_is_yes; then
rm -rf "$targetFile"
execute_without_spinner \
"ln -fs $sourceFile $targetFile" \
"$targetFile β $sourceFile"
else
print_error "$targetFile β $sourceFile"
fi
fi
fi
done
}
# Pass '-y' to script to skip questions
main "$@"
|
wingy3181/dotfiles
|
src/os/create_symbolic_links.sh
|
Shell
|
mit
| 2,719 |
#!/bin/sh
RESOURCES=./asn_report/resources
GEOIP_FILE=GeoIPASNum2.csv
PYASN_FILE=ip_to_asn.db
PYASN_PATH=$RESOURCES/$PYASN_FILE
GEOIP_PATH=$RESOURCES/$GEOIP_FILE
pyasn_util_download.py --latest && \
pyasn_util_convert.py --single rib.*bz2 $PYASN_PATH
curl -L \
http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum2.zip\
> GeoIPASNum2-tmp.zip && unzip GeoIPASNum2-tmp.zip \
-d $RESOURCES/
rm GeoIPASNum2-tmp.zip
git add $PYASN_PATH
git add $GEOIP_PATH
|
coxley/asn_report
|
update-databases.sh
|
Shell
|
mit
| 518 |
#!/usr/bin/env bash
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test marking of spent outputs
# Create a transaction graph with four transactions,
# A/B/C/D
# C spends A
# D spends B and C
# Then simulate C being mutated, to create C'
# that is mined.
# A is still (correctly) considered spent.
# B should be treated as unspent
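# Sketch of the graph (C' is the mutated copy of C):
#
#     A     B
#     |     |
#     C     |
#      \   /
#       \ /
#        D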
if [ $# -lt 1 ]; then
echo "Usage: $0 path_to_binaries"
echo "e.g. $0 ../../src"
exit 1
fi
set -f
BITCOIND=${1}/renamedcoind
CLI=${1}/renamedcoin-cli
DIR="${BASH_SOURCE%/*}"
SENDANDWAIT="${DIR}/send.sh"
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi
. "$DIR/util.sh"
D=$(mktemp -d test.XXXXX)
# Two nodes; one will play the part of merchant, the
# other an evil transaction-mutating miner.
D1=${D}/node1
CreateDataDir $D1 port=11000 rpcport=11001
B1ARGS="-datadir=$D1 -debug=mempool"
$BITCOIND $B1ARGS &
B1PID=$!
D2=${D}/node2
CreateDataDir $D2 port=11010 rpcport=11011
B2ARGS="-datadir=$D2 -debug=mempool"
$BITCOIND $B2ARGS &
B2PID=$!
# Wait until both nodes are at the same block number
function WaitBlocks {
while :
do
sleep 1
declare -i BLOCKS1=$( GetBlocks $B1ARGS )
declare -i BLOCKS2=$( GetBlocks $B2ARGS )
if (( BLOCKS1 == BLOCKS2 ))
then
break
fi
done
}
# Wait until node has $N peers
function WaitPeers {
while :
do
declare -i PEERS=$( $CLI $1 getconnectioncount )
if (( PEERS == "$2" ))
then
break
fi
sleep 1
done
}
echo "Generating test blockchain..."
# Start with B2 connected to B1:
$CLI $B2ARGS addnode 127.0.0.1:11000 onetry
WaitPeers "$B1ARGS" 1
# 2 blocks, 50 XBT each == 100 XBT
# These will be transactions "A" and "B"
$CLI $B1ARGS setgenerate true 2
WaitBlocks
# 100 blocks, 0 mature == 0 XBT
$CLI $B2ARGS setgenerate true 100
WaitBlocks
CheckBalance "$B1ARGS" 100
CheckBalance "$B2ARGS" 0
# restart B2 with no connection
$CLI $B2ARGS stop > /dev/null 2>&1
wait $B2PID
$BITCOIND $B2ARGS &
B2PID=$!
B1ADDRESS=$( $CLI $B1ARGS getnewaddress )
B2ADDRESS=$( $CLI $B2ARGS getnewaddress )
# Transaction C: send-to-self, spend A
TXID_C=$( $CLI $B1ARGS sendtoaddress $B1ADDRESS 50.0)
# Transaction D: spends B and C
TXID_D=$( $CLI $B1ARGS sendtoaddress $B2ADDRESS 100.0)
CheckBalance "$B1ARGS" 0
# Mutate TXID_C and add it to B2's memory pool:
RAWTX_C=$( $CLI $B1ARGS getrawtransaction $TXID_C )
# ... mutate C to create C'
L=${RAWTX_C:82:2}
NEWLEN=$( printf "%x" $(( 16#$L + 1 )) )
MUTATEDTX_C=${RAWTX_C:0:82}${NEWLEN}4c${RAWTX_C:84}
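# (How the slicing above works -- a sketch, assuming the standard raw
# transaction layout: hex offset 82 is byte 41, i.e. right after the 4-byte
# version, 1-byte input count and 36-byte outpoint, so ${RAWTX_C:82:2} is the
# scriptSig length varint. Bumping it by one and prefixing the script data
# with 4c (OP_PUSHDATA1) leaves the spend valid but changes the txid.)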
# ... give the mutated C' to B2:
MUTATEDTXID=$( $CLI $B2ARGS sendrawtransaction $MUTATEDTX_C )
echo "TXID_C: " $TXID_C
echo "Mutated: " $MUTATEDTXID
# Re-connect nodes, and have both nodes mine some blocks:
$CLI $B2ARGS addnode 127.0.0.1:11000 onetry
WaitPeers "$B1ARGS" 1
# Having B2 mine the next block puts the mutated
# transaction C in the chain:
$CLI $B2ARGS setgenerate true 1
WaitBlocks
# B1 should still be able to spend 100, because D is conflicted
# so does not count as a spend of B
CheckBalance "$B1ARGS" 100
$CLI $B2ARGS stop > /dev/null 2>&1
wait $B2PID
$CLI $B1ARGS stop > /dev/null 2>&1
wait $B1PID
echo "Tests successful, cleaning up"
rm -rf $D
exit 0
|
Earlz/renamedcoin
|
qa/rpc-tests/conflictedbalance.sh
|
Shell
|
mit
| 3,380 |
#!/bin/bash
set -o errexit
git clone --branch gh-pages "https://[email protected]/${TRAVIS_REPO_SLUG}.git" deploy_docs
cd deploy_docs
git config user.name "Sean McArthur"
git config user.email "[email protected]"
if [ "$TRAVIS_TAG" = "" ]; then
rm -rf master
mv ../target/doc ./master
echo "<meta http-equiv=refresh content=0;url=hyper/index.html>" > ./master/index.html
else
rm -rf $TRAVIS_TAG
mv ../target/doc ./$TRAVIS_TAG
echo "<meta http-equiv=refresh content=0;url=hyper/index.html>" > ./$TRAVIS_TAG/index.html
latest=$(echo * | tr " " "\n" | sort -V -r | head -n1)
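# (echo * expands to everything in the gh-pages root; version-sort it in
# descending order and keep the first entry, i.e. the highest version tag)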
if [ "$TRAVIS_TAG" = "$latest" ]; then
echo "<meta http-equiv=refresh content=0;url=$latest/hyper/index.html>" > index.html
fi
fi
git add -A .
git commit -m "rebuild pages at ${TRAVIS_COMMIT}"
git push --quiet origin gh-pages
|
softprops/hyper
|
.travis/docs.sh
|
Shell
|
mit
| 856 |
#!/bin/bash
set -ex
REPO="[email protected]:f2prateek/ln.git"
GROUP_ID="com.f2prateek.ln"
ARTIFACT_ID="ln"
DIR=temp-clone
# Delete any existing temporary website clone
rm -rf $DIR
# Clone the current repo into temp folder
git clone $REPO $DIR
# Move working directory into temp folder
cd $DIR
# Checkout and track the gh-pages branch
git checkout -t origin/gh-pages
# Delete everything
rm -rf *
# Download the latest javadoc
curl -L "http://repository.sonatype.org/service/local/artifact/maven/redirect?r=central-proxy&g=$GROUP_ID&a=$ARTIFACT_ID&v=LATEST&c=javadoc" > javadoc.zip
unzip javadoc.zip
rm javadoc.zip
# Stage all files in git and create a commit
git add .
git add -u
git commit -m "Website at $(date)"
# Push the new files up to GitHub
git push origin gh-pages
# Delete our temp folder
cd ..
rm -rf $DIR
|
f2prateek/ln
|
deploy_javadoc.sh
|
Shell
|
mit
| 825 |
#!/bin/sh
set -eu
if [ "${SKIP_AWAIT:-false}" = "true" ]; then
echo "You're evil... But that's OK. Skipping the await."
exit 0
fi
splay=$((60*60))
# The following sleep monstrosity deterministically sleeps for a
# period of time between 0-${splay} seconds in order to prevent all our
# deletion jobs running at the same time. See the commit message for
# how it works.
sum=$(echo "${DEPLOY_ENV}" | md5sum);
short=$(echo "${sum}" | cut -b 1-15)
decimal=$((0x${short}));
sleeptime=$((${decimal##-} % splay));
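# (Why the arithmetic above is safe, as a sketch: 15 hex digits are at most
# 60 bits, well within the shell's signed 64-bit integers, so the 0x value
# can never overflow or go negative; the ${decimal##-} minus-stripping is
# purely defensive.)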
echo "Sleeping for ${sleeptime} seconds before continuing..."
sleep ${sleeptime}
|
alphagov/paas-cf
|
concourse/scripts/sleep_for_deploy_env.sh
|
Shell
|
mit
| 600 |
#!/bin/bash
source $(dirname $0)/config.sh
XPOS=$((580 + $XOFFSET))
WIDTH="110"
LINES="12"
#totaldays=$(date +"%j")
#totalweeks=$(date +"%U")
#timealivesecs=$(date -d 1990-09-26 +%s)
#timealivedays=$(( $timealivesecs / 86400 ))
time=$(TZ="America/New_York" date | awk -F " " '{print $4}')
calendar=$(cal -1)
datea=$(date +%a)
dateb=$(date +%b)
dated=$(date +%d)
datey=$(date +%Y)
(echo " "; echo " ^fg($highlight)$datea $dateb $dated $datey"; echo " "; echo "$calendar"; echo " "; echo "^fg($highlight) ^ca(1,/home/bryan/.xmonad/scripts/dzen_date_prev.sh)PREV ^ca()^ca(1,/home/bryan/.xmonad/scripts/dzen_date_next.sh) NEXT^ca()"; sleep 15) | dzen2 -fg $foreground -bg $background -fn $FONT -x $XPOS -y $YPOS -w $WIDTH -l $LINES -e 'onstart=uncollapse,hide;button1=exit;button3=exit'
|
Bryan792/dotfiles
|
xmonad/xmonad.symlink/scripts/dzen_date.sh
|
Shell
|
mit
| 796 |
### #1: ./Soccer Tips & Soccer Advice/Soccer Positions Explained.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Positions Explained.mp4' './Soccer Tips & Soccer Advice/Soccer Positions Explained.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 87 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Positions Explained.mp4' './How to improve Soccer conditioning & Soccer fitness/22 - Soccer Positions Explained.mp4'
### #2: ./Soccer Tips & Soccer Advice/Football Dribbling Tips - How to dribble.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Football Dribbling Tips - How to dribble.mp4' './Soccer Tips & Soccer Advice/Football Dribbling Tips - How to dribble.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Football Dribbling Tips - How to dribble.mp4' './Top Soccer Training Videos/Football Dribbling Tips - How to dribble.mp4'
### #3: ./Soccer Tips & Soccer Advice/Soccer Training Guide.webm
#### 0
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Training Guide.webm' './Soccer Tips & Soccer Advice/Soccer Training Guide.webm'
#### 1
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './How to improve Soccer passing & receiving skills/Soccer Training Guide.webm'
#### 2
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './How to improve Soccer ball control skills/Soccer Training Guide.webm'
#### 3
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './Youth Soccer Training Drills/Soccer Training Guide.webm'
#### 4
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './How to improve Soccer shooting skills & finishing/Soccer Training Guide.webm'
#### 5
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './Top Soccer Training Videos/Soccer Training Guide.webm'
#### 6
.webm .webm false
[ "$FSIM_MIN" ] && [ 87 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './How to improve Soccer conditioning & Soccer fitness/32 - Soccer Training Guide.webm'
#### 7
.webm .webm false
[ "$FSIM_MIN" ] && [ 87 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './At Home Soccer Training Drills/20 - Soccer Training Guide.webm'
#### 8
.webm .webm false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Training Guide.webm' './How to improve Soccer dribbling skills and fast footwork/Soccer Training Guide.webm'
### #4: ./Soccer Tips & Soccer Advice/Soccer Tips - How to play midfielder.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Tips - How to play midfielder.mp4' './Soccer Tips & Soccer Advice/Soccer Tips - How to play midfielder.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Tips - How to play midfielder.mp4' './Top Soccer Training Videos/Soccer Tips - How to play midfielder.mp4'
### #5: ./Soccer Tips & Soccer Advice/Football Tips - How to play football.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Football Tips - How to play football.mp4' './Soccer Tips & Soccer Advice/Football Tips - How to play football.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Football Tips - How to play football.mp4' './Top Soccer Training Videos/Football Tips - How to play football.mp4'
### #6: ./Soccer Tips & Soccer Advice/Soccer Shooting Tips - How to Shoot a Soccer Ball.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Shooting Tips - How to Shoot a Soccer Ball.mp4' './Soccer Tips & Soccer Advice/Soccer Shooting Tips - How to Shoot a Soccer Ball.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Soccer Tips & Soccer Advice/Soccer Shooting Tips - How to Shoot a Soccer Ball.mp4' './Top Soccer Training Videos/Soccer Shooting Tips - How to Shoot a Soccer Ball.mp4'
### #7: ./How to improve Soccer passing & receiving skills/Football Passing Drills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Football Passing Drills.mp4' './How to improve Soccer passing & receiving skills/Football Passing Drills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer passing & receiving skills/Football Passing Drills.mp4' './Youth Soccer Training Drills/Football Passing Drills.mp4'
### #8: ./How to improve Soccer passing & receiving skills/Soccer Passing Drills For Youth.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Passing Drills For Youth.mp4' './How to improve Soccer passing & receiving skills/Soccer Passing Drills For Youth.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer passing & receiving skills/Soccer Passing Drills For Youth.mp4' './Youth Soccer Training Drills/Soccer Passing Drills For Youth.mp4'
### #9: ./How to improve Soccer passing & receiving skills/Soccer Passing Drills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Passing Drills.mp4' './How to improve Soccer passing & receiving skills/Soccer Passing Drills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer passing & receiving skills/Soccer Passing Drills.mp4' './Youth Soccer Training Drills/Soccer Passing Drills.mp4'
### #10: ./How to improve Soccer passing & receiving skills/Soccer Ball Control Drills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Ball Control Drills.mp4' './How to improve Soccer passing & receiving skills/Soccer Ball Control Drills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer passing & receiving skills/Soccer Ball Control Drills.mp4' './Youth Soccer Training Drills/Soccer Ball Control Drills.mp4'
#### 2
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer passing & receiving skills/Soccer Ball Control Drills.mp4' './Top Soccer Training Videos/Soccer Ball Control Drills.mp4'
### #11: ./How to improve Soccer passing & receiving skills/At Home Soccer Drills - Passing Drills For Soccer.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'At Home Soccer Drills - Passing Drills For Soccer.mp4' './How to improve Soccer passing & receiving skills/At Home Soccer Drills - Passing Drills For Soccer.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 96 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer passing & receiving skills/At Home Soccer Drills - Passing Drills For Soccer.mp4' './At Home Soccer Training Drills/07 - At Home Soccer Drills - Passing Drills For Soccer.mp4'
### #12: ./How to improve Soccer ball control skills/How To Control A Soccer Ball Like Zidane.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Control A Soccer Ball Like Zidane.mp4' './How to improve Soccer ball control skills/How To Control A Soccer Ball Like Zidane.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How To Control A Soccer Ball Like Zidane.mp4' './Try These Soccer Drills/How To Control A Soccer Ball Like Zidane.mp4'
### #13: ./How to improve Soccer ball control skills/Soccer Ball Control Exercises For Kids.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Ball Control Exercises For Kids.mp4' './How to improve Soccer ball control skills/Soccer Ball Control Exercises For Kids.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/Soccer Ball Control Exercises For Kids.mp4' './Top Soccer Training Videos/Soccer Ball Control Exercises For Kids.mp4'
### #14: ./How to improve Soccer ball control skills/How To Control A Football.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Control A Football.mp4' './How to improve Soccer ball control skills/How To Control A Football.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How To Control A Football.mp4' './Top Soccer Training Videos/How To Control A Football.mp4'
### #15: ./How to improve Soccer ball control skills/At Home Soccer Drills - Ball Control Drills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'At Home Soccer Drills - Ball Control Drills.mp4' './How to improve Soccer ball control skills/At Home Soccer Drills - Ball Control Drills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 95 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/At Home Soccer Drills - Ball Control Drills.mp4' './At Home Soccer Training Drills/08 - At Home Soccer Drills - Ball Control Drills.mp4'
### #16: ./How to improve Soccer ball control skills/At Home Soccer Drills - Side Flick Up Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'At Home Soccer Drills - Side Flick Up Skills.mp4' './How to improve Soccer ball control skills/At Home Soccer Drills - Side Flick Up Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/At Home Soccer Drills - Side Flick Up Skills.mp4' './At Home Soccer Training Drills/15 - At Home Soccer Drills - Side Flick Up Skills.mp4'
### #17: ./How to improve Soccer ball control skills/At Home Soccer Drills - Scoop Stall Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'At Home Soccer Drills - Scoop Stall Skills.mp4' './How to improve Soccer ball control skills/At Home Soccer Drills - Scoop Stall Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/At Home Soccer Drills - Scoop Stall Skills.mp4' './At Home Soccer Training Drills/14 - At Home Soccer Drills - Scoop Stall Skills.mp4'
### #18: ./How to improve Soccer ball control skills/How to Improve Soccer Ball Control - Soccer Training Drills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How to Improve Soccer Ball Control - Soccer Training Drills.mp4' './How to improve Soccer ball control skills/How to Improve Soccer Ball Control - Soccer Training Drills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How to Improve Soccer Ball Control - Soccer Training Drills.mp4' './Try These Soccer Drills/How to Improve Soccer Ball Control - Soccer Training Drills.mp4'
#### 2
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How to Improve Soccer Ball Control - Soccer Training Drills.mp4' './Top Soccer Training Videos/How to Improve Soccer Ball Control - Soccer Training Drills.mp4'
### #19: ./How to improve Soccer ball control skills/How To Juggle A Soccer Ball Tutorial For Beginners.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Juggle A Soccer Ball Tutorial For Beginners.mp4' './How to improve Soccer ball control skills/How To Juggle A Soccer Ball Tutorial For Beginners.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 95 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How To Juggle A Soccer Ball Tutorial For Beginners.mp4' './Must Watch Tutorials/10 - How To Juggle A Soccer Ball Tutorial For Beginners.mp4'
### #20: ./How to improve Soccer ball control skills/How To Control A Soccer Ball Out Of The Air.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Control A Soccer Ball Out Of The Air.mp4' './How to improve Soccer ball control skills/How To Control A Soccer Ball Out Of The Air.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How To Control A Soccer Ball Out Of The Air.mp4' './Top Soccer Training Videos/How To Control A Soccer Ball Out Of The Air.mp4'
### #21: ./How to improve Soccer ball control skills/Soccer Dribbling Moves.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Dribbling Moves.mp4' './How to improve Soccer ball control skills/Soccer Dribbling Moves.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/Soccer Dribbling Moves.mp4' './How to improve Soccer dribbling skills and fast footwork/Soccer Dribbling Moves.mp4'
### #22: ./How to improve Soccer ball control skills/How To Improve Football Control.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Improve Football Control.mp4' './How to improve Soccer ball control skills/How To Improve Football Control.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer ball control skills/How To Improve Football Control.mp4' './Top Soccer Training Videos/How To Improve Football Control.mp4'
### #23: ./Youth Soccer Training Drills/How To Improve Running Endurance and Stamina For Soccer.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Improve Running Endurance and Stamina For Soccer.mp4' './Youth Soccer Training Drills/How To Improve Running Endurance and Stamina For Soccer.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 95 -ge "$FSIM_MIN" ] && echo cp '../Youth Soccer Training Drills/How To Improve Running Endurance and Stamina For Soccer.mp4' './How to improve Soccer conditioning & Soccer fitness/23 - How To Improve Running Endurance and Stamina For Soccer.mp4'
### #24: ./Youth Soccer Training Drills/Soccer Drills For Youth.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Drills For Youth.mp4' './Youth Soccer Training Drills/Soccer Drills For Youth.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Youth Soccer Training Drills/Soccer Drills For Youth.mp4' './Top Soccer Training Videos/Soccer Drills For Youth.mp4'
### #25: ./Youth Soccer Training Drills/How To Warm Up For Soccer.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Warm Up For Soccer.mp4' './Youth Soccer Training Drills/How To Warm Up For Soccer.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 93 -ge "$FSIM_MIN" ] && echo cp '../Youth Soccer Training Drills/How To Warm Up For Soccer.mp4' './How to improve Soccer conditioning & Soccer fitness/24 - How To Warm Up For Soccer.mp4'
### #26: ./Try These Soccer Drills/Improve Soccer Dribbling - Soccer Dribbling Drills - The Soccer Essentials.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Improve Soccer Dribbling - Soccer Dribbling Drills - The Soccer Essentials.mp4' './Try These Soccer Drills/Improve Soccer Dribbling - Soccer Dribbling Drills - The Soccer Essentials.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Try These Soccer Drills/Improve Soccer Dribbling - Soccer Dribbling Drills - The Soccer Essentials.mp4' './Top Soccer Training Videos/Improve Soccer Dribbling - Soccer Dribbling Drills - The Soccer Essentials.mp4'
### #27: ./Try These Soccer Drills/How To Improve Soccer Speed Agility and Quickness.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Improve Soccer Speed Agility and Quickness.mp4' './Try These Soccer Drills/How To Improve Soccer Speed Agility and Quickness.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../Try These Soccer Drills/How To Improve Soccer Speed Agility and Quickness.mp4' './How to improve Soccer conditioning & Soccer fitness/28 - How To Improve Soccer Speed Agility and Quickness.mp4'
### #28: ./Try These Soccer Drills/How To Do 1v1 Soccer Moves.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Do 1v1 Soccer Moves.mp4' './Try These Soccer Drills/How To Do 1v1 Soccer Moves.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Try These Soccer Drills/How To Do 1v1 Soccer Moves.mp4' './Top Soccer Training Videos/How To Do 1v1 Soccer Moves.mp4'
#### 2
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Try These Soccer Drills/How To Do 1v1 Soccer Moves.mp4' './How to improve Soccer dribbling skills and fast footwork/How To Do 1v1 Soccer Moves.mp4'
### #29: ./Try These Soccer Drills/Improve Soccer Shooting Technique.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Improve Soccer Shooting Technique.mp4' './Try These Soccer Drills/Improve Soccer Shooting Technique.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Try These Soccer Drills/Improve Soccer Shooting Technique.mp4' './Top Soccer Training Videos/Improve Soccer Shooting Technique.mp4'
### #30: ./How to improve Soccer shooting skills & finishing/Soccer Shooting Drills and Tutorial.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Shooting Drills and Tutorial.mp4' './How to improve Soccer shooting skills & finishing/Soccer Shooting Drills and Tutorial.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer shooting skills & finishing/Soccer Shooting Drills and Tutorial.mp4' './Top Soccer Training Videos/Soccer Shooting Drills and Tutorial.mp4'
### #31: ./How to improve Soccer shooting skills & finishing/How To Improve Soccer Shooting Accuracy.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Improve Soccer Shooting Accuracy.mp4' './How to improve Soccer shooting skills & finishing/How To Improve Soccer Shooting Accuracy.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer shooting skills & finishing/How To Improve Soccer Shooting Accuracy.mp4' './Top Soccer Training Videos/How To Improve Soccer Shooting Accuracy.mp4'
### #32: ./Top Soccer Training Videos/How To Dribble A Soccer Ball.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Dribble A Soccer Ball.mp4' './Top Soccer Training Videos/How To Dribble A Soccer Ball.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/How To Dribble A Soccer Ball.mp4' './How to improve Soccer dribbling skills and fast footwork/How To Dribble A Soccer Ball.mp4'
### #33: ./Top Soccer Training Videos/1v1 Soccer Moves - Elastico Tutorial.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '1v1 Soccer Moves - Elastico Tutorial.mp4' './Top Soccer Training Videos/1v1 Soccer Moves - Elastico Tutorial.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/1v1 Soccer Moves - Elastico Tutorial.mp4' './How to improve Soccer dribbling skills and fast footwork/1v1 Soccer Moves - Elastico Tutorial.mp4'
### #34: ./Top Soccer Training Videos/How To Improve Soccer Speed and Acceleration.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Improve Soccer Speed and Acceleration.mp4' './Top Soccer Training Videos/How To Improve Soccer Speed and Acceleration.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/How To Improve Soccer Speed and Acceleration.mp4' './How to improve Soccer conditioning & Soccer fitness/03 - How To Improve Soccer Speed and Acceleration.mp4'
### #35: ./Top Soccer Training Videos/Soccer Cool Down.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Cool Down.mp4' './Top Soccer Training Videos/Soccer Cool Down.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 87 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/Soccer Cool Down.mp4' './How to improve Soccer conditioning & Soccer fitness/08 - Soccer Cool Down.mp4'
### #36: ./Top Soccer Training Videos/How to Improve your Soccer Dribbling Skills At Home.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How to Improve your Soccer Dribbling Skills At Home.mp4' './Top Soccer Training Videos/How to Improve your Soccer Dribbling Skills At Home.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 95 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/How to Improve your Soccer Dribbling Skills At Home.mp4' './At Home Soccer Training Drills/02 - How to Improve your Soccer Dribbling Skills At Home.mp4'
#### 2
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/How to Improve your Soccer Dribbling Skills At Home.mp4' './How to improve Soccer dribbling skills and fast footwork/How to Improve your Soccer Dribbling Skills At Home.mp4'
### #37: ./Top Soccer Training Videos/Soccer Dribbling Drills - Soccer Footwork.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'Soccer Dribbling Drills - Soccer Footwork.mp4' './Top Soccer Training Videos/Soccer Dribbling Drills - Soccer Footwork.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/Soccer Dribbling Drills - Soccer Footwork.mp4' './How to improve Soccer dribbling skills and fast footwork/Soccer Dribbling Drills - Soccer Footwork.mp4'
### #38: ./Top Soccer Training Videos/How To Beat A Defender In Soccer.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp 'How To Beat A Defender In Soccer.mp4' './Top Soccer Training Videos/How To Beat A Defender In Soccer.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '../Top Soccer Training Videos/How To Beat A Defender In Soccer.mp4' './How to improve Soccer dribbling skills and fast footwork/How To Beat A Defender In Soccer.mp4'
### #39: ./How to improve Soccer conditioning & Soccer fitness/02 - At Home Soccer Drills - How To Improve Strength.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '02 - At Home Soccer Drills - How To Improve Strength.mp4' './How to improve Soccer conditioning & Soccer fitness/02 - At Home Soccer Drills - How To Improve Strength.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 89 -ge "$FSIM_MIN" ] && echo cp '../How to improve Soccer conditioning & Soccer fitness/02 - At Home Soccer Drills - How To Improve Strength.mp4' './At Home Soccer Training Drills/06 - At Home Soccer Drills - How To Improve Strength.mp4'
### #40: ./At Home Soccer Training Drills/05 - At Home Soccer Training Drills - How to improve soccer dribbling skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '05 - At Home Soccer Training Drills - How to improve soccer dribbling skills.mp4' './At Home Soccer Training Drills/05 - At Home Soccer Training Drills - How to improve soccer dribbling skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 96 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/05 - At Home Soccer Training Drills - How to improve soccer dribbling skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Training Drills - How to improve soccer dribbling skills.mp4'
### #41: ./At Home Soccer Training Drills/17 - At Home Soccer Drills - Advanced Drag Back Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '17 - At Home Soccer Drills - Advanced Drag Back Skills.mp4' './At Home Soccer Training Drills/17 - At Home Soccer Drills - Advanced Drag Back Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/17 - At Home Soccer Drills - Advanced Drag Back Skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Advanced Drag Back Skills.mp4'
### #42: ./At Home Soccer Training Drills/11 - At Home Soccer Drills - Push and Pull Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '11 - At Home Soccer Drills - Push and Pull Skills.mp4' './At Home Soccer Training Drills/11 - At Home Soccer Drills - Push and Pull Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/11 - At Home Soccer Drills - Push and Pull Skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Push and Pull Skills.mp4'
### #43: ./At Home Soccer Training Drills/19 - At Home Soccer Drills - Brazilian Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '19 - At Home Soccer Drills - Brazilian Skills.mp4' './At Home Soccer Training Drills/19 - At Home Soccer Drills - Brazilian Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 93 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/19 - At Home Soccer Drills - Brazilian Skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Brazilian Skills.mp4'
### #44: ./At Home Soccer Training Drills/12 - At Home Soccer Drills - Cuts and V Turns.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '12 - At Home Soccer Drills - Cuts and V Turns.mp4' './At Home Soccer Training Drills/12 - At Home Soccer Drills - Cuts and V Turns.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/12 - At Home Soccer Drills - Cuts and V Turns.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Cuts and V Turns.mp4'
### #45: ./At Home Soccer Training Drills/16 - At Home Soccer Drills - Side Touch Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '16 - At Home Soccer Drills - Side Touch Skills.mp4' './At Home Soccer Training Drills/16 - At Home Soccer Drills - Side Touch Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/16 - At Home Soccer Drills - Side Touch Skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Side Touch Skills.mp4'
### #46: ./At Home Soccer Training Drills/10 - At Home Soccer Drills - Scissor To Step Over.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '10 - At Home Soccer Drills - Scissor To Step Over.mp4' './At Home Soccer Training Drills/10 - At Home Soccer Drills - Scissor To Step Over.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/10 - At Home Soccer Drills - Scissor To Step Over.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Scissor To Step Over.mp4'
### #47: ./At Home Soccer Training Drills/09 - At Home Soccer Drills - Cut and Turn Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '09 - At Home Soccer Drills - Cut and Turn Skills.mp4' './At Home Soccer Training Drills/09 - At Home Soccer Drills - Cut and Turn Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 94 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/09 - At Home Soccer Drills - Cut and Turn Skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Cut and Turn Skills.mp4'
### #48: ./At Home Soccer Training Drills/18 - At Home Soccer Drills - Roll Over To Chop Skills.mp4
#### 0
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 100 -ge "$FSIM_MIN" ] && echo cp '18 - At Home Soccer Drills - Roll Over To Chop Skills.mp4' './At Home Soccer Training Drills/18 - At Home Soccer Drills - Roll Over To Chop Skills.mp4'
#### 1
.mp4 .mp4 false
[ "$FSIM_MIN" ] && [ 95 -ge "$FSIM_MIN" ] && echo cp '../At Home Soccer Training Drills/18 - At Home Soccer Drills - Roll Over To Chop Skills.mp4' './How to improve Soccer dribbling skills and fast footwork/At Home Soccer Drills - Roll Over To Chop Skills.mp4'
|
go-dedup/fsimilar
|
test/shell_cp.tmpl.sh
|
Shell
|
mit
| 28,994 |
GCLOUD_HOME="$HOME/.local/opt/google-cloud-sdk"
# The next line updates PATH for the Google Cloud SDK.
if [ -f "$GCLOUD_HOME/path.bash.inc" ]; then source "$GCLOUD_HOME/path.bash.inc"; fi
# The next line enables shell command completion for gcloud.
if [ -f "$GCLOUD_HOME/completion.bash.inc" ]; then source "$GCLOUD_HOME/completion.bash.inc"; fi
|
esamson/dotfiles
|
.bash_profile.d/gcloud.sh
|
Shell
|
mit
| 348 |
#!/bin/bash
# Copies (parts of) logfiles into the target folder, in order to "play back"
# preexisting logs (e.g. from emulation results).
# The overkill way of stopping all this is "killall sh";
# take care not to cause collateral damage.
SOURCEDIR="/home/theuers/public_html/emulation_results/best-routeMediumBWLowCon_SVCRateBasedAdaptationLogic/run_0/"
TARGETDIR="/home/theuers/public_html/netvis/network/"
PICOUNT=20
# start playback
sh copyAllPisLogs.sh $SOURCEDIR $TARGETDIR $PICOUNT &
sh copyAllPisRtLogs.sh $SOURCEDIR $TARGETDIR $PICOUNT &
wait $(jobs -p)
|
theuerse/netvis
|
utils/startPiLogPlayback.sh
|
Shell
|
mit
| 564 |
#!/bin/sh
#configuring the system
wget https://raw.githubusercontent.com/minden/meteor-velocity-travis-ci/master/Makefile
#install meteor
curl https://install.meteor.com | /bin/sh
#install velocity cli
npm install -g velocity-cli
|
minden/meteor-velocity-travis-ci
|
configure.sh
|
Shell
|
mit
| 233 |
#!/bin/bash
###############################################################################
#Check the user's environment before we try and install things
#First we'll check if we're root
#Then we'll check if there is internet
###############################################################################
environment_check ()
{
#Check if user is root
if [ "$EUID" -ne 0 ];
then
echo "Please run this script as root"
exit
fi
#Check we actually have internet
#if [ "`ping -c 2 code.iti.illinois.edu`" ];
#then
# echo "Network is up"
#else
# echo "Please Check Network"
# exit
#fi
}
###############################################################################
#Prompt the user to confirm they actually want this program to configure the network
###############################################################################
runprompt ()
{
whiptail --title "ARMOREnode Network Configuration tool" --yesno\
"This utility will help you create a network configuration for an ARMOREnode. We will backup your existing configuration."\
--yes-button "Continue" --no-button "Cancel" 10 70
proceed=$?
if [ "$proceed" -ne 0 ];
then
exit
fi
}
###############################################################################
#Prompt the user to select the two interfaces for the network bridge
###############################################################################
selectiface ()
{
whiptail --title "ARMOREnode Network Configuration tool" --msgbox\
"We need to create a network bridge using 2 interfaces to route traffic" 8 70
iface1=$(whiptail --inputbox "Please enter first interface" 8 78 eth0 --title "Select an Interface" 3>&1 1>&2 2>&3)
iface2=$(whiptail --inputbox "$iface1 was selected as the first interface.\n\
Please enter second interface" 8 78 eth1 --title "Select an Interface" 3>&1 1>&2 2>&3)
#make sure the interfaces are not the same else we'll restart the process again
if [ "$iface1" == "$iface2" ];
then
whiptail --title "ARMOREnode Network Configuration tool" --msgbox\
"The interfaces can not be the same" 8 70
selectiface
fi
}
###############################################################################
#Prompt the user for the bridge IP addressing details
###############################################################################
selectaddr ()
{
bridgeip=$(whiptail --inputbox "Please enter an ipv4 address for this armorenode" 8 78 \
192.168.1.5 --title "ARMOREnode Network Configuration tool" 3>&1 1>&2 2>&3)
broadcast=$(whiptail --inputbox "Please enter an broadcast address for this armorenode" 8 78 \
192.168.1.255 --title "ARMOREnode Network Configuration tool" 3>&1 1>&2 2>&3)
netmask=$(whiptail --inputbox "Please enter a netmask address for this armorenode" 8 78 \
255.255.255.0 --title "ARMOREnode Network Configuration tool" 3>&1 1>&2 2>&3)
gateway=$(whiptail --inputbox "Please enter the gateway address for this armorenode" 8 78 \
192.168.1.1 --title "ARMOREnode Network Configuration tool" 3>&1 1>&2 2>&3)
route=$(whiptail --inputbox "Please enter routing address with cidr for other networks" 8 78 \
192.168.2.0/24 --title "ARMOREnode Network Configuration tool" 3>&1 1>&2 2>&3)
}
###############################################################################
#Ask the user to confirm the addresses before anything is written
###############################################################################
addrconfirm ()
{
whiptail --title "ARMOREnode Network Configuration tool" --yesno\
"BridgeIP Address: $bridgeip\nBroadcast Address: $broadcast\nNetmask: $netmask\nGateway: $gateway\nRoute:$route\n"\
--yes-button "Continue" --no-button "Cancel" 12 70
proceed=$?
if [ "$proceed" -ne 0 ];
then
runprompt
fi
}
###############################################################################
#Back up any existing network interfaces file
###############################################################################
backupconfig ()
{
if [ -f /etc/network/interfaces ]; then
cp /etc/network/interfaces /etc/network/interfaces.backup
whiptail --title "ARMOREnode Network Configuration tool" --msgbox\
"Configuration file has been written to /etc/networking/interfaces and the orignal backed up" 8 70
else
whiptail --title "ARMOREnode Network Configuration tool" --msgbox\
"An original network interface file was not found so it was not backed up" 8 70
fi
}
###############################################################################
#Write the new bridge configuration to /etc/network/interfaces
###############################################################################
writeconfig ()
{
ifile=/etc/network/interfaces
printf '# This was automatically generated by ARMOREnode Network Configuration tool\n' > $ifile
printf 'auto lo br0\n' >> $ifile
printf 'iface lo inet loopback\n\n' >> $ifile
printf "iface $iface1 inet manual\n" >> $ifile
printf "iface $iface2 inet manual\n\n" >> $ifile
printf "iface br0 inet static\n" >> $ifile
printf " bridge_ports $iface1 $iface2\n" >> $ifile
printf " address $bridgeip\n" >> $ifile
printf " broadcast $broadcast\n" >> $ifile
printf " netmask $netmask\n" >> $ifile
printf " gateway $gateway\n" >> $ifile
printf " up ip route add $route via $gateway\n" >> $ifile
}
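# With the default values set below, the generated /etc/network/interfaces
# would look roughly like this (a sketch derived from the printf calls above):
#
#   auto lo br0
#   iface lo inet loopback
#
#   iface eth0 inet manual
#   iface eth1 inet manual
#
#   iface br0 inet static
#       bridge_ports eth0 eth1
#       address 192.168.1.5
#       broadcast 192.168.1.255
#       netmask 255.255.255.0
#       gateway 192.168.1.1
#       up ip route add 192.168.2.0/24 via 192.168.1.1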
###############################################################################
#Installation Script for ARMORE NODE on Debian Wheezy
###############################################################################
environment_check
#Let's put some default values
bridgeip='192.168.1.5'
broadcast='192.168.1.255'
netmask='255.255.255.0'
gateway='192.168.1.1'
route='192.168.2.0/24'
iface1='eth0'
iface2='eth1'
runprompt
selectiface
selectaddr
addrconfirm
backupconfig
writeconfig
|
GridProtectionAlliance/ARMORE
|
install/packaging/armoreconfig/configs/tools/armorenetworkcfg.sh
|
Shell
|
mit
| 6,229 |
#!/bin/bash
# Sample setup script
git clone https://github.com/rangle/slush-ngnext-component.git
cd slush-ngnext-component
npm install
mkdir test
cd test
npm link ../
|
rangle/slush-ngnext-component
|
setup.sh
|
Shell
|
mit
| 166 |
#!/bin/bash
for i in */; do
filename=$(echo $i | sed 's/\///g')
rm -f "$filename.cbz"
find $i/ -name '*' -print | sort | zip -j $filename.cbz -@
echo 'GENERATED "'$filename'.cbz" !'
done
|
tiagoprn/devops
|
shellscripts/utils/comics/batch_comics.sh
|
Shell
|
mit
| 193 |
#!/bin/sh
TOC="FindIt/FindIt.toc"
VERSIONTAG="## Version: "
VERSION=$(grep "$VERSIONTAG" $TOC | sed "s/$VERSIONTAG//")
OUTFILE="findit-$VERSION.zip"
git archive --format=zip origin FindIt/ > $OUTFILE &&
echo $OUTFILE
|
jleclanche/findit
|
release.sh
|
Shell
|
mit
| 219 |
#!/bin/sh
adb -d forward tcp:4444 localabstract:/adb-hub
adb connect localhost:4444
|
AlexanderSelzer/scripts
|
aw-conn.sh
|
Shell
|
mit
| 85 |
#Pins
echo 49 > /sys/class/gpio/export
echo 115 > /sys/class/gpio/export
echo 114 > /sys/class/gpio/export
echo 109 > /sys/class/gpio/export
echo 111 > /sys/class/gpio/export
#Muxes
echo 262 > /sys/class/gpio/export
echo 240 > /sys/class/gpio/export
echo 241 > /sys/class/gpio/export
echo 242 > /sys/class/gpio/export
echo 243 > /sys/class/gpio/export
#Output vs Input
echo 256 > /sys/class/gpio/export
echo 258 > /sys/class/gpio/export
echo 259 > /sys/class/gpio/export
echo 260 > /sys/class/gpio/export
echo 261 > /sys/class/gpio/export
#Pullup Resistors
echo 224 > /sys/class/gpio/export
echo 226 > /sys/class/gpio/export
echo 227 > /sys/class/gpio/export
echo 228 > /sys/class/gpio/export
echo 229 > /sys/class/gpio/export
#Tri-State
echo 214 > /sys/class/gpio/export
echo low > /sys/class/gpio/gpio214/direction
# Make changes - Set Modes
echo high > /sys/class/gpio/gpio262/direction
echo high > /sys/class/gpio/gpio240/direction
echo high > /sys/class/gpio/gpio241/direction
echo high > /sys/class/gpio/gpio242/direction
echo high > /sys/class/gpio/gpio243/direction
# Set Directions
echo high > /sys/class/gpio/gpio49/direction
echo high > /sys/class/gpio/gpio256/direction
echo high > /sys/class/gpio/gpio258/direction
echo high > /sys/class/gpio/gpio259/direction
echo low > /sys/class/gpio/gpio260/direction
echo high > /sys/class/gpio/gpio261/direction
# Disable Pullups
echo in > /sys/class/gpio/gpio224/direction
echo in > /sys/class/gpio/gpio226/direction
echo in > /sys/class/gpio/gpio227/direction
echo in > /sys/class/gpio/gpio228/direction
echo in > /sys/class/gpio/gpio229/direction
# Set Modes (finally) of the pins
echo mode0 > /sys/kernel/debug/gpio_debug/gpio49/current_pinmux
echo mode1 > /sys/kernel/debug/gpio_debug/gpio115/current_pinmux
echo mode1 > /sys/kernel/debug/gpio_debug/gpio114/current_pinmux
echo mode1 > /sys/kernel/debug/gpio_debug/gpio109/current_pinmux
# SPI Power Mode
echo on >/sys/devices//pci0000\:00/0000\:00\:07.1/power/control
echo high > /sys/class/gpio/gpio214/direction
# Release pin for VGATonic (retaining all the muxes we just worked so hard on!)
echo 49 > /sys/class/gpio/unexport
|
dqydj/VGAtonic
|
VGATonic_Linux_Drivers/edisonpinmux.sh
|
Shell
|
mit
| 2,164 |
#!/usr/bin/env bash
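# Hypothetical invocation (all four arguments are positional and optional;
# 'localhost' is just an example value for RMI_HOST, which has no default):
#   ./run_server.sh sql 2001 localhost target/classes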
export DB_TYPE=${1:-sql}
export RMI_PORT=${2:-2001}
export RMI_HOST=${3}
cd ${4:-'target/classes'}
java -Djava.security.policy=server.policy app/Server
|
Drapegnik/bsu
|
programming/java/sem6/lab1/run_server.sh
|
Shell
|
mit
| 171 |
CQChartsTest -tcl -exec population.tcl
#CQChartsTest -ceil -exec population.cl
|
colinw7/CQCharts
|
data/ceil/population.sh
|
Shell
|
mit
| 80 |
touch /tmp/.install
|
vikramthyagarajan/dotfiles
|
tests/module-tests/defaults/install.sh
|
Shell
|
mit
| 20 |
#!/bin/sh
# Mark the program as installed.
#
# Part of pitools - https://github.com/zipplet/pitools
# Copyright (c) Michael Nixon 2016.
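# Hypothetical usage -- the caller is expected to provide both paths, e.g.:
#   INSTALLMARKDIR=$HOME/.pitools INSTALLMARKFILE=$INSTALLMARKDIR/prog ./mark_as_installed.sh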
if [ ! -d "$INSTALLMARKDIR" ]; then
mkdir "$INSTALLMARKDIR"
fi
date > "$INSTALLMARKFILE"
|
zipplet/pitools
|
common/mark_as_installed.sh
|
Shell
|
mit
| 224 |
#!/bin/bash
#
# Bootstrap script for setting up CentOS as a vagrant .box
#
# Copyright (c) 2013 Alex Williams, Unscramble <[email protected]>
UNZIP=`which unzip`
TAR=`which tar`
fail_and_exit() {
echo "Provisioning failed"
exit 1
}
# Install some dependencies
rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm && \
yum install -y python-jinja2 python-yaml make && \
rpm -e epel-release-6-8.noarch || fail_and_exit
pushd /root
# Extract ansible and install it
$TAR -zxvf v1.3.3.tar.gz || fail_and_exit
pushd ansible-1.3.3
# Install Ansible
make install && \
source hacking/env-setup || fail_and_exit
popd
# Extract public provisioning scripts
$UNZIP -o beta-v2.zip || fail_and_exit
pushd jidoteki-os-templates-beta-v2/provisioning/vagrant
# Run ansible in local mode
chmod 644 hosts && \
ansible-playbook vagrant.yml -i hosts || fail_and_exit
popd
# Cleanup
rm -rf v1.3.3.tar.gz ansible-1.3.3 beta-v2.zip jidoteki-os-templates-beta-v2 bootstrap_centos.sh || fail_and_exit
history -c
popd
echo "Provisioning completed successfully"
exit 0
|
unscramble/jidoteki-os-templates
|
provisioning/vagrant/bootstrap_centos.sh
|
Shell
|
mit
| 1,147 |
#!/bin/bash
set -x
set -e
echo "deb https://packages.sury.org/php bullseye main" > /etc/apt/sources.list.d/sury.list
echo "deb-src https://packages.sury.org/php bullseye main" >> /etc/apt/sources.list.d/sury.list
wget --quiet https://packages.sury.org/php/apt.gpg
apt-key add apt.gpg
rm apt.gpg
cat >/etc/apt/preferences.d/sury <<EOF
Package: php8.1-*
Pin: origin packages.sury.org
Pin-Priority: 600
EOF
apt-get update
apt-get install -y --no-install-recommends \
php8.1-cli \
php8.1-phpdbg \
php8.1-bcmath \
php8.1-bz2 \
php8.1-common \
php8.1-curl \
php8.1-dba \
php8.1-enchant \
php8.1-gd \
php8.1-gmp \
php8.1-imagick libmagickcore-6.q16-6-extra \
php8.1-imap \
php8.1-interbase \
php8.1-intl \
php8.1-ldap \
php8.1-mbstring \
php8.1-mysql \
php8.1-odbc odbc-mdbtools \
php8.1-opcache \
php8.1-pgsql \
php8.1-pspell \
php8.1-readline \
php8.1-snmp snmp \
php8.1-soap \
php8.1-sqlite3 \
php8.1-sybase \
php8.1-tidy \
php8.1-xml \
php8.1-xmlrpc \
php8.1-xsl \
php8.1-xdebug \
php8.1-zip \
php8.1-amqp \
php8.1-apcu \
php8.1-ast \
php8.1-ds \
php8.1-igbinary \
php8.1-mailparse \
php8.1-memcache \
php8.1-memcached \
php8.1-mongodb \
php8.1-msgpack \
php8.1-oauth \
php8.1-pcov \
php8.1-psr \
php8.1-raphf \
php8.1-redis \
php8.1-rrd \
php8.1-smbclient \
php8.1-solr \
php8.1-ssh2 \
php8.1-uuid \
php8.1-yaml \
php8.1-zmq
# php8.1-gmagick \ provides more stable api but conflicts with imagick
# php8.1-yac \ conflicts with php8.1-apcu
# php8.1-swoole \ Missing files
# php8.1-gearman \ Missing files
# disable all php modules
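# (reading the pipeline below: list every .ini in mods-available, strip the
# extension, and hand each module name to phpdismod for all SAPIs and
# versions; errors from already-disabled modules are discarded)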
ls -1 /etc/php/8.1/mods-available/ | sed 's/\.ini$//g' | xargs -I{} -n1 phpdismod -v ALL -s ALL {} 2>/dev/null
# cleanup older versions
rm -rf /etc/php/{5.6,7.0,7.1,7.2,7.3,7.4,8.0}
# backup original php.ini
mv /etc/php/8.1/cli/php.ini{,_orig}
# install custom php modules
apt-get install -y --no-install-recommends phyaml
# install dma (dragonfly mailer simple relay)
debconf-set-selections <<< "dma dma/mailname string"
debconf-set-selections <<< "dma dma/relayhost string mail"
apt-get install -y --no-install-recommends dma
echo '*: @' > /etc/aliases # force local mails to smarthost
cp -frv /build/files/* / || true
# Clean up APT when done.
source /usr/local/build_scripts/cleanup_apt.sh
|
nfq-technologies/docker-images
|
php81-cli/build/setup_docker.sh
|
Shell
|
mit
| 2,323 |
#!/bin/bash
# Adapted from Gregory Pakosz's amazing tmux config at https://github.com/gpakosz/.tmux
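# _is_enabled is defined elsewhere in gpakosz's full config; this minimal
# stand-in (an assumption, not part of the upstream file) lets the script
# run on its own:
_is_enabled() {
[ x"$1" = x"enabled" ] || [ x"$1" = x"true" ] || [ x"$1" = x"yes" ] || [ x"$1" = x"1" ]
}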
_hostname() {
tty=${1:-$(tmux display -p '#{pane_tty}')}
ssh_only=$2
# shellcheck disable=SC2039
if [ x"$OSTYPE" = x"cygwin" ]; then
pid=$(ps -a | awk -v tty="${tty##/dev/}" '$5 == tty && /ssh/ && !/vagrant ssh/ && !/autossh/ && !/-W/ { print $1 }')
[ -n "$pid" ] && ssh_parameters=$(tr '\0' ' ' < "/proc/$pid/cmdline" | sed 's/^ssh //')
else
ssh_parameters=$(ps -t "$tty" -o command= | awk '/ssh/ && !/vagrant ssh/ && !/autossh/ && !/-W/ { $1=""; print $0; exit }')
fi
if [ -n "$ssh_parameters" ]; then
# shellcheck disable=SC2086
hostname=$(ssh -G $ssh_parameters 2>/dev/null | awk 'NR > 2 { exit } ; /^hostname / { print $2 }')
# shellcheck disable=SC2086
[ -z "$hostname" ] && hostname=$(ssh -T -o ControlPath=none -o ProxyCommand="sh -c 'echo %%hostname%% %h >&2'" $ssh_parameters 2>&1 | awk '/^%hostname% / { print $2; exit }')
#shellcheck disable=SC1004
hostname=$(echo "$hostname" | awk '\
{ \
if ($1~/^[0-9.:]+$/) \
print $1; \
else \
split($1, a, ".") ; print a[1] \
}')
else
if ! _is_enabled "$ssh_only"; then
hostname=$(command hostname -s)
fi
fi
echo "$hostname"
}
_hostname
|
58bits/dotfiles
|
tmux-gpakosz/hostname.sh
|
Shell
|
mit
| 1,297 |
#!/bin/bash
# Removes an installation and reinstalls it in a development environment.
# Drops the database and restores it.
source _variables.sh
# Check that we are in the development environment.
if [ $VIRTUALENV != $VIRTUAL_ENV_DEV ]
then
echo "reinstall_dev.sh is only for the '$VIRTUAL_ENV_DEV' virtual environment"
exit
fi
cd $PROJECT_ROOT
# Restore directory and file permissions.
read -p "Restore permissions? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
$BIN_ROOT/permissions.sh
fi
# Reinstall node_modules.
read -p "Reinstall Node? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
if [ -d $PROJECT_ROOT/node_modules ]
then
rm -rf $PROJECT_ROOT/node_modules
fi
# If yarn is installed, use it; otherwise use npm.
if hash yarn 2>/dev/null
then
yarn install
else
npm install
fi
fi
# Run Gulp?
read -p "Run Gulp? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
gulp
fi
# Reinstall the database; requires ~/.pgpass
read -p "Restore the database? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
psql -U postgres -c "DROP DATABASE IF EXISTS $DATABASE_NAME"
echo "Dropped database $DATABASE_NAME"
psql -U postgres -c "CREATE DATABASE $DATABASE_NAME WITH OWNER $DATABASE_USER"
echo "Created database $DATABASE_NAME WITH OWNER $DATABASE_USER"
# Remove the migrations directories; drop this once moved to production.
# read -p "Remove migrations directories? (y/[N]) " yn
# if [ "$yn" == "y" -o "$yn" == "Y" ]
# then
# source $BIN_ROOT/delete_migrations.sh
# fi
$PROJECT_ROOT/manage.py makemigrations
$PROJECT_ROOT/manage.py migrate
fi
# Restore Media?
read -p "Restore local media? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
rm -rf $SRC_ROOT/media/local
cp -r $PROJECT_ROOT/compose/media/local $SRC_ROOT/media/local
# Test uses the same media as local
rm -rf $SRC_ROOT/media/test
cp -r $PROJECT_ROOT/compose/media/local $SRC_ROOT/media/test
fi
# Load fixtures
read -p "Load fixtures? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
source django_loaddata.sh
fi
# Remove logs
read -p "Remove logs? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
find $PROJECT_ROOT/logs/* ! -name ".keep" -exec rm -r {} \;
echo "Removed files in ./logs/"
fi
# Check whether any print() is left in the code.
grep --exclude=*.pyc -rnw $PROJECT_ROOT/src/apps $PROJECT_ROOT/tests -e 'print'
# Start the server
read -p "Start the server? (y/[N]) " yn
if [ "$yn" == "y" -o "$yn" == "Y" ]
then
$PROJECT_ROOT/manage.py runserver $SITE_DOMAIN
fi
|
snicoper/snicoper.com
|
bin/reinstall_dev.sh
|
Shell
|
mit
| 2,641 |
#!/bin/sh
SERVER=ubuntu@ec2-13-124-28-33.ap-northeast-2.compute.amazonaws.com:/home/ubuntu/api
scp -i ~/Security/aws-keypair-imac.pem ./api.js $SERVER/
rsync -rave "ssh -i ~/Security/aws-keypair-imac.pem" ./lib/* $SERVER/lib/ --delete
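# Tip: preview what a deploy would change by adding --dry-run first (sketch):
#   rsync -rave "ssh -i ~/Security/aws-keypair-imac.pem" --dry-run ./lib/* $SERVER/lib/ --delete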
|
GeunhoKim/wd
|
script/deploy_api.sh
|
Shell
|
mit
| 235 |
#!/bin/bash
# shopt and 'pipefail' below are bashisms, so run under bash rather than sh.
set +h # disable hashall
shopt -s -o pipefail
set -e
PKG_NAME="lsb-release"
PKG_VERSION="1.4"
TARBALL="${PKG_NAME}-${PKG_VERSION}.tar.gz"
SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
function prepare() {
ln -sv "/source/$TARBALL" "$TARBALL"
}
function unpack() {
tar xf ${TARBALL}
}
function build() {
sed -i "s|n/a|unavailable|" lsb_release
}
function check() {
echo " "
}
function instal() { # 'instal' (sic): naming it 'install' would shadow the install(1) binary invoked below
./help2man -N --include ./lsb_release.examples \
--alt_version_key=program_version ./lsb_release > lsb_release.1
install -v -m 755 lsb_release /usr/bin/lsb_release
}
function clean() {
rm -rf "${SRC_DIR}" "$TARBALL"
}
clean;prepare;unpack;pushd ${SRC_DIR};build;[[ $MAKE_CHECK = TRUE ]] && check;instal;popd;clean
|
PandaLinux/pandaOS
|
phase3/lsb-release/build.sh
|
Shell
|
mit
| 771 |
#!/bin/bash
rm -r json
mkdir json
geoc tile generate -l "type=vectortiles file=json format=json pyramid=GlobalMercatorTopLeft" \
-m "layertype=layer file=naturalearth.gpkg layername=ocean" \
-m "layertype=layer file=naturalearth.gpkg layername=countries" \
-s 0 \
-e 4 \
-v
# Overwrite (not append) so re-running the script doesn't duplicate the page.
cat <<EOT > json.html
<!doctype html>
<html lang="en">
<head>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/openlayers/openlayers.github.io@master/en/v4.6.5/css/ol.css" type="text/css">
<style>
.map {
height: 400px;
width: 100%;
}
</style>
<script src="https://cdn.jsdelivr.net/gh/openlayers/openlayers.github.io@master/en/v4.6.5/build/ol.js" type="text/javascript"></script>
<title>GeoScript Vector Tiles</title>
</head>
<body>
<h2>GeoScript Vector Tiles</h2>
<div id="map" class="map"></div>
<script type="text/javascript">
var vectorLayer = new ol.layer.VectorTile({
source: new ol.source.VectorTile({
format: new ol.format.GeoJSON(),
projection: 'EPSG:3857',
tileGrid: new ol.tilegrid.createXYZ({minZoom: 0, maxZoom: 6}),
url: 'http://localhost:8000/json/{z}/{x}/{y}.json'
}),
style: new ol.style.Style({
stroke: new ol.style.Stroke({
color: 'black'
}),
fill: new ol.style.Fill({
color: [238,238,238,0.1],
opacity: 0.1
})
})
});
var map = new ol.Map({
target: 'map',
layers: [
new ol.layer.Tile({
source: new ol.source.OSM()
}), vectorLayer
],
view: new ol.View({
center: ol.proj.transform([-100, 40], 'EPSG:4326', 'EPSG:3857'),
zoom: 4
})
});
</script>
</body>
</html>
EOT
python -m SimpleHTTPServer
|
jericks/geoc
|
examples/tile_json.sh
|
Shell
|
mit
| 1,930 |
#!/usr/bin/env bash
# Variables
BOX_IP=$1;
VERBOSE=$2;
ANSIBLE_PLAYBOOK=$3
# Check if ansible installed
if ! which ansible > /dev/null; then
# Ansible doesn't exist, install it
echo "Installing ansible";
# Ansible 1.7.2
# software-properties-common provides apt-add-repository, so install it
# before adding the PPA, then refresh the package lists.
sudo apt-get update -qq
sudo apt-get install -y -qq software-properties-common
sudo apt-add-repository ppa:ansible/ansible
sudo apt-get update -qq
sudo apt-get install -y -qq ansible
# Ansible 1.9.2
#sudo apt-get update -qq
#sudo apt-get install -y -qq software-properties-common
#sudo apt-get install -y -qq python-pip python-dev
#sudo pip -q install ansible
#mkdir /etc/ansible/
fi
# Ansible provision
sudo cp /vagrant/provision/inventories/dev /etc/ansible/hosts -f
sudo chmod 666 /etc/ansible/hosts
sudo sed -i "s/ip/$BOX_IP/g" /etc/ansible/hosts
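# The 'dev' inventory is assumed to ship a literal 'ip' placeholder that the
# sed above replaces with the box address, e.g. (hypothetical contents):
#   [dev]
#   ip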
echo "Ansible provision start..."
# Run the playbook
if [ "$VERBOSE" = "y" ]; then
sudo -i ansible-playbook $ANSIBLE_PLAYBOOK --connection=local -v
#ansible-playbook -i 'localhost,' "${ANSIBLE_PLAYBOOK}" --extra-vars "is_windows=true" --connection=local -v
else
sudo -i ansible-playbook $ANSIBLE_PLAYBOOK --connection=local
#ansible-playbook -i 'localhost,' "${ANSIBLE_PLAYBOOK}" --extra-vars "is_windows=true" --connection=local
fi
|
juy/Vagrant-LEMP-Stack-setter
|
vagrant/provision/provision.sh
|
Shell
|
mit
| 1,255 |
#!/usr/bin/env bash
# Instructions for the command line at http://jflex.de/installing.html
java -jar lib/jflex-1.6.1.jar -d src/main/java/net/alloyggp/griddle/generated/ schemas/GdlScanner.flex
|
AlexLandau/gdl-validation
|
runFlex.sh
|
Shell
|
mit
| 197 |
#!/bin/bash
# DNS
# -----------------------------------------------
# This script installs packages, but the DNS zone files are only
# created by the /dns/update API in the management server because
# the set of zones (domains) hosted by the server depends on the
# mail users & aliases created by the user later.
source setup/functions.sh # load our functions
source /etc/mailinabox.conf # load global vars
# Install the packages.
#
# * nsd: The non-recursive nameserver that publishes our DNS records.
# * ldnsutils: Helper utilities for signing DNSSEC zones.
# * openssh-client: Provides ssh-keyscan which we use to create SSHFP records.
echo "Installing nsd (DNS server)..."
apt_install nsd ldnsutils openssh-client
# Prepare nsd's configuration.
mkdir -p /var/run/nsd
cat > /etc/nsd/nsd.conf << EOF;
# Do not edit. Overwritten by Mail-in-a-Box setup.
server:
hide-version: yes
logfile: "/var/log/nsd.log"
# identify the server (CH TXT ID.SERVER entry).
identity: ""
# The directory for zonefile: files.
zonesdir: "/etc/nsd/zones"
# Allows NSD to bind to IP addresses that are not (yet) added to the
# network interface. This allows nsd to start even if the network stack
# isn't fully ready, which apparently happens in some cases.
# See https://www.nlnetlabs.nl/projects/nsd/nsd.conf.5.html.
ip-transparent: yes
EOF
# Add log rotation
cat > /etc/logrotate.d/nsd <<EOF;
/var/log/nsd.log {
weekly
missingok
rotate 12
compress
delaycompress
notifempty
}
EOF
# Since we have bind9 listening on localhost for locally-generated
# DNS queries that require a recursive nameserver, and the system
# might have other network interfaces for e.g. tunnelling, we have
# to be specific about the network interfaces that nsd binds to.
for ip in $PRIVATE_IP $PRIVATE_IPV6; do
echo " ip-address: $ip" >> /etc/nsd/nsd.conf;
done
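# For example, with PRIVATE_IP=10.0.0.1 and PRIVATE_IPV6=fd00::1 (illustrative
# values) this appends:
#   ip-address: 10.0.0.1
#   ip-address: fd00::1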
# Create a directory for additional configuration directives, including
# the zones.conf file written out by our management daemon.
echo "include: /etc/nsd/nsd.conf.d/*.conf" >> /etc/nsd/nsd.conf;
# Remove the old location of zones.conf that we generate. It will
# now be stored in /etc/nsd/nsd.conf.d.
rm -f /etc/nsd/zones.conf
# Create DNSSEC signing keys.
mkdir -p "$STORAGE_ROOT/dns/dnssec";
# TLDs, registrars, and validating nameservers don't all support the same algorithms,
# so we'll generate keys using a few different algorithms so that dns_update.py can
# choose which algorithm to use when generating the zonefiles. See #1953 for recent
# discussion. File for previously used algorithms (i.e. RSASHA1-NSEC3-SHA1) may still
# be in the output directory, and we'll continue to support signing zones with them
# so that trust isn't broken with deployed DS records, but we won't generate those
# keys on new systems.
FIRST=1 #NODOC
for algo in RSASHA256 ECDSAP256SHA256; do
if [ ! -f "$STORAGE_ROOT/dns/dnssec/$algo.conf" ]; then
if [ $FIRST == 1 ]; then
echo "Generating DNSSEC signing keys..."
FIRST=0 #NODOC
fi
# Create the Key-Signing Key (KSK) (with `-k`) which is the so-called
# Secure Entry Point. The domain name we provide ("_domain_") doesn't
# matter -- we'll use the same keys for all our domains.
#
# `ldns-keygen` outputs the new key's filename to stdout, which
# we're capturing into the `KSK` variable.
#
# ldns-keygen uses /dev/random for generating random numbers by default.
# This is slow and unnecessary if we ensure /dev/urandom is seeded properly,
# so we use /dev/urandom. See system.sh for an explanation. See #596, #115.
# (This previously used -b 2048 but it's unclear if this setting makes sense
# for non-RSA keys, so it's removed. The RSA-based keys are not recommended
# anymore anyway.)
KSK=$(umask 077; cd $STORAGE_ROOT/dns/dnssec; ldns-keygen -r /dev/urandom -a $algo -k _domain_);
# Now create a Zone-Signing Key (ZSK) which is expected to be
# rotated more often than a KSK, although we have no plans to
# rotate it (and doing so would be difficult to do without
# disturbing DNS availability.) Omit `-k`.
# (This previously used -b 1024 but it's unclear if this setting makes sense
# for non-RSA keys, so it's removed.)
ZSK=$(umask 077; cd $STORAGE_ROOT/dns/dnssec; ldns-keygen -r /dev/urandom -a $algo _domain_);
# These generate two sets of files like:
#
# * `K_domain_.+007+08882.ds`: DS record normally provided to domain name registrar (but it's actually invalid with `_domain_` so we don't use this file)
# * `K_domain_.+007+08882.key`: public key
# * `K_domain_.+007+08882.private`: private key (secret!)
# The filenames are unpredictable and encode the key generation
# options. So we'll store the names of the files we just generated.
# We might have multiple keys down the road. This will identify
# what keys are the current keys.
cat > $STORAGE_ROOT/dns/dnssec/$algo.conf << EOF;
KSK=$KSK
ZSK=$ZSK
EOF
fi
# And loop to do the next algorithm...
done
# Force the dns_update script to be run every day to re-sign zones for DNSSEC
# before they expire. When we sign zones (in `dns_update.py`) we specify a
# 30-day validation window, so we had better re-sign before then.
cat > /etc/cron.daily/mailinabox-dnssec << EOF;
#!/bin/bash
# Mail-in-a-Box
# Re-sign any DNS zones with DNSSEC because the signatures expire periodically.
$(pwd)/tools/dns_update
EOF
chmod +x /etc/cron.daily/mailinabox-dnssec
# Permit DNS queries on TCP/UDP in the firewall.
ufw_allow domain
|
mail-in-a-box/mailinabox
|
setup/dns.sh
|
Shell
|
cc0-1.0
| 5,436 |
if [[ -z `type -t brew` ]]; then
echo ""
echo ">>>>>>>>>> Installing brew (if you don't have it) <<<<<<<<<<<<<<"
echo ">>>>>>>>>> This is interactive so pay attention!"
echo ""
ruby -e "$(curl -fsSkL raw.github.com/mxcl/homebrew/go)"
brew doctor
else
echo ">>>>>>>>>> You already have brew (good job)"
fi
if [[ -z `type -t wget` ]]; then
echo ">>>>>>>>>> No wget, installing"
brew install wget
else
echo ">>>>>>>>>> You already have wget (good job)"
fi
if [[ ! -e /usr/local/lib/libzmq.3.dylib ]]; then
echo ">>>>>>>>>> No libzmq 3, installing"
# Install zeromq32 and its dependencies using homebrew.
brew tap homebrew/versions
brew install zeromq32
else
echo ">>>>>>>>>> You already have libzmq (good job)"
fi
mkdir -p ~/bin
if [[ `lein -v 2> /dev/null` =~ 2.0 ]]; then
echo ">>>>>>>>> Already have lein 2"
else
echo ""
echo ">>>>>>>>>> Installing Lein <<<<<<<<<<<<<<"
echo ">>>>>>>>>> Build an manage clojure projects <<<<<<<<<<<<<<"
echo ""
if [ -e /usr/bin/lein ]; then
echo "*** sudo: Moving your current lein to lein-old"
sudo mv /usr/bin/lein /usr/bin/lein-old
fi
curl "https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein" > ~/bin/lein
chmod 755 ~/bin/lein
echo "*** sudo: symlinking lein to /usr/bin/lein"
sudo ln -s ~/bin/lein /usr/bin/lein
fi
echo ""
echo ">>>>>>>>>> Lein Repo Plugin (reinstall) <<<<<<<<<<<<<<"
echo ""
if [ -e ~/.lein/profiles.clj ]; then
echo "*** Moving current lein profile to profiles-old.clj"
mv ~/.lein/profiles.clj ~/.lein/profiles-old.clj
fi
cd ~/grabbag/external/lein-repo
lein install
cd ~/grabbag/external/lein-coax
lein install
echo ""
echo ">>>>>>>>>> Lein Profile (~/.lein/profiles.clj) <<<<<<<<<<<<<<"
echo ""
echo "{:user {:plugins [[lein-repo \"0.2.1\"] [com.keminglabs/cljx \"0.6.0\" :exclusions [org.clojure/clojure]]]} :uberjar {:plugins [[lein-repo \"0.2.1\"]]}}" > ~/.lein/profiles.clj
echo ""
echo ">>>>>>>>>> Copying ssh and ec2 keys, installing crane <<<<<<<<<<<<<<"
echo ""
rm -f ~/.crane.pem
cp ~/grabbag/config/crane/.crane.pem ~/.crane.pem
chmod 600 ~/.crane.pem
rm -f ~/.crane
cp ~/grabbag/config/crane/grabbag ~/.crane
chmod 600 ~/.crane
rm -f ~/bin/crane
echo "*** sudo: Symlinking crane to /usr/bin"
sudo ln -s ~/grabbag/lib/crane/resources/bin/crane-local /usr/bin/crane
echo ""
echo ">>>>>>>>>> Pull down all jars to maven repo ~/.m2 (approx 10mins) <<<<<<<<<<<<<<"
echo ">>>>>>>>>> Run all unit tests using ptest (~/grabbag/config/bin/ptest) <<<<<<<<<<<<<<"
echo ""
export PATH=~/grabbag/config/bin:$PATH
ptest
echo ""
echo "You're all done!!"
echo "#### Make sure to add ~/bin and ~/grabbag/config/bin to PATH "
echo "#### Paste this into you ~/.bash_profile"
echo "copy-me>> export PATH=~/bin:~/grabbag/config/bin:\$PATH"
echo ""
|
plumatic/grab-bag
|
config/n00b/install_leincrane.bash
|
Shell
|
epl-1.0
| 2,832 |
#!/bin/bash
echo -e "Introduzca el codigo postal: \c"
read CodigoPostal
case $CodigoPostal in
72[0-5][0-9][0-9])
echo "$CodigoPostal es un codigo postal de (Heroica Puebla de Zaragoza)"
;;
74[0][0-8][0-9])
echo "$CodigoPostal es un codigo postal de San MartΓn Texmelucan de Labastida"
;;
74[0][0-8][0-9])
echo "$CodigoPostal es un codigo postal de Tepexi de RodrΓguez"
;;
[7][0-5][0-9][0-9][0-7])
echo "$CodigoPostal es un codigo postal de MΓ©xico"
;;
* )
echo "No es un codigo postal de MΓ©xico"
esac
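# Example session (hypothetical input):
#   $ ./script14.sh
#   Enter the postal code: 72000
#   72000 is a postal code of Heroica Puebla de Zaragoza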
|
IntelBuap2014/Interprete-de-Ordenes
|
script14.sh
|
Shell
|
gpl-2.0
| 533 |
#!/bin/bash
#Author : Manoj Gautam
#Email : [email protected]
#Purpose: Ease Ruby and Rails installation in Fedora 20
#Date : 14 March 2014
#Installing the development tools
echo 'Installing Development Tools, It will take time...'
echo ' '
sudo yum groupinstall 'Development Tools' -y
sudo yum install nodejs -y # 'jsnode' is not a Fedora package; nodejs appears to be what was meant
sudo yum install git-core curl make bzip2 gcc-c++ patch readline readline-devel zlib zlib-devel libyaml-devel libffi-devel libxslt-devel sqlite sqlite-devel openssl openssl-devel -y
if [ $? = 0 ]; then
echo ' '
echo 'Installation of Development tools is complete'
fi
echo ' '
echo 'Downloading Ruby source '
if [ -s ruby-2.1.1.tar.gz ] ; then
echo 'Downloading is complete'
sleep 2
else
# No spaces are allowed around '=' in shell assignments.
urlruby=http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.1.tar.gz
sudo wget $urlruby
if [ $? = 0 ] ; then
echo ' '
echo 'Downloading ruby source is complete...'
fi
fi
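# Optional hardening sketch: verify the tarball before extracting. The
# checksum below is a placeholder, not the real value.
#   echo "<expected-sha256>  ruby-2.1.1.tar.gz" | sha256sum -c -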
echo ' '
echo ' Extracting the ruby source file'
sudo tar -xvzf ruby-2.1.1.tar.gz
if [ $? != 0 ] ; then
echo ' '
echo 'A problem occurred while extracting ruby-2.1.1.tar.gz'
else
sudo mv ruby-2.1.1 ruby
cd ruby
sudo ./configure
sudo make
sudo make install
fi
if [ $? = 0 ] ; then
echo ' '
echo 'Installing ruby has been successful'
fi
cd -
echo ' '
echo 'Installing rubyGems'
echo ' '
#Install RubyGems, the Ruby package manager
echo 'Downloading ruby gems'
if [ -s rubygems-2.2.2.tgz ] ; then
echo 'Downloading is complete'
else
urlrubygems=http://production.cf.rubygems.org/rubygems/rubygems-2.2.2.tgz
wget $urlrubygems
if [ $? = 0 ] ; then
echo ' '
echo 'Downloading rubygems source is complete...'
else
echo 'cannot download rubygems'
fi
fi
echo ' '
echo ' Extracting the rubygems source file'
sudo tar -xvzf rubygems-2.2.2.tgz
sudo mv rubygems-2.2.2 rubygems
cd rubygems
sudo ruby setup.rb
if [ $? = 0 ]; then
echo ' '
echo 'Installing rubygems was successful.'
else
echo 'Installer exit with error'
fi
cd -
echo ' '
echo 'your gem version is '
sudo gem -v
echo ''
echo 'updating the gems will take time... ...'
# sudo gem update  # update command disabled
# Installing rails
echo ' '
echo '#####'
echo 'Installing rails gem'
sudo gem install rails
sudo gem list
#installing mysql gem
echo ' '
sudo yum install mysql-devel -y
sudo yum install mysql-server -y
sudo gem install mysql
# Pass the config flag through to the gem's native build step.
sudo gem install mysql -- --with-mysql-config=/usr/bin/mysql_config
sudo gem list
#installing bundler
sudo gem install bundler
sudo bundle install
#END
|
surfer2047/ruby_and_rails_setup_script
|
ruby_rails.sh
|
Shell
|
gpl-2.0
| 2,542 |
#!/bin/sh
# Copyright (C) 1999-2005 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwblob ${SRCDIR}/input_bilevel.miff P7
|
atmark-techno/atmark-dist
|
user/imagemagick/tests/rwblob_P7_bilevel.sh
|
Shell
|
gpl-2.0
| 361 |
#! /bin/sh
# Copyright (C) 2011-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Custom test drivers and parallel test harness: check the documented
# semantics for deciding when the content of a test log file should be
# copied in the global test-suite.log file. Currently, this is done
# with the use of the reStructuredText field ':copy-in-global-log:' in
# the associated '.trs' files.
. test-init.sh
cat >> configure.ac << 'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
TEST_LOG_DRIVER = ./passthrough-driver
TEST_LOG_COMPILER = $(SHELL) -e
END
cat > passthrough-driver <<'END'
#!/bin/sh
set -e; set -u;
while test $# -gt 0; do
case $1 in
--log-file) log_file=$2; shift;;
--trs-file) trs_file=$2; shift;;
--test-name) test_name=$2; shift;;
--expect-failure|--color-tests|--enable-hard-errors) shift;;
--) shift; break;;
*) echo "$0: invalid option/argument: '$1'" >&2; exit 2;;
esac
shift
done
echo "$test_name: RUN"
"$@" >$log_file 2>&1 5>$trs_file
END
chmod a+x passthrough-driver
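# With this driver, whatever a test writes to file descriptor 5 lands verbatim
# in its '.trs' file; e.g. yes-1.test below produces a .trs containing:
#   :test-result: PASS
#   :copy-in-global-log: yes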
# The ':test-result:' and ':recheck:' fields and the first line of the
# log file should be irrelevant for the decision of whether a test
# output is to be copied in the 'test-suite.log'.
cat > no-1.test <<END
echo :test-result: SKIP >&5
echo :copy-in-global-log: no >&5
echo :test-result: FAIL >&5
echo :test-result: XPASS >&5
echo not seen 1
END
# In the last line, with leading and trailing whitespace in the value.
cat > no-2.test <<END
echo ":test-result: FAIL" >&5
echo "not seen 2"
echo ":recheck: yes" >&5
echo ":copy-in-global-log:$tab $tab no $tab" >&5
END
for RES in XPASS FAIL XFAIL SKIP ERROR UNKNOWN; do
unindent > $RES.test <<END
echo :test-result: $RES >&5
echo :copy-in-global-log: no >&5
echo not seen $RES
END
done
# In the first line, with no whitespace.
cat > no-3.test <<END
echo :copy-in-global-log:no >&5
echo ":test-result: FAIL" >&5
echo "not seen 3"
END
# Leading whitespace before the field.
cat > no-4.test <<END
echo ":test-result: FAIL" >&5
echo " $tab $tab$tab :copy-in-global-log: no" >&5
echo "not seen 4"
END
cat > yes-1.test <<END
echo :test-result: PASS >&5
echo :copy-in-global-log: yes >&5
echo seen yes 1
END
# A lacking ':copy-in-global-log:' implies that the content of
# the log file should be copied.
cat > yes-2.test <<END
echo :test-result: PASS >&5
echo seen yes 2
END
# Three corner cases.
cat > corn-1.test <<END
echo seen corn 1
echo ':copy-in-global-log:' >&5
END
cat > corn-2.test <<END
echo seen corn 2
echo '$tab $tab$tab' >&5
END
cat > corn-3.test <<'END'
echo seen corn 31
echo ':copy-in-global-log:#@%!' >&5
echo seen corn 32
END
echo TESTS = *.test >> Makefile.am
$ACLOCAL
$AUTOCONF
$AUTOMAKE
./configure
# We don't care about the exit status of "make check" here, that
# should be checked in other tests.
$MAKE check || :
cat test-suite.log
grep '^seen yes 1$' test-suite.log
grep '^seen yes 2$' test-suite.log
grep '^seen corn 1$' test-suite.log
grep '^seen corn 2$' test-suite.log
grep '^seen corn 31$' test-suite.log
grep '^seen corn 32$' test-suite.log
$FGREP 'not seen' test-suite.log && exit 1
:
|
pylam/automake
|
t/test-metadata-global-log.sh
|
Shell
|
gpl-2.0
| 3,736 |
#!/bin/bash
data=`date`
rede="GBarbosa"
#arquivo="$1"
path_relatorio="/ti/scripts/saluscripts/relatorios_radio/relatorios_gbarbosa"
path_sistema="/mnt/hd500/sistema/radio"
path_registro="/mnt/hd500/logs_novo/radio"
> ${path_relatorio}/${rede}_estatisticas.txt
> ${path_relatorio}/${rede}_falta_conteudo.txt
> ${path_relatorio}/${rede}_naofalta_conteudo.txt
ls ${path_sistema}/${rede}/sql/ | grep -v '^000' | grep -v 'AA' | while read loja
do
ls ${path_sistema}/${rede}/generos/$loja/* | grep mp3 > ${path_relatorio}/${loja}_conteudo_xavier.txt
zcat ${path_registro}/${rede}/${rede}_${loja}_status.zip | grep mp3 > ${path_relatorio}/${loja}_conteudo_loja.txt
> ${path_relatorio}/${loja}_conteudo_certo.txt
> ${path_relatorio}/${loja}_conteudo_faltante.txt
cat ${path_relatorio}/${loja}_conteudo_xavier.txt | while read conteudo
do
# -F: match the filename literally rather than as a regex (mp3 names contain dots).
igual=`cat ${path_relatorio}/${loja}_conteudo_loja.txt | grep -F "${conteudo}"`
if [ "$igual" != "" ]
then
echo "${conteudo}" >> ${path_relatorio}/${loja}_conteudo_certo.txt
else
echo "${conteudo}" >> ${path_relatorio}/${loja}_conteudo_faltante.txt
fi
done
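# A faster alternative sketch: if both lists held exact, sorted lines, comm(1)
# could replace the per-line grep loop above (assumes exact-line matching is
# acceptable here):
#   comm -23 xavier_sorted.txt loja_sorted.txt > ${loja}_conteudo_faltante.txt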
diverge=`cat ${path_relatorio}/${loja}_conteudo_faltante.txt | grep mp3`
if [ "$diverge" != "" ]
then
echo $loja >> ${path_relatorio}/${rede}_falta_conteudo.txt
else
echo $loja >> ${path_relatorio}/${rede}_naofalta_conteudo.txt
fi
done
total=`ls ${path_sistema}/${rede}/sql/ | grep -v '^000' | grep -v 'AA' | wc -l`
completo=`cat ${path_relatorio}/${rede}_naofalta_conteudo.txt | wc -l`
faltante=$(($total-$completo))
echo " " >> ${path_relatorio}/${rede}_estatisticas.txt
echo "Relatorio da rede ${rede} gerado ${data} " >> ${path_relatorio}/${rede}_estatisticas.txt
echo " " >> ${path_relatorio}/${rede}_estatisticas.txt
echo " Total de Lojas = ${total}" >> ${path_relatorio}/${rede}_estatisticas.txt
echo " Lojas Com Conteudo Completo = ${completo}" >> ${path_relatorio}/${rede}_estatisticas.txt
echo " Lojas Faltando Conteudo = ${faltante}" >> ${path_relatorio}/${rede}_estatisticas.txt
echo " " >> ${path_relatorio}/${rede}_estatisticas.txt
|
tuxrulez/binarios
|
xavier/ti/scripts/saluscripts/relatorios_radio/relatorios_gbarbosa/conteudo.sh
|
Shell
|
gpl-2.0
| 2,539 |
#!/bin/sh /etc/rc.common
# Copyright (C) 2010 OpenWrt.org
# Copyright (C) 2011 lantiq.com
START=45
recover_stamp=/etc/optic/.goi_recovered
goi_config_recover() {
TAR_FILE='/tmp/goi.tar.gz'
ENV_VARIABLE='goi_config'
local goi_config
goi_config=`fw_printenv ${ENV_VARIABLE}` || return 1
echo ${goi_config} | sed "s/^${ENV_VARIABLE}=//" | tr '@' '\n' | uudecode -o ${TAR_FILE} && \
tar zxf ${TAR_FILE} -C / && \
rm -f ${TAR_FILE}
}
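# The U-Boot variable is presumably produced by the inverse pipeline
# (sketch; the archived paths are an assumption):
#   tar zcf /tmp/goi.tar.gz -C / etc/optic
#   fw_setenv goi_config "$(uuencode /tmp/goi.tar.gz goi.tar.gz | tr '\n' '@')"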
start () {
if [ -e /etc/fw_env.config ]; then
if [ ! -e ${recover_stamp} ]; then
echo "Recover goi_config from U-Boot"
goi_config_recover && touch ${recover_stamp}
fi
fi
}
|
kbridgers/VOLTE4GFAX
|
package/feeds/ltq_gpon_onu/gpon-base-files/files/etc/init.d/recover_goi.sh
|
Shell
|
gpl-2.0
| 625 |
#!/usr/bin/env sh
~/Tools/android-sdk-linux/platform-tools/adb backup -apk -shared -all -f ~/Backups/backup.ab
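# To restore the same archive later (sketch):
#   ~/Tools/android-sdk-linux/platform-tools/adb restore ~/Backups/backup.ab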
|
g3ppy/bootystrapping
|
bin/phone_backup.sh
|
Shell
|
gpl-2.0
| 113 |
#!/bin/sh
#
# make-jar-file
# LIB directory should contain required libraries:
# - commons-cli-1.2.jar
# - jimageviewer.jar
LIB='.'
if [ ! -d tmp ]; then
mkdir tmp
else
# shouldn't we ask if we can do that?
rm -rf tmp/*
fi
javac -d tmp -sourcepath src \
-cp $LIB/commons-cli-1.2.jar:$LIB/jimageviewer.jar \
src/sh/luka/fractal/*.java
jar cvfm fractal.jar MANIFEST-ADD.MF -C tmp .
# To run the program, if the required LIBs and fractal.jar are in the same directory, type:
# GUI: java -jar fractal.jar
# CLI: java -cp fractal.jar sh.luka.fractal.cli
|
lbacik/Fractal
|
make.sh
|
Shell
|
gpl-2.0
| 565 |
#!/bin/bash
# called by dracut
cmdline() {
is_zfcp() {
local _dev=$1
local _devpath
_devpath=$(
cd -P /sys/dev/block/"$_dev" || exit
echo "$PWD"
)
local _sdev _scsiid _hostno _lun _wwpn _ccw _port_type
local _allow_lun_scan _is_npiv
_allow_lun_scan=$(cat /sys/module/zfcp/parameters/allow_lun_scan)
[ "${_devpath#*/sd}" == "$_devpath" ] && return 1
_sdev="${_devpath%%/block/*}"
[ -e "${_sdev}"/fcp_lun ] || return 1
_scsiid="${_sdev##*/}"
_hostno="${_scsiid%%:*}"
[ -d /sys/class/fc_host/host"${_hostno}" ] || return 1
_port_type=$(cat /sys/class/fc_host/host"${_hostno}"/port_type)
case "$_port_type" in
NPIV*)
_is_npiv=1
;;
esac
_ccw=$(cat "${_sdev}"/hba_id)
if [ "$_is_npiv" ] && [ "$_allow_lun_scan" = "Y" ]; then
echo "rd.zfcp=${_ccw}"
else
_lun=$(cat "${_sdev}"/fcp_lun)
_wwpn=$(cat "${_sdev}"/wwpn)
echo "rd.zfcp=${_ccw},${_wwpn},${_lun}"
fi
return 0
}
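# Example output (illustrative values): a non-NPIV device yields
#   rd.zfcp=0.0.1700,0x500507630300c562,0x4010400000000000
# while an NPIV port with LUN scanning allowed yields just rd.zfcp=0.0.1700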
[[ $hostonly ]] || [[ $mount_needs ]] && {
for_each_host_dev_and_slaves_all is_zfcp
} | sort | uniq
}
# called by dracut
check() {
local _arch=${DRACUT_ARCH:-$(uname -m)}
local _ccw
[ "$_arch" = "s390" -o "$_arch" = "s390x" ] || return 1
require_binaries /usr/lib/udev/collect || return 1
[[ $hostonly ]] || [[ $mount_needs ]] && {
found=0
for _ccw in /sys/bus/ccw/devices/*/host*; do
[ -d "$_ccw" ] || continue
found=$((found + 1))
done
[ $found -eq 0 ] && return 255
}
return 0
}
# called by dracut
depends() {
echo bash
return 0
}
# called by dracut
install() {
inst_multiple /usr/lib/udev/collect
inst_hook cmdline 30 "$moddir/parse-zfcp.sh"
if [[ $hostonly_cmdline == "yes" ]]; then
local _zfcp
for _zfcp in $(cmdline); do
printf "%s\n" "$_zfcp" >> "${initdir}/etc/cmdline.d/94zfcp.conf"
done
fi
if [[ $hostonly ]]; then
inst_rules_wildcard 51-zfcp-*.rules
inst_rules_wildcard 41-s390x-zfcp-*.rules
fi
}
|
haraldh/dracut
|
modules.d/95zfcp_rules/module-setup.sh
|
Shell
|
gpl-2.0
| 2,253 |
#!/bin/sh
#
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
echo "Hello, Shell"
|
otmarjr/jtreg-fork
|
src/share/test/javatest/regtest/data/autovm/HelloShell.sh
|
Shell
|
gpl-2.0
| 1,070 |
#!/bin/sh
#
# Copyright (c) 2007 Lars Hjemli
#
test_description='Basic porcelain support for submodules
This test tries to verify basic sanity of the init, update and status
subcommands of git submodule.
'
. ./test-lib.sh
test_expect_success 'setup - initial commit' '
>t &&
git add t &&
git commit -m "initial commit" &&
git branch initial
'
test_expect_success 'configuration parsing' '
test_when_finished "rm -f .gitmodules" &&
cat >.gitmodules <<-\EOF &&
[submodule "s"]
path
ignore
EOF
test_must_fail git status
'
test_expect_success 'setup - repository in init subdirectory' '
mkdir init &&
(
cd init &&
git init &&
echo a >a &&
git add a &&
git commit -m "submodule commit 1" &&
git tag -a -m "rev-1" rev-1
)
'
test_expect_success 'setup - commit with gitlink' '
echo a >a &&
echo z >z &&
git add a init z &&
git commit -m "super commit 1"
'
test_expect_success 'setup - hide init subdirectory' '
mv init .subrepo
'
test_expect_success 'setup - repository to add submodules to' '
git init addtest &&
git init addtest-ignore
'
# The 'submodule add' tests need some repository to add as a submodule.
# The trash directory is a good one as any. We need to canonicalize
# the name, though, as some tests compare it to the absolute path git
# generates, which will expand symbolic links.
submodurl=$(pwd -P)
listbranches() {
git for-each-ref --format='%(refname)' 'refs/heads/*'
}
inspect() {
dir=$1 &&
dotdot="${2:-..}" &&
(
cd "$dir" &&
listbranches >"$dotdot/heads" &&
{ git symbolic-ref HEAD || :; } >"$dotdot/head" &&
git rev-parse HEAD >"$dotdot/head-sha1" &&
git update-index --refresh &&
git diff-files --exit-code &&
git clean -n -d -x >"$dotdot/untracked"
)
}
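# Example: 'inspect addtest/submod ../..' records the submodule's branches,
# HEAD, HEAD sha1 and untracked files into heads/head/head-sha1/untracked in
# the superproject, for later comparison with test_cmp.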
test_expect_success 'submodule add' '
echo "refs/heads/master" >expect &&
>empty &&
(
cd addtest &&
git submodule add -q "$submodurl" submod >actual &&
test_must_be_empty actual &&
echo "gitdir: ../.git/modules/submod" >expect &&
test_cmp expect submod/.git &&
(
cd submod &&
git config core.worktree >actual &&
echo "../../../submod" >expect &&
test_cmp expect actual &&
rm -f actual expect
) &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/submod ../.. &&
test_cmp expect heads &&
test_cmp expect head &&
test_cmp empty untracked
'
test_expect_success 'submodule add to .gitignored path fails' '
(
cd addtest-ignore &&
cat <<-\EOF >expect &&
The following path is ignored by one of your .gitignore files:
submod
Use -f if you really want to add it.
EOF
# Does not use test_commit due to the ignore
echo "*" > .gitignore &&
git add --force .gitignore &&
git commit -m"Ignore everything" &&
! git submodule add "$submodurl" submod >actual 2>&1 &&
test_i18ncmp expect actual
)
'
test_expect_success 'submodule add to .gitignored path with --force' '
(
cd addtest-ignore &&
git submodule add --force "$submodurl" submod
)
'
test_expect_success 'submodule add --branch' '
echo "refs/heads/initial" >expect-head &&
cat <<-\EOF >expect-heads &&
refs/heads/initial
refs/heads/master
EOF
>empty &&
(
cd addtest &&
git submodule add -b initial "$submodurl" submod-branch &&
test "initial" = "$(git config -f .gitmodules submodule.submod-branch.branch)" &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/submod-branch ../.. &&
test_cmp expect-heads heads &&
test_cmp expect-head head &&
test_cmp empty untracked
'
test_expect_success 'submodule add with ./ in path' '
echo "refs/heads/master" >expect &&
>empty &&
(
cd addtest &&
git submodule add "$submodurl" ././dotsubmod/./frotz/./ &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/dotsubmod/frotz ../../.. &&
test_cmp expect heads &&
test_cmp expect head &&
test_cmp empty untracked
'
test_expect_success 'submodule add with // in path' '
echo "refs/heads/master" >expect &&
>empty &&
(
cd addtest &&
git submodule add "$submodurl" slashslashsubmod///frotz// &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/slashslashsubmod/frotz ../../.. &&
test_cmp expect heads &&
test_cmp expect head &&
test_cmp empty untracked
'
test_expect_success 'submodule add with /.. in path' '
echo "refs/heads/master" >expect &&
>empty &&
(
cd addtest &&
git submodule add "$submodurl" dotdotsubmod/../realsubmod/frotz/.. &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/realsubmod ../.. &&
test_cmp expect heads &&
test_cmp expect head &&
test_cmp empty untracked
'
test_expect_success 'submodule add with ./, /.. and // in path' '
echo "refs/heads/master" >expect &&
>empty &&
(
cd addtest &&
git submodule add "$submodurl" dot/dotslashsubmod/./../..////realsubmod2/a/b/c/d/../../../../frotz//.. &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/realsubmod2 ../.. &&
test_cmp expect heads &&
test_cmp expect head &&
test_cmp empty untracked
'
test_expect_success 'submodule add in subdirectory' '
echo "refs/heads/master" >expect &&
>empty &&
mkdir addtest/sub &&
(
cd addtest/sub &&
git submodule add "$submodurl" ../realsubmod3 &&
git submodule init
) &&
rm -f heads head untracked &&
inspect addtest/realsubmod3 ../.. &&
test_cmp expect heads &&
test_cmp expect head &&
test_cmp empty untracked
'
test_expect_success 'submodule add in subdirectory with relative path should fail' '
(
cd addtest/sub &&
test_must_fail git submodule add ../../ submod3 2>../../output.err
) &&
test_i18ngrep toplevel output.err
'
test_expect_success 'setup - add an example entry to .gitmodules' '
GIT_CONFIG=.gitmodules \
git config submodule.example.url git://example.com/init.git
'
test_expect_success 'status should fail for unmapped paths' '
test_must_fail git submodule status
'
test_expect_success 'setup - map path in .gitmodules' '
cat <<\EOF >expect &&
[submodule "example"]
url = git://example.com/init.git
path = init
EOF
GIT_CONFIG=.gitmodules git config submodule.example.path init &&
test_cmp expect .gitmodules
'
test_expect_success 'status should only print one line' '
git submodule status >lines &&
test_line_count = 1 lines
'
test_expect_success 'setup - fetch commit name from submodule' '
rev1=$(cd .subrepo && git rev-parse HEAD) &&
printf "rev1: %s\n" "$rev1" &&
test -n "$rev1"
'
test_expect_success 'status should initially be "missing"' '
git submodule status >lines &&
grep "^-$rev1" lines
'
test_expect_success 'init should register submodule url in .git/config' '
echo git://example.com/init.git >expect &&
git submodule init &&
git config submodule.example.url >url &&
git config submodule.example.url ./.subrepo &&
test_cmp expect url
'
test_failure_with_unknown_submodule () {
test_must_fail git submodule $1 no-such-submodule 2>output.err &&
grep "^error: .*no-such-submodule" output.err
}
test_expect_success 'init should fail with unknown submodule' '
test_failure_with_unknown_submodule init
'
test_expect_success 'update should fail with unknown submodule' '
test_failure_with_unknown_submodule update
'
test_expect_success 'status should fail with unknown submodule' '
test_failure_with_unknown_submodule status
'
test_expect_success 'sync should fail with unknown submodule' '
test_failure_with_unknown_submodule sync
'
test_expect_success 'update should fail when path is used by a file' '
echo hello >expect &&
echo "hello" >init &&
test_must_fail git submodule update &&
test_cmp expect init
'
test_expect_success 'update should fail when path is used by a nonempty directory' '
echo hello >expect &&
rm -fr init &&
mkdir init &&
echo "hello" >init/a &&
test_must_fail git submodule update &&
test_cmp expect init/a
'
test_expect_success 'update should work when path is an empty dir' '
rm -fr init &&
rm -f head-sha1 &&
echo "$rev1" >expect &&
mkdir init &&
git submodule update -q >update.out &&
test_must_be_empty update.out &&
inspect init &&
test_cmp expect head-sha1
'
test_expect_success 'status should be "up-to-date" after update' '
git submodule status >list &&
grep "^ $rev1" list
'
test_expect_success 'status "up-to-date" from subdirectory' '
mkdir -p sub &&
(
cd sub &&
git submodule status >../list
) &&
grep "^ $rev1" list &&
grep "\\.\\./init" list
'
test_expect_success 'status "up-to-date" from subdirectory with path' '
mkdir -p sub &&
(
cd sub &&
git submodule status ../init >../list
) &&
grep "^ $rev1" list &&
grep "\\.\\./init" list
'
test_expect_success 'status should be "modified" after submodule commit' '
(
cd init &&
echo b >b &&
git add b &&
git commit -m "submodule commit 2"
) &&
rev2=$(cd init && git rev-parse HEAD) &&
test -n "$rev2" &&
git submodule status >list &&
grep "^+$rev2" list
'
test_expect_success 'the --cached sha1 should be rev1' '
git submodule --cached status >list &&
grep "^+$rev1" list
'
test_expect_success 'git diff should report the SHA1 of the new submodule commit' '
git diff >diff &&
grep "^+Subproject commit $rev2" diff
'
test_expect_success 'update should checkout rev1' '
rm -f head-sha1 &&
echo "$rev1" >expect &&
git submodule update init &&
inspect init &&
test_cmp expect head-sha1
'
test_expect_success 'status should be "up-to-date" after update' '
git submodule status >list &&
grep "^ $rev1" list
'
test_expect_success 'checkout superproject with subproject already present' '
git checkout initial &&
git checkout master
'
test_expect_success 'apply submodule diff' '
>empty &&
git branch second &&
(
cd init &&
echo s >s &&
git add s &&
git commit -m "change subproject"
) &&
git update-index --add init &&
git commit -m "change init" &&
git format-patch -1 --stdout >P.diff &&
git checkout second &&
git apply --index P.diff &&
git diff --cached master >staged &&
test_cmp empty staged
'
test_expect_success 'update --init' '
mv init init2 &&
git config -f .gitmodules submodule.example.url "$(pwd)/init2" &&
git config --remove-section submodule.example &&
test_must_fail git config submodule.example.url &&
git submodule update init > update.out &&
cat update.out &&
test_i18ngrep "not initialized" update.out &&
test_must_fail git rev-parse --resolve-git-dir init/.git &&
git submodule update --init init &&
git rev-parse --resolve-git-dir init/.git
'
test_expect_success 'update --init from subdirectory' '
mv init init2 &&
git config -f .gitmodules submodule.example.url "$(pwd)/init2" &&
git config --remove-section submodule.example &&
test_must_fail git config submodule.example.url &&
mkdir -p sub &&
(
cd sub &&
git submodule update ../init >update.out &&
cat update.out &&
test_i18ngrep "not initialized" update.out &&
test_must_fail git rev-parse --resolve-git-dir ../init/.git &&
git submodule update --init ../init
) &&
git rev-parse --resolve-git-dir init/.git
'
test_expect_success 'do not add files from a submodule' '
git reset --hard &&
test_must_fail git add init/a
'
test_expect_success 'gracefully add submodule with a trailing slash' '
git reset --hard &&
git commit -m "commit subproject" init &&
(cd init &&
echo b > a) &&
git add init/ &&
git diff --exit-code --cached init &&
commit=$(cd init &&
git commit -m update a >/dev/null &&
git rev-parse HEAD) &&
git add init/ &&
test_must_fail git diff --exit-code --cached init &&
test $commit = $(git ls-files --stage |
sed -n "s/^160000 \([^ ]*\).*/\1/p")
'
test_expect_success 'ls-files gracefully handles trailing slash' '
test "init" = "$(git ls-files init/)"
'
test_expect_success 'moving to a commit without submodule does not leave empty dir' '
rm -rf init &&
mkdir init &&
git reset --hard &&
git checkout initial &&
test ! -d init &&
git checkout second
'
test_expect_success 'submodule <invalid-subcommand> fails' '
test_must_fail git submodule no-such-subcommand
'
test_expect_success 'add submodules without specifying an explicit path' '
mkdir repo &&
(
cd repo &&
git init &&
echo r >r &&
git add r &&
git commit -m "repo commit 1"
) &&
git clone --bare repo/ bare.git &&
(
cd addtest &&
git submodule add "$submodurl/repo" &&
git config -f .gitmodules submodule.repo.path repo &&
git submodule add "$submodurl/bare.git" &&
git config -f .gitmodules submodule.bare.path bare
)
'
test_expect_success 'add should fail when path is used by a file' '
(
cd addtest &&
touch file &&
test_must_fail git submodule add "$submodurl/repo" file
)
'
test_expect_success 'add should fail when path is used by an existing directory' '
(
cd addtest &&
mkdir empty-dir &&
test_must_fail git submodule add "$submodurl/repo" empty-dir
)
'
test_expect_success 'use superproject as upstream when path is relative and no url is set there' '
(
cd addtest &&
git submodule add ../repo relative &&
test "$(git config -f .gitmodules submodule.relative.url)" = ../repo &&
git submodule sync relative &&
test "$(git config submodule.relative.url)" = "$submodurl/repo"
)
'
test_expect_success 'set up for relative path tests' '
mkdir reltest &&
(
cd reltest &&
git init &&
mkdir sub &&
(
cd sub &&
git init &&
test_commit foo
) &&
git add sub &&
git config -f .gitmodules submodule.sub.path sub &&
git config -f .gitmodules submodule.sub.url ../subrepo &&
cp .git/config pristine-.git-config &&
cp .gitmodules pristine-.gitmodules
)
'
test_expect_success '../subrepo works with URL - ssh://hostname/repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url ssh://hostname/repo &&
git submodule init &&
test "$(git config submodule.sub.url)" = ssh://hostname/subrepo
)
'
test_expect_success '../subrepo works with port-qualified URL - ssh://hostname:22/repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url ssh://hostname:22/repo &&
git submodule init &&
test "$(git config submodule.sub.url)" = ssh://hostname:22/subrepo
)
'
# About the choice of the path in the next test:
# - double-slash side-steps path mangling issues on Windows
# - it is still an absolute local path
# - there cannot be a server with a blank in its name just in case the
# path is used erroneously to access a //server/share style path
test_expect_success '../subrepo path works with local path - //somewhere else/repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url "//somewhere else/repo" &&
git submodule init &&
test "$(git config submodule.sub.url)" = "//somewhere else/subrepo"
)
'
test_expect_success '../subrepo works with file URL - file:///tmp/repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url file:///tmp/repo &&
git submodule init &&
test "$(git config submodule.sub.url)" = file:///tmp/subrepo
)
'
test_expect_success '../subrepo works with helper URL- helper:://hostname/repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url helper:://hostname/repo &&
git submodule init &&
test "$(git config submodule.sub.url)" = helper:://hostname/subrepo
)
'
test_expect_success '../subrepo works with scp-style URL - user@host:repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
git config remote.origin.url user@host:repo &&
git submodule init &&
test "$(git config submodule.sub.url)" = user@host:subrepo
)
'
test_expect_success '../subrepo works with scp-style URL - user@host:path/to/repo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url user@host:path/to/repo &&
git submodule init &&
test "$(git config submodule.sub.url)" = user@host:path/to/subrepo
)
'
test_expect_success '../subrepo works with relative local path - foo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url foo &&
# actual: fails with an error
git submodule init &&
test "$(git config submodule.sub.url)" = subrepo
)
'
test_expect_success '../subrepo works with relative local path - foo/bar' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url foo/bar &&
git submodule init &&
test "$(git config submodule.sub.url)" = foo/subrepo
)
'
test_expect_success '../subrepo works with relative local path - ./foo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url ./foo &&
git submodule init &&
test "$(git config submodule.sub.url)" = subrepo
)
'
test_expect_success '../subrepo works with relative local path - ./foo/bar' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url ./foo/bar &&
git submodule init &&
test "$(git config submodule.sub.url)" = foo/subrepo
)
'
test_expect_success '../subrepo works with relative local path - ../foo' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url ../foo &&
git submodule init &&
test "$(git config submodule.sub.url)" = ../subrepo
)
'
test_expect_success '../subrepo works with relative local path - ../foo/bar' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
git config remote.origin.url ../foo/bar &&
git submodule init &&
test "$(git config submodule.sub.url)" = ../foo/subrepo
)
'
test_expect_success '../bar/a/b/c works with relative local path - ../foo/bar.git' '
(
cd reltest &&
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
mkdir -p a/b/c &&
(cd a/b/c; git init) &&
git config remote.origin.url ../foo/bar.git &&
git submodule add ../bar/a/b/c ./a/b/c &&
git submodule init &&
test "$(git config submodule.a/b/c.url)" = ../foo/bar/a/b/c
)
'
test_expect_success 'moving the superproject does not break submodules' '
(
cd addtest &&
git submodule status >expect
) &&
mv addtest addtest2 &&
(
cd addtest2 &&
git submodule status >actual &&
test_cmp expect actual
)
'
test_expect_success 'submodule add --name allows to replace a submodule with another at the same path' '
(
cd addtest2 &&
(
cd repo &&
echo "$submodurl/repo" >expect &&
git config remote.origin.url >actual &&
test_cmp expect actual &&
echo "gitdir: ../.git/modules/repo" >expect &&
test_cmp expect .git
) &&
rm -rf repo &&
git rm repo &&
git submodule add -q --name repo_new "$submodurl/bare.git" repo >actual &&
test_must_be_empty actual &&
echo "gitdir: ../.git/modules/submod" >expect &&
test_cmp expect submod/.git &&
(
cd repo &&
echo "$submodurl/bare.git" >expect &&
git config remote.origin.url >actual &&
test_cmp expect actual &&
echo "gitdir: ../.git/modules/repo_new" >expect &&
test_cmp expect .git
) &&
echo "repo" >expect &&
git config -f .gitmodules submodule.repo.path >actual &&
test_cmp expect actual &&
git config -f .gitmodules submodule.repo_new.path >actual &&
test_cmp expect actual &&
echo "$submodurl/repo" >expect &&
git config -f .gitmodules submodule.repo.url >actual &&
test_cmp expect actual &&
echo "$submodurl/bare.git" >expect &&
git config -f .gitmodules submodule.repo_new.url >actual &&
test_cmp expect actual &&
echo "$submodurl/repo" >expect &&
git config submodule.repo.url >actual &&
test_cmp expect actual &&
echo "$submodurl/bare.git" >expect &&
git config submodule.repo_new.url >actual &&
test_cmp expect actual
)
'
test_expect_success 'submodule add with an existing name fails unless forced' '
(
cd addtest2 &&
rm -rf repo &&
git rm repo &&
test_must_fail git submodule add -q --name repo_new "$submodurl/repo.git" repo &&
test ! -d repo &&
echo "repo" >expect &&
git config -f .gitmodules submodule.repo_new.path >actual &&
test_cmp expect actual &&
echo "$submodurl/bare.git" >expect &&
git config -f .gitmodules submodule.repo_new.url >actual &&
test_cmp expect actual &&
echo "$submodurl/bare.git" >expect &&
git config submodule.repo_new.url >actual &&
test_cmp expect actual &&
git submodule add -f -q --name repo_new "$submodurl/repo.git" repo &&
test -d repo &&
echo "repo" >expect &&
git config -f .gitmodules submodule.repo_new.path >actual &&
test_cmp expect actual &&
echo "$submodurl/repo.git" >expect &&
git config -f .gitmodules submodule.repo_new.url >actual &&
test_cmp expect actual &&
echo "$submodurl/repo.git" >expect &&
git config submodule.repo_new.url >actual &&
test_cmp expect actual
)
'
test_expect_success 'set up a second submodule' '
git submodule add ./init2 example2 &&
git commit -m "submodule example2 added"
'
test_expect_success 'submodule deinit should remove the whole submodule section from .git/config' '
git config submodule.example.foo bar &&
git config submodule.example2.frotz nitfol &&
git submodule deinit init &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test -n "$(git config --get-regexp "submodule\.example2\.")" &&
test -f example2/.git &&
rmdir init
'
test_expect_success 'submodule deinit from subdirectory' '
git submodule update --init &&
git config submodule.example.foo bar &&
mkdir -p sub &&
(
cd sub &&
git submodule deinit ../init >../output
) &&
grep "\\.\\./init" output &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test -n "$(git config --get-regexp "submodule\.example2\.")" &&
test -f example2/.git &&
rmdir init
'
test_expect_success 'submodule deinit . deinits all initialized submodules' '
git submodule update --init &&
git config submodule.example.foo bar &&
git config submodule.example2.frotz nitfol &&
test_must_fail git submodule deinit &&
git submodule deinit . >actual &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test -z "$(git config --get-regexp "submodule\.example2\.")" &&
test_i18ngrep "Cleared directory .init" actual &&
test_i18ngrep "Cleared directory .example2" actual &&
rmdir init example2
'
test_expect_success 'submodule deinit deinits a submodule when its work tree is missing or empty' '
git submodule update --init &&
rm -rf init example2/* example2/.git &&
git submodule deinit init example2 >actual &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test -z "$(git config --get-regexp "submodule\.example2\.")" &&
test_i18ngrep ! "Cleared directory .init" actual &&
test_i18ngrep "Cleared directory .example2" actual &&
rmdir init
'
test_expect_success 'submodule deinit fails when the submodule contains modifications unless forced' '
git submodule update --init &&
echo X >>init/s &&
test_must_fail git submodule deinit init &&
test -n "$(git config --get-regexp "submodule\.example\.")" &&
test -f example2/.git &&
git submodule deinit -f init >actual &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test_i18ngrep "Cleared directory .init" actual &&
rmdir init
'
test_expect_success 'submodule deinit fails when the submodule contains untracked files unless forced' '
git submodule update --init &&
echo X >>init/untracked &&
test_must_fail git submodule deinit init &&
test -n "$(git config --get-regexp "submodule\.example\.")" &&
test -f example2/.git &&
git submodule deinit -f init >actual &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test_i18ngrep "Cleared directory .init" actual &&
rmdir init
'
test_expect_success 'submodule deinit fails when the submodule HEAD does not match unless forced' '
git submodule update --init &&
(
cd init &&
git checkout HEAD^
) &&
test_must_fail git submodule deinit init &&
test -n "$(git config --get-regexp "submodule\.example\.")" &&
test -f example2/.git &&
git submodule deinit -f init >actual &&
test -z "$(git config --get-regexp "submodule\.example\.")" &&
test_i18ngrep "Cleared directory .init" actual &&
rmdir init
'
test_expect_success 'submodule deinit is silent when used on an uninitialized submodule' '
git submodule update --init &&
git submodule deinit init >actual &&
test_i18ngrep "Submodule .example. (.*) unregistered for path .init" actual &&
test_i18ngrep "Cleared directory .init" actual &&
git submodule deinit init >actual &&
test_i18ngrep ! "Submodule .example. (.*) unregistered for path .init" actual &&
test_i18ngrep "Cleared directory .init" actual &&
git submodule deinit . >actual &&
test_i18ngrep ! "Submodule .example. (.*) unregistered for path .init" actual &&
test_i18ngrep "Submodule .example2. (.*) unregistered for path .example2" actual &&
test_i18ngrep "Cleared directory .init" actual &&
git submodule deinit . >actual &&
test_i18ngrep ! "Submodule .example. (.*) unregistered for path .init" actual &&
test_i18ngrep ! "Submodule .example2. (.*) unregistered for path .example2" actual &&
test_i18ngrep "Cleared directory .init" actual &&
rmdir init example2
'
test_expect_success 'submodule deinit fails when submodule has a .git directory even when forced' '
git submodule update --init &&
(
cd init &&
rm .git &&
cp -R ../.git/modules/example .git &&
GIT_WORK_TREE=. git config --unset core.worktree
) &&
test_must_fail git submodule deinit init &&
test_must_fail git submodule deinit -f init &&
test -d init/.git &&
test -n "$(git config --get-regexp "submodule\.example\.")"
'
test_expect_success 'submodule with UTF-8 name' '
svname=$(printf "\303\245 \303\244\303\266") &&
mkdir "$svname" &&
(
cd "$svname" &&
git init &&
>sub &&
git add sub &&
git commit -m "init sub"
) &&
test_config core.precomposeunicode true &&
git submodule add ./"$svname" &&
git submodule >&2 &&
test -n "$(git submodule | grep "$svname")"
'
test_expect_success 'submodule add clone shallow submodule' '
mkdir super &&
pwd=$(pwd) &&
(
cd super &&
git init &&
git submodule add --depth=1 file://"$pwd"/example2 submodule &&
(
cd submodule &&
test 1 = $(git log --oneline | wc -l)
)
)
'
test_done
|
agreco/git
|
t/t7400-submodule-basic.sh
|
Shell
|
gpl-2.0
| 26,524 |
#!/bin/bash
awk '/mean/{if ($5 < -10) printf "%s **\n",$0; else print $0}'
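# Example (hypothetical kernbench output line): the input
#   'Elapsed Time mean 95.2 -12.4'
# is printed with a trailing ' **' because field 5 is below -10.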
|
wjn740/jtools
|
comparing_tools_plugin/kernbench.sh
|
Shell
|
gpl-2.0
| 76 |
#! /bin/sh
# Copyright (C) 2002-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Make sure that man pages listed in man_MANS are installed and
# renamed as documented.
. test-init.sh
cat >> configure.ac <<'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
man_MANS = foo.2
man4_MANS = foo.4 bar.man
END
: > foo.2
: > foo.4
: > bar.man
$ACLOCAL
$AUTOCONF
$AUTOMAKE
cwd=$(pwd) || fatal_ "getting current working directory"
# Let's play with $DESTDIR too, it shouldn't hurt.
./configure --prefix='' --mandir=/man
$MAKE DESTDIR="$cwd/_inst" install
test -f _inst/man/man2/foo.2
test -f _inst/man/man4/foo.4
test -f _inst/man/man4/bar.4
$MAKE DESTDIR="$cwd/_inst" uninstall
test ! -e _inst/man/man2/foo.2
test ! -e _inst/man/man4/foo.4
test ! -e _inst/man/man4/bar.4
:
|
pylam/automake
|
t/man2.sh
|
Shell
|
gpl-2.0
| 1,383 |
#!/bin/bash
# Core Functions
shopt -s expand_aliases
# echoDebug is called throughout but never defined; a minimal stand-in so the
# script actually runs (its intended behavior is an assumption):
echoDebug () { echo "DEBUG: $*" >&2 ; }
alias testAlias='true ; echoDebug "Alias Echo" ; true'
a ()
{
echoDebug "Some Debug Message"
testAlias
caller
b "$@"
}
b ()
{
echoDebug "Some Debug Message"
testAlias
caller
c "$@"
}
c ()
{
echoDebug "Some Debug Message"
testAlias
caller
set | grep FUNCNAME=
}
echoDebug "Some Debug Message"
a "$@"
testAlias
caller
|
BuildReleaseBeFree/DiamondLang
|
scratch2.sh
|
Shell
|
gpl-2.0
| 416 |
#!/bin/sh
# Usage: build-dpkg.sh [target dir]
# The default target directory is the current directory. If it is not
# supplied and the current directory is not empty, it will issue an error in
# order to avoid polluting the current directory after a test run.
#
# The program will setup the dpkg building environment and ultimately call
# dpkg-buildpackage with the appropiate parameters.
#
# Bail out on errors, be strict
set -ue
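# Example invocation (hypothetical target directory): build unsigned binary
# packages into ./pkgs with
#   mkdir pkgs && utils/build-dpkg.sh --nosign --binary pkgs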
# Examine parameters
go_out="$(getopt --options "k:KbBSnT"\
--longoptions key:,nosign,binary,binarydep,source,dummy,notransitional \
--name "$(basename "$0")" -- "$@")"
test $? -eq 0 || exit 1
eval set -- $go_out
BUILDPKG_KEY=''
DPKG_BINSRC=''
DUMMY=''
NOTRANSITIONAL=''
for arg
do
case "$arg" in
-- ) shift; break;;
-k | --key ) shift; BUILDPKG_KEY="-pgpg -k$1"; shift;;
-K | --nosign ) shift; BUILDPKG_KEY="-uc -us";;
-b | --binary ) shift; DPKG_BINSRC='-b';;
-B | --binarydep ) shift; DPKG_BINSRC='-B';;
-S | --source ) shift; DPKG_BINSRC='-S';;
-n | --dummy ) shift; DUMMY='yes';;
-T | --notransitional ) shift; NOTRANSITIONAL='yes';;
esac
done
# Working directory
if test "$#" -eq 0
then
WORKDIR="$(pwd)"
# Check that the current directory is not empty
if test "x$(echo *)" != "x*"
then
echo >&2 \
"Current directory is not empty. Use $0 . to force build in ."
exit 1
fi
elif test "$#" -eq 1
then
WORKDIR="$1"
# Check that the provided directory exists and is a directory
if ! test -d "$WORKDIR"
then
echo >&2 "$WORKDIR is not a directory"
exit 1
fi
else
echo >&2 "Usage: $0 [target dir]"
exit 1
fi
SOURCEDIR="$(cd $(dirname "$0"); cd ..; pwd)"
# Read XTRABACKUP_VERSION from the VERSION file
. $SOURCEDIR/VERSION
DEBIAN_VERSION="$(lsb_release -sc)"
REVISION="$(cd "$SOURCEDIR"; bzr revno)"
FULL_VERSION="$XTRABACKUP_VERSION-$REVISION.$DEBIAN_VERSION"
# Build information
export CC=${CC:-gcc}
export CXX=${CXX:-g++}
export CFLAGS="-fPIC -Wall -O3 -g -static-libgcc -fno-omit-frame-pointer"
export CXXFLAGS="-O2 -fno-omit-frame-pointer -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2"
export MAKE_JFLAG=-j4
export DEB_BUILD_OPTIONS='debug nocheck'
export DEB_CFLAGS_APPEND="$CFLAGS -DXTRABACKUP_REVISION=\\\"$REVISION\\\""
export DEB_CXXFLAGS_APPEND="$CXXFLAGS -DXTRABACKUP_REVISION=\\\"$REVISION\\\""
export DEB_DUMMY="$DUMMY"
# Build
(
# Make a copy in workdir and copy debian files
cd "$WORKDIR"
bzr export "percona-xtrabackup-$FULL_VERSION" "$SOURCEDIR"
(
cd "percona-xtrabackup-$FULL_VERSION"
# Move the debian dir to the appropriate place
cp -a "$SOURCEDIR/utils/debian/" .
# Don't build transitional packages if requested
if test "x$NOTRANSITIONAL" = "xyes"
then
sed -i '/Package: xtrabackup/,/^$/d' debian/control
fi
# Update distribution
dch -m -D "$DEBIAN_VERSION" --force-distribution -v "$XTRABACKUP_VERSION-$REVISION.$DEBIAN_VERSION" 'Update distribution'
# Issue dpkg-buildpackage command
dpkg-buildpackage $DPKG_BINSRC $BUILDPKG_KEY
)
rm -rf "percona-xtrabackup-$FULL_VERSION"
)
|
metacloud/percona-xtrabackup
|
utils/build-dpkg.sh
|
Shell
|
gpl-2.0
| 3,208 |
#!/bin/sh
# Copyright (C) 2010-2013 OpenWrt.org
. /lib/functions/leds.sh
. /lib/ramips.sh
get_status_led() {
case $(ramips_board_name) in
3g-6200n)
status_led="edimax:green:power"
;;
3g300m | w150m)
status_led="tenda:blue:ap"
;;
ar725w)
status_led="ar725w:green:power"
;;
awapn2403)
status_led="asiarf:green:wps"
;;
argus-atp52b)
status_led="argus-atp52b:green:run"
;;
asl26555)
status_led="asl26555:green:power"
;;
br6524n)
status_led="edimax:blue:power"
;;
br6425 | br-6475nd)
status_led="edimax:green:power"
;;
cy-swr1100)
status_led="samsung:blue:wps"
;;
d105)
status_led="d105:red:power"
;;
dcs-930 | dir-300-b1 | dir-600-b1 | dir-600-b2 | dir-610-a1 | dir-615-h1 | dir-615-d | dir-620-a1| dir-620-d1| dir-300-b7| dir-320-b1)
status_led="d-link:green:status"
;;
dir-645)
status_led="d-link:green:wps"
;;
dap-1350)
status_led="d-link:blue:power"
;;
esr-9753)
status_led="esr-9753:orange:power"
;;
f5d8235-v2)
status_led="f5d8235v2:blue:router"
;;
fonera20n)
status_led="fonera20n:green:power"
;;
ip2202)
status_led="ip2202:green:run"
;;
rt-n13u)
status_led="rt-n13u:power"
;;
hlk-rm04)
status_led="hlk-rm04:red:power"
;;
all0239-3g|\
hw550-3g)
status_led="hw550-3g:green:status"
;;
m3)
status_led="m3:blue:status"
;;
m4)
status_led="m4:blue:status"
;;
mlw221|\
mlwg2)
status_led="kingston:blue:system"
;;
mofi3500-3gn)
status_led="mofi3500-3gn:green:status"
;;
mpr-a1)
status_led="hame:red:power"
;;
mpr-a2)
status_led="hame:red:power"
;;
nbg-419n)
status_led="nbg-419n:green:power"
;;
nw718)
status_led="nw718:amber:cpu"
;;
omni-emb|\
omni-emb-hpm)
status_led="emb:green:status"
;;
psr-680w)
status_led="psr-680w:red:wan"
;;
pwh2004)
status_led="pwh2004:green:power"
;;
px4885)
status_led="7links:orange:wifi"
;;
rt-n15)
status_led="rt-n15:blue:power"
;;
rt-n10-plus)
status_led="asus:green:wps"
;;
rt-n56u | wl-330n | wl-330n3g)
status_led="asus:blue:power"
;;
rut5xx)
status_led="rut5xx:green:status"
;;
sl-r7205)
status_led="sl-r7205:green:status"
;;
tew-691gr|\
tew-692gr)
status_led="trendnet:green:wps"
;;
v11st-fe)
status_led="v11st-fe:green:status"
;;
v22rw-2x2)
status_led="v22rw-2x2:green:security"
;;
vocore)
status_led="vocore:green:status"
;;
w306r-v20)
status_led="w306r-v20:green:sys"
;;
w502u)
status_led="alfa:blue:wps"
;;
wcr-150gn)
status_led="wcr150gn:amber:power"
;;
whr-g300n)
status_led="whr-g300n:green:router"
;;
wmr300 | \
whr-300hp2 | \
whr-600d)
status_led="buffalo:green:status"
;;
wli-tx4-ag300n)
status_led="buffalo:blue:power"
;;
wl-351)
status_led="wl-351:amber:power"
;;
wr512-3gn)
status_led="wr512:green:wps"
;;
wr8305rt)
status_led="wr8305rt:sys"
;;
wnce2001)
status_led="netgear:green:power"
;;
mzk-w300nh2)
status_led="mzkw300nh2:green:power"
;;
ur-326n4g)
status_led="ur326:green:wps"
;;
ur-336un)
status_led="ur336:green:wps"
;;
x5)
status_led="x5:green:power"
;;
x8)
status_led="x8:green:power"
;;
xdxrn502j)
status_led="xdxrn502j:green:power"
;;
f7c027)
status_led="belkin:orange:status"
;;
na930)
status_led="na930:blue:power"
;;
y1 | \
y1s)
status_led="lenovo:blue:power"
;;
wrtnode)
status_led="wrtnode:blue:indicator"
;;
esac
}
set_state() {
get_status_led
case "$1" in
preinit)
status_led_blink_preinit
;;
failsafe)
status_led_blink_failsafe
;;
done)
status_led_on
;;
esac
}
|
RuanJG/wrtnode
|
target/linux/ramips/base-files/etc/diag.sh
|
Shell
|
gpl-2.0
| 3,564 |
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
##
## wdb - weather and water data storage
##
## Copyright (C) 2007 met.no
##
## Contact information:
## Norwegian Meteorological Institute
## Box 43 Blindern
## 0313 OSLO
## NORWAY
## E-mail: [email protected]
##
## This is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#!/bin/sh
mkdir -p __WDB_LOGDIR__
# Remove Test Data
$PSQL -q <<EOF
\o __WDB_LOGDIR__/wdb_test_gribLoad.log
set role wdb_clean;
DELETE FROM __WDB_SCHEMA__.gridvalue WHERE dataproviderid >= 0 AND dataproviderid < 100;
EOF
|
metno/wdb
|
test/install/loadingProgram/xmlLoad/tearDown.in.sh
|
Shell
|
gpl-2.0
| 833 |
#!/bin/sh
# Check out a fresh copy of jQuery
git clone git://github.com/jquery/jquery.git $1
# Copy the dummy test case file in
cp $2.html $1/index.html
# Build a copy of the jQuery test suite
cd $1 && make
|
tasos7804/TestingJS
|
tools/gen.sh
|
Shell
|
gpl-2.0
| 206 |
#!/bin/bash
set -e
current="$(curl -sSL 'https://api.github.com/repos/modxcms/revolution/tags' | sed -n 's/^.*"name": "v\([^"]*\)-pl".*$/\1/p' | head -n1)"
curl -o modx.zip -sSL https://modx.com/download/direct/modx-$current-pl.zip
sha1="$(sha1sum modx.zip | sed -r 's/ .*//')"
for variant in apache fpm; do
(
set -x
sed -ri '
s/^(ENV MODX_VERSION) .*/\1 '"$current"'/;
s/^(ENV MODX_SHA1) .*/\1 '"$sha1"'/;
' "$variant/Dockerfile"
cp docker-entrypoint.sh "$variant/docker-entrypoint.sh"
)
done
rm modx.zip
|
vh/docker-modx
|
update.sh
|
Shell
|
gpl-2.0
| 529 |
#!/bin/bash
cd resumenes
for file in *.aux ; do
bibtex `basename $file .aux`
done
cd ..
# http://stackoverflow.com/questions/2765209/latex-bibliography-per-chapter
|
JornadasR/VJornadas
|
chapterbib.sh
|
Shell
|
gpl-2.0
| 165 |
#!/bin/bash
# started from the tedlium recipe with a few edits
set -e -o pipefail
# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=17
nj=30
decode_nj=30
min_seg_len=1.55
chunk_left_context=40
chunk_right_context=0
label_delay=5
xent_regularize=0.1
train_set=train
gmm=tri2b # the gmm for the target data
num_threads_ubm=32
nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned
# decode options
extra_left_context=50
extra_right_context=0
frames_per_chunk=150
# The rest are configs specific to this script. Most of the parameters
# are just hardcoded at this level, in the commands below.
train_stage=-10
tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration.
tdnn_lstm_affix=1a #affix for TDNN-LSTM directory, e.g. "a" or "b", in case we change the configuration.
common_egs_dir= # you can set this to use previously dumped egs.
# End configuration section.
echo "$0 $@" # Print the command line for logging
. cmd.sh
. ./path.sh
. ./utils/parse_options.sh
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
local/nnet3/run_ivector_common.sh --stage $stage \
--nj $nj \
--min-seg-len $min_seg_len \
--train-set $train_set \
--gmm $gmm \
--num-threads-ubm $num_threads_ubm \
--nnet3-affix "$nnet3_affix"
gmm_dir=exp/$gmm
ali_dir=exp/${gmm}_ali_${train_set}_sp_comb
tree_dir=exp/chain${nnet3_affix}/tree_bi${tree_affix}
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_comb_lats
dir=exp/chain${nnet3_affix}/tdnn_lstm${tdnn_lstm_affix}_sp_bi
train_data_dir=data/${train_set}_sp_hires_comb
lores_train_data_dir=data/${train_set}_sp_comb
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires_comb
for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
$lores_train_data_dir/feats.scp $ali_dir/ali.1.gz $gmm_dir/final.mdl; do
[ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done
if [ $stage -le 14 ]; then
echo "$0: creating lang directory with one state per phone."
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [note, it really has two states.. the first one is only repeated
# once, the second one has zero or more repeats.]
if [ -d data/lang_chain ]; then
if [ data/lang_chain/L.fst -nt data/lang/L.fst ]; then
echo "$0: data/lang_chain already exists, not overwriting it; continuing"
else
echo "$0: data/lang_chain already exists and seems to be older than data/lang..."
echo " ... not sure what to do. Exiting."
exit 1;
fi
else
cp -r data/lang data/lang_chain
silphonelist=$(cat data/lang_chain/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat data/lang_chain/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that later on may have to tune this
# topology.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >data/lang_chain/topo
fi
fi
if [ $stage -le 15 ]; then
# Get the alignments as lattices (gives the chain training more freedom).
# use the same num-jobs as the alignments
steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
data/lang $gmm_dir $lat_dir
rm $lat_dir/fsts.*.gz # save space
fi
if [ $stage -le 16 ]; then
# Build a tree using our new topology. We know we have alignments for the
# speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
# those.
if [ -f $tree_dir/final.mdl ]; then
echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
exit 1;
fi
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--context-opts "--context-width=2 --central-position=1" \
--cmd "$train_cmd" 4000 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir
fi
if [ $stage -le 17 ]; then
mkdir -p $dir
echo "$0: creating neural net configs using the xconfig parser";
num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
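  # e.g. with xent_regularize=0.1 (the default above) this evaluates to 0.5/0.1 = 5.0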
mkdir -p $dir/configs
cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input
# please note that it is important to have input layer with the name=input
# as the layer immediately preceding the fixed-affine-layer to enable
# the use of short notation for the descriptor
fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
# the first splicing is moved before the lda layer, so no splicing here
relu-renorm-layer name=tdnn1 dim=512
relu-renorm-layer name=tdnn2 dim=512 input=Append(-1,0,1)
fast-lstmp-layer name=lstm1 cell-dim=512 recurrent-projection-dim=128 non-recurrent-projection-dim=128 delay=-3
relu-renorm-layer name=tdnn3 dim=512 input=Append(-3,0,3)
relu-renorm-layer name=tdnn4 dim=512 input=Append(-3,0,3)
fast-lstmp-layer name=lstm2 cell-dim=512 recurrent-projection-dim=128 non-recurrent-projection-dim=128 delay=-3
relu-renorm-layer name=tdnn5 dim=512 input=Append(-3,0,3)
relu-renorm-layer name=tdnn6 dim=512 input=Append(-3,0,3)
fast-lstmp-layer name=lstm3 cell-dim=512 recurrent-projection-dim=128 non-recurrent-projection-dim=128 delay=-3
## adding the layers for chain branch
output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
# adding the layers for xent branch
# This block prints the configs for a separate output that will be
# trained with a cross-entropy objective in the 'chain' models... this
# has the effect of regularizing the hidden parts of the model. we use
# 0.5 / args.xent_regularize as the learning rate factor- the factor of
# 0.5 / args.xent_regularize is suitable as it means the xent
# final-layer learns at a rate independent of the regularization
# constant; and the 0.5 was tuned so as to make the relative progress
# similar in the xent and regular final layers.
output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
if [ $stage -le 18 ]; then
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
fi
steps/nnet3/chain/train.py --stage $train_stage \
--cmd "$decode_cmd" \
--feat.online-ivector-dir $train_ivector_dir \
--feat.cmvn-opts "--norm-means=false --norm-vars=false" \
--chain.xent-regularize 0.1 \
--chain.leaky-hmm-coefficient 0.1 \
--chain.l2-regularize 0.00005 \
--chain.apply-deriv-weights false \
--chain.lm-opts="--num-extra-lm-states=2000" \
--egs.dir "$common_egs_dir" \
--egs.opts "--frames-overlap-per-eg 0" \
--egs.chunk-width "$frames_per_chunk" \
--egs.chunk-left-context "$chunk_left_context" \
--egs.chunk-right-context "$chunk_right_context" \
--trainer.num-chunk-per-minibatch 128 \
--trainer.frames-per-iter 1500000 \
--trainer.max-param-change 2.0 \
--trainer.num-epochs 4 \
--trainer.deriv-truncate-margin 10 \
--trainer.optimization.shrink-value 0.99 \
--trainer.optimization.num-jobs-initial 2 \
--trainer.optimization.num-jobs-final 3 \
--trainer.optimization.initial-effective-lrate 0.001 \
--trainer.optimization.final-effective-lrate 0.0001 \
--trainer.optimization.momentum 0.0 \
--cleanup.remove-egs true \
--feat-dir $train_data_dir \
--tree-dir $tree_dir \
--lat-dir $lat_dir \
--dir $dir
fi
if [ $stage -le 19 ]; then
# Note: it might appear that this data/lang_chain directory is mismatched, and it is as
# far as the 'topo' is concerned, but this script doesn't read the 'topo' from
# the lang directory.
utils/mkgraph.sh --left-biphone --self-loop-scale 1.0 data/lang_test $dir $dir/graph
fi
if [ $stage -le 20 ]; then
steps/nnet3/decode.sh --num-threads 4 --nj $decode_nj --cmd "$decode_cmd" \
--acwt 1.0 --post-decode-acwt 10.0 \
--extra-left-context $extra_left_context \
--extra-right-context $extra_right_context \
--frames-per-chunk "$frames_per_chunk" \
--online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_test_hires \
--scoring-opts "--min-lmwt 5 " \
$dir/graph data/test_hires $dir/decode || exit 1;
fi
exit 0
|
michellemorales/OpenMM
|
kaldi/egs/gale_arabic/s5b/local/chain/tuning/run_tdnn_lstm_1a.sh
|
Shell
|
gpl-2.0
| 9,064 |
#!/bin/bash
###############################################################################
#
# DEFINITION OF FUNCTIONS
#
###############################################################################
function execute_silently {
$@ > /dev/null 2>&1
return $?
}
function logline {
[[ $BOOTSTRAP_TEST_MODE == 1 ]] && return
echo $@
}
function check_service {
srv=$1
service_critical=$2
[[ $SETUP_ONLY == 1 ]] && return
echo "Checking service $srv ..."
STATUS=`systemctl is-active $srv\.service 2>/dev/null`
if [[ "$STATUS" == "inactive" ]];then
echo "$srv daemon not started. Trying to start"
execute_silently systemctl start $srv\.service
if [[ $? -gt 0 ]];then
echo -n "Starting $srv daemon failed."
if [[ $service_critical == 1 ]];then
echo " Exiting ..."
exit 1
fi
fi
fi
ENABLED=`systemctl is-enabled $srv\.service 2>/dev/null`
if [[ "$ENABLED" == "disabled" ]];then
logline "Enabling $srv"
execute_silently systemctl enable $srv\.service
if [[ $? -gt 0 ]];then
logline "WARNING: Enabling $srv daemon failed."
fi
fi
}
###############################################################################
function check_server_cert {
# Create directory if not exists
# Usefull on testing systems where no obs-server rpm is installed
[ -d $backenddir/certs/ ] || mkdir -p $backenddir/certs/
if [[ ! -e $backenddir/certs/server.${FQHOSTNAME}.created || ! -e $backenddir/certs/server.${FQHOSTNAME}.crt ]]; then
# setup ssl certificates (NOT protected with a passphrase)
logline "Creating a default SSL certificate for the server"
logline "Please replace it with your version in $backenddir/certs directory..."
DETECTED_CERT_CHANGE=1
# hostname specific certs - survive intermediate hostname changes
if [ ! -e $backenddir/certs/server.${FQHOSTNAME}.crt ] ; then
# This is just a dummy SSL certificate, but it has a valid hostname.
# Admin can replace it with his version.
create_selfsigned_certificate
echo "$OPENSSL_CONFIG" | openssl req -new -nodes -config /dev/stdin \
-x509 -days 365 -batch \
-key $backenddir/certs/server.key \
-out $backenddir/certs/server.${FQHOSTNAME}.crt
if [[ $? == 0 ]];then
echo "Do not remove this file or new SSL CAs will get created." > $backenddir/certs/server.${FQHOSTNAME}.created
fi
else
echo "ERROR: SSL CAs in $backenddir/certs exists, but were not created for your hostname"
exit 1
fi
fi
}
###############################################################################
function create_selfsigned_certificate() {
cert_outdir=$backenddir/certs
COUNTER=0
DNS_NAMES=""
for name in $PROPOSED_DNS_NAMES;do
DNS_NAMES="$DNS_NAMES
DNS.$COUNTER = $name"
COUNTER=$(($COUNTER + 1 ))
done
logline "Creating crt/key in $cert_outdir"
OPENSSL_CONFIG="prompt = no
distinguished_name = req_distinguished_name
[req_distinguished_name]
countryName = CC
stateOrProvinceName = OBS Autogen State or Province
localityName = OBS Autogen Locality
organizationName = OBS Autogen Organisation
organizationalUnitName = OBS Autogen Organizational Unit
commonName = $FQHOSTNAME
emailAddress = [email protected]
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca
[req_attributes]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
[ v3_ca ]
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer
basicConstraints = CA:true
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = critical,CA:FALSE
keyUsage = digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
$DNS_NAMES
[ v3_ca ]
basicConstraints = CA:TRUE
subjectAltName = @alt_names
"
}
###############################################################################
function get_hostname {
if [[ $1 && $BOOTSTRAP_TEST_MODE == 1 ]];then
FQHOSTNAME=$1
else
FQHOSTNAME=`hostname -f `
fi
if type -p ec2-public-hostname; then
FQHOSTNAME=`ec2-public-hostname`
fi
if [ "$FQHOSTNAME" = "" ]; then
ask "Please enter the full qualified hostname!"
FQHOSTNAME=$rv
fi
# fallback in non-interactive mode
if [ "$FQHOSTNAME" = "" ]; then
# Fallback to IP of the VM/host
FQHOSTNAME=`ip addr | sed -n 's,.*inet \(.*\)/.* brd.*,\1,p' | grep -v ^127. | head -n 1`
if [ "$?" != "0" -o "$FQHOSTNAME" = "" ]; then
echo " Can't determine hostname or IP - Network setup failed!"
echo " Check if networking is up and dhcp is working!"
echo " Using 'localhost' as FQHOSTNAME."
FQHOSTNAME="localhost"
fi
USEIP=$FQHOSTNAME
fi
DOMAINNAME=""
if [[ $FQHOSTNAME =~ '.' ]];then
DOMAINNAME=$(echo $FQHOSTNAME | perl -pe 's/^[\w\-_]*\.(.*)/$1/')
SHORTHOSTNAME=$(echo $FQHOSTNAME | perl -pe 's/^([\w\-_]*)\..*/$1/')
else
SHORTHOSTNAME=$FQHOSTNAME
fi
}
###############################################################################
function generate_proposed_dnsnames {
if [[ ! $FQHOSTNAME ]];then
get_hostname
fi
if [[ $FQHOSTNAME != 'localhost' ]];then
LOCAL_HOST="localhost"
fi
if [[ $FQHOSTNAME == $SHORTHOSTNAME ]];then
DNSNAMES="$SHORTHOSTNAME $LOCAL_HOST"
else
DNSNAMES="$SHORTHOSTNAME $FQHOSTNAME $LOCAL_HOST"
fi
ask "Proposed DNS names: " "$DNSNAMES"
PROPOSED_DNS_NAMES=$rv
}
###############################################################################
function adjust_api_config {
echo "Adjust configuration for this hostname"
# use local host to avoid SSL verification between webui and api
api_options_yml=$apidir/config/options.yml
sed -i 's,^frontend_host: .*,frontend_host: "localhost",' $api_options_yml
sed -i 's,^frontend_port: .*,frontend_port: 443,' $api_options_yml
sed -i 's,^frontend_protocol: .*,frontend_protocol: "'"https"'",' $api_options_yml
sed -i 's,^external_frontend_host: .*,external_frontend_host: "'"$FQHOSTNAME"'",' $api_options_yml
sed -i 's,^external_frontend_port: .*,external_frontend_port: 443,' $api_options_yml
sed -i 's,^external_frontend_protocol: .*,external_frontend_protocol: "'"https"'",' $api_options_yml
}
###############################################################################
function adapt_worker_jobs {
# a changed IP also means that leftover jobs are invalid - cope with that
echo "Adapting present worker jobs"
sed -i "s,server=\"http://[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*:5352,server=\"http://$FQHOSTNAME:5352,g" \
$backenddir/jobs/*/* 2> /dev/null
sed -i "s,server=\"http://[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*:5252,server=\"http://$FQHOSTNAME:5252,g" \
$backenddir/jobs/*/* 2> /dev/null
#remove old workers status and idling/building markers
rm -f $backenddir/jobs/*/*status 2> /dev/null
rm -f $backenddir/workers/*/* 2> /dev/null
# create repo directory or apache fails when nothing got published
mkdir -p $backenddir/repos
chown obsrun.obsrun $backenddir/repos
}
###############################################################################
function prepare_database_setup {
DATABASE_EXISTS=$(mysql -e "show databases"|grep api_production)
if [[ ! $DATABASE_EXISTS ]];then
echo "Initialize MySQL databases (first time only)"
mysqladmin -u root password "opensuse"
RUN_INITIAL_SETUP="true"
fi
RAKE_COMMANDS=""
if [ -n "$RUN_INITIAL_SETUP" ]; then
logline "Initialize OBS api database (first time only)"
cd $apidir
RAKE_COMMANDS="db:create db:setup writeconfiguration"
else
logline "Migrate OBS api database"
cd $apidir
RAKE_COMMANDS="db:migrate"
echo
fi
logline "Setting up rails environment"
for cmd in $RAKE_COMMANDS
do
logline " - Doing 'rake.ruby2.3 $cmd'"
RAILS_ENV=production bundle exec rake.ruby2.3 $cmd >> $apidir/log/db_migrate.log
done
}
###############################################################################
function add_login_info_to_issue {
cat >> /etc/issue <<EOF
Connect to the web interface via: https://$FQHOSTNAME
Connect to the api interface via: https://$FQHOSTNAME
Browse the build packages via: http://$FQHOSTNAME:82
* "Admin"/"root" user password is "opensuse" by default.
* Connect to the web interface now to finish the OBS setup.
More information about this appliance is available here:
http://en.opensuse.org/Build_Service/OBS-Appliance
Greetings from the Open Build Service Team
http://www.open-build-service.org
EOF
}
###############################################################################
function network_failure_warning {
echo "OBS appliance could not get setup, no network found" > /srv/www/obs/overview/index.html
cat <<EOF > /etc/issue
*******************************************************************************
** NETWORK SETUP FAILED **
** **
** OBS is not usable. A working DNS resolution for your host is required! **
** You can check this with 'hostname -f'. **
** This often happens in virtualization environments like e.g. VirtualBox. **
** **
** You also could run **
** **
** /usr/lib/obs/server/setup-appliance.sh **
** **
** for interactive configuration **
** **
*******************************************************************************
EOF
}
###############################################################################
function check_server_key {
# reuse signing key even if hostname changed
if [ ! -e $backenddir/certs/server.key ]; then
install -d -m 0700 $backenddir/certs
openssl genrsa -out $backenddir/certs/server.key 1024 2>/dev/null
fi
}
###############################################################################
function import_ca_cert {
# apache has to trust the api ssl certificate
if [ ! -e /etc/ssl/certs/server.${FQHOSTNAME}.crt ]; then
cp $backenddir/certs/server.${FQHOSTNAME}.crt \
/usr/share/pki/trust/anchors/server.${FQHOSTNAME}.pem
update-ca-certificates
fi
}
###############################################################################
function relink_server_cert {
if [[ $DETECTED_CERT_CHANGE == 1 || ! -e $backenddir/certs/server.crt ]];then
# change links for certs according to hostnames
cd $backenddir/certs
rm -f server.crt
ln -sf server.${FQHOSTNAME}.crt server.crt
cd - >/dev/null
fi
}
###############################################################################
function fix_permissions {
cd $apidir
chown -R wwwrun.www $apidir/log
}
###############################################################################
function create_issue_file {
echo "Recreating /etc/issue"
# create base version of /etc/issues
cat > /etc/issue <<EOF
Welcome to Open Build Service(OBS) Appliance $OBSVERSION
based on $OS
EOF
# check if signing packages is enabled, otherwise add warning to /etc/issue
if ! grep -q "^our \$sign =" /usr/lib/obs/server/BSConfig.pm ; then
echo "Adding signing hint to /etc/issue"
cat >> /etc/issue <<EOF
WARNING: **** Package signing is disabled, maybe due to lack of a hardware random number generator ****
EOF
fi
}
###############################################################################
function create_overview_html {
echo "Creating overview.html"
sed -e "s,___API_URL___,https://$FQHOSTNAME,g" \
-e "s,___REPO_URL___,http://$FQHOSTNAME:82,g" \
/srv/www/obs/overview/overview.html.TEMPLATE > /srv/www/obs/overview/index.html
}
###############################################################################
function ask {
logline $1
if [[ $NON_INTERACTIVE == 1 ]];then
rv=$2
logline "Using default value '$rv' in non-interactive mode"
return
fi
echo "Default: $2"
read rv
if [[ ! $rv ]];then
rv=$2
fi
}
###############################################################################
function check_required_backend_services {
[[ $SETUP_ONLY == 1 ]] && return
NEEDED_SERVICES="obsrepserver obssrcserver obsscheduler obsdispatcher obspublisher"
for srv in $NEEDED_SERVICES;do
ENABLED=`systemctl is-enabled $srv`
ACTIVE=`systemctl is-active $srv`
[[ "$ENABLED" == "enabled" ]] || systemctl enable $srv
[[ "$ACTIVE" == "active" ]] || systemctl start $srv
done
}
###############################################################################
function check_optional_backend_services {
[[ $SETUP_ONLY == 1 ]] && return
OPTIONAL_SERVICES="obsdodup obswarden obssigner obsapisetup obsstoragesetup obsworker"
for srv in $OPTIONAL_SERVICES;do
STATE=$(chkconfig $srv|awk '{print $2}')
if [[ $STATE != on ]];then
ask "Service $srv is not enabled. Would you like to enable it? [yN]" "n"
case $rv in
y|yes|Y|YES)
systemctl enable $srv
systemctl start $srv
;;
esac
fi
done
}
###############################################################################
function prepare_apache2 {
[[ $SETUP_ONLY == 1 ]] && return
PACKAGES="apache2 apache2-mod_xforward rubygem-passenger-apache2 memcached"
PKG2INST=""
for pkg in $PACKAGES;do
rpm -q $pkg >/dev/null || PKG2INST="$PKG2INST $pkg"
done
if [[ -n $PKG2INST ]];then
zypper --non-interactive install $PKG2INST >/dev/null
fi
MODULES="passenger rewrite proxy proxy_http xforward headers socache_shmcb"
for mod in $MODULES;do
a2enmod -q $mod || a2enmod $mod
done
FLAGS=SSL
for flag in $FLAGS;do
a2enflag $flag >/dev/null
done
}
function prepare_passenger {
perl -p -i -e \
's#^(\s*)PassengerRuby "/usr/bin/ruby"#$1\PassengerRuby "/usr/bin/ruby.ruby2.3"#' \
/etc/apache2/conf.d/mod_passenger.conf
}
###############################################################################
#
# MAIN
#
###############################################################################
# make parsed output predictable
if [[ ! $BOOTSTRAP_TEST_MODE == 1 ]];then
export LC_ALL=C
# package or appliance defaults
if [ -e /etc/sysconfig/obs-server ]; then
source /etc/sysconfig/obs-server
fi
# Set default directories
apidir=/srv/www/obs/api
backenddir=/srv/obs
# Overwrite directory defaults with settings in
# config file /etc/sysconfig/obs-server
if [ -n "$OBS_BASE_DIR" ]; then
backenddir="$OBS_BASE_DIR"
fi
NON_INTERACTIVE=0
while [[ $1 ]];do
case $1 in
--non-interactive) NON_INTERACTIVE=1;;
--setup-only) SETUP_ONLY=1;;
esac
shift
done
check_required_backend_services
check_optional_backend_services
check_service mysql 1
get_hostname
### In case of the appliance, we never know where we boot up !
OLDFQHOSTNAME="NOTHING"
if [ -e $backenddir/.oldfqhostname ]; then
OLDFQHOSTNAME=`cat $backenddir/.oldfqhostname`
fi
DETECTED_HOSTNAME_CHANGE=0
if [ "$FQHOSTNAME" != "$OLDFQHOSTNAME" ]; then
echo "Appliance hostname changed from $OLDFQHOSTNAME to $FQHOSTNAME !"
DETECTED_HOSTNAME_CHANGE=1
fi
if [[ $DETECTED_HOSTNAME_CHANGE == 1 ]];then
adapt_worker_jobs
adjust_api_config
fi
echo "$FQHOSTNAME" > $backenddir/.oldfqhostname
OBSVERSION=`rpm -q --qf '%{VERSION}' obs-server`
OS=`head -n 1 /etc/SuSE-release`
RUN_INITIAL_SETUP=""
prepare_database_setup
check_server_key
generate_proposed_dnsnames
DNS_NAMES="$rv"
DETECTED_CERT_CHANGE=0
check_server_cert
import_ca_cert
relink_server_cert
fix_permissions
prepare_apache2
prepare_passenger
check_service apache2
check_service obsapidelayed
create_issue_file
if [ -n "$FQHOSTNAME" ]; then
create_overview_html
add_login_info_to_issue
else
network_failure_warning
fi
exit 0
fi
|
shyukri/open-build-service
|
dist/setup-appliance.sh
|
Shell
|
gpl-2.0
| 16,460 |
#!/bin/bash
node index.js \
--id raspberrypi \
--tty-buf /dev/ttyAMA0 \
--kafka-host 192.168.1.97 \
--kafka-port 9092
|
jgensler8/node_producer
|
start.sh
|
Shell
|
gpl-2.0
| 126 |
#!/bin/sh
# Install Ubuntu packages.
sed -i 's/# \(.*multiverse$\)/\1/g' /etc/apt/sources.list
apt-get update
apt-get -y upgrade
apt-get install -y build-essential mongodb flex openjdk-7-jdk
rm -rf /var/lib/apt/lists/*
# Install Golang.
mkdir -p /home/acm/goroot
wget https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz
tar xzf go1.4.2.linux-amd64.tar.gz
cp -r go/* /home/acm/goroot/
mkdir -p /home/acm/go/src /home/acm/go/pkg /home/acm/go/bin
# Set environment variables for Golang.
export GOROOT=/home/acm/goroot
export GOPATH=/home/acm/go
export OJ_HOME=$GOPATH/src
export DATA_PATH=$GOPATH/Data
export LOG_PATH=$OJ_HOME/log
export RUN_PATH=$OJ_HOME/run
export JUDGE_HOST="http://127.0.0.1:8888"
export MONGODB_PORT_27017_TCP_ADDR=127.0.0.1
export PATH=$GOROOT/bin:$GOPATH/bin:$PATH
# Install MongoDB.
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' > /etc/apt/sources.list.d/mongodb.list
apt-get update
apt-get install -y mongodb
rm -rf /var/lib/apt/lists/*
# Get OJ Source Code
mkdir -p $OJ_HOME/ProblemData
mkdir -p $OJ_HOME/run
mkdir -p $OJ_HOME/log
go get gopkg.in/mgo.v2
go get github.com/djimenez/iconv-go
git clone https://github.com/ZJGSU-Open-Source/GoOnlineJudge.git $GOPATH/src/GoOnlineJudge
git clone https://github.com/ZJGSU-Open-Source/RunServer.git $GOPATH/src/RunServer
git clone https://github.com/ZJGSU-Open-Source/vjudger.git $GOPATH/src/vjudger
git clone https://github.com/sakeven/restweb.git $GOPATH/src/restweb
# Build OJ
cd $OJ_HOME/restweb
cd restweb
go install
cd $OJ_HOME
restweb build GoOnlineJudge
cd $OJ_HOME/RunServer
./make.sh
echo
echo ----------
echo installed.
echo ----------
echo
# Run MongoDB, GoOnlineJudge, RunServer
# --fork daemonizes mongod so the services below can start
mongod --fork --dbpath /home/acm/go/Data --logpath /home/acm/go/Data/mongo.log
cd $OJ_HOME/
restweb run GoOnlineJudge &
cd $GOPATH/src/GoOnlineJudge
RunServer &
|
ZJGSU-Open-Source/GoOnlineJudge
|
install.sh
|
Shell
|
gpl-2.0
| 1,927 |
# --- T2-COPYRIGHT-NOTE-BEGIN ---
# This copyright note is auto-generated by ./scripts/Create-CopyPatch.
#
# T2 SDE: package/.../iptables/rocknet_iptables.sh
# Copyright (C) 2004 - 2006 The T2 SDE Project
#
# More information can be found in the files COPYING and README.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License. A copy of the
# GNU General Public License can be found in the file COPYING.
# --- T2-COPYRIGHT-NOTE-END ---
iptables_init_if() {
if isfirst "iptables_$if"; then
# prepare INPUT
addcode up 1 1 "iptables -N firewall_$if"
addcode up 1 2 "iptables -A INPUT -i $if `
`-m state --state ESTABLISHED,RELATED -j ACCEPT"
addcode up 1 3 "iptables -A INPUT -i $if -j firewall_$if"
# prepare FORWARD
addcode up 1 1 "iptables -N forward_$if"
addcode up 1 2 "iptables -A FORWARD -i $if `
`-m state --state ESTABLISHED,RELATED -j ACCEPT"
addcode up 1 3 "iptables -A FORWARD -i $if -j forward_$if"
# clean INPUT
addcode down 1 3 "iptables -F firewall_$if"
addcode down 1 2 "iptables -D INPUT -i $if -j firewall_$if"
addcode down 1 2 "iptables -D INPUT -i $if `
`-m state --state ESTABLISHED,RELATED -j ACCEPT"
addcode down 1 1 "iptables -X firewall_$if"
# clean FORWARD
addcode down 1 3 "iptables -F forward_$if"
addcode down 1 2 "iptables -D FORWARD -i $if -j forward_$if"
addcode down 1 2 "iptables -D FORWARD -i $if `
`-m state --state ESTABLISHED,RELATED -j ACCEPT"
addcode down 1 1 "iptables -X forward_$if"
fi
}
iptables_parse_conditions() {
iptables_cond=""
while [ -n "$1" ]
do
case "$1" in
all)
shift
;;
tcp|udp)
iptables_cond="$iptables_cond -p $1 --dport $2"
shift; shift
;;
icmp)
iptables_cond="$iptables_cond -p icmp --icmp-type $2"
shift; shift
;;
ip)
iptables_cond="$iptables_cond -s $2"
shift; shift
;;
*)
error "Unkown accept/reject/drop condition: $1"
shift
esac
done
}
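# Illustrative example: parsing the conditions "tcp 22 ip 10.0.0.0/8"
# yields iptables_cond=" -p tcp --dport 22 -s 10.0.0.0/8"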
public_accept() {
iptables_parse_conditions "$@"
local level=6; [ "$ip" ] && level=5
addcode up 1 $level "iptables -A firewall_$if ${ip:+-d $ip} $iptables_cond -j ACCEPT"
iptables_init_if
}
public_reject() {
iptables_parse_conditions "$@"
local level=6; [ "$ip" ] && level=5
addcode up 1 $level "iptables -A firewall_$if ${ip:+-d $ip} $iptables_cond -j REJECT"
iptables_init_if
}
public_drop() {
iptables_parse_conditions "$@"
local level=6; [ "$ip" ] && level=5
addcode up 1 $level "iptables -A firewall_$if ${ip:+-d $ip} $iptables_cond -j DROP"
iptables_init_if
}
public_restrict() {
iptables_parse_conditions "$@"
local level=6; [ "$ip" ] && level=5
addcode up 1 $level "iptables -A forward_$if ${ip:+-d $ip} $iptables_cond -j DROP"
iptables_init_if
}
public_conduit() {
# conduit (tcp|udp) port targetip[:targetport]
#
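# e.g. (illustrative): "conduit tcp 80 192.168.1.10:8080" DNATs public
# port 80 on $if to port 8080 of the internal host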
local proto=$1 port=$2
local targetip=$3 targetport=$2
if [ "${targetip/:/}" != "$targetip" ]; then
targetport=${targetip#*:}
targetip=${targetip%:*}
fi
addcode up 1 4 "iptables -t nat -A PREROUTING -i $if ${ip:+-d $ip} -p $proto \
--dport $port -j DNAT --to $targetip:$targetport"
addcode up 1 4 "iptables -A forward_$if -p $proto -d $targetip \
--dport $targetport -j ACCEPT"
iptables_init_if
}
public_clamp_mtu() {
addcode up 1 1 "iptables -A FORWARD ${if:+-o $if} -p tcp --tcp-flags SYN,RST SYN \
-j TCPMSS --clamp-mss-to-pmtu"
addcode down 9 1 "iptables -D FORWARD ${if:+-o $if} -p tcp --tcp-flags SYN,RST SYN \
-j TCPMSS --clamp-mss-to-pmtu"
}
public_masquerade() {
if [ "$ip" ]; then
addcode up 1 6 "iptables -t nat -A POSTROUTING ${1:+-s $1} -o $if \
-j SNAT --to $ip"
addcode down 9 6 "iptables -t nat -D POSTROUTING ${1:+-s $1} -o $if \
-j SNAT --to $ip"
else
addcode up 1 6 "iptables -t nat -A POSTROUTING ${1:+-s $1} -o $if \
-j MASQUERADE"
addcode down 9 6 "iptables -t nat -D POSTROUTING ${1:+-s $1} -o $if \
-j MASQUERADE"
fi
}
|
arete/t2
|
package/network/iptables/rocknet_iptables.sh
|
Shell
|
gpl-2.0
| 4,075 |
convert images/OCS-786-A.png -crop 1551x4518+67+311 +repage images/OCS-786-A.png
#
#
#/OCS-786.png
convert images/OCS-786-B.png -crop 1557x4518+57+309 +repage images/OCS-786-B.png
#
#
#/OCS-786.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/cropedges.OCS-786.sh
|
Shell
|
gpl-2.0
| 198 |
#!/bin/bash
#FILE
# /usr/sbin/unxsvzZabbixAddGroup.sh
#PURPOSE
# Very simple script to add a container group to Zabbix.
#AUTHOR
# (C) 2011-2016 Gary Wallis for Unixservice, LLC.
# GPLv3 license applies see root dir LICENSE
#NOTES
# For use with popen();
if [ "$1" == "" ];then
echo "usage: $0 <cGroup>";
exit 1;
fi
if [ -f "/etc/unxsvz/zabbix.local.sh" ];then
source /etc/unxsvz/zabbix.local.sh;
else
echo "no /etc/unxsvz/zabbix.local.sh";
exit 2;
fi
if [ "$cZabbixPassword" == "" ];then
echo "no cZabbixPassword";
exit 3;
fi
if [ "$cZabbixServer" == "" ];then
echo "no cZabbixServer";
exit 4;
fi
#Set for your user and password
#login
cat << EOF > /tmp/logingroup.json
{
"jsonrpc":"2.0",
"method":"user.login",
"params":{
"user":"apiuser",
"password":"$cZabbixPassword"
},
"id":1
}
EOF
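# crude JSON parsing below (no jq assumed): the cut pipeline extracts the
# auth token from the "result" field of the login response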
cAuth=`/usr/bin/wget --quiet --no-check-certificate --post-file=/tmp/logingroup.json --output-document=-\
--header='Content-Type: application/json-rpc'\
https://$cZabbixServer/zabbix/api_jsonrpc.php | cut -f 3 -d : | cut -f 2 -d \"`;
if [ $? != 0 ];then
echo "wget error 0";
exit 5;
fi
if [ "$cAuth" == "" ] || [ "$cAuth" == "code" ];then
echo "Could not login";
exit 6;
fi
#echo $cAuth;
cat << EOF > /tmp/hostgroup.json
{
"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"output": "extend",
"filter": {
"name": [
"$1"
]
}
},
"id":1,
"auth":"$cAuth"
}
EOF
uHostGroupID=`/usr/bin/wget --quiet --no-check-certificate --post-file=/tmp/hostgroup.json --output-document=-\
--header='Content-Type: application/json-rpc'\
https://$cZabbixServer/zabbix/api_jsonrpc.php | cut -f 4 -d : | cut -f 2 -d \"`;
if [ $? != 0 ];then
echo "wget error 1";
exit 7;
fi
#debug only
#echo $uHostGroupID;
#exit;
if [ "$uHostGroupID" == "" ] || [ "$uHostGroupID" == "message" ] || [ "$uHostGroupID" == "1}" ];then
cat << EOF > /tmp/hostgroup.json
{
"jsonrpc": "2.0",
"method": "hostgroup.create",
"params": {
"name": "$1"
},
"id":1,
"auth":"$cAuth"
}
EOF
uHostGroupID=`/usr/bin/wget --quiet --no-check-certificate --post-file=/tmp/hostgroup.json --output-document=-\
--header='Content-Type: application/json-rpc'\
https://$cZabbixServer/zabbix/api_jsonrpc.php | cut -f 4 -d : | cut -f 2 -d \"`;
if [ $? != 0 ];then
echo "wget error 2";
exit 8;
fi
#debug only
#echo $uHostGroupID;
#exit;
if [ "$uHostGroupID" == "" ] || [ "$uHostGroupID" == "message" ] || [ "$uHostGroupID" == "1}" ];then
echo "Could not get host group id for $1";
rm -f /tmp/hostgroup.json;
if [ $? != 0 ];then
echo "/tmp/hostgroup.json";
fi
rm -f /tmp/logingroup.json;
if [ $? != 0 ];then
echo "/tmp/logingroup.json";
fi
exit 9;
fi
fi
rm -f /tmp/hostgroup.json;
if [ $? != 0 ];then
echo "/tmp/hostgroup.json";
exit 10;
fi
rm -f /tmp/logingroup.json;
if [ $? != 0 ];then
echo "/tmp/logingroup.json";
exit 11;
fi
echo $uHostGroupID;
exit 0;
|
unxs0/unxsVZ
|
unxsScripts/unxsvz/sbin/unxsvzZabbixAddGroup.sh
|
Shell
|
gpl-2.0
| 2,901 |
#!/bin/bash
# Start grafx2. The current folder is used as the pictures folder
# TODO Add options to select a folder or file
echo "Work directory is /pictures"
docker run -e DISPLAY=$DISPLAY \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev/snd:/dev/snd \
-v /dev/shm:/dev/shm \
-v /etc/machine-id:/etc/machine-id \
-v /run/user/$(id -u)/pulse:/run/user/$(id -u)/pulse \
-v /var/lib/dbus:/var/lib/dbus \
-v ~/.pulse:/home/arnold/.pulse \
-v $(pwd):/pictures \
--privileged \
--rm=true \
-i -t cpcsdk/crossdev \
bash -c 'cd /pictures && grafx2'
|
cpcsdk/docker-amstrad-crossdev
|
wrappers/grafx2.sh
|
Shell
|
gpl-2.0
| 556 |
#!/bin/bash
echo "Hola Mundo"
echo "UCR"
|
olgerpv/LS2G1SC-VI2015
|
Scripts/LAB03/mi_primer_script.sh
|
Shell
|
gpl-2.0
| 41 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2012-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure we get a usable error message when trying to overwrite a normal
# file using a variant. The variant path is different from the standard path
# since the generated file is going in a different directory than where the
# normal files are.
. ./tup.sh
check_no_windows variant
tmkdir build-default
cat > Tupfile << HERE
ifeq (@(DEBUG),y)
: |> touch %o |> foo
endif
: |> touch %o |> bar
HERE
echo "" > build-default/tup.config
tup touch bar Tupfile
update_fail_msg "Attempting to insert 'bar' as a generated node.*in the source directory"
eotup
|
ppannuto/tup
|
test/t8029-variant-generated-overwrite.sh
|
Shell
|
gpl-2.0
| 1,305 |
#!/bin/bash
# not much of a 'crawler', but it gets the job done.
while true; do
curl 'http://citibikenyc.com/stations/json' > data.$(date +%s).json;
sleep 10m;
done
|
rjpower/citibike-history
|
data/crawl.sh
|
Shell
|
gpl-3.0
| 174 |
#remove old build
if [ -d "build" ];then
echo -e "\n\033[1;31mRemoving old build\033[0m";
rm -r build
fi
#create build directory
echo -e "\n\033[1;31mCreating directory\033[0m";
mkdir build
#compilation
echo -e "\n\033[1;31mCompilyng\033[0m";
mkdir build/class
javac -sourcepath src src/vetris/Main.java -d build/class -version
#copy resources
echo -e "\n\033[1;31mCopying resources\033[0m";
cp -r src/resources build/class/resources
#archive
echo -e "\n\033[1;31mArchiving\033[0m";
mkdir build/release
jar cvmf MANIFEST.MF build/release/Vetris.jar -C build/class .
#end
echo -e "\n\033[1;31mCompleted\033[0m\n";
|
ValsTeam/Vetris
|
build.sh
|
Shell
|
gpl-3.0
| 620 |
#! /bin/bash
#
# forg.sh (File Organizer)
# It organizes the downloaded files in the download directory
#
# Copyright (C) 2015, Adjamilton Junior
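#
# Example usage (illustrative): run periodically via cron, e.g.
#   */10 * * * * $HOME/forg.sh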
# Path of the monitored directory
dir="$HOME/Downloads"
# Path for the log file
log="$HOME/forg.log"
# Checking if the monitored directory exists
if [ ! -d $dir ]; then
echo "$(date +"<%c> Directory does not exist.")" >> $log
exit 0
fi
# Checking if the monitored directory is empty
if [ "$(find $dir -maxdepth 0 -empty -type d)" ]; then
echo "$(date +"<%c> Directory is empty.")" >> $log
exit 0
fi
# File formats
format=(
# Image
*.jpg *.jpeg *.gif *.tiff *.webp *.bmp *.png *.ico
# Document (texts, sheets, presentations etc.) types
*.doc *.docx *.odt *.txt *.pdf *.xls *.ppt *.pptx *.ps
# Compressed file
*.zip *.rar *.gz *.tar *.cab *.arj *.ace
# Videos
*.mp4 *.avi
# Audio
*.mp3 *.midi
# Torrent
*.torrent
# Installer package
*.dmg *.deb *.rpm *.exe
)
# Change to the monitored directory
cd $dir
# Here is where the magic happens ;)
# IMPORTANT: Remember to check the target directory for each added file format
for ((i=0; i<${#format[@]}; i++)); do
if [ "$(find $dir -maxdepth 1 -type f -name "${format[$i]}")" ]; then
case ${format[$i]} in
# image formats
*.jpg|*.jpeg|*.gif|*.tiff|*.webp|*.bmp|*.png|*.ico)
# test the exit status of mv directly; mv prints nothing on success
if mv ${format[$i]} "$HOME/Pictures"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
;;
# document formats
*.doc|*.docx|*.odt|*.txt|*.pdf|*.xls|*.ppt|*.pptx|*.ps)
if mv ${format[$i]} "$HOME/Downloads/Docs"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
;;
# compressed formats
*.zip|*.rar|*.gz|*.tar|*.cab|*.arj|*.ace)
if mv ${format[$i]} "$HOME/Downloads/Zipped"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
;;
# video formats
*.mp4|*.avi)
if mv ${format[$i]} "$HOME/Downloads/Videos"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
;;
# Audio formats (a dedicated Audio target directory is assumed here)
*.mp3|*.midi)
if mv ${format[$i]} "$HOME/Downloads/Audio"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
;;
# torrent format
*.torrent)
if mv ${format[$i]} "$HOME/Downloads/Torrents"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
;;
# installer package format
*.dmg|*.deb|*.rpm|*.exe)
if mv ${format[$i]} "$HOME/Downloads/Installers"; then
echo "$(date +"<%c> file(s) moved successfully.")" >> $log
fi
esac
fi
done
|
ajunior/scripts
|
forg.sh
|
Shell
|
gpl-3.0
| 2,630 |
#!/bin/bash
# securetty.sh
# This script enables root login on tty on lucid
#@TODO : make some post checks
#Load functions
. ${lxc_PATH_LIBEXEC}/functions.sh
#var checkings
needed_var_check "lxc_TMP_ROOTFS"
#Shortcuts
rootfs=${lxc_TMP_ROOTFS}
#rootfs checking
[[ -f "${rootfs}/etc/lxc-provider.tag" ]] || die "${rootfs} is not a tagged rootfs"
#Pre-check
[[ -f "${rootfs}/etc/securetty" ]] || die "${rootfs}/etc/securetty not found"
#lxc's console is seen as "UNKNOWN"
#Make this a secure tty, so root can login
echo "UNKNOWN" >> "${rootfs}/etc/securetty"
#Post-check
if egrep -q 'UNKNOWN' "${rootfs}/etc/securetty"
then
log "rootlogin enabled on : not verified"
else
die "Unable to enable root login"
fi
exit 0
|
phbaer/lxc-tools
|
lxc-provider/libexec/scripts/templating/debian/ubuntu/lucid/securetty.sh
|
Shell
|
gpl-3.0
| 724 |
#!/usr/bin/env bash
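# curl exit status 7 means "failed to connect", so this asserts that plain
# HTTP to example.com is blocked inside the VM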
vm_command 'curl example.com; [[ $? -eq 7 ]]'
|
l0b0/root
|
test/modules/insecure_http_blocker/test.sh
|
Shell
|
gpl-3.0
| 67 |
#!/bin/bash
# Create shortcuts for RDP connections
ntp_timeout=5
{% if install_vmware_horizon_client %}
cat /proc/cmdline | tr ' ' '\n' | while read param; do
if echo $param | grep -q '^timezone='; then
timezone=$(echo $param | cut -d= -f2-)
sudo ln -sf /usr/share/zoneinfo/"$timezone" /run/localtime
fi
done
cat /proc/cmdline | tr ' ' '\n' | while read param; do
if echo $param | grep -q '^shutdowntime='; then
shutdowntime=$(echo $param | cut -d= -f2- | tr '%' ' ')
sudo systemd-run --on-calendar="$shutdowntime" halt -p
fi
done
cat /proc/cmdline | tr ' ' '\n' | while read param; do
if echo $param | grep -q '^ntpservers='; then
servers=$(echo $param | cut -d= -f2)
echo $servers | tr ',' '\n' | while read srv; do
# -u Direct ntpdate to use an unprivileged port for outgoing packets
# -t timeout Specify the maximum time waiting for a server response as the
# value timeout, in seconds and fraction
echo "ntp server: $srv" >> /tmp/ntpdate.log
ntpdate -u -t "$ntp_timeout" "$srv" >> /tmp/ntpdate.log 2>&1
result=$?
if [ $result -eq 0 ]; then
break
fi
done
fi
done
{% endif %}
|
selivan/thinclient
|
template/home/local/bin/autorun.sh
|
Shell
|
gpl-3.0
| 1,282 |
#!/bin/sh
#tests multiple commands
cd ../src
echo ""
echo "Testing commands with exit now:"
echo "Testing 'exit;':"
echo "exit;" | ./rshell
echo "Testing 'echo hello && exit;':"
echo "echo hello && exit;" | ./rshell
echo "Testing 'echo one; echo two; exit; echo three;':"
echo "echo one; echo two; exit; echo three;" | ./rshell
echo "Testing 'errortest || exit':"
echo "errortest || exit;" | ./rshell
echo "End testing exit commands."
echo ""
|
AminWatad/rshell
|
tests/exit.sh
|
Shell
|
gpl-3.0
| 449 |
#!/bin/bash
# -*- ENCODING: UTF-8 -*-
#Intelligent Software Installation Assistant for GNU/Linux distributions based on Ubuntu 12.04
#Installation scripts for third-party software (outside the official Ubuntu repositories)
#Copyright (C) <2014> <Sebastian Nolberto Lagos Gutierrez, [email protected], Arica, Chile>
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
apt-get purge -qq skype
exit
|
sebalagos90/asistente_instalacion_inteligente
|
aiis-0_5_0/usr/share/aiis/scripts/des_skype.sh
|
Shell
|
gpl-3.0
| 1,039 |
#!/bin/bash
SDL2_PV=2.0.3
SDL2_TTF_PV=2.0.12
cachedir=.cache
liburl=https://openrct.net/launcher/libs/orctlibs.zip
mkdir -p $cachedir
echo `uname`
# Sets default target to "linux", if none specified
TARGET=${TARGET-linux}
# keep in sync with version in build.sh
libversion=3
libVFile="./libversion"
function download {
if command -v curl > /dev/null 2>&1; then
curl -L -o "$2" "$1"
elif command -v wget > /dev/null 2>&1; then
wget -O "$2" "$1"
else
echo "Please install either wget or curl to continue"
exit 1
fi
}
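# usage (illustrative): download <url> <output-file>, e.g.
#   download https://example.com/pkg.tar.gz $cachedir/pkg.tar.gz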
function download_sdl {
if [[ ! -f $cachedir/SDL2-devel-${SDL2_PV}-mingw.tar.gz ]]; then
download http://libsdl.org/release/SDL2-devel-${SDL2_PV}-mingw.tar.gz $cachedir/SDL2-devel-${SDL2_PV}-mingw.tar.gz;
fi
if [[ ! -f $cachedir/SDL2_ttf-devel-${SDL2_TTF_PV}-mingw.tar.gz ]]; then
download https://www.libsdl.org/projects/SDL_ttf/release/SDL2_ttf-devel-${SDL2_TTF_PV}-mingw.tar.gz $cachedir/SDL2_ttf-devel-${SDL2_TTF_PV}-mingw.tar.gz;
fi
if [[ ! -d $cachedir/SDL2-${SDL2_PV} ]]; then
pushd $cachedir
tar -xzf SDL2-devel-${SDL2_PV}-mingw.tar.gz
popd
fi
if [[ ! -d $cachedir/SDL2_ttf-${SDL2_TTF_PV} ]]; then
pushd $cachedir
tar -xzf SDL2_ttf-devel-${SDL2_TTF_PV}-mingw.tar.gz
popd
fi
# Apply platform patch
mingw_patch=libsdl2-mingw-2.0.3-fix-platform-detection-for-mingw.patch
if [[ ! -f $cachedir/$mingw_patch ]]; then
download "https://github.com/anyc/anyc-overlay/raw/master/media-libs/libsdl2-mingw/files/$mingw_patch" $cachedir/$mingw_patch;
# XXX not sure how to make this idempotent.
pushd $cachedir/SDL2-${SDL2_PV}/i686-w64-mingw32/include/SDL2/
echo "Applying patch."
patch -p2 < ../../../../$mingw_patch
popd
fi
}
function download_libs {
if [[ ! -f $cachedir/orctlibs.zip ]]; then
download $liburl $cachedir/orctlibs.zip;
fi
if [[ ! -d $cachedir/orctlibs ]]; then
mkdir -p $cachedir/orctlibs
pushd $cachedir/orctlibs
unzip -uaq ../orctlibs.zip
popd
fi
}
function install_cross_tools {
if [[ ! -d /usr/local/cross-tools ]]; then
sudo mkdir -p /usr/local/cross-tools
fi
if [[ ! -d /usr/local/cross-tools/i686-w64-mingw32 ]]; then
sudo cp -r $cachedir/SDL2-${SDL2_PV}/i686-w64-mingw32 /usr/local/cross-tools/
sudo cp -r $cachedir/SDL2_ttf-${SDL2_TTF_PV}/i686-w64-mingw32 /usr/local/cross-tools/
fi
if [[ ! -d /usr/local/cross-tools/orctlibs ]]; then
sudo mkdir -p /usr/local/cross-tools/orctlibs
sudo cp -rf $cachedir/orctlibs/glob/* /usr/local/cross-tools/orctlibs/.
fi
}
function install_pkg_config {
if [[ ! -f $cachedir/i686-w64-mingw32-pkg-config ]]; then
# If this fails to work because of newlines, be sure you are running this
# script with Bash, and not sh. We should really move this to a separate
# file.
echo -e "#!/bin/sh\nexport PKG_CONFIG_PATH=/usr/local/cross-tools/i686-w64-mingw32/lib/pkgconfig:/usr/local/cross-tools/orctlibs/lib/pkgconfig\npkg-config \$@" > $cachedir/i686-w64-mingw32-pkg-config;
fi
chmod +x $cachedir/i686-w64-mingw32-pkg-config
sudo cp $cachedir/i686-w64-mingw32-pkg-config /usr/local/bin/
ls -al /usr/local/bin | grep pkg-config
cat /usr/local/bin/i686-w64-mingw32-pkg-config
}
function install_local_libs {
mkdir -p lib
cp -rf $cachedir/orctlibs/local/* ./lib/.
}
echo TARGET = $TARGET
if [[ `uname` == "Darwin" ]]; then
echo "Installation of OpenRCT2 assumes you have homebrew and use it to install packages."
echo "Check if brew is installed"
package_command="brew"
which -s brew
if [ $? -eq 1 ]; then
echo "brew is not installed, or is not in your \$PATH"
echo "Check if MacPorts is installed"
which -s port
if [ $? -eq 1 ]; then
echo "MacPorts not found either, abort"
exit
else
echo "MacPorts found"
package_command="sudo port"
fi
else
echo "brew was found"
fi
# Install packages with whatever command was found.
# Very possible I'm missing some dependencies here.
eval "$package_command install cmake wine"
if [[ ! -d /usr/include/wine ]]; then
# This will almost certainly break as brew changes. Better ideas
# welcome.
wine_path="/usr/local/Cellar/wine/1.6.2/include/wine"
if [ "$package_command" == "sudo port" ]; then
wine_path="/opt/local/include/wine"
fi
sudo ln -s $wine_path /usr/include
fi
mingw_name=mingw-w32-bin_i686-darwin
mingw_tar=$mingw_name"_20130531".tar.bz2
mingw_path=/usr/local/$mingw_name
if [[ ! -f $cachedir/$mingw_tar ]]; then
download "https://downloads.sourceforge.net/project/mingw-w64/Toolchains targetting Win32/Automated Builds/$mingw_tar" $cachedir/$mingw_tar
fi
if [[ ! -d "$mingw_path" ]]; then
pushd /usr/local/
sudo mkdir $mingw_name
popd
echo "Extracting contents of $mingw_tar to $mingw_path"
echo "Don't forget to add $mingw_path/bin to your PATH variable!"
sudo tar -xyf $cachedir/$mingw_tar -C $mingw_path
pushd /usr/local
sudo chmod 755 $mingw_name
pushd $mingw_name
sudo find . -type d -exec chmod 755 {} \;
popd
popd
fi
elif [[ `uname` == "Linux" ]]; then
if [[ -z "$TRAVIS" ]]; then
sudo apt-get install -y binutils-mingw-w64-i686 gcc-mingw-w64-i686 g++-mingw-w64-i686 cmake
if [[ -z "$DISABLE_G2_BUILD" ]]; then
sudo apt-get install -y wine
fi
else
# prevent build.sh from re-doing all the steps again
case "$TARGET" in
"linux")
sudo dpkg --add-architecture i386
sudo apt-get update
sudo apt-get install --no-install-recommends -y --force-yes cmake libsdl2-dev:i386 libsdl2-ttf-dev:i386 gcc-4.8 pkg-config:i386 g++-4.8-multilib gcc-4.8-multilib libjansson-dev:i386 libspeex-dev:i386 libspeexdsp-dev:i386 libcurl4-openssl-dev:i386 libcrypto++-dev:i386
download https://launchpad.net/ubuntu/+archive/primary/+files/libjansson4_2.7-1ubuntu1_i386.deb libjansson4_2.7-1ubuntu1_i386.deb
download https://launchpad.net/ubuntu/+archive/primary/+files/libjansson-dev_2.7-1ubuntu1_i386.deb libjansson-dev_2.7-1ubuntu1_i386.deb
sudo dpkg -i libjansson4_2.7-1ubuntu1_i386.deb
sudo dpkg -i libjansson-dev_2.7-1ubuntu1_i386.deb
sudo apt-get install -f
export CC=gcc-4.8
export CXX=g++-4.8
;;
"windows")
sudo apt-get update
sudo apt-get install -y --force-yes binutils-mingw-w64-i686 gcc-mingw-w64-i686 g++-mingw-w64-i686 cmake
;;
"docker32")
docker pull openrct2/openrct2:32bit-only
;;
*)
echo "unkown target $TARGET"
exit 1
esac
fi
fi
download_libs
# mind the gap (trailing space)
sha256sum $cachedir/orctlibs.zip | cut -f1 -d\ > $libVFile
echo "Downloaded library with sha256sum: $(cat $libVFile)"
# Local libs are required for all targets
install_local_libs
if [[ $TARGET == "windows" ]]; then
download_sdl
install_cross_tools
install_pkg_config
# $TARGET == "windows"
fi
|
nightroan/OpenRCT2
|
install.sh
|
Shell
|
gpl-3.0
| 6,978 |
#!/bin/bash -x
mkdir -p $GOPATH/src-docker
docker run --name go-dev -v $GOPATH/src-docker:/home/dev/go/src -v $LOCAL_SRC:/opt/src -it arthurmilliken/go-dev
|
arthurmilliken/docker-dev
|
go-dev/docker-run.sh
|
Shell
|
gpl-3.0
| 156 |
#!/bin/sh
# Helper script used to check and update engine dependencies
# This should not be called manually
command -v git >/dev/null 2>&1 || command -v curl >/dev/null 2>&1 || { echo >&2 "The OpenRA mod template requires git or curl."; exit 1; }
command -v python >/dev/null 2>&1 || { echo >&2 "The OpenRA mod template requires python."; exit 1; }
TEMPLATE_LAUNCHER=$(python -c "import os; print(os.path.realpath('$0'))")
TEMPLATE_ROOT=$(dirname "${TEMPLATE_LAUNCHER}")
# shellcheck source=mod.config
. "${TEMPLATE_ROOT}/mod.config"
if [ -f "${TEMPLATE_ROOT}/user.config" ]; then
# shellcheck source=user.config
. "${TEMPLATE_ROOT}/user.config"
fi
CURRENT_ENGINE_VERSION=$(cat "${ENGINE_DIRECTORY}/VERSION" 2> /dev/null)
if [ -f "${ENGINE_DIRECTORY}/VERSION" ] && [ "${CURRENT_ENGINE_VERSION}" = "${ENGINE_VERSION}" ]; then
exit 0
fi
if [ "${AUTOMATIC_ENGINE_MANAGEMENT}" = "True" ]; then
echo "OpenRA engine version ${ENGINE_VERSION} is required."
if command -v git >/dev/null 2>&1; then
if [ ! -d "${ENGINE_DIRECTORY}/.git" ]; then
rm -rf "${ENGINE_DIRECTORY}"
git clone "${AUTOMATIC_ENGINE_SOURCE_GIT}" "${ENGINE_DIRECTORY}"
fi
git --git-dir="${ENGINE_DIRECTORY}/.git" --work-tree="${ENGINE_DIRECTORY}" fetch --all
git --git-dir="${ENGINE_DIRECTORY}/.git" --work-tree="${ENGINE_DIRECTORY}" checkout "${ENGINE_VERSION}"
else
if [ -d "${ENGINE_DIRECTORY}" ]; then
if [ "${CURRENT_ENGINE_VERSION}" != "" ]; then
echo "Deleting engine version ${CURRENT_ENGINE_VERSION}."
else
echo "Deleting existing engine (unknown version)."
fi
rm -rf "${ENGINE_DIRECTORY}"
fi
echo "Downloading engine..."
curl -s -L -o "${AUTOMATIC_ENGINE_TEMP_ARCHIVE_NAME}" -O "${AUTOMATIC_ENGINE_SOURCE}" || exit 3
# Github zipballs package code with a top level directory named based on the refspec
# Extract to a temporary directory and then move the subdir to our target location
REFNAME=$(unzip -qql "${AUTOMATIC_ENGINE_TEMP_ARCHIVE_NAME}" | head -n1 | tr -s ' ' | cut -d' ' -f5-)
rm -rf "${AUTOMATIC_ENGINE_EXTRACT_DIRECTORY}"
mkdir "${AUTOMATIC_ENGINE_EXTRACT_DIRECTORY}"
unzip -qq -d "${AUTOMATIC_ENGINE_EXTRACT_DIRECTORY}" "${AUTOMATIC_ENGINE_TEMP_ARCHIVE_NAME}"
mv "${AUTOMATIC_ENGINE_EXTRACT_DIRECTORY}/${REFNAME}" "${ENGINE_DIRECTORY}"
rmdir "${AUTOMATIC_ENGINE_EXTRACT_DIRECTORY}"
rm "${AUTOMATIC_ENGINE_TEMP_ARCHIVE_NAME}"
fi
echo "Compiling engine..."
cd "${ENGINE_DIRECTORY}" || exit 1
make version VERSION="${ENGINE_VERSION}"
exit 0
fi
echo "Automatic engine management is disabled."
echo "Please manually update the engine to version ${ENGINE_VERSION}."
exit 1
|
IronDominion/IronDominion
|
fetch-engine.sh
|
Shell
|
gpl-3.0
| 2,637 |
#!/bin/bash
dayDate=$(date +"%Y%m%d")
filePathInfomaker="/mnt/pdfout/PubBuilder/out/Infomaker/in/"
filePathMeltwater="/mnt/pdfout/PubBuilder/out/Meltwater/in/"
filePathOpoint="/mnt/pdfout/PubBuilder/out/Opoint/in/"
filePathPaperton="/mnt/pdfout/PubBuilder/out/Paperton/in/"
filePathPressreader="/mnt/pdfout/PubBuilder/out/Pressreader/in/"
filePathReadly="/mnt/pdfout/PubBuilder/out/Readly/in/"
filePathRetriever="/mnt/pdfout/PubBuilder/out/Retriever/in/"
filePathSAS="/mnt/pdfout/PubBuilder/out/SAS/in/"
checkKorrOut="/var/www/bnr-hds-pdfmonitor.ad.bonniernews.se/textFiles/listInFile.txt"
if [ -f "$checkKorrOut" ]
then
rm $checkKorrOut
else
touch $checkKorrOut
fi
checkFilesHD=$(ls ${filePathInfomaker}HD/ | grep ${dayDate} | wc -l)
checkFilesHALLA=$(ls ${filePathInfomaker}Halla/ | grep ${dayDate} | wc -l)
checkFilesSDS=$(ls ${filePathInfomaker}SDS/ | grep ${dayDate} | wc -l)
echo "Infomaker HD: ${checkFilesHD}" >> "${checkKorrOut}"
echo "Infomaker HallΓ₯: ${checkFilesHALLA}" >> "${checkKorrOut}"
echo "Infomaker SDS: ${checkFilesSDS}" >> "${checkKorrOut}"
checkFilesHD=$(ls ${filePathMeltwater}HD/ | grep ${dayDate} | wc -l)
echo "Meltwater HD: ${checkFilesHD}" >> "${checkKorrOut}"
checkFilesHD=$(ls ${filePathOpoint}HD/ | grep ${dayDate} | wc -l)
checkFilesHALLA=$(ls ${filePathOpoint}Halla/ | grep ${dayDate} | wc -l)
checkFilesSDS=$(ls ${filePathOpoint}SDS/ | grep ${dayDate} | wc -l)
echo "Opoint HD: ${checkFilesHD}" >> "${checkKorrOut}"
echo "Opoint HallΓ₯: ${checkFilesHALLA}" >> "${checkKorrOut}"
echo "Opoint SDS: ${checkFilesSDS}" >> "${checkKorrOut}"
checkFilesHD=$(ls ${filePathPaperton}HD/ | grep ${dayDate} |wc -l)
checkFilesHALLA=$(ls ${filePathPaperton}Halla/ | grep ${dayDate} |wc -l)
checkFilesSDS=$(ls ${filePathPaperton}SDS/ | grep ${dayDate} |wc -l)
echo "Paperton HD: ${checkFilesHD}" >> "${checkKorrOut}"
echo "Paperton HallΓ₯: ${checkFilesHALLA}" >> "${checkKorrOut}"
echo "Paperton SDS: ${checkFilesSDS}" >> "${checkKorrOut}"
checkFileSDS=$(ls ${filePathPressreader}SDS/ | grep ${dayDate} |wc -l)
echo "Pressreader SDS: ${checkFileSDS}" >> "${checkKorrOut}"
checkFilesHD=$(ls ${filePathReadly}HD/ | grep ${dayDate} |wc -l)
checkFilesSDS=$(ls ${filePathReadly}SDS/ | grep ${dayDate} |wc -l)
echo "Readly HD: ${checkFilesHD}" >> "${checkKorrOut}"
echo "Readly SDS: ${checkFilesSDS}" >> "${checkKorrOut}"
checkFilesHD=$(ls ${filePathRetriever}HD/ | grep ${dayDate} |wc -l)
checkFilesSDS=$(ls ${filePathRetriever}SDS/ | grep ${dayDate} |wc -l)
echo "Retriever HD: ${checkFilesHD}" >> "${checkKorrOut}"
echo "Retriever SDS: ${checkFilesSDS}" >> "${checkKorrOut}"
checkFilesHD=$(ls ${filePathSAS}HD/ | grep ${dayDate} |wc -l)
checkFilesSDS=$(ls ${filePathSAS}SDS/ | grep ${dayDate} |wc -l)
echo "SAS HD: ${checkFilesHD}" >> "${checkKorrOut}"
echo "SAS SDS: ${checkFilesSDS}" >> "${checkKorrOut}"
|
alxr91/e-papperskvitto
|
eveningScript/inList.sh
|
Shell
|
gpl-3.0
| 2,879 |
#!/bin/sh
email=$1
[ "${email}EMPTY" = "EMPTY" ] && {
echo "Usage:"
echo
echo ./noemail.sh email
echo
echo "Removes all references to email in Recipe files"
exit 1
}
find . -name Recipe | xargs -n 1 GrepReplace " <${email}>" ""
|
AitorATuin/MyRecipes
|
tools/noemail.sh
|
Shell
|
gpl-3.0
| 253 |
#KALMAN FILTER CONFIGURATION
DOMAINCONF=CORDOBA_2KBIS #Define a domain
LETKFNAMELIST=control #Define a letkf namelist template
MEMBER=60 #Number of ensemble members.
MAX_DOM=1 #Maximum number of WRF domains.
HOMEDIR=${HOME}/share/
DATADIR=${HOME}/data/
ANALYSIS=1 #Identify this job as an analysis job.
FORECAST=0 #This is not a forecast job.
INTERPANA=0 #This is used in forecast jobs (but we need to define it here too)
RUN_ONLY_MEAN=0 #This is used in forecast jobs (but we need to define it here too)
USE_ANALYSIS_BC=1 #1 - use analysis as BC , 0 - use forecasts as bc (e.g. global gfs)
# if 1 then bc data will be taken from exp_met_em folder in the corresponding INPUT folder.
# if 0 then bc data will be taken from for_met_em folder in the corresponding INPUT folder.
# default is 1
USE_ANALYSIS_IC=0     #1 - use global analysis as IC, 0 - use LETKF analysis as IC
                      #if 0 then provide a LETKF-analysis source (ANALYSIS_SOURC)
#default is 0
NVERTEXP=27 #Number of vertical levels in initial and boundary conditions input grib data.
NVERTDB=38 #Number of vertical levels in initial and boundary conditions perturbation input grib data.
#AUXILIARY VARIABLE FOR ENSEMBLE SIZE
MM=$MEMBER #Variable for iteration limits.
MEANMEMBER=`expr $MEMBER + 1 ` #This is the member ID corresponding to the ensemble mean.
WINDOW=300 #Assimilation frequency. (seconds)
WINDOW_START=300 #Window start (seconds from forecast initialization)
WINDOW_END=300 #Window end (seconds from forecast initialization)
WINDOW_FREC=300 #Output frequency within window (seconds) should be the same as the maximum observation frequency.
ASSIMILATION_FREC=300 #Assimilation frequency (seconds)
NSLOTS=`expr $WINDOW_END \/ $WINDOW_FREC - $WINDOW_START \/ $WINDOW_FREC + 1 ` #Number of time slots.
NBSLOT=`expr $ASSIMILATION_FREC \/ $WINDOW_FREC - $WINDOW_START \/ $WINDOW_FREC + 1 ` #Time slot corresponding to the analysis.
if [ $NBSLOT -lt 10 ] ; then
NBSLOT=0$NBSLOT
fi
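#Worked example with the settings above (all values 300 s):
#  NSLOTS = 300/300 - 300/300 + 1 = 1  and  NBSLOT = 300/300 - 300/300 + 1 = 1,
#  which the if-block above zero-pads to "01".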
SIGMA_OBS="2.0d3"
SIGMA_OBSV="0.2d0"
SIGMA_OBSZ="2.0d3"
SIGMA_OBST="3.0d0"
GROSS_ERROR="15.0d0"
COV_INFL_MUL="1.1d0"
SP_INFL_ADD="0.d0"
RELAX_ALPHA_SPREAD="0.95d0"
RELAX_ALPHA="0.0d0"
USE_ADAPTIVE_INFLATION=0  #1 turn on adaptive inflation (Miyoshi 2011), 0 turn off adaptive inflation
#Note that for adaptive inflation to work COV_INFL_MUL should be < 0.0
GUESFT=$WINDOW_END # First guess forecast length (seconds)
#DOMAIN AND BOUNDARY DATA
BOUNDARY_DATA_FREQ=21600 #Boundary data frequency. (seconds)
BOUNDARY_DATA_PERTURBATION_FREQ=21600 #Frequency of data used to perturb boundary conditions (seconds)
#POSTPROC CONFIGURATION
OUTLEVS="0.1,0.5,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0," #Level list
OUTVARS="'umet,vmet,W,QVAPOR,QCLOUD,QRAIN,QICE,QSNOW,QGRAUP,RAINNC,tk,u10m,v10m,slp,mcape,dbz,max_dbz'" #Variable list.
ARWPOST_FREC=21600 # Post processing frequency (seconds)
INPUT_ROOT_NAME='wrfout'
INTERP_METHOD=1
### LETKF setting
OBS="" # Name of conventional observations folder.
RADAROBS="/OSSE_20140122_DBZ2.5_VR1.0_SO2KM/" # Name of radar observation folder.
EXP=ANALYSIS_${DOMAINCONF}_${CONFIGURATION} # name of experiment
### initial date setting
IDATE=20140122173000
EDATE=20140122200000
#### DATA
OBSDIR=${HOMEDIR}/DATA/OBS/$OBS/ # Folder where conventional observations are.
NRADARS=1 # Number of available radars.
RADAROBSDIR=${HOMEDIR}/DATA/OBS/$RADAROBS/ # Folder where radar observations are.
TMPDIR=${HOMEDIR}/TMP/$EXP/ # Temporal work directory (should be accessible for all computation nodes)
OUTPUTDIR=${DATADIR}/EXPERIMENTS/$EXP/ # Where results will be stored.
GRIBDIR=${HOMEDIR}/DATA/GRIB/FNL/HIRES/ARGENTINA/ # Folder where bdy and initial grib files are located.
GRIBTABLE="Vtable.GFS" # Bdy and init data source Vtable name.
PERTGRIBDIR=${HOMEDIR}/DATA/GRIB/CFSR/HIRES/ARGENTINA/ # Folder where data for perturbing bdy are located.
PERTGRIBTABLE="Vtable.CFSR" # Bdy perturbation source vtable name.
GEOG=${HOMEDIR}/LETKF_WRF/wrf/model/GEOG/ # Folder where WPS GEOG dataset is located.
#INITIAL AND BOUNDARY RANDOM PERTURBATIONS
SCALE_FACTOR="0.05" #Perturbation scale factor.
RANDOM_SCALE_FACTOR="0.5" #Random perturbation scale factor.
PERTURB_BOUNDARY=1          #Whether boundary conditions are going to be perturbed.
PERTURB_BOUNDARY_TYPE=1 #DUMMY
#Random dates for boundary perturbations.
INIPERTDATE=20060101000000 #Initial date in grib database (used for perturbing initial and boundary conditions)
ENDPERTDATE=20091231180000 #Final date in grib database (used for perturbing initial and boundary conditions)
PERTREFDATE=20140122000000  #At this date the initial perturbation dates will be taken. This date is used to keep consistency among the perturbations
#used in forecast and analysis experiments. This date must be prior to or equal to IDATE.
INPUT_PERT_DATES_FROM_FILE=1 #0 - generate a new set of random dates, 1 - read random dates from a file.
INI_PERT_DATE_FILE=${HOMEDIR}/DATA/INITIAL_RANDOM_DATES/initial_perturbation_dates_60m #List of initial random dates.
#### EXECUTABLES
RUNTIMELIBS=${HOMEDIR}/libs_sparc64/lib/ #Libs that will be included in LD_LIBRARY_PATH in computing nodes.
WRF=${HOMEDIR}/LETKF_WRF/wrf/ # WRF folder (for computing nodes)
LETKF=$WRF/letkf/letkf.exe # LETKF module (for computing nodes)
UPDATEBC=$WRF/model/WRFDA/da_update_bc.exe # Update bc tool (WRFVAR) (for computing nodes)
WRFMODEL=$WRF/model/WRFV3/ # WRF model that run in computing nodes.
WRFMODELPPS=$WRF/model/WRFV3/ # WRF model that runs in pps server (usually the same as the one for the computing nodes)
WPS=$WRF/model/WPS/ # WRF model pre processing utilities (for pps server)
ARWPOST=$WRF/model/ARWpost/ # WRF model post processing utilities that run in computing nodes.
SPAWN=$WRF/spawn/
MPIBIN=mpiexec
#### SCRIPTS
UTIL=$WRF/run/util.sh # Script containing bash functions that will be used during execution.
#### NAMELIST
NAMELISTWRF=$WRF/run/configuration/domain_conf/$DOMAINCONF/namelist.input #Namelist for WRF model.
NAMELISTWPS=$WRF/run/configuration/domain_conf/$DOMAINCONF/namelist.wps #Namelist for WRF pre processing tools
NAMELISTLETKF=$WRF/run/configuration/letkf_conf/letkf.namelist.$LETKFNAMELIST #Namelist for LETKF
NAMELISTARWPOST=$WRF/run/configuration/domain_conf/$DOMAINCONF/namelist.ARWpost #Namelist for post-processing tools.
NAMELISTOBSOPE=$WRF/run/configuration/letkf_conf/obsope.namelist.$OBSOPENAMELIST #Namelist for observation operator.
|
gustfrontar/LETKF_WRF
|
wrf/run/configuration/analysis_conf/exprtps095_60m_radar_grib_Hakushu.sh
|
Shell
|
gpl-3.0
| 7,530 |
#!/bin/bash
fatal() {
echo "$1"
exit 1
}
warn() {
echo "$1"
}
if [ ! -d "$TUNASYNC_WORKING_DIR" ]; then
fatal "Directory not exists, fail"
fi
RSYNCSOURCE="$TUNASYNC_UPSTREAM_URL"
BASEDIR="$TUNASYNC_WORKING_DIR"
if [ ! -d ${BASEDIR} ]; then
warn "${BASEDIR} does not exist yet, trying to create it..."
mkdir -p ${BASEDIR} || fatal "Creation of ${BASEDIR} failed."
fi
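# Two-stage mirror sync: stage one copies package files while excluding the
# index files (Packages*/Sources*/Release*), so clients never see indices
# that reference packages which have not been synced yet; stage two then
# transfers the indices and deletes files removed upstream.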
{
rsync --recursive --times --links --hard-links \
--stats \
--exclude "Packages*" --exclude "Sources*" \
--exclude "Release*" \
${RSYNCSOURCE} ${BASEDIR} || fatal "First stage of sync failed."
rsync --recursive --times --links --hard-links \
--stats --delete --delete-after \
${RSYNCSOURCE} ${BASEDIR} || fatal "Second stage of sync failed."
} > $TUNASYNC_LOG_FILE
date -u > ${BASEDIR}/project/trace/$(hostname -f)
|
cubarco/tunasync
|
scripts/ubuntu.sh
|
Shell
|
gpl-3.0
| 815 |
#!/bin/bash
RUNNING=`ps ax | grep 'public/index.php queue doctrine' | grep -v grep | awk '{print $1}'`
for pid in $RUNNING; do
echo "Killing queue process [PID $pid]"
kill -9 $pid
done
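# Equivalent process lookup with pgrep (assuming procps pgrep is available):
#   RUNNING=$(pgrep -f 'public/index.php queue doctrine')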
php public/index.php queue doctrine bibtex --start &
echo "Started Bibtex queue [PID $!]"
php public/index.php queue doctrine bibtexreferences --start &
echo "Started BibtexReferences queue [PID $!]"
php public/index.php queue doctrine cermine --start &
echo "Started CERMINE queue [PID $!]"
php public/index.php queue doctrine citationstyle --start &
echo "Started CitationStyle queue [PID $!]"
php public/index.php queue doctrine docx --start &
echo "Started DocX queue [PID $!]"
php public/index.php queue doctrine epub --start &
echo "Started Epub queue [PID $!]"
php public/index.php queue doctrine html --start &
echo "Started HTML queue [PID $!]"
php public/index.php queue doctrine merge --start &
echo "Started XML merge queue [PID $!]"
php public/index.php queue doctrine nlmxml --start &
echo "Started NLMXML queue [PID $!]"
php public/index.php queue doctrine pathfinder --start &
echo "Started pathfinder queue [PID $!]"
php public/index.php queue doctrine pdf --start &
echo "Started PDF queue [PID $!]"
php public/index.php queue doctrine references --start &
echo "Started References queue [PID $!]"
php public/index.php queue doctrine wppdf --start &
echo "Started WpPdf queue [PID $!]"
php public/index.php queue doctrine xmp --start &
echo "Started XMP queue [PID $!]"
php public/index.php queue doctrine zip --start &
echo "Started Zip queue [PID $!]"
php public/index.php queue doctrine ner --start &
echo "Started NER queue [PID $!]"
php public/index.php queue doctrine parsCit --start &
echo "Started ParsCit queue [PID $!]"
php public/index.php queue doctrine grobid --start &
echo "Started Grobid queue [PID $!]"
php public/index.php queue doctrine xmlfinal --start &
echo "Started XML Final queue [PID $!]"
|
pkp/xmlps
|
start_queues.sh
|
Shell
|
gpl-3.0
| 1,928 |
#!/usr/bin/env bash
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
PYTHONPATH="${PYTHONPATH}:${DIR}:." DJANGO_SETTINGS_MODULE="${DJANGO_SETTINGS_MODULE:=dev_settings}" python manage.py "$@"
|
ercpe/djunin
|
run-manage.py.sh
|
Shell
|
gpl-3.0
| 196 |
# This file is part of the Aloofix project.
# Copyright (C) 2013 Kevin Johnson <[email protected]>
# Distributed under the terms of the GNU General Public License version 3,
# or (at your option) any later version.
name=iproute2
version=3.9.0
sequence=1
description="Networking and traffic control utilities"
site=ftp://ftp.kernel.org/pub/linux/utils/net/$name
depends="iptables"
builddepends="pkg-config bison flex bash iptables-dev"
section=networking
compression_suffix=xz
license=GPLv2
license_files=COPYING
extra_doc_files="README*"
configure() {
# we don't want arpd
sed -i -e '/^TARGETS=/s/arpd//' misc/Makefile
sed -i -e /ARPDIR/d Makefile
sed -i -e 's/arpd.8//' man/man8/Makefile
# this next one is unfortunate - gcc 4.8 is throwing a warning in lnstat.c
sed -i -e 's/-Werror//' Makefile
}
build() {
make -j $make_j_qty
}
preinstall_hook() {
rm -rf "$preinstalldir/var/lib"
}
install_target=install
pkg_base_hook() {
sed -i -e '1s@#!.*@#!/bin/sh@' "$pkgcontentdir/sbin/rtpr"
sed -i -e '1s@#!.*@#!/bin/sh@' "$pkgcontentdir/sbin/ifcfg"
pkg_file_mv /sbin/ip /sbin/ip.iproute2
register_alternative /sbin/ip ip.iproute2 100
}
|
aloofschipperke/aloofix
|
specs/pkgs/iproute2/spec.sh
|
Shell
|
gpl-3.0
| 1,193 |
for entry in "$1"/*
do
echo "$entry"
/home/zsofi/Egyetem/TD/AT_Parser-master/At-Parser/Debug/At-Parser "$entry"
done
|
SeOcSa/AT_Parser
|
test.sh
|
Shell
|
gpl-3.0
| 119 |
#!/bin/bash
blender32=http://mirror.cs.umn.edu/blender.org/release/Blender2.75/blender-2.75-linux-glibc211-i686.tar.bz2
blender64=http://mirror.cs.umn.edu/blender.org/release/Blender2.75/blender-2.75-linux-glibc211-x86_64.tar.bz2
# move to the script's path
cd $(dirname $0)
if [ "$1" == "-h" ]; then
echo "simple sh script used to optimize the game installation.
This program will remove several files that are useless for pure game runtime.
useless files
.xcf gimp files (textures)
.blend1 blender backups files
all files or directory found in projects.lst
use:
./build.sh [OPTIONS]
options:
-p PATH user defined path where blenderplayer is installed
-d developer mode : don't remove 'useless' files
-h display this help
ON ERROR: see README.md"
exit 0
fi
developer_mode=0
blender_path=""
for opt in "$@"; do
if [ "$opt" == "-d" ]; then
developer_mode=1
elif [ "$opt" == "-p" ]; then
blender_path='%'
elif [ "$blender_path" == "%" ]; then
blender_path=$opt
fi
done
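# The '%' assigned above is a sentinel meaning "-p seen, its path follows".
# A more conventional sketch of the same parsing (untested alternative):
#   while [ $# -gt 0 ]; do
#       case "$1" in
#           -d) developer_mode=1 ;;
#           -p) blender_path=$2; shift ;;
#       esac
#       shift
#   done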
if [ $developer_mode == 0 ]; then
echo "removing developement files ..."
extensions='*.xcf *.blend1 *.bat'
for ext in $extensions; do
listfiles=$(find . -name "$ext")
for filename in $listfiles; do
echo $filename
rm $filename
done
done
listfile=$(cat projects.lst)
for filename in $listfile; do
echo $filename
rm -r $filename
done
fi
if [ -z "$blender_path" ]; then # download only when no path was given via -p
echo "downloading blender from official repository ..."
if command -v arch >/dev/null 2>&1; then
arch=$(arch)
else
arch=$(uname -m)
fi
echo "architecture is $arch."
if [ $arch == "x86_64" ]; then
wget $blender64 -O blender.tar.bz2
else
wget $blender32 -O blender.tar.bz2
fi
echo "extracting ..."
tar xf blender.tar.bz2 -C software/
mv software/blender-* software/blender
echo "cleaning local directory ..."
rm blender.tar.bz2
blender_path=software/blender
fi
echo $(readlink -m $blender_path) > blenderplayer_path.txt
|
jimy-byerley/Tron-R-reboot-reloaded-
|
build.sh
|
Shell
|
gpl-3.0
| 1,927 |
#!/usr/bin/env bash
#
# - pause input
# - save checkpoint data
# - unpause input
. robot_common.sh
if [ -z "$1" ]; then
echo "usage: save_checkpoint.sh checkpoint"
exit 1;
fi
# pause input
hector_client_set P_load.pauseInput 1
# wait for queues to become empty
hector_client_wait_all P_main.queue_size.0 0 P_main.queue_size.100 0 PE_robot.resourceCount 0 PE_dns.resourceCount 0 PE_robots.resourceCount 0
# save checkpoint data
hector_client_save_checkpoint "$1"
# unpause input
hector_client_set P_load.pauseInput 0
exit 0;
|
qiq/hector_robot
|
robot/save_checkpoint.sh
|
Shell
|
gpl-3.0
| 533 |
set -e
set -x
# git branch, commit or tag
git_commit=v0.9
# setup environment
. /etc/profile
. /etc/init.d/tc-functions
getMirror
# load qt-4.x-base without the X dependencies
# alternatively a "tce-load -wi qt-4.x-base" loads it with all dependencies
wget -P /mnt/sda1/tce/optional $MIRROR/qt-4.x-base.tcz
wget -P /mnt/sda1/tce/optional $MIRROR/qt-4.x-base.tcz.md5.txt
touch /mnt/sda1/tce/optional/qt-4.x-base.tcz.dep
tce-load -i /mnt/sda1/tce/optional/qt-4.x-base.tcz
# load the remaining dependencies for ostinato
tce-load -wi qt-4.x-script
tce-load -wi glib2
tce-load -wi openssl
tce-load -wi libpcap
# load also iperf
tce-load -wi iperf3
# change tcedir to ram disk
mv /etc/sysconfig/tcedir /etc/sysconfig/tcedir.hd
ln -s /tmp/tce /etc/sysconfig/tcedir
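# From here on, downloaded extensions land in the RAM-backed /tmp/tce and are
# discarded when tcedir is switched back, so build-only tools never persist.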
sudo cp -a /usr/local/tce.installed /usr/local/tce.installed.hd
# setup compile environment
tce-load -wi compiletc
tce-load -wi squashfs-tools
tce-load -wi curl
export CFLAGS="-march=i486 -mtune=i686 -O2"
export CXXFLAGS="-march=i486 -mtune=i686 -O2"
export LDFLAGS="-Wl,-O1"
# compile protobuf
curl -L -O https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz
tar xfz protobuf-2.6.1.tar.gz
cd protobuf-2.6.1
./configure --prefix=/usr/local
make
sudo make install
sudo rm /usr/local/lib/libprotobuf.so
cd ..
rm -rf protobuf*
# compile ostinato
tce-load -wi qt-4.x-dev
tce-load -wi libpcap-dev
tce-load -wi git
git clone https://github.com/pstavirs/ostinato.git
cd ostinato
[ -n "$git_commit" ] && git checkout "$git_commit"
qmake -config release "QMAKE_CXXFLAGS+=$CXXFLAGS"
make server
sudo INSTALL_ROOT=/tmp/ostinato make server-install_subtargets
sudo chown -R root:root /tmp/ostinato
sudo chmod +s /tmp/ostinato/usr/local/bin/drone
cd ..
mksquashfs /tmp/ostinato ostinato-drone.tcz
md5sum ostinato-drone.tcz > ostinato-drone.tcz.md5.txt
echo -e "qt-4.x-base.tcz\nqt-4.x-script.tcz\nlibpcap.tcz" > ostinato-drone.tcz.dep
mv ostinato-drone.tcz* /mnt/sda1/tce/optional/
echo ostinato-drone.tcz >> /mnt/sda1/tce/onboot.lst
sudo rm -rf /tmp/ostinato
rm -rf ostinato*
# ostinato configuration file
mkdir -p .config/Ostinato
cat > .config/Ostinato/drone.ini <<'EOF'
[General]
RateAccuracy=Low
[PortList]
Include=eth*
Exclude=eth0
EOF
# change tcedir back to hard disk
rm -f /etc/sysconfig/tcedir
mv /etc/sysconfig/tcedir.hd /etc/sysconfig/tcedir
sudo rm -rf /usr/local/tce.installed
sudo mv /usr/local/tce.installed.hd /usr/local/tce.installed
# disable automatic interface configuration with dhcp
sudo sed -i -e '/label .*core/,/append / s/\(append .*\)/\1 nodhcp/' /mnt/sda1/boot/extlinux/extlinux.conf
# add startup script for ostinato
cat >> /opt/bootlocal.sh <<'EOF'
# Boot parameter "nodhcp": network interfaces are not yet configured
if grep -q -w nodhcp /proc/cmdline; then
# This waits until all devices have registered
/sbin/udevadm settle --timeout=10
# configure eth0 with DHCP
/sbin/udhcpc -b -i eth0 -x hostname:$(/bin/hostname) -p /var/run/udhcpc.eth0.pid >/dev/null 2>&1 &
# alternatively configure static interface address and route
#ifconfig eth0 x.x.x.x netmask 255.255.255.0 up
#route add default gw y.y.y.y
#echo 'nameserver z.z.z.z' > /etc/resolv.conf
# activate other eth devices
NETDEVICES="$(awk -F: '/eth[1-9][0-9]*:/{print $1}' /proc/net/dev 2>/dev/null)"
for DEVICE in $NETDEVICES; do
sysctl -q -w net.ipv6.conf.$DEVICE.disable_ipv6=1
ifconfig $DEVICE mtu 9000 up
done
fi
# disable ostinato update, makes no sense in this environment
echo -e '# disable ostinato update\n127.0.0.127 update.ostinato.org' >> /etc/hosts
# start ostinato drone
sleep 2
su -c 'drone < /dev/null > /var/log/ostinato-drone.log 2>&1 &' gns3
EOF
exit 0
|
adosztal/gns3-registry
|
packer/tinycore-linux/scripts/ostinato-drone.sh
|
Shell
|
gpl-3.0
| 3,671 |
#!/bin/bash
# Installs Wordpress for a new domain
# usage ./wordpress.sh
# Get some information and set variables
function get_domain() {
read -p "Please enter the domain name (no www): " domain
read -p "Please enter desired SFTP username: " username
read -p "Please enter the 10.x.x.x address of your DB Server (or use localhost): " dbhost
read -p "Please enter desired MySQL database name: " database
read -p "Please enter desired MySQL username: " db_user
web_password=$( apg -m 7 -n 1 )
# web_password=${web_password/\'/\\\'}
db_password=$( apg -m 7 -n 1 )
db_password=${db_password/\'/\\\'}
eth1ip=$( ifconfig eth1 | grep 'inet addr:'| cut -d: -f2 | awk '{ print $1}' )
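# Same lookup without net-tools (assumption: iproute2 present, eth1 exists):
#   eth1ip=$(ip -4 addr show eth1 | awk '/inet /{sub(/\/.*/, "", $2); print $2}')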
}
# add a virtual host and restart Apache
function configure_apache() {
if [[ $distro = "Redhat/CentOS" ]]; then
cat > /etc/httpd/vhost.d/"${domain}".conf <<-EOF
<VirtualHost *:80>
ServerName $domain
ServerAlias www.$domain
DocumentRoot /var/www/vhosts/$domain/public_html
<Directory /var/www/vhosts/$domain/public_html>
AllowOverride All
</Directory>
CustomLog logs/$domain-access_log common
ErrorLog logs/$domain-error_log
</VirtualHost>
# <VirtualHost _default_:443>
# ServerName $domain
# DocumentRoot /var/www/vhosts/$domain/public_html
# <Directory /var/www/vhosts/$domain/public_html>
# AllowOverride All
# </Directory>
# CustomLog /var/log/httpd/$domain-ssl-access.log combined
# ErrorLog /var/log/httpd/$domain-ssl-error.log
# # Possible values include: debug, info, notice, warn, error, crit,
# # alert, emerg.
# LogLevel warn
# SSLEngine on
# SSLCertificateFile /etc/pki/tls/certs/$domain.crt
# SSLCertificateKeyFile /etc/pki/tls/private/$domain.key
# SSLCertificateChainFile /etc/pki/tls/certs/CA.crt
# <FilesMatch "\.(cgi|shtml|phtml|php)$">
# SSLOptions +StdEnvVars
# </FilesMatch>
# BrowserMatch "MSIE [2-6]" \\
# nokeepalive ssl-unclean-shutdown \\
# downgrade-1.0 force-response-1.0
# BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
# </VirtualHost>
EOF
service httpd graceful > /dev/null 2>&1
elif [[ $distro = "Ubuntu" ]]; then
cat > /etc/apache2/sites-available/"${domain}" <<-EOF
<VirtualHost *:80>
ServerName $domain
ServerAlias www.$domain
DocumentRoot /var/www/vhosts/$domain/public_html
<Directory /var/www/vhosts/$domain/public_html>
AllowOverride All
</Directory>
CustomLog /var/log/apache2/$domain-access_log common
ErrorLog /var/log/apache2/$domain-error_log
</VirtualHost>
# <VirtualHost _default_:443>
# ServerName $domain
# DocumentRoot /var/www/vhosts/$domain/public_html
# <Directory /var/www/vhosts/$domain/public_html>
# AllowOverride All
# </Directory>
# CustomLog /var/log/httpd/$domain-ssl-access.log combined
# ErrorLog /var/log/httpd/$domain-ssl-error.log
# # Possible values include: debug, info, notice, warn, error, crit,
# # alert, emerg.
# LogLevel warn
# SSLEngine on
# SSLCertificateFile /etc/ssl/certs/$domain.pem
# SSLCertificateKeyFile /etc/ssl/private/$domain.key
# SSLCertificateChainFile /etc/ssl/certs/CA.crt
# <FilesMatch "\.(cgi|shtml|phtml|php)$">
# SSLOptions +StdEnvVars
# </FilesMatch>
# BrowserMatch "MSIE [2-6]" \\
# nokeepalive ssl-unclean-shutdown \\
# downgrade-1.0 force-response-1.0
# BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
# </VirtualHost>
EOF
a2ensite $domain > /dev/null 2>&1
service apache2 graceful > /dev/null 2>&1
fi
}
# Fetch Wordpress and extract it
# make a document root
function get_wordpress() {
cd /root
wget -q http://wordpress.org/latest.tar.gz
mkdir -p /var/www/vhosts/$domain/public_html
tar -C /var/www/vhosts/$domain -xzf latest.tar.gz
rsync -Aa /var/www/vhosts/$domain/wordpress/ /var/www/vhosts/$domain/public_html/
useradd -d /var/www/vhosts/$domain $username > /dev/null 2>&1
#echo $web_password | passwd $username --stdin > /dev/null 2>&1
}
# Set up a database locally OR show the commands to run
function configure_mysql() {
MYSQL=$( which mysql )
CREATE_DB="CREATE DATABASE IF NOT EXISTS ${database};"
CREATE_DB_LOCAL_USER="GRANT ALL PRIVILEGES ON ${database}.* TO '${db_user}'@'${dbhost}' IDENTIFIED BY '${db_password}';"
CREATE_DB_REMOTE_USER="GRANT ALL PRIVILEGES ON ${database}.* TO '${db_user}'@'${eth1ip}' IDENTIFIED BY '${db_password}';"
FP="FLUSH PRIVILEGES;"
SQL="${CREATE_DB}${CREATE_DB_LOCAL_USER}${FP}"
if [[ $dbhost == "localhost" ]]; then
$MYSQL -e "$SQL"
echo "The MySQL database credentials are: "
echo "User: ${db_user}"
echo "Password: ${db_password}"
else
echo "Run these commands on your database server: "
echo $CREATE_DB
echo $CREATE_DB_REMOTE_USER
echo $FP
fi
}
# make wp-config.php and protect it
function create_wp_config() {
cd /var/www/vhosts/$domain/public_html
keys=$( curl -s -k https://api.wordpress.org/secret-key/1.1/salt )
cat > wp-config.php <<-EOF
<?php
define('DB_NAME', '${database}');
define('DB_USER', '${db_user}');
define('DB_PASSWORD', '${db_password}');
define('DB_HOST', '${dbhost}');
define('DB_CHARSET', 'utf8');
define('DB_COLLATE', '');
define('FTP_BASE', '/var/www/vhosts/${domain}/public_html/');
define('FTP_CONTENT_DIR', '/var/www/vhosts/${domain}/public_html/wp-content/');
define('FTP_USER','${username}');
define('FTP_PASS','${web_password}');
define('FTP_HOST','127.0.0.1');
$keys
\$table_prefix = 'wp_';
define('WPLANG', '');
define('WP_DEBUG', false);
/* That's all, stop editing! Happy blogging. */
if ( !defined('ABSPATH') )
define('ABSPATH', dirname(__FILE__) . '/');
require_once(ABSPATH . 'wp-settings.php');
EOF
cat > .htaccess <<-EOF
<files wp-config.php>
order allow,deny
deny from all
</files>
EOF
chown -R $username: /var/www/vhosts/$domain/public_html
}
function clean_up() {
rm -f /root/latest.tar.gz
rm -rf /var/www/vhosts/${domain}/wordpress
}
get_domain
echo "Beginning Wordpress installation."
get_wordpress
echo "Wordpress has been installed in /var/www/vhosts/${domain}/public_html."
create_wp_config
configure_apache
echo "Apache has been configured for ${domain} and restarted."
echo "The SFTP credentials are: "
echo "User: ${username}"
#echo "Password: ${web_password}"
echo -e "\e[0;31m*** Please run 'passwd ${username}' to set an SFTP password ***\e[0m"
echo "*** WordPress has been configured to use FTP for updates ***"
echo "*** Check with the customer for configuring SSH2 updates ***"
configure_mysql
echo "Cleaning up..."
clean_up
echo "I like salsa!"
exit 0
|
hhoover/lazyscripts
|
modules/wordpress.sh
|
Shell
|
gpl-3.0
| 6,522 |
#!/bin/bash
# Quick scripting for doing a batch of annealLorenz
# You can just set the values of interest and go do something else
# Remember, annealLorenz takes in 4 arguments:
# D = number of variables
# M = number of timesteps
# dt = size of timestep
# L_frac = fraction of D that is "measured"
# run the code as:
# bash runbatch.sh
# All output is appended to the end of a log file
# Runtime is recorded in the log file
python annealLorenz.py 5 100 0.2 0.4 >> logs/out.log
echo "DONE 1"
python annealLorenz.py 5 40 0.5 0.4 >> logs/out.log
echo "DONE 2"
|
adrianskw/annealLorenz96
|
runbatch.sh
|
Shell
|
gpl-3.0
| 559 |
#!/usr/bin/env bash
echo -e "\t[+] Fetching afl-latest"
wget -q http://lcamtuf.coredump.cx/afl/releases/afl-latest.tgz &> /dev/null
tar xzf afl-latest.tgz &> /dev/null
rm -f afl-latest.tgz && cd afl-*
echo -e "\t[+] Installing afl"
sudo make install
cd ..
echo -e "\t[+] Install aflfast"
git clone https://github.com/mboehme/aflfast.git
cd aflfast
make && sudo mv afl-fuzz /usr/local/bin/afl-fuzz-fast
cd ..
echo -e "\t[+] Setting core_pattern"
echo core | sudo tee /proc/sys/kernel/core_pattern
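# afl-fuzz aborts at startup if core dumps are piped to an external handler
# (e.g. apport), hence the plain "core" pattern above.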
echo -e "\t[+] Running autotools in test dir"
cd testdata/Automake-Autoconf-Template-Project
libtoolize --force
aclocal && automake --force-missing --add-missing && autoconf
cd ../../
echo -e "\t[+] Installing afl-utils"
wget -q https://github.com/rc0r/afl-utils/archive/v1.32a.tar.gz && tar xzf v1.32a.tar.gz
rm v1.32a.tar.gz && cd afl-utils-1.32a
sudo mkdir -p /usr/lib/python3.4/site-packages && sudo python3 setup.py install
cd ../
echo -e "\t[+] Setting up GDB and exploitable"
cat <<EOF >> ~/.gdbinit
source /usr/lib/python3.4/site-packages/exploitable-1.32_rcor-py3.4.egg/exploitable/exploitable.py
source ~/.orthrus/gdb_orthrus.py
define hook-quit
set confirm off
end
set pagination off
EOF
echo -e "\t[+] Installing afl-cov"
wget -q https://github.com/mrash/afl-cov/archive/0.6.tar.gz && tar xzf 0.6.tar.gz
rm 0.6.tar.gz && cd afl-cov-0.6
sudo cp afl-cov /usr/local/bin/
cd ..
echo -e "\t[+] Installing pysancov"
wget -q https://raw.githubusercontent.com/llvm-mirror/compiler-rt/release_38/lib/sanitizer_common/scripts/sancov.py &> /dev/null
chmod +x sancov.py &> /dev/null
sudo mv sancov.py /usr/local/bin/pysancov &> /dev/null
echo -e "\t[+] Copy gdb-orthrus.py to orthrus-local"
mkdir -p $HOME/.orthrus
wget https://raw.githubusercontent.com/test-pipeline/orthrus/master/gdb-orthrus/gdb_orthrus.py -P $HOME/.orthrus
CLANG_SDICT_DB="https://www.dropbox.com/s/lqayfheheo3coag/fork-6173707-6216-gb24cc33-153448-2017-04-10.tar.gz?dl=0"
curl -o clang.tar.gz -L "$CLANG_SDICT_DB" && tar xzf clang.tar.gz -C $HOME && chmod +x $HOME/local/bin/clang-sdict
echo -e "\t[+] Install bear v2.1.5"
wget https://launchpadlibrarian.net/240291131/bear_2.1.5.orig.tar.gz && tar xzf bear_2.1.5.orig.tar.gz && rm bear_2.1.5.orig.tar.gz
mkdir Bear-2.1.5.build && cd Bear-2.1.5.build && cmake ../Bear-2.1.5 && make -j all && sudo make install && cd .. && rm -rf Bear-2.1.5 Bear-2.1.5.build
|
test-pipeline/orthrus
|
install_deps.sh
|
Shell
|
gpl-3.0
| 2,379 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-taskevent_3-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::taskevent_3:1.0 -N ID0000009 -R condorpool -L example_workflow -T 2016-11-29T15:17:59+00:00 ./example_workflow-taskevent_3-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1B/logs/w-11-A/20161129T151759+0000/00/00/taskevent_3_ID0000009.sh
|
Shell
|
gpl-3.0
| 1,237 |
#!/bin/bash
# SPDX-License-Identifier: GPL-3.0-or-later AND MIT
# Copyright (c) 2017-2021 Maxim Biro <[email protected]>
# Copyright (c) 2021 by The qTox Project Contributors
usage()
{
echo "Download and build libexif for the windows cross compiling environment"
echo "Usage: $0 --arch {x86_64|i686}"
}
ARCH=""
while (( $# > 0 )); do
case $1 in
--arch) ARCH=$2; shift 2 ;;
-h|--help) usage; exit 1 ;;
*) echo "Unexpected argument $1"; usage; exit 1;;
esac
done
if [ "$ARCH" != "i686" ] && [ "$ARCH" != "x86_64" ]; then
echo "Unexpected arch $ARCH"
usage
exit 1
fi
set -euo pipefail
"$(dirname $0)"/download/download_libexif.sh
CFLAGS="-O2 -g0" ./configure --host="${ARCH}-w64-mingw32" \
--prefix=/windows/ \
--enable-shared \
--disable-static \
--disable-docs \
--disable-nls
make -j $(nproc)
make install
|
tux3/qTox
|
buildscripts/build_libexif_windows.sh
|
Shell
|
gpl-3.0
| 1,014 |
#!/bin/bash
apt install perl busybox golang python python2 make && cpan App::cpanminus && cpanm Term::ANSIScreen && pip install requests
chmod +x dos
mv dos ~/../usr/bin/
|
FakeFBI/DoS-Framework
|
installation-scripts/termux-installation.sh
|
Shell
|
gpl-3.0
| 171 |
#!/bin/bash -e
. ../env/env
brief="a general-purpose cryptography library"
intro="
The libgcrypt package contains a general purpose crypto library based on the
code used in GnuPG. The library provides a high level interface to
cryptographic building blocks using an extendable and flexible API.
"
depends="libgpg-error (>= 1.36)"
version=1.8.5
srcfil=libgcrypt-$version.tar.bz2
srcdir=libgcrypt-$version
srcurl=https://www.gnupg.org/ftp/gcrypt/libgcrypt/$srcfil
srcmd5=348cc4601ca34307fc6cd6c945467743
build_src() {
tar -xf $srcfil && cd $srcdir
./configure --prefix=/usr
make $JOBS
make DESTDIR=$OUTPUTDIR install
cleanup_src
}
build
|
fangxinmiao/projects
|
Architeture/OS/Linux/Distributions/LFS/build-scripts/blfs-9.0-systemd/scripts/l/libgcrypt.sh
|
Shell
|
gpl-3.0
| 666 |
#!/bin/bash
############################ [MAIN CODE] ############################
# DO NOT alter below this line unless you know what you are doing!
# user configuration options are found in zer0prompt.conf
# grab user config files via import
source ~/zer0prompt/zer0prompt.conf
# use bash builtin checkwinsize option for terminals which fail to properly
# set the $COLUMNS variable. (bug workaround)
shopt -s checkwinsize
# set line graphics to use based on locale
# if [ "$zgfx_fallback" = "1" ]; then
# zg1="-"; zg2="r"; zg3="L"; zg4="|"; zg5="|"; zg6=">" # ASCII graphics (forced by user config)
# elif [ $(locale charmap) = "UTF-8" ]; then
#zg1="β"; zg2="β"; zg3="β"; zg4="β€"; zg5="β"; zg6=">" # unicode console (UTF-8 graphics supported)
zp_gfx=("β" "β" "β" "β€" "β" ">")
# else
# zg1="-"; zg2="r"; zg3="L"; zg4="|"; zg5="|"; zg6=">" # ASCII console (UTF-8 graphics not supported)
# fi
# set inverse mode if set
if [ "$zpcl_inverse" = "1" ]; then
zci="\[\033[7m\]" #inverse
else
zci=""
fi
# set user info same colour as user selected host info colour
zi0=$zi1
# if root user then colour user info and user identifier red.
[ "${UID}" = 0 ] && zi0="\[\033[1;31m\]" && zi5="\[\033[1;31m\]"
# this function is run at every prompt update, keeping our variables updated.
# bash's PROMPT_COMMAND option handles this (see end of this function).
function pre_prompt {
# show exit code of last failed command
ZEXIT="${?}"
[ "$ZEXIT" = "0" ] && ZEXIT=""
ZPWD=${PWD/#$HOME/\~} # sorten home dir to ~
# set length of our important info
local infolength="$(whoami)@$(hostname):$(basename $(tty))$ZPWD"
# set length of our graphics
local gfxlength=23
# construct ZFILL size to fill terminal width (minus info/gfx lengths).
local fillsize
let fillsize=${COLUMNS}-${gfxlength}-${#infolength}
ZFILL=""
while [ "$fillsize" -gt "0" ]; do
ZFILL="$ZFILL${zp_gfx[0]}"
let fillsize=${fillsize}-1
done
# determine how much to truncate ZPWD, if ZFILL can't shrink anymore.
if [ "$fillsize" -lt "0" ]; then
local cut=3-${fillsize} # some tricky math, 3-(-number)=+number
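# e.g. fillsize=-5 makes cut the string "3--5", which ${ZPWD:cut} evaluates
# arithmetically as 3-(-5)=8, so 8 leading chars are dropped and replaced
# by the three-dot ellipsis (the 3 accounts for the "..." prefix).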
ZPWD="...${ZPWD:${cut}}"
fi
}
PROMPT_COMMAND=pre_prompt
# this function tells bash how to draw our prompt
function zer0prompt {
local zc0="\[\033[0m\]" # clear all colors
local zc1="\[\033[1;37m\]"
local zc2="\[\033[0;37m\]"
# set colour theme
if [ "$zpcl" = "cyan" ]; then
local zc3="\[\033[1;36m\]"; local zc4="\[\033[0;36m\]"
elif [ "$zpcl" = "blue" ]; then
local zc3="\[\033[1;34m\]"; local zc4="\[\033[0;34m\]"
elif [ "$zpcl" = "green" ]; then
local zc3="\[\033[1;32m\]"; local zc4="\[\033[0;32m\]"
elif [ "$zpcl" = "red" ]; then
local zc3="\[\033[1;31m\]"; local zc4="\[\033[0;31m\]"
elif [ "$zpcl" = "purple" ]; then
local zc3="\[\033[1;35m\]"; local zc4="\[\033[0;35m\]"
elif [ "$zpcl" = "yellow" ]; then
local zc3="\[\033[1;33m\]"; local zc4="\[\033[0;33m\]"
elif [ "$zpcl" = "black" ]; then
local zc3="\[\033[1;30m\]"; local zc4="\[\033[0;30m\]"
elif [ "$zpcl" = "white" ]; then
local zc3="\[\033[1;37m\]"; local zc4="\[\033[0;37m\]"
else # none (no colour)
local zc3=""; local zc4=""; local zc1=""; local zc2=""
zi0=""; zi1=""; zi2=""; zi3=""; zi4=""; zi5=""
fi
# set titlebar info if xterm/rxvt
case $TERM in
xterm*|rxvt*)
local TITLEBAR='\[\033]0;\u (\w) [${COLUMNS}x${LINES}]\007\]';;
*)
local TITLEBAR="";;
esac
## build the prompt sets (left to right, top to bottom)
# first line of graphics up to user@host info
zp_set1="$zc1$zci${zp_gfx[1]}${zp_gfx[0]}$zc3${zp_gfx[0]}$zc4$zci${zp_gfx[0]}${zp_gfx[3]}"
# user@host:tty
zp_set2="$zi0\u$zi1@\h:\l"
# middle graphics between user@host and current path (auto-filled to fit terminal width)
zp_set3="$zc4$zci${zp_gfx[4]}${zp_gfx[0]}$zc2$zci${zp_gfx[0]}${zp_gfx[0]}$zc4$zci\
\$ZFILL$zc3${zp_gfx[0]}${zp_gfx[0]}${zp_gfx[0]}${zp_gfx[0]}$zc1${zp_gfx[0]}${zp_gfx[0]}\
${zp_gfx[0]}$zc3${zp_gfx[0]}${zp_gfx[0]}$zc4$zci${zp_gfx[0]}${zp_gfx[3]}"
# current path
zp_set4="$zi2\$ZPWD"
# last graphics of first line
zp_set5="$zc4$zci${zp_gfx[4]}${zp_gfx[0]}$zc2$zci${zp_gfx[0]}"
# second line of graphics up to time, user identifier
zp_set6="
$zc3${zp_gfx[2]}$zc4$zci${zp_gfx[0]}${zp_gfx[3]}"
# time, user identifier
zp_set7="$zi3\D{$ztime}$zci $zi5\\\$"
# final set of graphics before cursor (including exit code if not 0)
zp_set8="$zc4$zci${zp_gfx[4]}$zi4\$ZEXIT$zc2$zci${zp_gfx[0]}$zc3${zp_gfx[5]}$zc0 "
# set standard bash prompt
PS1="${TITLEBAR}$zp_set1$zp_set2$zp_set3$zp_set4$zp_set5$zp_set6$zp_set7$zp_set8"
# set continuation bash prompt
PS2="$zc3$zci${zp_gfx[2]}$zc4$zci${zp_gfx[0]}${zp_gfx[3]}$zi5\\\$$zc4$zci${zp_gfx[4]}$zc2$zci${zp_gfx[0]}$zc3${zp_gfx[5]}$zc0 "
}
|
zer0ed/zer0prompt
|
zer0prompt.sh
|
Shell
|
gpl-3.0
| 4,821 |
#!/bin/bash -ex
# Debian release
# Copyright (C) 2008, 2009 Sylvain Beucler
# This file is part of GNU FreeDink
# GNU FreeDink is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# GNU FreeDink is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
PACKAGE=freedink
VERSION=$1
if [ -z "$VERSION" ]; then
VERSION=$(cd /mnt/snapshots/$PACKAGE && ls -d */ | sed 's,/$,,' | sort -n | tail -1)
fi
PUBDIR=/mnt/snapshots/$PACKAGE/$VERSION
rm -rf t/
mkdir t
pushd t
TARBALL=$PACKAGE-$VERSION.tar.gz
cp -a $PUBDIR/$TARBALL .
tar xzf $TARBALL
ln -s $TARBALL ${PACKAGE}_$VERSION.orig.tar.gz
cd $PACKAGE-$VERSION/
cp -a ../../$PACKAGE/debian .
yes | DEBEMAIL="[email protected]" DEBFULLNAME="Sylvain Beucler" dch -D stable \
--newversion $VERSION-1 \
--force-bad-version -- \
"New upstream release"
pdebuild --debbuildopts '-sa' --buildresult /mnt/snapshots/debian \
-- --basetgz /var/cache/pbuilder/base-lenny-bpo.tar.gz --bindmounts /usr/src/backports/lenny/debs
popd
make -C /mnt/snapshots/debian
rm -rf t
exit;
# construction:
# with cowbuilder / etch:
aptitude install cowbuilder fakeroot sudo
aptitude install debhelper # for dh_clean
mkdir /mnt/snapshots/debian/etch-backports/
(cd /mnt/snapshots/debian/etch-backports/ && apt-ftparchive packages . | gzip > Packages.gz)
cowbuilder --create --basepath /var/cache/pbuilder/base-etch.cow --distribution=etch \
--othermirror "deb http://backports.org/debian etch-backports main|deb file:///mnt/snapshots/debian/etch-backports/ ./" \
--bindmounts /mnt/snapshots/debian/etch-backports
# update:
cowbuilder --update --basepath /var/cache/pbuilder/base-etch.cow/ --bindmounts /mnt/snapshots/debian/etch-backports --debian-etch-workaround
# with pbuilder / lenny:
mkdir -p /usr/src/backports/lenny/debs
(cd /usr/src/backports/lenny/debs && apt-ftparchive packages . | gzip > Packages.gz)
pbuilder --create --basetgz /var/cache/pbuilder/base-lenny-bpo.tar.gz --distribution lenny \
--othermirror "deb http://backports.org/debian lenny-backports main|deb file:///usr/src/backports/lenny/debs ./" \
--bindmounts /usr/src/backports/lenny/debs
# update:
pbuilder --update --basetgz /var/cache/pbuilder/base-lenny-bpo.tar.gz --bindmounts /usr/src/backports/lenny/debs
# with pbuilder / squeeze:
mkdir -p /usr/src/backports/squeeze/debs
(cd /usr/src/backports/squeeze/debs && apt-ftparchive packages . | gzip > Packages.gz)
pbuilder --create --basetgz /var/cache/pbuilder/base-squeeze-bpo.tar.gz --distribution squeeze \
--othermirror "deb http://backports.org/debian squeeze-backports main|deb file:///usr/src/backports/squeeze/debs ./" \
--bindmounts /usr/src/backports/squeeze/debs
# update:
pbuilder --update --basetgz /var/cache/pbuilder/base-squeeze-bpo.tar.gz --bindmounts /usr/src/backports/squeeze/debs
|
alexschrod/freedink-lua
|
autobuild/freedink-debs.sh
|
Shell
|
gpl-3.0
| 3,274 |
#!/bin/bash
python RunBootstrap.py --paralog1 YBR191W --paralog2 YPL079W --bootnum 15 > YBR191W_YPL079W_Boot15_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Bootstrap/ShFiles/MG94_YBR191W_YPL079W_Boot15.sh
|
Shell
|
gpl-3.0
| 128 |
#!/bin/sh
cat $1 | grep -i Bomber | grep -i Nicolas | awk '{print $(NF-1)}'
|
jpirsch/piscines
|
Piscine C/j09/ex02/find_nicolas_bomber.sh
|
Shell
|
gpl-3.0
| 77 |
scafSeq=$1
prefix=${scafSeq%.scafSeq}
prefix=${prefix%.fa}
tmp_file=${prefix}.assemblathon.fa
scaf_len_file=${prefix}.assemblathon.scafLengths
# The file contains only lengths of singleton contigs
contig_only_len_file=${prefix}.assemblathon.contigLengths
echo "Outputing scaffold lengths into ${scaf_len_file}" 1>&2
awk '{ if($0 ~ /^>/) { if($0~/^>scaff/){print; flag=1} else {flag=0}} else { if(flag==1){print} } }' ${scafSeq} | tr ' ' '_' | fasta_lengths.pl > ${scaf_len_file}
echo "Outputing length of singleton contigs into ${contig_only_len_file}" 1>&2
awk '{ if($0 ~ /^>/) { if($0~/^>C/){print; flag=1} else {flag=0}} else { if(flag==1){print} } }' ${scafSeq} | tr ' ' '_' | fasta_lengths.pl > ${contig_only_len_file}
echo "assemblathon_stats.pl is evaluating ${scafSeq}" 1>&2
awk '{ if($0 ~ /^>/) { if($0~/>scaff/){print; flag=1} else {flag=0}} else { if(flag==1){print} } }' ${scafSeq} > ${tmp_file}
output_summary=${prefix}.assemblathon.summary
# output_csv=${prefix}.assemblathon.csv
# output_graph=${prefix}.assemblathon.graph
assemblathon_stats.pl -n 100 -graph -csv ${tmp_file} &> ${output_summary}
rm ${tmp_file}
|
yfu/tools
|
eval_soapdenovo_scafseq.sh
|
Shell
|
gpl-3.0
| 1,134 |
python startdrivers.py fast0 fast1 fast2 fast3 fast4 fast5 fast6 fast7
|
gtfierro/giles
|
bench/fastsource/run.sh
|
Shell
|
gpl-3.0
| 72 |
#!/bin/sh
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Usage: ./update.sh <pocketsphinx_directory>
#
# Copies the needed files from a directory containing the original
# pocketsphinx source.
cp $1/include/*.h .
cp $1/src/libpocketsphinx/*.c src/
cp $1/src/libpocketsphinx/*.h src/
|
Yukarumya/Yukarum-Redfoxes
|
media/pocketsphinx/update.sh
|
Shell
|
mpl-2.0
| 441 |
#!/bin/sh
DIR=`dirname $0`;
. $DIR/base.sh;
psql=`which psql`;
if [ "$psql" = "" ]; then
echo "Can not find psql binary. Is it installed?";
exit 1;
fi
if [ "$DB_USER" = "" ]; then
echo "\$DB_USER not set. Using 'postgres'.";
DB_USER="postgres";
fi
if [ "$DB_NAME" = "" ]; then
echo "\$DB_NAME not set. Using 'postgres'.";
DB_NAME="postgres";
fi
DB_HOSTNAME=${DB_HOSTNAME-127.0.0.1};
$psql --version;
dropdb --host=$DB_HOSTNAME --username=$DB_USER $DB_NAME;
createdb --host=$DB_HOSTNAME --username=$DB_USER $DB_NAME;
check;
$psql --host=$DB_HOSTNAME --username=$DB_USER -c '
CREATE SCHEMA bookstore_schemas;
CREATE SCHEMA contest;
CREATE SCHEMA second_hand_books;
CREATE SCHEMA migration;
' $DB_NAME;
check;
|
BFH-InternetOfThings/smoje
|
Server/vendor/propel/propel/tests/bin/setup.pgsql.sh
|
Shell
|
agpl-3.0
| 741 |
#!/bin/sh
cd gtest-1.7.0 && cmake . && make && cd .. && cmake . && make
|
jbenden/dbabstract
|
install.sh
|
Shell
|
lgpl-2.1
| 73 |