Dataset schema (one value per column for each record below):
  code        stringlengths  2 – 1.05M
  repo_name   stringlengths  5 – 110
  path        stringlengths  3 – 922
  language    stringclasses  1 value
  license     stringclasses  15 values
  size        int64          2 – 1.05M
#!/usr/bin/env bash
mvn compile && mvn exec:java -Dexec.args="-i $1 -o $2"
essepuntato/rash
sources/docx2rash/compile-script.sh
Shell
isc
75
export DOTFILES_PATH=~/.dotfiles
export SHELLS_SRC_PATH=${DOTFILES_PATH}/all_shells

source ${SHELLS_SRC_PATH}/.profile

# just dummy values to make the AWS-SDK happy during local tests.
export AWS_ACCESS_KEY_ID=NONE
export AWS_SECRET_KEY=NONE
afranken/dotfiles
oh-my-zsh/custom/profile.zsh
Shell
mit
243
#!/bin/bash
# Note: $(pwd) replaces the original $(PWD), which would try to run a command named PWD.
docker run --name nginx \
  -d -p 8080:80 \
  -v $(pwd)/sites-enabled:/etc/nginx/sites-enabled \
  -v $(pwd)/log/nginx:/var/log/nginx \
  saltfactory/nginx
saltfactory/docker-nginx
run-nginx.sh
Shell
mit
161
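A small hedged companion to the script above: because the container is started with a fixed name, a re-run first needs the old container removed. Standard docker CLI only:

# Follow the running proxy's logs, then tear it down before a re-run
docker logs -f nginx
docker rm -f nginx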
#!/bin/bash
currDir=`pwd`
currTimestamp=`date +%s`

# Cleanup from previous runs
rm -rf /tmp/rs-docs-*/

# Clone the repo
git clone --depth 1 https://github.com/rightscale/docs /tmp/rs-docs-$currTimestamp/
cd "/tmp/rs-docs-$currTimestamp"
echo '[rs-docs-edit-statusPage] Creating new branch'
git checkout -b "edit-statusPage-$currTimestamp"

# Open the account page in the browser
### Find out what OS we are running on so we can launch the browser properly
platform='unknown'
unamestr=`uname`
if [[ "$unamestr" == 'Linux' ]]; then
  platform='linux'
elif [[ "$unamestr" == 'FreeBSD' ]]; then
  platform='freebsd'
elif [[ "$unamestr" == 'Darwin' ]]; then
  platform='osx'
fi
echo "[rs-docs-edit-statusPage] Platform detected: $platform."

### Set the openHandler to handle various OSes and input types (URLs, files, etc..)
if [[ $platform == 'linux' ]]; then
  openHandler='xdg-open';
elif [[ $platform == 'osx' ]]; then
  openHandler='open';
fi

echo "[rs-docs-edit-statusPage] Opening status.html.md in text editor and web browser (live reload)"
$openHandler "/tmp/rs-docs-$currTimestamp/source/status.html.md" &> /dev/null
$openHandler "http://localhost:4567/status.html" &> /dev/null

echo "[rs-docs-edit-statusPage] Starting MiddleMan Server. Hit CTRL+C when finished."
bundle exec middleman server > /dev/null

while true; do
  read -p "[rs-docs-edit-statusPage] Would you like to commit your changes [yes/no]? " yn
  case $yn in
    [Yy]* ) git commit -a; break;;
    [Nn]* ) echo "[rs-docs-edit-statusPage] All done -- exiting script"; exit;;
    * ) echo "Please answer yes or no.";;
  esac
done

while true; do
  read -p "[rs-docs-edit-statusPage] Would you like to push your changes to GitHub [yes/no]? " yn
  case $yn in
    [Yy]* ) git push --all;
            # Note: 'master' is quoted here; the original backticks would have executed it as a command.
            echo "[rs-docs-edit-statusPage] Opening Pull Request page GitHub. Create a pull request and merge it to 'master' to complete the process.";
            $openHandler "https://github.com/rightscale/docs/compare/edit-statusPage-$currTimestamp?expand=1";
            break;;
    [Nn]* ) echo "[rs-docs-edit-statusPage] All done -- exiting script"; exit;;
    * ) echo "Please answer yes or no.";;
  esac
done
bryankaraffa/rightscale_scripts
tools/rs-docs-edit-statusPage.sh
Shell
mit
2,175
#!/bin/bash
# --------------------------------------
# Watcher
# --------------------------------------
#
# Autonomous bot that detects when new
# domains are acquired and verifies them
# for duplicates. When unique domains are
# confirmed, the watcher sends them to the
# domain delegators.
#
# The watcher is not the only script that
# removes duplicates. A divide and conquer
# approach is utilized to increase the
# speed of the removal.
#
# Author: Thiago Moreno
# <[email protected]>
# --------------------------------------

# START | Config
# --------------------------------------
WATCH_REFRESH_INTERVAL=0.5
FOUND_DOMAINS_FILE=`qbot path`"foundDomains.list"
DOMAINS_FILE="domains.list"
OFFLINE_FILE=`qbot path`"offlineDomains.list"
DOMAINS_HISTORY_FILE=`qbot path`"domains.history"
SCRAPED_FILE=`qbot path`"scrapedDomains.list"
OBSERVE_FILE=`qbot path`"qbot.log"
UNIQUE_FILE=`qbot path`"uniqueDomains.list"
OUTPUT_FILE=`qbot path`"output.log"
RAM_DIRECTORY='/mnt/qbot'
DELEGATORS_LIMIT=6
# --------------------------------------
# END | Config

# START | Helpers
# --------------------------------------
# Check if determined script is running on background (coupled, from "_qbot.sh" script's prototype)
is_running() {
  if [ "`ps -A --context | grep -E \"${1}.s[h]\" | wc -l`" -gt "0" ]
  then
    return 0
  else
    return 1
  fi
}
# --------------------------------------
# END | Helpers

# START | Initialize
# --------------------------------------
qbotPath=`qbot path`

# Ram Allocation
if [ ! -d "${RAM_DIRECTORY}" ]
then
  mkdir "${RAM_DIRECTORY}"
  mount -t tmpfs -o size=1024m tmpfs "${RAM_DIRECTORY}"
fi

chmod +rw $FOUND_DOMAINS_FILE
chmod +rw $DOMAINS_FILE
chmod +rw $DOMAINS_HISTORY_FILE
chmod +rw $SCRAPED_FILE
chmod +rw $OBSERVE_FILE
chmod +rw $UNIQUE_FILE

echo -n "" > ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp
echo -n "" > ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp

[ ! -d "${RAM_DIRECTORY}/queue" ] && mkdir "${RAM_DIRECTORY}/queue"

oldLines=0
currentLines=0

# Reset delegators
[ "`ls ${RAM_DIRECTORY}/queue | wc -l`" != "0" ] && rm ${RAM_DIRECTORY}/queue/*

echo "N---1"

# Re-initialize spiders if they were running
if [ "`ls \"${RAM_DIRECTORY}\" | grep -E 'spider-.*-queue.list' | wc -l`" -gt "0" ]
then
  for spiderQueueFile in "${RAM_DIRECTORY}"/spider-*-queue.list
  do
    spiderName=${spiderQueueFile/${RAM_DIRECTORY}\/spider-}
    spiderName=${spiderName%-queue.list}
    echo $spiderName
    if [ "`cat \"${spiderQueueFile}\" | wc -l`" -gt "0" ]
    then
      if [ -f "`qbot path`spider-${spiderName}.sh" ] && ! `is_running spider-${spiderName}`
      then
        nohup `qbot path`"spider-${spiderName}.sh" > ${OUTPUT_FILE} 2>&1&
      fi
    else
      [ -f "${spiderQueueFile}.lock" ] && rm ${spiderQueueFile}.lock
    fi
    if [ ! -f "`qbot path`spider-${spiderName}.sh" ]
    then
      qbot add spider "${spiderName}" &>`qbot path`output.log
    fi
  done
fi
# --------------------------------------
# END | Initialize

# START | Watcher loop event
# --------------------------------------
echo "The Watcher is waiting for domains"
echo "The Watcher is waiting for domains" >> $OBSERVE_FILE
while :
do
  sleep "$WATCH_REFRESH_INTERVAL"

  # START | Watcher loop event
  # --------------------------------------
  # currentLinesRefresh="`sed -n '$=' ${FOUND_DOMAINS_FILE}`"
  currentLines="`sed -n '$=' ${FOUND_DOMAINS_FILE}`"
  # [ "${currentLinesRefresh}" = "${currentLines}" ] && currentLines="${currentLinesRefresh}"
  diffLines=$((currentLines - oldLines))

  # Skip if there are no new lines
  [[ "${diffLines}" = "0" && "`cat ${DOMAINS_FILE#${qbotPath}}.temp | wc -l`" != "0" ]] && continue

  cat <(tail -n +"$((oldLines+1))" "${FOUND_DOMAINS_FILE}") >> ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp
  oldLines=${currentLines}

  awk '!a[$0]++' "${FOUND_DOMAINS_FILE#${qbotPath}}" > "${FOUND_DOMAINS_FILE#${qbotPath}}".unduplicated &&\
  cp "${FOUND_DOMAINS_FILE#${qbotPath}}".unduplicated "${FOUND_DOMAINS_FILE#${qbotPath}}" &&\
  rm "${FOUND_DOMAINS_FILE#${qbotPath}}".unduplicated

  s_time_taken=$(date +%s)
  echo "The Watcher is handling duplicates..."
  echo "The Watcher is handling duplicates..." >> $OBSERVE_FILE

  # START | Handling new domains duplicates
  cat $SCRAPED_FILE > ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp
  cat $OFFLINE_FILE > ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp

  awk '!a[$0]++' ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp > ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp2
  rm ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp
  mv ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp2 ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp

  awk '!a[$0]++' ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp > ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp2
  rm ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp
  mv ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp2 ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp

  awk '!a[$0]++' ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp > ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp2
  rm ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp
  mv ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp2 ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp

  comm -13 <(sort ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp) <(sort ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp) > ${DOMAINS_FILE#${qbotPath}}.temp2
  comm -13 <(sort ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp) <(sort ${DOMAINS_FILE#${qbotPath}}.temp2) > ${DOMAINS_FILE#${qbotPath}}.temp
  # awk 'NR==FNR{a[$0]="";next}; !($0 in a)' ${RAM_DIRECTORY}/${FOUND_DOMAINS_FILE#${qbotPath}}.temp ${RAM_DIRECTORY}/${SCRAPED_FILE#${qbotPath}}.temp > ${DOMAINS_FILE#${qbotPath}}.temp2
  # awk 'NR==FNR{a[$0]="";next}; !($0 in a)' ${DOMAINS_FILE#${qbotPath}}.temp2 ${RAM_DIRECTORY}/${OFFLINE_FILE#${qbotPath}}.temp > ${DOMAINS_FILE#${qbotPath}}.temp
  # END | Handling new domains duplicates

  uniqueDomainsCount="`cat ${DOMAINS_FILE#${qbotPath}}.temp | wc -l`"
  e_time_taken=$(date +%s)
  echo "The Watcher is holding ${uniqueDomainsCount} unique domain(s). ("$((e_time_taken - s_time_taken))"s)." >> $OBSERVE_FILE
  echo "The watcher is handling duplicates... Done. Holding ${uniqueDomainsCount} unique domain(s). ("$((e_time_taken - s_time_taken))"s)."

  # START | Reading domains found phase 2 (enqueue from domains.list)
  if [ "${uniqueDomainsCount}" -gt "0" ]
  then
    C_unduplicateTimer=0
    while read -r domainToDelegate
    do
      # Verify if the domain is already being scraped, then skip
      if [ "`ls \"${RAM_DIRECTORY}\" | grep -E 'spider-.*-queue.list' | wc -l`" -gt "0" ]
      then
        for spiderQueue in "${RAM_DIRECTORY}"/spider-*-queue.list
        do
          [ "`head -1 \"${spiderQueue}\"`" = "${domainToDelegate}" ] && continue 2
        done
      fi
      [ "`grep \"${domainToDelegate}\" \"${SCRAPED_FILE}\" | wc -l`" -gt "0" ] && continue
      while :
      do
        # Wait
        if [ "`ls ${RAM_DIRECTORY}/queue | wc -l`" -lt "${DELEGATORS_LIMIT}" ]
        then
          # if ! grep -q "[d]omainDelegator.sh ${domainToDelegate}" <(ps -ef S)
          if [ "`ps -A --context | grep [d]omainDelegator | wc -l`" -lt "${DELEGATORS_LIMIT}" ]
          then
            nohup "`qbot path`domainDelegator.sh" "${domainToDelegate}" 2>&1&
          fi
          break
        fi
        ((C_unduplicateTimer++))
        if [ "${C_unduplicateTimer}" = "1200" ]
        then
          C_unduplicateTimer=0
          awk '!a[$0]++' "${FOUND_DOMAINS_FILE#${qbotPath}}" > "${FOUND_DOMAINS_FILE#${qbotPath}}".unduplicated &&\
          cp "${FOUND_DOMAINS_FILE#${qbotPath}}".unduplicated "${FOUND_DOMAINS_FILE#${qbotPath}}" &&\
          rm "${FOUND_DOMAINS_FILE#${qbotPath}}".unduplicated
        fi
        sleep 0.5
      done
    done < <(shuf ${DOMAINS_FILE#${qbotPath}}.temp) # shuf: non-posix (randomizing lines)
  fi

  # currentLines="`sed -n '$=' ${FOUND_DOMAINS_FILE}`"
  cat ${DOMAINS_FILE#${qbotPath}}.temp >> $DOMAINS_HISTORY_FILE
  echo -n "" > ${DOMAINS_FILE#${qbotPath}}.temp
  # END | Reading domains found phase 2 (enqueue)
done
# --------------------------------------
# END | Watcher loop event
Quicktech-Moreno/qbot
watcher.sh
Shell
mit
8,333
#!/bin/bash
#: Title       : install-global
#: Synopsis    : none
#: Date        : 2014-08-29 15:46:22
#: Author      : shulei
#: version     : 1.0
#: Description : install global
#: Options     : -v set to display message

pushd $PWD

## Script path
script_path="${0%/*}"                   # remove the script name, get the path
script_path=${script_path/\./$(pwd)}    # if path starts with . , replace with $PWD
cd "${script_path}"

## source lib
source ../libbash/bashr.sh

## init parameters
MESSAGE_ENABLED=n

## get parameters
## parse command-line options, -m
while getopts "vb:" var
do
  case $var in
    v) MESSAGE_ENABLED=y ;;
  esac
done
shift $(( $OPTIND - 1 ))

## content
echoH1 "Start install global"

setup "validation ~/source-install directory exists" \
  ls ~/source-install \
  "can't find ~/source-install dir, please create it first" \
  ;
setup "validation ctags exists" \
  ls /usr/local/bin/ctags \
  "can't find ctags, please install it first" \
  ;
setup "create ~/source-install/global workspace" \
  mkdir ~/source-install/global \
  "can't create ~/source-install/global dir, please check the reason" \
  ;
setup "cp global tar file into ~/source-install/global" \
  cp -r global-6.3.1.tar.gz ~/source-install/global \
  "cp tar file into ~/source-install/global failed" \
  ;

## change dir
cd ~/source-install/global/

setup "tar the global tar file" \
  tar -xf global-6.3.1.tar.gz \
  "tar the global tar file failed" \
  ;
setup "install libncurses5-dev libs" \
  sudo apt-get install -y -q libncurses5-dev \
  "install libncurses5-dev lib failed" \
  ;

cd ~/source-install/global/global-6.3.1/

echoH2 "try build global"
setup "run configure" \
  ./configure --with-exuberant-ctags=/usr/local/bin/ctags \
  "global configure failed" \
  ;
setup "make install" \
  sudo make install \
  "global make install failed" \
  ;

popd > /dev/null
qinshulei/ubuntu-install
source-install/install-global.sh
Shell
mit
2,010
#!/bin/sh
set -u
JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.7.0_25.jdk/Contents/Home/
gr -i publish
ae6rt/httplite
publish.sh
Shell
mit
108
# Ruby
alias be='bundle exec'
alias sc='script/console'
alias sg='script/generate'
alias sd='script/destroy'
ninjabiscuit/dotfiles
ruby/aliases.zsh
Shell
mit
109
#!/usr/bin/bash
SALT_DIR=/path/to/salt/files

seasalt_start() {
  runit master 0 "-v ${SALT_DIR}:/srv/salt"
  runit minion 1 "--link salt-master-0 -e SALT_ID=host.id1"
  runit minion 2 "--link salt-master-0 -e SALT_ID=host.id1"
  runit minion 3 "--link salt-master-0 -e SALT_ID=host.id3"
}

seasalt_stop() {
  killit master 0
  killit minion 1
  killit minion 2
  killit minion 3
}
mckay-software/seasalt
seasalt.example.sh
Shell
mit
399
#!/bin/bash
cp /home/dwu/msm/drivers/staging/android/binder.c .
cp /home/dwu/msm/drivers/staging/android/binder_filter.c .
cp /home/dwu/msm/drivers/staging/android/binder_filter.h .
cp /home/dwu/msm/drivers/staging/android/Makefile .
cp /home/dwu/msm/drivers/staging/android/Kconfig .
dxwu/BinderFilter
src/copyFilesToHere.sh
Shell
mit
286
#!/bin/bash
rm -rf test-file*
echo "mysecretext" > test1
openssl aes-256-cbc -e -pass pass:mysecret -in test1 -out test1.enc
openssl aes-256-cbc -e -pass pass:mysecret -in test2 -out test2.enc
openssl aes-256-cbc -e -pass pass:mysecret -in test3 -out test3.enc
jonatan-alama/node-decipher-openssl
test/createData.sh
Shell
mit
263
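For reference, a minimal hedged sketch of the matching decryption step for the fixtures generated above, assuming the same static passphrase used during encryption:

# -d reverses the -e encryption performed by createData.sh
openssl aes-256-cbc -d -pass pass:mysecret -in test1.enc -out test1.dec
diff test1 test1.dec && echo "round-trip OK"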
sh analyse_instruction.sh "jmp .LoooooooooooooooooooooongLabel{loop_id} S .LoooooooooooooooooooooongLabel{loop_id}:" 10000 50000
sh analyse_instruction.sh "jmp .L{loop_id} S .L{loop_id}:" 5000 500000
sh analyse_instruction.sh "jmp .L{loop_id} S .L{loop_id}:" 2500 500000
sh analyse_instruction.sh "jmp .L{loop_id} S .L{loop_id}:" 1000 500000
realead/asmeter
src/jmp_test.sh
Shell
mit
350
rm *.o
make test INCLUDEMAIN=1 DEBUG=1 OBJECTS=xor.o TARGET_NAME=xor
frranck/asm2c
asmTests/xor/createTest.sh
Shell
mit
69
#!/usr/bin/env bash
# (C) Oliver Schoenborn
#
# Publish the disk usage below a certain path on an EC2 instance
# Uses `du ... path` and `aws cloudwatch put-metric-data`
#
# Requires that the ec2 have following policy
# {
#     "Version": "2012-10-17",
#     "Statement": [
#         {
#             "Action": [
#                 "cloudwatch:PutMetricData"
#             ],
#             "Effect": "Allow",
#             "Resource": "*"
#         }
#     ]
# }
#
# First arg must be the CloudWatch namespace to push metrics to;
# Second arg must be the path to measure from.
#
# Example: $0 MyNamespace /var/log/nginx

set -o errexit

if [[ ! -f ec2-metadata ]]; then
  echo "Downloading ec2-metadata"
  wget http://s3.amazonaws.com/ec2metadata/ec2-metadata
  chmod u+x ec2-metadata
fi

namespace=$1
folder_path=$2
if [[ -z $folder_path ]]; then
  echo "Specify folder path"
  exit 1
fi

PATH=/usr/local/bin:$PATH
space=$(du --bytes ${folder_path} | cut -f 1)
echo "$(date -Isec) - Disk space used by ${folder_path}: ${space} bytes"

iid=$(./ec2-metadata --instance-id | cut -f 2 -d " ")
if aws cloudwatch put-metric-data --namespace ${namespace} --metric-name DiskSpaceUsed \
    --unit Bytes --value ${space} --dimensions Path="${folder_path}",InstanceId=${iid}
then
  echo "# Metric pushed to CloudWatch, check https://console.aws.amazon.com/cloudwatch -> ${namespace} -> Path,InstanceId"
  echo "# -------"
else
  echo "# ERROR could not push to CloudWatch: $?"
fi
schollii/sandals
send-disk-use-from-path-to-cloudwatch.sh
Shell
mit
1,474
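A hedged companion sketch: once a few datapoints have been published, the same metric can be read back with `aws cloudwatch get-metric-statistics`. Namespace, path, and instance id below are illustrative placeholders, and the date arithmetic assumes GNU date:

# Query the last hour of DiskSpaceUsed datapoints for one Path/InstanceId pair
aws cloudwatch get-metric-statistics \
  --namespace MyNamespace --metric-name DiskSpaceUsed \
  --dimensions Name=Path,Value=/var/log/nginx Name=InstanceId,Value=i-0123456789abcdef0 \
  --start-time "$(date -u -d '1 hour ago' +%Y-%m-%dT%H:%M:%SZ)" \
  --end-time "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  --period 300 --statistics Maximum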
#!/bin/zsh

MODEL_SAVE_PATH="/n/sd8/inaguma/result/tensorflow/svc"

# Select GPU
if [ $# -ne 2 ]; then
  echo "Error: set GPU number & config path." 1>&2
  echo "Usage: ./run_attention.sh path_to_config_file gpu_index" 1>&2
  exit 1
fi

# Set path to CUDA
export PATH=$PATH:/usr/local/cuda-8.0/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-8.0/lib64:/usr/local/cuda-8.0/extras/CUPTI/lib64

# Set path to python
PYTHON=/home/lab5/inaguma/.pyenv/versions/anaconda3-4.1.1/bin/python

config_path=$1
gpu_index=$2
filename=$(basename $config_path | awk -F. '{print $1}')

mkdir -p log

# Standard output version
# CUDA_VISIBLE_DEVICES=$gpu_index $PYTHON train_attention.py \
#   $config_path $MODEL_SAVE_PATH

# Background job version
CUDA_VISIBLE_DEVICES=$gpu_index nohup $PYTHON train_attention.py \
  $config_path $MODEL_SAVE_PATH > log/$filename".log" &
hirofumi0810/tensorflow_end2end_speech_recognition
examples/svc/training/run_attention.sh
Shell
mit
867
#!/bin/bash -x
if [ ! -d $MAGE_ROOT_DIR/.modman ] ; then
  cd $MAGE_ROOT_DIR && modman init
fi
cd $MAGE_ROOT_DIR && modman $@
ffuenf/docker-modman
modman-wrapper.sh
Shell
mit
128
if [[ -d "/usr/share/git-core/contrib" ]]; then
  export PATH="$PATH:/usr/share/git-core/contrib"
elif [[ -d "/usr/local/share/git-core/contrib/diff-highlight" ]]; then
  export PATH="$PATH:/usr/local/share/git-core/contrib/diff-highlight"
fi
amarshall/dotfiles
lib/shell/plugins/git/env.zsh
Shell
mit
243
#!/bin/sh
#
# @sacloud-once

# Install httpd
yum install -y httpd || exit 1

# Page used to verify the install (original "exit1" was missing a space)
hostname >> /var/www/html/index.html || exit 1

# Enable and start the service
systemctl enable httpd.service || exit 1
systemctl start httpd.service || exit 1

# Firewall settings
firewall-cmd --add-service=http --zone=public --permanent || exit 1
firewall-cmd --reload || exit 1

exit 0
zembutsu/sakura-terraform
7-load-balancing/install_httpd.sh
Shell
mit
408
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2715-1
#
# Security announcement date: 2013-06-26 00:00:00 UTC
# Script generation date:     2017-01-01 21:06:36 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - puppet:2.7.18-5
#
# Last versions recommended by security team:
# - puppet:2.7.23-1~deb7u3
#
# CVE List:
# - CVE-2013-3567
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

sudo apt-get install --only-upgrade puppet=2.7.23-1~deb7u3 -y
Cyberwatch/cbw-security-fixes
Debian_7_(Wheezy)/x86_64/2013/DSA-2715-1.sh
Shell
mit
607
#!/usr/bin/env bash
. $(dirname ${0})/common.sh

[[ -f ${_RESTAFARI_HOME}/env.sh ]] && . ${_RESTAFARI_HOME}/env.sh

cd ${_RESTAFARI_HOME}
pip3 install -e .
python3 setup.py install
[[ -z $(which restafari 2>/dev/null) ]] && echo "Restafari is not installed properly" && exit 1
restafari --version
[[ ${?} -gt 0 ]] && echo "Restafari is not installed properly" && exit 1
exit 0
manoelhc/restafari
ci/install.sh
Shell
mit
383
#!/bin/bash
./node_modules/vows/bin/vows
demee/text
scripts/test.sh
Shell
mit
41
#!/bin/bash

# Change working directory to the top-most Finder window location
# Source: https://github.com/mathiasbynens/dotfiles/blob/master/.functions#L8
alias cdf='cd $(osascript -e "tell app \"Finder\" to POSIX path of (insertion location as alias)")'

alias dt="cd ~/Desktop"

co() {
  local open_path other_args
  open_path="$1"
  other_args=("${@:2}")

  if [ -z "$open_path" ]; then
    code . "${other_args[@]}"
    return
  fi

  if [ -d "$open_path" ] || [ -f "$open_path" ]; then
    code "$open_path" "${other_args[@]}"
    return
  fi

  local repository_path
  repository_path="$REPOSITORIES_DIRECTORY/$1"
  if [ -d "$repository_path" ]; then
    code "$repository_path" "${other_args[@]}"
    return
  fi

  code "$open_path" "${other_args[@]}"
}
_rps_autocomplete co

o() {
  open -a Finder "$1"
}

rc() {
  cd "$HOME/dotfiles"
  code "$HOME/dotfiles"
}

mcd() {
  mkdir "$1" && cd "$_" || return
}

rn() {
  local new_name
  new_name="$1"
  if [ -z "$new_name" ]; then
    echo 'Rename the current directory.\nUsage: rn <new_name>'
    return
  fi

  local old_name
  old_name="$(basename $(pwd))"
  cd ../ && mv "$old_name" "$new_name" && cd "$new_name"
}
kvendrik/dotfiles
file-system/miscellaneous.bash
Shell
mit
1,179
#!/bin/sh
cd "$( dirname "$0" )/..";
grunt build && grunt exec:test;
ChristianGrete/mout-lang-type
bin/test.sh
Shell
mit
71
#!/bin/bash
apt-get update
apt-get install -y aptitude
aptitude install -y apache2
quanah/vagrant-webserver
vagrant/bootstrap.sh
Shell
mit
84
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Testcoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"

#delete existing rules
tc qdisc del dev ${IF} root

#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10

#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}

#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1

#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11

#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
#  iptables -t mangle -D OUTPUT 1
#  ret=$?
#done

#limit outgoing traffic to and from port 8333. but not when dealing with a host on the local network
# (defined by $LOCALNET)
# --set-mark marks packages matching these criteria with the number "2"
# these packages are filtered by the tc filter with "handle 2"
# this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2
L5hunter/TestCoin
contrib/qos/tc.sh
Shell
mit
1,671
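A brief hedged follow-up to the QoS script above: after applying the rules, the shaping and marking can be inspected with the standard tc/iptables status commands (interface name as configured in the script):

# Per-class byte/packet counters for the HTB tree on eth0
tc -s class show dev eth0
# The fw filters that steer marked packets into classes 1:10 / 1:11
tc filter show dev eth0
# Hit counters for the mangle-table MARK rules on port 8333
iptables -t mangle -nvL OUTPUT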
#!/usr/bin/env bash
docker build -t plex .
stevenbower/docker-plex
build.sh
Shell
mit
44
#!/usr/bin/env bash
rm graphs/cpu-all.html; bin/example.sh examples.CPU -g html all >> graphs/cpu-all.html
rm graphs/diskio-diskio.html; bin/example.sh examples.DiskIO -g html disk-io >> graphs/diskio-diskio.html
rm graphs/mem-faults.html; bin/example.sh examples.Mem -g html faults >> graphs/mem-faults.html
rm graphs/mem-all.html; bin/example.sh examples.Mem -g html all >> graphs/mem-all.html
romario13/bonfire
bin/examples-all.sh
Shell
mit
399
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3126-1
#
# Security announcement date: 2015-01-12 00:00:00 UTC
# Script generation date:     2017-02-07 21:05:29 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - php5:5.4.36-0+deb7u3
#
# Last versions recommended by security team:
# - php5:5.4.45-0+deb7u7
#
# CVE List:
# - CVE-2014-9652
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

sudo apt-get install --only-upgrade php5=5.4.45-0+deb7u7 -y
Cyberwatch/cbw-security-fixes
Debian_7_(Wheezy)/x86_64/2015/DSA-3126-1.sh
Shell
mit
608
#!/usr/bin/env bash

function go_to_project_top_directory() {
  local -r script_dir=$(dirname "${BASH_SOURCE[0]}")
  cd "$script_dir/../../.." || exit 1
}

function prepare_typescript_build_directory() {
  rm -rf platforms/typescript/dist
}

function build_typescript() {
  pushd platforms/typescript || exit 1
  npm run build
  popd || exit 1
}

function main() {
  go_to_project_top_directory
  source ./scripts/shared/shared.sh || exit 1
  shared.set_bash_error_handling
  prepare_typescript_build_directory
  build_typescript
  shared.display_success_message "Typescript build completed successfully 🏗️"
}

main
walterscarborough/LibSpacey
platforms/typescript/scripts/build-typescript.sh
Shell
mit
624
gcc -c 02_mixed_headers.c
gcc -o 02_mixed_headers.test 02_mixed_headers.o -L/usr/lib/x86_64-linux-gnu -lcurl
rbauduin/mbdetect
archives/tests/02_build.sh
Shell
mit
109
#!/usr/bin/env bash
for i in rq2 rq3; do
  docker-compose -f docker-compose.yml -f docker-compose.scale_rq.yml exec "$i" \
    sh -c 'rabbitmqctl stop_app; rabbitmqctl join_cluster rabbit@rq1; rabbitmqctl start_app'
done
docker-compose -f docker-compose.yml -f docker-compose.scale_rq.yml exec rq1 rabbitmqctl cluster_status
madeddie/hotjar-task
cluster_rq.sh
Shell
mit
327
#!/bin/bash
# Convert text to FASTA file
# Assumes no header

# Default parameter
length=74

# Note: the original option string "f:l::" and the stray "exit;;" without a
# case pattern were syntax errors; both are fixed minimally here.
while getopts f:l: option
do
  case "${option}" in
    f) fasta=${OPTARG};;
    l) length=${OPTARG};;
    *) exit 1;;
  esac
done

echo ">${fasta%.*}" > ${fasta%.*}.fa
fold -w $length ${fasta} >> ${fasta%.*}.fa
echo 'Done!'
essigmannlab/dcs_scripts
scripts/convert_to_fasta.sh
Shell
mit
310
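A hedged usage sketch for the converter above (the input filename is illustrative): wrapping a plain sequence file into a FASTA file folded to 60 columns:

./convert_to_fasta.sh -f reads.txt -l 60
# produces reads.fa with the header ">reads" and the sequence folded to 60 chars/line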
#!/bin/bash
echo 'standby 0' | cec-client -s > /dev/null
futurice/chilipie-kiosk
home/cec-off.sh
Shell
mit
58
#!/bin/sh
echo "execute ./run/bin/mruby ${1}"
./run/bin/mruby $1
ncq/rumin
mtest.sh
Shell
mit
66
#!/bin/bash
export HL_TRACE=3
export HL_TRACE_FILE=/dev/stdout
export HL_NUMTHREADS=4
rm -f $1/camera_pipe.avi
$1/process ../images/bayer_small.png 3700 1.8 50 1 $1/out.png | ../../bin/HalideTraceViz -t 1000 -s 1920 1080 \
  -f input 0 1024 -1 1 1 10 348 1 0 0 1 \
  -f denoised 0 1024 -1 1 1 305 360 1 0 0 1 \
  -f deinterleaved 0 1024 -1 1 1 580 120 1 0 0 1 0 220 \
  -f r_gr 0 1024 -1 1 1 720 120 1 0 0 1 \
  -f g_gr 0 1024 -1 1 1 860 120 1 0 0 1 \
  -f b_gr 0 1024 -1 1 1 1000 120 1 0 0 1 \
  -f r_r 0 1024 -1 1 1 720 340 1 0 0 1 \
  -f g_r 0 1024 -1 1 1 860 340 1 0 0 1 \
  -f b_r 0 1024 -1 1 1 1000 340 1 0 0 1 \
  -f r_b 0 1024 -1 1 1 720 560 1 0 0 1 \
  -f g_b 0 1024 -1 1 1 860 560 1 0 0 1 \
  -f b_b 0 1024 -1 1 1 1000 560 1 0 0 1 \
  -f r_gb 0 1024 -1 1 1 720 780 1 0 0 1 \
  -f g_gb 0 1024 -1 1 1 860 780 1 0 0 1 \
  -f b_gb 0 1024 -1 1 1 1000 780 1 0 0 1 \
  -f demosaiced 0 1024 2 1 1 1140 360 1 0 0 1 0 0 \
  -f corrected 0 1024 2 1 1 1400 360 1 0 0 1 0 0 \
  -f processed 0 256 2 1 1 1660 360 1 0 0 1 0 0 | \
avconv -f rawvideo -pix_fmt bgr32 -s 1920x1080 -i /dev/stdin -c:v h264 $1/camera_pipe.avi

#mplayer -demuxer rawvideo -rawvideo w=1920:h=1080:format=rgba:fps=30 -idle -fixed-vo -
ronen/Halide
apps/camera_pipe/viz.sh
Shell
mit
1,339
#!/bin/bash
cd $APPS_BASE
make clean
make
raphui/rnk
tools/make_apps.sh
Shell
mit
43
#!/bin/bash -x
#
# Generated - do not edit!
#

# Macros
TOP=`pwd`
CND_CONF=CanStick
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/Source.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=Source.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=source/

# Functions
function checkReturnCode
{
  rc=$?
  if [ $rc != 0 ]
  then
    exit $rc
  fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
  mkdir -p "$1"
  checkReturnCode
  if [ "$2" != "" ]
  then
    chmod $2 "$1"
    checkReturnCode
  fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
  cp "$1" "$2"
  checkReturnCode
  if [ "$3" != "" ]
  then
    chmod $3 "$2"
    checkReturnCode
  fi
}

# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}

# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/source/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755

# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/source.tar
cd ${TMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/source.tar *
checkReturnCode

# Cleanup
cd "${TOP}"
rm -rf ${TMPDIR}
medo64/CanStick
Firmware/Source/nbproject/Package-CanStick.bash
Shell
mit
1,352
#!/bin/bash
MPC="/usr/bin/mpc"
ALBUM_TITLE=$1

$MPC clear
#$MPC findadd ALBUM_TITLE ${ALBUM_TITLE}
$MPC search album "$ALBUM_TITLE" | $MPC add
$MPC play
ggilestro/majordomo
bin/play_cd.sh
Shell
mit
153
# use for ultimate plumber instead of up
# which collides w/ sh/scripts/up.sh for traversing
# directories
alias ulp='up'

gocover () {
  t="/tmp/go-cover.$$.tmp"
  go test -coverprofile=$t $@ && go tool cover -html=$t && unlink $t
}
mdzhang/dotfiles
golang/.config/sh/golang/aliases.sh
Shell
mit
238
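A brief hedged usage note for the helper above: run from inside a Go package directory, it tests that package with a coverage profile and then opens the annotated source in a browser:

gocover .    # test the current package with coverage, then view the HTML report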
#!/usr/bin/env bash
set -o errexit
set -o pipefail
set -o nounset

cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ -d dist ]]; then
  echo "Please delete $(pwd)/dist" >&2
  exit 1
fi

if ! pandoc -v &> /dev/null; then
  echo "Please install pandoc (brew install pandoc)" >&2
  exit 1
fi

if ! rst-lint -h &> /dev/null; then
  echo "Please install rst-lint (pip install restructuredtext_lint)" >&2
  exit 1
fi

if ! twine -h &> /dev/null; then
  echo "Please install twine (pip install twine)" >&2
  exit 1
fi

echo
echo "### Generating RST from Markdown"
mkdir -p build
pandoc --from=markdown --to=rst --output=build/README.rst README.md

echo
echo "### Checking RST for compatibility with PyPI (this is non-trivial)"
rst-lint build/README.rst

echo
echo "### Preparing distribution"
python setup.py bdist_wheel

echo
echo "### Uploading to PyPI (it may ask for your credentials)"
twine upload dist/*
adamcath/ads
publish_to_pypi.sh
Shell
mit
912
#!/bin/sh
(cd ./mcmatools && ./buildImage.sh)
(cd ./gcclatest && ./buildImage.sh)
(cd ./gccgtestlatest && ./buildImage.sh)
Zuehlke/BiZEPS
buildTools/buildgccgtestlatest.sh
Shell
mit
124
#!/bin/bash
echo "Starting Alvaro's vim setup"

DO_FULL_INSTALL=false;
[ "$1" == "--install" ] && DO_FULL_INSTALL=true

# Create the necessary directories
[ -e ~/.config/nvim ] || mkdir -p ~/.config/nvim
[ -e ~/.config/nvim/bundle ] || mkdir -p ~/.config/nvim/bundle
[ -e ~/.config/nvim/ftplugin ] || mkdir -p ~/.config/nvim/ftplugin
[ -e ~/.config/nvim/syntax ] || mkdir -p ~/.config/nvim/syntax

# Install Plug
if [ ! -e ~/.config/nvim/autoload/plug.vim ]; then
  curl -fLo ~/.config/nvim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
fi

# Copy files
cp ftplugin/*.vim ~/.config/nvim/ftplugin
cp syntax/*.vim ~/.config/nvim/syntax
cp vimrc ~/.config/nvim/init.vim

# Create symbolic links
[ -e ~/.vim ] || ln -s ~/.config/nvim ~/.vim
[ -e ~/.vimrc ] || ln -s ~/.config/nvim/init.vim ~/.vimrc

# Install neovim and dependencies
if [ "$DO_FULL_INSTALL" = true ]; then
  OS_T="$OSTYPE"
  if [[ $OS_T == "linux-gnu" ]]; then
    # Linux (Ubuntu)
    sudo apt-get install git python-pip neovim -y
  elif [[ $OS_T == "darwin"* ]]; then
    # Mac OS X
    brew install git neovim/neovim/neovim fzf
  fi

  # Setup virtualenv for neovim
  source `which virtualenvwrapper.sh`
  mkvirtualenv nvim
  pip install --upgrade pip autopep8 neovim pep8 flake8
  deactivate
  mkvirtualenv --python=`which python3` nvim-python3
  pip install --upgrade pip autopep8 neovim pep8 flake8
  deactivate

  # TODO: change PYTHONPATH to nvim virtualenv instead of installing system-wide
  if ! $(python -c "import autopep8" &> /dev/null); then sudo pip install autopep8; fi;
  if ! $(python -c "import neovim" &> /dev/null); then sudo pip install neovim; fi;
  if ! $(python -c "import pep8" &> /dev/null); then sudo pip install pep8; fi;
  if ! $(python -c "import flake8" &> /dev/null); then sudo pip install flake8; fi;

  # Install npm packages
  # TODO: ensure npm is installed
  sudo npm i -g eslint eslint-plugin-standard eslint-plugin-skip-nolint-lines
fi

# Install plugins
nvim +PlugInstall +qall

echo "Done setting up vim"
alvaromorales/dotfiles
vim/vimify.sh
Shell
mit
2,072
#!/bin/sh
# GCVIEWER_JAR=/path/to/gcviewer.jar

if [ -z $GCVIEWER_JAR ]; then
  GCVIEWER_JAR=$1
fi
if [ -z $GCVIEWER_JAR ]; then
  echo "Generate GC log summary using gcviewer. Please specify gcviewer.jar path in the first parameter."
  echo "https://github.com/chewiebug/GCViewer"
  echo ""
  echo "Usage: $0 <gcviewer.jar>"
  exit 1
fi

for f in `find . -name '*.log'`; do
  if [ ! -f "$f-summary.txt" ]; then
    java -jar $GCVIEWER_JAR $f $f-summary.txt
  fi
done
nekop/gclogs
summary.sh
Shell
mit
490
#!/usr/bin/env bash

# Use single quotes instead of double quotes to make it work with special-character passwords
PASSWORD='12345678'
HOMEDIRFOLDER='www'
PROJECTFOLDER='projects'

# create project folder
sudo mkdir "/var/www/html/${HOMEDIRFOLDER}"
sudo mkdir "/var/www/html/${PROJECTFOLDER}"
echo "<?php phpinfo(); ?>" > /var/www/html/${HOMEDIRFOLDER}/index.php

# update / upgrade
sudo apt -y update
sudo apt -y upgrade

# install apache and php 7
sudo apt install -y apache2
sudo apt install -y php libapache2-mod-php

# install cURL and Mcrypt
sudo apt install -y php-curl
sudo apt install -y php-mcrypt

# install mysql and give password to installer
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password $PASSWORD"
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $PASSWORD"
sudo apt install -y mysql-server
sudo apt install -y php-mysql

# install phpmyadmin and give password(s) to installer
# for simplicity I'm using the same password for mysql and phpmyadmin
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/dbconfig-install boolean true"
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password-confirm password $PASSWORD"
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/admin-pass password $PASSWORD"
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/app-pass password $PASSWORD"
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2"
sudo apt install -y phpmyadmin

# setup hosts file
VHOST=$(cat <<EOF
<VirtualHost *:80>
    DocumentRoot "/var/www/html/${HOMEDIRFOLDER}"
    <Directory "/var/www/html/${HOMEDIRFOLDER}">
        AllowOverride All
        Require all granted
    </Directory>
</VirtualHost>
EOF
)
echo "${VHOST}" > /etc/apache2/sites-available/000-default.conf

# enable mod_rewrite
sudo a2enmod rewrite

# restart apache
sudo service apache2 restart

# restart mysql
sudo service mysql restart

# install git
sudo apt install -y git

# install Composer
sudo apt install -y composer
muhammadtaqi/learning-tools
vagrant/xenial-lamp/bootstrap.sh
Shell
mit
2,062
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

export LC_ALL=C.UTF-8

export CONTAINER_NAME=ci_macos_cross
# Check that Focal can cross-compile to macos (Focal is used in the gitian build as well)
export DOCKER_NAME_TAG=ubuntu:20.04
export HOST=x86_64-apple-darwin18
export PACKAGES="cmake imagemagick librsvg2-bin libz-dev libtiff-tools libtinfo5 python3-setuptools xorriso"
export XCODE_VERSION=12.1
export XCODE_BUILD_ID=12A7403
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
export FUJICOIN_CONFIG="--with-gui --enable-reduce-exports"
fujicoin/fujicoin
ci/test/00_setup_env_mac.sh
Shell
mit
744
#!/usr/bin/env bash
apt-add-repository ppa:brightbox/ruby-ng
apt-get update -y && apt-get upgrade -y
apt-get build-dep ruby2.0-dev -y
apt-get install libsqlite3-dev sqlite3 ruby2.2-dev ruby2.2 -y

mv /usr/bin/ruby /usr/bin/ruby-old
mv /usr/bin/gem /usr/bin/gem-old
ln -s /usr/bin/ruby2.2 /usr/bin/ruby
ln -s /usr/bin/gem2.2 /usr/bin/gem

ln -s /vagrant /home/vagrant/gamesbot

gem install bundler rake rspec

cd /vagrant
echo "Installing gems.."
sudo -u vagrant bundle install --without extradrivers
echo "Running tests.."
sudo -u vagrant bundle exec rspec
echo "Complete!"
Zarthus/irc-games-bot
.vagrant_bootstrap.sh
Shell
mit
578
source "$(dirname $BASH_SOURCE)/system.sh"
source "$(dirname $BASH_SOURCE)/utils.sh"
source "$(dirname $BASH_SOURCE)/deb.sh"

readonly MAME_DEB_URL="file://$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../deb/mame_0.153_amd64.deb"
readonly MAME_FILES_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../files/mame"
readonly MAME_SRC_URL="http://mame.mirrors.zippykid.com/releases/mame0153s.zip"
readonly MAME_VERSION="0.153"
readonly MAME_SRC_PATH="/usr/local/src/mame_${MAME_VERSION}"
readonly MAME_BUILD_PATH="/tmp/mame_build"
readonly MAME_BUILD_DEPENDENCIES="build-essential libsdl1.2-dev libsdl-ttf2.0-dev \
libqt4-dev libfontconfig1-dev libxinerama-dev"
readonly MAME_DEB_DEPENDENCIES="libasound2 libqtgui4 libsdl1.2debian \
libsdl-ttf2.0-0 libxinerama1"

mame_install_build_dependencies() {
  install_packages $MAME_BUILD_DEPENDENCIES
}

mame_download_sources() {
  download_and_extract_archive $MAME_SRC_URL /tmp/mame
  mkdir --parents $MAME_SRC_PATH
  unzip -q /tmp/mame/mame.zip -d $MAME_SRC_PATH
  rm --force /tmp/mame/mame.zip
}

mame_compile() {
  cd $MAME_SRC_PATH
  make
  rm --recursive --force $MAME_BUILD_PATH
  mkdir --parents $MAME_BUILD_PATH/bin
  cp mame64 $MAME_BUILD_PATH/bin/mame
}

mame_build_package() {
  local dst_dir=$1
  declare_debian_package_dependencies mame $MAME_DEB_DEPENDENCIES
  build_debian_package $MAME_BUILD_PATH mame $MAME_VERSION bin/
  mv mame_${MAME_VERSION}_amd64.deb $dst_dir/
}

mame_install_package() {
  install_packages $MAME_DEB_DEPENDENCIES
  install_package_from_url $MAME_DEB_URL mame
}

# Generate a default `mame.ini` file and display it to stdout
# Changes in the default mame configuration:
# * update core output directory path
# * update rom path
# * enable opengl
# * enable multithreading
mame_generate_config() {
  local rom_path=$1
  cd /tmp
  mame -createconfig &> /dev/null
  replace_config_line "multithreading" "multithreading 1" "/tmp/mame.ini"
  replace_config_line "video" "video opengl" "/tmp/mame.ini"
  replace_config_line "cfg_directory" 'cfg_directory $HOME/.mame/cfg' "/tmp/mame.ini"
  replace_config_line "nvram_directory" 'nvram_directory $HOME/.mame/nvram' "/tmp/mame.ini"
  replace_config_line "memcard_directory" 'memcard_directory $HOME/.mame/memcard' "/tmp/mame.ini"
  replace_config_line "input_directory" 'input_directory $HOME/.mame/inp' "/tmp/mame.ini"
  replace_config_line "state_directory" 'state_directory $HOME/.mame/sta' "/tmp/mame.ini"
  replace_config_line "snapshot_directory" 'snapshot_directory $HOME/.mame/snap' "/tmp/mame.ini"
  replace_config_line "diff_directory" 'diff_directory $HOME/.mame/diff' "/tmp/mame.ini"
  replace_config_line "comment_directory" 'comment_directory $HOME/.mame/comments' "/tmp/mame.ini"
  replace_config_line "ctrlrpath" 'ctrlrpath $HOME/.mame/ctrlr' "/tmp/mame.ini"
  replace_config_line "rompath" "rompath $rom_path" "/tmp/mame.ini"
  cat mame.ini
}

mame_generate_controls_config() {
  cat $MAME_FILES_DIR/default.cfg
}
garnieretienne/arcade
lib/mame.sh
Shell
mit
2,966
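A hedged usage sketch for the library above: the call order is an assumption inferred from the function bodies (build, package, install, then configure), and the destination and rom paths are illustrative:

# Build a .deb from source, install it, then emit a mame.ini for a rom path
mame_install_build_dependencies
mame_download_sources
mame_compile
mame_build_package /tmp
mame_install_package
mame_generate_config "$HOME/.mame/roms" > mame.ini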
#!/usr/bin/env bash
chown -R www-data var/
chown -R www-data web/mediafile
chmod -R 777 var/

bower install --allow-root

if [ "$APPLICATION_LIVE" = 1 ]; then
  app/console assetic:dump --env=prod
else
  app/console assetic:dump --env=dev
fi

/usr/local/sbin/php-fpm --nodaemonize
weburnit/sample-lumen-docker
images/php/slaver.sh
Shell
mit
278
#!/bin/bash
source ~/scripts/common.sh
$HADOOP_INSTALL/bin/hadoop jar kmeans.jar KMeans
viveksjain/repro_rdd
ec2/run_hadoopkmeans.sh
Shell
mit
88
#!/bin/bash
sudo -u www-data php app/console cache:clear --env=dev
sudo rm -r app/cache/dev/*
sudo -u www-data php app/console cache:warmup --env=dev
alexmazinho/foment.alexmazinho
cache_script.sh
Shell
mit
150
#!/bin/bash
set -e

if [ -z "`ls /usr/local/etc/php/conf.d`" ]
then
  cp -R /etc-start/php/conf.d/* /usr/local/etc/php/conf.d

  # Set environments
  TIMEZONE1=${TIMEZONE:-Asia/Ho_Chi_Minh}
  PHP_MEMORY_LIMIT1=${PHP_MEMORY_LIMIT:-512M}
  MAX_UPLOAD1=${MAX_UPLOAD:-520M}
  PHP_MAX_FILE_UPLOAD1=${PHP_MAX_FILE_UPLOAD:-200}
  PHP_MAX_POST1=${PHP_MAX_POST:-520M}
  MAX_INPUT_TIME1=${MAX_INPUT_TIME:-3600}
  MAX_EXECUTION_TIME1=${MAX_EXECUTION_TIME:-3600}

  sed -i -E \
    -e "s|;*date.timezone =.*|date.timezone = ${TIMEZONE1}|i" \
    -e "s|;*memory_limit =.*|memory_limit = ${MAX_UPLOAD1}|i" \
    -e "s|;*upload_max_filesize =.*|upload_max_filesize = ${MAX_UPLOAD1}|i" \
    -e "s|;*max_file_uploads =.*|max_file_uploads = ${PHP_MAX_FILE_UPLOAD1}|i" \
    -e "s|;*post_max_size =.*|post_max_size = ${PHP_MAX_POST1}|i" \
    -e "s/max_input_time = 60/max_input_time = ${MAX_INPUT_TIME1}/" \
    -e "s/max_execution_time = 30/max_execution_time = ${MAX_EXECUTION_TIME1}/" \
    -e "s/error_reporting = .*/error_reporting = E_ALL/" \
    -e "s/display_errors = .*/display_errors = On/" \
    /usr/local/etc/php/conf.d/babim.ini
fi

# set ID docker run
agid=${agid:-$auid}
auser=${auser:-www-data}
if [[ -z "${auid}" ]]; then
  echo "start"
elif [[ "$auid" = "0" ]] || [[ "$aguid" == "0" ]]; then
  echo "run in user root"
  export auser=root
  export APACHE_RUN_USER=$auser
  export APACHE_RUN_GROUP=$auser
  #sed -i -e "/^user = .*/cuser = $auser" /usr/local/etc/php-fpm.conf
  #sed -i -e "/^group = .*/cgroup = $auser" /usr/local/etc/php-fpm.conf
else
  if id $auser >/dev/null 2>&1; then
    echo "user exists"
    #sed -i -e "/^user = .*/cuser = $auser" /usr/local/etc/php-fpm.conf
    #sed -i -e "/^group = .*/cgroup = $auser" /usr/local/etc/php-fpm.conf
    export APACHE_RUN_USER=$auser
    export APACHE_RUN_GROUP=$auser
    # usermod alpine
    #deluser $auser && delgroup $auser
    #addgroup -g $agid $auser && adduser -D -H -G $auser -s /bin/false -u $auid $auser
    # usermod ubuntu/debian
    usermod -u $auid $auser
    groupmod -g $agid $auser
  else
    echo "user does not exist"
    export APACHE_RUN_USER=$auser
    export APACHE_RUN_GROUP=$auser
    # create user alpine
    #addgroup -g $agid $auser && adduser -D -H -G $auser -s /bin/false -u $auid $auser
    # create user ubuntu/debian
    groupadd -g $agid $auser && useradd --system --uid $auid --shell /usr/sbin/nologin -g $auser $auser
    #sed -i -e "/^user = .*/cuser = $auser" /usr/local/etc/php-fpm.conf
    #sed -i -e "/^group = .*/cgroup = $auser" /usr/local/etc/php-fpm.conf
  fi
fi

# option with entrypoint
if [ -f "/option.sh" ]; then /option.sh; fi

exec "$@"
babim/docker-phpbase
docker-entrypoint.sh
Shell
mit
2,594
elasticsearch --config=/usr/local/opt/elasticsearch/config/elasticsearch.yml
bootinge/bootinge
python/run_elasticsearch.sh
Shell
mit
77
#!/bin/bash
cat > /etc/hosts << "EOF"
# Begin /etc/hosts (network card version)

127.0.0.1 localhost
<192.168.1.1> <HOSTNAME.example.org> [alias1] [alias2 ...]

# End /etc/hosts (network card version)
EOF
taoyilee/tyLFS
SetUpBootScripts/createHosts.sh
Shell
mit
204
#!/bin/sh
. "$(pwd)/test/helper.sh"

test_do "bundlizer install https://bitbucket.org/naotos/sample.git"
test_do "bundlizer update sample"
test_do "bundlizer which sample"
Tomohiro/bundlizer
test/bitbucket_install_test.sh
Shell
mit
173
#!/bin/bash
CURDIR=$PWD
./edockerinstall.sh --alias
cd tests
/opt/bats/bin/bats edocker_tests.bats
/opt/bats/bin/bats edocker_platforms.bats
cd $CURDIR
pamtrak06/edocker
run_tests.sh
Shell
mit
152
#!/usr/bin/env bash
set -eu

PATTERN="-e ."

if [ $# -gt 0 ]
then
  PATTERN="$1"
fi

exec find src \
  -path src/examples/attitude_estimator_ekf -prune -o \
  -path src/examples/ekf_att_pos_estimator -prune -o \
  -path src/lib/DriverFramework -prune -o \
  -path src/lib/ecl -prune -o \
  -path src/lib/external_lgpl -prune -o \
  -path src/lib/mathlib -prune -o \
  -path src/lib/matrix -prune -o \
  -path src/modules/attitude_estimator_ekf -prune -o \
  -path src/modules/commander -prune -o \
  -path src/modules/mavlink -prune -o \
  -path src/modules/navigator -prune -o \
  -path src/modules/sdlog2 -prune -o \
  -path src/modules/systemlib/uthash -prune -o \
  -path src/modules/uavcan -prune -o \
  -path src/modules/uavcan/libuavcan -prune -o \
  -type f \( -name "*.c" -o -name "*.h" -o -name "*.cpp" -o -name "*.hpp" \) | grep $PATTERN
darknight-007/Firmware
Tools/files_to_check_code_style.sh
Shell
mit
884
sort > generated/expected-output <<EOF
config: line 2: string ".hidden 2" matches ".hidden 2"
config: line 7: regex "^.hidden" matches ".hidden 2"
nb: ambiguous rules for path: "$PROJECT_PATH/test/data/test directory/.hidden 2"
EOF
AlxHnr/nano-backup
test/full program tests/mismatched path warnings/ambiguous rules/1/post-init.sh
Shell
mit
232
# Bundle a full git repo for sneakernet
function bundle() {
  repo=$1
  git clone --mirror [email protected]:dsanara/${repo}.git
  cd ${repo}.git
  git bundle create ../${repo}.bundle --all
  cd ..
}

bundle the-repo
jpittard/jpittard.github.io
posts/git_bundle.sh
Shell
mit
219
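A hedged companion sketch for the receiving side of the sneakernet transfer above: a bundle is a regular clone/fetch source, so restoring it is a single command (the target directory name is illustrative):

# Recreate a working repository from the transported bundle
git clone the-repo.bundle the-repo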
#!/bin/sh
GODIR=$GOPATH/src/gitlab.com/tmaczukin/goliscan
mkdir -p "$(dirname $GODIR)"
ln -sfv "$(pwd -P)" "$GODIR"
cd "$GODIR"
chespinoza/goliscan
ci/prepare.sh
Shell
mit
129
#!/bin/sh
test -f /data/.shadow/.etc/wpa_supplicant.enabled
rdm-dev/meta-jens
recipes-core/init-ifupdown/init-ifupdown/wifi/pre_up.sh
Shell
mit
61
#!/bin/bash
#### Recreates mints for sample-data server assets.

server_id="r1fUoHwJOWCuK3WBAAySjmKYqsG6G2TYIxdqY6YNuuG"
server_user_id="voeqDVAJwDwROywFtg5mEyYpcu2dcPJRLFHYh7tyhqk"

##### game tokens
asset_id="7f8nlUn795x8931JParRnmKAyw8cegRyBMcFg9FccaF"
createmint ${server_id} ${server_user_id} ${asset_id}

##### US Dollars
asset_id="EYGad9FjZWgGKeVbgh2FbA0Hf8oP0XlnRyYCsvSIYKL"
createmint ${server_id} ${server_user_id} ${asset_id}

##### silver grams
asset_id="JY7kEFnm2c50CGNphdpgnlVo2hOZuLrBua5cFqWWR9Q"
createmint ${server_id} ${server_user_id} ${asset_id}

#### Bitcoins
asset_id="XU1Gu6sjcLxg260FmRgrSvRz9L0OohXRSEhlkeSd9j2"
createmint ${server_id} ${server_user_id} ${asset_id}
murrekatt/opentxs-sample-data
createmint-sample.sh
Shell
mit
691
#!/bin/bash
URL='http://google.com';
for ((;;)); do
  URL=$(curl -s --data-urlencode 'longurl='$URL xbryn.com/short/shorten.php);
  echo $URL;
done
thedicearecubed/script-kiddies
shorten_bryn.sh
Shell
mit
145
#!/bin/sh
set -e

if [ "$BACKUP_MODE" != "none" ]; then
  if [ ! -f /root/.rclone.conf ]; then
    cat << EOF > /root/.rclone.conf
[s3]
type = s3
env_auth = false
access_key_id = $AWS_ACCESS_KEY_ID
secret_access_key = $AWS_SECRET_ACCESS_KEY
region = $AWS_S3_REGION
endpoint =
location_constraint =
acl =
server_side_encryption =
EOF
  fi
fi

exec "$@"
kivy/kivy-server
downloads/cont/script/entry.sh
Shell
mit
352
# Ensure that the jumped-to folder is echoed
export _ZO_ECHO=1

eval "$(zoxide init --cmd j zsh)"
kaihowl/dotfiles
zoxide/load.zsh
Shell
mit
98
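A short hedged note on the snippet above: with `--cmd j`, zoxide installs its jump commands under `j`/`ji` instead of the default `z`/`zi`, so day-to-day use looks like:

j dotfiles   # jump to the highest-ranked directory matching "dotfiles"
ji proj      # interactively pick among directories matching "proj"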
#!/usr/bin/env osascript
tell application "System Events"
  tell process "Sublime Text 2"
    return name of every menu item of menu 1 of menu bar item "Window" of menu bar 1
  end tell
end tell
deborasetton/Sublime-Switch-Window
get_windows.sh
Shell
mit
196
dockutil --add "/System/Applications/App Store.app" --no-restart
dockutil --add "/Applications/Safari.app" --no-restart
dockutil --add "/Applications/Google Chrome.app" --no-restart
dockutil --add "/System/Applications/Mail.app" --no-restart
dockutil --add "/System/Applications/Messages.app" --no-restart
dockutil --add "/Applications/WhatsApp.app" --no-restart
dockutil --add "/Applications/LINE.app" --no-restart
dockutil --add "/Applications/Threema.app" --no-restart
dockutil --add "/Applications/Messenger.app" --no-restart
dockutil --add "/System/Applications/FaceTime.app" --no-restart
dockutil --add "/System/Applications/Contacts.app" --no-restart
dockutil --add "/Applications/Fantastical.app" --no-restart
dockutil --add "/System/Applications/Reminders.app" --no-restart
dockutil --add "/Applications/Trello.app" --no-restart
dockutil --add "/System/Applications/Notes.app" --no-restart
dockutil --add "/Applications/Bear.app" --no-restart
dockutil --add "/Applications/Dropbox Paper.app" --no-restart
dockutil --add "/Applications/Day One.app" --no-restart
dockutil --add "/Applications/Numbers.app" --no-restart
dockutil --add "/System/Applications/Photos.app" --no-restart
dockutil --add "/Applications/Adobe Lightroom Classic/Adobe Lightroom Classic.app" --no-restart
dockutil --add "/Applications/DaVinci Resolve/DaVinci Resolve.app" --no-restart
dockutil --add "/Applications/Spotify.app" --no-restart
dockutil --add "/System/Applications/Music.app" --no-restart
dockutil --add "/System/Applications/TV.app" --no-restart
dockutil --add "/System/Applications/FindMy.app" --no-restart
dockutil --add "/System/Applications/System Preferences.app" --no-restart
dockutil --add "/Applications/Parallels Desktop.app" --no-restart
dockutil --add "/Applications/Slack.app" --no-restart
dockutil --add "/Applications/iTerm.app" --no-restart
dockutil --add "/System/Applications/Utilities/Activity Monitor.app" --no-restart
dockutil --add "/Applications/WebStorm.app" --no-restart
dockutil --add "/Applications/Xcode.app" --no-restart
dockutil --add "/Applications/AppCode.app" --no-restart
dockutil --add "/Applications/Visual Studio Code.app" --no-restart
dockutil --add "/Applications/Postman.app" --no-restart
dockutil --add "/Applications/Outbank.app" --no-restart
dockutil --add "/Users/michaelkuck/Documents/Akten" --view fan --display stack --no-restart
dockutil --add "/Users/michaelkuck/Desktop" --view fan --display stack --no-restart
dockutil --add "/Users/michaelkuck/Documents" --view fan --display stack --no-restart
dockutil --add "/Users/michaelkuck/Downloads" --view fan --display stack --no-restart
mikumi/dotfiles
configure-dock.sh
Shell
mit
2,662
#!/bin/bash
# pull in any upstream changes from master
git checkout master
git fetch upstream
git merge upstream/master
git push origin master
# EOF
mamund/hal-forms
merge-upstream.sh
Shell
mit
152
#! /bin/bash
# Program: bwa (alignment via Burrows-Wheeler transformation)
# Version: 0.7.5a-r405
#
# Program: samtools (Tools for alignments in the SAM format)
# Version: 1.1 (using htslib 1.1)
#
# Program: bam2fastq - extract sequences from a BAM file
# Version: v1.1.0
#
# Program: bedtools genomecov (aka genomeCoverageBed)
# Version: v2.16.2
# Summary: Compute the coverage of a feature file among a genome.
#
# Program: fastq_to_fasta - Part of FASTX Toolkit
# Version: 0.0.13.2
#
# Program: convert - convert SVG to PNG (ImageMagick)
# Version: 6.6.9-7 2014-03-06 Q16
#
# Program: blastn
# Version: 2.2.30+
#
set -x # Echo all commands

genomes=(anthracis cereus)

declare -A samples
samples["SRR1748707"]="P00134"
samples["SRR1748708"]="P00134"
samples["SRR1749083"]="P00497"

declare -A reference
reference["anthracis"]="references/index/CP009541-anthracis"
reference["cereus"]="references/index/NC_003909-cereus"

# BLASTN parameters
OUTFMT="6 stitle sseqid qseqid qstart qend sstart send evalue bitscore score length pident nident mismatch positive means gapopen ppos qcovs qcovhsp"
NT="/data1/home/groups/readgp/nt/nt"

for s in ${!samples[@]}; do
  for p in ${genomes[@]}; do
    wd="results/mapping/bacilli-nyc/${s}/${p}"
    mkdir -p ${wd}/coverage
    mkdir -p ${wd}/aligned-reads
    fq1="sra-pathogens/anthracis/${samples[$s]}/${s}_1.fastq.gz"
    fq2="sra-pathogens/anthracis/${samples[$s]}/${s}_2.fastq.gz"
    sam="${wd}/${s}.sam"
    bam="${wd}/${s}.bam"
    cov="${wd}/coverage/${s}.coverage.gz"

    # Align using BWA, sort sam to bam and index bam
    bin/bwa mem -t 20 ${reference[$p]} ${fq1} ${fq2} > ${sam}
    samtools view -@ 10 -bS ${sam} | samtools sort -@ 10 - ${wd}/${s}
    samtools index -b ${bam}
    rm ${sam}

    # Use genomeCoverageBed to get the coverage for each position and plot
    # the coverage for differing sliding windows with 0.5 overlap
    genomeCoverageBed -d -ibam ${bam} | gzip --best - > ${cov}
    scripts/mapping/plot-coverage.R ${cov} 0.5 ${p}

    # Extract aligned reads using bam2fastq and convert to fasta
    ofq="${wd}/aligned-reads/${samples[$s]}_${s}#.fastq"
    bin/bam2fastq -o ${ofq} --no-unaligned ${bam}
    cat ${wd}/aligned-reads/*.fastq | fastq_to_fasta -Q33 -n | gzip --best - > ${wd}/aligned-reads/${samples[$s]}_${s}.fasta.gz
    gzip --best "${wd}/aligned-reads/${samples[$s]}_${s}_1.fastq"
    gzip --best "${wd}/aligned-reads/${samples[$s]}_${s}_2.fastq"
  done
done
Read-Lab-Confederation/nyc-subway-anthrax-study
data/02-mapping/map-bacilli-nyc.sh
Shell
mit
2,548
export STEAM_FRAME_FORCE_CLOSE=1
marcinrogacki/dotfiles
repository/games/.slash/etc/bashrc.d/500-steam.sh
Shell
mit
33
#!/bin/bash
file="$1"

# -z tests for a missing argument (the original "[$file -eq \"\"]" was not valid test syntax)
if [ -z "$file" ] || [[ $file != *.ipa ]] ; then
  echo "Please pass in an ipa file"
  exit 2
fi

HTML=0
JS=0
CSS=0
EXE=0

mkdir "testScratch"
cp "$file" "testScratch/testScratch.zip"
cd "testScratch"
unzip "testScratch.zip" >/dev/null

#find . -name '*.png' -exec fun \;

#Check for HTML Files
for i in $(find . -name '*.html'); do HTML=$((HTML+1)); done;

#Check for JS Files
for i in $(find . -name '*.js'); do JS=$((JS+1)); done;

#Check for CSS Files
for i in $(find . -name '*.css'); do CSS=$((CSS+1)); done;

#Check for EXE Files
for i in $(find . -name '*.exe'); do EXE=$((EXE+1)); done;

if [ $HTML -eq 0 ] && [ $JS -eq 0 ] && [ $CSS -eq 0 ] && [ $EXE -eq 0 ] ; then
  echo "native"
else
  echo "not Native"
fi

cd ..
rm -rf "testScratch"
QueueSoftwareInc/isItNative
isItNative.sh
Shell
mit
793
#!/bin/bash
# Shortcut so that other files don't have to use such a long path when loading the spinner
. .gp/bash/third-party/bash-spinner/spinner.sh
concrete5/concrete5
.gp/bash/spinner.sh
Shell
mit
149
#!/bin/bash
WORKSPACE=/psikon/.jenkins/jobs/psistatsrd/workspace
PYTHONPATH=$WORKSPACE:$PYTHONPATH pylint $1 $2 $3 $4 $5 $6 $7
alex-dow/psistatsrd
build-sonar.sh
Shell
mit
129
#!/bin/sh
VERSION=`python setup.py --version`
echo "# Releasing pydocs v$VERSION..."

echo "# doing git..."
git tag -a v$VERSION -m v$VERSION
git push --tags

echo "# doing pypi..."
python setup.py sdist upload

echo "# Done!"
Fizzadar/pydocs
release.sh
Shell
mit
229
source "${HOME}/.bashrc.d/dependencies/liquidprompt/liquidprompt"

# fix a bug showing the temperature in any case
LP_ENABLE_TEMP=0
ydubreuil/dotfiles
homedir/.bashrc.d/liquidprompt.sh
Shell
mit
131
apt-get -qqy update
apt-get -qqy install python-flask
apt-get -qqy install python-sqlalchemy
apt-get -qqy install python-pip
pip install bleach
pip install oauth2client
pip install requests
pip install httplib2
pip install redis
pip install Flask-SQLAlchemy
pip install --upgrade google-api-python-client
pip install dicttoxml
pip install Flask-WTF

vagrantTip="The shared directory is located at /vagrant\nTo access your shared files: cd /vagrant"
echo -e $vagrantTip > /etc/motd

wget http://download.redis.io/redis-stable.tar.gz
tar xvzf redis-stable.tar.gz
cd redis-stable
make
make install
brianquach/udacity-nano-fullstack-catalog
vagrant/pg_config.sh
Shell
mit
610
#!/bin/bash
# arguments
#
# 1 - N_divs for wall mounted brick
# 2 - lattice type [ 'D3Q15' | 'D3Q19' | 'D3Q27' ]
# 3 - dynamics [ 1 = LBGK | 2 = RBGK | 3 = MRT]
# 4 - partition methodology [ '1D' | '3D' | 'metis' ]
# 5 - number of partitions
# 6 - number of omp threads
# 7 - pre-process

# saves mat file named ChanCavityTest.mat
MAT_FILE=ChanCavityTest.mat

Num_ts=300001
ts_rep_freq=1000
Warmup_ts=0
plot_freq=10000
Re=3000
dt=0.0002
Cs=0
Restart_flag=0

# must re-process if you change:
# N_divs, partition methodology, or the number of partitions.
# if the lattice type changes, you do not have to re-do pre-processing,
# but the resulting partitions may not be the same as what would have
# been picked with new lattice type
if [ "$7" = "1" ]; then
  aprun -n 1 ./channel_cavity_geom.py $1
  if [ "$4" = "metis" ]; then
    module swap PrgEnv-gnu PrgEnv-intel
  fi
  aprun -n 1 ./pyNFC_partition.py $MAT_FILE $2 $4 $5
  if [ "$4" = "metis" ]; then
    module swap PrgEnv-intel PrgEnv-gnu
  fi
else
  echo "pre-processing skipped, commencing time steps"
fi

# basically, pyNFC_preprocess.py just writes params.lbm now.
aprun -n 1 ./pyNFC_preprocess.py $MAT_FILE $2 $3 $4 $5 \
  $Num_ts $ts_rep_freq $Warmup_ts $plot_freq $Re $dt $Cs $Restart_flag

export OMP_NUM_THREADS=$6

aprun -n $5 -d $6 ./pyNFC_run.py

#python ./processNFC.py
aprun -n 1 ./processNFC_serial
stu314159/pyNFC
run_chanCav.sh
Shell
mit
1,360
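A hedged invocation sketch, mapping hypothetical values onto the seven positional arguments documented in the script's header comment:

# N_divs=64, D3Q15 lattice, LBGK dynamics (1), metis partitioning,
# 8 partitions, 4 OMP threads per partition, pre-processing enabled (1)
./run_chanCav.sh 64 D3Q15 1 metis 8 4 1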
#!/usr/bin/env bash
# http://genometools.org/

URL="http://genometools.org/pub/genometools-1.5.7.tar.gz"
tarball=$(basename "$URL")
dir=$(echo $tarball | sed 's/\.tar\.gz//')

if [ ! -e "$tarball" ]
then
  wget "$URL"
fi

if [ ! -e "$dir" ]
then
  tar xzf "$tarball"
fi

cd "$dir"

# Needs pango-devel and cairo-devel installed
make errorcheck=no

# On 1.5.7 a number of test categories fail, but the application seems OK
# at a first glance. Maybe just spurious?
make test

make prefix="/ad/eng/support/software/linux/opt/64/$dir/" install

# Then symlink as usual
eng-it/linux-builds
genometools/build.sh
Shell
cc0-1.0
557
#!/bin/bash
# Snips out only the quality string lines from a FASTQ file
for file in $@
do
  awk ' NR % 4 ==0 { print;}' $file > $file.qual
done
yunwilliamyu/snippets
fastq_qual_snip.sh
Shell
cc0-1.0
143
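A hedged companion one-liner: a FASTQ record is four lines (@header, sequence, '+', quality), so the same NR modulus trick selects any field of the record; the input filename here is illustrative:

# NR % 4 == 2 selects the sequence line of each 4-line FASTQ record
awk 'NR % 4 == 2' reads.fastq > reads.seq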
#!/bin/sh

test_description='git fsck random collection of tests'

. ./test-lib.sh

test_expect_success setup '
	test_commit A fileA one &&
	git checkout HEAD^0 &&
	test_commit B fileB two &&
	git tag -d A B &&
	git reflog expire --expire=now --all
'

test_expect_success 'HEAD is part of refs' '
	test 0 = $(git fsck | wc -l)
'

test_expect_success 'loose objects borrowed from alternate are not missing' '
	mkdir another &&
	(
		cd another &&
		git init &&
		echo ../../../.git/objects >.git/objects/info/alternates &&
		test_commit C fileC one &&
		git fsck >out &&
		! grep "missing blob" out
	)
'

# Corruption tests follow. Make sure to remove all traces of the
# specific corruption you test afterwards, lest a later test trip over
# it.

test_expect_success 'object with bad sha1' '
	sha=$(echo blob | git hash-object -w --stdin) &&
	echo $sha &&
	old=$(echo $sha | sed "s+^..+&/+") &&
	new=$(dirname $old)/ffffffffffffffffffffffffffffffffffffff &&
	sha="$(dirname $new)$(basename $new)"
	mv .git/objects/$old .git/objects/$new &&
	git update-index --add --cacheinfo 100644 $sha foo &&
	tree=$(git write-tree) &&
	cmt=$(echo bogus | git commit-tree $tree) &&
	git update-ref refs/heads/bogus $cmt &&
	(git fsck 2>out; true) &&
	grep "$sha.*corrupt" out &&
	rm -f .git/objects/$new &&
	git update-ref -d refs/heads/bogus &&
	git read-tree -u --reset HEAD
'

test_expect_success 'branch pointing to non-commit' '
	git rev-parse HEAD^{tree} > .git/refs/heads/invalid &&
	git fsck 2>out &&
	grep "not a commit" out &&
	git update-ref -d refs/heads/invalid
'

cat > invalid-tag <<EOF
object ffffffffffffffffffffffffffffffffffffffff
type commit
tag invalid
tagger T A Gger <[email protected]> 1234567890 -0000

This is an invalid tag.
EOF

test_expect_success 'tag pointing to nonexistent' '
	tag=$(git hash-object -t tag -w --stdin < invalid-tag) &&
	echo $tag > .git/refs/tags/invalid &&
	test_must_fail git fsck --tags >out &&
	cat out &&
	grep "broken link" out &&
	rm .git/refs/tags/invalid
'

cat > wrong-tag <<EOF
object $(echo blob | git hash-object -w --stdin)
type commit
tag wrong
tagger T A Gger <[email protected]> 1234567890 -0000

This is an invalid tag.
EOF

test_expect_success 'tag pointing to something else than its type' '
	tag=$(git hash-object -t tag -w --stdin < wrong-tag) &&
	echo $tag > .git/refs/tags/wrong &&
	test_must_fail git fsck --tags 2>out &&
	cat out &&
	grep "error in tag.*broken links" out &&
	rm .git/refs/tags/wrong
'

test_done
byang/line
t/t1450-fsck.sh
Shell
gpl-2.0
2,478
#!/usr/bin/env bash
#     ____  _________  ____  _________
#    / __ \/ ___/ __ \/ __ \/ ___/ __ \
#   / /_/ / /  / /_/ / /_/ / /  / /_/ /
#  / .___/_/   \____/ .___/_/   \____/
# /_/              /_/
#
# Built from: vagrant.propro

unset UCF_FORCE_CONFFOLD
export UCF_FORCE_CONFFNEW="YES"
export DEBIAN_FRONTEND="noninteractive"

# Propro package: lib/propro.sh
#!/usr/bin/env bash
set -e
set -u

PROPRO_LOG_FILE="/root/provision.log"
PROPRO_FULL_LOG_FILE="/root/full_provision.log"
PROPRO_LOG_USE_COLOR="yes"
PROPRO_DISABLE_LOG="no"

>$PROPRO_FULL_LOG_FILE
exec > >(tee $PROPRO_FULL_LOG_FILE)
exec 2>&1

function log {
  echo -e "$1"

  if is-yes $PROPRO_DISABLE_LOG; then
    return 0
  fi

  if [ $PROPRO_LOG_FILE ]; then
    touch $PROPRO_LOG_FILE
    echo -e "$1" >> $PROPRO_LOG_FILE
  fi
}

# $1 text
function section {
  local msg="#### $1"
  log ""
  if is-yes $PROPRO_LOG_USE_COLOR; then
    log "\e[32m\e[1m$msg\e[0m"
  else
    log "$msg"
  fi
}

# $1 text
function announce {
  if is-yes $PROPRO_LOG_USE_COLOR; then
    log "\e[34m\e[1m--->\e[0m $1"
  else
    log "---> $1"
  fi
}

# $1 text
function announce-item {
  if is-yes $PROPRO_LOG_USE_COLOR; then
    log "     - \e[36m$1\e[0m"
  else
    log "     - $1"
  fi
}

function finished {
  if is-yes $PROPRO_LOG_USE_COLOR; then
    log "\e[35m\e[1m Fin.\e[0m"
  else
    log " Fin."
  fi
  log ""
}

function get-tmp-dir {
  mktemp -d
}

# $1 "yes" or "no"
function is-yes {
  if [ $1 == "yes" ]; then
    return 0
  else
    return 1
  fi
}

# $1 "yes" or "no"
function is-no {
  if [ $1 == "no" ]; then
    return 0
  else
    return 1
  fi
}

# $1 comma separated list
#
# example:
# > $ csl-to-wsl "item1,item2,item3"
# > item1 item2 item3
function csl-to-wsl {
  echo "$1" | sed 's/,/ /g'
}

# $1 path or relative uri
#
# example:
# > $ path-to-id example.com/neat/stuff
# > example_com_neat_stuff
function path-to-id {
  echo "$1" | sed -r 's/[-\.:\/\]/_/g'
}

# Propro package: lib/ubuntu.sh
#!/usr/bin/env bash

function get-processor-count {
  nproc
}

function release-codename {
  lsb_release -c -s
}

# $@ package names
function install-packages {
  announce "Installing packages:"
  for package in $@; do
    announce-item "$package"
  done

  aptitude -q -y -o Dpkg::Options::="--force-confnew" install $@
}

function get-archtype {
  if [ $(getconf LONG_BIT) == 32 ]; then
    echo 'x86'
  else
    echo 'x64'
  fi
}

function update-sources {
  sed -i "/^# deb.*multiverse/ s/^# //" /etc/apt/sources.list
  apt-get -qq -y update
}

function add-repository {
  add-apt-repository -y $1
}

# $1 unix user
# $2 service name
# $3 service args
function add-sudoers-entries {
  for event in start status stop reload restart; do
    if [ $3 ]; then
      tee -a /etc/sudoers.d/$2-entries <<EOT
$1 ALL=NOPASSWD: /sbin/$event $2 $3
EOT
    else
      tee -a /etc/sudoers.d/$2-entries <<EOT
$1 ALL=NOPASSWD: /sbin/$event $2
EOT
    fi

    chmod 0440 /etc/sudoers.d/$2-entries
  done
}

function reboot-system {
  shutdown -r now
}

# $1 package name
function reconfigure-package {
  dpkg-reconfigure -f noninteractive $1
}

# $1 key URL
function add-source-key {
  wget --quiet -O - $1 | apt-key add -
}

# $@ files to extract
function extract {
  tar xzf $@
}

# $1 URL to download
function download {
  wget -nv $1
}

function get-ram-bytes {
  free -m -b | awk '/^Mem:/{print $2}'
}

function get-page-size {
  getconf PAGE_SIZE
}

function get-ram-pages {
  echo "$(get-ram-bytes) / $(get-page-size)" | bc
}

# $1 shmall percent
function get-kernel-shmall {
  echo "($(get-ram-pages) * $1) / 1" | bc
}

# $1 shmmax percent
function get-kernel-shmmax {
  echo "($(get-ram-bytes) * $1) / 1" | bc
}

# $1 unix user
# $2 path
function as-user-mkdir {
  mkdir -p $2
  chown $1:$1 $2
}

function upgrade-system {
  update-sources
  apt-get -qq -y install aptitude
  aptitude -q -y -o Dpkg::Options::="--force-confnew" full-upgrade
}

# $1 timezone
function set-timezone {
  echo $1 > /etc/timezone
  reconfigure-package tzdata
}

# $1 locale eg: en_US.UTF-8
function set-locale {
  export LANGUAGE=$1
  export LANG=$1
  export LC_ALL=$1
  locale-gen $1
  reconfigure-package locales
  update-locale
}

# $1 hostname
function set-hostname {
  echo $1 > /etc/hostname
  hostname -F /etc/hostname
}

# $1 unix user
# $2 unix group
# $3 password
function add-user {
  if [ $2 ]; then
    announce "Adding $1 user to group $2"
    useradd -m -s /bin/bash -g $2 $1
  else
    announce "Adding $1 user"
    useradd -m -s /bin/bash $1
  fi

  if [ $3 ]; then
    announce "Setting password for $1 user"
    echo "$1:$3" | chpasswd
  fi
}

# $1 unix user
# $2 github usernames for public keys
function add-pubkeys-from-github {
  announce "Installing public keys for $1 from GitHub users:"
  local ssh_dir="/home/$1/.ssh"
  local keys_file="$ssh_dir/authorized_keys"
  mkdir -p $ssh_dir
  touch $keys_file

  for user in $2; do
    announce-item "$user"
    local url="https://github.com/$user.keys"
    tee -a $keys_file <<EOT
# $url
$(wget -qO- $url)
EOT
  done

  chmod 700 $ssh_dir
  chmod 600 $keys_file
  chown -R $1 $ssh_dir
}

# Propro package: lib/system.sh
#!/usr/bin/env bash

SYSTEM_SHMALL_PERCENT="0.75" # @specify
SYSTEM_SHMMAX_PERCENT="0.5" # @specify
SYSTEM_BASE_PACKAGES="curl vim-nox less htop build-essential openssl git tree python-software-properties"
SYSTEM_TIMEZONE="Etc/UTC" # @specify
SYSTEM_LOCALE="en_US.UTF-8" # @specify
SYSTEM_SOURCES_PG_KEY_URL="http://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc"

function system-configure-shared-memory {
  announce "Configuring shared memory"
  install-packages bc
  local shmall=$(get-kernel-shmall $SYSTEM_SHMALL_PERCENT)
  local shmmax=$(get-kernel-shmmax $SYSTEM_SHMMAX_PERCENT)

  sysctl -w kernel.shmall=$shmall
  sysctl -w kernel.shmmax=$shmmax

  tee -a /etc/sysctl.conf <<EOT
kernel.shmall = $shmall
kernel.shmmax = $shmmax
EOT
}

function system-install-packages {
  install-packages $SYSTEM_BASE_PACKAGES
}

function system-configure-timezone {
  announce "Set timezone to $SYSTEM_TIMEZONE"
  set-timezone $SYSTEM_TIMEZONE
}

function system-configure-locale {
  announce "Set locale to $SYSTEM_LOCALE"
  set-locale $SYSTEM_LOCALE
}

function system-upgrade {
  announce "Update and upgrade system packages"
  upgrade-system
}

function system-add-pg-source {
  announce "Add PostgreSQL sources:"
  tee /etc/apt/sources.list.d/pgdg.list <<EOT
deb http://apt.postgresql.org/pub/repos/apt/ $(release-codename)-pgdg main
EOT
  announce-item "apt.postgresql.org"
  add-source-key $SYSTEM_SOURCES_PG_KEY_URL
  update-sources
}

function system-install-sources {
  system-add-pg-source
}

# Propro package: lib/pg.sh
#!/usr/bin/env bash

PG_VERSION="9.3" # @specify
PG_EXTENSIONS="btree_gin btree_gist fuzzystrmatch hstore intarray ltree pg_trgm tsearch2 unaccent" # @specify see: http://www.postgresql.org/docs/9.3/static/contrib.html
PG_CONFIG_FILE="/etc/postgresql/$PG_VERSION/main/postgresql.conf"
PG_HBA_FILE="/etc/postgresql/$PG_VERSION/main/pg_hba.conf"
PG_TUNE_VERSION="0.9.3"
PG_USER="postgres"

function get-pg-tune-url {
  echo "http://pgfoundry.org/frs/download.php/2449/pgtune-$PG_TUNE_VERSION.tar.gz"
}

function pg-install-packages {
  install-packages postgresql-$PG_VERSION libpq-dev postgresql-contrib-$PG_VERSION
}

function pg-tune {
  local tmpdir=$(get-tmp-dir)
  cd "$tmpdir"
  announce "Tune PostgreSQL $PG_VERSION"
  download $(get-pg-tune-url)
  extract pgtune-$PG_TUNE_VERSION.tar.gz
  ./pgtune-$PG_TUNE_VERSION/pgtune -i $PG_CONFIG_FILE -o $PG_CONFIG_FILE.pgtune
  mv $PG_CONFIG_FILE $PG_CONFIG_FILE.original
  mv $PG_CONFIG_FILE.pgtune $PG_CONFIG_FILE
  chown $PG_USER:$PG_USER $PG_CONFIG_FILE
  cd ~/
  rm -rf "$tmpdir"
}

# $1 db user name
# $2 db name
function pg-createdb {
  announce "Create database: $2"
  su - $PG_USER -c "createdb -O $1 $2"

  if [ $PG_EXTENSIONS ]; then
    announce "Add extensions:"
    for extension in $PG_EXTENSIONS; do
      announce-item "$extension"
      su - $PG_USER -c "psql -d $2 -c \"CREATE EXTENSION IF NOT EXISTS $extension;\""
    done
  fi
}

# Propro package: lib/rvm.sh
#!/usr/bin/env bash
# requires app.sh

RVM_CHANNEL="stable"
RVM_REQUIRED_PACKAGES="curl gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev libncurses5-dev automake libtool bison pkg-config libffi-dev"
RVM_DEFAULT_GEMS="bundler" #@specify

# $1 unix user
# $2 ruby version
function rvm-install-for-user {
  section "RVM"
  install-packages $RVM_REQUIRED_PACKAGES
  announce "Install RVM for user $1"
  su - $1 -c "curl -L https://get.rvm.io | bash -s $RVM_CHANNEL"
  su - $1 -c "rvm autolibs read-fail"
  announce "Install Ruby $2 for user $1"
  su - $1 -c "rvm install $2"
  announce "Set Ruby $2 as default for user $1"
  su - $1 -c "rvm --default use $2"
  announce "Install default gems"
  su - $1 -c "gem install $RVM_DEFAULT_GEMS"
}

# Propro package: lib/nginx.sh
#!/usr/bin/env bash

NGINX_VERSION="1.6.0" # @specify
NGINX_USER="nginx"
NGINX_CONFIGURE_OPTS="--with-http_ssl_module --with-http_gzip_static_module" # @specify
NGINX_CONF_FILE="/etc/nginx.conf"
NGINX_ETC_DIR="/etc/nginx"
NGINX_LOG_DIR="/var/log/nginx"
NGINX_ACCESS_LOG_FILE_NAME="access.log"
NGINX_ERROR_LOG_FILE_NAME="error.log"
NGINX_DEPENDENCIES="libpcre3-dev libssl-dev"
NGINX_WORKER_COUNT=$(get-processor-count)
NGINX_SERVER_NAMES_HASH_BUCKET_SIZE="64"
NGINX_PID_FILE="/var/run/nginx.pid"
NGINX_CLIENT_MAX_BODY_SIZE="5m" # @specify
NGINX_WORKER_CONNECTIONS="2000" # @specify
NGINX_SITES_DIR="$NGINX_ETC_DIR/sites"
NGINX_CONF_DIR="$NGINX_ETC_DIR/conf"

function get-nginx-url {
  echo "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz"
}

function nginx-install {
  local tmpdir=$(get-tmp-dir)
  cd "$tmpdir"
  install-packages $NGINX_DEPENDENCIES
  announce "Download $NGINX_VERSION"
  download $(get-nginx-url)
  announce "Extract"
  extract nginx-$NGINX_VERSION.tar.gz
  announce "Configure"
  cd nginx-$NGINX_VERSION
  ./configure $NGINX_CONFIGURE_OPTS
  announce "Compile"
  make
  announce "Install $NGINX_VERSION"
  make install
  cd ~/
  rm -rf "$tmpdir"
}

function nginx-configure {
  announce "Creating Nginx user"
  useradd -r $NGINX_USER

  announce "Adding Nginx directories"
  as-user-mkdir $NGINX_USER $NGINX_LOG_DIR
  mkdir -p $NGINX_ETC_DIR
  mkdir -p $NGINX_SITES_DIR
  mkdir -p $NGINX_CONF_DIR

  announce "Creating base Nginx config: $NGINX_CONF_FILE"
  tee $NGINX_CONF_FILE <<EOT
user $NGINX_USER;
pid $NGINX_PID_FILE;
ssl_engine dynamic;
worker_processes $NGINX_WORKER_COUNT;

events {
  multi_accept on;
  worker_connections $NGINX_WORKER_CONNECTIONS;
  use epoll;
}

http {
  sendfile on;
  tcp_nopush on;
  tcp_nodelay off;
  client_max_body_size $NGINX_CLIENT_MAX_BODY_SIZE;
  client_body_temp_path /var/spool/nginx-client-body 1 2;
  server_names_hash_bucket_size $NGINX_SERVER_NAMES_HASH_BUCKET_SIZE;
  default_type application/octet-stream;

  include /etc/nginx/conf/*.conf;
  include /etc/nginx/sites/*.conf;
}
EOT

  announce "Writing Nginx upstart /etc/init/nginx.conf"
  tee /etc/init/nginx.conf <<EOT
description "Nginx HTTP Daemon"
author "George Shammas <[email protected]>"

start on (filesystem and net-device-up IFACE=lo)
stop on runlevel [!2345]

env DAEMON="/usr/local/nginx/sbin/nginx -c $NGINX_CONF_FILE"
env PID="$NGINX_PID_FILE"

expect fork
respawn
respawn limit 10 5

pre-start script
  \$DAEMON -t
  if [ \$? -ne 0 ]
  then
    exit \$?
  fi
end script

exec \$DAEMON
EOT
}

function nginx-conf-add-mimetypes {
  announce "Adding mimetypes config"
  tee "$NGINX_CONF_DIR/mimetypes.conf" <<EOT
types_hash_max_size 2048;
types {
  application/atom+xml atom;
  application/java-archive jar war ear;
  application/javascript js;
  application/json json;
  application/msword doc;
  application/pdf pdf;
  application/postscript ps eps ai;
  application/rtf rtf;
  application/vnd.ms-excel xls;
  application/vnd.ms-fontobject eot;
  application/vnd.ms-powerpoint ppt;
  application/vnd.wap.wmlc wmlc;
  application/x-7z-compressed 7z;
  application/x-bittorrent torrent;
  application/x-cocoa cco;
  application/x-font-ttf ttf ttc;
  application/x-httpd-php-source phps;
  application/x-java-archive-diff jardiff;
  application/x-java-jnlp-file jnlp;
  application/x-makeself run;
  application/x-perl pl pm;
  application/x-pilot prc pdb;
  application/x-rar-compressed rar;
  application/x-redhat-package-manager rpm;
  application/x-sea sea;
  application/x-shockwave-flash swf;
  application/x-stuffit sit;
  application/x-tcl tcl tk;
  application/x-x509-ca-cert der pem crt;
  application/x-xpinstall xpi;
  application/xhtml+xml xhtml;
  application/xml xml;
  application/zip zip;
  audio/midi mid midi kar;
  audio/mpeg mp3;
  audio/ogg oga ogg;
  audio/x-m4a m4a;
  audio/x-realaudio ra;
  audio/x-wav wav;
  font/opentype otf;
  font/woff woff;
  image/gif gif;
  image/jpeg jpeg jpg;
  image/png png;
  image/svg+xml svg svgz;
  image/tiff tif tiff;
  image/vnd.wap.wbmp wbmp;
  image/webp webp;
  image/x-icon ico;
  image/x-ms-bmp bmp;
  text/cache-manifest manifest appcache;
  text/css css;
  text/html html htm shtml;
  text/mathml mml;
  text/plain txt md;
  text/vnd.sun.j2me.app-descriptor jad;
  text/vnd.wap.wml wml;
  text/x-component htc;
  text/xml rss;
  video/3gpp 3gpp 3gp;
  video/mp4 m4v mp4;
  video/mpeg mpeg mpg;
  video/ogg ogv;
  video/quicktime mov;
  video/webm webm;
  video/x-flv flv;
  video/x-mng mng;
  video/x-ms-asf asx asf;
  video/x-ms-wmv wmv;
  video/x-msvideo avi;
}
EOT
}

function nginx-conf-add-gzip {
  announce "Adding gzip config"
  tee $NGINX_CONF_DIR/gzip.conf <<EOT
gzip on;
gzip_buffers 32 4k;
gzip_comp_level 2;
gzip_disable "msie6";
gzip_http_version 1.1;
gzip_min_length 1100;
gzip_proxied any;
gzip_static on;
gzip_vary on;
gzip_types text/css text/plain application/javascript application/json application/rss+xml application/xml application/vnd.ms-fontobject font/truetype font/opentype image/x-icon image/svg+xml;
EOT
}

function nginx-create-logrotate {
  announce "Create logrotate for Nginx"
  tee /etc/logrotate.d/nginx <<EOT
$NGINX_LOG_DIR/*.log {
  daily
  missingok
  rotate 90
  compress
  delaycompress
  notifempty
  dateext
  create 640 nginx adm
  sharedscripts
  postrotate
    [ -f $NGINX_PID_FILE ] && kill -USR1 `cat $NGINX_PID_FILE`
  endscript
}
EOT
}

# Propro package: lib/node.sh
#!/usr/bin/env bash

NODE_VERSION="0.10.26" # @specify

function get-node-pkg-name {
  echo "node-v$NODE_VERSION-linux-$(get-archtype)"
}

function get-node-url {
  echo "http://nodejs.org/dist/v$NODE_VERSION/$(get-node-pkg-name).tar.gz"
}

function node-install {
  local tmpdir=$(get-tmp-dir)
  cd "$tmpdir"
  announce "Download Node $NODE_VERSION"
  download $(get-node-url)
  announce "Extract Node $NODE_VERSION"
  extract "$(get-node-pkg-name).tar.gz"
  announce "Install Node"
  cd "./$(get-node-pkg-name)"
  cp -r -t /usr/local bin include share lib
  cd ~/
  rm -r "$tmpdir"
}

# Propro package: lib/redis.sh
#!/usr/bin/env bash

REDIS_VERSION="2.8.9" # @specify
REDIS_USER="redis"
REDIS_CONF_FILE="/etc/redis.conf"
REDIS_DATA_DIR="/var/lib/redis"
REDIS_FORCE_64BIT="no" # @specify Force 64bit build even if available memory is lte 4GiB

function get-redis-url {
  echo "http://download.redis.io/releases/redis-$REDIS_VERSION.tar.gz"
}

function redis-install {
  local tmpdir=$(get-tmp-dir)
  cd "$tmpdir"
  announce "Download $REDIS_VERSION"
  download $(get-redis-url)
  announce "Extract"
  extract redis-$REDIS_VERSION.tar.gz
  cd redis-$REDIS_VERSION

  if [ $(get-ram-bytes) -gt 4294967296 ] || is-yes $REDIS_FORCE_64BIT; then
    announce "Compile"
    make
  else
    announce "Compile (32bit, available memory <= 4GiB)"
    install-packages libc6-dev-i386
    make 32bit
  fi

  announce "Install $REDIS_VERSION"
  make install

  announce "Add Redis user: $REDIS_USER"
  useradd -r $REDIS_USER

  announce "Create Redis directories"
  as-user-mkdir $REDIS_USER $REDIS_DATA_DIR

  announce "Copy Redis config to $REDIS_CONF_FILE"
  cp ./redis.conf $REDIS_CONF_FILE
  cd ~/
  rm -rf "$tmpdir"

  announce "Update Redis config"
  tee -a $REDIS_CONF_FILE <<EOT
syslog-enabled yes
syslog-ident redis
dir $REDIS_DATA_DIR
EOT

  announce "Create upstart for Redis"
  tee /etc/init/redis.conf <<EOT
description "Redis"
start on runlevel [23]
stop on shutdown
exec sudo -u $REDIS_USER /usr/local/bin/redis-server $REDIS_CONF_FILE
respawn
EOT
}

# Propro package: lib/ffmpeg.sh
#!/usr/bin/env bash
# http://askubuntu.com/a/148567
# https://trac.ffmpeg.org/wiki/UbuntuCompilationGuide
# http://juliensimon.blogspot.ca/2013/08/howto-compiling-ffmpeg-x264-mp3-aac.html

FFMPEG_VERSION="git" # @specify (or a version to download: "2.1.4")
FFMPEG_YASM_VERSION="1.2.0"
FFMPEG_XVID_VERSION="1.3.2"

function get-ffmpeg-url {
  echo "http://ffmpeg.org/releases/ffmpeg-$FFMPEG_VERSION.tar.gz"
}

function get-ffmpeg-yasm-url {
  echo "http://www.tortall.net/projects/yasm/releases/yasm-$FFMPEG_YASM_VERSION.tar.gz"
}

function get-ffmpeg-xvid-url {
  echo "http://downloads.xvid.org/downloads/xvidcore-$FFMPEG_XVID_VERSION.tar.gz"
}

function ffmpeg-install {
  local tmpdir=$(get-tmp-dir)
  cd "$tmpdir"

  announce "Install Dependencies"
  install-packages build-essential git libfaac-dev libgpac-dev \
    libjack-jackd2-dev libmp3lame-dev libopencore-amrnb-dev \
    libopencore-amrwb-dev libsdl1.2-dev libtheora-dev libva-dev libvdpau-dev \
    libvorbis-dev libxfixes-dev zlib1g-dev libgsm1-dev

  announce-item "Yasm"
  announce-item "> Download"
  download $(get-ffmpeg-yasm-url)
  announce-item "> Extract"
  extract yasm-$FFMPEG_YASM_VERSION.tar.gz
  cd yasm-$FFMPEG_YASM_VERSION
  announce-item "> Configure"
  ./configure
  announce-item "> Compile"
  make
  announce-item "> Install"
  make install
  make distclean
  cd ..

  announce-item "X264"
  announce-item "> Download"
  git clone --depth 1 git://git.videolan.org/x264
  announce-item "> Configure"
  cd x264
  ./configure --prefix=/usr/local --enable-shared
  announce-item "> Compile"
  make
  announce-item "> Install"
  make install
  make distclean
  cd ..

  announce-item "Xvid"
  announce-item "> Download"
  download $(get-ffmpeg-xvid-url)
  announce-item "> Extract"
  extract xvidcore-$FFMPEG_XVID_VERSION.tar.gz
  cd xvidcore/build/generic
  announce-item "> Configure"
  ./configure --prefix=/usr/local
  announce-item "> Compile"
  make
  announce-item "> Install"
  make install
  cd ../../..

  announce "Download $FFMPEG_VERSION"
  if [ $FFMPEG_VERSION == "git" ]; then
    git clone --depth 1 git://source.ffmpeg.org/ffmpeg.git
    cd ffmpeg
  else
    download $(get-ffmpeg-url)
    announce "Extract"
    extract ffmpeg-$FFMPEG_VERSION.tar.gz
    cd ffmpeg-$FFMPEG_VERSION
  fi

  announce "Configure"
  ./configure --prefix=/usr/local --enable-gpl --enable-version3 \
    --enable-nonfree --enable-shared --enable-libopencore-amrnb \
    --enable-libopencore-amrwb --enable-libfaac --enable-libgsm \
    --enable-libmp3lame --enable-libtheora --enable-libvorbis \
    --enable-libx264 --enable-libxvid

  announce "Compile"
  make

  announce "Install"
  make install
  make distclean
  ldconfig -v

  cd ~/
  rm -rf "$tmpdir"
}

# Propro package: lib/extras.sh
#!/usr/bin/env bash

EXTRA_PACKAGES="" # @specify

function provision-extras {
  if [ -z "$EXTRA_PACKAGES" ]; then
    return 0
  fi

  section "Extras"
  install-packages $EXTRA_PACKAGES
}

# Propro package: vagrant.sh
#!/usr/bin/env bash

VAGRANT_USER="vagrant"
VAGRANT_DATA_DIR="/vagrant"

# Propro package: vagrant/system.sh
#!/usr/bin/env bash

function vagrant-system-install-user-aliases {
  announce "Installing helper aliases for user: $VAGRANT_USER"
  tee -a /home/$VAGRANT_USER/.profile <<EOT
alias be="bundle exec"
alias r="bin/rails"
alias v="cd $VAGRANT_DATA_DIR"
cd $VAGRANT_DATA_DIR
EOT
}

function vagrant-system-purge-grub-menu-config {
  ucf --purge /boot/grub/menu.lst
}

function provision-vagrant-system {
  section "Vagrant System"
  vagrant-system-purge-grub-menu-config
  system-upgrade
  system-configure-timezone
  system-configure-locale
  system-install-packages
  system-configure-shared-memory
  system-install-sources
  vagrant-system-install-user-aliases
}

# Propro package: vagrant/pg.sh
#!/usr/bin/env bash

function vagrant-pg-create-user {
  announce "Create database user: $VAGRANT_USER"
  su - $PG_USER -c "createuser -s $VAGRANT_USER"
}

function provision-vagrant-pg {
  section "PostgreSQL Server"
  pg-install-packages
  pg-tune
  vagrant-pg-create-user
}

# Propro package: vagrant/redis.sh
#!/usr/bin/env bash

function provision-vagrant-redis {
  section "Redis"
  redis-install
}

# Propro package: vagrant/rvm.sh
#!/usr/bin/env bash

VAGRANT_RVM_RUBY_VERSION="2.0.0" # @specify

function provision-vagrant-rvm {
  rvm-install-for-user $VAGRANT_USER $VAGRANT_RVM_RUBY_VERSION
}

# Propro package: vagrant/node.sh
#!/usr/bin/env bash

function provision-vagrant-node {
  section "Node.js"
  node-install
}

# Propro package: vagrant/nginx.sh
#!/usr/bin/env bash

function provision-vagrant-nginx {
  section "Nginx"
  nginx-install
  nginx-configure
  nginx-conf-add-gzip
  nginx-conf-add-mimetypes

  announce "Adding Nginx config for Vagrant"
  tee "$NGINX_SITES_DIR/vagrant.conf" <<EOT
upstream rack_app {
  server 127.0.0.1:3000 fail_timeout=0;
}

server {
  root $VAGRANT_DATA_DIR/public;
  access_log /dev/null;
  error_log /dev/null;

  try_files \$uri/index.html \$uri.html \$uri @upstream_app;

  location @upstream_app {
    proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
    proxy_set_header Host \$http_host;
    proxy_redirect off;
    proxy_pass http://rack_app;
  }
}
EOT
}

# Propro package: vagrant/ffmpeg.sh
#!/usr/bin/env bash

function provision-vagrant-ffmpeg {
  section "FFmpeg"
  ffmpeg-install
}

# Options from: vagrant.propro
SYSTEM_SHMALL_PERCENT="0.65"
SYSTEM_SHMMAX_PERCENT="0.35"
PG_VERSION="9.3"
REDIS_VERSION="2.8.9"
VAGRANT_RVM_RUBY_VERSION="2.1.2"
NODE_VERSION="0.10.28"
NGINX_VERSION="1.6.0"
NGINX_WORKER_CONNECTIONS="100"
EXTRA_PACKAGES="man git-core libxslt-dev libxml2-dev"

function main {
  provision-vagrant-system
  provision-vagrant-pg
  provision-vagrant-redis
  provision-vagrant-rvm
  provision-vagrant-node
  provision-vagrant-nginx
  provision-extras
  finished
  reboot-system
}

main
unspace/un-uploader-demo
server/propro/dist/vagrant.sh
Shell
gpl-2.0
23,787
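A sketch of how a generated propro script like the one above is typically exercised from the host, assuming a Vagrantfile that registers it as a shell provisioner (the workflow is an assumption; only the vagrant CLI commands themselves are standard):

    vagrant up        # first boot runs the provisioner, which ends in reboot-system
    vagrant reload    # reconnect after the provisioner's shutdown -r
    vagrant ssh       # the vagrant user lands in /vagrant via the installed alias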
#!/bin/sh
module="cdata"
device="cdata"
module2="cdata_plat_dev"
device2="cdata_plat_dev"

# invoke rmmod with all arguments we got
/sbin/rmmod $module $* || exit 1
/sbin/rmmod $module2 $* || exit 1

# Remove stale nodes
rm -f /dev/${device} /dev/${device}
hugh712/my_driver_study
009_kobject_2/unload.sh
Shell
gpl-2.0
261
#! /bin/sh
#
# Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
#
# This program is distributed "as is" WITHOUT ANY WARRANTY of any
# kind, whether express or implied; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# Search for HID event number

source "common.sh"
source "st_log.sh"

############################# Functions #######################################

############################### CLI Params ###################################

############################ DEFAULT Params #######################

############# Do the work ###########################################
HID_Get_Event() {
    device=$1
    events=`ls -d $device/input/input*`
    for event in $events
    do
        inputevent=`basename "$event" | cut -c6-7`
        echo INPUT event is $inputevent
    done
    events=`ls -d $device/input/input$inputevent/event*`
    for event in $events
    do
        hidevent=`basename "$event" | cut -c6-7`
        echo HID event is $hidevent
    done
}
rogerq/ltp-ddt
testcases/ddt/scripts/usb/usb_HID_get_event.sh
Shell
gpl-2.0
1,228
#!/bin/bash
# this is the entry point to the docker container, and is only used there

set -e

export PATH=$PATH:/usr/local/bin/:/usr/local/sbin/

if [ $# -eq 0 ]; then
  echo "No arguments provided: specify either 'pktvisor-cli', 'pktvisor-pcap' or 'pktvisord'. Try:"
  echo "docker run ns1labs/pktvisor pktvisor-cli -h"
  echo "docker run ns1labs/pktvisor pktvisor-pcap --help"
  echo "docker run ns1labs/pktvisor pktvisord --help"
  exit 1
fi

# backwards compatibility
BINARY="$1"
if [ "$BINARY" = 'pktvisor' ]; then
  BINARY='pktvisor-cli'
fi

# Add sleep to allow tty to be ready for Docker when using -it
if [ "$BINARY" = 'pktvisor-cli' ]; then
  sleep 1
fi

shift
exec "$BINARY" "$@"
nsone/pktvisor
docker/entry.sh
Shell
gpl-2.0
691
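Example invocations matching the entrypoint's own hints; the last line illustrates the backwards-compatibility branch, where the legacy name 'pktvisor' is rewritten to 'pktvisor-cli' (how the CLI reaches a running daemon is not shown and is deployment-specific):

    docker run ns1labs/pktvisor pktvisord --help
    docker run -it ns1labs/pktvisor pktvisor    # runs pktvisor-cli after a 1s tty delay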
#!/bin/sh

# default variables
fname=rec

# set file name
if [ $# -ge 1 ] ; then
    fname=$1
fi

# create fifo
rm -f /tmp/$fname.fifo
mkfifo /tmp/$fname.fifo

# spawn raw
sox -t raw -r 22050 -s -w - -t raw /tmp/$fname.raw </tmp/$fname.fifo 3>/tmp/$fname.fifo &

# remember process id for killing
pid=$!

# trap exit signal
trap 'kill $pid ; rm -f /tmp/$fname.fifo ; exit' TERM INT

# waiting to signal
while true; do
    sleep 1
done
tiv-source/qtpcr
examples/raw.sh
Shell
gpl-2.0
432
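A usage sketch: start a named recording, feed samples into the FIFO from another process, and stop with SIGINT; the 22050 Hz signed 16-bit raw format comes from the sox flags above, and the producer command is hypothetical:

    ./raw.sh myrec &                  # creates /tmp/myrec.fifo, records to /tmp/myrec.raw
    cat some.raw > /tmp/myrec.fifo    # any producer of raw samples will do
    kill -INT %1                      # trap kills sox and removes the FIFO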
#!/bin/bash
# run with command line argument:
# $1 <- host, e.g., 127.0.0.1
# $2 <- keyspace, e.g., demo
# $3 <- file, e.g., 2012_medicare_thousand_data.csv

javac -cp ".:./jars/*" Transform.java
java -cp ".:./jars/*" Transform $1 $2 $3
ZheyuJin/CS8674.FALL2015.NEUSeattle
cassandra/cass_tabulate.sh
Shell
gpl-2.0
237
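Invocation mirrors the comment block above, here against a local Cassandra node (the values are the script's own examples):

    ./cass_tabulate.sh 127.0.0.1 demo 2012_medicare_thousand_data.csv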
chmod 777 wpa_supplicant
interface=`ifconfig -a | grep "00:23:A7" | cut -f1 -d ' '`
if [ "$1" == "" ]; then
    ./wpa_supplicant -i$interface -Drsi -c wifi_settings.cfg
else
    ./wpa_supplicant -i$interface -Drsi -c $1 -dd
fi
ptdropper/linux-2.6.34.12
drivers/net/wireless/ganges/RS.GENR.LNX.SD_GPL/OSD/LINUX/release/supp.sh
Shell
gpl-2.0
221
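Run with no arguments to use the bundled wifi_settings.cfg, or pass an alternate config to also enable -dd debugging (the alternate file name is hypothetical):

    ./supp.sh                    # uses wifi_settings.cfg
    ./supp.sh office_wifi.cfg    # uses the given config, with debug output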
#!/sbin/sh
#
# /system/addon.d/70-gapps.sh
#
. /tmp/backuptool.functions

list_files() {
cat <<EOF
app/YouTube.apk
EOF
}

case "$1" in
  backup)
    list_files | while read FILE DUMMY; do
      backup_file $S/$FILE
    done
  ;;
  restore)
    list_files | while read FILE REPLACEMENT; do
      R=""
      [ -n "$REPLACEMENT" ] && R="$S/$REPLACEMENT"
      [ -f "$C/$S/$FILE" ] && restore_file $S/$FILE $R
    done
  ;;
  pre-backup)
    # Stub
  ;;
  post-backup)
    # Stub
  ;;
  pre-restore)
    # Stub
  ;;
  post-restore)
    # Stub
  ;;
esac
dotmaniac/famigo_gapps
gapps/Other/YouTube/addon.d/70-youtube.sh
Shell
gpl-2.0
544
#!/bin/bash
# Add modules to an LZMA compressed initramfs
# Copyright (C) 2007-2010 Daniel Collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

tmp="addmod.tmp"
initramfs=""

abort() {
	if [ $# -eq "1" ]
	then
		echo "$1" 1>&2
	fi

	rm -rf "$tmp"
	exit 1
}

cprog() {
	if which "$1" > /dev/null; then
		return 0
	fi

	abort "No executable '$1' program found in \$PATH"
}

if [ $# -lt 2 ]; then
	abort "Usage: $0 <initramfs> <module> [<module> ...]"
fi

cprog tar
cprog lzcat
cprog find
cprog cpio
cprog lzma

rm -rf "$tmp"
mkdir -p "$tmp/modules/"

for mod in "$@"
do
	if [ -z "$initramfs" ]
	then
		initramfs="$mod"
	else
		is_ko=`echo "$mod" | grep -E '\.ko$'`
		is_tar=`echo "$mod" | grep -E '\.(tar(\.(gz|bz2))?|tgz)$'`
		is_tlz=`echo "$mod" | grep -E '\.(tar\.lzma|tlz)$'`

		if [ -n "$is_ko" ]
		then
			cp "$mod" "$tmp/modules/" || abort
		fi

		if [ -n "$is_tar" ]
		then
			tar -xf "$mod" -C "$tmp/modules/" || abort
		fi

		if [ -n "$is_tlz" ]
		then
			# TAR is really stupid
			tar --lzma -xf "$mod" -C "$tmp/modules/" || abort
		fi

		if [ -z "$is_ko" -a -z "$is_tar" -a -z "$is_tlz" ]
		then
			abort "Unknown file extension: $mod"
		fi
	fi
done

lzcat -S "" "$initramfs" > "$tmp/initramfs.cpio" || abort

for f in `cpio -it --quiet < "$tmp/initramfs.cpio" | egrep '^modules\/.+'`
do
	rm -f "$tmp/$f"
done

bash -c "cd \"$tmp\" && find modules -iname '*.ko' | cpio -o --format=newc --quiet --append -F initramfs.cpio" || abort

lzma -9 "$tmp/initramfs.cpio" -c > "$initramfs" || abort

rm -rf "$tmp"
solemnwarning/kexec-loader
addmod.sh
Shell
gpl-2.0
2,219
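A usage sketch, assuming an LZMA-compressed initramfs and a mix of module inputs (all file names here are hypothetical):

    ./addmod.sh initramfs.img e1000e.ko extra-drivers.tar.gz firmware.tlz
    # .ko files are copied, tarballs are unpacked, and every *.ko found
    # under modules/ is appended to the cpio archive before re-compression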
export PATH=/bin:/sbin:/usr/bin:/usr/sbin;
sleep 600
logger 'SCIFI - Starting Snmpd'
/etc/init.d/snmpd start
Sci-Fi/el-scifi
modules/SCIFIAPI/12/init_snmpd.sh
Shell
gpl-2.0
109
#!/bin/bash
echo BB-ADC > /sys/devices/bone_capemgr.*/slots
beaverb1ll/SeniorDesign_MPPTC
systemd/enable-BBB-ADC.sh
Shell
gpl-2.0
59
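After running the script, the cape manager's slots file should list the BB-ADC overlay; a quick check (the capemgr instance number varies by kernel, hence the glob):

    cat /sys/devices/bone_capemgr.*/slots | grep BB-ADC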
#!/bin/bash
memTotal_b=`free -b |grep Mem |awk '{print $2}'`
memFree_b=`free -b |grep Mem |awk '{print $4}'`
memBuffer_b=`free -b |grep Mem |awk '{print $6}'`
memCache_b=`free -b |grep Mem |awk '{print $7}'`

memTotal_m=`free -m |grep Mem |awk '{print $2}'`
memFree_m=`free -m |grep Mem |awk '{print $4}'`
memBuffer_m=`free -m |grep Mem |awk '{print $6}'`
memCache_m=`free -m |grep Mem |awk '{print $7}'`

CPUPer=`top -b -n1 | grep "Cpu(s)" | awk '{print $2 + $4}'`
hdd=`df -lh | awk '{if ($6 == "/") { print $5 }}' | head -1 | cut -d'%' -f1`
uptime=`uptime`
ProcessCnt=`ps -A | wc -l`

memUsed_b=$(($memTotal_b-$memFree_b-$memBuffer_b-$memCache_b))
memUsed_m=$(($memTotal_m-$memFree_m-$memBuffer_m-$memCache_m))
memUsedPrc=$((($memUsed_b*100)/$memTotal_b))

echo "✅Memory : $memTotal_m MB"
echo "➖➖➖➖➖🎗"
echo "✅Used : $memUsed_m MB - $memUsedPrc% used!"
echo "➖➖➖➖➖🎗"
echo "✅Total : $memTotal_b"
echo "➖➖➖➖➖🎗"
echo '✅CPU Usage : '"$CPUPer"'%'
echo "➖➖➖➖➖🎗"
echo '✅Hdd : '"$hdd"'%'
echo "➖➖➖➖➖🎗"
echo '✅Processes : '"$ProcessCnt"
echo "➖➖➖➖➖🎗"
echo '✅Uptime : '"$uptime"
echo "➖➖➖➖➖🎗"
echo "🌐 BOOM🎗BOOM 🌐"
cracker1375/Boom
data/cmd.sh
Shell
gpl-2.0
1,221
cd ~/Paparazzi/conf
svn status | grep ^\! | cut -c8- | xargs svn rm
svn status | grep ^\? | cut -c8- | xargs svn add

cd ~/Paparazzi/sw
svn status | grep ^\! | cut -c8- | xargs svn rm
svn status | grep ^\? | cut -c8- | xargs svn add

cd ~/Paparazzi/data/maps
svn status | grep ^\! | cut -c8- | xargs svn rm
svn status | grep ^\? | cut -c8- | xargs svn add

cd ~/Paparazzi/var/logs
svn status | grep ^\! | cut -c8- | xargs svn rm
svn status | grep ^\? | cut -c8- | xargs svn add

svn ci
bstark2/AggieAir_MESA
Commit.sh
Shell
gpl-2.0
483
#!/bin/sh
chmod 755 src/main/resources/insecureCmd.sh
mvn clean package cargo:run -Pdeploy -Drunenv=remote
h3xstream/Benchmark
runRemoteAccessibleBenchmark.sh
Shell
gpl-2.0
107
#!/bin/bash
############################################################################################
#
#  Per_sample_raw_reads.sh
#
#  Script for creating raw Read 1 and Read 2 files for each individual sample of a 16S
#  This script takes a user's QIIME mapping file, the demultiplexed sequences file and the two raw read files as arguments.
#  A time-stamped log file of all steps that are conducted is created.
#  This file also shows input file MD5 checksums as well as MD5 checksums for all output files.
#
#  TODO: Figure out a better way to filter in parallel, prob best done w/in the fastq_filter.py script to remove gnu parallel dependency.
#        Incorporate command line arguments using getopt flags instead of placement.
#        Allow for optional output directory to be specified instead of CWD.
#
#  Created by Michael C. Nelson on 2014-09-09.
#  Last revised: 2015-06-23
#  Revision #: 7
#  Copyright 2014 Michael C. Nelson and the University of Connecticut. All rights reserved.
#
#  This script is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 2 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
############################################################################################

# Define initial variables
DATE=`date +%Y-%m-%d`
TIME=`date +%H:%M`
TM=`date +%Y%m%d-%H%M`
LOG=Log_$TM.txt
MAP2=MAP_$TM.txt

# Create log file
echo "Script executed on $DATE at $TIME using the command call: Per_sample_raw_reads.sh $*" | tee $LOG
echo ''

# Initial sanity checking for script operation
# Are the correct number of arguments given?
if [ "$1" = "" ]; then
    echo '' | tee -a $LOG
    echo 'ERROR: No arguments were passed to script.' | tee -a $LOG
    echo 'USAGE: Per_sample_raw_reads.sh Map.txt seqs.fna Undetermined_R1.fastq.gz Undetermined_R2.fastq.gz' | tee -a $LOG
    echo '' | tee -a $LOG
    exit 1
elif [ "$(($# % 4))" != 0 ]; then
    echo '' | tee -a $LOG
    echo 'ERROR: Invalid number of arguments given.' | tee -a $LOG
    echo 'USAGE: Per_sample_raw_reads.sh Map.txt seqs.fna Undetermined_R1.fastq.gz Undetermined_R2.fastq.gz' | tee -a $LOG
    echo '' | tee -a $LOG
    exit 1
fi

MAP=$1
seqsfna=$2
READ1=$3
READ2=$4

# Can the required input files be found?
if [ ! -f "$MAP" ] || [ ! -f "$seqsfna" ] || [ ! -f "$READ1" ] || [ ! -f "$READ2" ]; then
    echo '' | tee -a $LOG
    echo 'ERROR: Required input files could not be found.' | tee -a $LOG
    echo 'Script must be executed in the directory containing the Undetermined Read 1, Read 2, and Index files.' | tee -a $LOG
    echo 'USAGE: Per_sample_raw_reads.sh Map.txt seqs.fna Undetermined_R1.fastq.gz Undetermined_R2.fastq.gz' | tee -a $LOG
    echo '' | tee -a $LOG
    exit 1
else
    MD5MAP=`md5sum $1`
    MD5SEQS=`md5sum $2`
    MD5READ1=`md5sum $3`
    MD5READ2=`md5sum $4`
    echo '' | tee -a $LOG
    echo "Using $MAP as the input mapping file." | tee -a $LOG
    echo $MD5MAP | tee -a $LOG
    # Changing any carriage returns to newlines in the mapping file... so above is a slight lie
    tr '\r' '\n' <$MAP>$MAP2
    echo "Using $seqsfna as the input seqs.fna file." | tee -a $LOG
    echo $MD5SEQS | tee -a $LOG
    echo "Using $READ1 as the raw Read 1 file." | tee -a $LOG
    echo $MD5READ1 | tee -a $LOG
    echo "Using $READ2 as the raw Read 2 file." | tee -a $LOG
    echo $MD5READ2 | tee -a $LOG
fi

# Check to see if GNU parallel is installed.
if hash parallel 2>/dev/null; then
    SMP=TRUE
fi

# Step 1: Parse out the raw read files for each sample
line=1                        # We start with line 1
total=`grep -c '^' $MAP2`     # Determine how many lines are actually in the file (safer than wc -l if map doesn't have a final newline character)
(( samples = $total - 1 ))    # Total number of samples should be num lines minus header line
echo '' | tee -a $LOG
echo "There are $samples samples in your mapfile." | tee -a $LOG
echo '' | tee -a $LOG
DATE=`date +%Y-%m-%d`
TIME=`date +%H:%M`
echo "$DATE $TIME: Proceeding to demultiplex the raw reads into per-sample R1 and R2 files." | tee -a $LOG

while [ $line -lt $total ]    # While the current line number is less than the total number of sample lines,
do                            # Do the following actions
    DATE=`date +%Y-%m-%d`                               # Reset Date
    TIME=`date +%H:%M`                                  # Reset Time
    printf "$DATE $TIME " | tee -a $LOG                 # Print time stamp so user can track progress rate
    printf "Sample: $line " | tee -a $LOG               # First we'll print the current sample number
    (( line++ ))                                        # Now we need to increase the line count to dissociate from the header line
    sampleID=`sed -n "$line{p;q;}" $MAP2 | cut -f1,1`   # Now we find out what the sample ID is
    names=$sampleID.txt                                 # Set an output file for the read names based on the sample ID
    names2=$sampleID'2.txt'                             # Create name of second names file in case we're running in parallel mode
    searchID=$sampleID\_                                # Set the search pattern, which is the sample ID
    printf "$sampleID " | tee -a $LOG                   # Print what the name of the names file is for each sample
    touch $names                                        # Create the output file as empty
    count=`grep -c $searchID $seqsfna`                  # Check to see how many reads are in seqs.fna
    echo "$count seqs" | tee -a $LOG                    # Print out how many sequences are present for the sample
    grep $searchID $seqsfna | tr -d '>' | cut -d\  -f2,2 > $names   # Compile the list of SeqIDs for dual_fastq_filter command
    RAW1=$sampleID"_R1.fastq.gz"                        # Define the Read1 output file
    RAW2=$sampleID"_R2.fastq.gz"                        # Define the Read2 output file
    dual_fastq_filter.py -f $READ1 -r $READ2 -o $RAW1 -p $RAW2 -n $names   # Filter out the raw read1/read2 to gzipped files
    rm $names                                           # We no longer need the names file so let's get rid of it
done

# Cleanup phase: step 1, re-zip the input files to again save file space and delete the "cleaned" map file.
DATE=`date +%Y-%m-%d`
TIME=`date +%H:%M`
echo '' | tee -a $LOG
echo "$DATE $TIME: Finished parsing out the raw Read1/Read2 files for each sample."
rm $MAP2

# Step 2, calculate md5 checksums for all of the raw read files. These are needed for SRA submissions and also just nice to have.
echo '' | tee -a $LOG
DATE=`date +%Y-%m-%d`
TIME=`date +%H:%M`
echo "$DATE $TIME: Calculating md5 checksum values for all sample files." | tee -a $LOG
md5sum *_R1.fastq.gz | tee -a $LOG | tee md5sums.txt      # This could be done in parallel, but, eh.
md5sum *_R2.fastq.gz | tee -a $LOG | tee -a md5sums.txt   # Note that if the files are decompressed and then recompressed using a
                                                          # different method (e.g. pigz) then the MD5 values will change.
DATE=`date +%Y-%m-%d`
TIME=`date +%H:%M`
echo '' | tee -a $LOG
echo '' | tee -a $LOG
echo "$DATE $TIME: Script is now finished." | tee -a $LOG
echo "Each sample should have a gzip compressed R1 and R2 read file that you will need to upload to the SRA." | tee -a $LOG
echo "md5 checksum values have been calculated for these files and can be found in the md5sums.txt file." | tee -a $LOG
mcnelsonphd/misc_scripts
Per_Sample_Raw_Reads.sh
Shell
gpl-2.0
8,512
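The expected call matches the script's own USAGE line:

    ./Per_sample_raw_reads.sh Map.txt seqs.fna Undetermined_R1.fastq.gz Undetermined_R2.fastq.gz
    # writes <SampleID>_R1.fastq.gz / <SampleID>_R2.fastq.gz per sample,
    # plus a timestamped Log_*.txt and md5sums.txt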
#!/bin/sh

test_description='git rev-list --pretty=format test'

. ./test-lib.sh

test_tick
test_expect_success 'setup' '
touch foo && git add foo && git-commit -m "added foo" &&
  echo changed >foo && git-commit -a -m "changed foo"
'

# usage: test_format name format_string <expected_output
test_format() {
	cat >expect.$1
	test_expect_success "format $1" "
git rev-list --pretty=format:$2 master >output.$1 &&
git diff expect.$1 output.$1
"
}

test_format hash %H%n%h <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
131a310eb913d107dd3c09a65d1651175898735d
131a310
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
86c75cfd708a0e5868dc876ed5b8bb66c80b4873
86c75cf
EOF

test_format tree %T%n%t <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
fe722612f26da5064c32ca3843aa154bdb0b08a0
fe72261
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
4d5fcadc293a348e88f777dc0920f11e7d71441c
4d5fcad
EOF

test_format parents %P%n%p <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
86c75cfd708a0e5868dc876ed5b8bb66c80b4873
86c75cf
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873


EOF

# we don't test relative here
test_format author %an%n%ae%n%ad%n%aD%n%at <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
A U Thor
[email protected]
Thu Apr 7 15:13:13 2005 -0700
Thu, 7 Apr 2005 15:13:13 -0700
1112911993
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
A U Thor
[email protected]
Thu Apr 7 15:13:13 2005 -0700
Thu, 7 Apr 2005 15:13:13 -0700
1112911993
EOF

test_format committer %cn%n%ce%n%cd%n%cD%n%ct <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
C O Mitter
[email protected]
Thu Apr 7 15:13:13 2005 -0700
Thu, 7 Apr 2005 15:13:13 -0700
1112911993
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
C O Mitter
[email protected]
Thu Apr 7 15:13:13 2005 -0700
Thu, 7 Apr 2005 15:13:13 -0700
1112911993
EOF

test_format encoding %e <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
EOF

test_format subject %s <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
changed foo
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
added foo
EOF

test_format body %b <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
EOF

test_format colors %Credfoo%Cgreenbar%Cbluebaz%Cresetxyzzy <<'EOF'
commit 131a310eb913d107dd3c09a65d1651175898735d
foobarbazxyzzy
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
foobarbazxyzzy
EOF

cat >commit-msg <<'EOF'
Test printing of complex bodies

This commit message is much longer than the others,
and it will be encoded in iso8859-1. We should therefore
include an iso8859 character: ¡bueno!
EOF

test_expect_success 'setup complex body' '
git config i18n.commitencoding iso8859-1 &&
  echo change2 >foo && git-commit -a -F commit-msg
'

test_format complex-encoding %e <<'EOF'
commit f58db70b055c5718631e5c61528b28b12090cdea
iso8859-1
commit 131a310eb913d107dd3c09a65d1651175898735d
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
EOF

test_format complex-subject %s <<'EOF'
commit f58db70b055c5718631e5c61528b28b12090cdea
Test printing of complex bodies
commit 131a310eb913d107dd3c09a65d1651175898735d
changed foo
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
added foo
EOF

test_format complex-body %b <<'EOF'
commit f58db70b055c5718631e5c61528b28b12090cdea
This commit message is much longer than the others,
and it will be encoded in iso8859-1. We should therefore
include an iso8859 character: ¡bueno!

commit 131a310eb913d107dd3c09a65d1651175898735d
commit 86c75cfd708a0e5868dc876ed5b8bb66c80b4873
EOF

test_expect_success 'empty email' '
	test_tick &&
	C=$(GIT_AUTHOR_EMAIL= git commit-tree HEAD^{tree} </dev/null) &&
	A=$(git show --pretty=format:%an,%ae,%ad%n -s $C) &&
	test "$A" = "A U Thor,,Thu Apr 7 15:14:13 2005 -0700" || {
		echo "Eh? $A" >failure
		false
	}
'

test_done
xlymian/git
t/t6006-rev-list-format.sh
Shell
gpl-2.0
3,897
rsync -r doc/html/ [email protected]:public_html/bsp/
(cd ..; rsync -r doc/html/ [email protected]:public_html/quake2/)
ssh [email protected] 'cd public_html/bsp/; ./installdox -l Q2DOXYTAGS@../quake2'
lambda/wxQuake2
bsptools/upload.sh
Shell
gpl-2.0
220
#!/bin/sh

KEYFILE=/home/root/privatekey.pem
PUBKEY=/home/root/pubkey.pem
OPENSSL=/usr/bin/openssl
CERGEN=/usr/bin/Certificate_gen
PUBKEYGEN=/usr/bin/Gen_publickey

echo -e "\nGenerating Public Key from ${KEYFILE}"

if [ ! -r $KEYFILE ]
then
    echo "Private Key does not exist. Generate certificate before generating a public key"
    $CERGEN
fi

$PUBKEYGEN

echo -e "\nPublic Key written to ${PUBKEY}\n"
houlixin/BBB-TISDK
example-applications/ti-crypto-examples-git/Gen_public_key/openssl_gen_pubkey.sh
Shell
gpl-2.0
407
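Gen_publickey here is a TI example binary; assuming an RSA private key, the equivalent operation with the stock openssl tool referenced in $OPENSSL would presumably be:

    openssl rsa -in /home/root/privatekey.pem -pubout -out /home/root/pubkey.pem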
#!/bin/bash
yum install -y gcc gcc-c++ tcl perl readline-devel pcre-devel openssl-devel
groupadd nginx
useradd -g nginx nginx
wget http://openresty.org/download/ngx_openresty-1.5.11.1.tar.gz
tar xzf ngx_openresty-1.5.11.1.tar.gz
cd ngx_openresty-1.5.11.1
./configure --prefix=/opt/openresty-1.5.11.1 --with-luajit --with-http_iconv_module --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock --user=nginx --group=nginx --conf-path=/home/nginx/conf/nginx.conf --error-log-path=/home/nginx/logs/error.log --http-log-path=/home/nginx/logs/access.log
gmake && gmake install
JianfuLi/shell_scripts
centos_openresty_install.sh
Shell
gpl-2.0
581
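Once installed, checking and starting the bundled nginx is the usual next step; the binary location follows from the --prefix given above (OpenResty places nginx under <prefix>/nginx/sbin):

    /opt/openresty-1.5.11.1/nginx/sbin/nginx -t -c /home/nginx/conf/nginx.conf   # syntax check
    /opt/openresty-1.5.11.1/nginx/sbin/nginx -c /home/nginx/conf/nginx.conf      # start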
#!/bin/sh

old_tty_settings=$(stty -g)
stty -icanon
trap 'stty "$old_tty_settings"; exit 0' INT

echo "Press (CTRL-C) to interrupt..."
while true; do
    nc -u localhost 6789
done
acerion/cwdaemon
examples/example.sh
Shell
gpl-2.0
180
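The loop above forwards keystrokes over UDP to a cwdaemon instance listening on port 6789; a non-interactive equivalent for a single message might be the following (the -w1 idle timeout assumes a reasonably standard netcat):

    echo "paris" | nc -u -w1 localhost 6789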
#!/bin/sh
set -o errexit

pmount /dev/sdb1
cp uboot_update_tool/iptvubootupdate.bin /media/sdb1
pumount /dev/sdb1
dlintw/twpda-uboot
to_usb.sh
Shell
gpl-2.0
113