Dataset schema:
  code        string  (length 2 to 1.05M)
  repo_name   string  (length 5 to 110)
  path        string  (length 3 to 922)
  language    string  (1 class)
  license     string  (15 classes)
  size        int64   (2 to 1.05M)
#!/usr/bin/env bash

echo 'Installing erlang from Canonical Repositories...'
apt-get -qy install erlang rabbitmq-server
repo_name: jdantonio/vagrant | path: scripts/erlang.sh | language: Shell | license: mit | size: 134
#! /bin/bash

BASEDIR="`pwd`"
SWEETROOT="../../"
cd "$BASEDIR"

# h0=g=f=1

TS=$((120))
OTS=$((TS*20))
RES=128
BENCH="galewsky"
#VISCOSITY=100000
VISCOSITY=0
SIMTIME=720000

PARAMS=""
#PARAMS+=" --output-file-mode bin"
PARAMS+=" -M $RES"
PARAMS+=" --dt $TS"
PARAMS+=" -o $OTS -u $VISCOSITY -t $SIMTIME --benchmark-name $BENCH --timestepping-method=ln_erk --timestepping-order=4"
PARAMS+=" --output-file-mode=bin"

EXEC="$(ls -1 $SWEETROOT/build/swe_sphere_*_release)"

echo "******************************************************************"
echo "$EXEC $PARAMS"
echo "******************************************************************"
$EXEC $PARAMS || exit 1
repo_name: schreiberx/sweet | path: benchmarks_sphere/galewsky_bin_output/run_2_benchmark.sh | language: Shell | license: mit | size: 669
#!/bin/bash
set -eu

totp_code="$(
    pass show misc/totp-codes |
    jq -r -c '.[] | select( .name == "Point: Amazon Web Services" ) | .secret' |
    oathtool --base32 --totp -
)"

echo "Logging in... (TOTP code $totp_code)"
"$HOME/git/point-tools/scripts/aws-get-session-token.sh" point "$totp_code"
repo_name: ammongit/scripts | path: work/login-aws.sh | language: Shell | license: mit | size: 294
#! /usr/local/bin/bash

strategies=(identity reverse)

usage() {
    echo "Usage: $0 [strategies] output_dir file"
    echo "available strategies:"
    for strat in ${strategies[@]}
    do
        echo -e " $strat"
    done
}

argc=$#
if [ $argc -lt 3 ]
then
    usage
    exit 1
fi

nstrats=$(expr $argc - 2)
declare -a strats
for i in $(seq 1 $nstrats)
do
    strats[$i]=$1
    shift
done

output_dir=$1
shift
filename=$1
shift

if ! [ -d $output_dir ]
then
    echo "Directory $output_dir does not exist"
    exit 2
fi

if ! [ -e $filename ]
then
    echo "Input file $filename does not exist"
    exit 3
fi

for strat in ${strats[@]}
do
    s=$strat
    s+="_"
    base=$(basename $filename)
    cp $filename "$output_dir/$s$base"
    line=$(skel -reorder -$strat "$output_dir/$s$base" --)
    echo $line
    echo "$base"
done
repo_name: Baltoli/skeletons | path: scripts/reorder.sh | language: Shell | license: mit | size: 791
#!/bin/bash

# install dotfiles
DOT_FILES=( ansible.cfg .ctags .gemrc .gitconfig .gitignore .gitmessage.txt .pryrc .rubocop.yml .tigrc .tmux.conf .vimrc .vimrc.neobundle .zshenv .zshrc .zprofile )

for file in ${DOT_FILES[@]}
do
    ln -s $HOME/dotfiles/$file $HOME/
done

# install bin for tmux
BIN_FILES=( battery )

for file in ${BIN_FILES[@]}
do
    ln -s $HOME/dotfiles/$file /usr/local/bin/
done

# install $HOME/.vim/after/plugin/common-settings.vim
[ ! -d $HOME/.vim/after/plugin ] && mkdir -p $HOME/.vim/after/plugin && ln -s $HOME/dotfiles/.vim/after/plugin/common-settings.vim $HOME/.vim/after/plugin/

# setup Neovim settings
[ ! -d ~/.config/nvim ] && mkdir -p ~/.config/nvim

NEOVIM_FILES=( dein.toml deinlazy.toml init.vim mapping.vim options.vim plugins.vim )

for file in ${NEOVIM_FILES[@]}
do
    ln -s $HOME/dotfiles/nvim/$file $HOME/.config/nvim/
done
repo_name: shifumin/setupfiles | path: setup_dotfiles.sh | language: Shell | license: mit | size: 868
#!/bin/bash
# Copyright 2010-2013 Matus Chochlik. Distributed under the Boost
# Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
exe_path=${1}
lib_name=${2:-libGL.so}
lib_hdr=${3:-$(dirname ${0})/../third_party/include/GL/glcorearb.h}

if [ ${#exe_path} -eq 0 ]
then
    echo "Usage: $(basename $0) <path-to-executable>" && exit 1
fi
if [ ! -x ${exe_path} ]
then
    echo "Error: ${exe_path} is not an executable" && exit 1
fi

lib_path=$(
    ldd ${exe_path} |
    grep -e "${lib_name}" |
    while read name sep path rest
    do echo ${path}
    done
)

if [ ${#lib_path} -eq 0 ]
then
    echo "Error: Executable does not link to ${lib_name}" && exit 1
fi
if [ ! -f "${lib_path}" ]
then
    echo "Error: File ${lib_path} not found" && exit 1
fi
if [ -h "${lib_path}" ]
then
    lib_path="$(readlink -e ${lib_path})"
fi

tmp_file=$(mktemp)

ltrace --demangle --library "${lib_path}" "${exe_path}" 2> "${tmp_file}" > /dev/null &
exe_pid=$!
sleep 3
kill ${exe_pid}

cut -d'(' -f1 "${tmp_file}" |
uniq | sort | uniq |
grep -e '^gl' |
while read gl_sym
do
    $(dirname $0)/_get_gl_sym_ver.sh "${gl_sym}" ${lib_hdr} #| grep "${gl_sym}:"
done #|
#sed -n 's/^.*GL_VERSION_\([1-9]_[0-9]\).*$/\1/p' |
#uniq | sort | uniq |
#tail -1

rm -f ${tmp_file}
repo_name: vif/3D-STG | path: ThirdParty/oglplus-develop/tools/_get_exe_gl_reqs.sh | language: Shell | license: mit | size: 1282
#!/usr/bin/env bash

CODE_PATH=${CODE_PATH:-/home/onur/projects/research/focus/ner-tagger-tensorflow}

EXP_NAME=${1:-default_exp_name}
GPU=${2:-0}

echo 'cd '${CODE_PATH}' && source /usr/local/bin/virtualenvwrapper.sh && workon dynet && source environment-variables && python control_experiments.py -m joint_ner_and_md with integration_mode=0 dynet_gpu='$GPU' embeddings_filepath="" word_lstm_dim=256 experiment_name='$EXP_NAME
echo 'cd '${CODE_PATH}' && source /usr/local/bin/virtualenvwrapper.sh && workon dynet && source environment-variables && python control_experiments.py -m joint_ner_and_md with integration_mode=1 dynet_gpu='$GPU' embeddings_filepath="" word_lstm_dim=256 experiment_name='$EXP_NAME
echo 'cd '${CODE_PATH}' && source /usr/local/bin/virtualenvwrapper.sh && workon dynet && source environment-variables && python control_experiments.py -m joint_ner_and_md with integration_mode=2 dynet_gpu='$GPU' embeddings_filepath="" word_lstm_dim=256 experiment_name='$EXP_NAME
repo_name: onurgu/ner-tagger-tensorflow | path: scripts/run-configurations-for-three-integration_modes.sh | language: Shell | license: mit | size: 987
#!/bin/bash

sed -i 's/"psr-4": {/"psr-4": { "Padosoft\\\\Composer\\\\Test\\\\": ".\/vendor\/padosoft\/composer\/tests\/",/g' ./composer.json
repo_name: alevento/composer | path: tests/config/sedCommand.sh | language: Shell | license: mit | size: 140
#!/bin/bash

xinput set-prop "ETPS/2 Elantech Touchpad" "libinput Tapping Enabled" 1
xinput set-prop "ETPS/2 Elantech Touchpad" 294 0
repo_name: krystianbajno/scripts | path: battery/tap.sh | language: Shell | license: mit | size: 133
#!/bin/bash

rm src/*
tar -zxf src.tgz

for n in 1 2 3 4 5 6 7 8 9 A B C; do
    rm "Disk${n}"/* "Disk${n}.img"
    mv "src/DISK${n}" "Disk${n}"
    dd if=/dev/zero of="Disk${n}.img" bs=512 count=2880
    mformat -i "Disk${n}.img" -f 1440
done

echo -n>filelist.txt
echo -n>failed.txt

cd ./src
ls|sed 's/\._$//g'|sed 's/_$//g'|while read filename; do
    grep -i -o ".:$filename" SETUP.INF|uniq|tr 'a-z' 'A-Z'>>../filelist.txt
    grep -i -o ".:$filename" CONTROL.INF|uniq|tr 'a-z' 'A-Z'>>../filelist.txt
done
cd ..

cat filelist.txt|sort|uniq|while read info;do
    idx=${info%%:*}
    fname=${info#*:}
    mv src/${fname}* "Disk${idx}" -i||echo $idx $fname>>failed.txt
done
mv src/* Disk1

for n in 1 2 3 4 5 6 7 8 9 A B C; do
    mcopy -i "Disk${n}.img" "Disk${n}"/* ::/
done
repo_name: frank-deng/retro-works | path: misc/processWin31Setup.sh | language: Shell | license: mit | size: 745
#!/bin/bash

my_dir="$(dirname "$0")"
source $my_dir/config.sh

if [[ $wd == "" ]]; then
    echo "Please specify working dir in config.sh"
    exit 1
fi

# Make new dir
outputDirName="output $(date "+%Y-%m-%d %H-%M-%S")"
mkdir "$outputDirName"

function async_collect {
    echo "-- $1 - Collecting..."
    output=`rsync -q "$username@$1:$wd/output.log" "$outputDirName/$1.log" 2>&1`
    if [[ $output == "" ]]; then
        echo "-- $1 - Success"
    else
        echo -e "-- $1 - Error\n$output"
    fi
}

while read host; do
    if [[ $host =~ ^#.* ]] || [[ $host == '' ]]; then
        continue
    fi
    async_collect $host &
done < nodes.txt

# Wait for all processes to finish
wait
repo_name: kabieror/planetlab-scripts | path: collect-output.sh | language: Shell | license: mit | size: 661
echo echo "Installing Android tools" brew install --cask android-studio brew install --cask openmtp # official "Android File Transfer" app for macOS from Google comes with bugs brew install --cask jd-gui brew install apktool brew install dex2jar brew install jadx brew install scrcpy brew install --cask android-platform-tools
repo_name: filipmaelbrancke/mac-install | path: scripts/android.sh | language: Shell | license: mit | size: 327
#!/bin/bash -e

echo "Doing merge"
echo "I'm running as user $USER in dir $PWD"

CLI="node ${PWD}/cli.js"

jobData=( $($CLI -q '$.job.name' -q '$.job.id' -q '$.job.baseline.content[?(@.name === "commits")].id[-1:]' --format values load_file ./data.json) )
echo Job name: ${jobData[0]}
jobId=${jobData[1]}
echo Job id: $jobId
revision=${jobData[2]}
echo I will merge revision $revision

res=( $($CLI -q '$.repository' -q '$.patches[-1:].change.newrev' -q '$.patches[-1:].change.refname' --format values read_type coderepo.revision $revision) )
repositoryId=${res[0]}
commit=${res[1]}
refname=${res[2]}

changeAndPatch=$(echo $refname|cut -d'/' -f4-|tr / ,)

echo "I will add code-review +2 to change $changeAndPatch"
ssh -p 29418 $USER@localhost gerrit review $changeAndPatch --code-review '+2'

$CLI -v --format jsonPretty merge_revision $revision

echo "Merge done"
repo_name: Combitech/codefarm | path: src/scripts/jobs/merge_revision.sh | language: Shell | license: mit | size: 865
# src/bash/pgsql-runner/funcs/print-help.help.sh
# v1.0.9
# ---------------------------------------------------------
# todo: add doHelpPrintHelp comments ...
# ---------------------------------------------------------
doHelpPrintHelp(){
    doLog "DEBUG START doHelpPrintHelp"

    cat doc/txt/pgsql-runner/helps/print-help.help.txt
    sleep 2

    # add your action implementation code here ...

    doLog "DEBUG STOP doHelpPrintHelp"
}
# eof func doHelpPrintHelp

# eof file: src/bash/pgsql-runner/funcs/print-help.help.sh
repo_name: YordanGeorgiev/pgsql-runner | path: src/bash/pgsql-runner/helps/print-help.help.sh | language: Shell | license: mit | size: 517
echo "This guide will install redis v2.8.19 to your system" # Update system sudo apt-get update sudo apt-get install build-essential sudo apt-get install tcl8.5 # Install redis # Download source google code wget http://download.redis.io/releases/redis-2.8.19.tar.gz tar xzf redis-2.8.19.tar.gz cd redis-2.8.19 make make test sudo make install cd utils sudo ./install_server.sh
repo_name: indrasantosa/common-shell-ubuntu | path: datastore/redis.sh | language: Shell | license: mit | size: 379
#!/bin/sh

PATH=/usr/local/sbin:/usr/sbin:/sbin:/usr/local/bin:/usr/bin:/bin
export PATH

cd /home/vixie/work/yeti-dm || exit 1
yeticonf_dm="/home/vixie/work/yeticonf/dm"
(cd $yeticonf_dm; git pull) 2>&1 | grep -v 'Already up-to-date.'

# this is F-root
iana_server="2001:500:2f::f"

#
# first, fetch the iana zone, and decide whether it has changed
#
dig @$iana_server +noidnout +onesoa +nocmd +nsid +nostats . axfr > iana-root.dns
if dnssec-verify -o . iana-root.dns > dnssec-verify.out 2>&1; then
    :
else
    cat dnssec-verify.out
    traceroute6 -q1 $iana_server
    exit 1
fi
if [ ! -s iana-root.dns ]; then
    echo 'zero length or missing zone file from iana?' >&2
    exit 1
fi

# SOA record fields: name ttl class type mname rname serial ...
reality=$(awk '$4 == "SOA" { print $7; exit }' iana-root.dns)
policy=$(cat $yeticonf_dm/ns/iana-start-serial.txt)

if [ $reality -ge $policy ]; then
    new_yaml=1
    if [ -e yeti-root-servers.yaml ]; then
        if cmp -s $yeticonf_dm/ns/yeti-root-servers.yaml \
            yeti-root-servers.yaml
        then
            new_yaml=0
        fi
    fi
    if [ $new_yaml -ne 0 ]; then
        rm -f yeti-root-servers.yaml
        cp $yeticonf_dm/ns/yeti-root-servers.yaml .
    fi
fi

new_zone=1
if [ -e iana-root.dns.old ]; then
    if cmp -s iana-root.dns iana-root.dns.old; then
        new_zone=0
    fi
fi
if [ $new_zone -ne 0 ]; then
    rm -f iana-root.dns.old
    cp iana-root.dns iana-root.dns.old
fi

#
# second, remake the conf-include file (allow-transfer, also-notify)
#
new_inc=0
if [ $reality -ge $policy ]; then
    new_inc=1
    if perl scripts/yeti-mkinc.pl; then
        :
    else
        echo 'yeti-mkinc failed' >&2
        exit 1
    fi
    if [ -e named.yeti.inc.old ]; then
        if cmp -s named.yeti.inc named.yeti.inc.old; then
            new_inc=0
        fi
    fi
    if [ $new_inc -ne 0 ]; then
        rndc -s yeti-dm reconfig
        rm -f named.yeti.inc.old
        cp named.yeti.inc named.yeti.inc.old
    fi
fi

#
# third, if new zone, create the yeti zone based on the iana zone, and sign it
#
if [ $new_zone -ne 0 ]; then
    keys=$(perl scripts/yeti-mkdns.pl)
    if [ $? -ne 0 ]; then
        echo 'yeti-mkdns failed' >&2
        exit 1
    fi
    if dnssec-signzone -Q -R -o . -x -s now-1h -e now+167h \
        yeti-root.dns $keys \
        > dnssec-signzone.out 2>&1
    then
        rndc -s yeti-dm reload . 2>&1 \
        | grep -v 'zone reload up-to-date'
    else
        cat dnssec-signzone.out
        exit 1
    fi
fi
exit
repo_name: BII-Lab/Yeti-Project | path: script/TISF/cronrun-often.sh | language: Shell | license: mit | size: 2223
# os.bash
#
# Platform detection and OS functions.

os.platform() {
    case "$(uname -rs)" in
        CYGWIN*)
            echo cygwin
            ;;
        *WSL2|*microsoft-standard)
            echo wsl2
            ;;
        *Microsoft)
            echo wsl1
            ;;
        Darwin*)
            echo osx
            ;;
        FreeBSD*)
            echo freebsd
            ;;
        Linux*)
            echo linux
            ;;
    esac
}
repo_name: ellipsis/ellipsis | path: src/os.bash | language: Shell | license: mit | size: 456
#!/bin/bash if [ "$NODE_ENV" = "circleci" ]; then echo "> Starting api server" cd ~/cache/opencollective-api PG_DATABASE=opencollective_dvl npm start & API_PID=$! cd - echo "> Starting frontend server" npm start & FRONTEND_PID=$! # Record video and upload them if test fail on CI CYPRESS_CONFIG="video=true,videoUploadOnPasses=true" CYPRESS_RECORD="--record" else # Never record video in dev CYPRESS_CONFIG="video=false" CYPRESS_RECORD="" fi echo "" echo "> Starting server jest tests" jest test/server/* RETURN_CODE=$? if [ $RETURN_CODE -ne 0 ]; then echo "Error with jest tests, exiting" exit 1; fi echo "" echo "> Ensure cypress binary is installed (should normally be cached)" cypress install echo "> Running cypress tests" cypress run ${CYPRESS_RECORD} --config ${CYPRESS_CONFIG} RETURN_CODE=$? if [ $RETURN_CODE -ne 0 ]; then echo "Error with cypress e2e tests, exiting" exit 1; fi echo "" if [ "$NODE_ENV" = "circleci" ]; then echo "Killing all node processes" kill $API_PID; kill $FRONTEND_PID; echo "Exiting with code $RETURN_CODE" exit $RETURN_CODE fi
repo_name: OpenCollective/frontend | path: scripts/run_e2e_tests.sh | language: Shell | license: mit | size: 1111
echo "postrm" # not sure what this does... %systemd_postun_with_restart hello.service
repo_name: mh-cbon/go-bin-rpm | path: demo/rpm/postrm.sh | language: Shell | license: mit | size: 87
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2014:1148
#
# Security announcement date: 2014-09-04 00:18:56 UTC
# Script generation date: 2017-01-01 21:11:11 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - squid.x86_64:2.6.STABLE21-7.el5_10
#
# Last versions recommended by security team:
# - squid.x86_64:2.6.STABLE21-7.el5_10
#
# CVE List:
# - CVE-2013-4115
# - CVE-2014-3609
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

sudo yum install squid.x86_64-2.6.STABLE21 -y
repo_name: Cyberwatch/cbw-security-fixes | path: CentOS_5/x86_64/2014/CESA-2014:1148.sh | language: Shell | license: mit | size: 638
make clean >/dev/null 2>&1
rm -f matcher

echo "compiling..."
if ! make CFLAGS="-ftest-coverage -fprofile-arcs -DDEBUG_OPT -DFLOATEVAL" 2>/dev/null >/dev/null; then
    echo "compile failed"
    exit 1
fi

# split the check list on newlines only
IFS='
'
for line in `cat sanitychecks.txt | grep -v '^#'`
do
    echo CHECK $line
    if ! echo $line | (./matcher 2>&1); then
        echo "------------------"
        echo "SANITY CHECK FAILED"
        exit 1
    fi
    echo "......................"
done

echo "all tests happy :D"
repo_name: bl0ckeduser/symdiff | path: sanitycheck.sh | language: Shell | license: mit | size: 454
../loadClassificationResults -dropAll bioInt /data2/topmodels/CCLEdrugPredictions/results.ra
../loadClassificationResults bioInt /data2/topmodels/CCLEdrugPredictions2/results.ra
../loadClassificationResults bioInt /data2/topmodels/CCLEdrugPredictions3/results.ra
../loadClassificationResults bioInt /data2/topmodels/LGG_survival/results.ra
../loadClassificationResults bioInt /data2/topmodels/expressionBvL_80_20/results.ra
../loadClassificationResults bioInt /data2/topmodels/expressionBvL_80_20_2/results.ra
../loadClassificationResults bioInt /data2/topmodels/expressionLumAvLumB_80_20/results.ra
../loadClassificationResults bioInt /data2/topmodels/expressionLumAvLumB_80_20_2/results.ra
#../loadClassificationResults bioInt /data2/topmodels/paradigmBvL_80_20/results.ra
../loadClassificationResults bioInt /data2/topmodels/paradigmLumAvLumB_80_20/results.ra
../loadClassificationResults bioInt /data2/topmodels/gclDrugPredictions_run1/results.ra
../loadClassificationResults bioInt /data2/topmodels/gclDrugPredictions_run2/results.ra

##########OLD HIVE BUILD########
##../loadClassificationResults -dropAll bioInt /hive/users/cszeto/TCGAcrossCancer1.0/Ba_V_Lu/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/Ba_V_Lu_80-20/results.ra
##../loadClassificationResults -dropAll bioInt /hive/users/cszeto/TCGAcrossCancer1.0/OV_platinumSensitivity/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/GBM_survival/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/GeneExpressionAtlasMesVgbm/results.ra
#../loadClassificationResults -dropAll bioInt /hive/users/cszeto/CCLEdrugPredictions/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/CCLEdrugPredictions2/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/CCLEdrugPredictions3/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/6n6predictions/untreatedVlowdose/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/6n6predictions/untreatedVhighdose/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/6n6predictions/untreatedVsurvivor/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/6n6predictions/lowdoseVhighdose/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/6n6predictions/lowdoseVsurvivor/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/6n6predictions/highdoseVsurvivor/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/OV_platinumFreeInterval/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmBvL_80_20/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmIvM_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmIPvDM_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmSvL_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionBvL_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionBvL_80_20_2/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionIvM_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionIPvDM_80_20/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionIPvDM_80_20_2/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionSvL_80_20/results.ra
##../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionSvL_80_20_2/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionBRCAOV_BvL_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionBRCAOV_BvL_80_20_2/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionCvN_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionCvN_80_20_2/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionLumAvLumB_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmCvN_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmLumAvLumB/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/paradigmBRCAOV_BvL_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionBRCAOVLUSC_BvL_80_20/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/expressionBRCAOVLUSC_BvL_80_20_2/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/prostatePrimaryVsMet/topmodel/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/prostatePrimaryVsMet/copyNumberTopModel/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/prostatePrimaryVsMet/integratedTopModel/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/LGG_survival/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/LGG_survival_excludeCluster4/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/LGG_cluster1n2n4v3n5/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/LGG_cluster1n2v3n5/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/LGG_cluster1v5/results.ra
#../loadClassificationResults bioInt /hive/users/cszeto/TCGAcrossCancer1.0/LGG_cluster2v3/results.ra
repo_name: christopherszeto/hgClassifications | path: hgClassificationsViewer/hgClassificationsData/cancer2/defaults.sh | language: Shell | license: mit | size: 5806
#!/bin/sh

# If a command fails then the deploy stops
set -e

printf "\033[0;32mDeploying updates to GitHub...\033[0m\n"

# Clear public directory
rm -rf public

# Build the project.
hugo -t cactus

# Add changes to git.
git add .

# Commit changes.
msg="Rebuilding site $(date)"
if [ -n "$*" ]; then
    msg="$*"
fi
git commit -m "$msg"

# Push source and build repos.
git push origin master
repo_name: imryan/imryan.github.io | path: deploy.sh | language: Shell | license: mit | size: 390
#!/usr/bin/env bash

# Run the script from the folder you are in...
CURRENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

pdflatex "$CURRENT_DIR/thesis_main.tex"
RETVAL="$?"
if [[ "${RETVAL}" -ne 0 ]] ; then
    echo "First pdflatex run failed"
    exit ${RETVAL}
fi

makeindex thesis_main.nlo -s nomencl.ist -o thesis_main.nls
RETVAL="$?"
if [[ "${RETVAL}" -ne 0 ]] ; then
    echo "makeindex run failed"
    exit ${RETVAL}
fi

biber "$CURRENT_DIR/thesis_main"
RETVAL="$?"
if [[ "${RETVAL}" -ne 0 ]] ; then
    echo "biber run failed"
    exit ${RETVAL}
fi

pdflatex "$CURRENT_DIR/thesis_main.tex"
RETVAL="$?"
if [[ "${RETVAL}" -ne 0 ]] ; then
    echo "Second pdflatex run failed"
    exit ${RETVAL}
fi

pdflatex "$CURRENT_DIR/thesis_main.tex"
RETVAL="$?"
if [[ "${RETVAL}" -ne 0 ]] ; then
    echo "Third pdflatex run failed"
    exit ${RETVAL}
fi

# rm reports missing files on stderr, so silence that stream
rm *.bbl 2> /dev/null
rm *.blg 2> /dev/null
rm *.aux 2> /dev/null
rm *.bcf 2> /dev/null
rm *.ilg 2> /dev/null
rm *.lof 2> /dev/null
rm *.log 2> /dev/null
rm *.nlo 2> /dev/null
rm *.nls* 2> /dev/null
rm *.out 2> /dev/null
rm *.toc 2> /dev/null
rm *.run.xml 2> /dev/null

echo "PDF Compile: Success"
exit 0
repo_name: koep/FOM-LaTeX-Template | path: compile.sh | language: Shell | license: mit | size: 1154
#!/usr/bin/env bash

problems=0
total=0

for file in `find ./src -name "*.js" -or -name "*.jsx"`
do
    total=$((total+1))
    if [[ `head -1 $file` != "// @flow" ]]; then
        echo "missing '// @flow' in $file"
        problems=$((problems+1))
    fi
done

if [[ "$problems" == 1 ]]; then
    echo "$problems of $total files is unchecked by flow"
    exit 1
elif [[ "$problems" -gt 1 ]]; then
    echo "$problems of $total files are unchecked by flow"
    exit 1
else
    echo "$total of $total files are checked by flow"
    exit 0
fi
repo_name: coreyflynn/jubilation | path: scripts/flow-check.sh | language: Shell | license: mit | size: 518
#!/usr/bin/env bash

NUMBER_OF_APP="$(jq ". | length" backing/config-backing.json)"

for i in `seq 1 $NUMBER_OF_APP`
do
    REGX_NAME=".["$i-1"] | .name"

    REGX_PORT=".["$i-1"] | .port"
    MS_PORT="$(jq "$REGX_PORT" backing/config-backing.json | tr -d '/"')"
    PORT_MS_NAME="$(jq "$REGX_NAME" backing/config-backing.json | tr -d '/"')_PORT"
    export $PORT_MS_NAME=$MS_PORT

    REGX_HOST=".["$i-1"] | .host"
    MS_HOST="$(jq "$REGX_HOST" backing/config-backing.json | tr -d '/"')"
    HOST_MS_NAME="$(jq "$REGX_NAME" backing/config-backing.json | tr -d '/"')_HOST"
    export $HOST_MS_NAME=$MS_HOST

    REGX_URL=".["$i-1"] | .url"
    MS_URL="$(jq "$REGX_URL" backing/config-backing.json | sed 's:^.\(.*\).$:\1:')"
    URL_MS_NAME="$(jq "$REGX_NAME" backing/config-backing.json | tr -d '/"')_URL"
    export $URL_MS_NAME=$MS_URL

    REGX_PASS=".["$i-1"] | .password"
    MS_PASS="$(jq "$REGX_PASS" backing/config-backing.json | tr -d '/"')"
    PASS_MS_NAME="$(jq "$REGX_NAME" backing/config-backing.json | tr -d '/"')_PASSWORD"
    export $PASS_MS_NAME=$MS_PASS

    # REGX_URL=".["$i-1"] | .url"
    # MS_URL="$(jq "$REGX_URL" backing/config-backing.json | tr -d '/"')"
    # URL_MS_NAME="$(jq "$REGX_NAME" backing/config-backing.json | tr -d '/"')_URL"
    # export $URL_MS_NAME=$MS_URL
    # echo $MS_URL
done
repo_name: OElabed/ice-microservices | path: profiles/dev/backing/export-backing.sh | language: Shell | license: mit | size: 1330
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-44-1
#
# Security announcement date: 2014-09-03 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:48 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - libwpd:0.8.14-1+deb6u1
#
# Last versions recommended by security team:
# - libwpd:0.8.14-1+deb6u1
#
# CVE List:
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

sudo apt-get install --only-upgrade libwpd=0.8.14-1+deb6u1 -y
repo_name: Cyberwatch/cbw-security-fixes | path: Debian_6_(Squeeze)/i386/2014/DLA-44-1.sh | language: Shell | license: mit | size: 591
#!/bin/bash
###################################################################
## This is a template file for new examples.  It explains how to ##
## check for various things.                                     ##
##                                                               ##
## An example script should exit with code 0 if the test passes, ##
## and with some other code if the test fails.                   ##
###################################################################

##########################################################
# Various options that must be updated for each example
N="40"
EXAMPLE_DIRECTORY="example_$N"
EXAMPLE_INPUT="example_$N.v"
EXAMPLE_OUTPUT="bug_$N.v"
EXTRA_ARGS=("$@")
##########################################################

# Get the directory name of this script, and `cd` to that directory
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR/$EXAMPLE_DIRECTORY"

FIND_BUG_PY="$(cd "$DIR/.." && pwd)/find-bug.py"

# Initialize common settings like the version of python
. "$DIR/init-settings.sh"

ABS_PATH="$(${PYTHON} -c 'import os.path; print(os.path.abspath("."))')"

# Set up bash to be verbose about displaying the commands run
PS4='$ '
set -x

# Disable parallel make in subcalls to the bug minimizer because it screws with things
. "$DIR/disable-parallel-make.sh"

######################################################################
# Create the output file (to normalize the number of "yes"es needed),
# and run the script only up to the request for the regular
# expression; then test that the output is as expected.
#
# If you don't need to test the output of the initial requests, feel
# free to remove this section.
#
# Note that the -top argument only appears in Coq >= 8.4
#
# Note also that the line numbers tend to be one larger in old
# versions of Coq (<= 8.6?)
EXPECTED_ERROR=$(cat <<EOF
This file produces the following output when Coq'ed:
Set
     : Type
File "/tmp/tmp[A-Za-z0-9_/]\+\.v", line 1[0-9], characters 0-15:
Error: The command has not failed\s\?!
EOF
)

# pre-build the files to normalize the output for the run we're testing
find "$DIR/example_$N" \( -name "*.vo" -o -name "*.glob" \) -delete
echo "y" | ${PYTHON} "$FIND_BUG_PY" "$EXAMPLE_INPUT" "$EXAMPLE_OUTPUT" "${EXTRA_ARGS[@]}" 2>/dev/null >/dev/null
# kludge: create the .glob file so we don't run the makefile
touch "${EXAMPLE_OUTPUT%%.v}.glob"
ACTUAL_PRE="$((echo "y"; echo "y") | ${PYTHON} "$FIND_BUG_PY" "$EXAMPLE_INPUT" "$EXAMPLE_OUTPUT" "${EXTRA_ARGS[@]}" -l - 2>&1)"
ACTUAL_PRE_ONE_LINE="$(echo "$ACTUAL_PRE" | tr '\n' '\1')"
TEST_FOR="$(echo "$EXPECTED_ERROR" | tr '\n' '\1')"
if [ "$(echo "$ACTUAL_PRE_ONE_LINE" | grep -c "$TEST_FOR")" -lt 1 ]
then
    echo "Expected a string matching:"
    echo "$EXPECTED_ERROR"
    echo
    echo
    echo
    echo "Actual:"
    echo "$ACTUAL_PRE"
    ${PYTHON} "$DIR/prefix-grep.py" "$ACTUAL_PRE_ONE_LINE" "$TEST_FOR"
    exit 1
fi
#########################################################################################################

#####################################################################
# Run the bug minimizer on this example; error if it fails to run
# correctly.  Make sure you update the arguments, etc.
${PYTHON} "$FIND_BUG_PY" "$EXAMPLE_INPUT" "$EXAMPLE_OUTPUT" "${EXTRA_ARGS[@]}" || exit $?

######################################################################
# Put some segment that you expect to see in the file here.  Or count
# the number of lines.  Or make some other test.  Or remove this block
# entirely if you don't care about the minimized file.
EXPECTED=$(cat <<EOF
(\* -\*- mode: coq; coq-prog-args: ("-emacs"\( "-w" "-deprecated-native-compiler-option,-native-compiler-disabled"\)\? "-R" "\." "Top"\( "-top" "example_[0-9]\+"\)\?\( "-native-compiler" "ondemand"\)\?) -\*- \*)
(\* File reduced by coq-bug-minimizer from original input, then from [0-9]\+ lines to [0-9]\+ lines, then from [0-9]\+ lines to [0-9]\+ lines \*)
(\* coqc version [^\*]*\*)
Fail Check Set\.
EOF
)

EXPECTED_ONE_LINE="$(echo "$EXPECTED" | grep -v '^$' | tr '\n' '\1')"
ACTUAL="$(cat "$EXAMPLE_OUTPUT" | grep -v '^$' | tr '\n' '\1')"
LINES="$(echo "$ACTUAL" | grep -c "$EXPECTED_ONE_LINE")"
if [ "$LINES" -ne 1 ]
then
    echo "Expected a string matching:"
    echo "$EXPECTED"
    echo "Got:"
    cat "$EXAMPLE_OUTPUT" | grep -v '^$'
    ${PYTHON} "$DIR/prefix-grep.py" "$ACTUAL" "$EXPECTED_ONE_LINE"
    exit 1
fi
exit 0
repo_name: JasonGross/coq-tools | path: examples/run-example-40.sh | language: Shell | license: mit | size: 4446
#!/bin/bash

gyp nkf.gyp --depth=. -f make --generator-output=./out
V=1 make -C out
cp out/out/Default/nkf.so ./
repo_name: hnakamur/lua-nkf-native | path: build.sh | language: Shell | license: mit | size: 112
#!/usr/bin/bash
#SBATCH --nodes 1 --ntasks 24 --mem 24G -p intel --time 3-0:00:00
#SBATCH --job-name=AntiSMASH
#SBATCH --output=AntiSMASH.%A_%a.log

CPUS=2
if [ $SLURM_CPUS_ON_NODE ]; then
    CPUS=$SLURM_CPUS_ON_NODE
fi

GENBANK=gbk
EXT=gbk

N=1
if [ $1 ]; then
    N=$1
elif [ ${SLURM_ARRAY_TASK_ID} ]; then
    N=${SLURM_ARRAY_TASK_ID}
fi

if [ -f config.txt ]; then
    source config.txt
else
    echo "need config file to set some project-specific variables"
    exit
fi

OUTDIR=secondary_metabolite
if [ ! -d $OUTDIR ]; then
    mkdir -p $OUTDIR
fi

TOTAL=$(ls $GENBANK/*.${EXT} | wc -l)
if [ $N -gt $TOTAL ]; then
    echo "Only $TOTAL files in folder $GENBANK, skipping $N"
    exit
elif [[ $N == 0 ]]; then
    echo "N must be between 1 and $TOTAL"
    exit
fi

INFILE=$(ls $GENBANK/*.${EXT} | sed -n ${N}p)
echo "INFILE=$INFILE"
OUT=$OUTDIR/$(basename ${INFILE} .${EXT})

module unload perl
module unload perl
module load antismash/4.1.0
module unload python/3
source activate antismash

CPU=$SLURM_CPUS_ON_NODE
antismash --taxon fungi -c $CPUS --outputfolder $OUT --clusterblast --subclusterblast --smcogs --knownclusterblast \
    --borderpredict --asf \
    --full-hmmer --cassis --clusterblast --smcogs --subclusterblast --knownclusterblast $INFILE
repo_name: stajichlab/Comparative_pipeline | path: pipeline/run_antismash.sh | language: Shell | license: mit | size: 1227
#!/usr/bin/env bash
set -e

version=${1?No version supplied}

echo "Stop application"
ssh [email protected] "sudo docker rm -f atm"

echo "Start application"
ssh [email protected] "sudo docker run -d -e REDIS_HOST=atm-redis.c156rq.0001.apse2.cache.amazonaws.com --name=atm -p 8080:8080 \"nicholasren/atm-service:$version\""
repo_name: aconex-atm/atm-service | path: scripts/deploy.sh | language: Shell | license: mit | size: 322
#!/usr/bin/env bash
#
# AJAlabs.com
#
# macOS Defaults - macOS Sierra 10.12 - 11.1
# v0.6 beta
#
# This is a customized version of ~/.macos — http://mths.be/macos
# Check out http://secrets.blacktree.com for an extensive list of defaults
# Only defaults that have been tested to work with macOS 10.12 are enabled

# Exit unless host OS is macOS
if [ `uname -s` != "Darwin" ]; then
  echo -e "\nThis script will only run on macOS"
  echo "exiting"
  exit 1
fi

# Ask for the administrator password upfront
sudo -v

# Keep-alive: update existing `sudo` time stamp until `macos-defaults.sh` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &

###############################################################################
# General UI/UX                                                               #
###############################################################################

# Set computer name (as done via System Preferences → Sharing)
echo -e "\nType the new Computer Name and press [enter]."
echo -e "or\nLeave blank and press [enter] to not modify the Computer Name."
read newName

if [ $newName ]; then
  sudo scutil --set ComputerName "$newName"
  sudo scutil --set HostName "$newName"
  sudo scutil --set LocalHostName "$newName"
  sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server NetBIOSName -string "$newName"
fi

# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "

# Set standby delay to 24 hours (default is 1 hour)
sudo pmset -a standbydelay 86400

# Set sidebar icon size to medium
defaults write NSGlobalDomain NSTableViewDefaultSizeMode -int 2

# Always show scrollbars
defaults write NSGlobalDomain AppleShowScrollBars -string "Always"

# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true

###############################################################################
# SSD-specific tweaks                                                         #
###############################################################################

# NOTE: Disregard the warning generated on macOS 11.

# Disable the sudden motion sensor as it’s not useful for SSDs
sudo pmset -a sms 0

###############################################################################
# Trackpad, mouse, keyboard, and input                                        #
###############################################################################

# Set a blazingly fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 1

# Set the delay before starting the repeat rate
defaults write NSGlobalDomain InitialKeyRepeat -int 25

# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false

# Disable “natural” scrolling because it's stupid
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false

# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false

###############################################################################
# Screen                                                                      #
###############################################################################

# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0

# Create the Screenshots folder if it does not exist
mkdir -p "$HOME/Screenshots"

# Save screenshots to the ~/Screenshots folder
defaults write com.apple.screencapture location -string "$HOME/Screenshots"

###############################################################################
# Finder                                                                      #
###############################################################################

# Finder: show hidden files by default
defaults write com.apple.finder AppleShowAllFiles -bool true

# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true

# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true

# Finder: allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true

# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"

# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false

# Use column view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`
defaults write com.apple.finder FXPreferredViewStyle -string "clmv"

# Show the ~/Library folder
chflags nohidden ~/Library

# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true

###############################################################################
# Dock, Dashboard, and hot corners                                            #
###############################################################################

# Disable Dashboard
defaults write com.apple.dashboard mcx-disabled -bool true

###############################################################################
# Safari & WebKit                                                             #
###############################################################################

# Set Safari’s home page to `about:blank` for faster loading
defaults write com.apple.Safari HomePage -string "about:blank"

# Prevent Safari from opening ‘safe’ files automatically after downloading
defaults write com.apple.Safari AutoOpenSafeDownloads -bool false

# Allow hitting the Backspace key to go to the previous page in history
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2BackspaceKeyNavigationEnabled -bool true

# Enable Safari’s debug menu
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true

###############################################################################
# Terminal                                                                    #
###############################################################################

# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4

# Use a modified version of the Pro theme by default in Terminal.app
open "$HOME/.dotfiles/init/AJAlabs.terminal"
sleep 2 # Wait a bit to make sure the theme is loaded
defaults write com.apple.terminal "Default Window Settings" -string "AJAlabs"
defaults write com.apple.terminal "Startup Window Settings" -string "AJAlabs"

###############################################################################
# Disk Utility                                                                #
###############################################################################

# Enable the debug menu in Disk Utility
defaults write com.apple.DiskUtility DUDebugMenuEnabled -bool true
defaults write com.apple.DiskUtility advanced-image-options -bool true

###############################################################################
# Text Editing                                                                #
###############################################################################

# Disable Smart Dashes
defaults write -g NSAutomaticDashSubstitutionEnabled 0

# Disable Smart Quotes
defaults write -g NSAutomaticQuoteSubstitutionEnabled 0

###############################################################################
# TextMate 2.0                                                                #
###############################################################################

# Enable Font Smoothing always on
defaults write com.macromates.TextMate.preview fontSmoothing 1

# Set font size to 13
#defaults write com.macromates.textmate OakTextViewNormalFontSize -int 13

# Set font to Monaco
#defaults write com.macromates.textmate OakTextViewNormalFontName -string "Monaco"

echo ""
echo "Oh dip! it worked! Restart/Log Out to apply the changes."
echo ""
repo_name: AJ-Acevedo/dotfiles | path: init/macos-defaults.sh | language: Shell | license: mit | size: 8205
#!/usr/bin/env bash

# chrome-extension-search.sh
# Stefan Wuensch 2017-08-31
#
# In order to locate Chrome Extensions from only their ID,
# this generates a Google search URL from each Extension ID
# found in your Chrome Extensions folder.
#
# Why is this useful? One day all my Chrome Extensions disappeared.
# Poof! Gone.
# The only thing I had was a backup which showed the IDs such as
# "edacconmaakjimmfgnblocblbcdcpbko". Not useful if you want to
# figure out what it is to re-install it! This script at least
# makes it quick to search Google for that ID, and it very likely
# will get you a Chrome Store result.
#
# Optional: Supply an argument of an alternate directory to search.
# (This can be very useful if you are trying to look at a Time Machine
# backup and figure out which extensions you had at a certain date
# in the past.)
#
# Output: Basic HTML you can use to track down an Extension.
#
# Example output:
# <html>
# <a href="https://www.google.com/search?q=fhgenkpocbhhddlgkjnfghpjanffonno">https://www.google.com/search?q=fhgenkpocbhhddlgkjnfghpjanffonno</a> <br>
# <a href="https://www.google.com/search?q=jlmadbnpnnolpaljadgakjilggigioaj">https://www.google.com/search?q=jlmadbnpnnolpaljadgakjilggigioaj</a> <br>
# </html>

DIR="${HOME}/Library/Application Support/Google/Chrome/Default/Extensions"
[[ $# -eq 1 ]] && DIR="${1}"

cd "${DIR}" 2>/dev/null
[[ $? -ne 0 ]] && echo "Can't 'cd' to \"${DIR}\" - bailing out." && exit 1

echo "<html>"

ls | while read thing ; do
    # Chrome Extension IDs apparently are always 32 characters
    [[ ${#thing} -ne 32 ]] && >&2 echo "Skipping \"${thing}\" because it doesn't look like an Extension ID." && continue
    url="https://www.google.com/search?q=${thing}"
    echo "<a href=\"${url}\">${url}</a> <br>"
done

echo "</html>"

# fin
repo_name: stefan-wuensch/UNIX-tools | path: chrome-extension-search.sh | language: Shell | license: mit | size: 1796
#!/bin/bash
######################################################
### Script for (semi)automatic Drupal Updates      ###
### Author: https://github.com/fdellwing          ###
### Date: 03.06.2019                              ###
### Contact: [email protected]                    ###
######################################################

######################################################
### You may edit these values if they differ on   ###
### your system.                                  ###
######################################################

# The root path for your drupal installations
WWW_PATH="/var/www/"
# Database backup directory
DB_BACKUP_PATH="/root/drupal_update_db_back/"
# Log file directory
LOG_PATH="/var/log/"
# Drupal files owner (33=www-data)
OID=33
# Drupal files group (33=www-data)
GID=33
# Your systems MYSQL user settings
# If you do not use a debian based system,
# the file has to look like this:
# [client]
# host     = localhost
# user     = debian-sys-maint
# password = passphrase
# socket   = /var/run/mysqld/mysqld.sock
CONF="/etc/mysql/debian.cnf"

######################################################
### Important functions used by the script        ###
######################################################

function set_maintenance {
    if [ "$D_VERSION" -eq 7 ]; then
        # Set maintenance mode
        drush @sites vset maintenance_mode 1 -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        # Clear the cache to make sure we are in maintenance
        drush @sites cc all -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log # clear cache
    else
        # Set maintenance mode
        drush @sites sset system.maintenance_mode 1 -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        # Clear the cache to make sure we are in maintenance
        drush @sites cr -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log # clear cache
    fi
}

function unset_maintenance {
    if [ "$D_VERSION" -eq 7 ]; then
        # Unset maintenance mode
        drush @sites vset maintenance_mode 0 -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        # Clear the cache to make sure we are not in maintenance
        drush @sites cc all -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log # clear cache
    else
        # Unset maintenance mode
        drush @sites sset system.maintenance_mode 0 -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        # Clear the cache to make sure we are not in maintenance
        drush @sites cr -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log # clear cache
    fi
}

######################################################
### The main script starts here.                  ###
######################################################

# Display usage if no parameters are given
if [ -z "$1" ]; then
    echo "Usage: ./drupal-up.sh <foldername or file>"
    echo "Instead of a foldername, you can provide a file with foldernames"
# Run the program if exactly one parameter is given
elif [ -z "$2" ]; then
    # Delete old db backups
    find "$DB_BACKUP_PATH" -iname "*" -mtime +90 -delete 2> /dev/null || mkdir -p "$DB_BACKUP_PATH"
    # Clear the logfiles from previous run
    if [ ! -d "$LOG_PATH" ]; then
        mkdir -p "$LOG_PATH"
    fi
    date >| "$LOG_PATH"drupal-mysql.log
    date >| "$LOG_PATH"drupal-up.log
    # Check if the given parameter is a directory in WWW_PATH
    if [ -d "$WWW_PATH""$1" ]; then
        drupale=( "$1" )
    else
        # If not, is it a file?
        if [ -e "$1" ]; then
            # Creates an array from the input file
            drupale=()
            while IFS=$'\n' read -r line; do drupale+=("$line"); done < <(cat "$1")
        else
            # If not, exit the script
            echo "----------------------"
            echo 'The given parameter is no existing directory or file.'
            echo "----------------------"
            exit 1
        fi
    fi
    echo "----------------------"
    echo 'Starting update for '"${#drupale[@]}"' instances.'
    echo "----------------------"
    for drupal in "${drupale[@]}"
    do
        # Get the databases from the drupal settings
        datenbanken=()
        while IFS=$'\n' read -r line; do datenbanken+=("$line"); done < <(grep -R -h -E "^[[:space:]]*'database' => '" "$WWW_PATH""$drupal"/sites/*/settings.php | grep -Po "(?<==> ').*(?=')")
        TMP_PATH="$WWW_PATH""$drupal"
        cd "$TMP_PATH" || exit 1
        D_VERSION=$(drush @sites status -y --format=json 2> /dev/null | grep 'drupal-version' | grep -Eo '[0-9]+\.' | head -c 1)
        echo "----------------------"
        echo 'Starting update for '"$drupal"'.'
        echo "----------------------"
        set_maintenance
        echo "----------------------"
        echo 'Site(s) moved to maintenance mode.'
        echo "----------------------"
        echo "----------------------"
        echo 'Starting '"${#datenbanken[@]}"' database backup(s).'
        echo "----------------------"
        # shellcheck disable=SC2034
        i=1
        # Create the DB backups
        for db in "${datenbanken[@]}"
        do
            # Dump the database into a self contained file
            # If the command fails, we need to stop or we can harm our drupal permanently
            if mysqldump --defaults-extra-file="$CONF" --add-drop-table "$db" | gzip > "$DB_BACKUP_PATH""$db""_""$(date +'%Y_%m_%d')".sql.gz 2>> "$LOG_PATH"drupal-mysql.log; then
                echo "----------------------"
                echo 'Database backup successfully created ('"$i"'/'"${#datenbanken[@]}"').'
                echo "----------------------"
            else
                echo "----------------------"
                echo "Error while creating the database backup, please check the logfile \"$LOG_PATH""drupal-mysql.log\"."
                echo "----------------------"
                unset_maintenance
                # If you are here, please read the log, because there is something wrong
                exit 1
            fi
            ((i++))
        done
        echo "----------------------"
        echo 'Starting update of drupal.'
        echo "----------------------"
        # Do the drupal update
        drush @sites rf -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        drush @sites up -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        echo "----------------------"
        echo 'Finishing update of drupal.'
        echo "----------------------"
        # To be sure, do a DB update
        drush @sites updatedb -y >> /dev/null 2>> "$LOG_PATH"drupal-up.log
        # Set the correct owner
        chown -R $OID:$GID "$TMP_PATH"
        unset_maintenance
        echo "----------------------"
        echo 'Site(s) moved out of maintenance mode, please check the website(s).'
        echo "----------------------"
    done
    # Clear error log from previous run
    date >| "$LOG_PATH"drupal-up-error.log
    # Put all the errors from the log to the error log
    grep error "$LOG_PATH"drupal-up.log >> "$LOG_PATH"drupal-up-error.log
    # Count the lines in error log >1 = there are errors
    LINECOUNT=$(wc -l "$LOG_PATH"drupal-up-error.log | cut -f1 -d' ')
    if [ "$LINECOUNT" -gt 1 ]; then
        echo "----------------------"
        echo 'All updates finished.'
        echo "There are some errors, please check the logfile \"$LOG_PATH""drupal-up-error.log\"."
        echo "----------------------"
    else
        echo "----------------------"
        echo 'All updates finished.'
        echo "----------------------"
    fi
# Display usage if more than one parameter is given
else
    echo "Usage: ./drupal-up.sh <foldername or file>"
    echo "Instead of a foldername, you can provide a file with foldernames"
fi
repo_name: fdellwing/drupal_up | path: drupal-up.sh | language: Shell | license: mit | size: 6953
#!/bin/sh

#
# Vivado(TM)
# runme.sh: a Vivado-generated Runs Script for UNIX
# Copyright 1986-2015 Xilinx, Inc. All Rights Reserved.
#

if [ -z "$PATH" ]; then
    PATH=/opt/Xilinx/Vivado/2015.4/ids_lite/ISE/bin/lin64:/opt/Xilinx/Vivado/2015.4/bin
else
    PATH=/opt/Xilinx/Vivado/2015.4/ids_lite/ISE/bin/lin64:/opt/Xilinx/Vivado/2015.4/bin:$PATH
fi
export PATH

if [ -z "$LD_LIBRARY_PATH" ]; then
    LD_LIBRARY_PATH=/opt/Xilinx/Vivado/2015.4/ids_lite/ISE/lib/lin64
else
    LD_LIBRARY_PATH=/opt/Xilinx/Vivado/2015.4/ids_lite/ISE/lib/lin64:$LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH

HD_PWD='/home/dries/Projects/Basys3/FPGA-Z/FPGA-Z.runs/impl_1'
cd "$HD_PWD"

HD_LOG=runme.log
/bin/touch $HD_LOG

ISEStep="./ISEWrap.sh"
EAStep()
{
    $ISEStep $HD_LOG "$@" >> $HD_LOG 2>&1
    if [ $? -ne 0 ]
    then
        exit
    fi
}

# pre-commands:
/bin/touch .init_design.begin.rst
EAStep vivado -log top.vdi -applog -m64 -messageDb vivado.pb -mode batch -source top.tcl -notrace
repo_name: dries007/Basys3 | path: FPGA-Z/FPGA-Z.runs/impl_1/runme.sh | language: Shell | license: mit | size: 977
#!/bin/bash

# Install terraform
mkdir /tmp/terraform_installation
pushd /tmp/terraform_installation

# Remove any existing .zip files
find . -name "terraform_*linux_amd64*zip*" -delete

# Webscrape the dev's page for the URL of the latest version, and download it
curl --silent https://www.terraform.io/downloads.html | grep -o 'https://releases.hashicorp.com/terraform/.*_linux_amd64.zip' | xargs wget --quiet

# Install it -- by unzipping binaries
mkdir -p $HOME/opt/terraform
unzip terraform_*linux_amd64*zip -d $HOME/opt/terraform

# Copy the binary to $PATH. This may be specific to the way my dotfiles are
# set-up. The below looks for some mention of 'opt/terraform', and if it's not
# found, looks for the line 'export PATH' and inserts it above that.
if ! grep -Fxq 'opt/terraform' $HOME/.bash_profile; then
    line=$(cat $HOME/.bash_profile | grep -n 'export PATH' | grep -o '^[0-9]*')
    sed -i ${line}'i\# Add Terraform\nPATH=$PATH:$HOME/opt/terraform\n' \
        $HOME/.bash_profile
fi

# Outta here
popd
repo_name: brendan-R/dotfiles | path: os/install_terraform.sh | language: Shell | license: mit | size: 1028
#!/bin/tcsh
#
#$ -cwd
#$ -q ib1.q
#$ -N all_dekode_jobs
#$ -pe ib-hydra 8

date

module add mpich2-intel
module add software-2014

nohup python run_dekode_for.py

date
exit 0
repo_name: albalu/dekode | path: submit_run_dekode.sh | language: Shell | license: mit | size: 177
#!/bin/bash

TAG=$1

docker tag lammps/lammps:centos7 lammps/lammps:${TAG}_centos7_openmpi_py3
docker tag lammps/lammps:rockylinux8 lammps/lammps:${TAG}_rockylinux8_openmpi_py3
docker tag lammps/lammps:ubuntu18.04 lammps/lammps:${TAG}_ubuntu18.04_openmpi_py3
docker tag lammps/lammps:ubuntu20.04 lammps/lammps:${TAG}_ubuntu20.04_openmpi_py3
repo_name: lammps/lammps-packages | path: docker/apply_tag.sh | language: Shell | license: mit | size: 339
#!/bin/bash

yum install -y python-devel
pip install virtualenv
pip install virtualenvwrapper

echo "Configure virtualenvwrapper..."
cat >> /home/vagrant/.bashrc << EOF
export WORKON_HOME='/home/vagrant/venvs'
source /usr/bin/virtualenvwrapper.sh
EOF
repo_name: LandRegistry/mint | path: provision.sh | language: Shell | license: mit | size: 255
#!/bin/bash

echo 'Chapter number: '
read n
echo 'Chapter name: '
read m

mkdir CH$n-$m
cd CH$n-$m
mkdir images

sed -e "s/{NUMBER}/$n/" ../Template/Slides.tex > CH$n-Slides.tex
sed -e "s/{NUMBER}/$n/" ../Template/Handout.tex > CH$n-Handout.tex
sed -e "s/{NUMBER}/$n/" ../Template/CMakeLists.txt > CMakeLists.txt
cp ../Template/Content.tex CH$n-Content.tex
repo_name: mvy/TC-INFO-ASR4-UPVM-YS | path: initiate.sh | language: Shell | license: mit | size: 358
#!/bin/bash

# define usage before the getopts loop so the "*)" branch can call it
usage() {
    echo "usage: $0 (-d <download mysql version> | -m <mysql source>) -n <plugin name> [-t <target> -b <build> -s <plugin source>]" >&2
    exit 1
}

while getopts "n:s:t:b:m:d:" opt; do
    case "$opt" in
        n) name="${OPTARG}" ;;
        s) src="${OPTARG}" ;;
        t) target="${OPTARG}" ;;
        b) build="${OPTARG}" ;;
        m) mysrc="${OPTARG}" ;;
        d) ver="${OPTARG}" ;;
        *) usage ;;
    esac
done

src="${src:-./$name}"
target="${target:-.}"
build="${build:-mysql_release}"
mysrc="${mysrc:-$target/mysql-$ver}"
target="$(readlink -e "$target")"

if [[ -z "$name" ]]; then
    usage
fi

if [[ -z "$mysrc" ]]; then
    echo "mysql source does not exist"
    if [[ -z "$ver" ]]; then
        usage
    fi
    file="mysql-$ver.tar.gz"
    if [[ ! -e "$file" ]]; then
        wget "http://downloads.mysql.com/archives/get/file/$file"
    fi
    tar -xzvf "$file"
fi

if [[ ! -e "mysql-$ver/plugin/${name}" ]]; then
    cp -r "$src" "mysql-$ver/plugin/${name}"
fi

cd "$mysrc"

if [[ ! -e plugin/${name}/${name}.so ]]; then
    cmake . -DBUILD_CONFIG="${build}"
    ncpu=$( grep "processor" /proc/cpuinfo | wc -l )
    (( nproc=$ncpu*2 ))
    make -j $nproc "${name}"
fi

my_ver=$(gawk -F'[()" ]+' '$1=="SET"&&$2=="CPACK_PACKAGE_FILE_NAME"{print $3}' "CPackConfig.cmake")
ver="${my_ver#*-}"

cp plugin/${name}/${name}.so "$target/${name}_${ver}.so"
repo_name: xiezhenye/my-plugin-builder | path: plugin_builder.sh | language: Shell | license: mit | size: 1366
set -e

readonly NUMARGS=$#
readonly INFOLDER=$1
readonly OUTFOLDER=$2

usage() {
    echo "USAGE: ./clone.sh base_image_folder out_folder"
}

makeandcopy() {
    mkdir "$OUTFOLDER"
    cp "$INFOLDER"/*-"$VMFILE"* "$OUTFOLDER"/
    cp "$INFOLDER"/*.vmx "$OUTFOLDER"/
}

main() {
    if [ $NUMARGS -le 1 ]
    then
        usage
        exit 1
    fi

    if echo "$INFOLDER" | grep "[[:space:]]"
    then
        echo '$INFOLDER cannot contain spaces!'
        exit 1
    fi

    if echo "$INFOLDER" | grep "/"
    then
        echo '$INFOLDER cannot contain slashes!'
        exit 1
    fi

    VMFILE=`grep -E "(scsi|sata)0\:0\.fileName" "$INFOLDER"/*.vmx | grep -o "[0-9]\{6,6\}"`
    if [ -z "$VMFILE" ]
    then
        echo "No $VMFILE found!"
        exit 1
    fi

    makeandcopy

    # reference snapshot
    SNAPSHOT=`grep -o "[^\"]*.vmsn" "$INFOLDER"/*.vmx || (cd "$INFOLDER" && ls -r *.vmsn) | tail -1`
    if [ -n "$SNAPSHOT" ]
    then
        sed -i -e '/checkpoint.vmState =/s/= .*/= "..\/'$INFOLDER'\/'$SNAPSHOT'"/' $OUTFOLDER/*.vmx
        sed -i -e 's/checkpoint.vmState.readOnly = "FALSE"/checkpoint.vmState.readOnly = "TRUE"/' $OUTFOLDER/*.vmx
    fi

    local fullbasepath=$(readlink -f "$INFOLDER")/

    cd "$OUTFOLDER"/
    sed -i '/sched.swap.derivedName/d' ./*.vmx #delete swap file line, will be auto recreated
    sed -i -e '/displayName =/ s/= .*/= "'$OUTFOLDER'"/' ./*.vmx #Change display name config value

    local escapedpath=$(echo "$fullbasepath" | sed -e 's/[\/&]/\\&/g')
    sed -i -e '/parentFileNameHint=/ s/="/="'"$escapedpath"'/' ./*-"$VMFILE".vmdk #change parent disk path

    # Forces generation of new MAC + DHCP, I think.
    sed -i '/ethernet0.generatedAddress/d' ./*.vmx
    sed -i '/ethernet0.addressType/d' ./*.vmx

    # Forces creation of a fresh UUID for the VM. Obviates the need for the line
    # commented out below:
    #echo 'answer.msg.uuid.altered="I copied it" ' >>./*.vmx
    sed -i '/uuid.location/d' ./*.vmx
    sed -i '/uuid.bios/d' ./*.vmx

    # Things that ghetto-esxi-linked-clones.sh did that we might want. I can only guess at their use/value.
    #sed -i '/scsi0:0.fileName/d' ${STORAGE_PATH}/$FINAL_VM_NAME/$FINAL_VM_NAME.vmx
    #echo "scsi0:0.fileName = \"${STORAGE_PATH}/${GOLDEN_VM_NAME}/${VMDK_PATH}\"" >> ${STORAGE_PATH}/$FINAL_VM_NAME/$FINAL_VM_NAME.vmx
    #sed -i 's/nvram = "'${GOLDEN_VM_NAME}.nvram'"/nvram = "'${FINAL_VM_NAME}.nvram'"/' ${STORAGE_PATH}/$FINAL_VM_NAME/$FINAL_VM_NAME.vmx
    #sed -i 's/extendedConfigFile = "'${GOLDEN_VM_NAME}.vmxf'"/extendedConfigFile = "'${FINAL_VM_NAME}.vmxf'"/' ${STORAGE_PATH}/$FINAL_VM_NAME/$FINAL_VM_NAME.vmx

    # delete machine id
    sed -i '/machine.id/d' *.vmx
    # add machine id
    sed -i -e "\$amachine.id=$OUTFOLDER" *.vmx

    # Register the machine so that it appears in vSphere.
    FULL_PATH=`pwd`/*.vmx
    VMID=`vim-cmd solo/registervm $FULL_PATH`

    # Power on the machine.
    vim-cmd vmsvc/power.on $VMID
}

main
repo_name: pddenhar/esxi-linked-clone | path: clone.sh | language: Shell | license: mit | size: 2832
#!/bin/bash

vagrant provision master
vagrant ssh master -c "sudo salt \\* --state-output=mixed state.highstate"
repo_name: JustinCarmony/fun-with-redis-2 | path: bin/highstate.sh | language: Shell | license: mit | size: 111
#!/bin/sh

wget -q -O - https://deb.nodesource.com/setup_6.x | bash -
repo_name: leodutra/shell-scripts | path: environment/ubuntu-14-04/repository-setup/node-js.sh | language: Shell | license: mit | size: 69
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2910-1
#
# Security announcement date: 2014-04-18 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:54 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - qemu-kvm:0.12.5+dfsg-5+squeeze11
#
# Last versions recommended by security team:
# - qemu-kvm:0.12.5+dfsg-5+squeeze12
#
# CVE List:
# - CVE-2014-0150
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

sudo apt-get install --only-upgrade qemu-kvm=0.12.5+dfsg-5+squeeze12 -y
repo_name: Cyberwatch/cbw-security-fixes | path: Debian_6_(Squeeze)/x86_64/2014/DSA-2910-1.sh | language: Shell | license: mit | size: 645
#!/bin/sh

_COMPOSER=$DIRECTORY"composer.phar"
_AUTOLOAD=$DIRECTORY"vendor/autoload.php"

if [ ! -z $DIRECTORY ]
then
    if [ ! -f $_COMPOSER ]; then
        curl -sS https://getcomposer.org/installer | php -- --install-dir=$DIRECTORY
    fi
    php $_COMPOSER install --dev --working-dir=$DIRECTORY
else
    if [ ! -f $_COMPOSER ]; then
        curl -sS https://getcomposer.org/installer | php
    fi
    php $_COMPOSER install --dev
fi

echo ""
echo ""

if [ -f $_AUTOLOAD ]; then
    echo "-- The autoloader and all project dependencies have been installed --"
    echo ""
fi
repo_name: giovanniramos/PDO4You | path: install.sh | language: Shell | license: mit | size: 550
julia -e "Base.compilecache(\"BinDeps\")" && \ julia -e "Base.compilecache(\"Cairo\")" && \ julia -e "Base.compilecache(\"Calculus\")" && \ julia -e "Base.compilecache(\"Clustering\")" && \ julia -e "Base.compilecache(\"Compose\")" && \ julia -e "Base.compilecache(\"DataArrays\")" && \ julia -e "Base.compilecache(\"DataFrames\")" && \ julia -e "Base.compilecache(\"DataFramesMeta\")" && \ julia -e "Base.compilecache(\"Dates\")" && \ julia -e "Base.compilecache(\"DecisionTree\")" && \ julia -e "Base.compilecache(\"Distributions\")" && \ julia -e "Base.compilecache(\"Distances\")" && \ julia -e "Base.compilecache(\"GLM\")" && \ julia -e "Base.compilecache(\"HDF5\")" && \ julia -e "Base.compilecache(\"HypothesisTests\")" && \ julia -e "Base.compilecache(\"JSON\")" && \ julia -e "Base.compilecache(\"KernelDensity\")" && \ julia -e "Base.compilecache(\"Loess\")" && \ #julia -e "Base.compilecache(\"Lora\")" && \ julia -e "Base.compilecache(\"MLBase\")" && \ julia -e "Base.compilecache(\"MultivariateStats\")" && \ julia -e "Base.compilecache(\"NMF\")" && \ julia -e "Base.compilecache(\"Optim\")" && \ julia -e "Base.compilecache(\"PDMats\")" && \ julia -e "Base.compilecache(\"RDatasets\")" && \ julia -e "Base.compilecache(\"SQLite\")" && \ julia -e "Base.compilecache(\"StatsBase\")" && \ julia -e "Base.compilecache(\"TextAnalysis\")" && \ julia -e "Base.compilecache(\"TimeSeries\")" && \ julia -e "Base.compilecache(\"ZipFile\")" && \ julia -e "Base.compilecache(\"Gadfly\")" julia -e "Base.compilecache(\"MLBase\")" && \ julia -e "Base.compilecache(\"Clustering\")"
QinetiQ-datascience/Docker-Data-Science
Scripts/Julia/install_julia_pkgs_from_src.sh
Shell
mit
1,582
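The precompile script above repeats one julia invocation per package; since the lines differ only in the package name, the same effect can come from a loop. A sketch with the list abbreviated (the commented-out Lora entry is simply left out):

#!/bin/bash
# Precompile each package, stopping on the first failure like the && chain does.
packages=(BinDeps Cairo Calculus Clustering Compose DataArrays DataFrames
          Distributions GLM JSON MLBase StatsBase Gadfly)
for pkg in "${packages[@]}"; do
    julia -e "Base.compilecache(\"$pkg\")" || exit 1
done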
#!/bin/bash -eux # codename of distro LSB_RELEASE=$(lsb_release -sc) # Prepare puppetlabs repo wget http://apt.puppetlabs.com/puppetlabs-release-${LSB_RELEASE}.deb dpkg -i puppetlabs-release-${LSB_RELEASE}.deb apt-get -y update # Install puppet/facter apt-get -y install puppet facter rm -f puppetlabs-release-${LSB_RELEASE}.deb # Install standard modules puppet module install puppetlabs-stdlib
uport/loopin
boxing/alphabox/packer/scripts/puppet.sh
Shell
mit
399
auto-format() { vim $@ +"argdo exec ':norm gg=G' | exec 'call maxmellon#remove_whitespace()'" +q! }
MaxMEllon/.dotfiles
zsh/functions/auto-format.zsh
Shell
mit
102
#!/bin/bash echo /opt/vc/lib > /etc/ld.so.conf ldconfig cp RPi_Cam_Web_Interface/bin/raspimjpeg /opt/vc/bin/ chmod 755 /opt/vc/bin/raspimjpeg if [ ! -e /usr/bin/raspimjpeg ]; then sudo ln -s /opt/vc/bin/raspimjpeg /usr/bin/raspimjpeg fi
droogmic/rpi-cam-web-docker
setup/docker-setup.sh
Shell
mit
241
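The raspimjpeg setup above wraps ln -s in an existence test; ln -sf expresses the same idempotent intent in one call, with the caveat that -f also replaces a regular file at the target, which the -e guard above would not:

#!/bin/bash
# -s: symbolic link, -f: remove an existing destination first, so reruns are harmless.
ln -sf /opt/vc/bin/raspimjpeg /usr/bin/raspimjpeg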
#!/bin/bash

db_name=$1
db_user=$2
osm_data_file=$3

echo "============ Multimodal graph builder ============"
echo "======== Step 1 of 10: Cleanup environment... ========"
mkdir -p tmp
mkdir -p bak
#rm csv/vertices.csv
#rm csv/edges.csv
#rm csv/car_parkings.csv
echo "======== done! ========"

# backup old database if it exists, or create a new one if not
echo "======== Step 2 of 10: Preparing database... ========"
if psql -lqt | cut -d \| -f 1 | grep -w $db_name; then
    # database exists
    pg_dump -h localhost -p 5432 -U $db_user -Fc -b -v -f "./bak/old_$db_name.backup" $db_name
else
    # ruh-roh
    createdb -O $db_user $db_name
    psql -d $db_name -U $db_user -c "CREATE EXTENSION postgis;"
fi
echo "======== done! ========"

# create initial multimodal graph tables including
# modes, switch_types, edges, vertices and switch_points, populate initial data in
# modes and switch_types
echo "======== Step 3 of 10: Preparing multimodal graph tables in database ========"
psql -d $db_name -U $db_user -f prepare_graph_tables.sql
echo "==== Import modes data... ===="
psql -d $db_name -U $db_user -c "\COPY modes (mode_name,mode_id) FROM './csv/modes.csv' WITH CSV HEADER"
echo "==== done! ===="
echo "==== Import switch_types data... ===="
psql -d $db_name -U $db_user -c "\COPY switch_types (type_name,type_id) FROM './csv/switch_types.csv' WITH CSV HEADER"
echo "==== done! ===="

# import shapefiles with overriding the old geometry tables
echo "======== Step 4 of 10: Import OpenStreetMap data... ========"
osm2pgsql -s -l -c -d $db_name -p osm -U $db_user $osm_data_file
psql -d $db_name -U $db_user -f add_primary_key_to_osm_tables.sql
echo "======== done! ========"

echo "======== Step 5 of 10: Import UnitedMaps public transport and utilities data... ========"
for shp_file in ./shp/*.shp
do
    echo "==== Importing $shp_file... ===="
    shp2pgsql -d -s 4326 -W latin1 $shp_file | psql -h localhost -d $db_name -U $db_user
    echo "==== done! ===="
done
echo "======== done! ========"

# generate multimodal graph edges and vertices in csv files
echo "======== Step 6 of 10: Build multimodal graph data in csv files... ========"
#python build_mmgraph.py $osm_data_file
tail -n +2 ./csv/public_transit_vertices.csv >> ./csv/vertices.csv
tail -n +2 ./csv/public_transit_edges.csv >> ./csv/edges.csv
echo "======== done! ========"

echo "======== Step 7 of 10: Import multimodal graph data from csv files to database... ========"
psql -c "TRUNCATE vertices, edges;" -d $db_name -U $db_user
psql -c "\COPY vertices (out_degree,vertex_id,raw_point_id,mode_id,lon,lat) FROM './csv/vertices.csv' WITH CSV HEADER;" -d $db_name -U $db_user
psql -c "\COPY edges (length,speed_factor,mode_id,from_id,to_id,edge_id,link_id,osm_id) FROM './csv/edges.csv' WITH CSV HEADER;" -d $db_name -U $db_user
psql -d $db_name -U $db_user -f import_street_junctions.sql
psql -d $db_name -U $db_user -f import_street_lines.sql
psql -d $db_name -U $db_user -f import_car_parkings.sql
echo "======== done! ========"

#echo "======== Import initial switch points of car_parking type... ========"
#psql -c "\COPY switch_points (cost,is_available,from_mode_id,to_mode_id,type_id,from_vertex_id,to_vertex_id,switch_point_id,ref_poi_id) FROM './csv/switch_points_car_parking.csv' WITH CSV HEADER;" -d $db_name -U $db_user

echo "======== Step 8 of 10: Generating switch points in database, could be fairly time consuming, so please be patient... ========"
echo "==== Creating nearest neighbor finding function in database... ===="
psql -d $db_name -U $db_user -f pgis_nn.sql
echo "==== done! ===="
echo "==== Generating switch points around each car parking... ===="
psql -d $db_name -U $db_user -f gen_car-parking_switch_points.sql
echo "==== done! ===="
echo "==== Generating switch points around each possible temp parking position... ===="
psql -d $db_name -U $db_user -f gen_geo-conn_switch_points.sql
echo "==== done! ===="
echo "==== Generating switch points around each P+R... ===="
psql -d $db_name -U $db_user -f gen_p+r_switch_points.sql
echo "==== done! ===="
echo "==== Generating switch points around each K+R... ===="
psql -d $db_name -U $db_user -f gen_k+r_switch_points.sql
echo "==== done! ===="
echo "==== Generating switch points around each suburban station... ===="
psql -d $db_name -U $db_user -f gen_s-bahn-station_switch_points.sql
echo "==== done! ===="
echo "==== Generating switch points around each underground station... ===="
psql -d $db_name -U $db_user -f gen_u-bahn-station_switch_points.sql
echo "==== done! ===="
echo "==== Generating switch points around each tram station... ===="
psql -d $db_name -U $db_user -f gen_tram-station_switch_points.sql
echo "==== done! ===="

# validate generated multimodal graphs
echo "======== Step 9 of 10: Validating multimodal graph... ========"
psql -d $db_name -U $db_user -f validate_graph.sql
echo "======== done! ========"

# clear temp files
rm tmp/*

# backup this new database
echo "======== Step 10 of 10: Backup the database just built... ========"
pg_dump -h localhost -p 5432 -U $db_user -Fc -b -v -f "./bak/new_$db_name.backup" $db_name
echo "======== done! ========"

echo "============ All done! ============"
tumluliu/mmgraphdb-builder
build_mmrp_db.sh
Shell
mit
5,181
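The graph-builder script above tests for an existing database by grepping psql -lqt output; the same check reads slightly tighter with grep -qw, which keeps the match quiet and whole-word. A sketch under the same variable name:

#!/bin/bash
db_name=$1
# -q: exit status only (no output), -w: match the whole database name.
if psql -lqt | cut -d '|' -f 1 | grep -qw "$db_name"; then
    echo "database $db_name already exists"
fi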
#!/bin/bash date > /etc/box_build_time SSH_USER=${SSH_USERNAME:-vagrant} SSH_PASS=${SSH_PASSWORD:-vagrant} SSH_USER_HOME=${SSH_USER_HOME:-/home/${SSH_USER}} VAGRANT_INSECURE_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key" # Create Vagrant user (if not already present) if ! id -u $SSH_USER >/dev/null 2>&1; then echo "==> Creating $SSH_USER user" /usr/sbin/groupadd $SSH_USER /usr/sbin/useradd $SSH_USER -g $SSH_USER -G sudo -d $SSH_USER_HOME --create-home echo "${SSH_USER}:${SSH_PASS}" | chpasswd fi # Set up sudo echo "==> Giving ${SSH_USER} sudo powers" echo "${SSH_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/$SSH_USER chmod 440 /etc/sudoers.d/$SSH_USER # Fix stdin not being a tty if grep -q -E "^mesg n$" /root/.profile && sed -i "s/^mesg n$/tty -s \\&\\& mesg n/g" /root/.profile; then echo "==> Fixed stdin not being a tty." fi echo "==> Installing vagrant key" mkdir $SSH_USER_HOME/.ssh chmod 700 $SSH_USER_HOME/.ssh cd $SSH_USER_HOME/.ssh # https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub echo "${VAGRANT_INSECURE_KEY}" > $SSH_USER_HOME/.ssh/authorized_keys chmod 600 $SSH_USER_HOME/.ssh/authorized_keys chown -R $SSH_USER:$SSH_USER $SSH_USER_HOME/.ssh
KEAOSolutions/development_environment
packer/vagrant.sh
Shell
mit
1,602
#!/bin/bash prmd combine -m meta.json ./schemata > schema.json
richlab-corp/schema-to-db-json
test/fixtures/create.sh
Shell
mit
63
#!/bin/bash # SAVED SSH AGENT [[ -s "$HOME/.ssh/agent.out" ]] && source ~/.ssh/agent.out # make sure ssh agent is always running if ssh-add -l 2>&1 | grep -q -i -E 'could not open|No such file' || [[ ! -s "$HOME/.ssh/agent.out" ]] ; then eval `ssh-agent` &>/dev/null fi # save ssh agent info echo "export $(env | grep SSH_AUTH_SOCK | head -n 1)" > "$HOME/.ssh/agent.out"
rafecolton/bash-starter-kit
.bash_profile.d/ssh_agent.sh
Shell
mit
376
#!/usr/bin/expect -f spawn twine upload -r pypi dist/*.tar.gz expect "Enter your username:" send "$env(PYPI_USER)\n" expect "Enter your password:" send "$env(PYPI_PASSWORD)\n" interact
srgrr/RoundCreator
upload_pypi.sh
Shell
mit
196
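The expect wrapper above types the PyPI credentials into twine's prompts; twine also reads TWINE_USERNAME and TWINE_PASSWORD from the environment, which sidesteps the pty entirely. A sketch using the same env vars as upload_pypi.sh:

#!/bin/sh
# twine picks these variables up itself; no interactive prompt is shown.
TWINE_USERNAME="$PYPI_USER" TWINE_PASSWORD="$PYPI_PASSWORD" \
    twine upload -r pypi dist/*.tar.gz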
#!/bin/sh
################################################################################
# Title         : generateDocumentationAndDeploy.sh
# Date created  : 2016/02/22
# Notes         : Edited by Stefan Moebius for TurboWavelets.Net project
__AUTHOR__="Jeroen de Bruijn"
# Preconditions:
# - Packages doxygen doxygen-doc doxygen-latex doxygen-gui graphviz
#   must be installed.
# - Doxygen configuration file must have the destination directory empty and
#   source code directory with a $(TRAVIS_BUILD_DIR) prefix.
# - A gh-pages branch should already exist. See below for more info on how to
#   create a gh-pages branch.
#
# Required global variables:
# - TRAVIS_BUILD_NUMBER : The number of the current build.
# - TRAVIS_COMMIT       : The commit that the current build is testing.
# - DOXYFILE            : The Doxygen configuration file.
# - GH_REPO_NAME        : The name of the repository.
# - GH_REPO_REF         : The GitHub reference to the repository.
# - GH_REPO_TOKEN       : Secure token to the github repository.
#
# For information on how to encrypt variables for Travis CI please go to
# https://docs.travis-ci.com/user/environment-variables/#Encrypted-Variables
# or https://gist.github.com/vidavidorra/7ed6166a46c537d3cbd2
# For information on how to create a clean gh-pages branch from the master
# branch, please go to https://gist.github.com/vidavidorra/846a2fc7dd51f4fe56a0
#
# This script will generate Doxygen documentation and push the documentation to
# the gh-pages branch of a repository specified by GH_REPO_REF.
# Before this script is used there should already be a gh-pages branch in the
# repository.
#
################################################################################

################################################################################
##### Setup this script and get the current gh-pages branch.               #####
echo 'Setting up the script...'
# Exit with nonzero exit code if anything fails
set -e

# Create a clean working directory for this script.
mkdir code_docs
cd code_docs

# Get the current gh-pages branch
mkdir source
cd source
git clone https://git@$GH_REPO_REF
cd $GH_REPO_NAME
ls
cd ../..
git clone https://git@$GH_REPO_REF
cd $GH_REPO_NAME
ls
git checkout gh-pages

##### Configure git.
# Set the push default to simple i.e. push only the current branch.
git config --global push.default simple
# Pretend to be a user called Travis CI.
git config user.name "Travis CI"
git config user.email "[email protected]"

# Remove everything currently in the gh-pages branch.
# GitHub is smart enough to know which files have changed and which files have
# stayed the same and will only update the changed files. So the gh-pages branch
# can be safely cleaned, and it is sure that everything pushed later is the new
# documentation.
rm -rf *
cp ../source/$GH_REPO_NAME/TurboWavelets/ TurboWavelets -rf
cp ../source/$GH_REPO_NAME/Doxyfile Doxyfile
cp ../source/$GH_REPO_NAME/graphics/turbowavelets-logo-mini.png turbowavelets-logo-mini.png
ls

# Need to create a .nojekyll file to allow filenames starting with an underscore
# to be seen on the gh-pages site. Therefore creating an empty .nojekyll file.
# Presumably this is only needed when the SHORT_NAMES option in Doxygen is set
# to NO, which it is by default. So creating the file just in case.
echo "" > .nojekyll

################################################################################
##### Generate the Doxygen code documentation and log the output.          #####
echo 'Generating Doxygen code documentation...'
# Redirect both stderr and stdout to the log file AND the console.
doxygen $DOXYFILE 2>&1 | tee doxygen.log

rm TurboWavelets -rf

################################################################################
##### Upload the documentation to the gh-pages branch of the repository.   #####
# Only upload if Doxygen successfully created the documentation.
# Check this by verifying that the html directory and the file html/index.html
# both exist. This is a good indication that Doxygen did its work.
if [ -d "html" ] && [ -f "html/index.html" ]; then
    echo 'Uploading documentation to the gh-pages branch...'
    # Add everything in this directory (the Doxygen code documentation) to the
    # gh-pages branch.
    # GitHub is smart enough to know which files have changed and which files have
    # stayed the same and will only update the changed files.
    git add --all

    # Commit the added files with a title and description containing the Travis CI
    # build number and the GitHub commit reference that issued this build.
    git commit -m "Deploy code docs to GitHub Pages Travis build: ${TRAVIS_BUILD_NUMBER}" -m "Commit: ${TRAVIS_COMMIT}"

    # Force push to the remote gh-pages branch.
    # The output is redirected to /dev/null to hide any sensitive credential data
    # that might otherwise be exposed.
    git push --force "https://${GH_REPO_TOKEN}@${GH_REPO_REF}" > /dev/null 2>&1
else
    echo '' >&2
    echo 'Warning: No documentation (html) files have been found!' >&2
    echo 'Warning: Not going to push the documentation to GitHub!' >&2
    exit 1
fi
codeprof/TurboWavelets.Net
generateDocumentationAndDeploy.sh
Shell
mit
5,154
#!/bin/ash

scriptpath="$( cd "$(dirname "$0")" ; pwd -P )"

if [ "$#" -ne 1 ] || ! [ -f "$1" ]; then
  echo "Usage: $0 logfile" >&2
  exit 1
fi

logfile=$1

# Load config
source $scriptpath/config.sh

mailsend -f $mailsender -t $recipientshourly -smtp $mailsmtp -startssl -user $mailuser -auth -pass $mailpassword -port 587 -sub "Gargoyle Hourly $(date)" -mime-type "text/plain" -msg-body $logfile
hb128/gargoyle-flexible-quotas
send-mail-hourly.sh
Shell
mit
408
#!/bin/bash #SBATCH --partition=mono #SBATCH --ntasks=1 #SBATCH --time=4-0:00 #SBATCH --mem-per-cpu=8000 #SBATCH -J Deep-DAE_MLP_5_lin_bin_DAE_relu #SBATCH -e Deep-DAE_MLP_5_lin_bin_DAE_relu.err.txt #SBATCH -o Deep-DAE_MLP_5_lin_bin_DAE_relu.out.txt source /etc/profile.modules module load gcc module load matlab cd ~/deepLearn && srun ./deepFunction 5 'DAE' 'MLP' '128 1000 1000 1000 10' '0 1 1 1 1' '5_lin_bin' 'DAE_relu' "'iteration.n_epochs', 'learning.lrate', 'use_tanh', 'noise.drop', 'noise.level', 'rica.cost', 'cae.cost'" '200 1e-3 2 0.1 0.1 0 0' "'iteration.n_epochs', 'use_tanh'" '200 2'
aciditeam/matlab-ts
jobs/deepJobs_DAE_MLP_5_lin_bin_DAE_relu.sh
Shell
mit
611
#combine, sort and index a bam file samtools merge -f combined.bam */accepted_hits.bam samtools sort -f combined.bam combined.sorted samtools index combined.sorted.bam #filter a bam file for a particular chromosome set (see http://seqanswers.com/forums/showthread.php?t=6892) samtools view -h *sorted.bam | awk '$3=="chr1" || $3=="chr3" || /^@/' | samtools view -Sb -> onlychr1_3.bam.sorted.bam #calculated multi-mapped reads from a bam file (see http://seqanswers.com/forums/showpost.php?p=60498&postcount=4) samtools view -F 4 file.bam | awk '{printf $1"\n"}' | sort | uniq -d | wc -l
davebridges/biomolecule-scripts
RNAseq/Shell/samtools.sh
Shell
cc0-1.0
589
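The multi-mapped-read snippet above counts records with an awk/sort/uniq pipeline; for plain totals, samtools view can count matching records itself via -c (flag semantics per the samtools view manual):

#!/bin/bash
# -c prints only the record count; -F 4 excludes unmapped reads, -f 4 selects them.
samtools view -c -F 4 combined.sorted.bam   # mapped reads
samtools view -c -f 4 combined.sorted.bam   # unmapped reads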
#!/bin/csh echo "-------------------" echo "- SH.DESIGN.Bash -" echo "- KO -" echo "-------------------" echo C-Shell script
dupuisa/i-CodeCNES
shell-rules/src/test/resources/SH/DESIGN/Bash/error.sh
Shell
epl-1.0
139
#!/bin/bash source $(dirname $0)/vars.sh test -f "$TWISTED_PID" && kill $(< "$TWISTED_PID") && rm -f "$TWISTED_PID"
lanjelot/twisted-honeypots
stop.sh
Shell
gpl-2.0
118
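A stop script like the one above signals whatever PID the pidfile holds; checking liveness first with kill -0 avoids errors on a stale file. A sketch reusing stop.sh's variables:

#!/bin/bash
source $(dirname $0)/vars.sh
# kill -0 sends no signal; it only tests whether the PID can be signalled.
if [ -f "$TWISTED_PID" ] && kill -0 "$(< "$TWISTED_PID")" 2>/dev/null; then
    kill "$(< "$TWISTED_PID")"
fi
rm -f "$TWISTED_PID"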
# function.inc.bash - funkybackup global functions
#
# For more details about the script and to contact me, go to
# http://www.funkytwig.com/blog/funkybackup
#
# (c) 2015 Ben Edwards (funkytwig.com)
# You are allowed to use the script if you keep this header
# and do not redistribute it, just send people to the URL
#
# Ver  Comments
# 0.5  Initial version

function log {
  log_text="$logstamp $basename $1"
  if [ $interactive -eq 1 ]; then
    echo $log_text
  fi
  echo "$logstamp $basename($$) $1" >> $logfile
}

function write_error_date {
  echo $today > $lasterror
}

function error_today {
  # ***** not used anywhere yet *****
  if [ -s $lasterror ]; then
    read -r line < $lasterror
  else
    line=""  # will make next bit echo N, (no errors today)
  fi
  if [ "$line" = "$today" ]; then
    echo "Y"
  else
    echo "N"
  fi
}

function log_file {
  while read line
  do
    log "$line"
  done < "$1"
}

function run_cmd {
  tmp_log=/tmp/$$_cmd.log
  log "$1"
  $1 > $tmp_log 2>&1
  ret=$?
  if [ -f $tmp_log ]; then
    log_file $tmp_log
  fi
  if [ $ret -ne 0 ]; then
    log "$ret : $1"
  fi
  return $ret
}

function send_email {
  subject="$1"
  message="$2"
  file="$3"
  temp_file=/tmp/$0_$$.send_email.txt
  log "Sending subject=$subject, message=$message, file=$file"
  echo $message >> $temp_file
  if [ ! -z $file ]; then
    # $file set
    echo >> $temp_file
    cat $file >> $temp_file
  fi
  mail -s "$subject" $to_email < $temp_file
  file=""  # in case this is called twice
}

function error {
  log "ERROR $1"
  # to avoid lots of error emails see if an email has already been sent today
  send_email "Error from $0" "$1"
  write_error_date
}
funkytwig/funkybackup
function.inc.bash
Shell
gpl-2.0
1,713
# # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is executed by build/envsetup.sh, and can use anything # defined in envsetup.sh. # # In particular, you can add lunch options with the add_lunch_combo # function: add_lunch_combo generic-eng add_lunch_combo cm_b8080h-userdebug add_lunch_combo cm_b8080h-eng
zyrill/android_device_lenovo_b8080h
vendorsetup.sh
Shell
gpl-2.0
870
#!/bin/bash ### USAGE: ## installs printer ${PRINTER_NAME} ## with driver ${PRINTER_DRIVER} ## and ${PRINTER_LOCATION} ## and ${PRINTER_CONNECTION} ## ## AND should be named after printer modell ## example: installPrinter_Brother_HL-7050.sh ## ## BECAUSE, enables installation of new printer with new name ## without problems #### START DEFINE PARAMETER PRINTER_NAME="Raum-216-Printer" PRINTER_LOCATION="Drucker im Raum 216" PRINTER_CONNECTION="socket://r216pr01" ## HELP to find printer modell: ## Find Print Driver with: ## >> lpinfo --make-and-model 'Lexmark' -m PRINTER_DRIVER="drv:///hpcups.drv/hp-laserjet_p2055dn-pcl3.ppd" #### END DEFINE PARAMETER ## check if printer ${PRINTER_NAME} already installed ## remove, if already installed, and enable installation of new one if [ "$(lpstat -v | grep ${PRINTER_NAME})" != "" ]; then lpadmin -x ${PRINTER_NAME} fi ## Options in lpadmin declared: # -E Enables the destination and accepts jobs # -p Specifies a PostScript Printer Description file to use with the printer. # -v device-uri # -m Sets a standard System V interface script or PPD file for the printer from the model directory or using one of the driver interfaces # -L Provides a textual location of the destination. # Note the two -E options. The first one (before -p) forces encryption when connecting to the server. The last one enables the destination and starts accepting jobs. lpadmin -E -p "${PRINTER_NAME}" -v ${PRINTER_CONNECTION} -m ${PRINTER_DRIVER} -L "${PRINTER_LOCATION}" -E # set as Default Printer lpadmin -d ${PRINTER_NAME}
edvapp/autoinstall
laus/scriptsForClasses/APP/R216/010-installPrinter-HP_LaserJet_p2055dn.sh
Shell
gpl-2.0
1,575
#!/bin/bash export PATH=/data/apps/bin:$PATH cd /data/Lacuna-Server-Open/bin perl generate_docs.pl > /dev/null killall -HUP start_server
plainblack/Lacuna-Server-Open
bin/restart_starman.sh
Shell
gpl-2.0
137
#!/bin/bash PROJECTS=~/projects GITHUB_HOME="https://raw.githubusercontent.com/lorenzocipriani/RaspberryPI" echo -e "\n\nInstall PicoBorg Reverse" if [ ! -d "${PROJECTS}/picoborgrev" ] then mkdir -p $PROJECTS/picoborgrev fi cd $PROJECTS/picoborgrev wget http://www.piborg.org/downloads/picoborgrev/examples.zip unzip examples.zip chmod +x install.sh ./install.sh cd ~ echo -e "\n\nInstall HC-SR04 Ultrasonic Range Sensor" if [ ! -d "${PROJECTS}/rangesensor_hc-sr04" ] then mkdir -p $PROJECTS/rangesensor_hc-sr04 fi wget -O $PROJECTS/rangesensor_hc-sr04/range_sensor.py $GITHUB_HOME/master/projects/rangesensor_hc-sr04/range_sensor.py echo -e "\n\nInstall HC-SR501 PIR Infrared Motion Sensor" if [ ! -d "${PROJECTS}/motionsensor_hc-sr501" ] then mkdir -p $PROJECTS/motionsensor_hc-sr501 fi wget -O $PROJECTS/motionsensor_hc-sr501/motion_sensor.py $GITHUB_HOME/master/projects/motionsensor_hc-sr501/motion_sensor.py
lorenzocipriani/RaspberryPI
config/ibm-coderdojo-projects.sh
Shell
gpl-2.0
924
# Openstack icehouse installation script on ubuntu 14.04 # by kasidit chanchio # vasabilab, dept of computer science, # Thammasat University, Thailand # # Copyright 2014 Kasidit Chanchio # # run with sudo or as root. # #!/bin/bash -x cd $HOME/OPSInstaller/controller pwd # echo "manual" > /etc/init/keystone.override apt-get -y install keystone apache2 libapache2-mod-wsgi cp files/keystone.conf /etc/keystone/keystone.conf echo "su -s /bin/sh -c \"keystone-manage db_sync\" keystone" su -s /bin/sh -c "keystone-manage db_sync" keystone # echo keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone # cp files/apache2.conf /etc/apache2/apache2.conf cp files/wsgi-keystone.conf /etc/apache2/sites-available/wsgi-keystone.conf ln -s /etc/apache2/sites-available/wsgi-keystone.conf /etc/apache2/sites-enabled service apache2 restart rm -f /var/lib/keystone/keystone.db
kasidit/openstack-mitaka-installer
documents/OPSInstaller.example/controller/exe-stage09-SUDO-keystone.sh
Shell
gpl-2.0
978
convert images/OCS-514-A.png -crop 1560x1411+0+0 +repage images/OCS-514-A-0.png convert images/OCS-514-A.png -crop 1560x557+0+1414 +repage images/OCS-514-A-1.png convert images/OCS-514-A.png -crop 1560x693+0+1976 +repage images/OCS-514-A-2.png convert images/OCS-514-A.png -crop 1560x453+0+2690 +repage images/OCS-514-A-3.png convert images/OCS-514-A.png -crop 1560x305+0+3170 +repage images/OCS-514-A-4.png convert images/OCS-514-A.png -crop 1560x385+0+3488 +repage images/OCS-514-A-5.png convert images/OCS-514-A.png -crop 1560x709+0+3884 +repage images/OCS-514-A-6.png # #/OCS-514.png convert images/OCS-514-B.png -crop 1519x863+0+0 +repage images/OCS-514-B-0.png convert -append images/OCS-514-A-6.png images/OCS-514-B-0.png images/OCS-514-A-6.png rm images/OCS-514-B-0.png convert images/OCS-514-B.png -crop 1519x314+0+870 +repage images/OCS-514-B-1.png convert images/OCS-514-B.png -crop 1519x476+0+1177 +repage images/OCS-514-B-2.png convert images/OCS-514-B.png -crop 1519x389+0+1666 +repage images/OCS-514-B-3.png convert images/OCS-514-B.png -crop 1519x71+0+2062 +repage images/OCS-514-B-4.png convert images/OCS-514-B.png -crop 1519x1413+0+2142 +repage images/OCS-514-B-5.png convert images/OCS-514-B.png -crop 1519x459+0+3568 +repage images/OCS-514-B-6.png convert images/OCS-514-B.png -crop 1519x553+0+4046 +repage images/OCS-514-B-7.png # #/OCS-514.png
jonnymwalker/Staroslavjanskij-Slovar
scripts/findindents.OCS-514.sh
Shell
gpl-2.0
1,367
python3 runmeka.py Arts1500 -f 5 -c 26 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/arts.log python3 runmeka.py birds -c 19 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/birds.log python3 runmeka.py Business1500 -c 30 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/business.log python3 runmeka.py CAL500 -c 174 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/cal.log python3 runmeka.py emotions -c 6 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/emotions.log python3 runmeka.py flags -c 7 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/flags.log python3 runmeka.py Health1500 -c 32 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/health.log python3 runmeka.py human3106 -c 14 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/human.log python3 runmeka.py plant978 -c 12 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/plant.log python3 runmeka.py scene -c 6 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/scene.log python3 runmeka.py yeast -c 14 -f 5 -mc meka.classifiers.multilabel.BR -o ./exp/meka/nbbr/ > ./exp/meka/nbbr/yeast.log python3 runmeka.py Arts1500 -c 26 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/arts.log python3 runmeka.py birds -c 19 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/birds.log python3 runmeka.py Business1500 -c 30 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/business.log python3 runmeka.py CAL500 -c 174 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/cal.log python3 runmeka.py emotions -c 6 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/emotions.log python3 runmeka.py flags -c 7 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/flags.log python3 runmeka.py Health1500 -c 32 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/health.log python3 runmeka.py human3106 -c 14 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/human.log python3 runmeka.py plant978 -c 12 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/plant.log python3 runmeka.py scene -c 6 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/scene.log python3 runmeka.py yeast -c 14 -f 5 -mc meka.classifiers.multilabel.CC -o ./exp/meka/nbcc/ > ./exp/meka/nbcc/yeast.log
nicoladimauro/dcsn
scripts/PGM16/runmekaNB.sh
Shell
gpl-2.0
2,676
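Every line in the meka batch above is the same invocation with a dataset name and class count swapped in, so the pairs can be tabulated and looped; an abbreviated sketch (log-file naming simplified here to the lowercased dataset name, unlike the hand-shortened names above):

#!/bin/bash
datasets="Arts1500:26 birds:19 emotions:6 flags:7 scene:6 yeast:14"   # name:classcount, list abbreviated
for clf in BR CC; do
    out="./exp/meka/nb${clf,,}/"                  # nbbr/ or nbcc/
    for pair in $datasets; do
        name=${pair%%:*}; c=${pair##*:}
        python3 runmeka.py "$name" -c "$c" -f 5 \
            -mc "meka.classifiers.multilabel.$clf" -o "$out" > "${out}${name,,}.log"
    done
done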
# ---------------------------------------------------------------------------- # Calcula porcentagens. # Se informado um número, mostra sua tabela de porcentagens. # Se informados dois números, mostra a porcentagem relativa entre eles. # Se informados um número e uma porcentagem, mostra o valor da porcentagem. # Se informados um número e uma porcentagem com sinal, calcula o novo valor. # # Uso: zzporcento valor [valor|[+|-]porcentagem%] # Ex.: zzporcento 500 # Tabela de porcentagens de 500 # zzporcento 500.0000 # Tabela para número fracionário (.) # zzporcento 500,0000 # Tabela para número fracionário (,) # zzporcento 5.000,00 # Tabela para valor monetário # zzporcento 500 25 # Mostra a porcentagem de 25 para 500 (5%) # zzporcento 500 1000 # Mostra a porcentagem de 1000 para 500 (200%) # zzporcento 500,00 2,5% # Mostra quanto é 2,5% de 500,00 # zzporcento 500,00 +2,5% # Mostra quanto é 500,00 + 2,5% # # Autor: Aurelio Marinho Jargas, www.aurelio.net # Desde: 2008-12-11 # Versão: 6 # Licença: GPL # Requisitos: zztestar # Tags: número, cálculo # ---------------------------------------------------------------------------- zzporcento () { zzzz -h porcento "$1" && return local i porcentagem sinal local valor1="$1" local valor2="$2" local escala=0 local separador=',' local tabela='200 150 125 100 90 80 75 70 60 50 40 30 25 20 15 10 9 8 7 6 5 4 3 2 1' # Verificação dos parâmetros test -n "$1" || { zztool -e uso porcento; return 1; } # Remove os pontos dos dinheiros para virarem fracionários (1.234,00 > 1234,00) zztestar dinheiro "$valor1" && valor1=$(echo "$valor1" | sed 's/\.//g') zztestar dinheiro "$valor2" && valor2=$(echo "$valor2" | sed 's/\.//g') ### Vamos analisar o primeiro valor # Número fracionário (1.2345 ou 1,2345) if zztestar numero_fracionario "$valor1" then separador=$(echo "$valor1" | tr -d 0-9) escala=$(echo "$valor1" | sed 's/.*[.,]//') escala="${#escala}" # Sempre usar o ponto como separador interno (para os cálculos) valor1=$(echo "$valor1" | sed 'y/,/./') # Número inteiro ou erro else zztool -e testa_numero "$valor1" || return 1 fi ### Vamos analisar o segundo valor # O segundo argumento é uma porcentagem if test $# -eq 2 && zztool grep_var % "$valor2" then # O valor da porcentagem é guardado sem o caractere % porcentagem=$(echo "$valor2" | tr -d %) # Sempre usar o ponto como separador interno (para os cálculos) porcentagem=$(echo "$porcentagem" | sed 'y/,/./') # Há um sinal no início? if test "${porcentagem#[+-]}" != "$porcentagem" then sinal=$(printf %c $porcentagem) # pega primeiro char porcentagem=${porcentagem#?} # remove primeiro char fi # Porcentagem fracionada if zztestar numero_fracionario "$porcentagem" then # Se o valor é inteiro (escala=0) e a porcentagem fracionária, # é preciso forçar uma escala para que o resultado apareça correto. test $escala -eq 0 && escala=2 valor1="$valor1.00" # Porcentagem inteira ou erro elif ! zztool testa_numero "$porcentagem" then zztool erro "O valor da porcentagem deve ser um número. Exemplos: 2 ou 2,5." return 1 fi # O segundo argumento é um número elif test $# -eq 2 then # Ao mostrar a porcentagem entre dois números, a escala é fixa escala=2 # O separador do segundo número é quem "manda" na saída # Sempre usar o ponto como separador interno (para os cálculos) # Número fracionário if zztestar numero_fracionario "$valor2" then separador=$(echo "$valor2" | tr -d 0-9) valor2=$(echo "$valor2" | sed 'y/,/./') # Número normal ou erro else zztool -e testa_numero "$valor2" || return 1 fi fi # Ok. Dados coletados, analisados e formatados. 
Agora é hora dos cálculos. # Mostra tabela if test $# -eq 1 then for i in $tabela do printf "%s%%\t%s\n" $i $(echo "scale=$escala; $valor1*$i/100" | bc) done # Mostra porcentagem elif test $# -eq 2 then # Mostra a porcentagem relativa entre dois números if ! zztool grep_var % "$valor2" then echo "scale=$escala; $valor2*100/$valor1" | bc | sed 's/$/%/' # valor + n% é igual a… elif test "$sinal" = '+' then echo "scale=$escala; $valor1+$valor1*$porcentagem/100" | bc # valor - n% é igual a… elif test "$sinal" = '-' then echo "scale=$escala; $valor1-$valor1*$porcentagem/100" | bc # n% do valor é igual a… else echo "scale=$escala; $valor1*$porcentagem/100" | bc ### Saída antiga, uma mini tabelinha # printf "%s%%\t%s\n" "+$porcentagem" $(echo "scale=$escala; $valor1+$valor1*$porcentagem/100" | bc) # printf "%s%%\t%s\n" 100 "$valor1" # printf "%s%%\t%s\n" "-$porcentagem" $(echo "scale=$escala; $valor1-$valor1*$porcentagem/100" | bc) # echo # printf "%s%%\t%s\n" "$porcentagem" $(echo "scale=$escala; $valor1*$porcentagem/100" | bc) # # | sed "s/\([^0-9]\)\./\10./ ; s/^\./0./; y/./$separador/" fi fi | # Assegura 0.123 (em vez de .123) e restaura o separador original sed "s/^\./0./; y/./$separador/" }
faustovaz/funcoeszz
zz/zzporcento.sh
Shell
gpl-2.0
5,134
#!/bin/bash # # Test the capture engine of the Wireshark tools # # $Id: suite-capture.sh 43536 2012-06-28 22:56:06Z darkjames $ # # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 2005 Ulf Lamping # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, writeto the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # common exit status values EXIT_OK=0 EXIT_COMMAND_LINE=1 EXIT_ERROR=2 WIRESHARK_CMD="$WIRESHARK -k" capture_test_output_print() { wait for f in "$@"; do if [[ -f "$f" ]]; then printf " --> $f\n" cat "$f" printf "\n" fi done } traffic_gen_ping() { # Generate some traffic for quiet networks. # This will have to be adjusted for non-Windows systems. # the following will run in the background and return immediately { date for (( x=28; x<=58; x++ )) # in effect: number the packets do # How does ping _not_ have a standard set of arguments? case $WS_SYSTEM in Windows) ping -n 1 -l $x www.wireshark.org ;; SunOS) /usr/sbin/ping www.wireshark.org $x 1 ;; *) # *BSD, Linux ping -c 1 -s $x www.wireshark.org ;; esac sleep 1 done date } > ./testout_ping.txt 2>&1 & } ping_cleanup() { wait rm -f ./testout_ping.txt } # capture exactly 10 packets capture_step_10packets() { if [ $SKIP_CAPTURE -ne 0 ] ; then test_step_skipped return fi traffic_gen_ping date > ./testout.txt $DUT -i $TRAFFIC_CAPTURE_IFACE $TRAFFIC_CAPTURE_PROMISC \ -w ./testout.pcap \ -c 10 \ -a duration:$TRAFFIC_CAPTURE_DURATION \ -f icmp \ >> ./testout.txt 2>&1 RETURNVALUE=$? date >> ./testout.txt if [ ! $RETURNVALUE -eq $EXIT_OK ]; then echo capture_test_output_print ./testout.txt # part of the Prerequisite checks # wrong interface ? output the possible interfaces $TSHARK -D test_step_failed "exit status of $DUT: $RETURNVALUE" return fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then capture_test_output_print ./testout.txt test_step_failed "No output file!" return fi # ok, we got a capture file, does it contain exactly 10 packets? $CAPINFOS ./testout.pcap > ./testout2.txt grep -Ei 'Number of packets:[[:blank:]]+10' ./testout2.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo $TSHARK -ta -r ./testout.pcap >> ./testout2.txt capture_test_output_print ./testout_ping.txt ./testout.txt ./testout2.txt # part of the Prerequisite checks # probably wrong interface, output the possible interfaces $TSHARK -D test_step_failed "No or not enough traffic captured. Probably the wrong interface: $TRAFFIC_CAPTURE_IFACE!" fi } # capture exactly 10 packets using "-w -" (piping to stdout) capture_step_10packets_stdout() { if [ $SKIP_CAPTURE -ne 0 ] ; then test_step_skipped return fi traffic_gen_ping date > ./testout.txt $DUT -i $TRAFFIC_CAPTURE_IFACE $TRAFFIC_CAPTURE_PROMISC \ -c 10 \ -a duration:$TRAFFIC_CAPTURE_DURATION \ -w - \ -f icmp \ > ./testout.pcap 2>>./testout.txt RETURNVALUE=$? date >> ./testout.txt if [ ! 
$RETURNVALUE -eq $EXIT_OK ]; then echo capture_test_output_print ./testout.txt $TSHARK -D test_step_failed "exit status of $DUT: $RETURNVALUE" return fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then test_step_failed "No output file!" return fi # ok, we got a capture file, does it contain exactly 10 packets? $CAPINFOS ./testout.pcap > ./testout2.txt 2>&1 grep -Ei 'Number of packets:[[:blank:]]+10' ./testout2.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo capture_test_output_print ./testout.txt ./testout2.txt $TSHARK -D test_step_failed "No or not enough traffic captured. Probably the wrong interface: $TRAFFIC_CAPTURE_IFACE!" fi } # capture packets via a fifo capture_step_fifo() { mkfifo 'fifo' (cat "${CAPTURE_DIR}dhcp.pcap"; sleep 1; tail -c +25 "${CAPTURE_DIR}dhcp.pcap") > fifo & $DUT -i fifo $TRAFFIC_CAPTURE_PROMISC \ -w ./testout.pcap \ -a duration:$TRAFFIC_CAPTURE_DURATION \ > ./testout.txt 2>&1 RETURNVALUE=$? rm 'fifo' if [ ! $RETURNVALUE -eq $EXIT_OK ]; then capture_test_output_print ./testout.txt test_step_failed "exit status of $DUT: $RETURNVALUE" return fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then test_step_failed "No output file!" return fi # ok, we got a capture file, does it contain exactly 8 packets? $CAPINFOS ./testout.pcap > ./testout.txt grep -Ei 'Number of packets:[[:blank:]]+8' ./testout.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo capture_test_output_print ./testout.txt test_step_failed "No or not enough traffic captured." fi } # capture packets via a fifo capture_step_stdin() { CONSOLE_LOG_ARGS="" if [ "$DUT" == "$WIRESHARK_CMD" -a "$WS_SYSTEM" == "Windows" ] ; then CONSOLE_LOG_ARGS="-o console.log.level:127" fi (cat "${CAPTURE_DIR}dhcp.pcap"; sleep 1; tail -c +25 "${CAPTURE_DIR}dhcp.pcap") | \ $DUT -i - $TRAFFIC_CAPTURE_PROMISC \ -w ./testout.pcap \ -a duration:$TRAFFIC_CAPTURE_DURATION \ $CONSOLE_LOG_ARGS \ > ./testout.txt 2> ./testerr.txt RETURNVALUE=$? if [ ! $RETURNVALUE -eq $EXIT_OK ]; then capture_test_output_print ./testout.txt ./testerr.txt ./dumpcap_debug_log.tmp test_step_failed "Exit status of $DUT: $RETURNVALUE" return fi if [ -n "$CONSOLE_LOG_ARGS" ] ; then grep "Wireshark is up and ready to go" ./testout.txt > /dev/null 2>&1 if [ $? -ne 0 ]; then test_step_failed "No startup message!" fi grep "Capture started" ./testerr.txt > /dev/null 2>&1 if [ $? -ne 0 ]; then test_step_failed "No capture started message!" fi grep "Capture stopped" ./testerr.txt > /dev/null 2>&1 if [ $? -ne 0 ]; then test_step_failed "No capture stopped message!" fi fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then test_step_failed "No output file!" return fi # ok, we got a capture file, does it contain exactly 8 packets? $CAPINFOS ./testout.pcap > ./testout.txt grep -Ei 'Number of packets:[[:blank:]]+8' ./testout.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo capture_test_output_print ./testout.txt test_step_failed "No or not enough traffic captured." fi } # capture exactly 2 times 10 packets (multiple files) capture_step_2multi_10packets() { if [ $SKIP_CAPTURE -ne 0 ] ; then test_step_skipped return fi traffic_gen_ping date > ./testout.txt $DUT -i $TRAFFIC_CAPTURE_IFACE $TRAFFIC_CAPTURE_PROMISC \ -w ./testout.pcap \ -c 10 \ -a duration:$TRAFFIC_CAPTURE_DURATION \ -f icmp \ >> ./testout.txt 2>&1 RETURNVALUE=$? date >> ./testout.txt if [ ! 
$RETURNVALUE -eq $EXIT_OK ]; then echo capture_test_output_print ./testout.txt # part of the Prerequisite checks # probably wrong interface, output the possible interfaces $TSHARK -D test_step_failed "exit status of $DUT: $RETURNVALUE" return fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then test_step_failed "No output file!" return fi # ok, we got a capture file, does it contain exactly 10 packets? $CAPINFOS ./testout.pcap > ./testout.txt grep -Ei 'Number of packets:[[:blank:]]+10' ./testout.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo capture_test_output_print ./testout.txt test_step_failed "Probably the wrong interface (no traffic captured)!" fi } # capture with a very unlikely read filter, packets must be zero afterwards capture_step_read_filter() { if [ $SKIP_CAPTURE -ne 0 ] ; then test_step_skipped return fi traffic_gen_ping # valid, but very unlikely filter date > ./testout.txt $DUT -i $TRAFFIC_CAPTURE_IFACE $TRAFFIC_CAPTURE_PROMISC \ -w ./testout.pcap \ -a duration:$TRAFFIC_CAPTURE_DURATION \ -R 'dcerpc.cn_call_id==123456' \ -c 10 \ -f icmp \ >> ./testout.txt 2>&1 RETURNVALUE=$? date >> ./testout.txt if [ ! $RETURNVALUE -eq $EXIT_OK ]; then echo capture_test_output_print ./testout.txt # part of the Prerequisite checks # wrong interface ? output the possible interfaces $TSHARK -D test_step_failed "exit status: $RETURNVALUE" return fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then test_step_failed "No output file!" return fi # ok, we got a capture file, does it contain exactly 0 packets? $CAPINFOS ./testout.pcap > ./testout.txt grep -Ei 'Number of packets:[[:blank:]]+0' ./testout.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo capture_test_output_print ./testout.txt test_step_failed "Capture file should contain zero packets!" fi } # capture with a snapshot length capture_step_snapshot() { if [ $SKIP_CAPTURE -ne 0 ] ; then test_step_skipped return fi traffic_gen_ping # capture with a snapshot length of 68 bytes for $TRAFFIC_CAPTURE_DURATION seconds # this should result in no packets greater than 68 bytes date > ./testout.txt $DUT -i $TRAFFIC_CAPTURE_IFACE $TRAFFIC_CAPTURE_PROMISC \ -w ./testout.pcap \ -s 68 \ -a duration:$TRAFFIC_CAPTURE_DURATION \ -f icmp \ >> ./testout.txt 2>&1 RETURNVALUE=$? date >> ./testout.txt if [ ! $RETURNVALUE -eq $EXIT_OK ]; then echo capture_test_output_print ./testout.txt # part of the Prerequisite checks # wrong interface ? output the possible interfaces $TSHARK -D test_step_failed "exit status: $RETURNVALUE" return fi # we should have an output file now if [ ! -f "./testout.pcap" ]; then test_step_failed "No output file!" return fi # use tshark to filter out all packets, which are larger than 68 bytes $TSHARK -r ./testout.pcap -w ./testout2.pcap -R 'frame.cap_len>68' > ./testout.txt 2>&1 if [ $? -ne 0 ]; then echo capture_test_output_print ./testout.txt test_step_failed "Problem running TShark!" return fi # ok, we got a capture file, does it contain exactly 0 packets? $CAPINFOS ./testout2.pcap > ./testout.txt grep -Ei 'Number of packets:[[:blank:]]+0' ./testout.txt > /dev/null if [ $? -eq 0 ]; then test_step_ok else echo capture_test_output_print ./testout.txt test_step_failed "Capture file should contain zero packets!" return fi } wireshark_capture_suite() { # k: start capture immediately # WIRESHARK_QUIT_AFTER_CAPTURE needs to be set. DUT="$WIRESHARK_CMD" test_step_add "Capture 10 packets" capture_step_10packets # piping to stdout doesn't work with Wireshark and capturing! 
#test_step_add "Capture 10 packets using stdout: -w -" capture_step_10packets_stdout if [ $TEST_FIFO ]; then test_step_add "Capture via fifo" capture_step_fifo fi test_step_add "Capture via stdin" capture_step_stdin # read filter doesn't work with Wireshark and capturing! #test_step_add "Capture read filter (${TRAFFIC_CAPTURE_DURATION}s)" capture_step_read_filter test_step_add "Capture snapshot length 68 bytes (${TRAFFIC_CAPTURE_DURATION}s)" capture_step_snapshot } tshark_capture_suite() { DUT=$TSHARK test_step_add "Capture 10 packets" capture_step_10packets test_step_add "Capture 10 packets using stdout: -w -" capture_step_10packets_stdout if [ $TEST_FIFO ]; then test_step_add "Capture via fifo" capture_step_fifo fi test_step_add "Capture via stdin" capture_step_stdin # tshark now using dumpcap for capturing, read filters won't work by definition #test_step_add "Capture read filter (${TRAFFIC_CAPTURE_DURATION}s)" capture_step_read_filter test_step_add "Capture snapshot length 68 bytes (${TRAFFIC_CAPTURE_DURATION}s)" capture_step_snapshot } dumpcap_capture_suite() { #DUT="$DUMPCAP -Q" DUT=$DUMPCAP test_step_add "Capture 10 packets" capture_step_10packets test_step_add "Capture 10 packets using stdout: -w -" capture_step_10packets_stdout if [ $TEST_FIFO ]; then test_step_add "Capture via fifo" capture_step_fifo fi test_step_add "Capture via stdin" capture_step_stdin # read (display) filters intentionally doesn't work with dumpcap! #test_step_add "Capture read filter (${TRAFFIC_CAPTURE_DURATION}s)" capture_step_read_filter test_step_add "Capture snapshot length 68 bytes (${TRAFFIC_CAPTURE_DURATION}s)" capture_step_snapshot } capture_cleanup_step() { ping_cleanup rm -f ./testout.txt rm -f ./testerr.txt rm -f ./testout2.txt rm -f ./testout.pcap rm -f ./testout2.pcap } capture_suite() { test_step_set_pre capture_cleanup_step test_step_set_post capture_cleanup_step test_remark_add "Capture - need some traffic on interface: \"$TRAFFIC_CAPTURE_IFACE\"" test_suite_add "Dumpcap capture" dumpcap_capture_suite test_suite_add "TShark capture" tshark_capture_suite test_suite_add "Wireshark capture" wireshark_capture_suite }
MavEtJu/wireshark-lean
test/suite-capture.sh
Shell
gpl-2.0
13,568
csp(){
  local file=$1
  sed -i 's/\s\+$//' "$file"
}

git-prompt(){
  [[ $- == *i* ]] && . ~/myscripts/git-prompt
  which hub > /dev/null 2>&1 && ~/myscripts/hub.bash_completion.sh
}

alias gs='git status'
alias gl='git pull'
alias gp='git push'
alias gd='git diff'
alias ga='git add'
alias gau='git add --update'
alias gc='git commit -v'
alias gca='git commit -v -a'
alias gb='git branch'
alias gba='git branch -a'
alias gco='git checkout'
alias gcob='git checkout -b'
alias gcot='git checkout -t'
alias gcotb='git checkout --track -b'
alias glog='git log'
alias glogp='git log --pretty=format:"%h %s" --graph'
alias gll='git log --pretty=format:"%C(yellow)%h%Cred%d\\ %Creset%s%Cblue\\ [%cn]" --decorate --numstat'
alias gbl='git blame'
agustim/myscripts
git.sh
Shell
gpl-2.0
721
#!/bin/bash

KERNEL_DIR=$PWD
INITRAMFS_SRC_DIR=../sc02c_initramfs
INITRAMFS_TMP_DIR=/tmp/sc02c_initramfs

copy_initramfs() {
  if [ -d $INITRAMFS_TMP_DIR ]; then
    rm -rf $INITRAMFS_TMP_DIR
  fi
  cp -a $INITRAMFS_SRC_DIR $(dirname $INITRAMFS_TMP_DIR)
  rm -rf $INITRAMFS_TMP_DIR/.git
  find $INITRAMFS_TMP_DIR -name .gitignore | xargs rm
}

# check target
BUILD_TARGET=$1
case "$BUILD_TARGET" in
  "AOSP" ) BUILD_DEFCONFIG=c1_sc02c_aosp_defconfig ;;
  "SAM" ) BUILD_DEFCONFIG=c1_sc02c_samsung_defconfig ;;
  "MULTI" ) BUILD_DEFCONFIG=c1_sc02c_multi_defconfig ;;
  * ) echo "error: not found BUILD_TARGET" && exit -1 ;;
esac

BIN_DIR=out/$BUILD_TARGET/bin
OBJ_DIR=out/$BUILD_TARGET/obj
mkdir -p $BIN_DIR
mkdir -p $OBJ_DIR

# generate boot splash header
if [ ! -n "$3" ]; then
  read -p "select boot splash image (default:none) : " SPLASH_IMAGE_SELECT
  SPLASH_IMAGE=`find ./boot-splash/ -type f | grep $SPLASH_IMAGE_SELECT`
else
  SPLASH_IMAGE=`find ./boot-splash/ -type f | grep $3`
fi
if [ -n "$SPLASH_IMAGE" ]; then
  # make simg2img
  if [ ! -e ./release-tools/bmp2splash/bmp2splash ]; then
    echo "make bmp2splash..."
    make -C ./release-tools/bmp2splash
  fi
  echo "generate bmp2splash header from $SPLASH_IMAGE..."
  ./release-tools/bmp2splash/bmp2splash $SPLASH_IMAGE > ./drivers/video/samsung/logo_rgb24_user.h
  if [ $? != 0 ]; then
    exit -1
  fi
  export USER_BOOT_SPLASH=y
else
  echo "no boot splash selected"
fi

# generate LOCALVERSION
. mod_version

# check and get compiler
. cross_compile

# set build env
export ARCH=arm
export CROSS_COMPILE=$BUILD_CROSS_COMPILE
#export USE_SEC_FIPS_MODE=true
export LOCALVERSION="-$BUILD_LOCALVERSION"

echo "=====> BUILD START $BUILD_KERNELVERSION-$BUILD_LOCALVERSION"

if [ ! -n "$2" ]; then
  echo ""
  read -p "select build? [(a)ll/(u)pdate/(z)Image default:update] " BUILD_SELECT
else
  BUILD_SELECT=$2
fi

# copy initramfs
echo ""
echo "=====> copy initramfs"
copy_initramfs

# make start
if [ "$BUILD_SELECT" = 'all' -o "$BUILD_SELECT" = 'a' ]; then
  echo ""
  echo "=====> cleaning"
  make O=$OBJ_DIR clean
  cp -f ./arch/arm/configs/$BUILD_DEFCONFIG $OBJ_DIR/.config
  make -C $PWD O=$OBJ_DIR oldconfig || exit -1
fi

if [ "$BUILD_SELECT" != 'zImage' -a "$BUILD_SELECT" != 'z' ]; then
  echo ""
  echo "=====> build start"
  if [ -e make.log ]; then
    mv make.log make_old.log
  fi
  nice -n 10 make O=$OBJ_DIR -j12 2>&1 | tee make.log
fi

# check compile error
COMPILE_ERROR=`grep 'error:' ./make.log`
if [ "$COMPILE_ERROR" ]; then
  echo ""
  echo "=====> ERROR"
  grep 'error:' ./make.log
  exit -1
fi

# *.ko replace
find $OBJ_DIR -name '*.ko' -exec cp -av {} $INITRAMFS_TMP_DIR/lib/modules/ \;

# build zImage
echo ""
echo "=====> make zImage"
nice -n 10 make O=$OBJ_DIR -j2 zImage CONFIG_INITRAMFS_SOURCE="$INITRAMFS_TMP_DIR" CONFIG_INITRAMFS_ROOT_UID=`id -u` CONFIG_INITRAMFS_ROOT_GID=`id -g` || exit 1

if [ ! -e $OUTPUT_DIR ]; then
  mkdir -p $OUTPUT_DIR
fi

echo ""
echo "=====> CREATE RELEASE IMAGE"

# clean release dir
if [ `find $BIN_DIR -type f | wc -l` -gt 0 ]; then
  rm $BIN_DIR/*
fi

# copy zImage
cp $OBJ_DIR/arch/arm/boot/zImage $BIN_DIR/zImage
cp $OBJ_DIR/arch/arm/boot/zImage ./out/
echo " $BIN_DIR/zImage"
echo " out/zImage"

# create odin image
cd $KERNEL_DIR/$BIN_DIR
tar cf $BUILD_LOCALVERSION-odin.tar zImage
md5sum -t $BUILD_LOCALVERSION-odin.tar >> $BUILD_LOCALVERSION-odin.tar
mv $BUILD_LOCALVERSION-odin.tar $BUILD_LOCALVERSION-odin.tar.md5
echo " $BIN_DIR/$BUILD_LOCALVERSION-odin.tar.md5"

# create cwm image
cd $KERNEL_DIR/$BIN_DIR
if [ -d tmp ]; then
  rm -rf tmp
fi
mkdir -p ./tmp/META-INF/com/google/android
cp zImage ./tmp/
cp $KERNEL_DIR/release-tools/update-binary ./tmp/META-INF/com/google/android/
sed -e "s/@VERSION/$BUILD_LOCALVERSION/g" $KERNEL_DIR/release-tools/updater-script.sed > ./tmp/META-INF/com/google/android/updater-script
cd tmp && zip -rq ../cwm.zip ./* && cd ../
SIGNAPK_DIR=$KERNEL_DIR/release-tools/signapk
java -jar $SIGNAPK_DIR/signapk.jar $SIGNAPK_DIR/testkey.x509.pem $SIGNAPK_DIR/testkey.pk8 cwm.zip $BUILD_LOCALVERSION-signed.zip
rm cwm.zip
rm -rf tmp
echo " $BIN_DIR/$BUILD_LOCALVERSION-signed.zip"

# rename zImage for multiboot
if [ "$BUILD_TARGET" = "MULTI" ]; then
  echo " rename $BIN_DIR/zImage => $BIN_DIR/zImage_gb"
  cp $BIN_DIR/zImage $BIN_DIR/zImage_gb
fi

cd $KERNEL_DIR
echo ""
echo "=====> BUILD COMPLETE $BUILD_KERNELVERSION-$BUILD_LOCALVERSION"
exit 0
sakuramilk/sc02c_kernel_gb
_build.sh
Shell
gpl-2.0
4,429
#!/bin/bash # Check permission if [ "$UID" != 0 ]; then echo "Must be root to run this script." fi # Part 1 function run_part_1 { useradd student; passwd student; echo $SHELL; who; who -q; who -b; date; cal -m 1 2003; cal -y 2003; su student; exit; # Return to root } # Part 2 function run_part_2 { pwd; echo $PWD; cd ~ echo $HOME; mkdir ABC; touch abc; ls -r; ls -l; ls --format=horizontal; ls -a; cat abc cd ~ rm -i ./abc rmdir ABC # Generally, we use 'rm -R' to delete a folder } # Part 3 function run_part_3 { cd ~ mkdir memos echo "test" >> myfile # Write string to target file cat myfile >> large cp * ./memos/ ln large large.old; mv large large.new; mv large.new memos/large; tail -n 5 large; head -n 5 large; tail -c 10 large >> newfile; more myfile; less myfile; wc -l myfile cd ~ find ./ -ctime 7 -name "*" > 7days find ./ -name "passwd" find / -atime 7 paste large1 myfile>myfile2 ps -A | grep httpd # A demo of grep } # Params echo -e "This is an example of Experiment 1" echo -e "Please choose a part to run" read choice; if [[ $choice == "1" ]]; then run_part_1 elif [[ $choice == "2" ]]; then run_part_2 elif [[ $choice == "3" ]]; then run_part_3 else echo "Not Found!" fi
dstsmallbird/OSExpriment
exp1/exp1.sh
Shell
gpl-2.0
1,221
#!/bin/bash for RANGE in 0x00A0:0xD7FF 0xE000:0xFDCF 0xFDF0:0xFFFD \ 0x10000:0x1FFFD 0x20000:0x2FFFD \ 0x30000:0x3FFFD 0x40000:0x4FFFD \ 0x50000:0x5FFFD 0x60000:0x6FFFD \ 0x70000:0x7FFFD 0x80000:0x8FFFD \ 0x90000:0x9FFFD 0xA0000:0xAFFFD \ 0xB0000:0xBFFFD 0xC0000:0xCFFFD \ 0xD0000:0xDFFFD 0xE0000:0xEFFFD \ 0xF0000:0xFFFFD 0x1000000:0x10FFFD do ( cat <<END #\#CIF_2.0 data_all_allowed_Unicode END perl <<PERLSCRIPT use strict; use warnings; binmode STDOUT, "utf8"; my( \$s, \$e ) = split ':', "${RANGE}"; \$s = hex \$s; \$e = hex \$e; printf '_start_range_%04X "', \$s; for my \$i (\$s..\$e) { if( (\$i & 0x0F) == 0 ) { printf "\"\n_values_from_%04X \"", \$i; } print chr( \$i ); } print "\"\n"; PERLSCRIPT ) | ./cifparse done
sauliusg/cod-tools
src/components/codcif/tests/cifparse_135.sh
Shell
gpl-2.0
900
#!/bin/bash
# **********************************************************
# maj_ff_iceweasel.sh
# Script to downgrade iceweasel-backports to the esr version
# 20160316
# **********************************************************

# Colors
ROUGE="\\033[1;31m"
VERT="\\033[1;32m"
BLEU="\\033[1;34m"
JAUNE="\\033[1;33m"
COLTITRE="\033[1;35m"   # Pink
COLDEFAUT="\033[0;33m"  # Brownish yellow
COLCMD="\033[1;37m"     # White
COLERREUR="\033[1;31m"  # Red
COLTXT="\033[0;37m"     # Grey
COLINFO="\033[0;36m"    # Cyan
COLPARTIE="\033[1;34m"  # Blue

# Make the script idiot-proof
[ -e /var/www/se3 ] && echo "Careful... This script must be run on the Linux clients, not on the server!" && exit 1

# In case dpkg was interrupted:
dpkg --configure -a

# Check for iceweasel-release and uninstall it if needed
if [ -e "/etc/apt/sources.list.d/mozbackports.list" ]; then
    echo -e "$COLINFO"
    echo "mozbackports.list exists,"
    echo -e "$COLTXT"
    test_moz_1=$(cat /etc/apt/sources.list.d/mozbackports.list | grep "^deb http")
    if [ -n "$test_moz_1" ] ; then
        sed -i 's/^deb\ http:\/\/mozilla.debian/#deb\ http:\/\/mozilla.debian/g' /etc/apt/sources.list.d/mozbackports.list
        sed -i 's/iceweasel/firefox/g' /etc/apt/sources.list.d/mozbackports.list
        echo -e "$COLINFO"
        echo "mozbackports.list modified."
        echo -e "$COLDEFAUT"
        echo "Removing the currently installed version..."
        echo -e "$COLTXT"
        apt-get remove -y iceweasel iceweasel-l10n-fr
    else
        echo -e "$COLINFO"
        echo "mozbackports.list has already been modified"
        echo -e "$COLTXT"
    fi
else
    TEST_MOZ=$(cat /etc/apt/sources.list | grep "^deb http://mozilla.debian.net")
    if [ -n "$TEST_MOZ" ] ; then
        sed -i 's/^deb\ http:\/\/mozilla.debian/#deb\ http:\/\/mozilla.debian/g' /etc/apt/sources.list
        sed -i 's/iceweasel/firefox/g' /etc/apt/sources.list
        echo -e "$COLINFO"
        echo "sources.list modified."
        echo -e "$COLDEFAUT"
        echo "Removing the currently installed version..."
        echo -e "$COLTXT"
    else
        echo -e "$COLINFO"
        echo "sources.list has already been modified."
        echo -e "$COLTXT"
    fi
    echo -e "$COLINFO"
    echo "Nothing to do."
    echo -e "$COLTXT"
fi

# Update the packages
echo -e "$COLINFO"
echo "Updating the packages..."
echo -e "$COLTXT"
apt-get update

echo -e "$COLINFO"
echo "Installing or updating iceweasel..."
echo -e "$COLTXT"
apt-get install -y iceweasel iceweasel-l10n-fr

echo -e "$VERT"
echo "Installation finished."
echo -e "$COLTXT"
jcmousse/clinux
se3/alancer/maj_ff_iceweasel.sh
Shell
gpl-2.0
2,506
#!/bin/sh ip=$4 sed -i '' -e "/${ip}/d" /tmp/ip_activity_pppoe sed -i '' -e "/${ip}/d" /tmp/ip_activity exit 0
mysticall/imslu
conf/freebsd/usr/local/etc/imslu/scripts/pppoe_link_down.sh
Shell
gpl-2.0
112
#!/bin/sh # Cause I'm very, very lazy ; ) ./clean.sh echo "> Adding EVERYTHING in folder to" echo " SVN version control!" svn add * >> /dev/null 2>&1 svn add */* >> /dev/null 2>&1 svn add */*/* >> /dev/null 2>&1 svn add */*/*/* >> /dev/null 2>&1 svn add */*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/*/*/*/*/*/* >> /dev/null 2>&1 svn add */*/*/*/*/*/*/*/*/*/*/*/*/*/* >> /dev/null 2>&1
ProjectZeroSlackr/ProjectZeroSlackr-SVN
svn-add-all.sh
Shell
gpl-2.0
753
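The ladder of */*/... globs above emulates recursion by hand; svn can recurse itself when add is forced on the working-copy root, which collapses the whole ladder into one call:

#!/bin/sh
# --force makes svn descend into already-versioned directories and add anything unversioned.
svn add --force . > /dev/null 2>&1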
#!/bin/bash
# Archlinux-specific module
# ------------------------------------------------------------------------------
Titre="Installation ${bold}arch${offbold}${blue}linux${Reset}"
TypeFile='bash'      # Only files of this type will be handled
Appli='Archlinux'
#
# ------------------------------------------------------------------------------
# Directory structure
# ------------------------------------------------------------------------------
DirArch="$HOME/workspace/archlinux/installation-guide"
DirMenu="$DirArch/contents"
#
# ------------------------------------------------------------------------------
# archlinux
#    |
#    |
#    `-- installation-guide
#          |
#          |-- contents                 [ Menu lines without .bash and _ ]
#          |     |-- 0.+.Initialisation.bash
#          |     |-- 1.*_Pre-installation.bash
#          |     |-- 1.1_Set_the_keyboard_layout.bash
#          |     |-- 1.2_Verify_the_boot_mode.bash
#          |     ...
# ------------------------------------------------------------------------------
# $OUTILSRC/typ/archlinux.bash
RC69/RCsite1
outilsRC/app/archlinux.bash
Shell
gpl-2.0
1,879
#!/bin/sh # Copyright (C) 1999-2005 ImageMagick Studio LLC # # This program is covered by multiple licenses, which are described in # LICENSE. You should have received a copy of LICENSE with this # package; otherwise see http://www.imagemagick.org/script/license.php. . ${srcdir}/tests/common.shi ${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_truecolor10.dpx EPSI
atmark-techno/atmark-dist
user/imagemagick/tests/rwfile_EPSI_truecolor10.sh
Shell
gpl-2.0
366
make ARCH=arm CROSS_COMPILE= 'HOSTCC=ccache /usr/bin/gcc -B/usr/bin/' uImage
wangxingchao/oriole
buImage.sh
Shell
gpl-2.0
77
#!/bin/bash # # Copyright (C) 2010-2012 Eugen Feller, INRIA <[email protected]> # # This file is part of Snooze, a scalable, autonomic, and # energy-aware virtual machine (VM) management framework. # # This program is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation, either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses>. # perform_virtual_cluser_tasks () { echo "$log_tag Creating virtual cluster, propagating images, and starting" prepare_and_create_virtual_cluster $1 $2 propagate_base_image propagate_virtual_cluster $1 start_virtual_cluster $1 } configure_mapreduce () { case "$1" in 'one_data') echo "$log_tag Configuring MapReduce with one data node VM per physical machine" $python "$mapreduce_filter_compute_and_data_script" $snoozeclient_output_formatted $mapreduce_data_nodes_file $mapreduce_compute_nodes_file $python "$mapreduce_script" "mapreduce_separatedata" "--data" `cat $mapreduce_data_nodes_file` "--compute" `cat $mapreduce_compute_nodes_file` local master_node=$(get_first_host `cat $mapreduce_data_nodes_file`) echo $master_node > $mapreduce_master_node ;; 'variable_data') echo "$log_tag Configuring MapReduce with variable number of compute and data VMs" echo "$log_tag Number of data nodes:" read number_of_data_nodes echo "$log_tag Number of compute nodes:" read number_of_compute_nodes data_nodes=`cat $virtual_machine_hosts | sed -n 1,"$number_of_data_nodes"p` number_of_virtual_machines=$(get_number_of_virtual_machines) compute_nodes=`cat $virtual_machine_hosts | sed -n $(($number_of_data_nodes+1)),"$number_of_virtual_machines"p` $python "$mapreduce_script" "mapreduce_separatedata" "--data" $data_nodes "--compute" $compute_nodes local master_node=$(get_first_host $data_nodes) echo $master_node > $mapreduce_master_node ;; 'normal') local hosts_list=$(get_virtual_machine_hosts) echo "$log_tag Configuring MapReduce in normal mode on: $hosts_list" $python "$mapreduce_script" "mapreduce_normal" "--hosts" $hosts_list local master_node=$(get_first_host $hosts_list) echo $master_node > $mapreduce_master_node ;; *) echo "$log_tag Unknown command received!" ;; esac } start_mapreduce_test_case () { case "$1" in 'start') echo "$log_tag Cluster name:" read cluster_name echo "$log_tag Number of VMs:" read number_of_vms perform_virtual_cluser_tasks $cluster_name $number_of_vms ;; 'storage') local hosts_list=$(get_virtual_machine_hosts) $python $mapreduce_script "storage" "--hosts" $hosts_list "--job_id" $mapreduce_storage_jobid ;; 'configure') echo "$log_tag Configuration mode (normal, one_data, variable_data):" read configuration_mode configure_mapreduce $configuration_mode ;; 'benchmark') echo "$log_tag Benchmark name (e.g. dfsio, dfsthroughput, mrbench, nnbench, pi, teragen, terasort, teravalidate, censusdata, censusbench, wikidata, wikibench):" read benchmark_name local master_node=$(get_hadoop_master_node) $python "$mapreduce_script" "benchmark" "--name" $benchmark_name "--master" $master_node ;; *) echo "$log_tag Unknown command received!" ;; esac }
efeller/snoozedeploy
grid5000/experiments/scripts/mapreduce_test_case.sh
Shell
gpl-2.0
3927
#!/bin/bash

# Renew the certificate
certbot renew --preferred-challenges http

for cert in /etc/letsencrypt/live/*
do
    if [ -d "$cert" ]
    then
        cat $cert/fullchain.pem $cert/privkey.pem > /opt/ssl/$(echo $cert | cut -d '/' -f 5).pem
    fi
done
acc61287/ScriptTools
update-certs.sh
Shell
gpl-2.0
271
#!/bin/bash

cd /tmp
iozone -a > /unacloud/cluster/results`hostname`.txt
UnaCloud/unacloudIaaS1.0
escritorio/runIozone.sh
Shell
gpl-2.0
71
#
# FILE DISCONTINUED HERE
# UPDATED VERSION AT
# https://gitlab.com/yeupou/stalag13/raw/master/usr/local/bin/gpg-grabsub.sh
#
#
#       | |
#      \_V_//
#      \/=|=\/
#       [=v=]
#     __\___/_____
#    /..[ _____ ]
#    /_ [ [ M /] ]
#   /../.[ [ M /@] ]
#  <-->[_[ [M /@/] ]
#  /../ [.[ [ /@/ ] ]
#  _________________]\ /__/ [_[ [/@/ C] ]
# <_________________>>0---] [=\ \@/ C / /
#    ___ ___ ]/000o /__\ \ C / /
#    \ /        /....\ \_/ /
#  ....\||/....  [___/=\___/
#  . . . .       [...] [...]
#  . .. .        [___/ \___]
#  . 0 .. 0 .    <---> <--->
#  /\/\. . . ./\/\ [..] [..]
#
#!/bin/bash
#
# Copyright (c) 2015 Mathieu Roy <yeupou--gnu.org>
# http://yeupou.wordpress.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
# USA

red="\e[31m"
yellow="\e[33m"
cyan="\e[36m"
bold="\e[1m"
reset="\e[0m"

echo -e "${red}This script assumes that you already set up a primary key and sub keys"
echo -e "(one to sign, one to en/decrypt)${reset}"

# gpg dir should not already exist
dir=~/.gnupg
if [ -d "$dir" ]; then echo "$dir already exists, we won't mess with it" && exit 1; fi

# get hostname of the should be secured box
echo "Master key host to contact via SSH?"
read host
echo -e "${cyan}Keys:${reset}"
ssh $host 'gpg --list-key'

# set the primary key
echo -e "Primary key id? (pub 4096R/${yellow}??????????${reset})"
read primary

# local import keys
ssh $host 'gpg --export-secret-key --armor' | gpg --import -

# remove the secret key of the primary
echo -e "${cyan}Removing the primary key from the set, approve please...${reset}"
temp=$(mktemp)
gpg --export-secret-subkeys $primary > $temp
gpg --delete-secret-keys $primary
gpg --import $temp
rm -f $temp
gpg --list-keys

# set a local password
echo -e "${cyan}Set a new local password that will differ from the primary and save:${reset}"
gpg --edit-key $primary passwd

# EOF
yeupou/stalag13
usr/local/bin/gpg-grabsub.sh
Shell
gpl-2.0
2933
#!/bin/sh
#
# Copyright (c) International Business Machines Corp., 2003
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Prakash Narayana ([email protected])
# and Michael Reed ([email protected])
#
# A script that will test isofs on Linux system.
# It makes ISO9660 file system with different options and also
# mounts the ISO9660 file system with different mount options.
#

TCID=isofs
TST_TOTAL=77

. test.sh

NO_CLEANUP=""

usage()
{
    echo "USAGE: $0 <optional> -n -h"
    exit
}

cleanup()
{
    if [ "$NO_CLEANUP" = "no" ]; then
        tst_resm TINFO "Temporary directory $PWD was not removed"
    else
        tst_rmdir
    fi
}

max_depth=3
max_dirs=4

gen_fs_tree()
{
    local cur_path="$1"
    local cur_depth="$2"

    if [ "$cur_depth" -gt "$max_depth" ]; then
        return
    fi

    for i in $(seq 1 $max_dirs); do
        local new_path="$cur_path/subdir_$i"
        mkdir -p "$new_path"
        dd if=/dev/urandom of="$new_path/file" bs=1024 count=100 > /dev/null 2>&1
        gen_fs_tree "$new_path" $((cur_depth + 1))
    done
}

while getopts :hnd: arg; do
    case $arg in
    h)
        echo ""
        echo "n - The directories created will not be removed"
        echo "h - Help options"
        echo ""
        usage
        echo ""
        ;;
    n)
        NO_CLEANUP="no"
        ;;
    esac
done

tst_require_root

tst_tmpdir

TST_CLEANUP=cleanup

MNT_POINT="$PWD/mnt"
MAKE_FILE_SYS_DIR="$PWD/files"

mkdir -p -m 777 $MNT_POINT
mkdir -p $MAKE_FILE_SYS_DIR

# Generated directories and files
mkdir -p $MAKE_FILE_SYS_DIR
gen_fs_tree "$MAKE_FILE_SYS_DIR" 1

# Make ISO9660 file system with different options.
# Mount the ISO9660 file system with different mount options.
for mkisofs_opt in \
    " " \
    "-J" \
    "-hfs -D" \
    " -R " \
    "-R -J" \
    "-f -l -D -J -L -R" \
    "-allow-lowercase -allow-multidot -iso-level 3 -f -l -D -J -L -R"
do
    rm -f isofs.iso
    mkisofs -o isofs.iso -quiet $mkisofs_opt $MAKE_FILE_SYS_DIR 2> /dev/null
    if [ $? -eq 0 ]; then
        tst_resm TPASS \
            "mkisofs -o isofs.iso -quiet $mkisofs_opt $MAKE_FILE_SYS_DIR"
    else
        tst_resm TFAIL \
            "mkisofs -o isofs.iso -quiet $mkisofs_opt $MAKE_FILE_SYS_DIR"
        continue
    fi

    for mount_opt in \
        "loop" \
        "loop,norock" \
        "loop,nojoliet" \
        "loop,block=512,unhide" \
        "loop,block=1024,cruft" \
        "loop,block=2048,nocompress" \
        "loop,check=strict,map=off,gid=bin,uid=bin" \
        "loop,check=strict,map=acorn,gid=bin,uid=bin" \
        "loop,check=relaxed,map=normal" \
        "loop,block=512,unhide,session=2"
        # "loop,sbsector=32"
    do
        mount -t iso9660 -o $mount_opt isofs.iso $MNT_POINT
        if [ $? -ne 0 ]; then
            tst_resm TFAIL \
                "mount -t iso9660 -o $mount_opt isofs.iso $MNT_POINT"
            continue
        fi

        ls -lR $MNT_POINT > /dev/null
        if [ $? -ne 0 ]; then
            tst_resm TFAIL "ls -lR $MNT_POINT"
        fi

        umount $MNT_POINT
        if [ $? -ne 0 ]; then
            tst_brkm TFAIL "umount $MNT_POINT"
        fi

        tst_resm TPASS "mount/umount with \"$mount_opt\" options"
    done
done

tst_exit
Havner/ltp
testcases/kernel/fs/iso9660/isofs.sh
Shell
gpl-2.0
3517
#!/bin/bash

# Install isutf8 program (from package "moreutils" at least in linux mint)
# in order to use this script
#
# This script assumes that the current working directory is the root of the
# wesnoth repository.

set -e

find src/ -type f -print0 | xargs -0 isutf8 --
find data/ -not -name "*.png" -not -name "*.ogg" -not -name "*.jpg" -not -name "*.wav" -not -name "*.gif" -not -name "*.xcf" -type f -print0 | xargs -0 isutf8 --
find po/ -type f -print0 | xargs -0 isutf8 --
dailin/wesnoth
utils/travis/check_utf8.sh
Shell
gpl-2.0
480
#!/bin/bash
### BEGIN INIT INFO
# Provides:          gvd
# Required-Start:    $network
# Required-Stop:     $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: start GaVer daemon (gvd)
### END INIT INFO

#
# Load the module to parse the config file
#
if [ ! -f /opt/gaver/bin/config-parser.sh ]
then
    echo "Config Parser Not Found, please check the installation";
    exit 0;
fi

source /opt/gaver/bin/config-parser.sh;

CONFIG_FILE="/opt/gaver/etc/gvd.ini"

if [ ! -f $CONFIG_FILE ]
then
    echo "Config File Not Found ($CONFIG_FILE)";
    exit 0;
fi

config_parser $CONFIG_FILE; # Parse Config file

config.section.Bin;
bin=$gvd;
pid=$pid;

config.section.GaVer;
cfg_addr=$addr;
cfg_port=$port;
cfg_mtu=$mtu;
cfg_os=$overal_bps;
cfg_ss=$socket_bps;
cfg_rmem=$rmem;
cfg_wmem=$wmem;
cfg_netstat=$netstat;
cfg_error=$error;

config.section.Api;
cfg_listen=$listen_api;

# See if the daemon is there
test -x $bin || exit 0

function make_arg()
{
    args="";
    if [ "$cfg_addr" != "" ]
    then
        args=$args"-a $cfg_addr "
    fi
    if [ "$cfg_port" != "" ]
    then
        args=$args"-p $cfg_port "
    fi
    if [ "$cfg_mtu" != "" ]
    then
        args=$args"-m $cfg_mtu "
    fi
    if [ "$cfg_os" != "" ]
    then
        args=$args"-s $cfg_os "
    fi
    if [ "$cfg_ss" != "" ]
    then
        args=$args"-S $cfg_ss "
    fi
    if [ "$cfg_rmem" != "" ]
    then
        args=$args"-r $cfg_rmem "
    fi
    if [ "$cfg_wmem" != "" ]
    then
        args=$args"-w $cfg_wmem "
    fi
    if [ "$cfg_listen" != "" ]
    then
        args=$args"-l $cfg_listen "
    fi
    if [ "$pid" != "" ]
    then
        args=$args"-P $pid "
    fi
    if [ "$cfg_netstat" != "" ]
    then
        args=$args"-n $cfg_netstat "
    fi
    if [ "$cfg_error" != "" ]
    then
        args=$args"-e $cfg_error "
    fi
    echo $args
}

ARG=`make_arg`

if [ $# -eq 2 ]
then
    if [ $2 == "debug" ]
    then
        echo $ARG
        exit 0
    fi
fi

case $1 in
    start)
        echo "Starting GaVer"
        start-stop-daemon --start --exec $bin --pidfile $pid --umask 022 -- $ARG
        ;;
    stop)
        echo "Stopping GaVer"
        start-stop-daemon --stop --pidfile $pid
        if [ $? == "0" ]
        then
            rm -f $pid
        fi
        ;;
    *)
        echo "Usage: /etc/init.d/gaverd {start|stop}"
        exit 1
        ;;
esac
emilianobilli/gaver
script/gaverd.sh
Shell
gpl-2.0
2219
#!/bin/bash #echo "Change start-ap-managed-wifi.sh." #echo "Example for wlan1 as AP: bash set-router.sh wlan1" router=$1 oldline=$(grep -F 'router=' ~/.openplotter/Network/.openplotter/start-ap-managed-wifi.sh) newline="router=${router}" sudo sed -i "s/${oldline}/${newline}/g" ~/.openplotter/Network/.openplotter/start-ap-managed-wifi.sh
e-sailing/openplotter
Network/set-router.sh
Shell
gpl-2.0
341
#!/bin/bash
# Script for manual Date Input and Graph & HTML export

# Show the Year for debugging type errors
year="2016"

# Ask for the Day to Generate the Graph and Data
read -p "Please give in YYYY-MM-DD:" date
echo "You use: "$date" for Export! In the Year "$year" !"

# Import from YOUR file! You have to edit this manually!
cat speedtestausgabe.csv | grep $date > $date".csv"
mv $date.csv /home/pi/auto-speedtest/Graph-Builder/test.csv
cd /home/pi/auto-speedtest/Graph-Builder/
python3 graph-builder.py
mv download.png $date"-download.png"
mv upload.png $date"-upload.png"
mv ping.png $date"-ping.png"
mv test.csv $date".csv"

# Todo: File upload via SFTP into folder
# Todo: HTML input after SFTP transfer (PHP include via text document?)

# Strings for HTML export
html1="<tr><th><h2>Weekday<br>"$date"</h2></th>"
html2='<th><a href="/'
html3=$year"/"$date"-download.png"
html4='"><img src="/'
html5=$year"/"$date"-download.png"
html6='" width="300" height="200" alt="DownloadGraphic"></a></th>'
html7='<th><a href="/'
html8=$year"/"$date"-upload.png"
html9='"><img src="/'
html10=$year"/"$date"-upload.png"
html11='" width="300" height="200" alt="UploadGraphic"></a></th>'
html12='<th><a href="/'
html13=$year"/"$date"-ping.png"
html14='"><img src="/'
html15=$year"/"$date"-ping.png"
html16='" width="300" height="200" alt="PingGraphic"></a></th>'
html17='<th><a href="/'
html18=$year"/"$date".csv"
html19='">CSV-Link</a></th></tr>'

# Export HTML to put into a Website
echo $html1$html2$html3$html4$html5$html6$html7$html8$html9$html10$html11$html12$html13$html14$html15$html16$html17$html18$html19
Wlanfr3ak/auto-speedtest
generator.sh
Shell
gpl-2.0
1586
#!/bin/bash DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" DEDOP_BIN="$( cd "${DIR}/../../.." && pwd )" open "${DEDOP_BIN}/dedop-shell.command"
DeDop/dedop-conda
recipe/dedop-shell.app/Contents/MacOS/launch-dedop-shell-in-terminal.sh
Shell
gpl-3.0
156
#!/bin/bash

# action=test #train # plot
# for trv in v0 v1 v2 v3; do
#     trlabel=dtr-train-$trv
#     common=" --training-label $trlabel --n-max-queries 150000 --n-max-procs 7"  # --cgroup among-families --tvar affinity"# ; trseed=0 --training-seed $trseed
#     ./test/dtr-scan.py $action --label dtr-train-v3 $common
# done
# exit 0

action=plot #test # train
for cg in within-families among-families; do
    for tv in affinity delta-affinity; do
        lfn=_output/dtr-scan/$cg-$tv.txt
        echo "" >$lfn
        for trv in v0 v1 v2 v3; do
            trlabel=dtr-train-$trv
            common=" --training-label $trlabel --n-max-queries 150000 --n-max-procs 7 --cgroup $cg --tvar $tv"  # ; trseed=0 --training-seed $trseed
            ./test/dtr-scan.py $action --label dtr-train-v0 $common >>$lfn
            ./test/dtr-scan.py $action --label dtr-train-v1 $common >>$lfn
            ./test/dtr-scan.py $action --label dtr-train-v2 $common >>$lfn
            ./test/dtr-scan.py $action --label dtr-train-v3 $common >>$lfn
        done
    done
done
psathyrella/partis
bin/run-dtr-scan.sh
Shell
gpl-3.0
992
#!/bin/bash

set -eux

. $TESTSLIB/apt.sh

update_core_snap_for_classic_reexec() {
    # it is possible to disable this to test that snapd (the deb) works
    # fine with whatever is in the core snap
    if [ "$MODIFY_CORE_SNAP_FOR_REEXEC" != "1" ]; then
        echo "Not modifying the core snap as requested via MODIFY_CORE_SNAP_FOR_REEXEC"
        return
    fi

    # We want to use the in-tree snap/snapd/snap-exec/snapctl, because
    # we re-exec by default.
    # To accomplish that, we'll just unpack the core we just grabbed,
    # shove the new snap-exec and snapctl in there, and repack it.

    # First of all, unmount the core
    core="$(readlink -f /snap/core/current || readlink -f /snap/ubuntu-core/current)"
    snap="$(mount | grep " $core" | awk '{print $1}')"
    umount --verbose "$core"

    # Now unpack the core, inject the new snap-exec/snapctl into it
    unsquashfs "$snap"
    cp /usr/lib/snapd/snap-exec squashfs-root/usr/lib/snapd/
    cp /usr/bin/snapctl squashfs-root/usr/bin/
    # also inject new version of snap-confine and snap-discard-ns
    cp /usr/lib/snapd/snap-discard-ns squashfs-root/usr/lib/snapd/
    cp /usr/lib/snapd/snap-confine squashfs-root/usr/lib/snapd/
    # also add snap/snapd because we re-exec by default and want to test
    # this version
    cp /usr/lib/snapd/snapd squashfs-root/usr/lib/snapd/
    cp /usr/lib/snapd/info squashfs-root/usr/lib/snapd/
    cp /usr/bin/snap squashfs-root/usr/bin/snap

    # repack, cheating to speed things up (4sec vs 1.5min)
    mv "$snap" "${snap}.orig"
    if [[ "$SPREAD_SYSTEM" == ubuntu-14.04-* ]]; then
        # trusty does not support -Xcompression-level 1
        mksquashfs squashfs-root "$snap" -comp gzip
    else
        mksquashfs squashfs-root "$snap" -comp gzip -Xcompression-level 1
    fi
    rm -rf squashfs-root

    # Now mount the new core snap
    mount "$snap" "$core"

    # Make sure we're running with the correct copied bits
    for p in /usr/lib/snapd/snap-exec /usr/lib/snapd/snap-confine /usr/lib/snapd/snap-discard-ns /usr/bin/snapctl /usr/lib/snapd/snapd /usr/bin/snap; do
        if ! cmp ${p} ${core}${p}; then
            echo "$p in tree and $p in core snap are unexpectedly not the same"
            exit 1
        fi
    done
}

prepare_each_classic() {
    mkdir -p /etc/systemd/system/snapd.service.d
    if [ -z "${SNAP_REEXEC:-}" ]; then
        rm -f /etc/systemd/system/snapd.service.d/reexec.conf
    else
        cat <<EOF > /etc/systemd/system/snapd.service.d/reexec.conf
[Service]
Environment=SNAP_REEXEC=$SNAP_REEXEC
EOF
    fi
}

prepare_classic() {
    apt_install_local ${GOPATH}/snapd_*.deb
    if snap --version | MATCH unknown; then
        echo "Package build incorrect, 'snap --version' mentions 'unknown'"
        snap --version
        apt-cache policy snapd
        exit 1
    fi
    if /usr/lib/snapd/snap-confine --version | MATCH unknown; then
        echo "Package build incorrect, 'snap-confine --version' mentions 'unknown'"
        /usr/lib/snapd/snap-confine --version
        apt-cache policy snap-confine
        exit 1
    fi

    mkdir -p /etc/systemd/system/snapd.service.d
    cat <<EOF > /etc/systemd/system/snapd.service.d/local.conf
[Unit]
StartLimitInterval=0
[Service]
Environment=SNAPD_DEBUG_HTTP=7 SNAPD_DEBUG=1 SNAPPY_TESTING=1
EOF
    mkdir -p /etc/systemd/system/snapd.socket.d
    cat <<EOF > /etc/systemd/system/snapd.socket.d/local.conf
[Unit]
StartLimitInterval=0
EOF

    if [ "$REMOTE_STORE" = staging ]; then
        . $TESTSLIB/store.sh
        setup_staging_store
    fi

    # Snapshot the state including core.
    if [ ! -f $SPREAD_PATH/snapd-state.tar.gz ]; then
        ! snap list | grep core || exit 1
        # use parameterized core channel (defaults to edge) instead
        # of a fixed one and close to stable in order to detect defects
        # earlier
        snap install --${CORE_CHANNEL} core
        snap list | grep core

        # ensure no auto-refresh happens during the tests
        snap set core refresh.disabled=true

        echo "Ensure that the grub-editenv list output is empty on classic"
        output=$(grub-editenv list)
        if [ -n "$output" ]; then
            echo "Expected empty grub environment, got:"
            echo "$output"
            exit 1
        fi

        systemctl stop snapd.service snapd.socket

        update_core_snap_for_classic_reexec

        systemctl daemon-reload
        mounts="$(systemctl list-unit-files | grep '^snap[-.].*\.mount' | cut -f1 -d ' ')"
        services="$(systemctl list-unit-files | grep '^snap[-.].*\.service' | cut -f1 -d ' ')"
        for unit in $services $mounts; do
            systemctl stop $unit
        done
        tar czf $SPREAD_PATH/snapd-state.tar.gz /var/lib/snapd /snap /etc/systemd/system/snap-*core*.mount
        systemctl daemon-reload # Workaround for http://paste.ubuntu.com/17735820/
        for unit in $mounts $services; do
            systemctl start $unit
        done
        systemctl start snapd.socket
    fi
}

setup_reflash_magic() {
    # install the stuff we need
    apt-get install -y kpartx busybox-static
    apt_install_local ${GOPATH}/snapd_*.deb
    apt-get clean

    snap install --${CORE_CHANNEL} core

    # install ubuntu-image
    snap install --devmode --edge ubuntu-image

    # needs to be under /home because ubuntu-device-flash
    # uses snap-confine and that will hide parts of the hostfs
    IMAGE_HOME=/home/image
    mkdir -p $IMAGE_HOME

    # modify the core snap so that the current root-pw works there
    # for spread to do the first login
    UNPACKD="/tmp/core-snap"
    unsquashfs -d $UNPACKD /var/lib/snapd/snaps/core_*.snap

    # FIXME: netplan workaround
    mkdir -p $UNPACKD/etc/netplan

    # set root pw by concatenating root line from host and rest from core
    want_pw="$(grep ^root /etc/shadow)"
    echo "$want_pw" > /tmp/new-shadow
    tail -n +2 /etc/shadow >> /tmp/new-shadow
    cp -v /tmp/new-shadow $UNPACKD/etc/shadow
    cp -v /etc/passwd $UNPACKD/etc/passwd

    # ensure spread -reuse works in the core image as well
    if [ -e /.spread.yaml ]; then
        cp -av /.spread.yaml $UNPACKD
    fi

    # we need the test user in the image
    # see the comment in spread.yaml about 12345
    sed -i "s/^test.*$//" $UNPACKD/etc/{shadow,passwd}
    chroot $UNPACKD addgroup --quiet --gid 12345 test
    chroot $UNPACKD adduser --quiet --no-create-home --uid 12345 --gid 12345 --disabled-password --gecos '' test
    echo 'test ALL=(ALL) NOPASSWD:ALL' >> $UNPACKD/etc/sudoers.d/99-test-user
    echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> $UNPACKD/etc/sudoers.d/99-ubuntu-user

    # modify sshd so that we can connect as root
    sed -i 's/\(PermitRootLogin\|PasswordAuthentication\)\>.*/\1 yes/' $UNPACKD/etc/ssh/sshd_config

    # FIXME: install would be better but we don't have dpkg on
    #        the image
    # unpack our freshly built snapd into the new core snap
    dpkg-deb -x ${SPREAD_PATH}/../snapd_*.deb $UNPACKD

    # add gpio and iio slots
    cat >> $UNPACKD/meta/snap.yaml <<-EOF
slots:
    gpio-pin:
        interface: gpio
        number: 100
        direction: out
    iio0:
        interface: iio
        path: /dev/iio:device0
EOF

    # build new core snap for the image
    snapbuild $UNPACKD $IMAGE_HOME

    # FIXME: fetch directly once it's in the assertion service
    cp "$TESTSLIB/assertions/pc-${REMOTE_STORE}.model" $IMAGE_HOME/pc.model

    # FIXME: how to test store update of ubuntu-core with sideloaded snap?
    IMAGE=all-snap-amd64.img

    # ensure that ubuntu-image is using our test-build of snapd with the
    # test keys and not the bundled version of usr/bin/snap from the snap.
    # Note that we can not put it into /usr/bin as '/usr' is different
    # when the snap uses confinement.
    cp /usr/bin/snap $IMAGE_HOME
    export UBUNTU_IMAGE_SNAP_CMD=$IMAGE_HOME/snap

    # download pc-kernel snap for the specified channel
    snap download --channel="$KERNEL_CHANNEL" pc-kernel

    /snap/bin/ubuntu-image -w $IMAGE_HOME $IMAGE_HOME/pc.model \
        --channel edge \
        --extra-snaps $IMAGE_HOME/core_*.snap \
        --extra-snaps $PWD/pc-kernel_*.snap \
        --output $IMAGE_HOME/$IMAGE
    rm ./pc-kernel*

    # mount fresh image and add all our SPREAD_PROJECT data
    kpartx -avs $IMAGE_HOME/$IMAGE
    # FIXME: hardcoded mapper location, parse from kpartx
    mount /dev/mapper/loop2p3 /mnt
    mkdir -p /mnt/user-data/
    cp -ar /home/gopath /mnt/user-data/

    # create test user home dir
    mkdir -p /mnt/user-data/test
    # using symbolic names requires test:test to have the same ids
    # inside and outside which is a pain (see 12345 above), but
    # using the ids directly is the wrong kind of fragile
    chown --verbose test:test /mnt/user-data/test

    # we do what sync-dirs is normally doing on boot, but because
    # we have subdirs/files in /etc/systemd/system (created below)
    # the writeable-path sync-boot won't work
    mkdir -p /mnt/system-data/etc/systemd
    (cd /tmp ; unsquashfs -v $IMAGE_HOME/core_*.snap etc/systemd/system)
    cp -avr /tmp/squashfs-root/etc/systemd/system /mnt/system-data/etc/systemd/

    # FIXUP silly systemd
    mkdir -p /mnt/system-data/etc/systemd/system/snapd.service.d
    cat <<EOF > /mnt/system-data/etc/systemd/system/snapd.service.d/local.conf
[Unit]
StartLimitInterval=0
[Service]
Environment=SNAPD_DEBUG_HTTP=7 SNAPD_DEBUG=1 SNAPPY_TESTING=1 SNAPPY_USE_STAGING_STORE=$SNAPPY_USE_STAGING_STORE
ExecStartPre=/bin/touch /dev/iio:device0
EOF
    mkdir -p /mnt/system-data/etc/systemd/system/snapd.socket.d
    cat <<EOF > /mnt/system-data/etc/systemd/system/snapd.socket.d/local.conf
[Unit]
StartLimitInterval=0
EOF

    umount /mnt
    kpartx -d $IMAGE_HOME/$IMAGE

    # the reflash magic
    # FIXME: ideally in initrd, but this is good enough for now
    cat > $IMAGE_HOME/reflash.sh << EOF
#!/bin/sh -ex
mount -t tmpfs none /tmp
cp /bin/busybox /tmp
cp $IMAGE_HOME/$IMAGE /tmp
sync
# blow away everything
/tmp/busybox dd if=/tmp/$IMAGE of=/dev/sda bs=4M
# and reboot
/tmp/busybox sync
/tmp/busybox echo b > /proc/sysrq-trigger
EOF
    chmod +x $IMAGE_HOME/reflash.sh

    # extract ROOT from /proc/cmdline
    ROOT=$(cat /proc/cmdline | sed -e 's/^.*root=//' -e 's/ .*$//')
    cat >/boot/grub/grub.cfg <<EOF
set default=0
set timeout=2
menuentry 'flash-all-snaps' {
    linux /vmlinuz root=$ROOT ro init=$IMAGE_HOME/reflash.sh console=ttyS0
    initrd /initrd.img
}
EOF
}

prepare_all_snap() {
    # we are still a "classic" image, prepare the surgery
    if [ -e /var/lib/dpkg/status ]; then
        setup_reflash_magic
        REBOOT
    fi

    # verify after the first reboot that we are now in the all-snap world
    if [ $SPREAD_REBOOT = 1 ]; then
        echo "Ensure we are now in an all-snap world"
        if [ -e /var/lib/dpkg/status ]; then
            echo "Rebooting into all-snap system did not work"
            exit 1
        fi
    fi

    echo "Wait for firstboot change to be ready"
    while ! snap changes | grep "Done"; do
        snap changes || true
        snap change 1 || true
        sleep 1
    done

    echo "Ensure fundamental snaps are still present"
    . $TESTSLIB/names.sh
    for name in $gadget_name $kernel_name $core_name; do
        if ! snap list | grep $name; then
            echo "Not all fundamental snaps are available, all-snap image not valid"
            echo "Currently installed snaps"
            snap list
            exit 1
        fi
    done

    # ensure no auto-refresh happens during the tests
    snap set core refresh.disabled=true

    # Snapshot the fresh state (including boot/bootenv)
    if [ ! -f $SPREAD_PATH/snapd-state.tar.gz ]; then
        # we need to ensure that we also restore the boot environment
        # fully for tests that break it
        BOOT=""
        if ls /boot/uboot/*; then
            BOOT=/boot/uboot/
        elif ls /boot/grub/*; then
            BOOT=/boot/grub/
        else
            echo "Cannot determine bootdir in /boot:"
            ls /boot
            exit 1
        fi

        systemctl stop snapd.service snapd.socket
        tar czf $SPREAD_PATH/snapd-state.tar.gz /var/lib/snapd $BOOT
        systemctl start snapd.socket
    fi
}
michihenning/snapd
tests/lib/prepare.sh
Shell
gpl-3.0
12685
cp lib/JCuda-All-0.6.0-bin-linux-x86_64/* lib/
cp lib/JCuda-All-0.6.0-bin-apple-x86_64/* lib/

java -Dfile.encoding=UTF8 -Xmx1536M -Xss1M -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=256m -jar sbt-launch-*.jar "start-script"
tberg12/ocular
make_run_script.sh
Shell
gpl-3.0
227
#!/bin/sh
# Script called when pressing the ON/OFF button once

echo "Triggered shutdown" | ulogger -t "ShortPress" -p I

BLDC_Test_Bench -M 2 &
/bin/ardrone3_shutdown.sh
stiliajohny/Bebop-On-Off-Button
shortpress_1.sh
Shell
gpl-3.0
172
#!/bin/bash

mkdir build
cd build
cmake .. -DCMAKE_INSTALL_PREFIX=$PREFIX -DOPENSSL_ROOT_DIR=$PREFIX -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DCMAKE_PREFIX_PATH=$PREFIX
make -j2
make install
fallen/artiq
conda/libssh2/build.sh
Shell
gpl-3.0
215
#!/bin/bash PIDFILE="/home/radja/rsyncd.pid" RSYNC_CONFIG="/home/radja/.snap-rsyncd.conf" RSYNC_LOG="/home/radja/rsync.log" LOG="/home/radja/create-snapshot.log" USER="radja" RUSER="${USER}" LOCALPORT=9999 REMOTEPORT=8888 REMOTEHOST="external" PID="" CMD="hourly" { for i in monthly weekly daily hourly; do [ -n "`echo $@ | grep $i`" ] && CMD=$i done echo Starting: `date` [ -f ${PIDFILE} ] && echo "ERROR: ${PIDFILE} already exists" >&2 && exit 1 trap '[ -n "${PID}" ] && kill ${PID}' SIGHUP SIGINT SIGQUIT SIGTERM rsync --daemon --config="${RSYNC_CONFIG}" --address=127.0.0.1 --port="${LOCALPORT}" --log-file="${RSYNC_LOG}" -v sleep 3 [ -f ${PIDFILE} ] || { echo "ERROR: ${PIDFILE} doesn't exists" >&2 && exit 2; } PID=`cat ${PIDFILE}` sudo -u "${USER}" -H ssh -R localhost:${REMOTEPORT}:localhost:"${LOCALPORT}" "${RUSER}"@"${REMOTEHOST}" sudo snapshot-hourly.sh "$CMD" ##{ ps p ${PID} >& /dev/null && ps p $! >& /dev/null; } || echo "ERROR: Some of necessary processes does not exist" >&2 && cleanup_and_exit 2 ##sudo -Hu radja ssh radja@"${REMOTEHOST}" sudo snapshot-hourly.sh #assert $PID [ -n "${PID}" ] && kill ${PID} # TODO: send e-main messages # TODO: clean it # TODO: check all configuration files (and executable files) have correct permissions (root only write) if we launch process by root # TODO: more consistent programms' names and conf- files' names # TODO: configuration files for "server" and "client" scripts (port for communication etc) # TODO: back backups (from server to laptop): database and web-applications echo Ending: `date` } 1>>${LOG} 2>&1
mutanabbi/rrb
create-snapshot.sh
Shell
gpl-3.0
1672
#!/bin/bash
#
# GenesisPHP - Crear Común
#
# Copyright (C) 2016 Guillermo Valdes Lozano [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

# I am
SOY="[Crear Común]"

# Constants that define the error types
EXITO=0
E_FATAL=99

# Check that the Eva directory exists
if [ ! -d ../Eva ]; then
    echo "$SOY ERROR: The Eva directory does not exist."
    echo "     You must run this script from the system directory."
    echo "     Or better with Dios.sh, which takes care of this."
    exit $E_FATAL
fi

# Check that the adan/lib directory exists
if [ ! -d adan/lib ]; then
    echo "$SOY ERROR: The adan/lib directory does not exist. The IniciarNuevoSistema.sh script should have created it."
    exit $E_FATAL
fi

# Check that the adan/lib/Semillas directory exists
if [ ! -d adan/lib/Semillas ]; then
    echo "$SOY ERROR: The adan/lib/Semillas directory does not exist. The IniciarNuevoSistema.sh script should have created it."
    exit $E_FATAL
fi

# Create symlinks for adan/lib/*
cd adan/lib
for DIR in `ls ../../../Eva/adan/lib`
do
    if [ ! -h $DIR ]; then
        echo "$SOY Creating the symlink adan/lib/$DIR..."
        ln -s ../../../Eva/adan/lib/$DIR
        if [ "$?" -ne $EXITO ]; then
            echo "$SOY ERROR: Could not create the symlink for adan/lib/$DIR"
            exit $E_FATAL
        fi
    fi
done
cd ../../

# If htdocs exists it will be deleted
if [ -d "htdocs" ]; then
    echo "$SOY DELETING the htdocs directories and files..."
    rm -rf htdocs
    if [ "$?" -ne $EXITO ]; then
        echo "$SOY ERROR: Could not delete htdocs"
        exit $E_FATAL
    fi
fi

# Create the htdocs directory
echo "$SOY Creating the htdocs directory..."
mkdir htdocs
if [ "$?" -ne $EXITO ]; then
    echo "$SOY ERROR: Could not create the htdocs directory"
    exit $E_FATAL
fi

# Change to the htdocs directory
echo "$SOY Changing to htdocs..."
cd htdocs

# Copy the root files
echo "$SOY Copying the root PHP files..."
cp ../../Eva/htdocs/*.php .
if [ "$?" -ne $EXITO ]; then
    echo "$SOY ERROR: Could not copy the root files."
    exit $E_FATAL
fi

# Copy favicon
if [ -e ../../Eva/htdocs/favicon.ico ]; then
    echo "$SOY Copying favicon.ico..."
    cp ../../Eva/htdocs/favicon.ico .
    if [ "$?" -ne $EXITO ]; then
        echo "$SOY ERROR: Could not copy favicon.ico"
        exit $E_FATAL
    fi
fi

# Copy directories
for DIR in bin css fonts img imagenes js
do
    echo "$SOY Copying $DIR..."
    cp -r ../../Eva/htdocs/$DIR .
    if [ "$?" -ne $EXITO ]; then
        echo "$SOY ERROR: Could not copy $DIR"
        exit $E_FATAL
    fi
done
if [ -d imagenes/pruebas ]; then
    rm -rf imagenes/pruebas
fi

# Create the htdocs/lib directory
echo "$SOY Creating the htdocs/lib directory..."
mkdir lib
if [ "$?" -ne $EXITO ]; then
    echo "$SOY ERROR: Could not create the htdocs/lib directory"
    exit $E_FATAL
fi

# Change to the htdocs/lib directory
echo "$SOY Changing to htdocs/lib..."
cd lib
if [ "$?" -ne $EXITO ]; then
    echo "$SOY ERROR: Could not change to htdocs/lib"
    exit $E_FATAL
fi

# Copy the htdocs/lib directories
for DIR in AdmAutentificaciones AdmBitacora AdmCadenero AdmDepartamentos AdmIntegrantes AdmModulos AdmRoles AdmSesiones AdmUsuarios Base2 Inicio Michelf Personalizar
do
    echo "$SOY Copying $DIR..."
    cp -r ../../../Eva/htdocs/lib/$DIR .
    if [ "$?" -ne $EXITO ]; then
        echo "$SOY ERROR: Could not copy $DIR"
        exit $E_FATAL
    fi
done

# Change to the htdocs/bin directory
echo "$SOY Changing to htdocs/bin..."
cd ../bin
if [ "$?" -ne $EXITO ]; then
    echo "$SOY ERROR: Could not change to htdocs/bin"
    exit $E_FATAL
fi

# Create symlinks in bin
echo "$SOY Creating symlink for lib in bin..."
ln -s ../lib .
echo "$SOY Creating symlink for imagenes in bin..."
ln -s ../imagenes .

echo "$SOY Script finished."
exit $EXITO
guivaloz/GenesisPHP
Eva/adan/bin/CrearComun.sh
Shell
gpl-3.0
4459
# Git utility functions

GIT_TRACKED=""
GIT_TRACKED_FILLED=0

git_is_tracked() {
    local file="$1"

    # Fill list of tracked files only once
    if [ $GIT_TRACKED_FILLED -eq 0 ]; then
        GIT_TRACKED="$(git ls-files)"
        GIT_TRACKED_FILLED=1
        #echo "--------"
        #echo "$GIT_TRACKED"
        #echo "--------"
    fi

    # Remove leading './' from filename if any
    file="$(echo $file | sed 's:^\.\/::')"

    echo "$GIT_TRACKED" | grep -q "$file"
}

git_has_changes() {
    local file="$1"

    ! git diff-files --quiet "$file"
}

git_list_staged() {
    git diff --cached --name-only --diff-filter=d | \
        tr '\n' ' '
}
elboulangero/Overcooked
scripts/code/lib-git.sh
Shell
gpl-3.0
622
#!/bin/sh
docker build -t nginx_dynamic .
alexellis/docker-arm
images/nginx_dynamic/build.sh
Shell
gpl-3.0
44