code: string, lengths 2 to 1.05M
repo_name: string, lengths 5 to 110
path: string, lengths 3 to 922
language: string, 1 class
license: string, 15 classes
size: int64, 2 to 1.05M
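A minimal sketch of querying rows with this schema, assuming the dump is exported as JSON Lines with one {code, repo_name, path, language, license, size} object per line (the export step and the file name shell_scripts.jsonl are assumptions, not part of this dump):

# list the path of every MIT-licensed script smaller than 1 KB
jq -r 'select(.license == "mit" and .size < 1024) | .path' shell_scripts.jsonl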
#!/bin/bash
# Run in the tools directory.
cd "$(dirname $0)"

# Setup and build bindgen.
if [ "$(uname)" == "Linux" ]; then
    export LIBCLANG_PATH=/usr/lib/llvm-3.8/lib;
else
    export LIBCLANG_PATH=`brew --prefix llvm38`/lib/llvm-3.8/lib;
fi

# Make sure we have llvm38.
if [ ! -x "$(command -v clang++-3.8)" ]; then
    echo "llvm38 must be installed. Mac users should |brew install llvm38|, Linux varies by distro."
    exit 1
fi

export LD_LIBRARY_PATH=$LIBCLANG_PATH
export DYLD_LIBRARY_PATH=$LIBCLANG_PATH

# Check for multirust
if [ ! -x "$(command -v multirust)" ]; then
    echo "multirust must be installed."
    exit 1
fi

# Don't try to clone twice.
if [ ! -d rust-bindgen ]; then
    git clone https://github.com/ecoal95/rust-bindgen.git
    cd rust-bindgen
    git checkout sm-hacks-rebase-squashed
else
    cd rust-bindgen
fi

multirust override nightly
cargo build
aeischeid/servo
ports/geckolib/tools/setup_bindgen.sh
Shell
mpl-2.0
873
#!/bin/bash
cat "$1" | cut -d ',' -f 2 | tr 'ATCG' 'TAGC' | paste -d, "$1" - | cut -d ',' -f 1,3 > "$2"
cfe-lab/Kive
kive/FixtureFiles/execute_result_tests_rm/Sandboxes/useralice_run4_F0DdJj/step2/complement.sh
Shell
bsd-3-clause
120
#for file in dm0; do for slice in `seq 1 3`; do xcombine_vol_data ../topo_input/slice_file${slice} $file topo OUTPUT_SUM . 0; mv ${file}.mesh ${file}_${slice}.mesh; done; done
#for file in mu_kernel kappa_kernel; do xcombine_vol_data ../topo_input/slice_file $file topo OUTPUT_SUM . 0; done
for file in mu_kernel_smooth kappa_kernel_smooth; do xcombine_vol_data ../topo_input/slice_file $file topo OUTPUT_SUM . 0; done
kbai/specfem3d
utils/ADJOINT_TOMOGRAPHY_TOOLS/iterate_adj/cluster/sum_kernel/combine.bash
Shell
gpl-2.0
421
#!/bin/bash
#
# mta-server-with-linux-libs.sh
# mta-server64-with-linux-libs.sh
#
# Launch mta-server after adding 'linux-libs' to the library search path
#
if [[ "$0" != *"64"* ]]; then
    # 32 bit
    LAUNCHER="mta-server"
    LINUXLIBS="x86/linux-libs"
else
    # 64 bit
    LAUNCHER="mta-server64"
    LINUXLIBS="x64/linux-libs"
fi

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
LIBDIR=$DIR"/"$LINUXLIBS

if [[ "$LD_LIBRARY_PATH" == "" ]]; then
    # LD_LIBRARY_PATH is empty
    export LD_LIBRARY_PATH=$LIBDIR
elif [[ "$LD_LIBRARY_PATH" != *"$LIBDIR"* ]]; then
    # LD_LIBRARY_PATH does not contain LIBDIR (entries are colon-separated)
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH":"$LIBDIR
fi

# Pass command line arguments to launcher
CMD="$DIR/$LAUNCHER $*"
$CMD
exit $?
Necktrox/mtasa-blue
Shared/data/linux-libs/mta-server64-with-linux-libs.sh
Shell
gpl-3.0
762
#!/bin/bash
##########################################################
# This script provides final build automation for the repo.
# It's responsible for pushing the build. Once your
# branch has been merged by master then use the pull script
# to finalize syncing up your git and github repos.
#
# PRE-REQS:
# 1. Make sure you have node, npm and grunt installed
# 2. Before you can use this repo: npm init
#
#
# INSTRUCTIONS:
#
# 0. MAKE SURE you look thru the steps here and that everything is in order before you hit the button. The only file
#    that should be pending for commit is CHANGELOG and/or package.json.
# 1. Make sure in package.json to name your releases after the version number, such as v2.0.0, v2.0.1, v2.1.1, etc.
# 2. To submit changes to your github branch: npm run push. You can still make changes on this branch if you need to.
# 3. After those changes have been merged on esri/master then sync up gh-pages: npm run pull
# 4. After those changes have been merged on esri/gh-pages: 'git pull upstream gh-pages' then 'git push origin gh-pages'
#
##########################################################

# SET VARS
VERSION=$(node --eval "console.log(require('./package.json').version);")
NAME=$(node --eval "console.log(require('./package.json').name);")

# Checkout temp branch for release
git checkout v$VERSION

# Add files, get ready to commit. CHANGELOG should have pending changes
read -p "Press [Enter] to add git files..."
git add CHANGELOG.md
git add package.json
git add dist

# Create the release zip file
echo "creating zip of /dist"
zip -r $NAME-v$VERSION.zip dist

# Run gh-release to create the tag and push release to github
read -p "Press [Enter] to push a release file..."
gh-release --assets $NAME-v$VERSION.zip

# Remove the temp zip file
rm $NAME-v$VERSION.zip

# Commit changes + commit message
git commit -m "$VERSION"

# Push to origin
read -p "Press [Enter] to push commits to origin..."
git push origin v$VERSION

echo "zip file deleted"
echo "push script: done"
echo "Go to your github branch $VERSION, review changes then create pull request to esri/master"
echo "Once the PR is accepted and merged then run the pull script"
EnzeZY/offline-editor-js
scripts/push.sh
Shell
apache-2.0
2,181
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

rm -rf gen-javabean gen-py py
rm -rf jvm/org/apache/storm/generated
thrift --gen java:beans,hashcode,nocamel,generated_annotations=undated --gen py:utf8strings storm.thrift
for file in gen-javabean/org/apache/storm/generated/* ; do
    cat java_license_header.txt ${file} > ${file}.tmp
    mv -f ${file}.tmp ${file}
done
cat py_license_header.txt gen-py/__init__.py > gen-py/__init__.py.tmp
mv gen-py/__init__.py.tmp gen-py/__init__.py
for file in gen-py/storm/* ; do
    cat py_license_header.txt ${file} > ${file}.tmp
    mv -f ${file}.tmp ${file}
done
mkdir -p jvm/org/apache/storm
mv gen-javabean/org/apache/storm/generated jvm/org/apache/storm/generated
mv gen-py py
rm -rf gen-javabean
F30/storm
storm-client/src/genthrift.sh
Shell
apache-2.0
1,485
#!/bin/sh
# Run ACATS with the GNU Ada compiler

# The following functions are to be customized if you run in cross
# environment or want to change compilation flags.  Note that for
# tests requiring checks not turned on by default, this script
# automatically adds the needed flags to pass (ie: -gnato or -gnatE).

# gccflags="-O3 -fomit-frame-pointer -funroll-all-loops -finline-functions"
# gnatflags="-gnatN"

gccflags="-O2"
gnatflags="-gnatws"

target_run () {
  eval $EXPECT -f $testdir/run_test.exp $*
}

# End of customization section.

display_noeol () {
  printf "$@"
  printf "$@" >> $dir/acats.sum
  printf "$@" >> $dir/acats.log
}

display () {
  echo "$@"
  echo "$@" >> $dir/acats.sum
  echo "$@" >> $dir/acats.log
}

log () {
  echo "$@" >> $dir/acats.sum
  echo "$@" >> $dir/acats.log
}

dir=`${PWDCMD-pwd}`

if [ "$testdir" = "" ]; then
   echo You must use make check or make check-ada
   exit 1
fi

if [ "$dir" = "$testdir" ]; then
  echo "error: srcdir must be different than objdir, exiting."
  exit 1
fi

target_gnatchop () {
  gnatchop --GCC="$GCC_DRIVER" $*
}

target_gnatmake () {
  echo gnatmake --GCC=\"$GCC\" $gnatflags $gccflags $* -largs $EXTERNAL_OBJECTS --GCC=\"$GCC\"
  gnatmake --GCC="$GCC" $gnatflags $gccflags $* -largs $EXTERNAL_OBJECTS --GCC="$GCC"
}

target_gcc () {
  $GCC $gccflags $*
}

clean_dir () {
  rm -f "$binmain" *.o *.ali > /dev/null 2>&1
}

find_main () {
  ls ${i}?.adb > ${i}.lst 2> /dev/null
  ls ${i}*m.adb >> ${i}.lst 2> /dev/null
  ls ${i}.adb >> ${i}.lst 2> /dev/null
  main=`tail -1 ${i}.lst`
}

EXTERNAL_OBJECTS=""
# Global variable to communicate external objects to link with.

rm -f $dir/acats.sum $dir/acats.log

display "Test Run By $USER on `date`"

display " === acats configuration ==="

target=`$GCC -dumpmachine`

display target gcc is $GCC
display `$GCC -v 2>&1`
display host=`gcc -dumpmachine`
display target=$target
display `type gnatmake`
gnatls -v >> $dir/acats.log
display ""

display " === acats support ==="
display_noeol "Generating support files..."

rm -rf $dir/support
mkdir -p $dir/support
cd $dir/support

cp $testdir/support/*.ada $testdir/support/*.a $testdir/support/*.tst $dir/support

# Find out the size in bit of an address on the target
target_gnatmake $testdir/support/impbit.adb >> $dir/acats.log 2>&1
target_run $dir/support/impbit > $dir/support/impbit.out 2>&1
target_bit=`cat $dir/support/impbit.out`
echo target_bit="$target_bit" >> $dir/acats.log

# Find out a suitable asm statement
# Adapted from configure.ac gcc_cv_as_dwarf2_debug_line
case "$target" in
  ia64*-*-* | s390*-*-*)
    target_insn="nop 0"
    ;;
  mmix-*-*)
    target_insn="swym 0"
    ;;
  *)
    target_insn="nop"
    ;;
esac
echo target_insn="$target_insn" >> $dir/acats.log

sed -e "s,ACATS4GNATDIR,$dir,g" \
  < $testdir/support/impdef.a > $dir/support/impdef.a
sed -e "s,ACATS4GNATDIR,$dir,g" \
  -e "s,ACATS4GNATBIT,$target_bit,g" \
  -e "s,ACATS4GNATINSN,$target_insn,g" \
  < $testdir/support/macro.dfs > $dir/support/MACRO.DFS
sed -e "s,ACATS4GNATDIR,$dir,g" \
  < $testdir/support/tsttests.dat > $dir/support/TSTTESTS.DAT

cp $testdir/tests/cd/*.c $dir/support
cp $testdir/tests/cxb/*.c $dir/support
grep -v '^#' $testdir/norun.lst | sort > $dir/support/norun.lst

rm -rf $dir/run
mv $dir/tests $dir/tests.$$ 2> /dev/null
rm -rf $dir/tests.$$ &
mkdir -p $dir/run

cp -pr $testdir/tests $dir/

for i in $dir/support/*.ada $dir/support/*.a; do
   host_gnatchop $i >> $dir/acats.log 2>&1
done

# These tools are used to preprocess some ACATS sources
# they need to be compiled native on the host.

host_gnatmake -q -gnatws macrosub.adb
if [ $? -ne 0 ]; then
   display "**** Failed to compile macrosub"
   exit 1
fi
./macrosub > macrosub.out 2>&1

gcc -c cd300051.c
host_gnatmake -q -gnatws widechr.adb
if [ $? -ne 0 ]; then
   display "**** Failed to compile widechr"
   exit 1
fi
./widechr > widechr.out 2>&1

rm -f $dir/support/macrosub
rm -f $dir/support/widechr
rm -f $dir/support/*.ali
rm -f $dir/support/*.o

display " done."

# From here, all compilations will be made by the target compiler

display_noeol "Compiling support files..."

target_gcc -c *.c
if [ $? -ne 0 ]; then
   display "**** Failed to compile C code"
   exit 1
fi

target_gnatchop *.adt >> $dir/acats.log 2>&1

target_gnatmake -c -gnato -gnatE *.ads >> $dir/acats.log 2>&1
target_gnatmake -c -gnato -gnatE *.adb >> $dir/acats.log 2>&1

display " done."
display ""
display " === acats tests ==="

if [ $# -eq 0 ]; then
   chapters=`cd $dir/tests; echo [a-z]*`
else
   chapters=$*
fi

glob_countn=0
glob_countok=0
glob_countu=0

for chapter in $chapters; do
   display Running chapter $chapter ...

   if [ ! -d $dir/tests/$chapter ]; then
      display "*** CHAPTER $chapter does not exist, skipping."
      display ""
      continue
   fi

   cd $dir/tests/$chapter
   ls *.a *.ada *.adt *.am *.dep 2> /dev/null | sed -e 's/\(.*\)\..*/\1/g' | \
   cut -c1-7 | sort | uniq | comm -23 - $dir/support/norun.lst \
     > $dir/tests/$chapter/${chapter}.lst
   countn=`wc -l < $dir/tests/$chapter/${chapter}.lst`
   glob_countn=`expr $glob_countn + $countn`
   counti=0
   for i in `cat $dir/tests/$chapter/${chapter}.lst`; do
      counti=`expr $counti + 1`
      extraflags=""
      grep $i $testdir/overflow.lst > /dev/null 2>&1
      if [ $? -eq 0 ]; then
         extraflags="$extraflags -gnato"
      fi
      grep $i $testdir/elabd.lst > /dev/null 2>&1
      if [ $? -eq 0 ]; then
         extraflags="$extraflags -gnatE"
      fi
      grep $i $testdir/stackcheck.lst > /dev/null 2>&1
      if [ $? -eq 0 ]; then
         extraflags="$extraflags -fstack-check"
      fi
      grep $i $testdir/ada95.lst > /dev/null 2>&1
      if [ $? -eq 0 ]; then
         extraflags="$extraflags -gnat95"
      fi
      test=$dir/tests/$chapter/$i
      mkdir $test && cd $test >> $dir/acats.log 2>&1

      if [ $? -ne 0 ]; then
         display "FAIL: $i"
         failed="${failed}${i} "
         clean_dir
         continue
      fi

      target_gnatchop -c -w `ls ${test}*.a ${test}*.ada ${test}*.adt ${test}*.am ${test}*.dep 2> /dev/null` >> $dir/acats.log 2>&1
      main=""
      find_main
      if [ -z "$main" ]; then
         sync
         find_main
      fi
      binmain=`echo $main | sed -e 's/\(.*\)\..*/\1/g'`
      echo "BUILD $main" >> $dir/acats.log
      EXTERNAL_OBJECTS=""
      case $i in
        cxb30*) EXTERNAL_OBJECTS="$dir/support/cxb30040.o $dir/support/cxb30060.o $dir/support/cxb30130.o $dir/support/cxb30131.o";;
        ca1020e) rm -f ca1020e_func1.adb ca1020e_func2.adb ca1020e_proc1.adb ca1020e_proc2.adb > /dev/null 2>&1;;
        ca14028) rm -f ca14028_func2.ads ca14028_func3.ads ca14028_proc1.ads ca14028_proc3.ads > /dev/null 2>&1;;
        cxh1001) extraflags="-a -f"; echo "pragma Normalize_Scalars;" > gnat.adc
      esac
      if [ "$main" = "" ]; then
         display "FAIL: $i"
         failed="${failed}${i} "
         clean_dir
         continue
      fi

      target_gnatmake $extraflags -I$dir/support $main >> $dir/acats.log 2>&1
      if [ $? -ne 0 ]; then
         display "FAIL: $i"
         failed="${failed}${i} "
         clean_dir
         continue
      fi

      echo "RUN $binmain" >> $dir/acats.log
      cd $dir/run
      if [ ! -x $dir/tests/$chapter/$i/$binmain ]; then
         sync
      fi
      target_run $dir/tests/$chapter/$i/$binmain > $dir/tests/$chapter/$i/${i}.log 2>&1
      cd $dir/tests/$chapter/$i
      cat ${i}.log >> $dir/acats.log
      egrep -e '(==== |\+\+\+\+ |\!\!\!\! )' ${i}.log > /dev/null 2>&1
      if [ $? -ne 0 ]; then
         grep 'tasking not implemented' ${i}.log > /dev/null 2>&1
         if [ $? -ne 0 ]; then
            display "FAIL: $i"
            failed="${failed}${i} "
         else
            log "UNSUPPORTED: $i"
            glob_countn=`expr $glob_countn - 1`
            glob_countu=`expr $glob_countu + 1`
         fi
      else
         log "PASS: $i"
         glob_countok=`expr $glob_countok + 1`
      fi
      clean_dir
   done
done

display " === acats Summary ==="
display "# of expected passes $glob_countok"
display "# of unexpected failures `expr $glob_countn - $glob_countok`"

if [ $glob_countu -ne 0 ]; then
   display "# of unsupported tests $glob_countu"
fi

if [ $glob_countok -ne $glob_countn ]; then
   display "*** FAILURES: $failed"
fi

display "$0 completed at `date`"

exit 0
SanDisk-Open-Source/SSD_Dashboard
uefi/gcc/gcc-4.6.3/gcc/testsuite/ada/acats/run_all.sh
Shell
gpl-2.0
8,370
#!/usr/bin/env bash

# **zaqar.sh**

# Sanity check that Zaqar started if enabled

echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
echo "*********************************************************************"

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Print the commands being run so that we can see the command that triggers
# an error.  It is also useful for following along as the install occurs.
set -o xtrace

# Settings
# ========

# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)

# Import common functions
source $TOP_DIR/functions

# Import configuration
source $TOP_DIR/openrc

# Import exercise configuration
source $TOP_DIR/exerciserc

is_service_enabled zaqar-server || exit 55

$CURL_GET http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"

set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End DevStack Exercise: $0"
echo "*********************************************************************"
pacinete/devstack
exercises/zaqar.sh
Shell
apache-2.0
1,244
#!/bin/bash
set -eu -o pipefail

# create destination folder
OUTDIR=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $OUTDIR

# install files
chmod +x $SRC_DIR/ShortStack
# https://bioconda.github.io/troubleshooting.html#usr-bin-perl-or-usr-bin-python-not-found
sed -i.bak '1 s|^.*$|#!/usr/bin/env perl|g' $SRC_DIR/ShortStack
cp $SRC_DIR/ShortStack $OUTDIR
cp $SRC_DIR/README $OUTDIR

# create executables
BINDIR=$PREFIX/bin
mkdir -p $BINDIR
ShortStack=$BINDIR/ShortStack
echo "#!/bin/bash" > $ShortStack
echo 'DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )' >> $ShortStack
echo '$DIR/../share/'$(basename $OUTDIR)/ShortStack '$@' >> $ShortStack
chmod +x $ShortStack
ostrokach/bioconda-recipes
recipes/shortstack/build.sh
Shell
mit
691
#! /bin/sh
#
# Some of these should really be done by man2html
#
# The ~/xxx links don't really work -- netscape doesn't expand the home
# directory of the user running navigator
#
sed -e 's|<B>gnu.bash.bug</B>|<A HREF="news:gnu.bash.bug">gnu.bash.bug</A>|g' \
    -e 's|<I>/bin/bash</I>|<A HREF="file:/bin/bash"><I>/bin/bash</I></A>|g' \
    -e 's|<I>/etc/profile</I>|<A HREF="file:/etc/profile"><I>/etc/profile</I></A>|g' \
    -e 's|<I>~/.bash_profile</I>|<A HREF="file:~/.bash_profile"><I>~/.bash_profile</I></A>|g' \
    -e 's|<I>~/.bash_login</I>|<A HREF="file:~/.bash_login"><I>~/.bash_login</I></A>|g' \
    -e 's|<I>~/.profile</I>|<A HREF="file:~/.profile"><I>~/.profile</I></A>|g' \
    -e 's|<I>~/.bashrc</I>|<A HREF="file:~/.bashrc"><I>~/.bashrc</I></A>|g' \
    -e 's|<I>~/.bash_logout</I>|<A HREF="file:~/.bash_logout"><I>~/.bash_logout</I></A>|g' \
    -e 's|<I>~/.bash_history</I>|<A HREF="file:~/.bash_history"><I>~/.bash_history</I></A>|g' \
    -e 's|<I>~/.inputrc</I>|<A HREF="file:~/.inputrc"><I>~/.inputrc</I></A>|g' \
    -e 's|<I>/etc/inputrc</I>|<A HREF="file:/etc/inputrc"><I>/etc/inputrc</I></A>|g'
coreswitch/zebra
vendor/github.com/coreswitch/openconfigd/cli/doc/htmlpost.sh
Shell
apache-2.0
1,127
set -u -e

. ~/redishub/bin/rhlogging.sh

bot=${bot-}

if [ $# -gt 0 ]
then
  if echo "$1" | grep '^[a-z][\-a-z]*_bot$'
  then
    bot=`echo "$1" | sed -n 's/^[^_]*_bot$/\1/p' || echo ''`
  fi
fi

echo bot [$bot]

[ -n "$bot" ]

cd ~/.bot.$bot || cd ~/.${bot}_bot || cd ~/.${bot}

pwd
ls -l
echo

botToken=`cat token`
botSecret=`cat secret`

rhinfo botToken $botToken
rhinfo botSecret $botSecret >&1

echo "curl -s https://api.telegram.org/bot$botToken/getMe"

curlb() {
  curl -s "https://api.telegram.org/bot$botToken/$1" > res
  if head -1 res | grep '^["\[{]'
  then
    cat res | python -mjson.tool
  else
    cat res
  fi
}

c0getMe() {
  curlb getMe
}

c0getUpdates() {
  curlb getUpdates?limit=100
}

c1getUpdates() {
  offset=$1
  # quote the query string so the ampersand is not treated as a job control operator
  curlb "getUpdates?offset=$offset&limit=100" | python -mjson.tool
}

c2sendMessage() {
  chatId=$1
  text=$2
  uri="sendMessage?chat_id=$chatId&text=$text"
  echo uri "$uri"
  curlb "$uri"
}

jsontool() {
  python -mjson.tool
}

c0testWebhook() {
  webhookUrl=`cat ~/.bot.redishub/webhook.url`
  echo "webhookUrl $webhookUrl"
  echo "curl -s '$webhookUrl'"
  curl -s -X POST -d '"test data"' "$webhookUrl"
}

c1setWebhook() {
  webhookUrl=$1
  echo "webhookUrl $webhookUrl"
  echo | openssl s_client -connect api.telegram.org:443 | grep 'CN='
  openssl x509 -text -in cert.pem | grep 'CN='
  rhinfo "curl -s -F [email protected] 'https://api.telegram.org/bot$botToken/setWebhook?url=$webhookUrl'"
  if ! curl -s "https://api.telegram.org/bot$botToken/setWebhook?url=$webhookUrl" > res
  then
    >&2 echo "curl $?"
  else
    echo >> res
    cat res | jsontool
  fi
}

c0setWebhook() {
  webhookUrl=`cat webhook.url`
  c1setWebhook $webhookUrl
}

c0testWebhook() {
  webhookUrl=`cat ~/.bot.redishub/webhook.url`
  curl -v -s -X POST -d '{
    "message": {
      "text": "verify",
      "from": { "id": 1234, "username": "test" },
      "chat": { "chat_id": 3456 }
    }
  }' $webhookUrl
}

command=getMe
if [ $# -gt 0 ]
then
  command=$1
  shift
fi

c$#$command $@
evanx/rquery
scripts/telegram.sh
Shell
isc
2,050
#!/usr/bin/bash

rm -rf src-book
mkdir src-book

pushd src
for SRCFILE in $(find . -name "*.md")
do
    mdexec -template='```sh {{.Output}}```' $SRCFILE > ../src-book/$SRCFILE
done
popd

mdbook build

rsync -a src/webshots html-book/
rsync -a src/consoleshots html-book/
howeyc/ledger
ledger/book/genbook.bash
Shell
isc
267
#!/bin/bash
#
# domain name should be a fqdn e.g. vmhost.somedomain.com
# disk image source should be a directory containing 1 or more disk.img files
# and a cloud-init seed image named config.img
#
if [ $# -lt 2 ]; then
    echo "Usage $0 <domain name> <disk image source>"
    exit 1
fi

GUEST=$1
IMAGE_SRC=$2
IMAGE_DEST="/var/lib/libvirt/images/${GUEST}"
VMEM=1024
VCPUS=2
VROOTSIZE=20G
NETWORK="bridge=virbr0,model=virtio"

sudo mkdir -p $IMAGE_DEST
sudo qemu-img resize $IMAGE_SRC/disk.img +${VROOTSIZE}
sudo cp $IMAGE_SRC/disk.img* $IMAGE_DEST
sudo cp $IMAGE_SRC/config.img $IMAGE_DEST
sudo chown libvirt-qemu:kvm $IMAGE_DEST/*.img*

virt-install \
    --name ${GUEST} \
    --ram ${VMEM} \
    --vcpus=${VCPUS} \
    --autostart \
    --memballoon virtio \
    --graphics vnc,listen=0.0.0.0 \
    --network ${NETWORK} \
    --boot hd \
    --disk path=/var/lib/libvirt/images/${GUEST}/disk.img,format=qcow2,bus=virtio \
    --disk path=/var/lib/libvirt/images/${GUEST}/config.img,bus=virtio \
    --noautoconsole
ajbonner/unix
libvirt/virt-inst.sh
Shell
mit
993
#!/bin/sh
docker build -t tools/maven:3.2.5 .
pesan/docker-maven
build.sh
Shell
mit
46
#!/bin/sh
# emit_warnings_for_todos.sh
#
# Created by Alexander I. Kovács on 2/11/14.
# Copyright (c) 2014 TenTen. All rights reserved.
#
# @source https://github.com/DesignatedInitializer/XcodeBuildPhaseScripts
#
# @reference http://matthewmorey.com/xcode-build-phases/
#
# Adding this script as a build phase to the normal build target introduces a lot of noise, especially when
# you prefer your builds to have no warnings at all or treat warnings as errors (GCC_TREAT_WARNINGS_AS_ERRORS, -Werror).
# One good way to run this, then, is to add an Aggregate target and build it whenever you want to quality check
# your project. You may also want to run a linter with that build target.

KEYWORDS="TODO:|FIXME:|\?\?\?:|\!\!\!:"
find "$SOURCE_ROOT"/Sources \( -name "*.h" -or -name "*.m" \) -print0 | xargs -0 egrep --with-filename --line-number --only-matching "($KEYWORDS).*\$" | perl -p -e "s/($KEYWORDS)/ warning: \$1/"
DesignatedInitializer/XcodeBuildPhaseScripts
emit_warnings_for_todos.sh
Shell
mit
940
#!/bin/sh

fifo_name=$(mktemp -u -t dbfifo.XXXXXX)

if ! mkfifo "$fifo_name" ; then
    echo "Can't create fifo!" >&2
    exit 1
fi

trap 'rm -f "$fifo_name"' EXIT

dir=`dirname $0`
${dir}/.dropbox-dist/dropboxd >${fifo_name} 2>/dev/null &
export APP_PID=$!

while read -r line
do
    if echo "$line" | grep -q cli_link_nonce
    then
        echo "$line" | cut -d ' ' -f3
        break
    fi
done < ${fifo_name}

kill ${APP_PID}
wait ${APP_PID} 2>/dev/null
waitxd/dropbox-link
dropbox-link.sh
Shell
mit
433
#! /bin/bash
pushd ..
mvn test
popd
diff -u ../target/test-classes/StandardSqlTest-ref.txt ../target/test-classes/StandardSqlTest-last.txt --color=auto
ewanld/fjdbc-sql
scripts/test.sh
Shell
mit
153
#!/bin/bash

if [[ $EUID -ne 0 ]]; then
    echo "Tokumei installation must be run as root!" 1>&2; exit
fi

filesizelimit_def='104857600'
read -p "Attachment file size limit (bytes) [$filesizelimit_def]: " filesizelimit
filesizelimit=${filesizelimit:-$filesizelimit_def}
filesizelimit_human=$(echo "$filesizelimit" | awk '{
    split("K M G", v); s=0
    while ($1 > 1024) { $1 /= 1024; s++ }
    print int($1) v[s]
}')

domain_def='tokumei.co'
read -p "Domain [$domain_def]: " domain
domain=${domain:-$domain_def}

siteTitle_def='Tokumei'
read -p "Site title [$siteTitle_def]: " siteTitle
siteTitle=${siteTitle:-$siteTitle_def}

siteSubTitle_def='Anonymous microblogging'
read -p "Site subtitle [$siteSubTitle_def]: " siteSubTitle
siteSubTitle=${siteSubTitle:-$siteSubTitle_def}

meta_description_def='What you have to say is more important than who you are'
read -p "Site description [$meta_description_def]: " meta_description
meta_description=${meta_description:-$meta_description_def}

trendinginterval_def='24'
read -p "Trending interval (hours) [$trendinginterval_def]: " trendinginterval
# default before converting hours to seconds (mirrors the other prompts)
trendinginterval=${trendinginterval:-$trendinginterval_def}
trendinginterval=$(echo "$trendinginterval" | awk '{print ($1 * 3600)}')

charlimit_def='300'
read -p "Post character limit [$charlimit_def]: " charlimit
charlimit=${charlimit:-$charlimit_def}

email_def='[email protected]'
read -p "Admin email address [$email_def]: " email
email=${email:-$email_def}

bitcoin_def='1Q31UMtim2ujr3VX5QcS3o95VF2ceiwzzc'
read -p "Bitcoin donation address [$bitcoin_def]: " bitcoin
bitcoin=${bitcoin:-$bitcoin_def}

paypal_business_def='NCX75ZH9GLZD6'
read -p "PayPal donation business ID [$paypal_business_def]: " paypal_business
paypal_business=${paypal_business:-$paypal_business_def}

paypal_location_def='CA'
read -p "PayPal donation location [$paypal_location_def]: " paypal_location
paypal_location=${paypal_location:-$paypal_location_def}

paypal_name_def='Tokumei'
read -p "PayPal donation name [$paypal_name_def]: " paypal_name
paypal_name=${paypal_name:-$paypal_name_def}

paypal_currency_def='USD'
read -p "PayPal donation currency [$paypal_currency_def]: " paypal_currency
paypal_currency=${paypal_currency:-$paypal_currency_def}

rssDesc_def=$siteTitle' RSS'
read -p "RSS feed description [$rssDesc_def]: " rssDesc
rssDesc=${rssDesc:-$rssDesc_def}

webmaster_def=$email' (John Smith)'
read -p "RSS feed webmaster [$webmaster_def]: " webmaster
webmaster=${webmaster:-$webmaster_def}

user_name_def='tokumeister'
read -p "Admin username [$user_name_def]: " user_name
user_name=${user_name:-$user_name_def}

user_password_def=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 24 | sed 1q)
read -p "Admin password [$user_password_def]: " user_password
user_password=${user_password:-$user_password_def}

echo 'Installing dependencies...'
apt-get -y update
apt-get -y install nginx 9base git golang curl libimage-exiftool-perl

echo 'Configuring nginx...'
mkdir -p /etc/nginx/ssl
chmod -R 600 /etc/nginx/ssl
openssl dhparam -out /etc/nginx/ssl/dhparams-$domain.pem 4096
cat <<EOF >/etc/nginx/sites-available/$domain
server {
    server_name $domain www.$domain;
    return 301 https://\$host\$request_uri;
    access_log off;
    error_log off;
}
server {
    listen 443;
    ssl on;
    ssl_certificate /etc/letsencrypt/live/$domain/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/$domain/privkey.pem;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    add_header Strict-Transport-Security "max-age=31536000";
    ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
    ssl_dhparam /etc/nginx/ssl/dhparams-$domain.pem;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    server_name $domain www.$domain;
    access_log off;
    error_log off;
    client_max_body_size $filesizelimit_human;
    root /var/www/$domain/sites/\$host/;
    index index.html;
    location = / {
        return 301 https://\$host/p/timeline;
    }
    location / {
        try_files \$uri @werc;
    }
    location /pub/ {
        root /var/www/$domain;
        try_files \$uri =404;
    }
    location = /favicon.ico {
        root /var/www/$domain;
        try_files /var/www/$domain/sites/\$host/\$uri /pub/default_favicon.ico =404;
    }
    error_page 404 = @werc;
    location @werc {
        include fastcgi_params;
        fastcgi_pass localhost:3333;
    }
}
EOF
ln -s /etc/nginx/sites-available/$domain /etc/nginx/sites-enabled/$domain

echo 'Installing SSL certificate...'
mkdir -p /opt/certbot
cd /opt/certbot
if [ ! -f certbot-auto ] ; then
    wget https://dl.eff.org/certbot-auto
    chmod a+x certbot-auto
fi
service nginx stop
/opt/certbot/certbot-auto certonly --standalone -d $domain -d www.$domain
chmod 600 /etc/letsencrypt/live/$domain/*
service nginx start

echo 'Installing Tokumei...'
mkdir -p /var/www
git clone https://git.tokumei.co/tokumei/tokumei /var/www/$domain
cd /var/www/$domain/sites
ln -s tokumei.co $domain
ln -s tokumei.co www.$domain

charlimit=$(printf '%s\n' "$charlimit" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
filesizelimit=$(printf '%s\n' "$filesizelimit" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
trendinginterval=$(printf '%s\n' "$trendinginterval" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
siteTitle=$(printf '%s\n' "$siteTitle" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
siteSubTitle=$(printf '%s\n' "$siteSubTitle" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
meta_description=$(printf '%s\n' "$meta_description" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
email=$(printf '%s\n' "$email" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
bitcoin=$(printf '%s\n' "$bitcoin" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
paypal_business=$(printf '%s\n' "$paypal_business" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
paypal_location=$(printf '%s\n' "$paypal_location" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
paypal_name=$(printf '%s\n' "$paypal_name" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
paypal_currency=$(printf '%s\n' "$paypal_currency" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
rssDesc=$(printf '%s\n' "$rssDesc" | sed 's/[[\.*^$(){}?+|/]/\\&/g')
webmaster=$(printf '%s\n' "$webmaster" | sed 's/[[\.*^$(){}?+|/]/\\&/g')

sed -i "s/^#sitePrivate/sitePrivate/; s/^charlimit=.*$/charlimit=$charlimit/; s/^filesizelimit=.*$/filesizelimit=$filesizelimit/; s/^trendinginterval=.*$/trendinginterval=$trendinginterval/; s/^siteTitle=.*$/siteTitle="\'"$siteTitle"\'"/; s/^siteSubTitle=.*$/siteSubTitle="\'"$siteSubTitle"\'"/; s/^meta_description=.*$/meta_description="\'"$meta_description"\'"/; s/^email=.*$/email="\'"$email"\'"/; s/^bitcoin=.*$/bitcoin="\'"$bitcoin"\'"/; s/^paypal_business=.*$/paypal_business="\'"$paypal_business"\'"/; s/^paypal_location=.*$/paypal_location="\'"$paypal_location"\'"/; s/^paypal_name=.*$/paypal_name="\'"$paypal_name"\'"/; s/^paypal_currency=.*$/paypal_currency="\'"$paypal_currency"\'"/; s/^rssDesc=.*$/rssDesc="\'"$rssDesc"\'"/; s/^webmaster=.*$/webmaster="\'"$webmaster"\'"/" tokumei.co/_werc/config
sed -i "s/^#posts_users_only/posts_users_only/; s/^#groups_allowed_posts/groups_allowed_posts/" tokumei.co/p/_werc/config
sed -i "s/\/www\/tokumei/\/var\/www\/$domain/" ../bin/aux/trending.rc

cd ..
PATH=$PATH:/usr/lib/plan9/bin ./bin/aux/addwuser.rc "$user_name" "$user_password" posters repliers
cd bin/aux
(crontab -l 2>/dev/null; echo '0 0 * * * PATH=$PATH:/usr/lib/plan9/bin /var/www/'$domain'/bin/aux/trending.rc') | crontab -
(crontab -l 2>/dev/null; echo '0 0 1 */2 * /opt/certbot/certbot-auto renew --quiet --no-self-upgrade') | crontab -

echo 'Installing and starting cgd...'
mkdir -p /usr/local/go
GOPATH=/usr/local/go go get github.com/uriel/cgd
/usr/local/go/bin/cgd -f -c /var/www/$domain/bin/werc.rc >/dev/null 2>&1 &

echo 'Done installing Tokumei.'
tokumeico/tokumei-www
privclear.sh
Shell
mit
8,452
# rh-git29-git-all \
# devtoolset-6-gcc-c++ devtoolset-6-gdb \
./repoquery-recursive.py \
    pssh clusterssh htop iftop iperf3 fio inxi lshw ansible \
    python2-pip python2-paramiko python-ipython-console \
    python34 python34-setuptools python34-pip python34-paramiko python34-pg8000 \
    tmux collectl glances atop statgrab-tools sysbench bonnie++ \
    iptraf-ng nethogs vnstat jnettop ngrep nload \
    | wget -Ni - || exit 1
rm -f *i686.rpm
wget -N 'http://www.slac.stanford.edu/~abh/bbcp/bin/amd64_rhel60/bbcp' || exit 1
#wget -N 'https://downloads.sourceforge.net/project/nmon/nmon16e_x86_rhel65'||exit 1
wget -N 'https://downloads.sourceforge.net/project/nmon/nmon16g_x86.tar.gz'||exit 1
#wget -N 'https://docs.broadcom.com/docs-and-downloads/raid-controllers/raid-controllers-common-files/8-07-14_MegaCLI.zip' || exit 1
Zor-X-L/offline-utils
centos7-utils/download.sh
Shell
mit
821
#!/bin/bash
set -e
source /build/buildconfig
set -x

apt-get clean
rm -rf /build/build.sh
rm -rf /tmp/* /var/tmp/*
mlehner/docker
baseimage/cleanup.sh
Shell
mit
115
#!/bin/bash
#
# Notify of Homebrew updates via Notification Center on Mac OS X
#
# Author: Chris Streeter http://www.chrisstreeter.com
# Requires: terminal-notifier. Install with:
#   brew install terminal-notifier

TERM_APP='/Applications/Terminal.app'
BREW_EXEC='/usr/local/bin/brew'
TERMINAL_NOTIFIER='/usr/local/Cellar/terminal-notifier/1.7.1/bin/terminal-notifier'
# NOTIF_ARGS="-sender com.apple.Terminal"

$BREW_EXEC update 2>&1 > /dev/null
outdated=`$BREW_EXEC outdated --quiet`
pinned=`$BREW_EXEC list --pinned`

# Remove pinned formulae from the list of outdated formulae
outdated=`comm -1 -3 <(echo "$pinned") <(echo "$outdated")`

if [ -z "$outdated" ] ; then
    if [ -e $TERMINAL_NOTIFIER ]; then
        # No updates available
        $TERMINAL_NOTIFIER $NOTIF_ARGS \
            -title "No Homebrew Updates Available" \
            -message "No updates available yet for any homebrew packages."
    fi
else
    # We've got an outdated formula or two
    # Notify via Notification Center
    if [ -e $TERMINAL_NOTIFIER ]; then
        lc=$((`echo "$outdated" | wc -l`))
        outdated=`echo "$outdated" | tail -$lc`
        message=`echo "$outdated" | head -5`
        if [ "$outdated" != "$message" ]; then
            message="Some of the outdated formulae are: $message"
        else
            message="The following formulae are outdated: $message"
        fi
        # Send to the Notification Center
        # $TERMINAL_NOTIFIER $NOTIF_ARGS
        $TERMINAL_NOTIFIER \
            -title "Homebrew Update(s) Available" -message "$message"
    fi
fi
ikosenn/cray-cray
brew-notifier.sh
Shell
mit
1,574
#!/bin/bash echo "PRE build script #1"
ashmckenzie/percheron
spec/unit/support/pre_build_script1.sh
Shell
mit
40
#!/usr/bin/env bash

BAR_ICON=""

while true; do
    checkupdates=$(checkupdates)
    if [[ "" == "$checkupdates" ]]; then
        updates=0
    else
        updates=$(echo "$checkupdates" | wc -l)
    fi
    updatesinfo="$BAR_ICON $updates"
    if echo "$checkupdates" | grep "^linux\|^systemd" | grep -v 'linux-firmware' > /dev/null 2>&1; then
        echo "%{F#e53935}$updatesinfo%{F-}"
    elif [[ $updates -gt 100 ]]; then
        echo "%{F#fb8c00}$updatesinfo%{F-}"
    elif [[ $updates -gt 50 ]]; then
        echo "%{F#fdd835}$updatesinfo%{F-}"
    else
        echo "$updatesinfo"
    fi
    sleep 1800
done
BlackIkeEagle/dotfiles
.config/polybar/scripts/updates.sh
Shell
mit
622
/usr/bin/python rideindego-rumps.py
josepvalls/rideindegochecker
rideindego.command
Shell
mit
36
#!/bin/bash

f=generator.caffemodel
if [ ! -f "${f}" ]; then
    echo "Downloading ..."
    wget http://s.anhnguyen.me/181007__generator.caffemodel -O ${f}
fi
ls ${f}
echo "Done."
Evolving-AI-Lab/ppgn
nets/generator/noiseless/download.sh
Shell
mit
179
# zsh mac

# path
export PATH="/usr/local/sbin:$PATH"

# OpenGL Mac OS X (gcc)
alias opengl="gcc -framework GLUT -framework OpenGL"

# brew install android-sdk
if [ -d /usr/local/opt/android-sdk ]; then
    export ANDROID_HOME=/usr/local/opt/android-sdk
fi

# brew install zsh-syntax-highlighting
source /usr/local/opt/zsh-history-substring-search/zsh-history-substring-search.zsh
source /usr/local/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
source /usr/local/share/zsh-navigation-tools/zsh-navigation-tools.plugin.zsh
source /usr/local/share/zsh-autosuggestions/zsh-autosuggestions.zsh

## JAVA

# adb path
export PATH=$PATH:/Applications/sdk/platform-tools

# brew path.
export PATH="/usr/local/sbin:$PATH"
mziyut/.zsh
mac.zsh
Shell
mit
722
alias reload!='. ~/.zshrc'

# browsers
alias chrome='open -a "Google Chrome"'

# oracle
alias sqlplus="~/sftw/oracle/instantclient_12_1/sqlplus"
AravinthPanch/dotfiles
zsh/aliases.zsh
Shell
mit
145
#!/bin/sh
_script=$0
DIR="$(dirname $_script)"
phpdoc run -vvv --ansi --progressbar --directory $DIR/src,$DIR/examples --target $DIR/doc --title wellid --log $DIR/phpdoc.log
echo $DIR
broeser/wellid
phpdoc.sh
Shell
mit
186
#!/bin/bash
# Reports compressed file sizes for each JS file in dygraphs.

# This list needs to be kept in sync w/ the one in dygraph-dev.js
# and the one in jsTestDriver.conf.
for file in \
    dygraph-layout.js \
    dygraph-canvas.js \
    dygraph.js \
    dygraph-utils.js \
    dygraph-gviz.js \
    dygraph-interaction-model.js \
    dygraph-range-selector.js \
    dygraph-tickers.js \
    rgbcolor/rgbcolor.js \
    strftime/strftime-min.js \
    dashed-canvas.js \
    plugins/base.js \
    plugins/annotations.js \
    plugins/axes.js \
    plugins/chart-labels.js \
    plugins/grid.js \
    plugins/legend.js \
    plugins/install.js \
; do
    base_size=$(cat $file | wc -c)
    cat $file \
        | perl -ne 'print unless m,REMOVE_FOR_COMBINED,..m,/REMOVE_FOR_COMBINED,' \
        > /tmp/dygraph.js
    min_size=$(java -jar yuicompressor-2.4.2.jar /tmp/dygraph.js | gzip -c | wc -c)
    echo "$min_size ($base_size) $file"
done
cavorite/dygraphs
file-size-stats.sh
Shell
mit
857
/home3/redwards/opt/iq-tree/current/bin/iqtree -nt 16 -s seqs.B.rc.trim.aln -bb 1000 -alrt 1000
linsalrob/crAssphage
Combined_Analysis/Analysis/PrimerB/bootstrap/iqtree.sh
Shell
mit
96
#! /bin/bash

# Cleanup folder
rm -rf _assets

# Recreate folder
mkdir -p _assets/website/
mkdir -p _assets/ebook/

# Compile JS
browserify src/js/core/index.js | uglifyjs -mc > _assets/website/gitbook.js
browserify src/js/theme/index.js | uglifyjs -mc > _assets/website/theme.js

# Compile Website CSS
lessc -clean-css src/less/website.less _assets/website/style.css

# Compile eBook CSS
lessc -clean-css src/less/ebook.less _assets/ebook/ebook.css
lessc -clean-css src/less/pdf.less _assets/ebook/pdf.css
lessc -clean-css src/less/mobi.less _assets/ebook/mobi.css
lessc -clean-css src/less/epub.less _assets/ebook/epub.css

# Copy fonts
mkdir -p _assets/website/fonts
cp -R node_modules/font-awesome/fonts/ _assets/website/fonts/fontawesome/

# Copy icons
mkdir -p _assets/website/images
cp images/synapse.png _assets/website/images/
cp node_modules/gitbook-logos/output/favicon.ico _assets/website/images/
cp node_modules/gitbook-logos/output/apple-touch-icon-152.png _assets/website/images/apple-touch-icon-precomposed-152.png
Synapse-Cmf/documentation
theme/gitbook-plugin-theme-synapse/src/build.sh
Shell
mit
1,031
#!/bin/sh echo ":::" echo "Installing and configuring Virtualbox" sudo pacman -S qt4 virtualbox-guest-iso virtualbox-host-modules # load vboxdrv module on startup sudo sh -c "echo \"vboxdrv\" > /etc/modules-load.d/virtualbox.conf" echo ":::" echo "You should now install Virtualbox extensions from AUR" echo "e.g. yaourt -S virtualbox-ext-oracle"
brandonlichtenwalner/arch-install
package-install/virtualbox.sh
Shell
mit
350
EPOCHS=5
CUDA_VISIBLE_DEVICES=0 python cifar10_cnn_mgpu.py --epochs $EPOCHS
CUDA_VISIBLE_DEVICES=0,1 python cifar10_cnn_mgpu.py --mgpu --epochs $EPOCHS
CUDA_VISIBLE_DEVICES=0,1,2,3 python cifar10_cnn_mgpu.py --mgpu --epochs $EPOCHS
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5 python cifar10_cnn_mgpu.py --mgpu --epochs $EPOCHS
CUDA_VISIBLE_DEVICES=0 python cifar10_cnn_mgpu.py --epochs $EPOCHS
CUDA_VISIBLE_DEVICES=0,1 python cifar10_cnn_mgpu.py --mgpu --epochs $EPOCHS --nccl
CUDA_VISIBLE_DEVICES=0,1,2,3 python cifar10_cnn_mgpu.py --mgpu --epochs $EPOCHS --nccl
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5 python cifar10_cnn_mgpu.py --mgpu --epochs $EPOCHS --nccl

# python remove_progressbar_from_logs.py
# grep -A 1 '50000/50000' original_7gforce_results_nocr.txt |grep -A 2 'Epoch 5/5'|grep '50000/50000'
rossumai/keras-multi-gpu
experiments/keras_tensorflow/avolkov1/original/run_original_7gforce.sh
Shell
mit
794
#!/bin/bash echo "Cheking resolv.conf file" grep "research.ravenpack.com" /etc/resolv.conf >> /dev/null if [ $? -eq 0 ];then echo "resolv.conf looks fine" else echo "Adding FQDN to your resolv.conf file" echo "search research.ravenpack.com private.aws.ravenpack.com development.ravenpack.com" >> /etc/resolv.conf fi
joserc87/config-files
scripts/setup-dns-fqdn.sh
Shell
mit
328
#!/bin/bash
#
#
# install applications specific to the zsh shell

echo "##################################################"
echo "# Topic Installer: ZSH"
echo "##################################################"

# install oh-my-zsh
echo " #"
echo " # Installation: oh-my-zsh"
echo " ##################################################"

if [[ -d "$HOME/.oh-my-zsh" ]]; then
    echo ""
    echo "› oh-my-zsh is already installed. Ignoring..."
else
    # installing oh-my-zsh according to https://github.com/robbyrussell/oh-my-zsh/blob/master/README.markdown
    echo ""
    echo "› Cloning and installing oh-my-zsh."
    cd "$HOME/Downloads"
    sh -c "$(wget https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -)"

    # making zsh the default shell
    echo ""
    echo "› Making ZSH the default shell. Please enter your password."
    echo "› chsh -s $(grep /zsh$ /etc/shells | tail -1)"
    chsh -s "$(grep /zsh$ /etc/shells | tail -1)"
fi

echo ""
echo "Done."
mspl13/dotfiles
zsh/install.sh
Shell
mit
978
#!/bin/bash
#
# Usage:
#
#   ./sar_seq.sh yyyymmdd1 ... yyyymmddn dims enl significance

echo '***** Multitemporal PolSAR Change Detection **********'
echo '***** Radarsat2 quad or dualpol imagery **************'
echo '***** Pre-processed with MapReady ********************'
echo '******************************************************'

n=$[$#-3]
declare -i nn
nn=$n-2
significance="${@: -1}"
enl=("${@: -2}")
dims=("${@: -3}")
last=("${@: -4}")
outfn='sarseq('$1'-'$nn'-'$last').tif'

echo 'number of images    ' $n
echo 'ENL                 ' $enl
echo 'spatial subset      ' $dims
echo 'significance level  ' $significance

imdir='/home/imagery/'

# test for T3 (quadpol) or C2 (dualpol) directory
dir=$imdir$(ls -l $imdir | grep 'MapReady$' | grep $1 | awk '{print $9}')
subdir='/'$(ls -l $dir | grep [CT][23] | awk '{print $9}')'/'
dir=$imdir$(ls -l $imdir | grep 'MapReady$' | grep $1 | awk '{print $9}')$subdir

for ((i=1; i<=$n; i++))
do
    dir=$imdir$(ls -l $imdir | grep 'MapReady$' | grep $1 | awk '{print $9}')$subdir
    fni=$dir'polSAR.tif'
    [[ $fni = None ]] && exit 1
    fn[i]=$fni
    shift
done
s="${fn[*]}"
fns=${s//" "/","}

python /home/sar_seq.py -d $dims -s $significance -m $fns $outfn $enl
mortcanty/SARDocker
src/sar_seq_rs2.sh
Shell
mit
1,223
#!/usr/bin/env bash
curl http://127.0.0.1:4000/demo-playlist/dyn/guts/arm.m3u > arm.m3u
curl http://127.0.0.1:4000/demo-playlist/dyn/guts/dykc.m3u > dykc.m3u
curl http://127.0.0.1:4000/demo-playlist/dyn/guts/homework.m3u > homework.m3u
curl http://127.0.0.1:4000/demo-playlist/dyn/guts/oscar.m3u > oscar.m3u
curl http://127.0.0.1:4000/demo-playlist/dyn/guts/cpu.m3u > cpu.m3u
samwhelp/demo-playlist
all/guts/update.sh
Shell
mit
377
#!/usr/bin/env bash
echo
echo [+] Restarting compute services
echo
sleep 2
service nova-compute restart
SocialGeeks/vagrant-openstack
scripts/compute-configuration.sh
Shell
mit
109
#!/bin/sh set -e echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}" install_framework() { if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then local source="${BUILT_PRODUCTS_DIR}/$1" elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")" elif [ -r "$1" ]; then local source="$1" fi local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" if [ -L "${source}" ]; then echo "Symlinked..." source="$(readlink "${source}")" fi # use filter instead of exclude so missing patterns dont' throw errors echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\"" rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}" local basename basename="$(basename -s .framework "$1")" binary="${destination}/${basename}.framework/${basename}" if ! [ -r "$binary" ]; then binary="${destination}/${basename}" fi # Strip invalid architectures so "fat" simulator / device frameworks work on device if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then strip_invalid_archs "$binary" fi # Resign the code if required by the build settings to avoid unstable apps code_sign_if_enabled "${destination}/$(basename "$1")" # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7. if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then local swift_runtime_libs swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]}) for lib in $swift_runtime_libs; do echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\"" rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}" code_sign_if_enabled "${destination}/${lib}" done fi } # Signs a framework with the provided identity code_sign_if_enabled() { if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then # Use the current code_sign_identitiy echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}" echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\"" /usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1" fi } # Strip invalid architectures strip_invalid_archs() { binary="$1" # Get architectures for current file archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)" stripped="" for arch in $archs; do if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then # Strip non-valid architectures in-place lipo -remove "$arch" -output "$binary" "$binary" || exit 1 stripped="$stripped $arch" fi done if [[ "$stripped" ]]; then echo "Stripped $binary of architectures:$stripped" fi } if [[ "$CONFIGURATION" == "Debug" ]]; then install_framework "$BUILT_PRODUCTS_DIR/FLEX/FLEX.framework" fi
hanjt/calendar
calendar/Pods/Target Support Files/Pods-calendar/Pods-calendar-frameworks.sh
Shell
mit
3,483
#!/bin/sh -e

# allow a custom EOxServer location to override the default one if needed
export EOX_ROOT=${1:-"/var/eoxserver"}

# Locate sudo (when available) for commands requiring the superuser.
# Allows setup of a custom autoconf instance located in the non-root user-space.
SUDO=`which sudo`

# Add CRS 900913 if not present
if ! grep -Fxq "<900913> +proj=tmerc +lat_0=0 +lon_0=21 +k=1 +x_0=21500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs <>" /usr/share/proj/epsg ; then
    $SUDO sh -c 'echo "# WGS 84 / Pseudo-Mercator" >> /usr/share/proj/epsg'
    $SUDO sh -c 'echo "<900913> +proj=tmerc +lat_0=0 +lon_0=21 +k=1 +x_0=21500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs <>" >> /usr/share/proj/epsg'
fi

cd /var/wmm
$SUDO python setup.py install

# EOxServer
cd "$EOX_ROOT/"
$SUDO python setup.py develop

cd /var/eoxsmagnetism
$SUDO python setup.py develop

# Configure EOxServer autotest instance
cd /var/eoxsmagnetism/instance

# Prepare DBs
python manage.py syncdb --noinput --traceback
python manage.py loaddata auth_data.json --traceback

# Create admin user
python manage.py shell 1>/dev/null 2>&1 <<EOF
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
if authenticate(username='admin', password='admin') is None:
    User.objects.create_user('admin','[email protected]','admin')
EOF

# Collect static files
python manage.py collectstatic --noinput
ESA-VirES/eoxserver-magnetism
vagrant/scripts/development_installation.sh
Shell
mit
1,442
git add -A
git commit -m "$1"
git push origin database
git checkout master
git pull
git merge database
git push origin master
git checkout database
ethz-nus/nan-codefest
server/sync-this-shit.sh
Shell
mit
146
# This script takes care of testing your crate

set -ex

main() {
    # Build debug and release targets
    cross build --target $TARGET
    cross build --target $TARGET --release

    if [ ! -z $DISABLE_TESTS ]; then
        return
    fi

    # Run tests on debug and release targets.
    cross test --target $TARGET
    cross test --target $TARGET --release
}

# we don't run the "test phase" when doing deploys
if [ -z $TRAVIS_TAG ]; then
    main
fi
posborne/rust-nix
ci/script.sh
Shell
mit
455
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH

# Check if user is root
if [ $(id -u) != "0" ]; then
    echo "Error: You must be root to run this script, please use root to install mysql"
    exit 1
fi

clear
echo "=================================================="
echo "ecstore mysql for CentOS/RedHat Linux Server"
echo "=================================================="

cur_dir=$(pwd)

# set mysql root password
echo "=================================="
mysqlrootpwd="root"
echo "Please input the root password of mysql:"
read -p "(Default password: root):" mysqlrootpwd
if [ "$mysqlrootpwd" = "" ]; then
    mysqlrootpwd="root"
fi
echo "=================================="
echo "Mysql root password: $mysqlrootpwd"
echo "=================================="

# do you want to install the InnoDB Storage Engine?
quan993210/shopexDocument
install/mysql.sh
Shell
mit
880
# -*- sh -*-

info_command() {
  printf "%s:\\n%s\\n\\n" "OS" "$(uname -a)"
  printf "%s:\\n%s\\n\\n" "SHELL" "$($SHELL --version)"
  printf "%s:\\n%s\\n\\n" "ASDF VERSION" "$(asdf_version)"
  printf "%s:\\n%s\\n\\n" "ASDF ENVIRONMENT VARIABLES" "$(env | grep -E "ASDF_DIR|ASDF_DATA_DIR|ASDF_CONFIG_FILE|ASDF_DEFAULT_TOOL_VERSIONS_FILENAME")"
  printf "%s:\\n%s\\n\\n" "ASDF INSTALLED PLUGINS" "$(asdf plugin list --urls --refs)"
}

info_command "$@"
asdf-vm/asdf
lib/commands/command-info.bash
Shell
mit
451
local OLD_EMAIL CORRECT_EMAIL SHOULD_PUSH

echo -n "Old Email: "
read OLD_EMAIL

echo -n "Correct email: "
read CORRECT_EMAIL

echo -n "Should push changes [Y/n]: "
read SHOULD_PUSH
SHOULD_PUSH=${SHOULD_PUSH:-Y}

local CMD="git filter-branch -f --env-filter ' \
    if [ "\$GIT_COMMITTER_EMAIL" = "$OLD_EMAIL" ]; \
    then \
        export GIT_COMMITTER_EMAIL="$CORRECT_EMAIL"; \
    fi; \
    if [ "\$GIT_AUTHOR_EMAIL" = "$OLD_EMAIL" ]; then \
        export GIT_AUTHOR_EMAIL="$CORRECT_EMAIL"; \
    fi' --tag-name-filter cat -- --branches --tags"

eval $CMD

if [ "$SHOULD_PUSH" = "Y" ]; then
    git push --force --tags origin 'refs/heads/*'
fi
tbremer/dotfiles
zsh/git-reset-email.zsh
Shell
mit
623
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-2937-1
#
# Security announcement date: 2014-05-27 00:00:00 UTC
# Script generation date:     2017-01-01 21:09:00 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - mod-wsgi:3.3-2+deb6u1
#
# Last versions recommended by security team:
# - mod-wsgi:3.3-2+deb6u1
#
# CVE List:
# - CVE-2014-0240
# - CVE-2014-0242
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

sudo apt-get install --only-upgrade mod-wsgi=3.3-2+deb6u1 -y
Cyberwatch/cbw-security-fixes
Debian_6_(Squeeze)/x86_64/2014/DLA-2937-1.sh
Shell
mit
632
#!/bin/bash -ev
#
# Installation Script
# Written by: Tommy Lincoln <[email protected]>
# Github: https://github.com/pajamapants3000
# Legal: See LICENSE in parent directory
#
#
# Dependencies
#**************
# Begin Required
#glib-2.44.1
#js-17.0.0
# End Required
# Begin Recommended
# End Recommended
# Begin Optional
#gobject-introspection-1.44.0
#docbook_xml-4.5
#docbook_xsl-1.78.1
#gtk_doc-1.24
#libxslt-1.1.28
#linux_pam-1.2.1
# End Optional
# Begin Kernel
# End Kernel
#
# Installation
#**************
# Check for previous installation:
PROCEED="yes"
REINSTALL=0
grep polkit-0.113 /list-$CHRISTENED"-"$SURNAME > /dev/null && ((\!$?)) &&\
    REINSTALL=1 && echo "Previous installation detected, proceed?" && read PROCEED
[ $PROCEED = "yes" ] || [ $PROCEED = "y" ] || exit 0
# Download:
wget http://www.freedesktop.org/software/polkit/releases/polkit-0.113.tar.gz
# md5sum:
echo "4b77776c9e4f897dcfe03b2c34198edf polkit-0.113.tar.gz" | md5sum -c ;\
    ( exit ${PIPESTATUS[0]} )
#
if ! (cat /etc/group | grep polkitd > /dev/null); then
    pathappend /usr/sbin
    as_root groupadd -fg 27 polkitd
    as_root useradd -c "PolicyKit_Daemon_Owner" -d /etc/polkit-1 -u 27 \
        -g polkitd -s /bin/false polkitd
    pathremove /usr/sbin
fi
#
tar -xvf polkit-0.113.tar.gz
cd polkit-0.113
grep linux_pam /list-$CHRISTENED"-"$SURNAME > /dev/null && \
./configure --prefix=/usr \
    --sysconfdir=/etc \
    --localstatedir=/var \
    --disable-static \
    --enable-libsystemd-login=no || \
./configure --prefix=/usr \
    --sysconfdir=/etc \
    --localstatedir=/var \
    --disable-static \
    --enable-libsystemd-login=no \
    --with-authfw=shadow
make
# Test (system dbus session; ignore consolekit db missing warning):
if (grep dbus /list-$CHRISTENED"-"$SURNAME > /dev/null); then
    make check
fi
#
as_root make install
#
as_root tee /etc/pam.d/polkit-1 << "EOF"
# Begin /etc/pam.d/polkit-1
auth     include        system-auth
account  include        system-account
password include        system-password
session  include        system-session
# End /etc/pam.d/polkit-1
EOF
cd ..
as_root rm -rf polkit-0.113
#
# Add to installed list for this computer:
echo "polkit-0.113" >> /list-$CHRISTENED"-"$SURNAME
#
###################################################
pajamapants3000/BLFS_scripts_etc
scripts/polkit-0.113.sh
Shell
mit
2,446
#!/bin/sh

prog=labudde_test.m
descr="labudde_test.m (octfile)"
depends="labudde_test.m labudde.oct test_common.m check_octave_file.m tf2Abcd.m"

tmp=/tmp/$$
here=`pwd`
if [ $? -ne 0 ]; then echo "Failed pwd"; exit 1; fi

fail() {
    echo FAILED ${0#$here"/"} $descr 1>&2
    cd $here
    rm -rf $tmp
    exit 1
}

pass() {
    echo PASSED ${0#$here"/"} $descr
    cd $here
    rm -rf $tmp
    exit 0
}

trap "fail" 1 2 3 15

mkdir $tmp
if [ $? -ne 0 ]; then echo "Failed mkdir"; exit 1; fi
for file in $depends;do \
    cp -R src/$file $tmp; \
    if [ $? -ne 0 ]; then echo "Failed cp "$file; fail; fi \
done
cd $tmp
if [ $? -ne 0 ]; then echo "Failed cd"; fail; fi

#
# the output should look like this
#
cat > test.ok << 'EOF'
Using labudde octfile
d=[ 1.0000000000000000 -10.1056655234449586 46.8017429779240715 -131.0797709418646377 246.6262448531584255 -327.2492857402048116 312.4364383270706185 -214.6036226093469281 103.9185724124888139 -33.7843355785909765 6.6363989830178109 -0.5967165560107490 ]
max(max(abs(A)))=327.249
rcond(A)=5.54491e-06
poly(A)=[ 1.0000000000000000 -10.1056655234449710 46.8017429779241922 -131.0797709418651209 246.6262448531596760 -327.2492857402065738 312.4364383270723238 -214.6036226093478660 103.9185724124893113 -33.7843355785911115 6.6363989830178385 -0.5967165560107505 ]
norm(d-poly(A))=2.99522e-12
labudde(A)=[ -10.1056655234449586 46.8017429779240715 -131.0797709418646377 246.6262448531584255 -327.2492857402048116 312.4364383270706185 -214.6036226093469281 103.9185724124888139 -33.7843355785909765 6.6363989830178109 -0.5967165560107490 ]
norm(d(2:end)-labudde(A))=0
poly(A)=[ 1.0000000000000000 -0.0000000000000095 1.8669208760999940 -0.0000000000000157 2.2147829705999902 -0.0000000000000137 2.2883188634999905 -0.0000000000000085 2.0751642793999956 -0.0000000000000037 1.5701398180999981 -0.0000000000000009 1.0247030921999996 0.0000000000000002 0.5684534800999999 0.0000000000000003 0.2633896209999994 0.0000000000000001 0.0887207127999999 -0.0000000000000000 0.0197382406999999 ]
norm(d0-poly(A))=2.92264e-14
labudde(A)=[ -0.0000000000000000 1.8669208761000000 0.0000000000000000 2.2147829706000000 0.0000000000000000 2.2883188634999998 0.0000000000000000 2.0751642794000000 0.0000000000000000 1.5701398180999999 0.0000000000000000 1.0247030922000002 0.0000000000000000 0.5684534801000001 0.0000000000000000 0.2633896210000000 0.0000000000000000 0.0887207128000000 0.0000000000000000 0.0197382407000000 ]
norm(d0(2:end)-labudde(A))=2.57768e-16
EOF
if [ $? -ne 0 ]; then echo "Failed output cat"; fail; fi

#
# run and see if the results match
#
echo "Running $descr"
octave --no-gui -q $prog >test.out 2>&1
if [ $? -ne 0 ]; then echo "Failed running $descr"; fail; fi

diff -Bb test.ok test.out
if [ $? -ne 0 ]; then echo "Failed diff -Bb"; fail; fi

#
# this much worked
#
pass
robertgj/DesignOfIIRFilters
test/01/t0163a.sh
Shell
mit
2,916
#!/bin/bash mkdir -p "${SRCROOT}/../build" which pact-mock-service pact-mock-service start --pact-specification-version 2.0.0 --log "${SRCROOT}/../build/pact.log" --pact-dir "${SRCROOT}/../build/pacts" -p 1234
bgannin/pact-consumer-swift
scripts/start_server.sh
Shell
mit
212
#!/bin/bash

SSR_SERVER001=${SSR_SERVER001:-0.0.0.1}
SSR_SERVER002=${SSR_SERVER002:-0.0.0.2}
SSR_SERVER003=${SSR_SERVER003:-0.0.0.3}
SSR_SERVER004=${SSR_SERVER004:-0.0.0.4}
SSR_SV_PORT=${SSR_SV_PORT:-8888}
SSR_PASS=${SSR_PASS:-mpswd}
SSR_METH=${SSR_METH:-aes-256-cfb}
SSR_PROTO=${SSR_PROTO:-auth_sha1_v2}
SSR_OBFS=${SSR_OBFS:-http_simple}
SSR_OBFPA=${SSR_OBFPA:-baidu.com}

sed -i "s/0.0.0.1/$SSR_SERVER001/g" /opt/proxychain/shadowsocks.json
sed -i "s/0.0.0.2/$SSR_SERVER002/g" /opt/proxychain/shadowsocks.json
sed -i "s/0.0.0.3/$SSR_SERVER003/g" /opt/proxychain/shadowsocks.json
sed -i "s/0.0.0.4/$SSR_SERVER004/g" /opt/proxychain/shadowsocks.json
sed -i "s/8888/$SSR_SV_PORT/g" /opt/proxychain/shadowsocks.json
sed -i "s/mpswd/$SSR_PASS/g" /opt/proxychain/shadowsocks.json
sed -i "s/aes-256-cfb/$SSR_METH/g" /opt/proxychain/shadowsocks.json
sed -i "s/auth_sha1_v2/$SSR_PROTO/g" /opt/proxychain/shadowsocks.json
sed -i "s/http_simple/$SSR_OBFS/g" /opt/proxychain/shadowsocks.json
sed -i "s/baidu.com/$SSR_OBFPA/g" /opt/proxychain/shadowsocks.json

exec "$@"
arthurliang/dockerfiles
proxychain/init.sh
Shell
mit
1,059
#!/bin/bash
# this script runs a few functional tests to make sure that everything
# is working properly. on error in any subcommands, it should not quit
# and finally exit with a non-zero exit code if any of the commands
# failed

# get the directory of this script and use it to correctly find the
# examples directory
# http://stackoverflow.com/a/9107028/564709
BASEDIR=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)

# annoying problem that md5 (OSX) and md5sum (Linux) are not the same
# in coreutils
which md5 > /dev/null
if [ $? -ne 0 ]; then
    md5 () {
        md5sum $1 | awk '{print $1}'
    }
fi

# formatting functions
red () {
    echo $'\033[31m'"$1"$'\033[0m'
}

# function to update exit code and throw error if update value is
# non-zero
EXIT_CODE=0
update_status () {
    if [[ $1 -ne 0 ]]; then
        red "$2"
    fi
    EXIT_CODE=$(expr ${EXIT_CODE} + $1)
}

# function for running test on a specific example to validate that the
# checksum of results is consistent
validate_example () {
    # manipulate the list of arguments passed to this function via
    # http://stackoverflow.com/a/10308353/564709
    args=("$@")
    test_checksum=${args[-1]}
    unset args[${#args[@]}-1]

    # run textract on an example document and make sure the md5sum is
    # the same as what we expect
    echo running textract "${args[@]}"...
    textract "${args[@]}" -o dummy.txt
    update_status $? 'textract failed!'
    # cat dummy.txt
    local_checksum=$(md5sum dummy.txt | awk '{print $1}')
    rm -f dummy.txt

    # hack to compute checksum of resulting archive since tarballs of
    # files with the same content are apparently not guaranteed to
    # have the same md5 hash
    if [ "${local_checksum}" != "${test_checksum}" ]; then
        red "ERROR--CHECKSUM FOR TEST '$@' DOES NOT MATCH"
        red "    local checksum=${local_checksum}"
        red "    test checksum=${test_checksum}"
        update_status 1 ""
    fi
}

# run a few examples to make sure the checksums match what they are
# supposed to. if you update an example, be sure to update the
# checksum by just running this script and determining what the
# correct checksum is
validate_example ${BASEDIR}/docx/i_heart_word.docx 35b515d5e9d68af496f9233eb81547be
validate_example ${BASEDIR}/pptx/i_love_powerpoint.pptx a5bc9cbe9284d4c81c1106a8137e4a4d
validate_example ${BASEDIR}/doc/i_heart_word.doc 8c6b87285e7d5498cff369fe4536a54b
validate_example ${BASEDIR}/pdf/i_heart_pdfs.pdf 06719d714211174a3851ac4cee880fe1
validate_example -m pdfminer ${BASEDIR}/pdf/i_heart_pdfs.pdf d4377783e5fbde756d3a195bfd103be0
validate_example ${BASEDIR}/txt/little_bo_peep.txt 1c5fb4478d84c3b3296746e491895ada
validate_example ${BASEDIR}/html/snow-fall.html acc2d8c49094e56474006cab3d3768eb
validate_example ${BASEDIR}/html/what-we-do.html 1fb0263bf62317365cb30246d9e094be
validate_example ${BASEDIR}/eml/example.eml cb59a5fad8ed8b849e15d53449b1de3f
validate_example ${BASEDIR}/ps/example.ps bdd41be3e24d7ded69be1e5732f7c8fc
validate_example ${BASEDIR}/json/json_is_my_best_friend.json dc0503f1b5a213d67cc08829b12df99e
validate_example ${BASEDIR}/odt/i_heart_odt.odt f64b15c1acf5cebb1a91896221696da7

# exit with the sum of the status
exit ${EXIT_CODE}
shobhitmittal/textract
tests/run_functional_tests.sh
Shell
mit
3,202
#!/bin/sh

cd "${0%/*}"

# printf rather than echo, so the \n escapes render the same under every /bin/sh
printf "Starting application...\n\n"

./crazymk
open-fun/crazymk
scripts/run.sh
Shell
mit
67
#!/bin/bash

pushd $(dirname $0) &>/dev/null

source build.env || exit $?

APP_DIR=Banshee.app
BUNDLE=$APP_DIR/Contents/MacOS
LIB_PREFIX=$BUILD_PREFIX/lib

[[ -d "${LIB_PREFIX}" ]] || {
	echo "ERROR: ${LIB_PREFIX} does not exist; dependencies must first be built"
	exit 1
}

rm -rf $APP_DIR
cp -rf app-bundle-data Banshee.app

if [ -d release-install ]; then
	# we are building the .app from a tarball
	cp -rf release-install/lib/banshee-1/* $BUNDLE
	mkdir $BUNDLE/share
	cp -rf release-install/share/banshee-1 $BUNDLE/share
	cp -rf release-install/share/locale $BUNDLE/share
else
	# we are building from a svn build
	cp -rf ../../bin/* $BUNDLE
fi

mkdir -p $BUNDLE/gstreamer-0.10

# Copy all runtime dependencies for bundling
cp $BUILD_PREFIX/bin/{gst-launch,gst-inspect}-0.10 $BUNDLE &>/dev/null
find $LIB_PREFIX -name *.dylib -type f -exec cp {} $BUNDLE \; &>/dev/null
find $LIB_PREFIX/gstreamer-0.10 -name *.so -type f -exec cp {} $BUNDLE/gstreamer-0.10 \; &>/dev/null
find $LIB_PREFIX/mono -name *.dll* -not -name *policy* -type f -exec cp {} $BUNDLE \; &>/dev/null

# Nuke any dangling chads
find $APP_DIR -type d -iregex '.*\.svn$' | xargs rm -rf

pushd $BUNDLE &>/dev/null

# Rebuild symlinks
for link in $(find $LIB_PREFIX -name \*.dylib -type l); do
	ln -s "$(basename $(readlink $link))" "$(basename $link)"
done

# Relocate libraries
for dep in $(find . -type f \( -name \*.dylib -o -name \*.so -o -name gst-\*-\*.\* \)); do
	echo -n "Processing $dep: "
	relocs=0
	link_deps=$(otool -L $dep | cut -f2 | cut -f1 -d' ')
	dep_id=./$(basename $(otool -D $dep | tail -n1))
	for link_dep in $link_deps; do
		if [ "x${link_dep:0:${#LIB_PREFIX}}" = "x$LIB_PREFIX" ]; then
			install_name_tool -change $link_dep ./$(basename $link_dep) -id $dep_id $dep
			relocs=$(($relocs + 1))
		fi
	done
	echo "$relocs relocations"
done

popd &>/dev/null
popd &>/dev/null
eeejay/banshee
build/osx/make-app-bundle.sh
Shell
mit
1,866
#ip=192.168.20.235
ip=10.8.0.235
odbc_srv='IntegraDb'
usr='zam'
pass='lis'
database='gstdb'

#sqsh -S SERV -U user -P passwd -D db -L bcp_colsep=',' -m bcp
#     -C 'select * from some_table where foo=bar' > /path/to/output.out

# Define table variable
sqsh -S $odbc_srv -U $usr -P $pass -D $database -L bcp_colsep=' ' -m bcp -C "
insert into sistemas.dbo.disponibilidad_tbl_hist_unidades_gst_indicators
select unidad, id_flota, flota, xid_status, xestatus, id_status, estatus, tipo_status, clasification_name
,group_name, operador, remolque, id_area, area, id_tipo_operacion, segmento, description, compromise
,status_viaje, desc_viaje, status_taller, desc_taller, units_type, 0 --as 'iseditable'
, id_area_taller, area_taller
,id_orden, fecha_inicio,fecha_ingreso, fecha_prometida
,lastTrip
,id_remolque1
,id_remolque2
,id_dolly
,f_despachado
,StatusDays
,convert(nvarchar(max),current_timestamp,23) --'2021-08-10' --created
,1 -- status
from sistemas.dbo.disponibilidad_currentview_rpt_unidades_gst_indicators
-- where
-- created = convert(nvarchar(max),current_timestamp,23)"
ambagasdowa/kml
cron/disponibilidad_historical_trigger_job.sh
Shell
mit
2,516
# make sure errors stop the script
set -e

echo "add patch-package"
yarn add $1
alias patch-package=./node_modules/.bin/patch-package

echo "@microsoft/mezzurite-core => @types/angular should not contain patch-package"
if grep patch-package ./node_modules/@microsoft/mezzurite-core/node_modules/@types/angular/index.d.ts ; then
  exit 1
fi

echo "edit @microsoft/mezzurite-core => @types/angular"
yarn replace angular patch-package ./node_modules/@microsoft/mezzurite-core/node_modules/@types/angular/index.d.ts

echo "SNAPSHOT: create the patch"
patch-package @microsoft/mezzurite-core/@types/angular
echo "END SNAPSHOT"

echo "the patch file was created"
ls patches/@microsoft+mezzurite-core++@types+angular+1.6.53.patch

echo "reinstall node_modules"
yarn rimraf node_modules
yarn

echo "@microsoft/mezzurite-core => @types/angular should not contain patch-package"
if grep patch-package ./node_modules/@microsoft/mezzurite-core/node_modules/@types/angular/index.d.ts ; then
  exit 1
fi

echo "SNAPSHOT: run patch-package"
patch-package
echo "END SNAPSHOT"

echo "@microsoft/mezzurite-core => @types/angular should contain patch-package"
grep patch-package ./node_modules/@microsoft/mezzurite-core/node_modules/@types/angular/index.d.ts
ds300/patch-package
integration-tests/nested-scoped-packages/nested-scoped-packages.sh
Shell
mit
1,240
#!/bin/zsh

# zsh settings for history
export HISTIGNORE="&:ls:[bf]g:exit:reset:clear:cd:cd ..:cd.."
export HISTSIZE=25000
export HISTFILE=~/.zsh_history
export SAVEHIST=25000
setopt APPEND_HISTORY
setopt INC_APPEND_HISTORY
setopt SHARE_HISTORY
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_IGNORE_SPACE
setopt HIST_REDUCE_BLANKS
setopt HIST_SAVE_NO_DUPS
setopt HIST_VERIFY
setopt EXTENDED_HISTORY
soleblaze/dotfiles
zsh/history.zsh
Shell
mit
393
set -e

if [ "$LANE" = "ios" ]; then
  brew update
  brew install yarn
  brew outdated node || brew upgrade node
  brew outdated yarn || brew upgrade yarn
elif [ "$LANE" = "android" ]; then
  node --version
  npm install -g yarn
fi
idehub/react-native-google-analytics-bridge
.travis/before_install.sh
Shell
mit
242
#! /bin/sh

# Runs the docker container mounting the current directory in the working directory of the container
# and leaves you at a bash prompt

# Usage :
# ./run.sh
# Run from the directory containing the Dockerfile

docker run -v `pwd`:/var/lib/blog -p 8000:4000 -t -i saltatory/blog /bin/bash
saltatory/saltatory.github.io
docker-run.sh
Shell
mit
297
#!/usr/bin/env bash

declare -a fac_users=("fernando" "ximenes" "guilherme" "liulin" "ana" "alexandre")

function create_groups {
    group=fac
    echo "creating group " $group
    sudo adduser --quiet --disabled-password --shell /bin/bash --home /home/fac --gecos "User" $group --ingroup $group
    sudo passwd $group   # fixed: was $user, which is unset in this function
    sudo adduser $group --ingroup $group; # need to choose a password for fac user
    sudo usermod -G sudo $group
}

function create_fac_users {
    group=fac
    for user in "${fac_users[@]}"; do
        echo "creating user " $user
        sudo adduser --quiet --disabled-password --shell /bin/bash --home /home/$user --gecos "User" $user
        sudo passwd $user
        sudo usermod -G sudo,$user -g $group $user
    done
}

function install_linux_packages {
    printf "\n--- installing git ---\n"; sudo apt-get -y install git
    #echo "installing git..."; sudo apt-get install git
    printf "\n--- installing g++ ---\n"; sudo apt-get -y install g++
    printf "\n--- installing gfortran ---\n"; sudo apt-get -y install gfortran
    printf "\n--- installing libreadline6-dev ---\n"; sudo apt-get -y install libreadline6-dev
    printf "\n--- installing re2c ---\n"; sudo apt-get -y install re2c
    printf "\n--- installing htop ---\n"; sudo apt-get -y install htop
    printf "\n--- installing swig ---\n"; sudo apt-get -y install swig
}

function install_epics_base {
    # epics base
    sudo mkdir -p /usr/local/epics
    sudo mkdir -p /usr/local/epics/R3.14.12.6
    sudo ln -s /usr/local/epics/R3.14.12.6 /usr/local/epics/R3.14
    sudo chown -R $(whoami) /usr/local/epics/
    cd ~/Downloads
    wget https://www.aps.anl.gov/epics/download/base/baseR3.14.12.6.tar.gz
    tar xzf baseR3.14.12.6.tar.gz
    mv base-3.14.12.6 base
    mv base /usr/local/epics/R3.14.12.6
    cd /usr/local/epics/R3.14.12.6/base
    make -j32
}

function install_epics_extensions {
    source ~/.bashrc
    # epics extensions
    sudo rm -rf /usr/local/epics/R3.14.12.6/extensions
    mkdir -p /usr/local/epics/R3.14.12.6/extensions
    cd /usr/local/epics/R3.14.12.6/extensions
    makeBaseExt.pl -t simple
    # sequencer
    cd ~/Downloads
    wget http://www-csr.bessy.de/control/SoftDist/sequencer/releases/seq-2.2.4.tar.gz
    tar xzf seq-2.2.4.tar.gz
    mv seq-2.2.4 /usr/local/epics/R3.14/extensions
    cd /usr/local/epics/R3.14/extensions/seq-2.2.4
    sed -i 's/\/home\/franksen\/src\/epics-base\/3.14-12-5/\/usr\/local\/epics\/R3.14\/base/g' ./configure/RELEASE
    make -j32
    # procserver
    cd ~/Downloads
    wget https://sourceforge.net/projects/procserv/files/latest/download -O procServ-2.7.0.tar.gz
    tar xzf procServ-2.7.0.tar.gz
    mv procServ-2.7.0 /usr/local/epics/R3.14/extensions
    cd /usr/local/epics/R3.14/extensions/procServ-2.7.0
    ./configure
    make -j32
    sudo make install
    # gateway
    cd ~/Downloads
    wget https://launchpad.net/epics-gateway/trunk/2.0.6.0/+download/gateway2_0_6_0.tar.gz
    tar xzf gateway2_0_6_0.tar.gz
    mv gateway2_0_6_0 /usr/local/epics/R3.14/extensions/src
    cd /usr/local/epics/R3.14.12.6/extensions/src/gateway2_0_6_0/configure
    echo "EPICS_BASE = /usr/local/epics/R3.14/base" > RELEASE.local
    cd ../../
    sed -i 's/DIRS +=/DIRS += gateway2_0_6_0/g' ./Makefile
    cd /usr/local/epics/R3.14/extensions
    make -j32
}

function config_git {
    user=$1
    echo "configuring git as " $user " ..."
    sudo apt-get install git
    git config --global core.editor vim
    git config --global push.default simple
    read -p "enter user.name: " git_user_name
    git config --global user.name $git_user_name   # fixed typo: was $git_use_name
    read -p "enter user.email: " git_user_email
    git config --global user.email $git_user_email
}

function create_fac_files {
    read -p "this will delete /home/fac_files if it already exists. continue? (yes/[no]): " user_input
    if [[ "$user_input" != "yes" ]]
    then
        exit
    fi
    sudo rm -rf /home/fac_files
    sudo mkdir -p /home/fac_files;
    sudo chown -R fac /home/fac_files
    echo "--- !!! MANUALLY configure /etc/fstab ---"
    echo "sudo vi /etc/fstab # and edit UUID=<some_ID> /      ext4 errors=remount-ro,acl 0 1"
    echo "                   # and edit UUID=<some_ID> /home/ ext4 defaults,acl 0 2"
    echo ""
    read -p "type 'ok' and hit <enter> when done to proceed: " user_input
    if [[ "$user_input" != "ok" ]]
    then
        exit
    fi
    echo ""
    sudo mount -oremount /
    #sudo mount -oremount /home/  # in case your /home is in a different partition
    sudo chgrp -R fac /home/fac_files
    sudo setfacl -Rdm u::rwx,g:fac:rwx,o::r /home/fac_files
    sudo setfacl -Rm u::rwx,g:fac:rwx,o::r /home/fac_files
    cd /home/fac_files/
    cd /home/fac_files; mkdir lnls-fac lnls-sirius lnls-ima
    sudo chown -R fac /home/fac_files
}

function install_fac_scripts_repo {
    printf "\n--- installing sirius-fac/scripts ---\n"
    cd /home/fac_files/lnls-fac
    git clone [email protected]:lnls-fac/scripts
    sudo make -C scripts/etc develop
    sudo make -C scripts/bin develop
}

function install_siriusbashrc_for_user {
    echo -e "\n$(cat ~/.bashrc)" > ~/.bashrc
    echo -e "fi\n$(cat ~/.bashrc)" > ~/.bashrc
    echo -e "    source \"\$SIRIUSBASHRC\"\n$(cat ~/.bashrc)" > ~/.bashrc
    echo -e "if [ -f \"\$SIRIUSBASHRC\" ] ; then\n$(cat ~/.bashrc)" > ~/.bashrc
    echo -e "SIRIUSBASHRC=/usr/local/etc/bashrc-sirius\n$(cat ~/.bashrc)" > ~/.bashrc
}

function install_python {
    printf "\n--- installing build-essential ---\n"; sudo apt-get -y install build-essential
    printf "\n--- installing checkinstall ---\n"; sudo apt-get -y install checkinstall
    printf "\n--- installing libreadline-gplv2-dev ---\n"; sudo apt-get -y install libreadline-gplv2-dev
    printf "\n--- installing libncursesw5-dev ---\n"; sudo apt-get -y install libncursesw5-dev
    printf "\n--- installing libssl-dev ---\n"; sudo apt-get -y install libssl-dev
    printf "\n--- installing libsqlite3-dev ---\n"; sudo apt-get -y install libsqlite3-dev
    printf "\n--- installing tk-dev ---\n"; sudo apt-get -y install tk-dev
    printf "\n--- installing libgdbm-dev ---\n"; sudo apt-get -y install libgdbm-dev
    printf "\n--- installing libc6-dev ---\n"; sudo apt-get -y install libc6-dev
    printf "\n--- installing libbz2-dev ---\n"; sudo apt-get -y install libbz2-dev
    cd ~/Downloads
    wget https://www.python.org/ftp/python/3.6.1/Python-3.6.1.tgz
    tar xzf Python-3.6.1.tgz
    cd Python-3.6.1/
    ./configure --enable-shared --with-ensurepip=install
    make -j32
    #make -j32 test
    sudo make altinstall
    sudo ldconfig
    sudo ln -f -s /usr/local/bin/python3.6 /usr/bin/python-sirius
}

function install_python_packages {
    printf "\n--- installing libffi6 ---\n"; sudo apt-get -y install libffi6
    printf "\n--- installing libffi-dev ---\n"; sudo apt-get -y install libffi-dev
    printf "\n--- installing libfreetype6 ---\n"; sudo apt-get -y install libfreetype6
    printf "\n--- installing libfreetype6-dev ---\n"; sudo apt-get -y install libfreetype6-dev
    printf "\n--- installing libpng3 ---\n"; sudo apt-get -y install libpng3
    printf "\n--- installing nmap ---\n"; sudo apt-get -y install nmap
    printf "\n--- installing dvipng ---\n"; sudo apt-get -y install dvipng
    printf "\n--- installing python-nmap ---\n"; sudo pip3.6 install python-nmap
    printf "\n--- installing wakeonlan ---\n"; sudo pip3.6 install wakeonlan
    printf "\n--- installing requests ---\n"; sudo pip3.6 install requests
    printf "\n--- installing pyqtgraph ---\n"; sudo pip3.6 install pyqtgraph
    printf "\n--- installing pandas ---\n"; sudo pip3.6 install pandas
    printf "\n--- installing psutil ---\n"; sudo pip3.6 install psutil
    printf "\n--- installing termcolor ---\n"; sudo pip3.6 install termcolor
    printf "\n--- installing sh ---\n"; sudo pip3.6 install sh
    printf "\n--- installing cairocffi ---\n"; sudo pip3.6 install cairocffi
    printf "\n--- installing matplotlib ---\n"; sudo pip3.6 install matplotlib
    printf "\n--- installing scipy ---\n"; sudo pip3.6 install scipy
    printf "\n--- installing jupyter ---\n"; sudo pip3.6 install jupyter
}

function configure_hosts {
    source ~/.bashrc
    sudo chown fac.fac /etc/hosts && sudo chmod g+wr /etc/hosts
    fac-hosts-update.py
}

function install_pyepics_pcaspy {
    printf "\n--- installing pyepics ---\n"; sudo pip3.6 install pyepics
    source ~/.bashrc
    sudo -E pip3.6 install pcaspy
}

function clone_lnls_sirius_machine_applications {
    cd /home/fac_files/lnls-sirius/
    printf "\n--- cloning control-system-constants ---\n"; git clone [email protected]:lnls-sirius/control-system-constants.git
    printf "\n--- cloning dev-packages ---\n"; git clone [email protected]:lnls-sirius/dev-packages.git
    printf "\n--- cloning machine-applications ---\n"; git clone [email protected]:lnls-sirius/machine-applications.git
}

function clone_lnls_sirius_hla {
    printf "\n--- cloning hla ---\n"; git clone [email protected]:lnls-sirius/hla.git
}

function clone_lnls_fac {
    cd /home/fac_files/lnls-fac/
}

#create_groups
#create_fac_users
#install_linux_packages
#install_epics_base
#install_epics_extensions
#config_git
#create_fac_files
#install_fac_scripts_repo
#install_siriusbashrc_for_user
#install_python
#install_python_packages
#configure_hosts
#install_pyepics_pcaspy
#clone_lnls_sirius_machine_applications
#clone_lnls_sirius_hla
#clone_lnls_fac
lnls-fac/scripts
bin/rip/sirius-computer-setup.bash
Shell
mit
8,964
#!/bin/bash

set -eu -o pipefail

outdir=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $outdir
mkdir -p $PREFIX/bin

cp ./chexmix_v0.4.jar $outdir
cp $RECIPE_DIR/chexmix.sh $outdir/chexmix
chmod +x $outdir/chexmix
ln -s $outdir/chexmix $PREFIX/bin
jerowe/bioconda-recipes
recipes/chexmix/build.sh
Shell
mit
261
sudo apt-get update
sudo apt-get -y install unzip
sudo apt-get -y install vim
sudo apt-get -y install python-software-properties python g++ make
sudo add-apt-repository -y ppa:chris-lea/node.js
sudo apt-get update
sudo apt-get -y install nodejs=0.10.25-1chl1~precise1

mkdir -p ~/closure
cd ~/closure
sudo apt-get -y install git ant openjdk-6-jdk
git clone https://code.google.com/p/closure-compiler/ .
ant
echo "CLOSURE_PATH=\"/home/vagrant/closure/\"" >> ~/.bashrc

sudo npm install -g [email protected]
maciejjaskowski/node.js-grunt-closure
node-env.sh
Shell
mit
509
sudo apt-add-repository -y ppa:paolorotolo/copy
sudo apt-get -y update
sudo apt-get -y install copy
lovromazgon/ubuntu-reinstall-scripts
apps/install_copy.sh
Shell
mit
99
../play compile
../play stop
../play start
duanp0128/PlayHongKongResort
restart.sh
Shell
mit
43
/bin/sh ./install --with-CC=gcc4 \
	--with-CXX=g++4 \
	--with-CXXLINK=g++4 \
	--with-GC_LIBDIRS='-L/usr/local/gcc-4.0.2/lib -lgc' \
	--with-GC_INCLDIRS='-I/usr/local/gcc-4.0.2/include' \
	--with-DOT=/usr/bin/dot \
	--with-TCL_INCLDIRS='-I/usr/local/include' \
	--with-TCL_LIBDIRS='-L/usr/local/lib -L/usr/X11/lib -L/usr/X11R6/lib'
nxt4hll/roccc-2.0
roccc-compiler/src/NuSuif/suif2/install4ahawk.sh
Shell
epl-1.0
352
#!/bin/bash

s3cmd sync -P dist/ s3://blog.eikeland.se --cf-invalidate --cf-invalidate-default-index --add-header "Cache-Control: max-age=600"
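# A cautious variant (assumption: an s3cmd version that supports --dry-run,
# i.e. 1.5 or newer): preview what would be uploaded before syncing for real.
#
#   s3cmd sync --dry-run -P dist/ s3://blog.eikeland.se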
stianeikeland/bloggr-clj
sync.sh
Shell
epl-1.0
142
#!/usr/bin/env bash

# Create the .m2 folder if it does not exist
[ -d $HOME/.m2 ] || mkdir $HOME/.m2

# Copy the settings.xml file to .m2, removing any stale copy first
# (the original guard was inverted: it only ran rm when the file was absent)
[ ! -f $HOME/.m2/settings.xml ] || rm $HOME/.m2/settings.xml
cp ./travis_settings.xml $HOME/.m2/settings.xml
subclipse/subclipse
.travis-archive/setup_travis.sh
Shell
epl-1.0
246
#! /usr/bin/env bash
$XGETTEXT *.cpp -o $podir/libfolderarchivesettings.pot
kolab-groupware/kdepim-runtime
resources/folderarchivesettings/Messages.sh
Shell
gpl-2.0
76
#! /bin/sh
# Copyright (C) 2004-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# Make sure that Makefile.in is not overwritten on error.  Otherwise
# although the rebuilding rules would fail on error, they would
# succeed if make is run a second time immediately afterwards.
# Report from Harlan Stenn.

. test-init.sh

echo AC_OUTPUT>>configure.ac
: > Makefile.am

$ACLOCAL

# Create Makefile.in before configure.  configure ensures files
# generated by it or later are newer than configure, so this allows
# us to avoid a $sleep before updating Makefile.am below.
$AUTOMAKE
$AUTOCONF
./configure
$MAKE

cat >Makefile.am <<END
AUTOMAKE_OPTIONS = -Werror
INCLUDES = -Ifoo
foo_SOURCES = unused
END

# This repetition is deliberate; see heading comments.
$MAKE && exit 1
$MAKE && exit 1

:
pylam/automake
t/werror2.sh
Shell
gpl-2.0
1,401
#!/bin/bash

APATH=/ROOTPATH/mProject
PATH_LIB=${APATH}/library_jui

function copyLib {
	cp -r ${PATH_LIB}/$1/lib/*.* ${PATH_LIB}/$1/libUse/
}

function copyLibAll {
	copyLib replaceFileLib
	copyLib TreeLib
	copyLib XMLLibrary
	copyLib dataStruct
	copyLib strLibrary
}

function exportLib {
	export LD_LIBRARY_PATH=${PATH_LIB}/replaceFileLib/lib:${PATH_LIB}/TreeLib/lib:${PATH_LIB}/XMLLibrary/lib:${PATH_LIB}/dataStruct/lib:${PATH_LIB}/strLibrary/lib
}

function cleanBuild {
	make clean
}

function normal_build {
	make
}

function g_build {
	make CFLAGS=-g
}

case $1 in
	1)
		cleanBuild
		normal_build
		copyLibAll
		;;
	2)
		cleanBuild
		g_build
		;;
	3)
		./run.sh 1
		exportLib
		./$2
		;;
	4)
		./run.sh 2
		exportLib
		valgrind -v --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$2
		;;
	5)
		exportLib
		./$2
		;;
	*)
		echo "1 -- normal build"
		echo "2 -- debug build"
		echo "3 -- normal run"
		echo "4 -- debug run"
		echo "5 -- normal run"
		;;
esac
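# Usage sketch (assumption: "app" stands for whatever binary the Makefile
# produces; it is an illustrative name, not one defined by this repo):
#
#   ./arun_temp.sh 1        # clean build, then copy the shared libs
#   ./arun_temp.sh 3 app    # rebuild, export LD_LIBRARY_PATH, run app
#   ./arun_temp.sh 4 app    # same, but under valgrind's memcheck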
Chalearm/MachineLearning-
mProject/fileApp/fileReplaceApp/arun_temp.sh
Shell
gpl-2.0
1,168
#!/bin/sh
# --------------------------------------------------------------------------
# buildscript for osx 32bit/64bit
# --------------------------------------------------------------------------
# Processing Wrapper for the Oculus Rift library
# http://github.com/xohm/SimpleOculusRift
# --------------------------------------------------------------------------
# prog:  Max Rheiner / Interaction Design / zhdk / http://iad.zhdk.ch/
# date:  06/24/2014 (m/d/y)
# ----------------------------------------------------------------------------
# Change those vars to the folders you have on your system:
#   -DEIGEN3D_INCLUDE   = folder of Eigen3d headers
#   -DBOOST_ROOT        = folder of Boost root
#   -DBOOST_LIBRARYDIR  = folder of Boost library folder
#   -DP5_JAR            = filepath to your core.jar (Processing)
# ----------------------------------------------------------------------------

# optional, but gives a clean build
rm -r build

# check if build folder exists
if [ ! -d "build" ]; then
    mkdir build
fi

cd ./build

echo "--- generate cmake ---"
cmake -DCMAKE_BUILD_TYPE=Release \
      -DEIGEN3D_INCLUDE=/usr/local/include/eigen3/ \
      -DOCULUSRIFT_DIR=/Users/max/Documents/libs/OculusSDK \
      -DP5_JAR=/Applications/Processing.app/Contents/Java/core/library/core.jar \
      -DCMAKE_OSX_ARCHITECTURES="x86_64" \
      ..
# -DCMAKE_OSX_ARCHITECTURES="i386;x86_64" \

echo "--- build ---"
# build with 6 threads, verbose is optional, but otherwise you can't see the compiler directives
#make -j 6 VERBOSE=1
make -j6

# change name path for a local library
# boost
install_name_tool -change /usr/local/lib/libboost_system-mt.dylib @loader_path/./osx/libboost_system-mt.dylib libSimpleOculusRift.jnilib
install_name_tool -change /usr/local/lib/libGLEW.1.10.0.dylib @loader_path/./osx/libGLEW.1.10.0.dylib libSimpleOculusRift.jnilib

echo "Lib paths:"
otool -L libSimpleOculusRift.jnilib

echo "--- copy ---"
# copy the library
cp SimpleOculusRift.jar ../dist/all/SimpleOculusRift/library
cp libSimpleOculusRift.jnilib ../dist/all/SimpleOculusRift/library

# copy the doc
cp -r ./doc/* ../dist/all/SimpleOculusRift/documentation/
xohm/SimpleOculusRift
buildOsx.sh
Shell
gpl-2.0
2,158
#!/system/bin/sh

#Set governor items
#echo 378000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq;
echo 1512000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq;
echo 1512000 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq;
echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_booted;

DISLIT=0
[ "`/system/xbin/busybox grep -i 1 /data/.disable_touchlight`" ] && DISLIT=1
if [ "$DISLIT" == 1 ]; then
	echo 0 > /sys/class/leds/button-backlight/max_brightness;
	echo 0 > /sys/class/leds/button-backlight/brightness;
fi;

echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle;
echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse;

# General Tweaks, thanks to Osmosis and Malaroths for most of this
echo 512 > /sys/block/mmcblk0/queue/nr_requests;
echo 256 > /sys/block/mmcblk0/queue/read_ahead_kb;
echo 2 > /sys/block/mmcblk0/queue/rq_affinity;
echo 0 > /sys/block/mmcblk0/queue/nomerges;
echo 0 > /sys/block/mmcblk0/queue/rotational;
echo 0 > /sys/block/mmcblk0/queue/add_random;
echo 0 > /sys/block/mmcblk0/queue/iostats;
echo 8192 > /proc/sys/vm/min_free_kbytes

# Cache Tweaks, thanks to brees75 for this stuff
echo 2048 > /sys/devices/virtual/bdi/0:18/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/0:19/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/1:0/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:1/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:2/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:3/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:4/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:5/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:6/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:7/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:8/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:9/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:10/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:11/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:12/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:13/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:14/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/1:15/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/7:0/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:1/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:2/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:3/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:4/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:5/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:6/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:7/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:8/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:9/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:10/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:11/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:12/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:13/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:14/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:15/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:16/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:17/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:18/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:19/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:20/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:21/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:22/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:23/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:24/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:25/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:26/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:27/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:28/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:29/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:30/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:31/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:32/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:33/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:34/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:35/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:36/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:37/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/7:38/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/179:0/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/179:8/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/179:16/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/253:0/read_ahead_kb
echo 2048 > /sys/devices/virtual/bdi/254:0/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:1/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:2/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:3/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:4/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:5/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:6/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:7/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:8/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:9/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:10/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:11/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:12/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:13/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:14/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:15/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:16/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:17/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:18/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:19/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:20/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:21/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:22/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:23/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:24/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:25/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:26/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:27/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:28/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:29/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:30/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:31/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:32/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:33/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:34/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:35/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:36/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:37/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/254:38/read_ahead_kb
echo 256 > /sys/devices/virtual/bdi/default/read_ahead_kb

echo $(date) END of post-init.sh
Perseus71/KT747
kernel/usr/initramfs/sbin/post-init.sh
Shell
gpl-2.0
6,766
#!/bin/tcsh -xef

# ============
# this is Jim's pre-proc script. It's a (heavily) edited version of the
# script automatically created by uber_subject_proc.py
#
# it has been rebuilt to work for study or test.
#
# it assumes that when it is called, the tcsh shell is currently in the master working directory for the analysis (eg scaleup_study)
# arg1 subj eg PARC_sub_2699 # subject to use
# arg2 anat_suffix # suffix to use for anatomical
# arg3 func_type # type of functional to use (Study or Test)
#
#
# This script is just for 2956, the one with the lack of Rx for runs 1-3
# in special 2 I'm doing runs 4-6. many steps have been done already so they are commented out.
# ============

# ===========================  setup  ============================

# the user may specify a single subject to run with
# if ( $#argv > 0 ) then
#     set subj = $argv[1]
# endif
#
# ## user may also specify the anat suffix to use (but don't)
# if ( $#argv > 1 ) then
#     set anat_suffix = $argv[2]
# else
#     set anat_suffix = 'none_specified'
# endif
#
# ## type of functional to use (Study or Test)
# if ( $#argv > 2 ) then
#     set func_type = $argv[3]
# endif

### setting Params
set subj = PARC_sub_2956
set func_type = Test
set anat_suffix = fs_brain

# set list of runs
set runs = (`count -digits 2 4 6`)

# assign output directory name
set output_dir = $subj.results

# verify that the results directory does not yet exist
# if ( -d $output_dir ) then
#     echo output dir "$subj.results" already exists
#     exit
# endif
#
#
# # create results and stimuli directories
# mkdir $output_dir
# mkdir $output_dir/stimuli
#
#
# ============================ fetch files ============================
#
# # copy raw anatomy to results dir
# 3dcopy \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/anat/${subj}_FSPGR_1.nii.gz \
#     $output_dir/${subj}_FSPGR_1
#
# # FS directory
# set fs_dir = '/Volumes/group/iang/biac3/gotlib7/data/PARC/finishedRecons_Marissa'
#
# # freesurfer sub is slightly different of form PARC_1234
# set fs_sub = `echo $subj| sed 's/_sub_/_/g'`
#
# # copy brain over
# cp -v ${fs_dir}/${fs_sub}/mri/brain.mgz $output_dir/fs_brain.mgz
#
# #convert via freesurfer command
# mri_convert --out_orientation RAI $output_dir/fs_brain.mgz $output_dir/fs_brain.{subj}.nii.gz
#
# #3d copy to get in afni format.
# 3dcopy $output_dir/fs_brain.{subj}.nii.gz $output_dir/${subj}_${anat_suffix}
#
# ============================ block: tcat ============================
# apply 3dTcat to copy input dsets to results dir, while
# removing the first 3 TRs
# 3dTcat -prefix $output_dir/pb00.$subj.r01.tcat \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/func/${subj}_${func_type}_run_1.nii.gz'[3..$]'
# 3dTcat -prefix $output_dir/pb00.$subj.r02.tcat \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/func/${subj}_${func_type}_run_2.nii.gz'[3..$]'
# 3dTcat -prefix $output_dir/pb00.$subj.r03.tcat \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/func/${subj}_${func_type}_run_3.nii.gz'[3..$]'
# 3dTcat -prefix $output_dir/pb00.$subj.r04.tcat \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/func/${subj}_${func_type}_run_4.nii.gz'[3..$]'
# 3dTcat -prefix $output_dir/pb00.$subj.r05.tcat \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/func/${subj}_${func_type}_run_5.nii.gz'[3..$]'
# 3dTcat -prefix $output_dir/pb00.$subj.r06.tcat \
#     /Users/Jim/PARC_study/scandata_for_analysis/${subj}/func/${subj}_${func_type}_run_6.nii.gz'[3..$]'

# -------------------------------------------------------
# enter the results directory (can begin processing data)
cd $output_dir

# ========================== auto block: outcount ==========================
# data check: compute outlier fraction for each volume
touch out.pre_ss_warn.txt
foreach run ( $runs )
    3dToutcount -automask -fraction -polort 2 -legendre \
        pb00.$subj.r$run.tcat+orig > outcount.r$run.1D

    # outliers at TR 0 might suggest pre-steady state TRs
    if ( `1deval -a outcount.r$run.1D"{0}" -expr "step(a-0.4)"` ) then
        echo "** TR #0 outliers: possible pre-steady state TRs in run $run" \
            >> out.pre_ss_warn.txt
    endif
end

# catenate outlier counts into a single time series
cat outcount.r*.1D > outcount_rall.1D

# ================================= tshift =================================
# time shift data so all slice timing is the same
foreach run ( $runs )
    3dTshift -tzero 0 -quintic -prefix pb01.$subj.r$run.oblique.tshift \
        pb00.$subj.r$run.tcat+orig
end

# ================================= deoblique ========
foreach run ( $runs )
    3dWarp -oblique2card -prefix pb01.$subj.r$run.tshift \
        pb01.$subj.r$run.oblique.tshift+orig
end
# at end of deobliquing, tshift is aligned. (well, starts off better)

# ========================= count trs =================================
touch tr_Counts.txt
foreach run ( $runs )
    3dinfo -nv pb01.$subj.r$run.tshift+orig >> tr_Counts.txt
end
set tr_counts = `cat tr_Counts.txt`

# ======= Jim's alignment. Uses skull stripped brain from freesurfer ====
#
###### 2956 change - changed base ######
align_epi_anat.py -epi2anat -anat ${subj}_${anat_suffix}+orig \
    -anat_has_skull no \
    -epi pb01.$subj.r04.tshift+orig -epi_base 5 \
    -epi_strip 3dAutomask \
    -cost nmi \
    -volreg off -tshift off

# create an all-1 dataset to mask the extents of the warp
3dcalc -a pb01.$subj.r04.tshift+orig -expr 1 -prefix rm.epi.all1

# ======= Jim's vol reg. Uses skull stripped brain from freesurfer ====
#
foreach run ( $runs )
    # register each volume to the base
    3dvolreg -verbose -zpad 1 -base pb01.$subj.r04.tshift+orig'[5]' \
        -1Dfile dfile.r$run.1D -prefix rm.epi.volreg.r$run \
        -cubic \
        -1Dmatrix_save mat.r$run.vr.aff12.1D \
        pb01.$subj.r$run.tshift+orig

    # catenate volreg and epi2anat transformations
    cat_matvec -ONELINE \
        ${subj}_${anat_suffix}_al_mat.aff12.1D -I \
        mat.r$run.vr.aff12.1D > mat.r$run.warp.aff12.1D

    # apply catenated xform : volreg and epi2anat
    3dAllineate -base ${subj}_${anat_suffix}+orig \
        -input pb01.$subj.r$run.tshift+orig \
        -1Dmatrix_apply mat.r$run.warp.aff12.1D \
        -mast_dxyz 3 \
        -prefix rm.epi.nomask.r$run

    # warp the all-1 dataset for extents masking
    3dAllineate -base ${subj}_${anat_suffix}+orig \
        -input rm.epi.all1+orig \
        -1Dmatrix_apply mat.r$run.warp.aff12.1D \
        -mast_dxyz 3 -final NN -quiet \
        -prefix rm.epi.1.r$run

    # make an extents intersection mask of this run
    3dTstat -min -prefix rm.epi.min.r$run rm.epi.1.r$run+orig
end

cat dfile.r*.1D > dfile_rall.1D

# ----------------------------------------
# create the extents mask: mask_epi_extents.2+orig
# (this is a mask of voxels that have valid data at every TR)
3dMean -datum short -prefix rm.epi.mean rm.epi.min.r*.HEAD
3dcalc -a rm.epi.mean+orig -expr 'step(a-0.999)' -prefix mask_epi_extents.2

# and apply the extents mask to the EPI data
# (delete any time series with missing data)
# (note: dataset name made consistent with the ".2" prefix created above)
foreach run ( $runs )
    3dcalc -a rm.epi.nomask.r$run+orig -b mask_epi_extents.2+orig \
        -expr 'a*b' -prefix pb02.$subj.r$run.volreg
end

# create an anat_final dataset, aligned with stats
3dcopy ${subj}_${anat_suffix}+orig anat_final2.$subj

#### end Jim volreg edit #######

# ================================== blur ==================================
# blur each volume of each run
foreach run ( $runs )
    3dmerge -1blur_fwhm 4.0 -doall -prefix pb03.$subj.r$run.blur \
        pb02.$subj.r$run.volreg+orig
end

# ================================== mask ==================================
# create 'full_mask' dataset (union mask)
foreach run ( $runs )
    3dAutomask -dilate 1 -prefix rm.mask_r$run pb03.$subj.r$run.blur+orig
end

# get mean and compare it to 0 for taking 'union'
3dMean -datum short -prefix rm.mean rm.mask*.HEAD
3dcalc -a rm.mean+orig -expr 'ispositive(a-0)' -prefix full_mask2.$subj

# ---- create subject anatomy mask, mask_anat2.$subj+orig ----
#      (resampled from aligned anat)
3dresample -master full_mask2.$subj+orig -input ${subj}_${anat_suffix}+orig \
    -prefix rm.resam.anat

# convert to binary anat mask; fill gaps and holes
# (note: prefix given the "2" suffix so the 3dABoverlap call below finds it)
3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.anat+orig \
    -prefix mask_anat2.$subj

# compute overlaps between anat and EPI masks
3dABoverlap -no_automask full_mask2.$subj+orig mask_anat2.$subj+orig \
    |& tee out.mask_overlap.txt

# ================================= scale ==================================
# scale each voxel time series to have a mean of 100
# (be sure no negatives creep in)
# (subject to a range of [0,200])
foreach run ( $runs )
    3dTstat -prefix rm.mean_r$run pb03.$subj.r$run.blur+orig
    3dcalc -a pb03.$subj.r$run.blur+orig -b rm.mean_r$run+orig \
        -expr 'min(200, a/b*100)*step(a)*step(b)' \
        -prefix pb04.$subj.r$run.scale
end

# ================== write 1d files for censor
foreach run ( $runs )
    echo $run
    set trs_for_run = `3dinfo -nv pb01.$subj.r$run.tshift+orig`
    echo $trs_for_run
    1d_tool.py -infile dfile.r$run.1D -set_run_lengths ${trs_for_run} \
        -demean -write motion_demean.r$run.1D
    1d_tool.py -infile dfile.r$run.1D -set_run_lengths ${trs_for_run} \
        -show_censor_count -censor_prev_TR \
        -censor_motion 0.3 motion_r${run}_${subj}
end

# #
# # ================== auto block: generate review scripts ===================
# #
# # generate a review script for the unprocessed EPI data
# gen_epi_review.py -script @epi_review.$subj \
#     -dsets pb00.$subj.r*.tcat+orig.HEAD
#
# # generate scripts to review single subject results
# # (try with defaults, but do not allow bad exit status)
# gen_ss_review_scripts.py -mot_limit 0.3 -exit0
#
# # ========================== auto block: finalize ==========================

# remove temporary files
\rm -f rm.*
# \rm -f pb00*
# \rm -f pb02*

# if the basic subject review script is here, run it
# (want this to be the last text output)
if ( -e @ss_review_basic ) ./@ss_review_basic |& tee out.ss_review.$subj.txt

# return to parent directory
cd ..

echo "execution finished: `date`"
sorensje/dissertation
preproc_2956_sepcial2.sh
Shell
gpl-2.0
10,847
#!/bin/bash

# Daniel Fernandez Rodriguez <gmail.com daferoes>
# https://github.com/danifr/miscellaneous
#
# This script will download, build and install/update OpenAFS
#
#
# Usage: sh openafs_update.sh $OPENAFS_RELEASE
# Ex: sh openafs_update.sh 1.8.3
#

OPENAFS_RELEASE=$1
WORKING_DIR='/tmp'
EPEL_RPM='http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm'
OPENAFS_LATEST_URL="https://www.openafs.org/pages/release/latest.html"
OPENAFS_ALL_URL="https://www.openafs.org/dl/openafs/"

if [[ $UID -ne 0 ]]; then
  echo '[ERROR] You need to run this program as root... Exiting'
  exit 1
fi

if [[ -z $OPENAFS_RELEASE ]]; then
  echo "[WARN] OpenAFS release version not provided"
  echo "[WARN] Please specify the version you want to install. ex: ./openafs_update.sh 1.8.3"
  echo "[WARN] Check all available releases --> $OPENAFS_ALL_URL"
  echo ""
  echo "[INFO] ~~ Let's try some curl + grep magic! ~~"
  echo "[INFO] Getting latest stable release number from $OPENAFS_LATEST_URL..."
  OPENAFS_LATEST=$(curl -s $OPENAFS_LATEST_URL | grep -o -P '(?<=<title>OpenAFS ).*(?=</title>)')
  read -p "Do you want to build and install release v$OPENAFS_LATEST? [y/n]: " GO_LATEST
  if [[ ${GO_LATEST,,} == 'y' ]]; then
    OPENAFS_RELEASE=$OPENAFS_LATEST
  else
    read -p "Do you want to specify the release number to build? (ex: 1.8.3 or ENTER to skip): " OPENAFS_RELEASE
    if [[ -z $OPENAFS_RELEASE ]]; then
      echo "[ERROR] OpenAFS release number not provided. Exiting."
      exit 1
    fi
  fi
fi

echo "[INFO] Changing working directory to $WORKING_DIR..."
cd $WORKING_DIR

read -p "Do you want to install all dependencies? [recommended] [y/n]: " COMPLETE
if [[ ${COMPLETE,,} == 'y' ]]; then
  if [[ $(cat /etc/system-release) == *CentOS* ]]; then
    echo "[INFO] Enabling EPEL Repository..."
    wget $EPEL_RPM > /dev/null
    rpm -ivh epel-release-latest-7.noarch.rpm   # fixed: install the file wget actually fetched
  fi
  echo "[INFO] Installing dependencies..."
  yum install -y rpm-build bison flex kernel-devel kernel-devel-x86_64 \
    krb5-devel ncurses-devel pam-devel perl-ExtUtils-Embed perl-devel \
    wget swig
  yum groupinstall -y 'Development Tools'
  yum install -y krb5-workstation
  echo "[INFO] Configuring krb5.conf file..."
  wget http://linux.web.cern.ch/linux/docs/krb5.conf -O /etc/krb5.conf
  echo "[INFO] Everything seems OK. Let's start with the OpenAFS upgrade..."
fi

echo "[INFO] Downloading openafs-$OPENAFS_RELEASE..."
if [[ $OPENAFS_RELEASE != *pre* ]]; then
  wget -A src.rpm -r -l 1 -nd --quiet -e robots=off \
    https://www.openafs.org/dl/openafs/$OPENAFS_RELEASE/ > /dev/null
else
  wget -A src.rpm -r -l 1 -nd --quiet -e robots=off \
    https://www.openafs.org/dl/openafs/candidate/$OPENAFS_RELEASE/ > /dev/null
fi

# some bash magic to get OpenAFS full version (release + compilation)
FILENAME=$(ls *.src.rpm)
echo "[INFO] $FILENAME successfully downloaded"
OPENAFS_RELEASE_FULL=${FILENAME%.src.rpm}
OPENAFS_RELEASE=${OPENAFS_RELEASE_FULL##*openafs-}

echo "[INFO] Rebuilding package..."
rpmbuild --rebuild $FILENAME

cd ~/rpmbuild/RPMS/x86_64/
# more bash magic to get KERNEL_VERSION and ARCHITECTURE from uname -r
KERNEL_RELEASE=$(uname -r)
KERNEL_RELEASE_ARRAY=(${KERNEL_RELEASE//./ })
ARRAY_LENGTH=${#KERNEL_RELEASE_ARRAY[@]}
ARCH="${KERNEL_RELEASE_ARRAY[$ARRAY_LENGTH-2]}.${KERNEL_RELEASE_ARRAY[$ARRAY_LENGTH-1]}"
KERNEL_VERSION=${KERNEL_RELEASE%.$ARCH}
KERNEL_VERSION=${KERNEL_VERSION//-/_}

echo [INFO] Installing OpenAFS v$OPENAFS_RELEASE for kernel $KERNEL_VERSION ARCH $ARCH
rm -f openafs-kpasswd-* openafs-server*
yum install -y *.rpm

THISCELL_DIR='/usr/vice/etc/'
echo "[INFO] Creating $THISCELL_DIR directory..."
mkdir -p $THISCELL_DIR
echo "cern.ch" > $THISCELL_DIR/ThisCell

echo "[INFO] Deleting $FILENAME from $WORKING_DIR..."
rm $WORKING_DIR/$FILENAME

echo "[INFO] Restarting openafs-client service..."
systemctl restart openafs-client.service
if [ $? -eq 0 ]; then
  echo '[INFO] All done :D'
  echo '[INFO] To start using it, you will need a valid kerberos ticket:
    kinit <username>@CERN.CH
  And also mount the afs share on our system:
    aklog -c cern.ch -k CERN.CH
  After doing it, you will be able to access your personal share from:
    /afs/cern.ch/user/<first_letter_username>/<username>
  '
else
  echo "[ERROR] Failed to start openafs-client.service. Please check error trace."
  exit 1
fi
danifr/miscellaneous
CERN_OpenAFS/openafs_update.sh
Shell
gpl-2.0
4,347
#! /bin/sh
# Copyright (C) 2013-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# Expose part of automake bug#13928: if the subdir-objects option is
# in use and a source file is listed in a _SOURCES variable with a
# leading $(srcdir) component, Automake will generate a Makefile that
# tries to create the corresponding object file in $(srcdir) as well.

required=cc
. test-init.sh

cat >> configure.ac <<'END'
AC_PROG_CC
AM_PROG_CC_C_O
AM_CONDITIONAL([OBVIOUS], [:])
AC_CONFIG_FILES([sub/Makefile])
AC_OUTPUT
END

cat > Makefile.am <<'END'
AUTOMAKE_OPTIONS = subdir-objects
SUBDIRS = sub
LESS = more

noinst_PROGRAMS = test test2
test_SOURCES = $(srcdir)/test.c
test2_SOURCES = $(indir)

indir =
if OBVIOUS
indir += ${srcdir}/$(LESS)/test.c
else
endif

test-objs:
	ls -la @srcdir@ .
	:
	test ! -f @srcdir@/test.$(OBJEXT)
	test -f test.$(OBJEXT)
	test ! -f @srcdir@/more/test.$(OBJEXT)
	test -f more/test.$(OBJEXT)
	:
	test ! -f @srcdir@/bar.$(OBJEXT)
	test -f bar.$(OBJEXT)
	test ! -f @srcdir@/baz.$(OBJEXT)
	test -f baz.$(OBJEXT)
	:
	test ! -d @srcdir@/$(DEPDIR)
	test ! -d @srcdir@/more/$(DEPDIR)
	test -d $(DEPDIR)
	test -d more/$(DEPDIR)

check-local: test-objs
END

mkdir sub
cat > sub/Makefile.am <<'END'
AUTOMAKE_OPTIONS = subdir-objects
bin_PROGRAMS = foo
foo_SOURCES = foo.h \
              $(top_srcdir)/bar.c \
              ${top_srcdir}/baz.c
END

$ACLOCAL
$AUTOCONF
$AUTOMAKE -a

mkfiles='Makefile.in sub/Makefile.in'

$EGREP '(test|ba[rz])\.|DEPDIR|dirstamp|srcdir' $mkfiles # For debugging.

$EGREP '\$.(top_)?srcdir./(test|ba[rz])\.[o$]' $mkfiles && exit 1
$FGREP '\$.(top_)?srcdir./.*$(am__dirstamp)' $mkfiles && exit 1
$FGREP '\$.(top_)?srcdir./.*$(DEPDIR)' $mkfiles && exit 1

cat > test.c <<'END'
int main (void) { return 0; }
END

mkdir more
cp test.c more/test.c
echo 'int foo (void);' > sub/foo.h

cat > bar.c <<'END'
#include "foo.h"
int main (void) { return foo (); }
END

cat > baz.c <<'END'
#include "foo.h"
int foo (void) { return 0; }
END

mkdir build
cd build
../configure
$MAKE
$MAKE test-objs
$MAKE distcheck

:
pylam/automake
t/subobj-vpath-pr13928.sh
Shell
gpl-2.0
2,682
#!/bin/bash

rm .version

# Bash Color
green='\033[01;32m'
red='\033[01;31m'
blink_red='\033[05;31m'
restore='\033[0m'

clear

# Resources
THREAD="-j$(grep -c ^processor /proc/cpuinfo)"
KERNEL="Image"
DTBIMAGE="dtb"
DEFCONFIG="phasma_defconfig"

# Kernel Details
VER=".R39.bullhead."

# Paths
KERNEL_DIR=`pwd`
REPACK_DIR="${HOME}/android/AK-OnePone-AnyKernel2"
PATCH_DIR="${HOME}/android/AK-OnePone-AnyKernel2/patch"
MODULES_DIR="${HOME}/android/AK-OnePone-AnyKernel2/modules"
ZIP_MOVE="${HOME}/android/AK-releases"
ZIMAGE_DIR="${HOME}/android/bullhead/arch/arm64/boot/"

# Functions
function clean_all {
		rm -rf $MODULES_DIR/*
		cd ~/android/bullhead/out/kernel
		rm -rf $DTBIMAGE
		git reset --hard > /dev/null 2>&1
		git clean -f -d > /dev/null 2>&1
		cd $KERNEL_DIR
		echo
		make clean && make mrproper
}

function make_kernel {
		echo
		make $DEFCONFIG
		make $THREAD
}

function make_modules {
		rm `echo $MODULES_DIR"/*"`
		find $KERNEL_DIR -name '*.ko' -exec cp -v {} $MODULES_DIR \;
}

function make_dtb {
		$REPACK_DIR/tools/dtbToolCM -2 -o $REPACK_DIR/$DTBIMAGE -s 2048 -p scripts/dtc/ arch/arm64/boot/
}

function make_boot {
		cp -vr $ZIMAGE_DIR/Image.gz-dtb ~/android/bullhead/out/kernel/zImage
		. appendramdisk.sh
}

function make_zip {
		cd ~/android/bullhead/out
		zip -r9 `echo $AK_VER`.zip *
		mv `echo $AK_VER`.zip $ZIP_MOVE
		cd $KERNEL_DIR
}

DATE_START=$(date +"%s")

echo -e "${green}"
echo "-----------------"
echo "Making Kylo Kernel:"
echo "-----------------"
echo -e "${restore}"

while read -p "Do you want to use UBERTC 4.9(1) or UBERTC 5.3(2)? " echoice
do
case "$echoice" in
	1 )
		export CROSS_COMPILE=${HOME}/android/uberbuild/out/aarch64-linux-android-4.9-kernel/bin/aarch64-linux-android-
		TC="UBER4.9"
		echo
		echo "Using UBERTC 4.9"
		break
		;;
	2 )
		export CROSS_COMPILE=${HOME}/android/uberbuild/out/aarch64-linux-android-5.3-kernel/bin/aarch64-linux-android-
		TC="UBER5.3"
		echo
		echo "Using UBERTC 5.3"
		break
		;;
	3 )
		export CROSS_COMPILE=${HOME}/android/linarobuild/out/aarch64-linux-android-4.9-kernel/bin/aarch64-linux-android-
		TC="LINARO4.9"
		echo
		echo "Using Linaro 4.9"
		break
		;;
	4 )
		export CROSS_COMPILE=${HOME}/android/linarobuild/out/aarch64-linux-android-5.3-kernel/bin/aarch64-linux-android-
		TC="LINARO5.3"
		echo
		echo "Using Linaro 5.3"
		break
		;;
	5 )
		export CROSS_COMPILE=${HOME}/android/uberbuild/out/aarch64-linux-android-6.0-kernel/bin/aarch64-linux-android-
		TC="UBER6.0"
		echo
		echo "Using UBERTC 6.0"
		break
		;;
	7 )
		export CROSS_COMPILE=${HOME}/android/aarch64-linux-android-4.9/bin/aarch64-linux-android-
		TC="AOSP4.9"
		echo
		echo "Using AOSP 4.9"
		break
		;;
	8 )
		export CROSS_COMPILE=${HOME}/android/uberbuild/out/aarch64-linux-android-7.0-kernel/bin/aarch64-linux-android-
		TC="UBER7.0"
		echo
		echo "Using UBER 7.0"
		break
		;;
	* )
		echo
		echo "Invalid try again!"
		echo
		;;
esac
done

# Vars
BASE_AK_VER="Phasma"
AK_VER="$BASE_AK_VER$VER$TC"
export LOCALVERSION=~`echo $AK_VER`
export ARCH=arm64
export SUBARCH=arm64
export KBUILD_BUILD_USER=mdalexca
export KBUILD_BUILD_HOST=DarkRoom

echo

while read -p "Do you want to clean stuffs (y/n)? " cchoice
do
case "$cchoice" in
	y|Y )
		clean_all
		echo
		echo "All Cleaned now."
		break
		;;
	n|N )
		break
		;;
	* )
		echo
		echo "Invalid try again!"
		echo
		;;
esac
done

echo

while read -p "Do you want to build?" dchoice
do
case "$dchoice" in
	y|Y )
		make_kernel
		make_dtb
		make_modules
		make_boot
		make_zip
		break
		;;
	n|N )
		break
		;;
	* )
		echo
		echo "Invalid try again!"
		echo
		;;
esac
done

echo -e "${green}"
echo "-------------------"
echo "Build Completed in:"
echo "-------------------"
echo -e "${restore}"

DATE_END=$(date +"%s")
DIFF=$(($DATE_END - $DATE_START))
echo "Time: $(($DIFF / 60)) minute(s) and $(($DIFF % 60)) seconds."
echo
chrisc93/bullhead
build.sh
Shell
gpl-2.0
3,883
#!/bin/bash

# Copyright 2014  Guoguo Chen, 2015 GoVivace Inc. (Nagendra Goel)
#           2017  Vimal Manohar
# Apache 2.0

# Some basic error checking, similar to steps/decode.sh, is added.

set -e
set -o pipefail

# Begin configuration section.
transform_dir=    # this option won't normally be used, but it can be used if you
                  # want to supply existing fMLLR transforms when decoding.
iter=
model=    # You can specify the model to use (e.g. if you want to use the .alimdl)
stage=0
nj=4
cmd=run.pl
max_active=7000
beam=13.0
lattice_beam=6.0
acwt=0.083333    # note: only really affects pruning (scoring is on lattices).
num_threads=1    # if >1, will use gmm-latgen-faster-parallel
parallel_opts=   # ignored now.
scoring_opts=
allow_partial=true
# note: there are no more min-lmwt and max-lmwt options, instead use
# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20"
skip_scoring=false
# End configuration section.

echo "$0 $@"  # Print the command line for logging

[ -f ./path.sh ] && . ./path.sh; # source the path.
. parse_options.sh || exit 1;

if [ $# != 3 ]; then
  echo "$0: This is a special decoding script for segmentation where we"
  echo "use one decoding graph per segment. We assume a file HCLG.fsts.scp exists"
  echo "which is the scp file of the graphs for each segment."
  echo "This will normally be obtained by steps/cleanup/make_biased_lm_graphs.sh."
  echo "This script does not estimate fMLLR transforms; you have to use"
  echo "the --transform-dir option if you want to use fMLLR."
  echo ""
  echo "Usage: $0 [options] <graph-dir> <data-dir> <decode-dir>"
  echo " e.g.: $0 exp/tri2b/graph_train_si284_split \\"
  echo "          data/train_si284_split exp/tri2b/decode_train_si284_split"
  echo ""
  echo "where <decode-dir> is assumed to be a sub-directory of the directory"
  echo "where the model is."
  echo ""
  echo "main options (for others, see top of script file)"
  echo "  --config <config-file>             # config containing options"
  echo "  --nj <nj>                          # number of parallel jobs"
  echo "  --iter <iter>                      # Iteration of model to test."
  echo "  --model <model>                    # which model to use (e.g. to"
  echo "                                     # specify the final.alimdl)"
  echo "  --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
  echo "  --transform-dir <trans-dir>        # dir to find fMLLR transforms "
  echo "  --acwt <float>                     # acoustic scale used for lattice generation "
  echo "  --scoring-opts <string>            # options to local/score.sh"
  echo "  --num-threads <n>                  # number of threads to use, default 1."
  exit 1;
fi

graphdir=$1
data=$2
dir=$3

mkdir -p $dir/log

if [ -e $dir/final.mdl ]; then
  srcdir=$dir
elif [ -e $dir/../final.mdl ]; then
  srcdir=$(dirname $dir)
else
  echo "$0: expected either $dir/final.mdl or $dir/../final.mdl to exist"
  exit 1
fi

sdata=$data/split$nj;

[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
echo $nj > $dir/num_jobs

if [ -z "$model" ]; then # if --model <mdl> was not specified on the command line...
  if [ -z $iter ]; then model=$srcdir/final.mdl;
  else model=$srcdir/$iter.mdl; fi
fi

if [ $(basename $model) != final.alimdl ] ; then
  # Do not use the $srcpath -- look at the path where the model is
  if [ -f $(dirname $model)/final.alimdl ] && [ -z "$transform_dir" ]; then
    echo -e '\n\n'
    echo $0 'WARNING: Running speaker independent system decoding using a SAT model!'
    echo $0 'WARNING: This is OK if you know what you are doing...'
    echo -e '\n\n'
  fi
fi

for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fsts.scp; do
  [ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done

utils/lang/check_phones_compatible.sh $graphdir/phones.txt $srcdir/phones.txt  # fixed: was $graph_dir, which is never set

# Split HCLG.fsts.scp by input utterance
n1=$(cat $graphdir/HCLG.fsts.scp | wc -l)
n2=$(cat $data/feats.scp | wc -l)
if [ $n1 != $n2 ]; then
  echo "$0: expected $n2 graphs in $graphdir/HCLG.fsts.scp, got $n1"
fi

mkdir -p $dir/split_fsts
sort -k1,1 $graphdir/HCLG.fsts.scp > $dir/HCLG.fsts.sorted.scp
utils/filter_scps.pl --no-warn -f 1 JOB=1:$nj \
  $sdata/JOB/feats.scp $dir/HCLG.fsts.sorted.scp $dir/split_fsts/HCLG.fsts.JOB.scp
HCLG=scp:$dir/split_fsts/HCLG.fsts.JOB.scp

if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
echo "$0: feature type is $feat_type";

splice_opts=`cat $srcdir/splice_opts 2>/dev/null` || true # frame-splicing options.
cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` || true
delta_opts=`cat $srcdir/delta_opts 2>/dev/null` || true

thread_string=
[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads"

case $feat_type in
  delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
  lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";;
  *) echo "Invalid feature type $feat_type" && exit 1;
esac

if [ ! -z "$transform_dir" ]; then # add transforms to features...
  echo "Using fMLLR transforms from $transform_dir"
  [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist" && exit 1
  [ ! -s $transform_dir/num_jobs ] && \
    echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1;
  nj_orig=$(cat $transform_dir/num_jobs)
  if [ $nj -ne $nj_orig ]; then
    # Copy the transforms into an archive with an index.
    echo "$0: num-jobs for transforms mismatches, so copying them."
    for n in $(seq $nj_orig); do cat $transform_dir/trans.$n; done | \
      copy-feats ark:- ark,scp:$dir/trans.ark,$dir/trans.scp || exit 1;
    feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/trans.scp ark:- ark:- |"
  else
    # number of jobs matches with alignment dir.
    feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |"
  fi
fi

if [ $stage -le 0 ]; then
  if [ -f "$graphdir/num_pdfs" ]; then
    [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \
      { echo "Mismatch in number of pdfs with $model"; exit 1; }
  fi
  $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \
    gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \
    --acoustic-scale=$acwt --allow-partial=$allow_partial --word-symbol-table=$graphdir/words.txt \
    $model "$HCLG" "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
fi

if ! $skip_scoring ; then
  [ ! -x local/score.sh ] && \
    echo "$0: Not scoring because local/score.sh does not exist or not executable." && exit 1;
  local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir ||
    { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; }
fi

exit 0;
michellemorales/OpenMM
kaldi/egs/wsj/s5/steps/cleanup/decode_segmentation.sh
Shell
gpl-2.0
7,261
#!/bin/sh

a=0

while [ "$a" -lt 1000 ]    # this is loop1
do
	a=`expr $a + 1`
	./NTNU_check.sh
done
sunejak/EmbeddedJetty
src/main/scripts/7_Jan_2015/NTNU_loop.sh
Shell
gpl-2.0
105
#!/bin/sh

die()
{
	echo "$@" 1>&2

	dump_call_stack

	# http://tldp.org/LDP/abs/html/exitcodes.html#EXITCODESREF
	# According to the above table, exit codes 1 - 2, 126 - 165, and 255 [1] have special meanings,
	# and should therefore be avoided for user-specified exit parameters.
	exit 99
}

dump_call_stack()
{
	:
}

[ -z "$BASHPID" ] && return

dump_call_stack()
{
	local stack_depth=${#FUNCNAME[@]}
	local i
	for i in $(seq 0 $stack_depth); do
		[[ $i -eq $stack_depth ]] && break
		echo "    ${BASH_SOURCE[i+1]}:${BASH_LINENO[i]}: ${FUNCNAME[i+1]}" >&2
	done
}
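# Usage sketch (assumption: this library is sourced, not executed -- the
# bare `return` above only makes sense in a sourced context; "$config" is
# an illustrative variable, not one defined by this file):
#
#   . lib/debug.sh
#   [ -f "$config" ] || die "config file '$config' not found"
#
# Under bash, die also prints a file:line call stack before exiting 99;
# under non-bash shells dump_call_stack stays a no-op.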
dbshch/lkp-tests
lib/debug.sh
Shell
gpl-2.0
566
#!/bin/sh

./clean_all.sh

javac -O jiv2/Main.java
jar cf ../bin/jiv2.jar COPYING jiv2

cp ../bin/jiv2.jar ~/www/Atlases/
cp ../bin/jiv2.jar ../demo_web/jiv2-bin/

./clean_all.sh
BIC-MNI/JIV2
compile_all.sh
Shell
gpl-2.0
179
#!/bin/bash

[ -z "$UHP_HOME" ] && source $HOME/.bash_profile
[ -z "$UHP_HOME" ] && {
    echo "UHP_HOME not set."
    exit 1
}

sh $UHP_HOME/bin/stop-web.sh
sh $UHP_HOME/bin/start-web.sh

cd $UHP_HOME/uhpweb/test
for py in `ls $UHP_HOME/uhpweb/test`
do
    if [[ "$py" == *py ]]
    then
        python $py
    fi
done
uhp/uhp
uhpweb/test/main.sh
Shell
gpl-2.0
325
# Setup Postfix
function ee_mod_setup_postfix()
{
	EE_EMAIL=$($EE_CONFIG_GET wordpress.email)
	if [[ $EE_EMAIL = "" ]]; then
		EE_EMAIL=$(git config user.email)
	fi

	EE_HOSTNAME=$(hostname -f)

	# We previously did not use this package, so if someone doesn't have
	# Postfix-MySQL installed, Postfix will not work
	ee_lib_echo "Installing Postfix-MySQL, please wait..."
	$EE_APT_GET install postfix-mysql \
	|| ee_lib_error "Unable to install Postfix-MySQL, exit status = " $?

	ee_lib_echo "Setting up Postfix, please wait..."

	# Configure master.cf
	sed -i 's/#submission/submission/' /etc/postfix/master.cf &&
	sed -i 's/#smtps/smtps/' /etc/postfix/master.cf \
	|| ee_lib_error "Unable to setup details in master.cf file, exit status = " $?

	# Handle SMTP authentication using Dovecot
	# On Debian6 the following form does not work ( Postfix < 2.8 ):
	#   postconf "smtpd_sasl_type = dovecot"
	# The -e option is no longer needed with Postfix version 2.8 and later.
	postconf -e "smtpd_sasl_type = dovecot"
	postconf -e "smtpd_sasl_path = private/auth"
	postconf -e "smtpd_sasl_auth_enable = yes"
	postconf -e "smtpd_relay_restrictions = permit_sasl_authenticated, permit_mynetworks, reject_unauth_destination"

	# Disable SSL for POODLE
	postconf -e "smtpd_tls_mandatory_protocols=!SSLv2,!SSLv3"
	postconf -e "smtp_tls_mandatory_protocols=!SSLv2,!SSLv3"
	postconf -e "smtpd_tls_protocols=!SSLv2,!SSLv3"
	postconf -e "smtp_tls_protocols=!SSLv2,!SSLv3"

	# other destination domains should be handled using virtual domains
	postconf -e "mydestination = localhost"

	# using Dovecot's LMTP for mail delivery and giving it path to store mail
	postconf -e "virtual_transport = lmtp:unix:private/dovecot-lmtp"

	# virtual mailbox setups
	postconf -e "virtual_uid_maps = static:5000"
	postconf -e "virtual_gid_maps = static:5000"
	postconf -e "virtual_mailbox_domains = mysql:/etc/postfix/mysql/virtual_domains_maps.cf"
	postconf -e "virtual_mailbox_maps = mysql:/etc/postfix/mysql/virtual_mailbox_maps.cf"
	postconf -e "virtual_alias_maps = mysql:/etc/postfix/mysql/virtual_alias_maps.cf"

	#postconf "message_size_limit = 20971520"

	# Setting up Postfix MySQL configuration
	mkdir -p /etc/postfix/mysql
	cp -av /usr/share/easyengine/mail/virtual_alias_maps.cf /etc/postfix/mysql/virtual_alias_maps.cf &>> $EE_COMMAND_LOG && \
	cp -av /usr/share/easyengine/mail/virtual_domains_maps.cf /etc/postfix/mysql/virtual_domains_maps.cf &>> $EE_COMMAND_LOG && \
	cp -av /usr/share/easyengine/mail/virtual_mailbox_maps.cf /etc/postfix/mysql/virtual_mailbox_maps.cf &>> $EE_COMMAND_LOG \
	|| ee_lib_error "Unable to copy Postfix MySQL configuration files, exit status = " $?

	# Configure self signed SSL for Postfix
	ee_lib_echo "Generating self signed certificate for Postfix, please wait..."
	openssl req -new -x509 -days 3650 -nodes -subj /commonName=${EE_HOSTNAME}/emailAddress=${EE_EMAIL} -out /etc/ssl/certs/postfix.pem -keyout /etc/ssl/private/postfix.pem &>> $EE_COMMAND_LOG
	chmod 0600 /etc/ssl/private/postfix.pem
	postconf -e smtpd_tls_cert_file=/etc/ssl/certs/postfix.pem
	postconf -e smtpd_tls_key_file=/etc/ssl/private/postfix.pem
}
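# Quick sanity check after this module has run (assumption: it was invoked
# through the usual EasyEngine mail-stack install path): the virtual_* maps
# should show up in the active configuration and Postfix should parse it.
#
#   postconf -n | grep -E 'smtpd_sasl|virtual_'
#   postfix check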
shitalp/easyengine
src/modules/stack/install/mail/ee_mod_setup_postfix.sh
Shell
gpl-2.0
3,134
#!/bin/bash

/bin/aws ec2 describe-instances --query 'Reservations[*].Instances[*].[State.Name, InstanceId, PublicIpAddress, Platform, Tags[?Key==`Name`]| [0].Value ]' --output text | /bin/grep running > instances.txt

restart=0
if [ -f instances-old.txt ]; then
    diff instances.txt instances-old.txt > /dev/null 2>&1
    restart=$?
fi

if [ ${restart} -ne 0 ]; then
    rm -f /etc/icinga/hosts/*.cfg
    while read line
    do
        # running  i-6b9f20bd  50.16.120.205   windows  astragalus
        # running  i-b939ee51  54.157.112.125  None     proxy
        instanceid=$(echo ${line} | awk '{print $2}')
        publicip=$(echo ${line} | awk '{print $3}')
        publicdns=$(getent hosts ${publicip} | awk '{print $NF}')
        platform=$(echo ${line} | awk '{print $4}')
        instancename="$(echo ${line} | awk '{$1=$2=$3=$4=""; print}' | sed -e "s/ //g" )"
        case ${instancename} in
            astragalus|astragalus-staging)
                echo "define host{
        use             windows-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      windows-servers
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            proxy)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      linux-servers,proxy-servers
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            vmsnagios)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      linux-servers,nagios-servers
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            access4-1|access4-2)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      linux-servers,access4-servers,glassfish-servers-domain2,glassfish-servers-domain4,tomcat-servers
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            vms-access-staging)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      linux-servers,vms-access-servers,glassfish-servers-domain1,glassfish-servers-domain2,glassfish-servers-domain3,glassfish-servers-domain4,tomcat-servers
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            vms-access-dev)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      linux-servers,vms-access-servers,glassfish-servers-domain1,glassfish-servers-domain2,glassfish-servers-domain3,tomcat-servers
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            vms-access1|vms-access2)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      linux-servers,vms-access-servers,glassfish-servers-domain1,glassfish-servers-domain2
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
            *)
                echo "define host{
        use             linux-server
        host_name       ${instancename}-${instanceid}
        alias           ${publicdns}
        address         ${publicdns}
        hostgroups      selfregistered
        }" > /etc/icinga/hosts/${instancename}-${instanceid}.cfg
                ;;
        esac
    done < instances.txt
    service icinga reload
    mv -f instances.txt instances-old.txt
fi
jmatis/icinga-aws-selfregister
icinga-selfregister.sh
Shell
gpl-2.0
3,702
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2012-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# Same as t5076, only the first directory doesn't exist when we do the
# initial compilation.
. ./tup.sh

mkdir b
echo 'int x;' > b/foo.h
echo '#include "foo.h"' > ok.c
cat > Tupfile << HERE
: ok.c |> gcc -c %f -o %o -Ia -Ib |> ok.o
HERE
update

tup_dep_exist b foo.h . 'gcc -c ok.c -o ok.o -Ia -Ib'
sym_check ok.o x

mkdir a
echo 'int y;' > a/foo.h
update

tup_dep_exist a foo.h . 'gcc -c ok.c -o ok.o -Ia -Ib'
sym_check ok.o y

# Make sure we don't have a dependency on the directory anymore.
tup_dep_no_exist . a . 'gcc -c ok.c -o ok.o -Ia -Ib'

eotup
gittup/tup
test/t5077-ghost-subdir2.sh
Shell
gpl-2.0
1,305
#!/bin/sh
#
# YAGARTO toolchain
#
# Copyright (C) 2006-2012 by Michael Fischer
# [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#---------------------------------------------------------------------------------
# Call all scripts for building the PDF
#---------------------------------------------------------------------------------

echo "Start of build:" > 12-temp.txt
date >> 12-temp.txt

cd binutils-build
make install-pdf
cd ..

cd gcc-build
make install-pdf
cd ..

cd newlib-build
make pdf
cp arm-none-eabi/newlib/libc/libc.pdf $prefix/share/doc
cp arm-none-eabi/newlib/libm/libm.pdf $prefix/share/doc
cd ..

cd gdb-build
make install-pdf
cd ..

#rm $prefix/share/doc/libquadmath.pdf

echo "End of build:" >> 12-temp.txt
date >> 12-temp.txt

mv 12-temp.txt 12-ready.txt
ARMinARM/arm-toolchain-build-scripts
12-build-pdf.sh
Shell
gpl-2.0
2,077
#!/bin/bash

NCORES=4
unamestr=`uname`
if [[ "$unamestr" == "Linux" ]]; then
    NCORES=`grep -c ^processor /proc/cpuinfo`
fi

if [[ "$unamestr" == "Darwin" ]]; then
    NCORES=`sysctl -n hw.ncpu`
fi

rm -rf deploy
rm -rf build
mkdir build
cd build
cmake ../
make -j $NCORES
cd ..

mkdir deploy
cp ./build/redumis deploy/
cp ./build/graphchecker deploy/
cp ./build/sort_adjacencies deploy/
cp ./build/online_mis deploy/
cp ./build/wmis/branch_reduce deploy/weighted_branch_reduce
#cp ./build/wmis/merge_graph_weights deploy/
cp ./build/wmis/weighted_ls deploy/weighted_local_search

rm -rf build
sebalamm/KaMIS
compile_withcmake.sh
Shell
gpl-2.0
605
#!/bin/sh
# prints to stdout matrix with given size filled with given values

if [ $# -lt 1 ]; then
    echo "Usage: $0 <number of rows> [ <number of columns> [ <value to fill with> ] ]"
    exit 1
fi

ROWS=$1
if [ $# -gt 1 ]; then
    COLUMNS=$2
    echo "# $ROWS $COLUMNS"
else
    COLUMNS=$ROWS
    echo "# $ROWS"
fi

if [ $# -gt 2 ]; then
    FILL_WITH=$3
else
    FILL_WITH=1
fi

i=0
while [ $i -lt $ROWS ]; do
    j=0
    while [ $j -lt $COLUMNS ]; do
        echo -n "$FILL_WITH "
        j=$((j+1))
    done
    echo ""
    i=$((i+1))
done
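# Example (hypothetical invocation; the output follows the header format
# produced above):
#   $ ./gen_square_matrix.sh 2 3 0
#   # 2 3
#   0 0 0
#   0 0 0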
uquendo/calc
test/apps/quest01-1/square-ones/gen_square_matrix.sh
Shell
gpl-2.0
517
#! /bin/sh
# Copyright (C) 2002-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# Make sure the current version can be required.

. test-init.sh

amver=$($AUTOMAKE --version | sed -e 's/.* //;1q')

# Does the extracted version number seem legit?
case $amver in
    *[0-9].[0-9]*) ;;
    *) fatal_ "couldn't extract version number from automake" ;;
esac

cat > Makefile.am << END
AUTOMAKE_OPTIONS = $amver
END

$ACLOCAL
$AUTOMAKE

:
komh/automake-os2
t/version6.sh
Shell
gpl-2.0
1,040
#!/bin/bash
# Copyright (C) 2009 Przemyslaw Pawelczyk <[email protected]>
#           (C) 2017 Jan Schulz <[email protected]>
##
## This script is licensed under the terms of the MIT license.
## https://opensource.org/licenses/MIT

# Fail on all errors
set -o pipefail

# We always start in the current user's home directory so that names always start there
cd ~

### ARGUMENT PARSING ###
SCRIPT="${0}"
DIRECTORY=$(systemd-escape --unescape -- "$2")
if [[ -z "$DIRECTORY" ]] || [[ ! -d "$DIRECTORY" ]] ; then
    echo "Need a directory name in the current users home directory as second argument. Aborting."
    exit 1
fi

if [[ -z "${1}" ]] ; then
    echo "Need a command as first argument. Aborting."
    exit 1
else
    if [[ "sync" == "${1}" ]] ; then
        COMMAND=sync
    elif [[ "listen" == "${1}" ]] ; then
        COMMAND=listen
    else
        echo "Unknown command. Aborting."
        exit 1
    fi
fi

### LOCKFILE BOILERPLATE ###
LOCKFILE="/run/user/"$(id -u)"/"$(basename "$0")"_"${DIRECTORY//\//_}""
LOCKFD=99

# PRIVATE
_lock()             { flock -"$1" "$LOCKFD"; }
_no_more_locking()  { _lock u; _lock xn && rm -f "$LOCKFILE"; }
_prepare_locking()  { eval "exec "$LOCKFD">\""$LOCKFILE"\""; trap _no_more_locking EXIT; }

# ON START
_prepare_locking

# PUBLIC
exlock_now()        { _lock xn; }  # obtain an exclusive lock immediately or fail
exlock()            { _lock x; }   # obtain an exclusive lock
shlock()            { _lock s; }   # obtain a shared lock
unlock()            { _lock u; }   # drop a lock

### SYNC SCRIPT ###
# Idea: only let one script run, but if the sync script is called a second time
# make sure we sync a second time, too
sync_directory() {
    _directory="${1}"

    reset_timer_and_exit() { echo "Retriggered google drive sync ('${_directory}')" && touch -m $LOCKFILE && exit; }

    exlock_now || reset_timer_and_exit

    if ping -c1 -W1 -q accounts.google.com >/dev/null 2>&1; then
        true # pass
    else
        echo "Google drive server not reachable, NOT syncing..."
        unlock
        exit 0
    fi

    TIME_AT_START=0
    TIME_AT_END=1
    while [[ "${TIME_AT_START}" -lt "${TIME_AT_END}" ]]; do
        echo "Syncing '${_directory}'..."
        TIME_AT_START="$(stat -c %Y "$LOCKFILE")"
        grive -p "${_directory}" 2>&1 | grep -v -E "^Reading local directories$|^Reading remote server file list$|^Synchronizing files$|^Finished!$"
        TIME_AT_END="$(stat -c %Y "$LOCKFILE")"
        echo "Sync of '${_directory}' done."
    done
    # always exit ok, so that we never go into a wrong systemd state
    unlock
    exit 0
}

### LISTEN TO CHANGES IN DIRECTORY ###
listen_directory() {
    _directory="${1}"

    type inotifywait >/dev/null 2>&1 || { echo >&2 "I require inotifywait but it's not installed. Aborting."; exit 1; }

    echo "Listening for changes in '${_directory}'"
    while true # run indefinitely
    do
        # Use a different call to not need to change exit into return
        inotifywait -q -r -e modify,attrib,close_write,move,create,delete --exclude ".grive_state|.grive" "${_directory}" > /dev/null 2>&1 && ${SCRIPT} sync $(systemd-escape "${_directory}")
        #echo ${SCRIPT} "${_directory}"
    done
    # always exit ok, so that we never go into a wrong systemd state
    exit 0
}

if [[ "${COMMAND}" == listen ]] ; then
    listen_directory "${DIRECTORY}"
else
    sync_directory "${DIRECTORY}"
fi

# always exit ok, so that we never go into a wrong systemd state
exit 0
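# Example (hypothetical one-shot sync from the shell; the directory argument
# must be systemd-escaped, exactly as the listen loop does internally when it
# re-invokes this script):
#   ./grive-sync.sh sync "$(systemd-escape "gdrive")"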
vitalif/grive2
systemd/grive-sync.sh
Shell
gpl-2.0
3,346
#!/usr/bin/env bash
# $Id: dieIfNotRoot.sh 1128 2012-08-31 15:44:45Z gab $

source $(cd $(dirname "$0") ; pwd)/bootstrap.sh
octo-online/libkoca
t/dieIfNotRoot.sh
Shell
gpl-2.0
123
#!/bin/sh
# Install the executables and libraries. No input validation is done here.
# Besides placing files in the right locations, installing here also means
# stripping debug information and setting permissions, so always keep the
# original binaries after installing; they are needed for handling the
# debug information.
# Two ways to use this script:
# - ./install_binaries_and_libraries.sh /path/to/build/dir
#   With a single argument, the build directory, install all libraries
#   and executables.
# - ./install_binaries_and_libraries.sh /path/to/src /path/to/dest
#   With more than one argument, the first argument is the source path and
#   the second the destination path, used to install a specific file.

bmy_install() {
    SRC=$1
    TARGET=$2
    echo "Installing $TARGET"
    cp $SRC $TARGET.new
    chrpath -r /home/bbs/lib $TARGET.new 1>/dev/null
    install -s -m 550 $TARGET.new $TARGET
    rm $TARGET.new
}

install_library() {
    bmy_install $1 /home/bbs/lib/$2
}

install_binary() {
    bmy_install $1 /home/bbs/bin/$2
}

install_www() {
    bmy_install $1 /home/apache/cgi-bin/bbs/$2
}

if [ "$#" -ne 1 ]; then
    # Install a single file, using absolute paths
    bmy_install $1 $2
else
    BUILD_DIR=$1
    install_library $BUILD_DIR/libytht/libytht.so libytht.so
    install_library $BUILD_DIR/libbmy/libbmy.so libbmy.so
    install_library $BUILD_DIR/libythtbbs/libythtbbs.so libythtbbs.so
    install_binary $BUILD_DIR/src/bbs/bbs bbs
    install_binary $BUILD_DIR/src/bbsd/bbsd bbsd
    install_binary $BUILD_DIR/src/bbs.chatd/bbs.chatd bbs.chatd
    install_binary $BUILD_DIR/src/bbsnet/bbsnet bbsnet
    install_binary $BUILD_DIR/src/telnet/telnet telnet
    install_binary $BUILD_DIR/src/thread/thread thread
    install_binary $BUILD_DIR/atthttpd/atthttpd atthttpd
    install_www $BUILD_DIR/nju09/www/www www
    install_binary $BUILD_DIR/local_utl/averun/averun averun
    install_binary $BUILD_DIR/local_utl/auto_rm_junk/auto_rm_junk auto_rm_junk
    install_binary $BUILD_DIR/local_utl/autoclear/autoclear autoclear
    install_binary $BUILD_DIR/local_utl/autoundeny/autoundeny autoundeny
    install_binary $BUILD_DIR/local_utl/bbslogd/bbslogd bbslogd
    install_binary $BUILD_DIR/local_utl/bbspop3d/bbspop3d bbspop3d
    install_binary $BUILD_DIR/local_utl/bbsstatlog/bbsstatlog bbsstatlog
    install_binary $BUILD_DIR/local_utl/bbsstatproclog/bbsstatproclog bbsstatproclog
    install_binary $BUILD_DIR/local_utl/bbstop/bbstop bbstop
    install_binary $BUILD_DIR/local_utl/bm/bm bm
    install_binary $BUILD_DIR/local_utl/changeboardname/changeboardname changeboardname
    install_binary $BUILD_DIR/local_utl/check_ulevel/check_ulevel check_ulevel
    install_binary $BUILD_DIR/local_utl/clear_attach/clear_attach clear_attach
    install_binary $BUILD_DIR/local_utl/clear_junk/clear_junk clear_junk
    install_binary $BUILD_DIR/local_utl/combine_arc/combine_arc combine_arc
    install_binary $BUILD_DIR/local_utl/cpersonal/cpersonal cpersonal
    install_binary $BUILD_DIR/local_utl/find_lost_mail/find_lost_mail find_lost_mail
    install_binary $BUILD_DIR/local_utl/find_rm_lost/find_rm_lost find_rm_lost
    install_binary $BUILD_DIR/local_utl/finddf/finddf finddf
    install_binary $BUILD_DIR/local_utl/fixdir/fixdir fixdir
    install_binary $BUILD_DIR/local_utl/id_boards/id_boards id_boards
    install_binary $BUILD_DIR/local_utl/makeindex3/makeindex3 makeindex3
    install_binary $BUILD_DIR/local_utl/mergeb/mergeb mergeb
    install_binary $BUILD_DIR/local_utl/nbstat/nbstat nbstat
    install_binary $BUILD_DIR/local_utl/newboards/newboards newboards
    install_binary $BUILD_DIR/local_utl/newtop10/newtop10 newtop10
    install_binary $BUILD_DIR/local_utl/postfile/postfile postfile
    #install_binary $BUILD_DIR/local_utl/printSecLastMark/printSecLastMark printSecLastMark
    #install_binary $BUILD_DIR/local_utl/printSecLastUpdate/printSecLastUpdate printSecLastUpdate
    install_binary $BUILD_DIR/local_utl/ptyexec/ptyexec ptyexec
    install_binary $BUILD_DIR/local_utl/repsync/repsync repsync
    install_binary $BUILD_DIR/local_utl/save_brc/save_brc save_brc
    install_binary $BUILD_DIR/local_utl/searchDIR/searchDIR searchDIR
    install_binary $BUILD_DIR/local_utl/searchLastMark/searchLastMark searchLastMark
    install_binary $BUILD_DIR/local_utl/selpersonal/selpersonal selpersonal
    install_binary $BUILD_DIR/local_utl/setdefaultkey/setdefaultkey setdefaultkey
    install_binary $BUILD_DIR/local_utl/sortdir/sortdir sortdir
    install_binary $BUILD_DIR/local_utl/transuu2bin/transuu2bin transuu2bin
    install_binary $BUILD_DIR/local_utl/watchman/watchman watchman
fi
bmybbs/bmybbs
install_binaries_and_libraries.sh
Shell
gpl-2.0
5,144
#!/bin/bash

# Device Info
DEVICEBASE="khadas"
BOARDFAMILY="vims"
PLATFORMREPO="https://github.com/volumio/platform-khadas.git"
BUILD="armv7"
NONSTANDARD_REPO=no # yes requires a "non_standard_repo()" function in make.sh
LBLBOOT="BOOT"
LBLIMAGE="volumio"
LBLDATA="volumio_data"

# Partition Info
BOOT_TYPE=msdos # msdos or gpt
BOOT_START=16
BOOT_END=80
IMAGE_END=3900
BOOT=/mnt/boot
BOOTDELAY=1
BOOTDEV="mmcblk1"
BOOTPART=/dev/mmcblk1p1
BOOTCONFIG=env.system.txt
TARGETBOOT="/dev/mmcblk0p1"
TARGETDEV="/dev/mmcblk0"
TARGETDATA="/dev/mmcblk0p3"
TARGETIMAGE="/dev/mmcblk0p2"
HWDEVICE=
USEKMSG="yes"
UUIDFMT="yes" # yes|no (actually, anything non-blank)
FACTORYCOPY="yes"

# Modules to load (as a blank separated string array)
MODULES="nls_cp437"

# Additional packages to install (as a blank separated string)
#PACKAGES=""

# initramfs type
RAMDISK_TYPE=image # image or gzip (ramdisk image = uInitrd, gzip compressed = volumio.initrd)

non_standard_repo() {
    :
}

fetch_bootpart_uuid() {
    echo "[info] replace BOOTPART device by ${FLASH_PART} UUID value"
    UUIDBOOT=$(blkid -s UUID -o value ${FLASH_PART})
    BOOTPART="UUID=${UUIDBOOT}"
}

is_dataquality_ok() {
    return 0
}

write_device_files() {
    cp ${PLTDIR}/${BOARDFAMILY}/boot/Image $ROOTFSMNT/boot
    cp ${PLTDIR}/${BOARDFAMILY}/boot/boot.ini $ROOTFSMNT/boot
    mkimage -A arm64 -O linux -T script -C none -a 0 -e 0 -n "aml_autoscript" -d ${PLTDIR}/${BOARDFAMILY}/boot/aml_autoscript.cmd $ROOTFSMNT/boot/aml_autoscript
    cp ${PLTDIR}/${BOARDFAMILY}/boot/env.txt $ROOTFSMNT/boot
    mkdir /mnt/volumio/rootfs/boot/dtb
    cp -R ${PLTDIR}/${BOARDFAMILY}/boot/dtb/kvim3l_linux.dtb $ROOTFSMNT/boot/dtb
}

write_device_bootloader() {
    dd if=${PLTDIR}/${BOARDFAMILY}/uboot/u-boot.VIM3L.sd.bin of=${LOOP_DEV} bs=444 count=1 conv=fsync
    dd if=${PLTDIR}/${BOARDFAMILY}/uboot/u-boot.VIM3L.sd.bin of=${LOOP_DEV} bs=512 skip=1 seek=1 conv=fsync
}

copy_device_bootloader_files() {
    mkdir /mnt/volumio/rootfs/boot/u-boot
    cp ${PLTDIR}/${BOARDFAMILY}/uboot/u-boot.VIM3L.sd.bin $ROOTFSMNT/boot/u-boot
}

write_boot_parameters() {
    echo "
BOOTARGS_USER=loglevel=0 quiet splash
bootdelay=1
bootpart=/dev/mmcblk1p1
imgpart=/dev/mmcblk1p2
datapart=/dev/mmcblk1p3
DTB=dtb/kvim3l_linux.dtb
" > $ROOTFSMNT/boot/env.system.txt
}
volumio/Build
installer/board-config/mp1/mkinstall_config.sh
Shell
gpl-2.0
2,286
#!/sbin/sh

SLOT=$(for i in `cat /proc/cmdline`; do echo $i | grep slot | dd bs=1 skip=24 2>/dev/null; done)
BOOTPATH="/dev/block/bootdevice/by-name/boot$SLOT"
/tmp/busybox dd if=$BOOTPATH of=/tmp/boot.img
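# Note (inferred, not stated in the original): "dd bs=1 skip=24" drops the
# first 24 bytes of the matched kernel cmdline token, which is the length of
# "androidboot.slot_suffix=", leaving just the suffix (e.g. "_a" or "_b")
# in $SLOT.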
mdalexca/marlin
out/kernel/slotdetect.sh
Shell
gpl-2.0
208
#!/bin/bash

set -e

cachedir=.cache
mkdir -p $cachedir

if [[ ! -d build ]]; then
    mkdir -p build
fi

libversion=2
libVFile="./libversion"
libdir="./lib"
currentversion=0
needsdownload="true"

if [ -f $libVFile ]; then
    while read line; do
        currentversion=$line
        continue
    done < $libVFile
fi

if [ $currentversion -ge $libversion ]; then
    needsdownload="false"
fi

if [ ! -d $libdir ]; then
    needsdownload="true"
fi

if [[ "$needsdownload" = "true" ]]; then
    echo "New libraries need to be downloaded, the script might ask you for sudo access password"
    rm -rf ./lib
    if [[ -f $cachedir/orctlibs.zip ]]; then
        rm -rf $cachedir/orctlibs.zip
    fi
    if [[ -d /usr/local/cross-tools/orctlibs ]]; then
        sudo rm -rf /usr/local/cross-tools/orctlibs
    fi
    if [[ -d $cachedir/orctlibs ]]; then
        rm -rf $cachedir/orctlibs
    fi
    curl https://download.openrct2.website/dev/lib/mingw -o $cachedir/orctlibs.zip
    mkdir -p $cachedir/orctlibs
    pushd $cachedir/orctlibs
        unzip -uaq ../orctlibs.zip
    popd
    sudo mkdir -p /usr/local/cross-tools/orctlibs
    mkdir -p lib
    sudo cp -rf $cachedir/orctlibs/glob/* /usr/local/cross-tools/orctlibs/.
    cp -rf $cachedir/orctlibs/local/* ./lib/.
    echo $libversion > $libVFile
fi

pushd build
    cmake -DCMAKE_TOOLCHAIN_FILE=../CMakeLists_mingw.txt -DCMAKE_BUILD_TYPE=Debug ..
    make
popd

if [[ ! -h openrct2.dll ]]; then
    ln -s build/openrct2.dll openrct2.dll
fi

if [[ -z "$DISABLE_G2_BUILD" ]]; then
    echo Building: data/g2.dat
    ./build_g2.sh > /dev/null 2>&1
fi

if [[ -t 1 ]]; then
    echo -e "\nDone! Run OpenRCT2 by typing:\n\n\033[95mwine openrct2.exe\n\033[0m"
else
    echo -e "\nDone! Run OpenRCT2 by typing:\n\nwine openrct2.exe\n"
fi
DrWeir/OpenRCT2
build.sh
Shell
gpl-3.0
1,798
#!/usr/bin/bash
#
# Shutdown the mongod gracefully.
#
mongo --quiet <<HERE >/dev/null
use admin
db.shutdownServer()
HERE
sleep 3
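# A hypothetical follow-up check (not part of the original script) to confirm
# the daemon actually exited after the grace period:
#   pgrep mongod >/dev/null && echo "mongod still running" || echo "mongod stopped"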
TrailingDots/async_py_messaging
async_py_messaging/scripts/shutdown.sh
Shell
gpl-3.0
130
#! /usr/bin/env bash
#
# This file is part of AirStashPlayer.
# Copyright (c) 2016 Wearable Inc.
#
# AirStashPlayer is based on ijkPlayer:
# Copyright (c) 2013-2014 Zhang Rui <[email protected]>
#
# Portions of ijkPlayer are based on kxmovie:
# Copyright (c) 2012 Konstantin Boukreev. All rights reserved.
#
# AirStashPlayer is free software: you can redistribute it and/or
# modify it under the terms of version 3 of the GNU Lesser General
# Public License as published by the Free Software Foundation.
#
# AirStashPlayer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with AirStashPlayer. If not, see
# <http://www.gnu.org/licenses/>.

# The original test checked ANDROID_NDK twice; ANDROID_SDK is what the
# error message asks for, so test it as well.
if [ -z "$ANDROID_NDK" -o -z "$ANDROID_SDK" ]; then
    echo "You must define ANDROID_NDK, ANDROID_SDK before starting."
    echo "They must point to your NDK and SDK directories."
    exit 1
fi

REQUEST_TARGET=$1
REQUEST_SUB_CMD=$2

ACT_ABI_32="armv5 armv7a x86"
ACT_ABI_64="armv5 armv7a arm64 x86 x86_64"
# Fixed: the original assigned the undefined $ALL_ABI_64 here, which left
# the "clean" target with an empty ABI list.
ACT_ABI_ALL=$ACT_ABI_64

UNAME_S=$(uname -s)

FF_MAKEFLAGS=
if which nproc >/dev/null
then
    FF_MAKEFLAGS=-j`nproc`
elif [ "$UNAME_S" = "Darwin" ] && which sysctl >/dev/null
then
    FF_MAKEFLAGS=-j`sysctl -n machdep.cpu.thread_count`
fi

do_sub_cmd () {
    SUB_CMD=$1

    if [ -L "./android-ndk-prof" ]; then
        rm android-ndk-prof
    fi

    if [ "$PARAM_SUB_CMD" = 'prof' ]; then
        echo 'profiler build: YES';
        ln -s ../../../../../../prof/android-ndk-profiler/jni android-ndk-prof
    else
        echo 'profiler build: NO';
        ln -s ../../../../../../prof/android-ndk-profiler-dummy/jni android-ndk-prof
    fi

    case $SUB_CMD in
        prof)
            $ANDROID_NDK/ndk-build $FF_MAKEFLAGS
        ;;
        clean)
            $ANDROID_NDK/ndk-build clean
        ;;
        rebuild)
            $ANDROID_NDK/ndk-build clean
            $ANDROID_NDK/ndk-build $FF_MAKEFLAGS
        ;;
        *)
            $ANDROID_NDK/ndk-build $FF_MAKEFLAGS
        ;;
    esac
}

do_ndk_build () {
    PARAM_TARGET=$1
    PARAM_SUB_CMD=$2

    case "$PARAM_TARGET" in
        armv5|armv7a)
            cd "player/player-$PARAM_TARGET/src/main/jni"
            do_sub_cmd $PARAM_SUB_CMD
            cd -
        ;;
        arm64|x86|x86_64)
            cd "player/player-$PARAM_TARGET/src/main/jni"
            if [ "$PARAM_SUB_CMD" = 'prof' ]; then PARAM_SUB_CMD=''; fi
            do_sub_cmd $PARAM_SUB_CMD
            cd -
        ;;
    esac
}

case "$REQUEST_TARGET" in
    "")
        do_ndk_build armv7a;
    ;;
    armv5|armv7a|arm64|x86|x86_64)
        do_ndk_build $REQUEST_TARGET $REQUEST_SUB_CMD;
    ;;
    all32)
        for ABI in $ACT_ABI_32
        do
            do_ndk_build "$ABI" $REQUEST_SUB_CMD;
        done
    ;;
    all|all64)
        for ABI in $ACT_ABI_64
        do
            do_ndk_build "$ABI" $REQUEST_SUB_CMD;
        done
    ;;
    clean)
        for ABI in $ACT_ABI_ALL
        do
            do_ndk_build "$ABI" clean;
        done
    ;;
    *)
        echo "Usage:"
        echo "  compile-player.sh armv5|armv7a|arm64|x86|x86_64"
        echo "  compile-player.sh all|all32"
        echo "  compile-player.sh all64"
        echo "  compile-player.sh clean"
    ;;
esac
AirStash/AirStashPlayer
android/compile-player.sh
Shell
gpl-3.0
3,458
#!/bin/bash

:>command.txt
filelist=`ls ~/ws/LA.UM.0.0/out/target/product/msm8996/system/lib`
for file in $filelist
do
    echo 'adb push ' $file '/system/lib' >> command.txt
done

echo ' ' >> command.txt

for file in $filelist
do
    echo 'adb push ' $file '/system/lib64' >> command.txt
done
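# Example of the generated command.txt (assumed library name libfoo.so;
# the exact spacing comes from the echo arguments above):
#   adb push  libfoo.so /system/lib
#   ...
#   adb push  libfoo.so /system/lib64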
anonymouss/configure
script/foreach.sh
Shell
gpl-3.0
289
#!/usr/bin/env sh

echo "#"
echo "# starting registry auto pusher process..."
echo "#"

set -e

VERSION=`echo $1 | cut -d: -f2 | cut -d- -f1`
BASE_IMAGE=`echo $1 | cut -d: -f1`
# Fixed: the original read the undefined $REGISTRYREGISTRY here; $REGISTRY,
# which is used for "docker login" below, appears to be what was meant.
BASE_REPO=`echo $REGISTRY | cut -d/ -f3`

if [ "$USERNAME" != "" ] || [ "$PASSWORD" != "" ]
then
    DOCKER_PASS=`echo "$PASSWORD" | base64 -d`
    docker login -u $USERNAME -p $DOCKER_PASS $REGISTRY
else
    exit 1
fi

docker pull $1

# latest image tag
docker tag $1 "$BASE_IMAGE:latest"
#docker push $BASE_REPO/$BASE_IMAGE:latest
docker push $BASE_IMAGE:latest

# versioned image tag
docker tag $1 "$BASE_IMAGE:$VERSION"
#docker push $BASE_REPO/$BASE_IMAGE:$VERSION
docker push $BASE_IMAGE:$VERSION
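# Example (hypothetical values; PASSWORD must be base64-encoded, since the
# script decodes it before "docker login"):
#   REGISTRY=registry.example.com USERNAME=ci \
#   PASSWORD=$(printf 's3cret' | base64) \
#   ./update_registry.sh registry.example.com/myapp:1.2.3-rc1
# With that argument, BASE_IMAGE=registry.example.com/myapp and VERSION=1.2.3.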
amenezes/registry-auto-pusher
update_registry.sh
Shell
gpl-3.0
682
#!/bin/bash
# params:
#   URMDB_USER
#   URMDB_PWD
#   URMDB_DBHOST
#   URMDB_DBNAME

P_SQLFILE=$1
P_LOGFILE=$2

export PGPASSWORD="$URMDB_PWD"
psql -A -a -t -d $URMDB_DBNAME -h $URMDB_DBHOST -U $URMDB_USER < $P_SQLFILE > $P_LOGFILE 2>&1
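# Example (hypothetical values for the environment parameters listed above):
#   URMDB_USER=urm URMDB_PWD=secret URMDB_DBHOST=localhost URMDB_DBNAME=urmdb \
#     ./applyscript.sh patch.sql patch.log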
releasemgn/urm
database/sql/postgres/linux/applyscript.sh
Shell
gpl-3.0
230
while true
do
    sample_data="$(rabbitmqctl list_queues name consumers messages | grep $1 | cut -f2- | sed 's/ /\t/')"
    echo -e "$(date '+%Y-%m-%d %H:%M:%S.%N')\t$sample_data"
done
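# Example (assumed invocation; $1 filters on the queue name and the output
# is tab-separated, so it can be appended to a TSV log):
#   ./monitor-rabbitmq-queue.sh notifications.info >> queue-stats.tsv
# Note the loop has no sleep, so it samples as fast as rabbitmqctl returns.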
f3flight/openstack-tools
rabbitmq/monitor-rabbitmq-queue.sh
Shell
gpl-3.0
181
sudo cp /usr/share/X11/xkb/symbols/ir ir.back && echo "OK"
sudo cp ir /usr/share/X11/xkb/symbols/ir && echo "OK"
sudo cp /usr/share/X11/xkb/rules/evdev.xml evdev.xml.back && echo "OK"
sudo cp evdev.xml /usr/share/X11/xkb/rules/evdev.xml && echo "OK"
sudo cp /usr/share/X11/xkb/rules/evdev.lst evdev.lst.back && echo "OK"
sudo cp evdev.lst /usr/share/X11/xkb/rules/evdev.lst && echo "OK"
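# To undo the changes (hypothetical restore commands based on the .back
# copies made above):
#   sudo cp ir.back /usr/share/X11/xkb/symbols/ir
#   sudo cp evdev.xml.back /usr/share/X11/xkb/rules/evdev.xml
#   sudo cp evdev.lst.back /usr/share/X11/xkb/rules/evdev.lst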
behnamhatami/Persian-Qwerty
replace.sh
Shell
gpl-3.0
387